diff --git a/Makefile b/Makefile index d3723b2f6d6c..5957afa29692 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 18 -SUBLEVEL = 14 +SUBLEVEL = 15 EXTRAVERSION = NAME = Superb Owl
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index c6ca1b9cbf71..8e236a022156 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -73,6 +73,7 @@ ifeq ($(CONFIG_PERF_EVENTS),y) endif
KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax) +KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
# GCC versions that support the "-mstrict-align" option default to allowing # unaligned accesses. While unaligned accesses are explicitly allowed in the diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c index fe1742c4ca49..1f156098a5bf 100644 --- a/arch/x86/events/intel/lbr.c +++ b/arch/x86/events/intel/lbr.c @@ -278,9 +278,9 @@ enum { };
/* - * For formats with LBR_TSX flags (e.g. LBR_FORMAT_EIP_FLAGS2), bits 61:62 in - * MSR_LAST_BRANCH_FROM_x are the TSX flags when TSX is supported, but when - * TSX is not supported they have no consistent behavior: + * For format LBR_FORMAT_EIP_FLAGS2, bits 61:62 in MSR_LAST_BRANCH_FROM_x + * are the TSX flags when TSX is supported, but when TSX is not supported + * they have no consistent behavior: * * - For wrmsr(), bits 61:62 are considered part of the sign extension. * - For HW updates (branch captures) bits 61:62 are always OFF and are not @@ -288,7 +288,7 @@ enum { * * Therefore, if: * - * 1) LBR has TSX format + * 1) LBR format LBR_FORMAT_EIP_FLAGS2 * 2) CPU has no TSX support enabled * * ... then any value passed to wrmsr() must be sign extended to 63 bits and any @@ -300,7 +300,7 @@ static inline bool lbr_from_signext_quirk_needed(void) bool tsx_support = boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM);
- return !tsx_support && x86_pmu.lbr_has_tsx; + return !tsx_support; }
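Editor's note: for intuition, the conversion this quirk demands can be written as a plain sign extension of bit 60; a minimal sketch with a hypothetical helper name (the patch itself only changes where the static branch is enabled):

/* Hypothetical helper, not from this patch: before wrmsr() on a CPU with
 * LBR_FORMAT_EIP_FLAGS2 but no TSX, replicate bit 60 into bits 63:61 so
 * the value is accepted as properly sign extended. */
static inline u64 lbr_from_sign_extend(u64 val)
{
        /* move bit 60 into the sign position, then arithmetic-shift back */
        return (u64)(((s64)(val << 3)) >> 3);
}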
static DEFINE_STATIC_KEY_FALSE(lbr_from_quirk_key); @@ -1611,9 +1611,6 @@ void intel_pmu_lbr_init_hsw(void) x86_pmu.lbr_sel_map = hsw_lbr_sel_map;
x86_get_pmu(smp_processor_id())->task_ctx_cache = create_lbr_kmem_cache(size, 0); - - if (lbr_from_signext_quirk_needed()) - static_branch_enable(&lbr_from_quirk_key); }
/* skylake */ @@ -1704,7 +1701,11 @@ void intel_pmu_lbr_init(void) switch (x86_pmu.intel_cap.lbr_format) { case LBR_FORMAT_EIP_FLAGS2: x86_pmu.lbr_has_tsx = 1; - fallthrough; + x86_pmu.lbr_from_flags = 1; + if (lbr_from_signext_quirk_needed()) + static_branch_enable(&lbr_from_quirk_key); + break; + case LBR_FORMAT_EIP_FLAGS: x86_pmu.lbr_from_flags = 1; break; diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 5d09ded0c491..49889f171e86 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -301,6 +301,7 @@ #define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */ #define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */ #define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */ +#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 10a3bfc1eb23..38a3e86e665e 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -297,6 +297,8 @@ do { \ alternative_msr_write(MSR_IA32_SPEC_CTRL, \ spec_ctrl_current() | SPEC_CTRL_IBRS, \ X86_FEATURE_USE_IBRS_FW); \ + alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB, \ + X86_FEATURE_USE_IBPB_FW); \ } while (0)
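Editor's note: at runtime the added alternative boils down to one extra MSR write before entering firmware; roughly (illustrative expansion only — the real macro uses the alternatives patching machinery rather than a branch):

/* Illustrative only: issue an IBPB before transferring control to
 * firmware when spectre_v2_select_mitigation() force-set
 * X86_FEATURE_USE_IBPB_FW (see the bugs.c hunk below). */
static __always_inline void ibpb_before_firmware_call(void)
{
        if (cpu_feature_enabled(X86_FEATURE_USE_IBPB_FW))
                native_wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
}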
#define firmware_restrict_branch_speculation_end() \ diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 46427b785bc8..d440f6726df0 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -555,7 +555,9 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end) dest = addr + insn.length + insn.immediate.value;
if (__static_call_fixup(addr, op, dest) || - WARN_ON_ONCE(dest != &__x86_return_thunk)) + WARN_ONCE(dest != &__x86_return_thunk, + "missing return thunk: %pS-%pS: %*ph", + addr, dest, 5, addr)) continue;
DPRINTK("return thunk at: %pS (%px) len: %d to: %pS", diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 0b64e894b383..8179fa4d5004 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -968,6 +968,7 @@ static inline const char *spectre_v2_module_string(void) { return ""; } #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n" #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n" #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n" +#define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
#ifdef CONFIG_BPF_SYSCALL void unpriv_ebpf_notify(int new_state) @@ -1408,6 +1409,8 @@ static void __init spectre_v2_select_mitigation(void)
case SPECTRE_V2_IBRS: setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS); + if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) + pr_warn(SPECTRE_V2_IBRS_PERF_MSG); break;
case SPECTRE_V2_LFENCE: @@ -1509,7 +1512,16 @@ static void __init spectre_v2_select_mitigation(void) * the CPU supports Enhanced IBRS, kernel might un-intentionally not * enable IBRS around firmware calls. */ - if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) { + if (boot_cpu_has_bug(X86_BUG_RETBLEED) && + (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || + boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)) { + + if (retbleed_cmd != RETBLEED_CMD_IBPB) { + setup_force_cpu_cap(X86_FEATURE_USE_IBPB_FW); + pr_info("Enabling Speculation Barrier for firmware calls\n"); + } + + } else if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) { setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW); pr_info("Enabling Restricted Speculation for firmware calls\n"); } diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c index 57ca7aa0e169..b8e26b6b5523 100644 --- a/drivers/acpi/cppc_acpi.c +++ b/drivers/acpi/cppc_acpi.c @@ -764,7 +764,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr)
if (!osc_cpc_flexible_adr_space_confirmed) { pr_debug("Flexible address space capability not supported\n"); - goto out_free; + if (!cpc_supported_by_cpu()) + goto out_free; }
addr = ioremap(gas_t->address, gas_t->bit_width/8); @@ -791,7 +792,8 @@ int acpi_cppc_processor_probe(struct acpi_processor *pr) } if (!osc_cpc_flexible_adr_space_confirmed) { pr_debug("Flexible address space capability not supported\n"); - goto out_free; + if (!cpc_supported_by_cpu()) + goto out_free; } } else { if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) { diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index 541ced27d941..de1e934a4f7e 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -446,14 +446,93 @@ static const struct mhi_pci_dev_info mhi_sierra_em919x_info = { .sideband_wake = false, };
+static const struct mhi_channel_config mhi_telit_fn980_hw_v1_channels[] = { + MHI_CHANNEL_CONFIG_UL(14, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_DL(15, "QMI", 32, 0), + MHI_CHANNEL_CONFIG_UL(20, "IPCR", 16, 0), + MHI_CHANNEL_CONFIG_DL_AUTOQUEUE(21, "IPCR", 16, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0", 128, 1), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0", 128, 2), +}; + +static struct mhi_event_config mhi_telit_fn980_hw_v1_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 128), + MHI_EVENT_CONFIG_HW_DATA(1, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(2, 2048, 101) +}; + +static struct mhi_controller_config modem_telit_fn980_hw_v1_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_telit_fn980_hw_v1_channels), + .ch_cfg = mhi_telit_fn980_hw_v1_channels, + .num_events = ARRAY_SIZE(mhi_telit_fn980_hw_v1_events), + .event_cfg = mhi_telit_fn980_hw_v1_events, +}; + +static const struct mhi_pci_dev_info mhi_telit_fn980_hw_v1_info = { + .name = "telit-fn980-hwv1", + .fw = "qcom/sdx55m/sbl1.mbn", + .edl = "qcom/sdx55m/edl.mbn", + .config = &modem_telit_fn980_hw_v1_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .mru_default = 32768, + .sideband_wake = false, +}; + +static const struct mhi_channel_config mhi_telit_fn990_channels[] = { + MHI_CHANNEL_CONFIG_UL_SBL(2, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_DL_SBL(3, "SAHARA", 32, 0), + MHI_CHANNEL_CONFIG_UL(4, "DIAG", 64, 1), + MHI_CHANNEL_CONFIG_DL(5, "DIAG", 64, 1), + MHI_CHANNEL_CONFIG_UL(12, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_DL(13, "MBIM", 32, 0), + MHI_CHANNEL_CONFIG_UL(32, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_DL(33, "DUN", 32, 0), + MHI_CHANNEL_CONFIG_HW_UL(100, "IP_HW0_MBIM", 128, 2), + MHI_CHANNEL_CONFIG_HW_DL(101, "IP_HW0_MBIM", 128, 3), +}; + +static struct mhi_event_config mhi_telit_fn990_events[] = { + MHI_EVENT_CONFIG_CTRL(0, 128), + MHI_EVENT_CONFIG_DATA(1, 128), + MHI_EVENT_CONFIG_HW_DATA(2, 1024, 100), + MHI_EVENT_CONFIG_HW_DATA(3, 2048, 101) +}; + +static const struct mhi_controller_config modem_telit_fn990_config = { + .max_channels = 128, + .timeout_ms = 20000, + .num_channels = ARRAY_SIZE(mhi_telit_fn990_channels), + .ch_cfg = mhi_telit_fn990_channels, + .num_events = ARRAY_SIZE(mhi_telit_fn990_events), + .event_cfg = mhi_telit_fn990_events, +}; + +static const struct mhi_pci_dev_info mhi_telit_fn990_info = { + .name = "telit-fn990", + .config = &modem_telit_fn990_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, + .mru_default = 32768, +}; + static const struct pci_device_id mhi_pci_id_table[] = { /* EM919x (sdx55), use the same vid:pid as qcom-sdx55m */ { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x18d7, 0x0200), .driver_data = (kernel_ulong_t) &mhi_sierra_em919x_info }, + /* Telit FN980 hardware revision v1 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0306, 0x1C5D, 0x2000), + .driver_data = (kernel_ulong_t) &mhi_telit_fn980_hw_v1_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0306), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx55_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0304), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx24_info }, + /* Telit FN990 */ + { PCI_DEVICE_SUB(PCI_VENDOR_ID_QCOM, 0x0308, 0x1c5d, 0x2010), + .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, { PCI_DEVICE(0x1eac, 0x1001), /* EM120R-GL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, { PCI_DEVICE(0x1eac, 0x1002), /* EM160R-GL (sdx24) */ diff --git a/drivers/clk/clk-lan966x.c b/drivers/clk/clk-lan966x.c index d1535ac13e89..81cb90955d68 100644 
--- a/drivers/clk/clk-lan966x.c +++ b/drivers/clk/clk-lan966x.c @@ -213,7 +213,7 @@ static int lan966x_gate_clk_register(struct device *dev,
hw_data->hws[i] = devm_clk_hw_register_gate(dev, clk_gate_desc[idx].name, - "lan966x", 0, base, + "lan966x", 0, gate_base, clk_gate_desc[idx].bit_idx, 0, &clk_gate_lock);
diff --git a/drivers/crypto/qat/qat_4xxx/adf_drv.c b/drivers/crypto/qat/qat_4xxx/adf_drv.c index fa4c350c1bf9..a6c78b9c730b 100644 --- a/drivers/crypto/qat/qat_4xxx/adf_drv.c +++ b/drivers/crypto/qat/qat_4xxx/adf_drv.c @@ -75,13 +75,6 @@ static int adf_crypto_dev_config(struct adf_accel_dev *accel_dev) if (ret) goto err;
- /* Temporarily set the number of crypto instances to zero to avoid - * registering the crypto algorithms. - * This will be removed when the algorithms will support the - * CRYPTO_TFM_REQ_MAY_BACKLOG flag - */ - instances = 0; - for (i = 0; i < instances; i++) { val = i; bank = i * 2; diff --git a/drivers/crypto/qat/qat_common/Makefile b/drivers/crypto/qat/qat_common/Makefile index f25a6c8edfc7..04f058acc4d3 100644 --- a/drivers/crypto/qat/qat_common/Makefile +++ b/drivers/crypto/qat/qat_common/Makefile @@ -16,6 +16,7 @@ intel_qat-objs := adf_cfg.o \ qat_crypto.o \ qat_algs.o \ qat_asym_algs.o \ + qat_algs_send.o \ qat_uclo.o \ qat_hal.o
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c index 8ba28409fb74..630d0483c4e0 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.c +++ b/drivers/crypto/qat/qat_common/adf_transport.c @@ -8,6 +8,9 @@ #include "adf_cfg.h" #include "adf_common_drv.h"
+#define ADF_MAX_RING_THRESHOLD 80 +#define ADF_PERCENT(tot, percent) (((tot) * (percent)) / 100) + static inline u32 adf_modulo(u32 data, u32 shift) { u32 div = data >> shift; @@ -77,6 +80,11 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring) bank->irq_mask); }
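Editor's note: the two macros above define the backlog watermark; a worked sketch of the arithmetic with an assumed ring size:

/* Worked example, ring size assumed: a ring sized for 256 in-flight
 * requests gets threshold = ADF_PERCENT(256, 80) = (256 * 80) / 100 = 204,
 * so adf_ring_nearly_full() (added just below) reports true once more
 * than 204 requests are outstanding. */
static bool ring_nearly_full_example(atomic_t *inflights)
{
        int threshold = ADF_PERCENT(256, ADF_MAX_RING_THRESHOLD); /* 204 */

        return atomic_read(inflights) > threshold;
}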
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring) +{ + return atomic_read(ring->inflights) > ring->threshold; +} + int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg) { struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev); @@ -217,6 +225,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, struct adf_etr_bank_data *bank; struct adf_etr_ring_data *ring; char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES]; + int max_inflights; u32 ring_num; int ret;
@@ -263,6 +272,8 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, ring->ring_size = adf_verify_ring_size(msg_size, num_msgs); ring->head = 0; ring->tail = 0; + max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size); + ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD); atomic_set(ring->inflights, 0); ret = adf_init_ring(ring); if (ret) diff --git a/drivers/crypto/qat/qat_common/adf_transport.h b/drivers/crypto/qat/qat_common/adf_transport.h index 2c95f1697c76..e6ef6f9b7691 100644 --- a/drivers/crypto/qat/qat_common/adf_transport.h +++ b/drivers/crypto/qat/qat_common/adf_transport.h @@ -14,6 +14,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section, const char *ring_name, adf_callback_fn callback, int poll_mode, struct adf_etr_ring_data **ring_ptr);
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring); int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg); void adf_remove_ring(struct adf_etr_ring_data *ring); #endif diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h index 501bcf0f1809..8b2c92ba7ca1 100644 --- a/drivers/crypto/qat/qat_common/adf_transport_internal.h +++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h @@ -22,6 +22,7 @@ struct adf_etr_ring_data { spinlock_t lock; /* protects ring data struct */ u16 head; u16 tail; + u32 threshold; u8 ring_number; u8 ring_size; u8 msg_size; diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c index f998ed58457c..873533dc43a7 100644 --- a/drivers/crypto/qat/qat_common/qat_algs.c +++ b/drivers/crypto/qat/qat_common/qat_algs.c @@ -17,7 +17,7 @@ #include <crypto/xts.h> #include <linux/dma-mapping.h> #include "adf_accel_devices.h" -#include "adf_transport.h" +#include "qat_algs_send.h" #include "adf_common_drv.h" #include "qat_crypto.h" #include "icp_qat_hw.h" @@ -46,19 +46,6 @@ static DEFINE_MUTEX(algs_lock); static unsigned int active_devs;
-struct qat_alg_buf { - u32 len; - u32 resrvd; - u64 addr; -} __packed; - -struct qat_alg_buf_list { - u64 resrvd; - u32 num_bufs; - u32 num_mapped_bufs; - struct qat_alg_buf bufers[]; -} __packed __aligned(64); - /* Common content descriptor */ struct qat_alg_cd { union { @@ -693,7 +680,10 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, bl->bufers[i].len, DMA_BIDIRECTIONAL);
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE); - kfree(bl); + + if (!qat_req->buf.sgl_src_valid) + kfree(bl); + if (blp != blpout) { /* If out of place operation dma unmap only data */ int bufless = blout->num_bufs - blout->num_mapped_bufs; @@ -704,7 +694,9 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst, DMA_BIDIRECTIONAL); } dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE); - kfree(blout); + + if (!qat_req->buf.sgl_dst_valid) + kfree(blout); } }
@@ -721,15 +713,24 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, dma_addr_t blp = DMA_MAPPING_ERROR; dma_addr_t bloutp = DMA_MAPPING_ERROR; struct scatterlist *sg; - size_t sz_out, sz = struct_size(bufl, bufers, n + 1); + size_t sz_out, sz = struct_size(bufl, bufers, n); + int node = dev_to_node(&GET_DEV(inst->accel_dev));
if (unlikely(!n)) return -EINVAL;
- bufl = kzalloc_node(sz, GFP_ATOMIC, - dev_to_node(&GET_DEV(inst->accel_dev))); - if (unlikely(!bufl)) - return -ENOMEM; + qat_req->buf.sgl_src_valid = false; + qat_req->buf.sgl_dst_valid = false; + + if (n > QAT_MAX_BUFF_DESC) { + bufl = kzalloc_node(sz, GFP_ATOMIC, node); + if (unlikely(!bufl)) + return -ENOMEM; + } else { + bufl = &qat_req->buf.sgl_src.sgl_hdr; + memset(bufl, 0, sizeof(struct qat_alg_buf_list)); + qat_req->buf.sgl_src_valid = true; + }
for_each_sg(sgl, sg, n, i) bufl->bufers[i].addr = DMA_MAPPING_ERROR; @@ -760,12 +761,18 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, struct qat_alg_buf *bufers;
n = sg_nents(sglout); - sz_out = struct_size(buflout, bufers, n + 1); + sz_out = struct_size(buflout, bufers, n); sg_nctr = 0; - buflout = kzalloc_node(sz_out, GFP_ATOMIC, - dev_to_node(&GET_DEV(inst->accel_dev))); - if (unlikely(!buflout)) - goto err_in; + + if (n > QAT_MAX_BUFF_DESC) { + buflout = kzalloc_node(sz_out, GFP_ATOMIC, node); + if (unlikely(!buflout)) + goto err_in; + } else { + buflout = &qat_req->buf.sgl_dst.sgl_hdr; + memset(buflout, 0, sizeof(struct qat_alg_buf_list)); + qat_req->buf.sgl_dst_valid = true; + }
bufers = buflout->bufers; for_each_sg(sglout, sg, n, i) @@ -810,7 +817,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, dma_unmap_single(dev, buflout->bufers[i].addr, buflout->bufers[i].len, DMA_BIDIRECTIONAL); - kfree(buflout); + + if (!qat_req->buf.sgl_dst_valid) + kfree(buflout);
err_in: if (!dma_mapping_error(dev, blp)) @@ -823,7 +832,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst, bufl->bufers[i].len, DMA_BIDIRECTIONAL);
- kfree(bufl); + if (!qat_req->buf.sgl_src_valid) + kfree(bufl);
dev_err(dev, "Failed to map buf for dma\n"); return -ENOMEM; @@ -925,8 +935,25 @@ void qat_alg_callback(void *resp) struct icp_qat_fw_la_resp *qat_resp = resp; struct qat_crypto_request *qat_req = (void *)(__force long)qat_resp->opaque_data; + struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;
qat_req->cb(qat_resp, qat_req); + + qat_alg_send_backlog(backlog); +} + +static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req, + struct qat_crypto_instance *inst, + struct crypto_async_request *base) +{ + struct qat_alg_req *alg_req = &qat_req->alg_req; + + alg_req->fw_req = (u32 *)&qat_req->req; + alg_req->tx_ring = inst->sym_tx; + alg_req->base = base; + alg_req->backlog = &inst->backlog; + + return qat_alg_send_message(alg_req); }
static int qat_alg_aead_dec(struct aead_request *areq) @@ -939,7 +966,7 @@ static int qat_alg_aead_dec(struct aead_request *areq) struct icp_qat_fw_la_auth_req_params *auth_param; struct icp_qat_fw_la_bulk_req *msg; int digst_size = crypto_aead_authsize(aead_tfm); - int ret, ctr = 0; + int ret; u32 cipher_len;
cipher_len = areq->cryptlen - digst_size; @@ -965,15 +992,12 @@ static int qat_alg_aead_dec(struct aead_request *areq) auth_param = (void *)((u8 *)cipher_param + sizeof(*cipher_param)); auth_param->auth_off = 0; auth_param->auth_len = areq->assoclen + cipher_param->cipher_length; - do { - ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); - } while (ret == -EAGAIN && ctr++ < 10);
- if (ret == -EAGAIN) { + ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base); + if (ret == -ENOSPC) qat_alg_free_bufl(ctx->inst, qat_req); - return -EBUSY; - } - return -EINPROGRESS; + + return ret; }
static int qat_alg_aead_enc(struct aead_request *areq) @@ -986,7 +1010,7 @@ static int qat_alg_aead_enc(struct aead_request *areq) struct icp_qat_fw_la_auth_req_params *auth_param; struct icp_qat_fw_la_bulk_req *msg; u8 *iv = areq->iv; - int ret, ctr = 0; + int ret;
if (areq->cryptlen % AES_BLOCK_SIZE != 0) return -EINVAL; @@ -1013,15 +1037,11 @@ static int qat_alg_aead_enc(struct aead_request *areq) auth_param->auth_off = 0; auth_param->auth_len = areq->assoclen + areq->cryptlen;
- do { - ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); - } while (ret == -EAGAIN && ctr++ < 10); - - if (ret == -EAGAIN) { + ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base); + if (ret == -ENOSPC) qat_alg_free_bufl(ctx->inst, qat_req); - return -EBUSY; - } - return -EINPROGRESS; + + return ret; }
static int qat_alg_skcipher_rekey(struct qat_alg_skcipher_ctx *ctx, @@ -1174,7 +1194,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req) struct qat_crypto_request *qat_req = skcipher_request_ctx(req); struct icp_qat_fw_la_cipher_req_params *cipher_param; struct icp_qat_fw_la_bulk_req *msg; - int ret, ctr = 0; + int ret;
if (req->cryptlen == 0) return 0; @@ -1198,15 +1218,11 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
qat_alg_set_req_iv(qat_req);
- do { - ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); - } while (ret == -EAGAIN && ctr++ < 10); - - if (ret == -EAGAIN) { + ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base); + if (ret == -ENOSPC) qat_alg_free_bufl(ctx->inst, qat_req); - return -EBUSY; - } - return -EINPROGRESS; + + return ret; }
static int qat_alg_skcipher_blk_encrypt(struct skcipher_request *req) @@ -1243,7 +1259,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req) struct qat_crypto_request *qat_req = skcipher_request_ctx(req); struct icp_qat_fw_la_cipher_req_params *cipher_param; struct icp_qat_fw_la_bulk_req *msg; - int ret, ctr = 0; + int ret;
if (req->cryptlen == 0) return 0; @@ -1268,15 +1284,11 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req) qat_alg_set_req_iv(qat_req); qat_alg_update_iv(qat_req);
- do { - ret = adf_send_message(ctx->inst->sym_tx, (u32 *)msg); - } while (ret == -EAGAIN && ctr++ < 10); - - if (ret == -EAGAIN) { + ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base); + if (ret == -ENOSPC) qat_alg_free_bufl(ctx->inst, qat_req); - return -EBUSY; - } - return -EINPROGRESS; + + return ret; }
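Editor's note: all four symmetric paths above now share one contract with the send helper introduced in qat_algs_send.c below: -EINPROGRESS means the request reached the hardware ring, -EBUSY means it was parked on the instance backlog (only possible for CRYPTO_TFM_REQ_MAY_BACKLOG requests), and -ENOSPC means it could not be queued at all, so the caller unmaps its buffers. A caller-side sketch (function names hypothetical):

/* Hypothetical caller skeleton mirroring the converted enc/dec paths. */
static int example_submit(struct qat_alg_req *req, void (*cleanup)(void *),
                          void *priv)
{
        int ret = qat_alg_send_message(req);

        if (ret == -ENOSPC)     /* not queued anywhere: undo DMA mappings */
                cleanup(priv);

        /* -EINPROGRESS: on the HW ring; -EBUSY: on the backlog, resent
         * later from qat_alg_send_backlog() in the response callback. */
        return ret;
}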
static int qat_alg_skcipher_blk_decrypt(struct skcipher_request *req) diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.c b/drivers/crypto/qat/qat_common/qat_algs_send.c new file mode 100644 index 000000000000..ff5b4347f783 --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_algs_send.c @@ -0,0 +1,86 @@ +// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) +/* Copyright(c) 2022 Intel Corporation */ +#include "adf_transport.h" +#include "qat_algs_send.h" +#include "qat_crypto.h" + +#define ADF_MAX_RETRIES 20 + +static int qat_alg_send_message_retry(struct qat_alg_req *req) +{ + int ret = 0, ctr = 0; + + do { + ret = adf_send_message(req->tx_ring, req->fw_req); + } while (ret == -EAGAIN && ctr++ < ADF_MAX_RETRIES); + + if (ret == -EAGAIN) + return -ENOSPC; + + return -EINPROGRESS; +} + +void qat_alg_send_backlog(struct qat_instance_backlog *backlog) +{ + struct qat_alg_req *req, *tmp; + + spin_lock_bh(&backlog->lock); + list_for_each_entry_safe(req, tmp, &backlog->list, list) { + if (adf_send_message(req->tx_ring, req->fw_req)) { + /* The HW ring is full. Do nothing. + * qat_alg_send_backlog() will be invoked again by + * another callback. + */ + break; + } + list_del(&req->list); + req->base->complete(req->base, -EINPROGRESS); + } + spin_unlock_bh(&backlog->lock); +} + +static void qat_alg_backlog_req(struct qat_alg_req *req, + struct qat_instance_backlog *backlog) +{ + INIT_LIST_HEAD(&req->list); + + spin_lock_bh(&backlog->lock); + list_add_tail(&req->list, &backlog->list); + spin_unlock_bh(&backlog->lock); +} + +static int qat_alg_send_message_maybacklog(struct qat_alg_req *req) +{ + struct qat_instance_backlog *backlog = req->backlog; + struct adf_etr_ring_data *tx_ring = req->tx_ring; + u32 *fw_req = req->fw_req; + + /* If any request is already backlogged, then add to backlog list */ + if (!list_empty(&backlog->list)) + goto enqueue; + + /* If ring is nearly full, then add to backlog list */ + if (adf_ring_nearly_full(tx_ring)) + goto enqueue; + + /* If adding request to HW ring fails, then add to backlog list */ + if (adf_send_message(tx_ring, fw_req)) + goto enqueue; + + return -EINPROGRESS; + +enqueue: + qat_alg_backlog_req(req, backlog); + + return -EBUSY; +} + +int qat_alg_send_message(struct qat_alg_req *req) +{ + u32 flags = req->base->flags; + + if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG) + return qat_alg_send_message_maybacklog(req); + else + return qat_alg_send_message_retry(req); +} diff --git a/drivers/crypto/qat/qat_common/qat_algs_send.h b/drivers/crypto/qat/qat_common/qat_algs_send.h new file mode 100644 index 000000000000..5ce9f4f69d8f --- /dev/null +++ b/drivers/crypto/qat/qat_common/qat_algs_send.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only) */ +/* Copyright(c) 2022 Intel Corporation */ +#ifndef QAT_ALGS_SEND_H +#define QAT_ALGS_SEND_H + +#include "qat_crypto.h" + +int qat_alg_send_message(struct qat_alg_req *req); +void qat_alg_send_backlog(struct qat_instance_backlog *backlog); + +#endif diff --git a/drivers/crypto/qat/qat_common/qat_asym_algs.c b/drivers/crypto/qat/qat_common/qat_asym_algs.c index b0b78445418b..7173a2a0a484 100644 --- a/drivers/crypto/qat/qat_common/qat_asym_algs.c +++ b/drivers/crypto/qat/qat_common/qat_asym_algs.c @@ -12,6 +12,7 @@ #include <crypto/scatterwalk.h> #include "icp_qat_fw_pke.h" #include "adf_accel_devices.h" +#include "qat_algs_send.h" #include "adf_transport.h" #include "adf_common_drv.h" #include "qat_crypto.h" @@ -135,8 +136,23 @@ struct qat_asym_request { } areq; int err; void 
(*cb)(struct icp_qat_fw_pke_resp *resp); + struct qat_alg_req alg_req; } __aligned(64);
+static int qat_alg_send_asym_message(struct qat_asym_request *qat_req, + struct qat_crypto_instance *inst, + struct crypto_async_request *base) +{ + struct qat_alg_req *alg_req = &qat_req->alg_req; + + alg_req->fw_req = (u32 *)&qat_req->req; + alg_req->tx_ring = inst->pke_tx; + alg_req->base = base; + alg_req->backlog = &inst->backlog; + + return qat_alg_send_message(alg_req); +} + static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp) { struct qat_asym_request *req = (void *)(__force long)resp->opaque; @@ -148,26 +164,21 @@ static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp) err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
if (areq->src) { - if (req->src_align) - dma_free_coherent(dev, req->ctx.dh->p_size, - req->src_align, req->in.dh.in.b); - else - dma_unmap_single(dev, req->in.dh.in.b, - req->ctx.dh->p_size, DMA_TO_DEVICE); + dma_unmap_single(dev, req->in.dh.in.b, req->ctx.dh->p_size, + DMA_TO_DEVICE); + kfree_sensitive(req->src_align); }
areq->dst_len = req->ctx.dh->p_size; if (req->dst_align) { scatterwalk_map_and_copy(req->dst_align, areq->dst, 0, areq->dst_len, 1); - - dma_free_coherent(dev, req->ctx.dh->p_size, req->dst_align, - req->out.dh.r); - } else { - dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size, - DMA_FROM_DEVICE); + kfree_sensitive(req->dst_align); }
+ dma_unmap_single(dev, req->out.dh.r, req->ctx.dh->p_size, + DMA_FROM_DEVICE); + dma_unmap_single(dev, req->phy_in, sizeof(struct qat_dh_input_params), DMA_TO_DEVICE); dma_unmap_single(dev, req->phy_out, @@ -213,8 +224,9 @@ static int qat_dh_compute_value(struct kpp_request *req) struct qat_asym_request *qat_req = PTR_ALIGN(kpp_request_ctx(req), 64); struct icp_qat_fw_pke_request *msg = &qat_req->req; - int ret, ctr = 0; + int ret; int n_input_params = 0; + u8 *vaddr;
if (unlikely(!ctx->xa)) return -EINVAL; @@ -223,6 +235,10 @@ static int qat_dh_compute_value(struct kpp_request *req) req->dst_len = ctx->p_size; return -EOVERFLOW; } + + if (req->src_len > ctx->p_size) + return -EINVAL; + memset(msg, '\0', sizeof(*msg)); ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, ICP_QAT_FW_COMN_REQ_FLAG_SET); @@ -271,27 +287,24 @@ static int qat_dh_compute_value(struct kpp_request *req) */ if (sg_is_last(req->src) && req->src_len == ctx->p_size) { qat_req->src_align = NULL; - qat_req->in.dh.in.b = dma_map_single(dev, - sg_virt(req->src), - req->src_len, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, - qat_req->in.dh.in.b))) - return ret; - + vaddr = sg_virt(req->src); } else { int shift = ctx->p_size - req->src_len;
- qat_req->src_align = dma_alloc_coherent(dev, - ctx->p_size, - &qat_req->in.dh.in.b, - GFP_KERNEL); + qat_req->src_align = kzalloc(ctx->p_size, GFP_KERNEL); if (unlikely(!qat_req->src_align)) return ret;
scatterwalk_map_and_copy(qat_req->src_align + shift, req->src, 0, req->src_len, 0); + + vaddr = qat_req->src_align; } + + qat_req->in.dh.in.b = dma_map_single(dev, vaddr, ctx->p_size, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, qat_req->in.dh.in.b))) + goto unmap_src; } /* * dst can be of any size in valid range, but HW expects it to be the @@ -302,20 +315,18 @@ static int qat_dh_compute_value(struct kpp_request *req) */ if (sg_is_last(req->dst) && req->dst_len == ctx->p_size) { qat_req->dst_align = NULL; - qat_req->out.dh.r = dma_map_single(dev, sg_virt(req->dst), - req->dst_len, - DMA_FROM_DEVICE); - - if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r))) - goto unmap_src; - + vaddr = sg_virt(req->dst); } else { - qat_req->dst_align = dma_alloc_coherent(dev, ctx->p_size, - &qat_req->out.dh.r, - GFP_KERNEL); + qat_req->dst_align = kzalloc(ctx->p_size, GFP_KERNEL); if (unlikely(!qat_req->dst_align)) goto unmap_src; + + vaddr = qat_req->dst_align; } + qat_req->out.dh.r = dma_map_single(dev, vaddr, ctx->p_size, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, qat_req->out.dh.r))) + goto unmap_dst;
qat_req->in.dh.in_tab[n_input_params] = 0; qat_req->out.dh.out_tab[1] = 0; @@ -338,13 +349,13 @@ static int qat_dh_compute_value(struct kpp_request *req) msg->input_param_count = n_input_params; msg->output_param_count = 1;
- do { - ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); - } while (ret == -EBUSY && ctr++ < 100); + ret = qat_alg_send_asym_message(qat_req, inst, &req->base); + if (ret == -ENOSPC) + goto unmap_all;
- if (!ret) - return -EINPROGRESS; + return ret;
+unmap_all: if (!dma_mapping_error(dev, qat_req->phy_out)) dma_unmap_single(dev, qat_req->phy_out, sizeof(struct qat_dh_output_params), @@ -355,23 +366,17 @@ static int qat_dh_compute_value(struct kpp_request *req) sizeof(struct qat_dh_input_params), DMA_TO_DEVICE); unmap_dst: - if (qat_req->dst_align) - dma_free_coherent(dev, ctx->p_size, qat_req->dst_align, - qat_req->out.dh.r); - else - if (!dma_mapping_error(dev, qat_req->out.dh.r)) - dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size, - DMA_FROM_DEVICE); + if (!dma_mapping_error(dev, qat_req->out.dh.r)) + dma_unmap_single(dev, qat_req->out.dh.r, ctx->p_size, + DMA_FROM_DEVICE); + kfree_sensitive(qat_req->dst_align); unmap_src: if (req->src) { - if (qat_req->src_align) - dma_free_coherent(dev, ctx->p_size, qat_req->src_align, - qat_req->in.dh.in.b); - else - if (!dma_mapping_error(dev, qat_req->in.dh.in.b)) - dma_unmap_single(dev, qat_req->in.dh.in.b, - ctx->p_size, - DMA_TO_DEVICE); + if (!dma_mapping_error(dev, qat_req->in.dh.in.b)) + dma_unmap_single(dev, qat_req->in.dh.in.b, + ctx->p_size, + DMA_TO_DEVICE); + kfree_sensitive(qat_req->src_align); } return ret; } @@ -420,14 +425,17 @@ static int qat_dh_set_params(struct qat_dh_ctx *ctx, struct dh *params) static void qat_dh_clear_ctx(struct device *dev, struct qat_dh_ctx *ctx) { if (ctx->g) { + memset(ctx->g, 0, ctx->p_size); dma_free_coherent(dev, ctx->p_size, ctx->g, ctx->dma_g); ctx->g = NULL; } if (ctx->xa) { + memset(ctx->xa, 0, ctx->p_size); dma_free_coherent(dev, ctx->p_size, ctx->xa, ctx->dma_xa); ctx->xa = NULL; } if (ctx->p) { + memset(ctx->p, 0, ctx->p_size); dma_free_coherent(dev, ctx->p_size, ctx->p, ctx->dma_p); ctx->p = NULL; } @@ -510,25 +518,22 @@ static void qat_rsa_cb(struct icp_qat_fw_pke_resp *resp)
err = (err == ICP_QAT_FW_COMN_STATUS_FLAG_OK) ? 0 : -EINVAL;
- if (req->src_align) - dma_free_coherent(dev, req->ctx.rsa->key_sz, req->src_align, - req->in.rsa.enc.m); - else - dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz, - DMA_TO_DEVICE); + kfree_sensitive(req->src_align); + + dma_unmap_single(dev, req->in.rsa.enc.m, req->ctx.rsa->key_sz, + DMA_TO_DEVICE);
areq->dst_len = req->ctx.rsa->key_sz; if (req->dst_align) { scatterwalk_map_and_copy(req->dst_align, areq->dst, 0, areq->dst_len, 1);
- dma_free_coherent(dev, req->ctx.rsa->key_sz, req->dst_align, - req->out.rsa.enc.c); - } else { - dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz, - DMA_FROM_DEVICE); + kfree_sensitive(req->dst_align); }
+ dma_unmap_single(dev, req->out.rsa.enc.c, req->ctx.rsa->key_sz, + DMA_FROM_DEVICE); + dma_unmap_single(dev, req->phy_in, sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); dma_unmap_single(dev, req->phy_out, @@ -542,8 +547,11 @@ void qat_alg_asym_callback(void *_resp) { struct icp_qat_fw_pke_resp *resp = _resp; struct qat_asym_request *areq = (void *)(__force long)resp->opaque; + struct qat_instance_backlog *backlog = areq->alg_req.backlog;
areq->cb(resp); + + qat_alg_send_backlog(backlog); }
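Editor's note: the conversions in qat_dh_cb()/qat_rsa_cb() and their submit paths all follow one pattern: replace dma_alloc_coherent() with kzalloc() plus streaming dma_map_single(), and free the bounce buffer with kfree_sensitive() so key material is zeroized. Reduced to its shape (identifiers hypothetical, unwind trimmed):

/* Sketch of the recurring conversion: bounce-buffer only when the source
 * is fragmented or shorter than the key size, then streaming-map. */
static u8 *map_padded_src(struct device *dev, struct scatterlist *src,
                          unsigned int src_len, unsigned int key_sz,
                          dma_addr_t *dma, u8 **bounce)
{
        u8 *vaddr;

        if (sg_is_last(src) && src_len == key_sz) {
                *bounce = NULL;
                vaddr = sg_virt(src);           /* map in place */
        } else {
                int shift = key_sz - src_len;   /* left-pad with zeroes */

                *bounce = kzalloc(key_sz, GFP_KERNEL);
                if (!*bounce)
                        return NULL;
                scatterwalk_map_and_copy(*bounce + shift, src, 0, src_len, 0);
                vaddr = *bounce;
        }

        *dma = dma_map_single(dev, vaddr, key_sz, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *dma)) {
                kfree_sensitive(*bounce);
                return NULL;
        }
        return vaddr;
}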
#define PKE_RSA_EP_512 0x1c161b21 @@ -642,7 +650,8 @@ static int qat_rsa_enc(struct akcipher_request *req) struct qat_asym_request *qat_req = PTR_ALIGN(akcipher_request_ctx(req), 64); struct icp_qat_fw_pke_request *msg = &qat_req->req; - int ret, ctr = 0; + u8 *vaddr; + int ret;
if (unlikely(!ctx->n || !ctx->e)) return -EINVAL; @@ -651,6 +660,10 @@ static int qat_rsa_enc(struct akcipher_request *req) req->dst_len = ctx->key_sz; return -EOVERFLOW; } + + if (req->src_len > ctx->key_sz) + return -EINVAL; + memset(msg, '\0', sizeof(*msg)); ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, ICP_QAT_FW_COMN_REQ_FLAG_SET); @@ -679,40 +692,39 @@ static int qat_rsa_enc(struct akcipher_request *req) */ if (sg_is_last(req->src) && req->src_len == ctx->key_sz) { qat_req->src_align = NULL; - qat_req->in.rsa.enc.m = dma_map_single(dev, sg_virt(req->src), - req->src_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m))) - return ret; - + vaddr = sg_virt(req->src); } else { int shift = ctx->key_sz - req->src_len;
- qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, - &qat_req->in.rsa.enc.m, - GFP_KERNEL); + qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL); if (unlikely(!qat_req->src_align)) return ret;
scatterwalk_map_and_copy(qat_req->src_align + shift, req->src, 0, req->src_len, 0); + vaddr = qat_req->src_align; } - if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { - qat_req->dst_align = NULL; - qat_req->out.rsa.enc.c = dma_map_single(dev, sg_virt(req->dst), - req->dst_len, - DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c))) - goto unmap_src; + qat_req->in.rsa.enc.m = dma_map_single(dev, vaddr, ctx->key_sz, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.enc.m))) + goto unmap_src;
+ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { + qat_req->dst_align = NULL; + vaddr = sg_virt(req->dst); } else { - qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, - &qat_req->out.rsa.enc.c, - GFP_KERNEL); + qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL); if (unlikely(!qat_req->dst_align)) goto unmap_src; - + vaddr = qat_req->dst_align; } + + qat_req->out.rsa.enc.c = dma_map_single(dev, vaddr, ctx->key_sz, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.enc.c))) + goto unmap_dst; + qat_req->in.rsa.in_tab[3] = 0; qat_req->out.rsa.out_tab[1] = 0; qat_req->phy_in = dma_map_single(dev, &qat_req->in.rsa.enc.m, @@ -732,13 +744,14 @@ static int qat_rsa_enc(struct akcipher_request *req) msg->pke_mid.opaque = (u64)(__force long)qat_req; msg->input_param_count = 3; msg->output_param_count = 1; - do { - ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); - } while (ret == -EBUSY && ctr++ < 100);
- if (!ret) - return -EINPROGRESS; + ret = qat_alg_send_asym_message(qat_req, inst, &req->base); + if (ret == -ENOSPC) + goto unmap_all;
+ return ret; + +unmap_all: if (!dma_mapping_error(dev, qat_req->phy_out)) dma_unmap_single(dev, qat_req->phy_out, sizeof(struct qat_rsa_output_params), @@ -749,21 +762,15 @@ static int qat_rsa_enc(struct akcipher_request *req) sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); unmap_dst: - if (qat_req->dst_align) - dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align, - qat_req->out.rsa.enc.c); - else - if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c)) - dma_unmap_single(dev, qat_req->out.rsa.enc.c, - ctx->key_sz, DMA_FROM_DEVICE); + if (!dma_mapping_error(dev, qat_req->out.rsa.enc.c)) + dma_unmap_single(dev, qat_req->out.rsa.enc.c, + ctx->key_sz, DMA_FROM_DEVICE); + kfree_sensitive(qat_req->dst_align); unmap_src: - if (qat_req->src_align) - dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, - qat_req->in.rsa.enc.m); - else - if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m)) - dma_unmap_single(dev, qat_req->in.rsa.enc.m, - ctx->key_sz, DMA_TO_DEVICE); + if (!dma_mapping_error(dev, qat_req->in.rsa.enc.m)) + dma_unmap_single(dev, qat_req->in.rsa.enc.m, ctx->key_sz, + DMA_TO_DEVICE); + kfree_sensitive(qat_req->src_align); return ret; }
@@ -776,7 +783,8 @@ static int qat_rsa_dec(struct akcipher_request *req) struct qat_asym_request *qat_req = PTR_ALIGN(akcipher_request_ctx(req), 64); struct icp_qat_fw_pke_request *msg = &qat_req->req; - int ret, ctr = 0; + u8 *vaddr; + int ret;
if (unlikely(!ctx->n || !ctx->d)) return -EINVAL; @@ -785,6 +793,10 @@ static int qat_rsa_dec(struct akcipher_request *req) req->dst_len = ctx->key_sz; return -EOVERFLOW; } + + if (req->src_len > ctx->key_sz) + return -EINVAL; + memset(msg, '\0', sizeof(*msg)); ICP_QAT_FW_PKE_HDR_VALID_FLAG_SET(msg->pke_hdr, ICP_QAT_FW_COMN_REQ_FLAG_SET); @@ -823,40 +835,37 @@ static int qat_rsa_dec(struct akcipher_request *req) */ if (sg_is_last(req->src) && req->src_len == ctx->key_sz) { qat_req->src_align = NULL; - qat_req->in.rsa.dec.c = dma_map_single(dev, sg_virt(req->src), - req->dst_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c))) - return ret; - + vaddr = sg_virt(req->src); } else { int shift = ctx->key_sz - req->src_len;
- qat_req->src_align = dma_alloc_coherent(dev, ctx->key_sz, - &qat_req->in.rsa.dec.c, - GFP_KERNEL); + qat_req->src_align = kzalloc(ctx->key_sz, GFP_KERNEL); if (unlikely(!qat_req->src_align)) return ret;
scatterwalk_map_and_copy(qat_req->src_align + shift, req->src, 0, req->src_len, 0); + vaddr = qat_req->src_align; } - if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { - qat_req->dst_align = NULL; - qat_req->out.rsa.dec.m = dma_map_single(dev, sg_virt(req->dst), - req->dst_len, - DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m))) - goto unmap_src; + qat_req->in.rsa.dec.c = dma_map_single(dev, vaddr, ctx->key_sz, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev, qat_req->in.rsa.dec.c))) + goto unmap_src;
+ if (sg_is_last(req->dst) && req->dst_len == ctx->key_sz) { + qat_req->dst_align = NULL; + vaddr = sg_virt(req->dst); } else { - qat_req->dst_align = dma_alloc_coherent(dev, ctx->key_sz, - &qat_req->out.rsa.dec.m, - GFP_KERNEL); + qat_req->dst_align = kzalloc(ctx->key_sz, GFP_KERNEL); if (unlikely(!qat_req->dst_align)) goto unmap_src; - + vaddr = qat_req->dst_align; } + qat_req->out.rsa.dec.m = dma_map_single(dev, vaddr, ctx->key_sz, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(dev, qat_req->out.rsa.dec.m))) + goto unmap_dst;
if (ctx->crt_mode) qat_req->in.rsa.in_tab[6] = 0; @@ -884,13 +893,14 @@ static int qat_rsa_dec(struct akcipher_request *req) msg->input_param_count = 3;
msg->output_param_count = 1; - do { - ret = adf_send_message(ctx->inst->pke_tx, (u32 *)msg); - } while (ret == -EBUSY && ctr++ < 100);
- if (!ret) - return -EINPROGRESS; + ret = qat_alg_send_asym_message(qat_req, inst, &req->base); + if (ret == -ENOSPC) + goto unmap_all; + + return ret;
+unmap_all: if (!dma_mapping_error(dev, qat_req->phy_out)) dma_unmap_single(dev, qat_req->phy_out, sizeof(struct qat_rsa_output_params), @@ -901,21 +911,15 @@ static int qat_rsa_dec(struct akcipher_request *req) sizeof(struct qat_rsa_input_params), DMA_TO_DEVICE); unmap_dst: - if (qat_req->dst_align) - dma_free_coherent(dev, ctx->key_sz, qat_req->dst_align, - qat_req->out.rsa.dec.m); - else - if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m)) - dma_unmap_single(dev, qat_req->out.rsa.dec.m, - ctx->key_sz, DMA_FROM_DEVICE); + if (!dma_mapping_error(dev, qat_req->out.rsa.dec.m)) + dma_unmap_single(dev, qat_req->out.rsa.dec.m, + ctx->key_sz, DMA_FROM_DEVICE); + kfree_sensitive(qat_req->dst_align); unmap_src: - if (qat_req->src_align) - dma_free_coherent(dev, ctx->key_sz, qat_req->src_align, - qat_req->in.rsa.dec.c); - else - if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c)) - dma_unmap_single(dev, qat_req->in.rsa.dec.c, - ctx->key_sz, DMA_TO_DEVICE); + if (!dma_mapping_error(dev, qat_req->in.rsa.dec.c)) + dma_unmap_single(dev, qat_req->in.rsa.dec.c, ctx->key_sz, + DMA_TO_DEVICE); + kfree_sensitive(qat_req->src_align); return ret; }
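Editor's note: the relabelled unwind in qat_rsa_enc()/qat_rsa_dec() keeps the usual kernel discipline: each goto target undoes exactly what succeeded before the failing step, in reverse order. A reduced sketch of the shape:

/* Reduced shape of the unwind (steps and labels abridged). */
static int unwind_shape_example(bool submit_fails)
{
        int ret = -ENOMEM;
        void *src, *dst;

        src = kzalloc(64, GFP_KERNEL);
        if (!src)
                return ret;

        dst = kzalloc(64, GFP_KERNEL);
        if (!dst)
                goto free_src;

        if (submit_fails) {             /* e.g. send returned -ENOSPC */
                ret = -ENOSPC;
                goto free_dst;
        }

        return -EINPROGRESS;

free_dst:
        kfree_sensitive(dst);
free_src:
        kfree_sensitive(src);
        return ret;
}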
@@ -1233,18 +1237,8 @@ static void qat_rsa_exit_tfm(struct crypto_akcipher *tfm) struct qat_rsa_ctx *ctx = akcipher_tfm_ctx(tfm); struct device *dev = &GET_DEV(ctx->inst->accel_dev);
- if (ctx->n) - dma_free_coherent(dev, ctx->key_sz, ctx->n, ctx->dma_n); - if (ctx->e) - dma_free_coherent(dev, ctx->key_sz, ctx->e, ctx->dma_e); - if (ctx->d) { - memset(ctx->d, '\0', ctx->key_sz); - dma_free_coherent(dev, ctx->key_sz, ctx->d, ctx->dma_d); - } + qat_rsa_clear_ctx(dev, ctx); qat_crypto_put_instance(ctx->inst); - ctx->n = NULL; - ctx->e = NULL; - ctx->d = NULL; }
static struct akcipher_alg rsa = { diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c index 67c9588e89df..9341d892533a 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.c +++ b/drivers/crypto/qat/qat_common/qat_crypto.c @@ -161,13 +161,6 @@ int qat_crypto_dev_config(struct adf_accel_dev *accel_dev) if (ret) goto err;
- /* Temporarily set the number of crypto instances to zero to avoid - * registering the crypto algorithms. - * This will be removed when the algorithms will support the - * CRYPTO_TFM_REQ_MAY_BACKLOG flag - */ - instances = 0; - for (i = 0; i < instances; i++) { val = i; snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_BANK_NUM, i); @@ -353,6 +346,9 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev) &inst->pke_rx); if (ret) goto err; + + INIT_LIST_HEAD(&inst->backlog.list); + spin_lock_init(&inst->backlog.lock); } return 0; err: diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h index b6a4c95ae003..245b6d9a3650 100644 --- a/drivers/crypto/qat/qat_common/qat_crypto.h +++ b/drivers/crypto/qat/qat_common/qat_crypto.h @@ -9,6 +9,19 @@ #include "adf_accel_devices.h" #include "icp_qat_fw_la.h"
+struct qat_instance_backlog { + struct list_head list; + spinlock_t lock; /* protects backlog list */ +}; + +struct qat_alg_req { + u32 *fw_req; + struct adf_etr_ring_data *tx_ring; + struct crypto_async_request *base; + struct list_head list; + struct qat_instance_backlog *backlog; +}; + struct qat_crypto_instance { struct adf_etr_ring_data *sym_tx; struct adf_etr_ring_data *sym_rx; @@ -19,8 +32,29 @@ struct qat_crypto_instance { unsigned long state; int id; atomic_t refctr; + struct qat_instance_backlog backlog; };
+#define QAT_MAX_BUFF_DESC 4 + +struct qat_alg_buf { + u32 len; + u32 resrvd; + u64 addr; +} __packed; + +struct qat_alg_buf_list { + u64 resrvd; + u32 num_bufs; + u32 num_mapped_bufs; + struct qat_alg_buf bufers[]; +} __packed; + +struct qat_alg_fixed_buf_list { + struct qat_alg_buf_list sgl_hdr; + struct qat_alg_buf descriptors[QAT_MAX_BUFF_DESC]; +} __packed __aligned(64); + struct qat_crypto_request_buffs { struct qat_alg_buf_list *bl; dma_addr_t blp; @@ -28,6 +62,10 @@ struct qat_crypto_request_buffs { dma_addr_t bloutp; size_t sz; size_t sz_out; + bool sgl_src_valid; + bool sgl_dst_valid; + struct qat_alg_fixed_buf_list sgl_src; + struct qat_alg_fixed_buf_list sgl_dst; };
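Editor's note: for scale, the pre-allocated lists above cost a fixed amount per request; worked arithmetic (not from the patch):

/* qat_alg_buf: u32 + u32 + u64 = 16 bytes; qat_alg_buf_list header:
 * u64 + u32 + u32 = 16 bytes; so a fixed list carries 16 + 4 * 16 = 80
 * bytes of payload, rounded up to 128 by __aligned(64). Requests whose
 * src/dst scatterlists have at most QAT_MAX_BUFF_DESC (4) entries skip
 * the GFP_ATOMIC kzalloc_node() entirely. */
static void qat_fixed_buf_list_size_check(void)
{
        BUILD_BUG_ON(sizeof(struct qat_alg_buf) != 16);
        BUILD_BUG_ON(sizeof(struct qat_alg_buf_list) != 16);
        BUILD_BUG_ON(sizeof(struct qat_alg_fixed_buf_list) != 128);
}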
struct qat_crypto_request; @@ -53,6 +91,7 @@ struct qat_crypto_request { u8 iv[AES_BLOCK_SIZE]; }; bool encryption; + struct qat_alg_req alg_req; };
static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev) diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 33683295a0bf..64befd6f702b 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -351,6 +351,9 @@ static const struct regmap_config pca953x_i2c_regmap = { .reg_bits = 8, .val_bits = 8,
+ .use_single_read = true, + .use_single_write = true, + .readable_reg = pca953x_readable_register, .writeable_reg = pca953x_writeable_register, .volatile_reg = pca953x_volatile_register, @@ -894,15 +897,18 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, static int device_pca95xx_init(struct pca953x_chip *chip, u32 invert) { DECLARE_BITMAP(val, MAX_LINE); + u8 regaddr; int ret;
- ret = regcache_sync_region(chip->regmap, chip->regs->output, - chip->regs->output + NBANK(chip)); + regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0); + ret = regcache_sync_region(chip->regmap, regaddr, + regaddr + NBANK(chip) - 1); if (ret) goto out;
- ret = regcache_sync_region(chip->regmap, chip->regs->direction, - chip->regs->direction + NBANK(chip)); + regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0); + ret = regcache_sync_region(chip->regmap, regaddr, + regaddr + NBANK(chip) - 1); if (ret) goto out;
@@ -1115,14 +1121,14 @@ static int pca953x_regcache_sync(struct device *dev) * sync these registers first and only then sync the rest. */ regaddr = pca953x_recalc_addr(chip, chip->regs->direction, 0); - ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip)); + ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1); if (ret) { dev_err(dev, "Failed to sync GPIO dir registers: %d\n", ret); return ret; }
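Editor's note: the `- 1` added at each call site here and below exists because regcache_sync_region() takes an inclusive [min, max] register pair; a worked example with an assumed bank count:

/* Worked example, layout assumed: a 2-bank chip whose output registers
 * sit at 0x02 and 0x03. The inclusive range is [base, base + NBANK - 1];
 * the old "base + NBANK" also swept 0x04, the next register group. */
static int sync_output_regs_example(struct regmap *map)
{
        unsigned int base = 0x02, nbanks = 2;

        return regcache_sync_region(map, base, base + nbanks - 1);
}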
regaddr = pca953x_recalc_addr(chip, chip->regs->output, 0); - ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip)); + ret = regcache_sync_region(chip->regmap, regaddr, regaddr + NBANK(chip) - 1); if (ret) { dev_err(dev, "Failed to sync GPIO out registers: %d\n", ret); return ret; @@ -1132,7 +1138,7 @@ static int pca953x_regcache_sync(struct device *dev) if (chip->driver_data & PCA_PCAL) { regaddr = pca953x_recalc_addr(chip, PCAL953X_IN_LATCH, 0); ret = regcache_sync_region(chip->regmap, regaddr, - regaddr + NBANK(chip)); + regaddr + NBANK(chip) - 1); if (ret) { dev_err(dev, "Failed to sync INT latch registers: %d\n", ret); @@ -1141,7 +1147,7 @@ static int pca953x_regcache_sync(struct device *dev)
regaddr = pca953x_recalc_addr(chip, PCAL953X_INT_MASK, 0); ret = regcache_sync_region(chip->regmap, regaddr, - regaddr + NBANK(chip)); + regaddr + NBANK(chip) - 1); if (ret) { dev_err(dev, "Failed to sync INT mask registers: %d\n", ret); diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c index b6d3a57e27ed..7f8e2fed2988 100644 --- a/drivers/gpio/gpio-xilinx.c +++ b/drivers/gpio/gpio-xilinx.c @@ -99,7 +99,7 @@ static inline void xgpio_set_value32(unsigned long *map, int bit, u32 v) const unsigned long offset = (bit % BITS_PER_LONG) & BIT(5);
map[index] &= ~(0xFFFFFFFFul << offset); - map[index] |= v << offset; + map[index] |= (unsigned long)v << offset; }
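Editor's note: the cast fixes a width bug on 64-bit builds: `v` is u32 and `offset` is 0 or 32, and a 32-bit shift by 32 is undefined, so writes to the upper half of the map word were lost. A minimal demonstration:

static unsigned long xgpio_shift_example(u32 v)
{
        /* buggy form: (v << 32) shifts in 32-bit arithmetic, which is
         * undefined for a shift count of 32, so the upper word was lost */
        return (unsigned long)v << 32;  /* e.g. 0xABCD -> 0xABCD00000000 */
}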
static inline int xgpio_regoffset(struct xgpio_instance *chip, int ch) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 810965bd0692..a2575195c4e0 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1670,7 +1670,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work(); #endif - if (dc_enable_dmub_notifications(adev->dm.dc)) { + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { init_completion(&adev->dm.dmub_aux_transfer_done); adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); if (!adev->dm.dmub_notify) { @@ -1708,6 +1708,13 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) goto error; }
+ /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. + * It is expected that DMUB will resend any pending notifications at this point, for + * example HPD from DPIA. + */ + if (dc_is_dmub_outbox_supported(adev->dm.dc)) + dc_enable_dmub_outbox(adev->dm.dc); + /* create fake encoders for MST */ dm_dp_create_fake_mst_encoders(adev);
@@ -2701,9 +2708,6 @@ static int dm_resume(void *handle) */ link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
- if (dc_enable_dmub_notifications(adev->dm.dc)) - amdgpu_dm_outbox_init(adev); - r = dm_dmub_hw_init(adev); if (r) DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); @@ -2721,6 +2725,11 @@ static int dm_resume(void *handle) } }
+ if (dc_is_dmub_outbox_supported(adev->dm.dc)) { + amdgpu_dm_outbox_init(adev); + dc_enable_dmub_outbox(adev->dm.dc); + } + WARN_ON(!dc_commit_state(dm->dc, dc_state));
dm_gpureset_commit_state(dm->cached_dc_state, dm); @@ -2742,13 +2751,15 @@ static int dm_resume(void *handle) /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ dc_resource_state_construct(dm->dc, dm_state->context);
- /* Re-enable outbox interrupts for DPIA. */ - if (dc_enable_dmub_notifications(adev->dm.dc)) - amdgpu_dm_outbox_init(adev); - /* Before powering on DC we need to re-initialize DMUB. */ dm_dmub_hw_resume(adev);
+ /* Re-enable outbox interrupts for DPIA. */ + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { + amdgpu_dm_outbox_init(adev); + dc_enable_dmub_outbox(adev->dm.dc); + } + /* power on hardware */ dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c index d5962a34c01d..e5fc875990c4 100644 --- a/drivers/gpu/drm/drm_gem_ttm_helper.c +++ b/drivers/gpu/drm/drm_gem_ttm_helper.c @@ -64,8 +64,13 @@ int drm_gem_ttm_vmap(struct drm_gem_object *gem, struct iosys_map *map) { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem); + int ret; + + dma_resv_lock(gem->resv, NULL); + ret = ttm_bo_vmap(bo, map); + dma_resv_unlock(gem->resv);
- return ttm_bo_vmap(bo, map); + return ret; } EXPORT_SYMBOL(drm_gem_ttm_vmap);
@@ -82,7 +87,9 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem, { struct ttm_buffer_object *bo = drm_gem_ttm_of_gem(gem);
+ dma_resv_lock(gem->resv, NULL); ttm_bo_vunmap(bo, map); + dma_resv_unlock(gem->resv); } EXPORT_SYMBOL(drm_gem_ttm_vunmap);
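Editor's note: with the reservation lock now taken inside both helpers, callers no longer need to hold gem->resv around CPU mappings; a minimal usage sketch (hypothetical driver code):

static int touch_bo_example(struct drm_gem_object *gem)
{
        struct iosys_map map;
        int ret;

        ret = drm_gem_ttm_vmap(gem, &map);      /* locks gem->resv itself */
        if (ret)
                return ret;

        /* ... CPU access through the mapping ... */

        drm_gem_ttm_vunmap(gem, &map);          /* also self-locking */
        return 0;
}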
diff --git a/drivers/gpu/drm/imx/dcss/dcss-dev.c b/drivers/gpu/drm/imx/dcss/dcss-dev.c index c849533ca83e..3f5750cc2673 100644 --- a/drivers/gpu/drm/imx/dcss/dcss-dev.c +++ b/drivers/gpu/drm/imx/dcss/dcss-dev.c @@ -207,6 +207,7 @@ struct dcss_dev *dcss_dev_create(struct device *dev, bool hdmi_output)
ret = dcss_submodules_init(dcss); if (ret) { + of_node_put(dcss->of_port); dev_err(dev, "submodules initialization failed\n"); goto clks_err; } @@ -237,6 +238,8 @@ void dcss_dev_destroy(struct dcss_dev *dcss) dcss_clocks_disable(dcss); }
+ of_node_put(dcss->of_port); + pm_runtime_disable(dcss->dev);
dcss_submodules_stop(dcss); diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index f7bfcf63d48e..701a258d2e11 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -713,7 +713,7 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel) of_property_read_u32(dev->of_node, "hpd-reliable-delay-ms", &reliable_ms); desc->delay.hpd_reliable = reliable_ms; of_property_read_u32(dev->of_node, "hpd-absent-delay-ms", &absent_ms); - desc->delay.hpd_reliable = absent_ms; + desc->delay.hpd_absent = absent_ms;
/* Power the panel on so we can read the EDID */ ret = pm_runtime_get_sync(dev); diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 191c56064f19..6b25b2f4f5a3 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -190,7 +190,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) } EXPORT_SYMBOL(drm_sched_entity_flush);
-static void drm_sched_entity_kill_jobs_irq_work(struct irq_work *wrk) +static void drm_sched_entity_kill_jobs_work(struct work_struct *wrk) { struct drm_sched_job *job = container_of(wrk, typeof(*job), work);
@@ -207,8 +207,8 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, struct drm_sched_job *job = container_of(cb, struct drm_sched_job, finish_cb);
- init_irq_work(&job->work, drm_sched_entity_kill_jobs_irq_work); - irq_work_queue(&job->work); + INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); + schedule_work(&job->work); }
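Editor's note: the conversion swaps irq_work, which runs in interrupt context, for a regular workqueue item, since the fence callback can fire in interrupt context while job cleanup wants process context. The generic shape of that pattern (names hypothetical):

/* Generic shape of the irq_work -> work_struct conversion. */
struct my_job {
        struct work_struct work;        /* was: struct irq_work work */
};

static void my_job_cleanup(struct work_struct *wrk)
{
        struct my_job *job = container_of(wrk, struct my_job, work);

        kfree(job);     /* process context: sleeping operations allowed */
}

static void my_fence_cb(struct my_job *job)     /* interrupt context */
{
        INIT_WORK(&job->work, my_job_cleanup);
        schedule_work(&job->work);
}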
static struct dma_fence * diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 3d6f8ee355bf..630cfa4ddd46 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -388,9 +388,9 @@ static irqreturn_t cdns_i2c_slave_isr(void *ptr) */ static irqreturn_t cdns_i2c_master_isr(void *ptr) { - unsigned int isr_status, avail_bytes, updatetx; + unsigned int isr_status, avail_bytes; unsigned int bytes_to_send; - bool hold_quirk; + bool updatetx; struct cdns_i2c *id = ptr; /* Signal completion only after everything is updated */ int done_flag = 0; @@ -410,11 +410,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr) * Check if transfer size register needs to be updated again for a * large data receive operation. */ - updatetx = 0; - if (id->recv_count > id->curr_recv_count) - updatetx = 1; - - hold_quirk = (id->quirks & CDNS_I2C_BROKEN_HOLD_BIT) && updatetx; + updatetx = id->recv_count > id->curr_recv_count;
/* When receiving, handle data interrupt and completion interrupt */ if (id->p_recv_buf && @@ -445,7 +441,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr) break; }
- if (cdns_is_holdquirk(id, hold_quirk)) + if (cdns_is_holdquirk(id, updatetx)) break; }
@@ -456,7 +452,7 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr) * maintain transfer size non-zero while performing a large * receive operation. */ - if (cdns_is_holdquirk(id, hold_quirk)) { + if (cdns_is_holdquirk(id, updatetx)) { /* wait while fifo is full */ while (cdns_i2c_readreg(CDNS_I2C_XFER_SIZE_OFFSET) != (id->curr_recv_count - CDNS_I2C_FIFO_DEPTH)) @@ -478,22 +474,6 @@ static irqreturn_t cdns_i2c_master_isr(void *ptr) CDNS_I2C_XFER_SIZE_OFFSET); id->curr_recv_count = id->recv_count; } - } else if (id->recv_count && !hold_quirk && - !id->curr_recv_count) { - - /* Set the slave address in address register*/ - cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK, - CDNS_I2C_ADDR_OFFSET); - - if (id->recv_count > CDNS_I2C_TRANSFER_SIZE) { - cdns_i2c_writereg(CDNS_I2C_TRANSFER_SIZE, - CDNS_I2C_XFER_SIZE_OFFSET); - id->curr_recv_count = CDNS_I2C_TRANSFER_SIZE; - } else { - cdns_i2c_writereg(id->recv_count, - CDNS_I2C_XFER_SIZE_OFFSET); - id->curr_recv_count = id->recv_count; - } }
/* Clear hold (if not repeated start) and signal completion */ diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c index 56aa424fd71d..815cc561386b 100644 --- a/drivers/i2c/busses/i2c-mlxcpld.c +++ b/drivers/i2c/busses/i2c-mlxcpld.c @@ -49,7 +49,7 @@ #define MLXCPLD_LPCI2C_NACK_IND 2
#define MLXCPLD_I2C_FREQ_1000KHZ_SET 0x04 -#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0c +#define MLXCPLD_I2C_FREQ_400KHZ_SET 0x0e #define MLXCPLD_I2C_FREQ_100KHZ_SET 0x42
enum mlxcpld_i2c_frequency { diff --git a/drivers/infiniband/hw/irdma/cm.c b/drivers/infiniband/hw/irdma/cm.c index 638bf4a1ed94..646fa8677490 100644 --- a/drivers/infiniband/hw/irdma/cm.c +++ b/drivers/infiniband/hw/irdma/cm.c @@ -4231,10 +4231,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr, struct irdma_cm_node *cm_node; struct list_head teardown_list; struct ib_qp_attr attr; - struct irdma_sc_vsi *vsi = &iwdev->vsi; - struct irdma_sc_qp *sc_qp; - struct irdma_qp *qp; - int i;
INIT_LIST_HEAD(&teardown_list);
@@ -4251,52 +4247,6 @@ void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr, irdma_cm_disconn(cm_node->iwqp); irdma_rem_ref_cm_node(cm_node); } - if (!iwdev->roce_mode) - return; - - INIT_LIST_HEAD(&teardown_list); - for (i = 0; i < IRDMA_MAX_USER_PRIORITY; i++) { - mutex_lock(&vsi->qos[i].qos_mutex); - list_for_each_safe (list_node, list_core_temp, - &vsi->qos[i].qplist) { - u32 qp_ip[4]; - - sc_qp = container_of(list_node, struct irdma_sc_qp, - list); - if (sc_qp->qp_uk.qp_type != IRDMA_QP_TYPE_ROCE_RC) - continue; - - qp = sc_qp->qp_uk.back_qp; - if (!disconnect_all) { - if (nfo->ipv4) - qp_ip[0] = qp->udp_info.local_ipaddr[3]; - else - memcpy(qp_ip, - &qp->udp_info.local_ipaddr[0], - sizeof(qp_ip)); - } - - if (disconnect_all || - (nfo->vlan_id == (qp->udp_info.vlan_tag & VLAN_VID_MASK) && - !memcmp(qp_ip, ipaddr, nfo->ipv4 ? 4 : 16))) { - spin_lock(&iwdev->rf->qptable_lock); - if (iwdev->rf->qp_table[sc_qp->qp_uk.qp_id]) { - irdma_qp_add_ref(&qp->ibqp); - list_add(&qp->teardown_entry, - &teardown_list); - } - spin_unlock(&iwdev->rf->qptable_lock); - } - } - mutex_unlock(&vsi->qos[i].qos_mutex); - } - - list_for_each_safe (list_node, list_core_temp, &teardown_list) { - qp = container_of(list_node, struct irdma_qp, teardown_entry); - attr.qp_state = IB_QPS_ERR; - irdma_modify_qp_roce(&qp->ibqp, &attr, IB_QP_STATE, NULL); - irdma_qp_rem_ref(&qp->ibqp); - } }
/** diff --git a/drivers/infiniband/hw/irdma/i40iw_hw.c b/drivers/infiniband/hw/irdma/i40iw_hw.c index e46fc110004d..50299f58b6b3 100644 --- a/drivers/infiniband/hw/irdma/i40iw_hw.c +++ b/drivers/infiniband/hw/irdma/i40iw_hw.c @@ -201,6 +201,7 @@ void i40iw_init_hw(struct irdma_sc_dev *dev) dev->hw_attrs.uk_attrs.max_hw_read_sges = I40IW_MAX_SGE_RD; dev->hw_attrs.max_hw_device_pages = I40IW_MAX_PUSH_PAGE_COUNT; dev->hw_attrs.uk_attrs.max_hw_inline = I40IW_MAX_INLINE_DATA_SIZE; + dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M; dev->hw_attrs.max_hw_ird = I40IW_MAX_IRD_SIZE; dev->hw_attrs.max_hw_ord = I40IW_MAX_ORD_SIZE; dev->hw_attrs.max_hw_wqes = I40IW_MAX_WQ_ENTRIES; diff --git a/drivers/infiniband/hw/irdma/icrdma_hw.c b/drivers/infiniband/hw/irdma/icrdma_hw.c index cf53b17510cd..5986fd906308 100644 --- a/drivers/infiniband/hw/irdma/icrdma_hw.c +++ b/drivers/infiniband/hw/irdma/icrdma_hw.c @@ -139,6 +139,7 @@ void icrdma_init_hw(struct irdma_sc_dev *dev) dev->cqp_db = dev->hw_regs[IRDMA_CQPDB]; dev->cq_ack_db = dev->hw_regs[IRDMA_CQACK]; dev->irq_ops = &icrdma_irq_ops; + dev->hw_attrs.page_size_cap = SZ_4K | SZ_2M | SZ_1G; dev->hw_attrs.max_hw_ird = ICRDMA_MAX_IRD_SIZE; dev->hw_attrs.max_hw_ord = ICRDMA_MAX_ORD_SIZE; dev->hw_attrs.max_stat_inst = ICRDMA_MAX_STATS_COUNT; diff --git a/drivers/infiniband/hw/irdma/irdma.h b/drivers/infiniband/hw/irdma/irdma.h index 46c12334c735..4789e85d717b 100644 --- a/drivers/infiniband/hw/irdma/irdma.h +++ b/drivers/infiniband/hw/irdma/irdma.h @@ -127,6 +127,7 @@ struct irdma_hw_attrs { u64 max_hw_outbound_msg_size; u64 max_hw_inbound_msg_size; u64 max_mr_size; + u64 page_size_cap; u32 min_hw_qp_id; u32 min_hw_aeq_size; u32 max_hw_aeq_size; diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c index 52f3e88f8569..6daa149dcbda 100644 --- a/drivers/infiniband/hw/irdma/verbs.c +++ b/drivers/infiniband/hw/irdma/verbs.c @@ -30,7 +30,7 @@ static int irdma_query_device(struct ib_device *ibdev, props->vendor_part_id = pcidev->device;
props->hw_ver = rf->pcidev->revision; - props->page_size_cap = SZ_4K | SZ_2M | SZ_1G; + props->page_size_cap = hw_attrs->page_size_cap; props->max_mr_size = hw_attrs->max_mr_size; props->max_qp = rf->max_qp - rf->used_qps; props->max_qp_wr = hw_attrs->max_qp_wr; @@ -2764,7 +2764,7 @@ static struct ib_mr *irdma_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
if (req.reg_type == IRDMA_MEMREG_TYPE_MEM) { iwmr->page_size = ib_umem_find_best_pgsz(region, - SZ_4K | SZ_2M | SZ_1G, + iwdev->rf->sc_dev.hw_attrs.page_size_cap, virt); if (unlikely(!iwmr->page_size)) { kfree(iwmr); diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 64e27c2821f9..ada23040cb65 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -1303,8 +1303,9 @@ static int sdhci_omap_probe(struct platform_device *pdev) /* * omap_device_pm_domain has callbacks to enable the main * functional clock, interface clock and also configure the - * SYSCONFIG register of omap devices. The callback will be invoked - * as part of pm_runtime_get_sync. + * SYSCONFIG register to clear any boot loader set voltage + * capabilities before calling sdhci_setup_host(). The + * callback will be invoked as part of pm_runtime_get_sync. */ pm_runtime_use_autosuspend(dev); pm_runtime_set_autosuspend_delay(dev, 50); @@ -1446,7 +1447,8 @@ static int __maybe_unused sdhci_omap_runtime_suspend(struct device *dev) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);
- sdhci_runtime_suspend_host(host); + if (omap_host->con != -EINVAL) + sdhci_runtime_suspend_host(host);
sdhci_omap_context_save(omap_host);
@@ -1463,10 +1465,10 @@ static int __maybe_unused sdhci_omap_runtime_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- if (omap_host->con != -EINVAL) + if (omap_host->con != -EINVAL) { sdhci_omap_context_restore(omap_host); - - sdhci_runtime_resume_host(host, 0); + sdhci_runtime_resume_host(host, 0); + }
return 0; } diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 44b14c9dc9a7..a626028336d3 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -655,9 +655,10 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this, unsigned int tRP_ps; bool use_half_period; int sample_delay_ps, sample_delay_factor; - u16 busy_timeout_cycles; + unsigned int busy_timeout_cycles; u8 wrn_dly_sel; unsigned long clk_rate, min_rate; + u64 busy_timeout_ps;
if (sdr->tRC_min >= 30000) { /* ONFI non-EDO modes [0-3] */ @@ -690,7 +691,8 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this, addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps); data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps); data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps); - busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps); + busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max); + busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
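The switch from u16 to unsigned int above is the point of the hunk: tBERS_max and tPROG_max are given in picoseconds, and at NAND-controller clock rates the resulting cycle count easily overflows 16 bits. A standalone illustration; the 100 MHz clock and 10 ms erase time are made-up but realistic inputs, and the rounding mirrors the driver's round-up TO_CYCLES():

#include <stdint.h>
#include <stdio.h>

/* round-up division, as TO_CYCLES() does in the driver */
static uint64_t to_cycles(uint64_t duration_ps, uint64_t period_ps)
{
        return (duration_ps + period_ps - 1) / period_ps;
}

int main(void)
{
        uint64_t period_ps = 10000;             /* 100 MHz clock, assumed */
        uint64_t tbers_ps = 10000000000ULL;     /* 10 ms erase, assumed */

        printf("busy timeout: %llu cycles (u16 tops out at 65535)\n",
               (unsigned long long)to_cycles(tbers_ps, period_ps));
        return 0;
}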
hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) | BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) | diff --git a/drivers/net/amt.c b/drivers/net/amt.c index 14fe03dbd9b1..acf5ea96652f 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -563,7 +563,7 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt) ihv3->nsrcs = 0; ihv3->resv = 0; ihv3->suppress = false; - ihv3->qrv = amt->net->ipv4.sysctl_igmp_qrv; + ihv3->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv); ihv3->csum = 0; csum = &ihv3->csum; csum_start = (void *)ihv3; @@ -577,14 +577,14 @@ static struct sk_buff *amt_build_igmp_gq(struct amt_dev *amt) return skb; }
-static void __amt_update_gw_status(struct amt_dev *amt, enum amt_status status, - bool validate) +static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status, + bool validate) { if (validate && amt->status >= status) return; netdev_dbg(amt->dev, "Update GW status %s -> %s", status_str[amt->status], status_str[status]); - amt->status = status; + WRITE_ONCE(amt->status, status); }
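amt_update_gw_status() now publishes amt->status with WRITE_ONCE(), pairing with the READ_ONCE() sites added later in this patch instead of the spinlock the old wrapper took. For a scalar field this guarantees one untorn load or store and keeps the compiler from refetching or fusing the access. A rough userspace approximation of the two macros; the real kernel versions do more (size checks among other things), and __typeof__ assumes GCC or Clang:

#include <stdio.h>

#define READ_ONCE_DEMO(x)       (*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE_DEMO(x, v)   (*(volatile __typeof__(x) *)&(x) = (v))

static int status;      /* stands in for amt->status */

int main(void)
{
        WRITE_ONCE_DEMO(status, 2);     /* updater side */
        printf("reader saw %d\n", READ_ONCE_DEMO(status));
        return 0;
}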
static void __amt_update_relay_status(struct amt_tunnel_list *tunnel, @@ -600,14 +600,6 @@ static void __amt_update_relay_status(struct amt_tunnel_list *tunnel, tunnel->status = status; }
-static void amt_update_gw_status(struct amt_dev *amt, enum amt_status status, - bool validate) -{ - spin_lock_bh(&amt->lock); - __amt_update_gw_status(amt, status, validate); - spin_unlock_bh(&amt->lock); -} - static void amt_update_relay_status(struct amt_tunnel_list *tunnel, enum amt_status status, bool validate) { @@ -700,9 +692,7 @@ static void amt_send_discovery(struct amt_dev *amt) if (unlikely(net_xmit_eval(err))) amt->dev->stats.tx_errors++;
- spin_lock_bh(&amt->lock); - __amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true); - spin_unlock_bh(&amt->lock); + amt_update_gw_status(amt, AMT_STATUS_SENT_DISCOVERY, true); out: rcu_read_unlock(); } @@ -900,6 +890,28 @@ static void amt_send_mld_gq(struct amt_dev *amt, struct amt_tunnel_list *tunnel) } #endif
+static bool amt_queue_event(struct amt_dev *amt, enum amt_event event, + struct sk_buff *skb) +{ + int index; + + spin_lock_bh(&amt->lock); + if (amt->nr_events >= AMT_MAX_EVENTS) { + spin_unlock_bh(&amt->lock); + return 1; + } + + index = (amt->event_idx + amt->nr_events) % AMT_MAX_EVENTS; + amt->events[index].event = event; + amt->events[index].skb = skb; + amt->nr_events++; + amt->event_idx %= AMT_MAX_EVENTS; + queue_work(amt_wq, &amt->event_wq); + spin_unlock_bh(&amt->lock); + + return 0; +} + static void amt_secret_work(struct work_struct *work) { struct amt_dev *amt = container_of(to_delayed_work(work), @@ -913,58 +925,72 @@ static void amt_secret_work(struct work_struct *work) msecs_to_jiffies(AMT_SECRET_TIMEOUT)); }
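amt_queue_event() above is a bounded ring: event_idx is the consume index, nr_events the fill level, and producers write at (event_idx + nr_events) % AMT_MAX_EVENTS. A standalone model of that index arithmetic; the capacity of 16 is an assumption (the patch does not show AMT_MAX_EVENTS' value), the driver's amt->lock serialization is left out, and note the driver returns nonzero on failure while here true means queued:

#include <stdbool.h>
#include <stdio.h>

#define MAX_EVENTS 16   /* assumed stand-in for AMT_MAX_EVENTS */

struct ring {
        int head;       /* like amt->event_idx */
        int count;      /* like amt->nr_events */
        int slot[MAX_EVENTS];
};

static bool enqueue(struct ring *r, int ev)
{
        if (r->count >= MAX_EVENTS)
                return false;   /* full: caller falls back to a timer */
        r->slot[(r->head + r->count) % MAX_EVENTS] = ev;
        r->count++;
        return true;
}

static bool dequeue(struct ring *r, int *ev)
{
        if (!r->count)
                return false;
        *ev = r->slot[r->head];
        r->head = (r->head + 1) % MAX_EVENTS;
        r->count--;
        return true;
}

int main(void)
{
        struct ring r = { 0 };
        int ev;

        enqueue(&r, 1);
        enqueue(&r, 2);
        while (dequeue(&r, &ev))
                printf("event %d\n", ev);
        return 0;
}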
-static void amt_discovery_work(struct work_struct *work) +static void amt_event_send_discovery(struct amt_dev *amt) { - struct amt_dev *amt = container_of(to_delayed_work(work), - struct amt_dev, - discovery_wq); - - spin_lock_bh(&amt->lock); if (amt->status > AMT_STATUS_SENT_DISCOVERY) goto out; get_random_bytes(&amt->nonce, sizeof(__be32)); - spin_unlock_bh(&amt->lock);
amt_send_discovery(amt); - spin_lock_bh(&amt->lock); out: mod_delayed_work(amt_wq, &amt->discovery_wq, msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT)); - spin_unlock_bh(&amt->lock); }
-static void amt_req_work(struct work_struct *work) +static void amt_discovery_work(struct work_struct *work) { struct amt_dev *amt = container_of(to_delayed_work(work), struct amt_dev, - req_wq); + discovery_wq); + + if (amt_queue_event(amt, AMT_EVENT_SEND_DISCOVERY, NULL)) + mod_delayed_work(amt_wq, &amt->discovery_wq, + msecs_to_jiffies(AMT_DISCOVERY_TIMEOUT)); +} + +static void amt_event_send_request(struct amt_dev *amt) +{ u32 exp;
- spin_lock_bh(&amt->lock); if (amt->status < AMT_STATUS_RECEIVED_ADVERTISEMENT) goto out;
if (amt->req_cnt > AMT_MAX_REQ_COUNT) { netdev_dbg(amt->dev, "Gateway is not ready"); amt->qi = AMT_INIT_REQ_TIMEOUT; - amt->ready4 = false; - amt->ready6 = false; + WRITE_ONCE(amt->ready4, false); + WRITE_ONCE(amt->ready6, false); amt->remote_ip = 0; - __amt_update_gw_status(amt, AMT_STATUS_INIT, false); + amt_update_gw_status(amt, AMT_STATUS_INIT, false); amt->req_cnt = 0; + amt->nonce = 0; goto out; } - spin_unlock_bh(&amt->lock); + + if (!amt->req_cnt) { + WRITE_ONCE(amt->ready4, false); + WRITE_ONCE(amt->ready6, false); + get_random_bytes(&amt->nonce, sizeof(__be32)); + }
amt_send_request(amt, false); amt_send_request(amt, true); - spin_lock_bh(&amt->lock); - __amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true); + amt_update_gw_status(amt, AMT_STATUS_SENT_REQUEST, true); amt->req_cnt++; out: exp = min_t(u32, (1 * (1 << amt->req_cnt)), AMT_MAX_REQ_TIMEOUT); mod_delayed_work(amt_wq, &amt->req_wq, msecs_to_jiffies(exp * 1000)); - spin_unlock_bh(&amt->lock); +} + +static void amt_req_work(struct work_struct *work) +{ + struct amt_dev *amt = container_of(to_delayed_work(work), + struct amt_dev, + req_wq); + + if (amt_queue_event(amt, AMT_EVENT_SEND_REQUEST, NULL)) + mod_delayed_work(amt_wq, &amt->req_wq, + msecs_to_jiffies(100)); }
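amt_event_send_request() rearms its delayed work with exp = min(1 << req_cnt, AMT_MAX_REQ_TIMEOUT) seconds, a capped exponential backoff. A compact sketch; the 120 s cap is an assumption, since the patch does not show AMT_MAX_REQ_TIMEOUT's value:

#include <stdio.h>

#define MAX_REQ_TIMEOUT 120     /* seconds, assumed */

static unsigned int next_timeout(unsigned int req_cnt)
{
        unsigned int exp = 1u << req_cnt;       /* 1, 2, 4, 8, ... */

        return exp < MAX_REQ_TIMEOUT ? exp : MAX_REQ_TIMEOUT;
}

int main(void)
{
        for (unsigned int cnt = 0; cnt < 10; cnt++)
                printf("retry %u -> wait %u s\n", cnt, next_timeout(cnt));
        return 0;
}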
static bool amt_send_membership_update(struct amt_dev *amt, @@ -1220,7 +1246,8 @@ static netdev_tx_t amt_dev_xmit(struct sk_buff *skb, struct net_device *dev) /* Gateway only passes IGMP/MLD packets */ if (!report) goto free; - if ((!v6 && !amt->ready4) || (v6 && !amt->ready6)) + if ((!v6 && !READ_ONCE(amt->ready4)) || + (v6 && !READ_ONCE(amt->ready6))) goto free; if (amt_send_membership_update(amt, skb, v6)) goto free; @@ -2236,6 +2263,10 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb) ipv4_is_zeronet(amta->ip4)) return true;
+ if (amt->status != AMT_STATUS_SENT_DISCOVERY || + amt->nonce != amta->nonce) + return true; + amt->remote_ip = amta->ip4; netdev_dbg(amt->dev, "advertised remote ip = %pI4\n", &amt->remote_ip); mod_delayed_work(amt_wq, &amt->req_wq, 0); @@ -2251,6 +2282,9 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb) struct ethhdr *eth; struct iphdr *iph;
+ if (READ_ONCE(amt->status) != AMT_STATUS_SENT_UPDATE) + return true; + hdr_size = sizeof(*amtmd) + sizeof(struct udphdr); if (!pskb_may_pull(skb, hdr_size)) return true; @@ -2325,6 +2359,9 @@ static bool amt_membership_query_handler(struct amt_dev *amt, if (amtmq->reserved || amtmq->version) return true;
+ if (amtmq->nonce != amt->nonce) + return true; + hdr_size -= sizeof(*eth); if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false)) return true; @@ -2339,6 +2376,9 @@ static bool amt_membership_query_handler(struct amt_dev *amt,
iph = ip_hdr(skb); if (iph->version == 4) { + if (READ_ONCE(amt->ready4)) + return true; + if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3))) return true; @@ -2349,12 +2389,10 @@ static bool amt_membership_query_handler(struct amt_dev *amt, ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS); skb_reset_transport_header(skb); skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS); - spin_lock_bh(&amt->lock); - amt->ready4 = true; + WRITE_ONCE(amt->ready4, true); amt->mac = amtmq->response_mac; amt->req_cnt = 0; amt->qi = ihv3->qqic; - spin_unlock_bh(&amt->lock); skb->protocol = htons(ETH_P_IP); eth->h_proto = htons(ETH_P_IP); ip_eth_mc_map(iph->daddr, eth->h_dest); @@ -2363,6 +2401,9 @@ static bool amt_membership_query_handler(struct amt_dev *amt, struct mld2_query *mld2q; struct ipv6hdr *ip6h;
+ if (READ_ONCE(amt->ready6)) + return true; + if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS + sizeof(*mld2q))) return true; @@ -2374,12 +2415,10 @@ static bool amt_membership_query_handler(struct amt_dev *amt, mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); skb_reset_transport_header(skb); skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); - spin_lock_bh(&amt->lock); - amt->ready6 = true; + WRITE_ONCE(amt->ready6, true); amt->mac = amtmq->response_mac; amt->req_cnt = 0; amt->qi = mld2q->mld2q_qqic; - spin_unlock_bh(&amt->lock); skb->protocol = htons(ETH_P_IPV6); eth->h_proto = htons(ETH_P_IPV6); ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest); @@ -2392,12 +2431,14 @@ static bool amt_membership_query_handler(struct amt_dev *amt, skb->pkt_type = PACKET_MULTICAST; skb->ip_summed = CHECKSUM_NONE; len = skb->len; + local_bh_disable(); if (__netif_rx(skb) == NET_RX_SUCCESS) { amt_update_gw_status(amt, AMT_STATUS_RECEIVED_QUERY, true); dev_sw_netstats_rx_add(amt->dev, len); } else { amt->dev->stats.rx_dropped++; } + local_bh_enable();
return false; } @@ -2638,7 +2679,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb) if (tunnel->ip4 == iph->saddr) goto send;
+ spin_lock_bh(&amt->lock); if (amt->nr_tunnels >= amt->max_tunnels) { + spin_unlock_bh(&amt->lock); icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0); return true; } @@ -2646,8 +2689,10 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb) tunnel = kzalloc(sizeof(*tunnel) + (sizeof(struct hlist_head) * amt->hash_buckets), GFP_ATOMIC); - if (!tunnel) + if (!tunnel) { + spin_unlock_bh(&amt->lock); return true; + }
tunnel->source_port = udph->source; tunnel->ip4 = iph->saddr; @@ -2660,10 +2705,9 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb)
INIT_DELAYED_WORK(&tunnel->gc_wq, amt_tunnel_expire);
- spin_lock_bh(&amt->lock); list_add_tail_rcu(&tunnel->list, &amt->tunnel_list); tunnel->key = amt->key; - amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true); + __amt_update_relay_status(tunnel, AMT_STATUS_RECEIVED_REQUEST, true); amt->nr_tunnels++; mod_delayed_work(amt_wq, &tunnel->gc_wq, msecs_to_jiffies(amt_gmi(amt))); @@ -2688,6 +2732,38 @@ static bool amt_request_handler(struct amt_dev *amt, struct sk_buff *skb) return false; }
+static void amt_gw_rcv(struct amt_dev *amt, struct sk_buff *skb) +{ + int type = amt_parse_type(skb); + int err = 1; + + if (type == -1) + goto drop; + + if (amt->mode == AMT_MODE_GATEWAY) { + switch (type) { + case AMT_MSG_ADVERTISEMENT: + err = amt_advertisement_handler(amt, skb); + break; + case AMT_MSG_MEMBERSHIP_QUERY: + err = amt_membership_query_handler(amt, skb); + if (!err) + return; + break; + default: + netdev_dbg(amt->dev, "Invalid type of Gateway\n"); + break; + } + } +drop: + if (err) { + amt->dev->stats.rx_dropped++; + kfree_skb(skb); + } else { + consume_skb(skb); + } +} + static int amt_rcv(struct sock *sk, struct sk_buff *skb) { struct amt_dev *amt; @@ -2719,8 +2795,12 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb) err = true; goto drop; } - err = amt_advertisement_handler(amt, skb); - break; + if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) { + netdev_dbg(amt->dev, "AMT Event queue full\n"); + err = true; + goto drop; + } + goto out; case AMT_MSG_MULTICAST_DATA: if (iph->saddr != amt->remote_ip) { netdev_dbg(amt->dev, "Invalid Relay IP\n"); @@ -2738,11 +2818,12 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb) err = true; goto drop; } - err = amt_membership_query_handler(amt, skb); - if (err) + if (amt_queue_event(amt, AMT_EVENT_RECEIVE, skb)) { + netdev_dbg(amt->dev, "AMT Event queue full\n"); + err = true; goto drop; - else - goto out; + } + goto out; default: err = true; netdev_dbg(amt->dev, "Invalid type of Gateway\n"); @@ -2780,6 +2861,46 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb) return 0; }
+static void amt_event_work(struct work_struct *work) +{ + struct amt_dev *amt = container_of(work, struct amt_dev, event_wq); + struct sk_buff *skb; + u8 event; + int i; + + for (i = 0; i < AMT_MAX_EVENTS; i++) { + spin_lock_bh(&amt->lock); + if (amt->nr_events == 0) { + spin_unlock_bh(&amt->lock); + return; + } + event = amt->events[amt->event_idx].event; + skb = amt->events[amt->event_idx].skb; + amt->events[amt->event_idx].event = AMT_EVENT_NONE; + amt->events[amt->event_idx].skb = NULL; + amt->nr_events--; + amt->event_idx++; + amt->event_idx %= AMT_MAX_EVENTS; + spin_unlock_bh(&amt->lock); + + switch (event) { + case AMT_EVENT_RECEIVE: + amt_gw_rcv(amt, skb); + break; + case AMT_EVENT_SEND_DISCOVERY: + amt_event_send_discovery(amt); + break; + case AMT_EVENT_SEND_REQUEST: + amt_event_send_request(amt); + break; + default: + if (skb) + kfree_skb(skb); + break; + } + } +} + static int amt_err_lookup(struct sock *sk, struct sk_buff *skb) { struct amt_dev *amt; @@ -2804,7 +2925,7 @@ static int amt_err_lookup(struct sock *sk, struct sk_buff *skb) break; case AMT_MSG_REQUEST: case AMT_MSG_MEMBERSHIP_UPDATE: - if (amt->status >= AMT_STATUS_RECEIVED_ADVERTISEMENT) + if (READ_ONCE(amt->status) >= AMT_STATUS_RECEIVED_ADVERTISEMENT) mod_delayed_work(amt_wq, &amt->req_wq, 0); break; default: @@ -2867,6 +2988,8 @@ static int amt_dev_open(struct net_device *dev)
amt->ready4 = false; amt->ready6 = false; + amt->event_idx = 0; + amt->nr_events = 0;
err = amt_socket_create(amt); if (err) @@ -2874,6 +2997,7 @@ static int amt_dev_open(struct net_device *dev)
amt->req_cnt = 0; amt->remote_ip = 0; + amt->nonce = 0; get_random_bytes(&amt->key, sizeof(siphash_key_t));
amt->status = AMT_STATUS_INIT; @@ -2892,6 +3016,8 @@ static int amt_dev_stop(struct net_device *dev) struct amt_dev *amt = netdev_priv(dev); struct amt_tunnel_list *tunnel, *tmp; struct socket *sock; + struct sk_buff *skb; + int i;
cancel_delayed_work_sync(&amt->req_wq); cancel_delayed_work_sync(&amt->discovery_wq); @@ -2904,6 +3030,15 @@ static int amt_dev_stop(struct net_device *dev) if (sock) udp_tunnel_sock_release(sock);
+ cancel_work_sync(&amt->event_wq); + for (i = 0; i < AMT_MAX_EVENTS; i++) { + skb = amt->events[i].skb; + if (skb) + kfree_skb(skb); + amt->events[i].event = AMT_EVENT_NONE; + amt->events[i].skb = NULL; + } + amt->ready4 = false; amt->ready6 = false; amt->req_cnt = 0; @@ -3095,7 +3230,7 @@ static int amt_newlink(struct net *net, struct net_device *dev, goto err; } if (amt->mode == AMT_MODE_RELAY) { - amt->qrv = amt->net->ipv4.sysctl_igmp_qrv; + amt->qrv = READ_ONCE(amt->net->ipv4.sysctl_igmp_qrv); amt->qri = 10; dev->needed_headroom = amt->stream_dev->needed_headroom + AMT_RELAY_HLEN; @@ -3146,8 +3281,8 @@ static int amt_newlink(struct net *net, struct net_device *dev, INIT_DELAYED_WORK(&amt->discovery_wq, amt_discovery_work); INIT_DELAYED_WORK(&amt->req_wq, amt_req_work); INIT_DELAYED_WORK(&amt->secret_wq, amt_secret_work); + INIT_WORK(&amt->event_wq, amt_event_work); INIT_LIST_HEAD(&amt->tunnel_list); - return 0; err: dev_put(amt->stream_dev); @@ -3280,7 +3415,7 @@ static int __init amt_init(void) if (err < 0) goto unregister_notifier;
- amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 1); + amt_wq = alloc_workqueue("amt", WQ_UNBOUND, 0); if (!amt_wq) { err = -ENOMEM; goto rtnl_unregister; diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 589996cef5db..8d457d2c3bcc 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1850,6 +1850,7 @@ static int rcar_canfd_probe(struct platform_device *pdev) of_child = of_get_child_by_name(pdev->dev.of_node, name); if (of_child && of_device_is_available(of_child)) channels_mask |= BIT(i); + of_node_put(of_child); }
if (chip_id != RENESAS_RZG2L) { diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 8014b18d9391..aa0bcf01e20a 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -447,18 +447,21 @@ int ksz_switch_register(struct ksz_device *dev, ports = of_get_child_by_name(dev->dev->of_node, "ethernet-ports"); if (!ports) ports = of_get_child_by_name(dev->dev->of_node, "ports"); - if (ports) + if (ports) { for_each_available_child_of_node(ports, port) { if (of_property_read_u32(port, "reg", &port_num)) continue; if (!(dev->port_mask & BIT(port_num))) { of_node_put(port); + of_node_put(ports); return -EINVAL; } of_get_phy_mode(port, &dev->ports[port_num].interface); } + of_node_put(ports); + } dev->synclko_125 = of_property_read_bool(dev->dev->of_node, "microchip,synclko-125"); dev->synclko_disable = of_property_read_bool(dev->dev->of_node, diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index b33841c6507a..7734c6b1baca 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -3383,12 +3383,28 @@ static const struct of_device_id sja1105_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, sja1105_dt_ids);
+static const struct spi_device_id sja1105_spi_ids[] = { + { "sja1105e" }, + { "sja1105t" }, + { "sja1105p" }, + { "sja1105q" }, + { "sja1105r" }, + { "sja1105s" }, + { "sja1110a" }, + { "sja1110b" }, + { "sja1110c" }, + { "sja1110d" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, sja1105_spi_ids); + static struct spi_driver sja1105_driver = { .driver = { .name = "sja1105", .owner = THIS_MODULE, .of_match_table = of_match_ptr(sja1105_dt_ids), }, + .id_table = sja1105_spi_ids, .probe = sja1105_probe, .remove = sja1105_remove, .shutdown = sja1105_shutdown, diff --git a/drivers/net/dsa/vitesse-vsc73xx-spi.c b/drivers/net/dsa/vitesse-vsc73xx-spi.c index 3110895358d8..97a92e6da60d 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-spi.c +++ b/drivers/net/dsa/vitesse-vsc73xx-spi.c @@ -205,10 +205,20 @@ static const struct of_device_id vsc73xx_of_match[] = { }; MODULE_DEVICE_TABLE(of, vsc73xx_of_match);
+static const struct spi_device_id vsc73xx_spi_ids[] = { + { "vsc7385" }, + { "vsc7388" }, + { "vsc7395" }, + { "vsc7398" }, + { }, +}; +MODULE_DEVICE_TABLE(spi, vsc73xx_spi_ids); + static struct spi_driver vsc73xx_spi_driver = { .probe = vsc73xx_spi_probe, .remove = vsc73xx_spi_remove, .shutdown = vsc73xx_spi_shutdown, + .id_table = vsc73xx_spi_ids, .driver = { .name = "vsc73xx-spi", .of_match_table = vsc73xx_of_match, diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c index 7c760aa65540..ddfe9208529a 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c @@ -1236,8 +1236,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk, csk->sndbuf = newsk->sk_sndbuf; csk->smac_idx = ((struct port_info *)netdev_priv(ndev))->smt_idx; RCV_WSCALE(tp) = select_rcv_wscale(tcp_full_space(newsk), - sock_net(newsk)-> - ipv4.sysctl_tcp_window_scaling, + READ_ONCE(sock_net(newsk)-> + ipv4.sysctl_tcp_window_scaling), tp->window_clamp); neigh_release(n); inet_inherit_port(&tcp_hashinfo, lsk, newsk); @@ -1384,7 +1384,7 @@ static void chtls_pass_accept_request(struct sock *sk, #endif } if (req->tcpopt.wsf <= 14 && - sock_net(sk)->ipv4.sysctl_tcp_window_scaling) { + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) { inet_rsk(oreq)->wscale_ok = 1; inet_rsk(oreq)->snd_wscale = req->tcpopt.wsf; } diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c index 528eb0f223b1..b4f5e57d0285 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.c +++ b/drivers/net/ethernet/emulex/benet/be_cmds.c @@ -2287,7 +2287,7 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
/* Uses sync mcc */ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, - u8 page_num, u8 *data) + u8 page_num, u32 off, u32 len, u8 *data) { struct be_dma_mem cmd; struct be_mcc_wrb *wrb; @@ -2321,10 +2321,10 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, req->port = cpu_to_le32(adapter->hba_port_num); req->page_num = cpu_to_le32(page_num); status = be_mcc_notify_wait(adapter); - if (!status) { + if (!status && len > 0) { struct be_cmd_resp_port_type *resp = cmd.va;
- memcpy(data, resp->page_data, PAGE_DATA_LEN); + memcpy(data, resp->page_data + off, len); } err: mutex_unlock(&adapter->mcc_lock); @@ -2415,7 +2415,7 @@ int be_cmd_query_cable_type(struct be_adapter *adapter) int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, - page_data); + 0, PAGE_DATA_LEN, page_data); if (!status) { switch (adapter->phy.interface_type) { case PHY_TYPE_QSFP: @@ -2440,7 +2440,7 @@ int be_cmd_query_sfp_info(struct be_adapter *adapter) int status;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, - page_data); + 0, PAGE_DATA_LEN, page_data); if (!status) { strlcpy(adapter->phy.vendor_name, page_data + SFP_VENDOR_NAME_OFFSET, SFP_VENDOR_NAME_LEN - 1); diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h index db1f3b908582..e2085c68c0ee 100644 --- a/drivers/net/ethernet/emulex/benet/be_cmds.h +++ b/drivers/net/ethernet/emulex/benet/be_cmds.h @@ -2427,7 +2427,7 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon, int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state); int be_cmd_read_port_transceiver_data(struct be_adapter *adapter, - u8 page_num, u8 *data); + u8 page_num, u32 off, u32 len, u8 *data); int be_cmd_query_cable_type(struct be_adapter *adapter); int be_cmd_query_sfp_info(struct be_adapter *adapter); int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index dfa784339781..bd0df189d871 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -1344,7 +1344,7 @@ static int be_get_module_info(struct net_device *netdev, return -EOPNOTSUPP;
status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, - page_data); + 0, PAGE_DATA_LEN, page_data); if (!status) { if (!page_data[SFP_PLUS_SFF_8472_COMP]) { modinfo->type = ETH_MODULE_SFF_8079; @@ -1362,25 +1362,32 @@ static int be_get_module_eeprom(struct net_device *netdev, { struct be_adapter *adapter = netdev_priv(netdev); int status; + u32 begin, end;
if (!check_privilege(adapter, MAX_PRIVILEGES)) return -EOPNOTSUPP;
- status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, - data); - if (status) - goto err; + begin = eeprom->offset; + end = eeprom->offset + eeprom->len; + + if (begin < PAGE_DATA_LEN) { + status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A0, begin, + min_t(u32, end, PAGE_DATA_LEN) - begin, + data); + if (status) + goto err; + + data += PAGE_DATA_LEN - begin; + begin = PAGE_DATA_LEN; + }
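The rewritten be_get_module_eeprom() maps one flat ethtool window onto two 256-byte transceiver pages: clamp the request to page A0, advance the destination, then read any remainder from page A2 with a page-relative offset. The same arithmetic in standalone form; read_page() and the in-memory pages are stand-ins for the firmware command, and PAGE_LEN mirrors PAGE_DATA_LEN:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_LEN 256    /* mirrors PAGE_DATA_LEN */

static uint8_t pages[2][PAGE_LEN];      /* fake A0 and A2 page data */

static void read_page(int page, uint32_t off, uint32_t len, uint8_t *dst)
{
        memcpy(dst, &pages[page][off], len);
}

static void read_eeprom(uint32_t offset, uint32_t len, uint8_t *dst)
{
        uint32_t begin = offset, end = offset + len;

        if (begin < PAGE_LEN) {
                uint32_t n = (end < PAGE_LEN ? end : PAGE_LEN) - begin;

                read_page(0, begin, n, dst);    /* page A0 slice */
                dst += n;
                begin = PAGE_LEN;
        }
        if (end > PAGE_LEN)                     /* page A2 remainder */
                read_page(1, begin - PAGE_LEN, end - begin, dst);
}

int main(void)
{
        uint8_t buf[32];

        read_eeprom(240, sizeof(buf), buf);     /* straddles the boundary */
        printf("read %zu bytes across the page boundary\n", sizeof(buf));
        return 0;
}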
- if (eeprom->offset + eeprom->len > PAGE_DATA_LEN) { - status = be_cmd_read_port_transceiver_data(adapter, - TR_PAGE_A2, - data + - PAGE_DATA_LEN); + if (end > PAGE_DATA_LEN) { + status = be_cmd_read_port_transceiver_data(adapter, TR_PAGE_A2, + begin - PAGE_DATA_LEN, + end - begin, data); if (status) goto err; } - if (eeprom->offset) - memcpy(data, data + eeprom->offset, eeprom->len); err: return be_cmd_status(status); } diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index 13382df2f2ef..bcf680e83811 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -630,7 +630,6 @@ struct e1000_phy_info { bool disable_polarity_correction; bool is_mdix; bool polarity_correction; - bool reset_disable; bool speed_downgraded; bool autoneg_wait_to_complete; }; diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index e6c8e6d5234f..9466f65a6da7 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -2050,10 +2050,6 @@ static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw) bool blocked = false; int i = 0;
- /* Check the PHY (LCD) reset flag */ - if (hw->phy.reset_disable) - return true; - while ((blocked = !(er32(FWSM) & E1000_ICH_FWSM_RSPCIPHY)) && (i++ < 30)) usleep_range(10000, 11000); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 638a3ddd7ada..2504b11c3169 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -271,7 +271,6 @@ #define I217_CGFREG_ENABLE_MTA_RESET 0x0002 #define I217_MEMPWR PHY_REG(772, 26) #define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010 -#define I217_MEMPWR_MOEM 0x1000
/* Receive Address Initial CRC Calculation */ #define E1000_PCH_RAICC(_n) (0x05F50 + ((_n) * 4)) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index fa06f68c8c80..f1729940e46c 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6494,6 +6494,10 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && hw->mac.type >= e1000_pch_adp) { + /* Keep the GPT clock enabled for CSME */ + mac_data = er32(FEXTNVM); + mac_data |= BIT(3); + ew32(FEXTNVM, mac_data); /* Request ME unconfigure the device from S0ix */ mac_data = er32(H2ME); mac_data &= ~E1000_H2ME_START_DPG; @@ -6987,21 +6991,8 @@ static __maybe_unused int e1000e_pm_suspend(struct device *dev) struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = to_pci_dev(dev); - struct e1000_hw *hw = &adapter->hw; - u16 phy_data; int rc;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && - hw->mac.type >= e1000_pch_adp) { - /* Mask OEM Bits / Gig Disable / Restart AN (772_26[12] = 1) */ - e1e_rphy(hw, I217_MEMPWR, &phy_data); - phy_data |= I217_MEMPWR_MOEM; - e1e_wphy(hw, I217_MEMPWR, phy_data); - - /* Disable LCD reset */ - hw->phy.reset_disable = true; - } - e1000e_flush_lpic(pdev);
e1000e_pm_freeze(dev); @@ -7023,8 +7014,6 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev) struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev)); struct e1000_adapter *adapter = netdev_priv(netdev); struct pci_dev *pdev = to_pci_dev(dev); - struct e1000_hw *hw = &adapter->hw; - u16 phy_data; int rc;
/* Introduce S0ix implementation */ @@ -7035,17 +7024,6 @@ static __maybe_unused int e1000e_pm_resume(struct device *dev) if (rc) return rc;
- if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && - hw->mac.type >= e1000_pch_adp) { - /* Unmask OEM Bits / Gig Disable / Restart AN 772_26[12] = 0 */ - e1e_rphy(hw, I217_MEMPWR, &phy_data); - phy_data &= ~I217_MEMPWR_MOEM; - e1e_wphy(hw, I217_MEMPWR, phy_data); - - /* Enable LCD reset */ - hw->phy.reset_disable = false; - } - return e1000e_pm_thaw(dev); }
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 77eb9c726205..6f01bffd7e5c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -10645,7 +10645,7 @@ static int i40e_reset(struct i40e_pf *pf) **/ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) { - int old_recovery_mode_bit = test_bit(__I40E_RECOVERY_MODE, pf->state); + const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf); struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_hw *hw = &pf->hw; i40e_status ret; @@ -10653,13 +10653,11 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) int v;
if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && - i40e_check_recovery_mode(pf)) { + is_recovery_mode_reported) i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); - }
if (test_bit(__I40E_DOWN, pf->state) && - !test_bit(__I40E_RECOVERY_MODE, pf->state) && - !old_recovery_mode_bit) + !test_bit(__I40E_RECOVERY_MODE, pf->state)) goto clear_recovery; dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
@@ -10686,13 +10684,12 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) * accordingly with regard to resources initialization * and deinitialization */ - if (test_bit(__I40E_RECOVERY_MODE, pf->state) || - old_recovery_mode_bit) { + if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { if (i40e_get_capabilities(pf, i40e_aqc_opc_list_func_capabilities)) goto end_unlock;
- if (test_bit(__I40E_RECOVERY_MODE, pf->state)) { + if (is_recovery_mode_reported) { /* we're staying in recovery mode so we'll reinitialize * misc vector here */ diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 49aed3e506a6..0ea0361cd86b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -64,7 +64,6 @@ struct iavf_vsi { u16 id; DECLARE_BITMAP(state, __IAVF_VSI_STATE_SIZE__); int base_vector; - u16 work_limit; u16 qs_handle; void *priv; /* client driver data reference. */ }; @@ -159,8 +158,12 @@ struct iavf_vlan { struct iavf_vlan_filter { struct list_head list; struct iavf_vlan vlan; - bool remove; /* filter needs to be removed */ - bool add; /* filter needs to be added */ + struct { + u8 is_new_vlan:1; /* filter is new, wait for PF answer */ + u8 remove:1; /* filter needs to be removed */ + u8 add:1; /* filter needs to be added */ + u8 padding:5; + }; };
#define IAVF_MAX_TRAFFIC_CLASS 4 @@ -461,6 +464,10 @@ static inline const char *iavf_state_str(enum iavf_state_t state) return "__IAVF_INIT_VERSION_CHECK"; case __IAVF_INIT_GET_RESOURCES: return "__IAVF_INIT_GET_RESOURCES"; + case __IAVF_INIT_EXTENDED_CAPS: + return "__IAVF_INIT_EXTENDED_CAPS"; + case __IAVF_INIT_CONFIG_ADAPTER: + return "__IAVF_INIT_CONFIG_ADAPTER"; case __IAVF_INIT_SW: return "__IAVF_INIT_SW"; case __IAVF_INIT_FAILED: @@ -520,6 +527,7 @@ int iavf_get_vf_config(struct iavf_adapter *adapter); int iavf_get_vf_vlan_v2_caps(struct iavf_adapter *adapter); int iavf_send_vf_offload_vlan_v2_msg(struct iavf_adapter *adapter); void iavf_set_queue_vlan_tag_loc(struct iavf_adapter *adapter); +u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter); void iavf_irq_enable(struct iavf_adapter *adapter, bool flush); void iavf_configure_queues(struct iavf_adapter *adapter); void iavf_deconfigure_queues(struct iavf_adapter *adapter); diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 3bb56714beb0..e535d4c3da49 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -692,12 +692,8 @@ static int __iavf_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, int queue) { struct iavf_adapter *adapter = netdev_priv(netdev); - struct iavf_vsi *vsi = &adapter->vsi; struct iavf_ring *rx_ring, *tx_ring;
- ec->tx_max_coalesced_frames = vsi->work_limit; - ec->rx_max_coalesced_frames = vsi->work_limit; - /* Rx and Tx usecs per queue value. If user doesn't specify the * queue, return queue 0's value to represent. */ @@ -825,12 +821,8 @@ static int __iavf_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, int queue) { struct iavf_adapter *adapter = netdev_priv(netdev); - struct iavf_vsi *vsi = &adapter->vsi; int i;
- if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq) - vsi->work_limit = ec->tx_max_coalesced_frames_irq; - if (ec->rx_coalesce_usecs == 0) { if (ec->use_adaptive_rx_coalesce) netif_info(adapter, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); @@ -1969,8 +1961,6 @@ static int iavf_set_rxfh(struct net_device *netdev, const u32 *indir,
static const struct ethtool_ops iavf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES | - ETHTOOL_COALESCE_MAX_FRAMES_IRQ | ETHTOOL_COALESCE_USE_ADAPTIVE, .get_drvinfo = iavf_get_drvinfo, .get_link = ethtool_op_get_link, diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index f3ecb3bca33d..2e2c153ce46a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -843,7 +843,7 @@ static void iavf_restore_filters(struct iavf_adapter *adapter) * iavf_get_num_vlans_added - get number of VLANs added * @adapter: board private structure */ -static u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter) +u16 iavf_get_num_vlans_added(struct iavf_adapter *adapter) { return bitmap_weight(adapter->vsi.active_cvlans, VLAN_N_VID) + bitmap_weight(adapter->vsi.active_svlans, VLAN_N_VID); @@ -906,11 +906,6 @@ static int iavf_vlan_rx_add_vid(struct net_device *netdev, if (!iavf_add_vlan(adapter, IAVF_VLAN(vid, be16_to_cpu(proto)))) return -ENOMEM;
- if (proto == cpu_to_be16(ETH_P_8021Q)) - set_bit(vid, adapter->vsi.active_cvlans); - else - set_bit(vid, adapter->vsi.active_svlans); - return 0; }
@@ -2245,7 +2240,6 @@ int iavf_parse_vf_resource_msg(struct iavf_adapter *adapter)
adapter->vsi.back = adapter; adapter->vsi.base_vector = 1; - adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK; vsi->netdev = adapter->netdev; vsi->qs_handle = adapter->vsi_res->qset_handle; if (adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) { @@ -2956,6 +2950,9 @@ static void iavf_reset_task(struct work_struct *work) adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER; iavf_misc_irq_enable(adapter);
+ bitmap_clear(adapter->vsi.active_cvlans, 0, VLAN_N_VID); + bitmap_clear(adapter->vsi.active_svlans, 0, VLAN_N_VID); + mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
/* We were running when the reset started, so we need to restore some diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index 978f651c6b09..06d18797d25a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -194,7 +194,7 @@ static bool iavf_clean_tx_irq(struct iavf_vsi *vsi, struct iavf_tx_buffer *tx_buf; struct iavf_tx_desc *tx_desc; unsigned int total_bytes = 0, total_packets = 0; - unsigned int budget = vsi->work_limit; + unsigned int budget = IAVF_DEFAULT_IRQ_WORK;
tx_buf = &tx_ring->tx_bi[i]; tx_desc = IAVF_TX_DESC(tx_ring, i); @@ -1285,11 +1285,10 @@ static struct iavf_rx_buffer *iavf_get_rx_buffer(struct iavf_ring *rx_ring, { struct iavf_rx_buffer *rx_buffer;
- if (!size) - return NULL; - rx_buffer = &rx_ring->rx_bi[rx_ring->next_to_clean]; prefetchw(rx_buffer->page); + if (!size) + return rx_buffer;
/* we are reusing so sync this buffer for CPU use */ dma_sync_single_range_for_cpu(rx_ring->dev, diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 782450d5c12f..1603e99bae4a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -626,6 +626,33 @@ static void iavf_mac_add_reject(struct iavf_adapter *adapter) spin_unlock_bh(&adapter->mac_vlan_list_lock); }
+/** + * iavf_vlan_add_reject + * @adapter: adapter structure + * + * Remove VLAN filters from list based on PF response. + **/ +static void iavf_vlan_add_reject(struct iavf_adapter *adapter) +{ + struct iavf_vlan_filter *f, *ftmp; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) { + if (f->is_new_vlan) { + if (f->vlan.tpid == ETH_P_8021Q) + clear_bit(f->vlan.vid, + adapter->vsi.active_cvlans); + else + clear_bit(f->vlan.vid, + adapter->vsi.active_svlans); + + list_del(&f->list); + kfree(f); + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); +} + /** * iavf_add_vlans * @adapter: adapter structure @@ -683,6 +710,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter) vvfl->vlan_id[i] = f->vlan.vid; i++; f->add = false; + f->is_new_vlan = true; if (i == count) break; } @@ -695,10 +723,18 @@ void iavf_add_vlans(struct iavf_adapter *adapter) iavf_send_pf_msg(adapter, VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len); kfree(vvfl); } else { + u16 max_vlans = adapter->vlan_v2_caps.filtering.max_filters; + u16 current_vlans = iavf_get_num_vlans_added(adapter); struct virtchnl_vlan_filter_list_v2 *vvfl_v2;
adapter->current_op = VIRTCHNL_OP_ADD_VLAN_V2;
+ if ((count + current_vlans) > max_vlans && + current_vlans < max_vlans) { + count = max_vlans - iavf_get_num_vlans_added(adapter); + more = true; + } + len = sizeof(*vvfl_v2) + ((count - 1) * sizeof(struct virtchnl_vlan_filter)); if (len > IAVF_MAX_AQ_BUF_SIZE) { @@ -725,6 +761,9 @@ void iavf_add_vlans(struct iavf_adapter *adapter) &adapter->vlan_v2_caps.filtering.filtering_support; struct virtchnl_vlan *vlan;
+ if (i == count) + break; + /* give priority over outer if it's enabled */ if (filtering_support->outer) vlan = &vvfl_v2->filters[i].outer; @@ -736,8 +775,7 @@ void iavf_add_vlans(struct iavf_adapter *adapter)
i++; f->add = false; - if (i == count) - break; + f->is_new_vlan = true; } }
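Together with the VIRTCHNL_OP_ADD_VLAN_V2 completion handlers later in this patch, is_new_vlan gives each filter a small life cycle: marked in flight when the request is sent, committed to the active_cvlans/active_svlans bitmaps on the PF's ACK, or deleted again by iavf_vlan_add_reject() on a NAK. A plain-C model of the commit step; a real NAK removes the filter entirely, which is simplified here to clearing it:

#include <stdbool.h>
#include <stdio.h>

struct filter {
        int vid;
        bool is_new;    /* in flight, mirrors is_new_vlan */
        bool active;    /* mirrors the active_*vlans bitmap bit */
};

static void pf_reply(struct filter *f, int n, bool ack)
{
        for (int i = 0; i < n; i++) {
                if (!f[i].is_new)
                        continue;
                f[i].is_new = false;
                f[i].active = ack;      /* ACK commits, NAK unwinds */
        }
}

int main(void)
{
        struct filter f[] = { { 100, true, false }, { 200, true, false } };

        pf_reply(f, 2, true);
        for (int i = 0; i < 2; i++)
                printf("vid %d active=%d\n", f[i].vid, f[i].active);
        return 0;
}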
@@ -2080,6 +2118,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, */ iavf_netdev_features_vlan_strip_set(netdev, true); break; + case VIRTCHNL_OP_ADD_VLAN_V2: + iavf_vlan_add_reject(adapter); + dev_warn(&adapter->pdev->dev, "Failed to add VLAN filter, error %s\n", + iavf_stat_str(&adapter->hw, v_retval)); + break; default: dev_err(&adapter->pdev->dev, "PF returned error %d (%s) to our request %d\n", v_retval, iavf_stat_str(&adapter->hw, v_retval), @@ -2332,6 +2375,24 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, spin_unlock_bh(&adapter->adv_rss_lock); } break; + case VIRTCHNL_OP_ADD_VLAN_V2: { + struct iavf_vlan_filter *f; + + spin_lock_bh(&adapter->mac_vlan_list_lock); + list_for_each_entry(f, &adapter->vlan_filter_list, list) { + if (f->is_new_vlan) { + f->is_new_vlan = false; + if (f->vlan.tpid == ETH_P_8021Q) + set_bit(f->vlan.vid, + adapter->vsi.active_cvlans); + else + set_bit(f->vlan.vid, + adapter->vsi.active_svlans); + } + } + spin_unlock_bh(&adapter->mac_vlan_list_lock); + } + break; case VIRTCHNL_OP_ENABLE_VLAN_STRIPPING: /* PF enabled vlan strip on this VF. * Update netdev->features if needed to be in sync with ethtool. diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 74b2c590ed5d..38e46e9ba8bb 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6171,6 +6171,9 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg) u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); u32 value = 0;
+ if (IGC_REMOVED(hw_addr)) + return ~value; + value = readl(&hw_addr[reg]);
/* reads should not return all F's */ diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index e197a33d93a0..026c3b65fc37 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -306,7 +306,8 @@ u32 igc_rd32(struct igc_hw *hw, u32 reg); #define wr32(reg, val) \ do { \ u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ - writel((val), &hw_addr[(reg)]); \ + if (!IGC_REMOVED(hw_addr)) \ + writel((val), &hw_addr[(reg)]); \ } while (0)
#define rd32(reg) (igc_rd32(hw, reg)) @@ -318,4 +319,6 @@ do { \
#define array_rd32(reg, offset) (igc_rd32(hw, (reg) + ((offset) << 2)))
+#define IGC_REMOVED(h) unlikely(!(h)) + #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 921a4d977d65..8813b4dd6872 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -779,6 +779,7 @@ struct ixgbe_adapter { #ifdef CONFIG_IXGBE_IPSEC struct ixgbe_ipsec *ipsec; #endif /* CONFIG_IXGBE_IPSEC */ + spinlock_t vfs_lock; };
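IGC_REMOVED() treats a NULL hw_addr as a surprise-removed adapter: reads return all ones, which is what a PCIe read to a device that is no longer there produces anyway, and writes are dropped rather than dereferencing NULL. A userspace model of the guarded accessors:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t *hw_addr;      /* NULL once the device is gone */

static uint32_t rd32(uint32_t reg)
{
        if (!hw_addr)
                return ~0u;     /* all F's, like a read to a dead device */
        return hw_addr[reg / 4];
}

static void wr32(uint32_t reg, uint32_t val)
{
        if (hw_addr)            /* silently dropped after removal */
                hw_addr[reg / 4] = val;
}

int main(void)
{
        static uint32_t fake_bar[64];

        hw_addr = fake_bar;
        wr32(0x10, 0x1234);
        printf("present: 0x%x\n", rd32(0x10));

        hw_addr = NULL;         /* surprise removal */
        wr32(0x10, 0x5678);
        printf("removed: 0x%x\n", rd32(0x10));
        return 0;
}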
static inline int ixgbe_determine_xdp_q_idx(int cpu) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index c4a4954aa317..6c403f112d29 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -6402,6 +6402,9 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter, /* n-tuple support exists, always init our spinlock */ spin_lock_init(&adapter->fdir_perfect_lock);
+ /* init spinlock to avoid concurrency of VF resources */ + spin_lock_init(&adapter->vfs_lock); + #ifdef CONFIG_IXGBE_DCB ixgbe_init_dcb(adapter); #endif diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index d4e63f0644c3..a1e69c734863 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -205,10 +205,13 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter, unsigned int max_vfs) int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) { unsigned int num_vfs = adapter->num_vfs, vf; + unsigned long flags; int rss;
+ spin_lock_irqsave(&adapter->vfs_lock, flags); /* set num VFs to 0 to prevent access to vfinfo */ adapter->num_vfs = 0; + spin_unlock_irqrestore(&adapter->vfs_lock, flags);
/* put the reference to all of the vf devices */ for (vf = 0; vf < num_vfs; ++vf) { @@ -1355,8 +1358,10 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) void ixgbe_msg_task(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; + unsigned long flags; u32 vf;
+ spin_lock_irqsave(&adapter->vfs_lock, flags); for (vf = 0; vf < adapter->num_vfs; vf++) { /* process any reset requests */ if (!ixgbe_check_for_rst(hw, vf)) @@ -1370,6 +1375,7 @@ void ixgbe_msg_task(struct ixgbe_adapter *adapter) if (!ixgbe_check_for_ack(hw, vf)) ixgbe_rcv_ack_from_vf(adapter, vf); } + spin_unlock_irqrestore(&adapter->vfs_lock, flags); }
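The new vfs_lock closes a window where ixgbe_disable_sriov() frees vfinfo while ixgbe_msg_task() is still iterating it: teardown zeroes num_vfs under the same lock the loop holds, so the count and the array can never disagree. A pthread sketch of that shape; the kernel uses spin_lock_irqsave and the names here are simplified:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_spinlock_t vfs_lock;
static int num_vfs;
static int *vfinfo;

static void msg_task(void)
{
        pthread_spin_lock(&vfs_lock);
        for (int vf = 0; vf < num_vfs; vf++)
                vfinfo[vf]++;   /* count and array agree under the lock */
        pthread_spin_unlock(&vfs_lock);
}

static void disable_sriov(void)
{
        pthread_spin_lock(&vfs_lock);
        num_vfs = 0;            /* publish "no VFs" before freeing */
        pthread_spin_unlock(&vfs_lock);
        free(vfinfo);
        vfinfo = NULL;
}

int main(void)
{
        pthread_spin_init(&vfs_lock, PTHREAD_PROCESS_PRIVATE);
        num_vfs = 4;
        vfinfo = calloc(num_vfs, sizeof(*vfinfo));
        if (!vfinfo)
                return 1;
        msg_task();
        disable_sriov();
        msg_task();             /* sees num_vfs == 0, touches nothing */
        printf("done\n");
        return 0;
}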
static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) diff --git a/drivers/net/ethernet/marvell/prestera/prestera_flower.c b/drivers/net/ethernet/marvell/prestera/prestera_flower.c index 921959a980ee..d8cfa4a7de0f 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_flower.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_flower.c @@ -139,12 +139,12 @@ static int prestera_flower_parse_meta(struct prestera_acl_rule *rule, } port = netdev_priv(ingress_dev);
- mask = htons(0x1FFF); - key = htons(port->hw_id); + mask = htons(0x1FFF << 3); + key = htons(port->hw_id << 3); rule_match_set(r_match->key, SYS_PORT, key); rule_match_set(r_match->mask, SYS_PORT, mask);
- mask = htons(0x1FF); + mask = htons(0x3FF); key = htons(port->dev_id); rule_match_set(r_match->key, SYS_DEV, key); rule_match_set(r_match->mask, SYS_DEV, mask); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 7ad663c5b1ab..c00d6c4ed37c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -5387,7 +5387,7 @@ static bool mlxsw_sp_fi_is_gateway(const struct mlxsw_sp *mlxsw_sp, { const struct fib_nh *nh = fib_info_nh(fi, 0);
- return nh->fib_nh_scope == RT_SCOPE_LINK || + return nh->fib_nh_gw_family || mlxsw_sp_nexthop4_ipip_type(mlxsw_sp, nh, NULL); }
@@ -10263,7 +10263,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, unsigned long *fields = config->fields; u32 hash_fields;
- switch (net->ipv4.sysctl_fib_multipath_hash_policy) { + switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) { case 0: mlxsw_sp_mp4_hash_outer_addr(config); break; @@ -10281,7 +10281,7 @@ static void mlxsw_sp_mp4_hash_init(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_mp_hash_inner_l3(config); break; case 3: - hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields; + hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields); /* Outer */ MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_NOT_TCP_NOT_UDP); MLXSW_SP_MP_HASH_HEADER_SET(headers, IPV4_EN_TCP_UDP); @@ -10462,13 +10462,14 @@ static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp) static int __mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) { struct net *net = mlxsw_sp_net(mlxsw_sp); - bool usp = net->ipv4.sysctl_ip_fwd_update_priority; char rgcr_pl[MLXSW_REG_RGCR_LEN]; u64 max_rifs; + bool usp;
if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_RIFS)) return -EIO; max_rifs = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); + usp = READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority);
mlxsw_reg_rgcr_pack(rgcr_pl, true, true); mlxsw_reg_rgcr_max_router_interfaces_set(rgcr_pl, max_rifs); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c index 005e56ea5da1..5893770bfd94 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_mac.c @@ -75,6 +75,9 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid, unsigned int vid, enum macaccess_entry_type type) { + int ret; + + spin_lock(&lan966x->mac_lock); lan966x_mac_select(lan966x, mac, vid);
/* Issue a write command */ @@ -86,7 +89,10 @@ static int __lan966x_mac_learn(struct lan966x *lan966x, int pgid, ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_LEARN), lan966x, ANA_MACACCESS);
- return lan966x_mac_wait_for_completion(lan966x); + ret = lan966x_mac_wait_for_completion(lan966x); + spin_unlock(&lan966x->mac_lock); + + return ret; }
/* The mask of the front ports is encoded inside the mac parameter via a call @@ -113,11 +119,13 @@ int lan966x_mac_learn(struct lan966x *lan966x, int port, return __lan966x_mac_learn(lan966x, port, false, mac, vid, type); }
-int lan966x_mac_forget(struct lan966x *lan966x, - const unsigned char mac[ETH_ALEN], - unsigned int vid, - enum macaccess_entry_type type) +static int lan966x_mac_forget_locked(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) { + lockdep_assert_held(&lan966x->mac_lock); + lan966x_mac_select(lan966x, mac, vid);
/* Issue a forget command */ @@ -128,6 +136,20 @@ int lan966x_mac_forget(struct lan966x *lan966x, return lan966x_mac_wait_for_completion(lan966x); }
+int lan966x_mac_forget(struct lan966x *lan966x, + const unsigned char mac[ETH_ALEN], + unsigned int vid, + enum macaccess_entry_type type) +{ + int ret; + + spin_lock(&lan966x->mac_lock); + ret = lan966x_mac_forget_locked(lan966x, mac, vid, type); + spin_unlock(&lan966x->mac_lock); + + return ret; +} + int lan966x_mac_cpu_learn(struct lan966x *lan966x, const char *addr, u16 vid) { return lan966x_mac_learn(lan966x, PGID_CPU, addr, vid, ENTRYTYPE_LOCKED); @@ -161,7 +183,7 @@ static struct lan966x_mac_entry *lan966x_mac_alloc_entry(const unsigned char *ma { struct lan966x_mac_entry *mac_entry;
- mac_entry = kzalloc(sizeof(*mac_entry), GFP_KERNEL); + mac_entry = kzalloc(sizeof(*mac_entry), GFP_ATOMIC); if (!mac_entry) return NULL;
@@ -179,7 +201,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x, struct lan966x_mac_entry *res = NULL; struct lan966x_mac_entry *mac_entry;
- spin_lock(&lan966x->mac_lock); list_for_each_entry(mac_entry, &lan966x->mac_entries, list) { if (mac_entry->vid == vid && ether_addr_equal(mac, mac_entry->mac) && @@ -188,7 +209,6 @@ static struct lan966x_mac_entry *lan966x_mac_find_entry(struct lan966x *lan966x, break; } } - spin_unlock(&lan966x->mac_lock);
return res; } @@ -231,8 +251,11 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port, { struct lan966x_mac_entry *mac_entry;
- if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) + spin_lock(&lan966x->mac_lock); + if (lan966x_mac_lookup(lan966x, addr, vid, ENTRYTYPE_NORMAL)) { + spin_unlock(&lan966x->mac_lock); return 0; + }
/* In case the entry already exists, don't add it again to SW, * just update HW, but we need to look in the actual HW because @@ -241,21 +264,25 @@ int lan966x_mac_add_entry(struct lan966x *lan966x, struct lan966x_port *port, * add the entry but without the extern_learn flag. */ mac_entry = lan966x_mac_find_entry(lan966x, addr, vid, port->chip_port); - if (mac_entry) - return lan966x_mac_learn(lan966x, port->chip_port, - addr, vid, ENTRYTYPE_LOCKED); + if (mac_entry) { + spin_unlock(&lan966x->mac_lock); + goto mac_learn; + }
mac_entry = lan966x_mac_alloc_entry(addr, vid, port->chip_port); - if (!mac_entry) + if (!mac_entry) { + spin_unlock(&lan966x->mac_lock); return -ENOMEM; + }
- spin_lock(&lan966x->mac_lock); list_add_tail(&mac_entry->list, &lan966x->mac_entries); spin_unlock(&lan966x->mac_lock);
- lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED); lan966x_fdb_call_notifiers(SWITCHDEV_FDB_OFFLOADED, addr, vid, port->dev);
+mac_learn: + lan966x_mac_learn(lan966x, port->chip_port, addr, vid, ENTRYTYPE_LOCKED); + return 0; }
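The lan966x rework above is the usual _locked/wrapper split: paths that already hold mac_lock (the del/purge list walks and the IRQ handler below) call lan966x_mac_forget_locked(), everyone else goes through the locking wrapper, and lockdep_assert_held() documents the contract. A pthread model of the shape:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mac_lock = PTHREAD_MUTEX_INITIALIZER;

/* caller must hold mac_lock (the driver asserts it with lockdep) */
static int mac_forget_locked(int vid)
{
        printf("forget vid %d\n", vid);
        return 0;
}

static int mac_forget(int vid)
{
        int ret;

        pthread_mutex_lock(&mac_lock);
        ret = mac_forget_locked(vid);
        pthread_mutex_unlock(&mac_lock);
        return ret;
}

int main(void)
{
        mac_forget(42);                 /* external caller */

        pthread_mutex_lock(&mac_lock);  /* e.g. walking the entry list */
        mac_forget_locked(43);
        pthread_mutex_unlock(&mac_lock);
        return 0;
}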
@@ -269,8 +296,9 @@ int lan966x_mac_del_entry(struct lan966x *lan966x, const unsigned char *addr, list) { if (mac_entry->vid == vid && ether_addr_equal(addr, mac_entry->mac)) { - lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid, - ENTRYTYPE_LOCKED); + lan966x_mac_forget_locked(lan966x, mac_entry->mac, + mac_entry->vid, + ENTRYTYPE_LOCKED);
list_del(&mac_entry->list); kfree(mac_entry); @@ -288,8 +316,8 @@ void lan966x_mac_purge_entries(struct lan966x *lan966x) spin_lock(&lan966x->mac_lock); list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) { - lan966x_mac_forget(lan966x, mac_entry->mac, mac_entry->vid, - ENTRYTYPE_LOCKED); + lan966x_mac_forget_locked(lan966x, mac_entry->mac, + mac_entry->vid, ENTRYTYPE_LOCKED);
list_del(&mac_entry->list); kfree(mac_entry); @@ -325,10 +353,13 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row, { struct lan966x_mac_entry *mac_entry, *tmp; unsigned char mac[ETH_ALEN] __aligned(2); + struct list_head mac_deleted_entries; u32 dest_idx; u32 column; u16 vid;
+ INIT_LIST_HEAD(&mac_deleted_entries); + spin_lock(&lan966x->mac_lock); list_for_each_entry_safe(mac_entry, tmp, &lan966x->mac_entries, list) { bool found = false; @@ -362,20 +393,26 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row, }
if (!found) { - /* Notify the bridge that the entry doesn't exist - * anymore in the HW and remove the entry from the SW - * list - */ - lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, - mac_entry->mac, mac_entry->vid, - lan966x->ports[mac_entry->port_index]->dev); - list_del(&mac_entry->list); - kfree(mac_entry); + /* Move the entry from SW list to a tmp list such that + * it would be deleted later + */ + list_add_tail(&mac_entry->list, &mac_deleted_entries); } } spin_unlock(&lan966x->mac_lock);
+ list_for_each_entry_safe(mac_entry, tmp, &mac_deleted_entries, list) { + /* Notify the bridge that the entry doesn't exist + * anymore in the HW + */ + lan966x_mac_notifiers(SWITCHDEV_FDB_DEL_TO_BRIDGE, + mac_entry->mac, mac_entry->vid, + lan966x->ports[mac_entry->port_index]->dev); + list_del(&mac_entry->list); + kfree(mac_entry); + } + /* Now go to the list of columns and see if any entry was not in the SW * list, then that means that the entry is new so it needs to notify the * bridge. @@ -396,13 +433,20 @@ static void lan966x_mac_irq_process(struct lan966x *lan966x, u32 row, if (WARN_ON(dest_idx >= lan966x->num_phys_ports)) continue;
+ spin_lock(&lan966x->mac_lock); + mac_entry = lan966x_mac_find_entry(lan966x, mac, vid, dest_idx); + if (mac_entry) { + spin_unlock(&lan966x->mac_lock); + continue; + } + mac_entry = lan966x_mac_alloc_entry(mac, vid, dest_idx); - if (!mac_entry) + if (!mac_entry) { + spin_unlock(&lan966x->mac_lock); return; + }
mac_entry->row = row; - - spin_lock(&lan966x->mac_lock); list_add_tail(&mac_entry->list, &lan966x->mac_entries); spin_unlock(&lan966x->mac_lock);
@@ -424,6 +468,7 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x) lan966x, ANA_MACTINDX);
while (1) { + spin_lock(&lan966x->mac_lock); lan_rmw(ANA_MACACCESS_MAC_TABLE_CMD_SET(MACACCESS_CMD_SYNC_GET_NEXT), ANA_MACACCESS_MAC_TABLE_CMD, lan966x, ANA_MACACCESS); @@ -447,12 +492,15 @@ irqreturn_t lan966x_mac_irq_handler(struct lan966x *lan966x) stop = false;
if (column == LAN966X_MAC_COLUMNS - 1 && - index == 0 && stop) + index == 0 && stop) { + spin_unlock(&lan966x->mac_lock); break; + }
entry[column].mach = lan_rd(lan966x, ANA_MACHDATA); entry[column].macl = lan_rd(lan966x, ANA_MACLDATA); entry[column].maca = lan_rd(lan966x, ANA_MACACCESS); + spin_unlock(&lan966x->mac_lock);
/* Once all the columns are read process them */ if (column == LAN966X_MAC_COLUMNS - 1) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 1b9421e844a9..79036767c99d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -473,7 +473,7 @@ nfp_fl_set_tun(struct nfp_app *app, struct nfp_fl_set_tun *set_tun, set_tun->ttl = ip4_dst_hoplimit(&rt->dst); ip_rt_put(rt); } else { - set_tun->ttl = net->ipv4.sysctl_ip_default_ttl; + set_tun->ttl = READ_ONCE(net->ipv4.sysctl_ip_default_ttl); } }
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c index 6ff88df58767..ca8ab290013c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -576,32 +576,7 @@ static int mediatek_dwmac_init(struct platform_device *pdev, void *priv) } }
- ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks); - if (ret) { - dev_err(plat->dev, "failed to enable clks, err = %d\n", ret); - return ret; - } - - ret = clk_prepare_enable(plat->rmii_internal_clk); - if (ret) { - dev_err(plat->dev, "failed to enable rmii internal clk, err = %d\n", ret); - goto err_clk; - } - return 0; - -err_clk: - clk_bulk_disable_unprepare(variant->num_clks, plat->clks); - return ret; -} - -static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv) -{ - struct mediatek_dwmac_plat_data *plat = priv; - const struct mediatek_dwmac_variant *variant = plat->variant; - - clk_disable_unprepare(plat->rmii_internal_clk); - clk_bulk_disable_unprepare(variant->num_clks, plat->clks); }
static int mediatek_dwmac_clks_config(void *priv, bool enabled) @@ -643,7 +618,6 @@ static int mediatek_dwmac_common_data(struct platform_device *pdev, plat->addr64 = priv_plat->variant->dma_bit_mask; plat->bsp_priv = priv_plat; plat->init = mediatek_dwmac_init; - plat->exit = mediatek_dwmac_exit; plat->clks_config = mediatek_dwmac_clks_config; if (priv_plat->variant->dwmac_fix_mac_speed) plat->fix_mac_speed = priv_plat->variant->dwmac_fix_mac_speed; @@ -712,13 +686,32 @@ static int mediatek_dwmac_probe(struct platform_device *pdev) mediatek_dwmac_common_data(pdev, plat_dat, priv_plat); mediatek_dwmac_init(pdev, priv_plat);
+ ret = mediatek_dwmac_clks_config(priv_plat, true); + if (ret) + return ret; + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) { stmmac_remove_config_dt(pdev, plat_dat); - return ret; + goto err_drv_probe; }
return 0; + +err_drv_probe: + mediatek_dwmac_clks_config(priv_plat, false); + return ret; +} + +static int mediatek_dwmac_remove(struct platform_device *pdev) +{ + struct mediatek_dwmac_plat_data *priv_plat = get_stmmac_bsp_priv(&pdev->dev); + int ret; + + ret = stmmac_pltfr_remove(pdev); + mediatek_dwmac_clks_config(priv_plat, false); + + return ret; }
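The dwmac-mediatek rework above shifts clock ownership out of the .init/.exit hooks: probe now turns the clocks on through the existing clks_config() helper, unwinds them if stmmac_dvr_probe() fails, and a dedicated remove() (wired into the platform_driver just below) turns them off only after stmmac_pltfr_remove() has finished touching the hardware. A sketch of the balance being established, with placeholder foo_* names:

/* Sketch: enable in probe, unwind on error, mirror in remove. */
#include <linux/platform_device.h>

static int foo_clks_set(struct platform_device *pdev, bool on)
{
	/* stand-in for the driver's clks_config() callback */
	return 0;
}

static int foo_core_probe(struct platform_device *pdev) { return 0; }
static int foo_core_remove(struct platform_device *pdev) { return 0; }

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = foo_clks_set(pdev, true);	/* paired with remove() below */
	if (ret)
		return ret;

	ret = foo_core_probe(pdev);
	if (ret)
		goto err_clks;		/* unwind in reverse order */

	return 0;

err_clks:
	foo_clks_set(pdev, false);
	return ret;
}

static int foo_remove(struct platform_device *pdev)
{
	int ret = foo_core_remove(pdev);	/* hardware quiesced first */

	foo_clks_set(pdev, false);		/* then clocks may go away */
	return ret;
}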
static const struct of_device_id mediatek_dwmac_match[] = { @@ -733,7 +726,7 @@ MODULE_DEVICE_TABLE(of, mediatek_dwmac_match);
static struct platform_driver mediatek_dwmac_driver = { .probe = mediatek_dwmac_probe, - .remove = stmmac_pltfr_remove, + .remove = mediatek_dwmac_remove, .driver = { .name = "dwmac-mediatek", .pm = &stmmac_pltfr_pm_ops, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index fd41db65fe1d..af3339041134 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -219,6 +219,9 @@ static void dwmac4_map_mtl_dma(struct mac_device_info *hw, u32 queue, u32 chan) if (queue == 0 || queue == 4) { value &= ~MTL_RXQ_DMA_Q04MDMACH_MASK; value |= MTL_RXQ_DMA_Q04MDMACH(chan); + } else if (queue > 4) { + value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue - 4); + value |= MTL_RXQ_DMA_QXMDMACH(chan, queue - 4); } else { value &= ~MTL_RXQ_DMA_QXMDMACH_MASK(queue); value |= MTL_RXQ_DMA_QXMDMACH(chan, queue); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index abfb3cd5958d..9c3055ee2608 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -803,14 +803,6 @@ static int stmmac_ethtool_op_set_eee(struct net_device *dev, netdev_warn(priv->dev, "Setting EEE tx-lpi is not supported\n");
- if (priv->hw->xpcs) { - ret = xpcs_config_eee(priv->hw->xpcs, - priv->plat->mult_fact_100ns, - edata->eee_enabled); - if (ret) - return ret; - } - if (!edata->eee_enabled) stmmac_disable_eee_mode(priv);
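Among the hunks above, the dwmac4_map_mtl_dma() change corrects the queue-to-DMA-channel mapping for receive queues above 4: the mapping registers pack four byte-spaced fields each, so a queue in the second register must be addressed by its position within that register (queue - 4), not by its absolute number, which previously produced a shift past bit 31. (Queues 0 and 4 use a wider dedicated field and are handled separately in the driver.) A compilable illustration of the field arithmetic; the mask width and values here are illustrative, not the exact hardware layout:

#include <stdint.h>
#include <stdio.h>

/* one 32-bit MAP register holds four fields, 8 bits apart */
#define QXMDMACH_MASK(q)	(0xfu << (8 * (q)))
#define QXMDMACH(chan, q)	((uint32_t)(chan) << (8 * (q)))

static uint32_t map_queue(uint32_t reg, unsigned int queue, unsigned int chan)
{
	unsigned int q = queue & 3;	/* position inside MAP0 or MAP1 */

	reg &= ~QXMDMACH_MASK(q);
	return reg | QXMDMACH(chan, q);
}

int main(void)
{
	/* queue 6 sits in MAP1 at field position 2 (6 - 4), not 6 */
	printf("%08x\n", map_queue(0, 6, 3));	/* prints 00030000 */
	return 0;
}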
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 2525a80353b7..6a7f63a58aef 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -834,19 +834,10 @@ int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags) struct timespec64 now; u32 sec_inc = 0; u64 temp = 0; - int ret;
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) return -EOPNOTSUPP;
- ret = clk_prepare_enable(priv->plat->clk_ptp_ref); - if (ret < 0) { - netdev_warn(priv->dev, - "failed to enable PTP reference clock: %pe\n", - ERR_PTR(ret)); - return ret; - } - stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); priv->systime_flags = systime_flags;
@@ -3270,6 +3261,14 @@ static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
stmmac_mmc_setup(priv);
+ if (ptp_register) { + ret = clk_prepare_enable(priv->plat->clk_ptp_ref); + if (ret < 0) + netdev_warn(priv->dev, + "failed to enable PTP reference clock: %pe\n", + ERR_PTR(ret)); + } + ret = stmmac_init_ptp(priv); if (ret == -EOPNOTSUPP) netdev_info(priv->dev, "PTP not supported by HW\n"); @@ -7220,8 +7219,6 @@ int stmmac_dvr_remove(struct device *dev) netdev_info(priv->dev, "%s: removing driver", __func__);
pm_runtime_get_sync(dev); - pm_runtime_disable(dev); - pm_runtime_put_noidle(dev);
stmmac_stop_all_dma(priv); stmmac_mac_set(priv, priv->ioaddr, false); @@ -7248,6 +7245,9 @@ int stmmac_dvr_remove(struct device *dev) mutex_destroy(&priv->lock); bitmap_free(priv->af_xdp_zc_qps);
+ pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); + return 0; } EXPORT_SYMBOL_GPL(stmmac_dvr_remove); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 11e1055e8260..9f5cac4000da 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -815,7 +815,13 @@ static int __maybe_unused stmmac_pltfr_noirq_resume(struct device *dev) if (ret) return ret;
- stmmac_init_tstamp_counter(priv, priv->systime_flags); + ret = clk_prepare_enable(priv->plat->clk_ptp_ref); + if (ret < 0) { + netdev_warn(priv->dev, + "failed to enable PTP reference clock: %pe\n", + ERR_PTR(ret)); + return ret; + } }
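The stmmac changes above have two threads. First, enabling the PTP reference clock moves out of stmmac_init_tstamp_counter(), which is also called from resume paths, and into stmmac_hw_setup() and the noirq resume handler, so each enable has exactly one owner and the prepare/enable count stays balanced across suspend/resume. Second, stmmac_dvr_remove() now keeps runtime PM alive while it stops the DMA and unregisters the device, only disabling it at the end. A sketch of that remove() ordering, with placeholder foo_* names:

#include <linux/device.h>
#include <linux/pm_runtime.h>

static void foo_stop_hw(struct device *dev) { /* register writes */ }
static void foo_unregister(struct device *dev) { /* netdev teardown */ }

static int foo_remove(struct device *dev)
{
	pm_runtime_get_sync(dev);	/* device is powered from here on */

	foo_stop_hw(dev);		/* safe: runtime PM still active */
	foo_unregister(dev);

	pm_runtime_disable(dev);	/* no more PM callbacks */
	pm_runtime_put_noidle(dev);	/* balance get_sync, skip idling */

	return 0;
}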
return 0; diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 873f6deabbd1..dc1f6d8444ad 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1801,7 +1801,7 @@ static const struct driver_info ax88179_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1814,7 +1814,7 @@ static const struct driver_info ax88178a_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1827,7 +1827,7 @@ static const struct driver_info cypress_GX3_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1840,7 +1840,7 @@ static const struct driver_info dlink_dub1312_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1853,7 +1853,7 @@ static const struct driver_info sitecom_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1866,7 +1866,7 @@ static const struct driver_info samsung_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1879,7 +1879,7 @@ static const struct driver_info lenovo_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1892,7 +1892,7 @@ static const struct driver_info belkin_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1905,7 +1905,7 @@ static const struct driver_info toshiba_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1918,7 +1918,7 @@ static const struct driver_info mct_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1931,7 +1931,7 @@ static const struct driver_info at_umc2000_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | 
FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1944,7 +1944,7 @@ static const struct driver_info at_umc200_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1957,7 +1957,7 @@ static const struct driver_info at_umc2000sp_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index ee41088c5251..6b4efae11e57 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -32,7 +32,7 @@ #define NETNEXT_VERSION "12"
/* Information for net */ -#define NET_VERSION "12" +#define NET_VERSION "13"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" @@ -5915,7 +5915,8 @@ static void r8153_enter_oob(struct r8152 *tp)
wait_oob_link_list_ready(tp);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, mtu_to_size(tp->netdev->mtu)); + ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522); + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT);
switch (tp->version) { case RTL_VER_03: @@ -5951,6 +5952,10 @@ static void r8153_enter_oob(struct r8152 *tp) ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); + ocp_data |= MCU_BORW_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); + rxdy_gated_en(tp, false);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR); @@ -6553,6 +6558,9 @@ static void rtl8156_down(struct r8152 *tp) rtl_disable(tp); rtl_reset_bmu(tp);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, 1522); + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_DEFAULT); + /* Clear teredo wake event. bit[15:8] is the teredo wakeup * type. Set it to zero. bits[7:0] are the W1C bits about * the events. Set them to all 1 to clear them. @@ -6563,6 +6571,10 @@ static void rtl8156_down(struct r8152 *tp) ocp_data |= NOW_IS_OOB; ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7); + ocp_data |= MCU_BORW_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_SFF_STS_7, ocp_data); + rtl_rx_vlan_en(tp, true); rxdy_gated_en(tp, false);
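The r8152 hunks above stop deriving the out-of-band (firmware-managed) receive size from the runtime MTU: on the way into OOB mode the driver now pins PLA_RMS to 1522 and resets PLA_MTPS to its default, and it sets MCU_BORW_EN in PLA_SFF_STS_7 (apparently handing buffer ownership to the MCU). The 1522 is presumably the classic maximum VLAN-tagged Ethernet frame; the arithmetic:

#include <stdio.h>

#define ETH_HLEN	14	/* destination + source + ethertype */
#define ETH_DATA_LEN	1500	/* default MTU */
#define VLAN_HLEN	4	/* 802.1Q tag */
#define ETH_FCS_LEN	4	/* frame check sequence */

int main(void)
{
	printf("%d\n", ETH_HLEN + ETH_DATA_LEN + VLAN_HLEN + ETH_FCS_LEN);
	/* prints 1522 */
	return 0;
}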
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index d270a204324e..fb185cb05258 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -604,17 +604,19 @@ static unsigned int hv_msi_get_int_vector(struct irq_data *data) return cfg->vector; }
-static void hv_set_msi_entry_from_desc(union hv_msi_entry *msi_entry, - struct msi_desc *msi_desc) -{ - msi_entry->address.as_uint32 = msi_desc->msg.address_lo; - msi_entry->data.as_uint32 = msi_desc->msg.data; -} - static int hv_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *info) { - return pci_msi_prepare(domain, dev, nvec, info); + int ret = pci_msi_prepare(domain, dev, nvec, info); + + /* + * By using the interrupt remapper in the hypervisor IOMMU, contiguous + * CPU vectors are not needed for multi-MSI + */ + if (info->type == X86_IRQ_ALLOC_TYPE_PCI_MSI) + info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS; + + return ret; }
/** @@ -631,6 +633,7 @@ static void hv_arch_irq_unmask(struct irq_data *data) { struct msi_desc *msi_desc = irq_data_get_msi_desc(data); struct hv_retarget_device_interrupt *params; + struct tran_int_desc *int_desc; struct hv_pcibus_device *hbus; struct cpumask *dest; cpumask_var_t tmp; @@ -645,6 +648,7 @@ static void hv_arch_irq_unmask(struct irq_data *data) pdev = msi_desc_to_pci_dev(msi_desc); pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); + int_desc = data->chip_data;
spin_lock_irqsave(&hbus->retarget_msi_interrupt_lock, flags);
@@ -652,7 +656,8 @@ static void hv_arch_irq_unmask(struct irq_data *data) memset(params, 0, sizeof(*params)); params->partition_id = HV_PARTITION_ID_SELF; params->int_entry.source = HV_INTERRUPT_SOURCE_MSI; - hv_set_msi_entry_from_desc(¶ms->int_entry.msi_entry, msi_desc); + params->int_entry.msi_entry.address.as_uint32 = int_desc->address & 0xffffffff; + params->int_entry.msi_entry.data.as_uint32 = int_desc->data; params->device_id = (hbus->hdev->dev_instance.b[5] << 24) | (hbus->hdev->dev_instance.b[4] << 16) | (hbus->hdev->dev_instance.b[7] << 8) | @@ -1513,6 +1518,10 @@ static void hv_int_desc_free(struct hv_pci_dev *hpdev, u8 buffer[sizeof(struct pci_delete_interrupt)]; } ctxt;
+ if (!int_desc->vector_count) { + kfree(int_desc); + return; + } memset(&ctxt, 0, sizeof(ctxt)); int_pkt = (struct pci_delete_interrupt *)&ctxt.pkt.message; int_pkt->message_type.type = @@ -1597,12 +1606,12 @@ static void hv_pci_compose_compl(void *context, struct pci_response *resp,
static u32 hv_compose_msi_req_v1( struct pci_create_interrupt *int_pkt, struct cpumask *affinity, - u32 slot, u8 vector) + u32 slot, u8 vector, u8 vector_count) { int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; - int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = DELIVERY_MODE;
/* @@ -1625,14 +1634,14 @@ static int hv_compose_msi_req_get_cpu(struct cpumask *affinity)
static u32 hv_compose_msi_req_v2( struct pci_create_interrupt2 *int_pkt, struct cpumask *affinity, - u32 slot, u8 vector) + u32 slot, u8 vector, u8 vector_count) { int cpu;
int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE2; int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; - int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = DELIVERY_MODE; cpu = hv_compose_msi_req_get_cpu(affinity); int_pkt->int_desc.processor_array[0] = @@ -1644,7 +1653,7 @@ static u32 hv_compose_msi_req_v2(
static u32 hv_compose_msi_req_v3( struct pci_create_interrupt3 *int_pkt, struct cpumask *affinity, - u32 slot, u32 vector) + u32 slot, u32 vector, u8 vector_count) { int cpu;
@@ -1652,7 +1661,7 @@ static u32 hv_compose_msi_req_v3( int_pkt->wslot.slot = slot; int_pkt->int_desc.vector = vector; int_pkt->int_desc.reserved = 0; - int_pkt->int_desc.vector_count = 1; + int_pkt->int_desc.vector_count = vector_count; int_pkt->int_desc.delivery_mode = DELIVERY_MODE; cpu = hv_compose_msi_req_get_cpu(affinity); int_pkt->int_desc.processor_array[0] = @@ -1683,6 +1692,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct cpumask *dest; struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; + struct msi_desc *msi_desc; + u8 vector, vector_count; struct { struct pci_packet pci_pkt; union { @@ -1695,7 +1706,17 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) u32 size; int ret;
- pdev = msi_desc_to_pci_dev(irq_data_get_msi_desc(data)); + /* Reuse the previous allocation */ + if (data->chip_data) { + int_desc = data->chip_data; + msg->address_hi = int_desc->address >> 32; + msg->address_lo = int_desc->address & 0xffffffff; + msg->data = int_desc->data; + return; + } + + msi_desc = irq_data_get_msi_desc(data); + pdev = msi_desc_to_pci_dev(msi_desc); dest = irq_data_get_effective_affinity_mask(data); pbus = pdev->bus; hbus = container_of(pbus->sysdata, struct hv_pcibus_device, sysdata); @@ -1704,17 +1725,40 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) if (!hpdev) goto return_null_message;
- /* Free any previous message that might have already been composed. */ - if (data->chip_data) { - int_desc = data->chip_data; - data->chip_data = NULL; - hv_int_desc_free(hpdev, int_desc); - } - int_desc = kzalloc(sizeof(*int_desc), GFP_ATOMIC); if (!int_desc) goto drop_reference;
+ if (!msi_desc->pci.msi_attrib.is_msix && msi_desc->nvec_used > 1) { + /* + * If this is not the first MSI of Multi MSI, we already have + * a mapping. Can exit early. + */ + if (msi_desc->irq != data->irq) { + data->chip_data = int_desc; + int_desc->address = msi_desc->msg.address_lo | + (u64)msi_desc->msg.address_hi << 32; + int_desc->data = msi_desc->msg.data + + (data->irq - msi_desc->irq); + msg->address_hi = msi_desc->msg.address_hi; + msg->address_lo = msi_desc->msg.address_lo; + msg->data = int_desc->data; + put_pcichild(hpdev); + return; + } + /* + * The vector we select here is a dummy value. The correct + * value gets sent to the hypervisor in unmask(). This needs + * to be aligned with the count, and also not zero. Multi-msi + * is powers of 2 up to 32, so 32 will always work here. + */ + vector = 32; + vector_count = msi_desc->nvec_used; + } else { + vector = hv_msi_get_int_vector(data); + vector_count = 1; + } + memset(&ctxt, 0, sizeof(ctxt)); init_completion(&comp.comp_pkt.host_event); ctxt.pci_pkt.completion_func = hv_pci_compose_compl; @@ -1725,7 +1769,8 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, dest, hpdev->desc.win_slot.slot, - hv_msi_get_int_vector(data)); + vector, + vector_count); break;
case PCI_PROTOCOL_VERSION_1_2: @@ -1733,14 +1778,16 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, dest, hpdev->desc.win_slot.slot, - hv_msi_get_int_vector(data)); + vector, + vector_count); break;
case PCI_PROTOCOL_VERSION_1_4: size = hv_compose_msi_req_v3(&ctxt.int_pkts.v3, dest, hpdev->desc.win_slot.slot, - hv_msi_get_int_vector(data)); + vector, + vector_count); break;
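The pci-hyperv changes above teach hv_compose_msi_msg() about multi-MSI blocks: only the first interrupt of a block performs the hypercall, now asking for vector_count vectors at once (with a dummy vector of 32, which is non-zero and aligned for every power-of-two count up to 32), while each sibling derives its message locally, because PCI multi-message MSI delivers vector data + n for the nth interrupt of a block. The freshly stored address/data pair is also what unmask() now sends instead of the possibly stale msi_desc copy. A small model of the sibling derivation, with illustrative types:

#include <stdint.h>
#include <stdio.h>

struct msg { uint32_t address_lo, address_hi, data; };

/* The nth interrupt of an MSI block reuses the base address; only the
 * low bits of the data differ (PCI multi-message MSI semantics). */
static struct msg sibling_msg(struct msg base, unsigned int base_irq,
			      unsigned int irq)
{
	base.data += irq - base_irq;
	return base;
}

int main(void)
{
	struct msg base = { 0xfee00000u, 0, 0x40 };
	struct msg m = sibling_msg(base, 100, 102);

	printf("data=%#x\n", m.data);	/* data=0x42 */
	return 0;
}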
default: diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c index adccf03b3e5a..b920dd5237c7 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-37xx.c @@ -101,7 +101,7 @@ struct armada_37xx_pinctrl { struct device *dev; struct gpio_chip gpio_chip; struct irq_chip irq_chip; - spinlock_t irq_lock; + raw_spinlock_t irq_lock; struct pinctrl_desc pctl; struct pinctrl_dev *pctl_dev; struct armada_37xx_pin_group *groups; @@ -522,9 +522,9 @@ static void armada_37xx_irq_ack(struct irq_data *d) unsigned long flags;
armada_37xx_irq_update_reg(®, d); - spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); writel(d->mask, info->base + reg); - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); }
static void armada_37xx_irq_mask(struct irq_data *d) @@ -535,10 +535,10 @@ static void armada_37xx_irq_mask(struct irq_data *d) unsigned long flags;
armada_37xx_irq_update_reg(®, d); - spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); val = readl(info->base + reg); writel(val & ~d->mask, info->base + reg); - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); }
static void armada_37xx_irq_unmask(struct irq_data *d) @@ -549,10 +549,10 @@ static void armada_37xx_irq_unmask(struct irq_data *d) unsigned long flags;
armada_37xx_irq_update_reg(®, d); - spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); val = readl(info->base + reg); writel(val | d->mask, info->base + reg); - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); }
static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on) @@ -563,14 +563,14 @@ static int armada_37xx_irq_set_wake(struct irq_data *d, unsigned int on) unsigned long flags;
armada_37xx_irq_update_reg(®, d); - spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); val = readl(info->base + reg); if (on) val |= (BIT(d->hwirq % GPIO_PER_REG)); else val &= ~(BIT(d->hwirq % GPIO_PER_REG)); writel(val, info->base + reg); - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return 0; } @@ -582,7 +582,7 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type) u32 val, reg = IRQ_POL; unsigned long flags;
- spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); armada_37xx_irq_update_reg(®, d); val = readl(info->base + reg); switch (type) { @@ -606,11 +606,11 @@ static int armada_37xx_irq_set_type(struct irq_data *d, unsigned int type) break; } default: - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); return -EINVAL; } writel(val, info->base + reg); - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags);
return 0; } @@ -625,7 +625,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info,
regmap_read(info->regmap, INPUT_VAL + 4*reg_idx, &l);
- spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); p = readl(info->base + IRQ_POL + 4 * reg_idx); if ((p ^ l) & (1 << bit_num)) { /* @@ -646,7 +646,7 @@ static int armada_37xx_edge_both_irq_swap_pol(struct armada_37xx_pinctrl *info, ret = -1; }
- spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); return ret; }
@@ -663,11 +663,11 @@ static void armada_37xx_irq_handler(struct irq_desc *desc) u32 status; unsigned long flags;
- spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); status = readl_relaxed(info->base + IRQ_STATUS + 4 * i); /* Manage only the interrupt that was enabled */ status &= readl_relaxed(info->base + IRQ_EN + 4 * i); - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); while (status) { u32 hwirq = ffs(status) - 1; u32 virq = irq_find_mapping(d, hwirq + @@ -694,12 +694,12 @@ static void armada_37xx_irq_handler(struct irq_desc *desc)
update_status: /* Update status in case a new IRQ appears */ - spin_lock_irqsave(&info->irq_lock, flags); + raw_spin_lock_irqsave(&info->irq_lock, flags); status = readl_relaxed(info->base + IRQ_STATUS + 4 * i); /* Manage only the interrupt that was enabled */ status &= readl_relaxed(info->base + IRQ_EN + 4 * i); - spin_unlock_irqrestore(&info->irq_lock, flags); + raw_spin_unlock_irqrestore(&info->irq_lock, flags); } } chained_irq_exit(chip, desc); @@ -726,23 +726,13 @@ static int armada_37xx_irqchip_register(struct platform_device *pdev, struct gpio_chip *gc = &info->gpio_chip; struct irq_chip *irqchip = &info->irq_chip; struct gpio_irq_chip *girq = &gc->irq; + struct device_node *np = to_of_node(gc->fwnode); struct device *dev = &pdev->dev; - struct device_node *np; - int ret = -ENODEV, i, nr_irq_parent; + unsigned int i, nr_irq_parent;
- /* Check if we have at least one gpio-controller child node */ - for_each_child_of_node(dev->of_node, np) { - if (of_property_read_bool(np, "gpio-controller")) { - ret = 0; - break; - } - } - if (ret) - return dev_err_probe(dev, ret, "no gpio-controller child node\n"); + raw_spin_lock_init(&info->irq_lock);
nr_irq_parent = of_irq_count(np); - spin_lock_init(&info->irq_lock); - if (!nr_irq_parent) { dev_err(dev, "invalid or no IRQ\n"); return 0; @@ -1121,25 +1111,40 @@ static const struct of_device_id armada_37xx_pinctrl_of_match[] = { { }, };
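Every armada-37xx conversion above swaps spinlock_t for raw_spinlock_t. The reason is PREEMPT_RT: there, spinlock_t becomes a sleeping lock, yet the irq_chip callbacks (ack/mask/unmask/set_type) run in hard interrupt context where sleeping is forbidden, so any lock they take must stay a true spinning lock. A minimal sketch of the rule, with hypothetical driver state:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/spinlock.h>

struct foo_gpio {
	raw_spinlock_t lock;	/* spinlock_t would sleep on PREEMPT_RT */
	void __iomem *en_reg;
};

static void foo_irq_mask(struct irq_data *d)
{
	struct foo_gpio *p = irq_data_get_irq_chip_data(d);
	unsigned long flags;
	u32 val;

	/* hardirq context: must not sleep, so only raw locks are legal */
	raw_spin_lock_irqsave(&p->lock, flags);
	val = readl(p->en_reg) & ~BIT(irqd_to_hwirq(d));
	writel(val, p->en_reg);
	raw_spin_unlock_irqrestore(&p->lock, flags);
}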
+static const struct regmap_config armada_37xx_pinctrl_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, + .use_raw_spinlock = true, +}; + static int __init armada_37xx_pinctrl_probe(struct platform_device *pdev) { struct armada_37xx_pinctrl *info; struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; struct regmap *regmap; + void __iomem *base; int ret;
+ base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); + if (IS_ERR(base)) { + dev_err(dev, "failed to ioremap base address: %pe\n", base); + return PTR_ERR(base); + } + + regmap = devm_regmap_init_mmio(dev, base, + &armada_37xx_pinctrl_regmap_config); + if (IS_ERR(regmap)) { + dev_err(dev, "failed to create regmap: %pe\n", regmap); + return PTR_ERR(regmap); + } + info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM;
info->dev = dev; - - regmap = syscon_node_to_regmap(np); - if (IS_ERR(regmap)) - return dev_err_probe(dev, PTR_ERR(regmap), "cannot get regmap\n"); info->regmap = regmap; - info->data = of_device_get_match_data(dev);
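The probe rework above is the other half of the RT story: the shared regmap returned by syscon_node_to_regmap() guards accesses with a lock that is not raw, so it cannot be used from the raw-spinlock IRQ paths. The driver therefore maps the registers itself and builds a private MMIO regmap with use_raw_spinlock set. Condensed, with illustrative names:

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static const struct regmap_config foo_regmap_cfg = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.use_raw_spinlock = true,	/* regmap locks with a raw spinlock */
};

static struct regmap *foo_map_regs(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(base))
		return ERR_CAST(base);	/* propagate as a regmap error */

	return devm_regmap_init_mmio(&pdev->dev, base, &foo_regmap_cfg);
}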
ret = armada_37xx_pinctrl_register(pdev, info); diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c index 6a956ee94494..6ee9f0de8ede 100644 --- a/drivers/pinctrl/pinctrl-ocelot.c +++ b/drivers/pinctrl/pinctrl-ocelot.c @@ -28,19 +28,12 @@ #define ocelot_clrsetbits(addr, clear, set) \ writel((readl(addr) & ~(clear)) | (set), (addr))
-/* PINCONFIG bits (sparx5 only) */ enum { PINCONF_BIAS, PINCONF_SCHMITT, PINCONF_DRIVE_STRENGTH, };
-#define BIAS_PD_BIT BIT(4) -#define BIAS_PU_BIT BIT(3) -#define BIAS_BITS (BIAS_PD_BIT|BIAS_PU_BIT) -#define SCHMITT_BIT BIT(2) -#define DRIVE_BITS GENMASK(1, 0) - /* GPIO standard registers */ #define OCELOT_GPIO_OUT_SET 0x0 #define OCELOT_GPIO_OUT_CLR 0x4 @@ -314,6 +307,13 @@ struct ocelot_pin_caps { unsigned char a_functions[OCELOT_FUNC_PER_PIN]; /* Additional functions */ };
+struct ocelot_pincfg_data { + u8 pd_bit; + u8 pu_bit; + u8 drive_bits; + u8 schmitt_bit; +}; + struct ocelot_pinctrl { struct device *dev; struct pinctrl_dev *pctl; @@ -321,10 +321,16 @@ struct ocelot_pinctrl { struct regmap *map; struct regmap *pincfg; struct pinctrl_desc *desc; + const struct ocelot_pincfg_data *pincfg_data; struct ocelot_pmx_func func[FUNC_MAX]; u8 stride; };
+struct ocelot_match_data { + struct pinctrl_desc desc; + struct ocelot_pincfg_data pincfg_data; +}; + #define LUTON_P(p, f0, f1) \ static struct ocelot_pin_caps luton_pin_##p = { \ .pin = p, \ @@ -1318,24 +1324,27 @@ static int ocelot_hw_get_value(struct ocelot_pinctrl *info, int ret = -EOPNOTSUPP;
if (info->pincfg) { + const struct ocelot_pincfg_data *opd = info->pincfg_data; u32 regcfg;
- ret = regmap_read(info->pincfg, pin, ®cfg); + ret = regmap_read(info->pincfg, + pin * regmap_get_reg_stride(info->pincfg), + ®cfg); if (ret) return ret;
ret = 0; switch (reg) { case PINCONF_BIAS: - *val = regcfg & BIAS_BITS; + *val = regcfg & (opd->pd_bit | opd->pu_bit); break;
case PINCONF_SCHMITT: - *val = regcfg & SCHMITT_BIT; + *val = regcfg & opd->schmitt_bit; break;
case PINCONF_DRIVE_STRENGTH: - *val = regcfg & DRIVE_BITS; + *val = regcfg & opd->drive_bits; break;
default: @@ -1352,14 +1361,18 @@ static int ocelot_pincfg_clrsetbits(struct ocelot_pinctrl *info, u32 regaddr, u32 val; int ret;
- ret = regmap_read(info->pincfg, regaddr, &val); + ret = regmap_read(info->pincfg, + regaddr * regmap_get_reg_stride(info->pincfg), + &val); if (ret) return ret;
val &= ~clrbits; val |= setbits;
- ret = regmap_write(info->pincfg, regaddr, val); + ret = regmap_write(info->pincfg, + regaddr * regmap_get_reg_stride(info->pincfg), + val);
return ret; } @@ -1372,23 +1385,27 @@ static int ocelot_hw_set_value(struct ocelot_pinctrl *info, int ret = -EOPNOTSUPP;
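The pincfg accesses above gain a stride multiplication because regmap addresses registers by offset, not by index: with reg_stride = 4, register N lives at byte offset N * 4, and regmap rejects offsets that are not stride-aligned. Scaling the pin number by regmap_get_reg_stride() keeps the call sites independent of the configured stride. As a one-line helper (illustrative, not from the driver):

#include <linux/regmap.h>

/* pin index -> stride-aligned register offset expected by regmap */
static int foo_read_pin_cfg(struct regmap *map, unsigned int pin, u32 *val)
{
	return regmap_read(map, pin * regmap_get_reg_stride(map), val);
}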
if (info->pincfg) { + const struct ocelot_pincfg_data *opd = info->pincfg_data;
ret = 0; switch (reg) { case PINCONF_BIAS: - ret = ocelot_pincfg_clrsetbits(info, pin, BIAS_BITS, + ret = ocelot_pincfg_clrsetbits(info, pin, + opd->pd_bit | opd->pu_bit, val); break;
case PINCONF_SCHMITT: - ret = ocelot_pincfg_clrsetbits(info, pin, SCHMITT_BIT, + ret = ocelot_pincfg_clrsetbits(info, pin, + opd->schmitt_bit, val); break;
case PINCONF_DRIVE_STRENGTH: if (val <= 3) ret = ocelot_pincfg_clrsetbits(info, pin, - DRIVE_BITS, val); + opd->drive_bits, + val); else ret = -EINVAL; break; @@ -1418,17 +1435,20 @@ static int ocelot_pinconf_get(struct pinctrl_dev *pctldev, if (param == PIN_CONFIG_BIAS_DISABLE) val = (val == 0); else if (param == PIN_CONFIG_BIAS_PULL_DOWN) - val = (val & BIAS_PD_BIT ? true : false); + val = !!(val & info->pincfg_data->pd_bit); else /* PIN_CONFIG_BIAS_PULL_UP */ - val = (val & BIAS_PU_BIT ? true : false); + val = !!(val & info->pincfg_data->pu_bit); break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE: + if (!info->pincfg_data->schmitt_bit) + return -EOPNOTSUPP; + err = ocelot_hw_get_value(info, pin, PINCONF_SCHMITT, &val); if (err) return err;
- val = (val & SCHMITT_BIT ? true : false); + val = !!(val & info->pincfg_data->schmitt_bit); break;
case PIN_CONFIG_DRIVE_STRENGTH: @@ -1472,6 +1492,7 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, unsigned long *configs, unsigned int num_configs) { struct ocelot_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); + const struct ocelot_pincfg_data *opd = info->pincfg_data; u32 param, arg, p; int cfg, err = 0;
@@ -1484,8 +1505,8 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, case PIN_CONFIG_BIAS_PULL_UP: case PIN_CONFIG_BIAS_PULL_DOWN: arg = (param == PIN_CONFIG_BIAS_DISABLE) ? 0 : - (param == PIN_CONFIG_BIAS_PULL_UP) ? BIAS_PU_BIT : - BIAS_PD_BIT; + (param == PIN_CONFIG_BIAS_PULL_UP) ? + opd->pu_bit : opd->pd_bit;
err = ocelot_hw_set_value(info, pin, PINCONF_BIAS, arg); if (err) @@ -1494,7 +1515,10 @@ static int ocelot_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin, break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE: - arg = arg ? SCHMITT_BIT : 0; + if (!opd->schmitt_bit) + return -EOPNOTSUPP; + + arg = arg ? opd->schmitt_bit : 0; err = ocelot_hw_set_value(info, pin, PINCONF_SCHMITT, arg); if (err) @@ -1555,69 +1579,94 @@ static const struct pinctrl_ops ocelot_pctl_ops = { .dt_free_map = pinconf_generic_dt_free_map, };
-static struct pinctrl_desc luton_desc = { - .name = "luton-pinctrl", - .pins = luton_pins, - .npins = ARRAY_SIZE(luton_pins), - .pctlops = &ocelot_pctl_ops, - .pmxops = &ocelot_pmx_ops, - .owner = THIS_MODULE, +static struct ocelot_match_data luton_desc = { + .desc = { + .name = "luton-pinctrl", + .pins = luton_pins, + .npins = ARRAY_SIZE(luton_pins), + .pctlops = &ocelot_pctl_ops, + .pmxops = &ocelot_pmx_ops, + .owner = THIS_MODULE, + }, };
-static struct pinctrl_desc serval_desc = { - .name = "serval-pinctrl", - .pins = serval_pins, - .npins = ARRAY_SIZE(serval_pins), - .pctlops = &ocelot_pctl_ops, - .pmxops = &ocelot_pmx_ops, - .owner = THIS_MODULE, +static struct ocelot_match_data serval_desc = { + .desc = { + .name = "serval-pinctrl", + .pins = serval_pins, + .npins = ARRAY_SIZE(serval_pins), + .pctlops = &ocelot_pctl_ops, + .pmxops = &ocelot_pmx_ops, + .owner = THIS_MODULE, + }, };
-static struct pinctrl_desc ocelot_desc = { - .name = "ocelot-pinctrl", - .pins = ocelot_pins, - .npins = ARRAY_SIZE(ocelot_pins), - .pctlops = &ocelot_pctl_ops, - .pmxops = &ocelot_pmx_ops, - .owner = THIS_MODULE, +static struct ocelot_match_data ocelot_desc = { + .desc = { + .name = "ocelot-pinctrl", + .pins = ocelot_pins, + .npins = ARRAY_SIZE(ocelot_pins), + .pctlops = &ocelot_pctl_ops, + .pmxops = &ocelot_pmx_ops, + .owner = THIS_MODULE, + }, };
-static struct pinctrl_desc jaguar2_desc = { - .name = "jaguar2-pinctrl", - .pins = jaguar2_pins, - .npins = ARRAY_SIZE(jaguar2_pins), - .pctlops = &ocelot_pctl_ops, - .pmxops = &ocelot_pmx_ops, - .owner = THIS_MODULE, +static struct ocelot_match_data jaguar2_desc = { + .desc = { + .name = "jaguar2-pinctrl", + .pins = jaguar2_pins, + .npins = ARRAY_SIZE(jaguar2_pins), + .pctlops = &ocelot_pctl_ops, + .pmxops = &ocelot_pmx_ops, + .owner = THIS_MODULE, + }, };
-static struct pinctrl_desc servalt_desc = { - .name = "servalt-pinctrl", - .pins = servalt_pins, - .npins = ARRAY_SIZE(servalt_pins), - .pctlops = &ocelot_pctl_ops, - .pmxops = &ocelot_pmx_ops, - .owner = THIS_MODULE, +static struct ocelot_match_data servalt_desc = { + .desc = { + .name = "servalt-pinctrl", + .pins = servalt_pins, + .npins = ARRAY_SIZE(servalt_pins), + .pctlops = &ocelot_pctl_ops, + .pmxops = &ocelot_pmx_ops, + .owner = THIS_MODULE, + }, };
-static struct pinctrl_desc sparx5_desc = { - .name = "sparx5-pinctrl", - .pins = sparx5_pins, - .npins = ARRAY_SIZE(sparx5_pins), - .pctlops = &ocelot_pctl_ops, - .pmxops = &ocelot_pmx_ops, - .confops = &ocelot_confops, - .owner = THIS_MODULE, +static struct ocelot_match_data sparx5_desc = { + .desc = { + .name = "sparx5-pinctrl", + .pins = sparx5_pins, + .npins = ARRAY_SIZE(sparx5_pins), + .pctlops = &ocelot_pctl_ops, + .pmxops = &ocelot_pmx_ops, + .confops = &ocelot_confops, + .owner = THIS_MODULE, + }, + .pincfg_data = { + .pd_bit = BIT(4), + .pu_bit = BIT(3), + .drive_bits = GENMASK(1, 0), + .schmitt_bit = BIT(2), + }, };
-static struct pinctrl_desc lan966x_desc = { - .name = "lan966x-pinctrl", - .pins = lan966x_pins, - .npins = ARRAY_SIZE(lan966x_pins), - .pctlops = &ocelot_pctl_ops, - .pmxops = &lan966x_pmx_ops, - .confops = &ocelot_confops, - .owner = THIS_MODULE, +static struct ocelot_match_data lan966x_desc = { + .desc = { + .name = "lan966x-pinctrl", + .pins = lan966x_pins, + .npins = ARRAY_SIZE(lan966x_pins), + .pctlops = &ocelot_pctl_ops, + .pmxops = &lan966x_pmx_ops, + .confops = &ocelot_confops, + .owner = THIS_MODULE, + }, + .pincfg_data = { + .pd_bit = BIT(3), + .pu_bit = BIT(2), + .drive_bits = GENMASK(1, 0), + }, };
static int ocelot_create_group_func_map(struct device *dev, @@ -1883,7 +1932,8 @@ static const struct of_device_id ocelot_pinctrl_of_match[] = { {}, };
-static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev) +static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev, + const struct ocelot_pinctrl *info) { void __iomem *base;
@@ -1891,7 +1941,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev) .reg_bits = 32, .val_bits = 32, .reg_stride = 4, - .max_register = 32, + .max_register = info->desc->npins * 4, .name = "pincfg", };
@@ -1906,6 +1956,7 @@ static struct regmap *ocelot_pinctrl_create_pincfg(struct platform_device *pdev)
static int ocelot_pinctrl_probe(struct platform_device *pdev) { + const struct ocelot_match_data *data; struct device *dev = &pdev->dev; struct ocelot_pinctrl *info; struct regmap *pincfg; @@ -1921,7 +1972,16 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev) if (!info) return -ENOMEM;
- info->desc = (struct pinctrl_desc *)device_get_match_data(dev); + data = device_get_match_data(dev); + if (!data) + return -EINVAL; + + info->desc = devm_kmemdup(dev, &data->desc, sizeof(*info->desc), + GFP_KERNEL); + if (!info->desc) + return -ENOMEM; + + info->pincfg_data = &data->pincfg_data;
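The probe above also stops casting away const: device_get_match_data() hands back the shared, per-SoC match table, so the driver now takes a devm_kmemdup() copy of the pinctrl_desc before attaching per-instance data, leaving the static tables untouched if several controllers probe. The pattern in isolation, with a placeholder struct:

#include <linux/device.h>
#include <linux/property.h>
#include <linux/slab.h>

struct foo_match_data { int variant; };

static int foo_copy_match_data(struct device *dev,
			       struct foo_match_data **out)
{
	const struct foo_match_data *data = device_get_match_data(dev);

	if (!data)
		return -EINVAL;	/* no OF/ACPI match data bound */

	*out = devm_kmemdup(dev, data, sizeof(*data), GFP_KERNEL);
	return *out ? 0 : -ENOMEM;
}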
base = devm_ioremap_resource(dev, platform_get_resource(pdev, IORESOURCE_MEM, 0)); @@ -1942,7 +2002,7 @@ static int ocelot_pinctrl_probe(struct platform_device *pdev)
/* Pinconf registers */ if (info->desc->confops) { - pincfg = ocelot_pinctrl_create_pincfg(pdev); + pincfg = ocelot_pinctrl_create_pincfg(pdev, info); if (IS_ERR(pincfg)) dev_dbg(dev, "Failed to create pincfg regmap\n"); else diff --git a/drivers/pinctrl/ralink/Kconfig b/drivers/pinctrl/ralink/Kconfig index a76ee3deb8c3..d0f0a8f2b9b7 100644 --- a/drivers/pinctrl/ralink/Kconfig +++ b/drivers/pinctrl/ralink/Kconfig @@ -3,37 +3,33 @@ menu "Ralink pinctrl drivers" depends on RALINK
config PINCTRL_RALINK - bool "Ralink pin control support" - default y if RALINK - -config PINCTRL_RT2880 - bool "RT2880 pinctrl driver for RALINK/Mediatek SOCs" + bool "Ralink pinctrl driver" select PINMUX select GENERIC_PINCONF
config PINCTRL_MT7620 bool "mt7620 pinctrl driver for RALINK/Mediatek SOCs" depends on RALINK && SOC_MT7620 - select PINCTRL_RT2880 + select PINCTRL_RALINK
config PINCTRL_MT7621 bool "mt7621 pinctrl driver for RALINK/Mediatek SOCs" depends on RALINK && SOC_MT7621 - select PINCTRL_RT2880 + select PINCTRL_RALINK
config PINCTRL_RT288X bool "RT288X pinctrl driver for RALINK/Mediatek SOCs" depends on RALINK && SOC_RT288X - select PINCTRL_RT2880 + select PINCTRL_RALINK
config PINCTRL_RT305X bool "RT305X pinctrl driver for RALINK/Mediatek SOCs" depends on RALINK && SOC_RT305X - select PINCTRL_RT2880 + select PINCTRL_RALINK
config PINCTRL_RT3883 bool "RT3883 pinctrl driver for RALINK/Mediatek SOCs" depends on RALINK && SOC_RT3883 - select PINCTRL_RT2880 + select PINCTRL_RALINK
endmenu diff --git a/drivers/pinctrl/ralink/Makefile b/drivers/pinctrl/ralink/Makefile index a15610206ced..2c1323b74e96 100644 --- a/drivers/pinctrl/ralink/Makefile +++ b/drivers/pinctrl/ralink/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_PINCTRL_RT2880) += pinctrl-rt2880.o +obj-$(CONFIG_PINCTRL_RALINK) += pinctrl-ralink.o
obj-$(CONFIG_PINCTRL_MT7620) += pinctrl-mt7620.o obj-$(CONFIG_PINCTRL_MT7621) += pinctrl-mt7621.o diff --git a/drivers/pinctrl/ralink/pinctrl-mt7620.c b/drivers/pinctrl/ralink/pinctrl-mt7620.c index 6853b5b8b0fe..51b863d85c51 100644 --- a/drivers/pinctrl/ralink/pinctrl-mt7620.c +++ b/drivers/pinctrl/ralink/pinctrl-mt7620.c @@ -5,7 +5,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> -#include "pinmux.h" +#include "pinctrl-ralink.h"
#define MT7620_GPIO_MODE_UART0_SHIFT 2 #define MT7620_GPIO_MODE_UART0_MASK 0x7 @@ -54,20 +54,20 @@ #define MT7620_GPIO_MODE_EPHY 15 #define MT7620_GPIO_MODE_PA 20
-static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) }; -static struct rt2880_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) }; -static struct rt2880_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) }; -static struct rt2880_pmx_func mdio_grp[] = { +static struct ralink_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) }; +static struct ralink_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) }; +static struct ralink_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) }; +static struct ralink_pmx_func mdio_grp[] = { FUNC("mdio", MT7620_GPIO_MODE_MDIO, 22, 2), FUNC("refclk", MT7620_GPIO_MODE_MDIO_REFCLK, 22, 2), }; -static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 24, 12) }; -static struct rt2880_pmx_func refclk_grp[] = { FUNC("spi refclk", 0, 37, 3) }; -static struct rt2880_pmx_func ephy_grp[] = { FUNC("ephy", 0, 40, 5) }; -static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 60, 12) }; -static struct rt2880_pmx_func wled_grp[] = { FUNC("wled", 0, 72, 1) }; -static struct rt2880_pmx_func pa_grp[] = { FUNC("pa", 0, 18, 4) }; -static struct rt2880_pmx_func uartf_grp[] = { +static struct ralink_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 24, 12) }; +static struct ralink_pmx_func refclk_grp[] = { FUNC("spi refclk", 0, 37, 3) }; +static struct ralink_pmx_func ephy_grp[] = { FUNC("ephy", 0, 40, 5) }; +static struct ralink_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 60, 12) }; +static struct ralink_pmx_func wled_grp[] = { FUNC("wled", 0, 72, 1) }; +static struct ralink_pmx_func pa_grp[] = { FUNC("pa", 0, 18, 4) }; +static struct ralink_pmx_func uartf_grp[] = { FUNC("uartf", MT7620_GPIO_MODE_UARTF, 7, 8), FUNC("pcm uartf", MT7620_GPIO_MODE_PCM_UARTF, 7, 8), FUNC("pcm i2s", MT7620_GPIO_MODE_PCM_I2S, 7, 8), @@ -76,20 +76,20 @@ static struct rt2880_pmx_func uartf_grp[] = { FUNC("gpio uartf", MT7620_GPIO_MODE_GPIO_UARTF, 7, 4), FUNC("gpio i2s", MT7620_GPIO_MODE_GPIO_I2S, 7, 4), }; -static struct rt2880_pmx_func wdt_grp[] = { +static struct ralink_pmx_func wdt_grp[] = { FUNC("wdt rst", 0, 17, 1), FUNC("wdt refclk", 0, 17, 1), }; -static struct rt2880_pmx_func pcie_rst_grp[] = { +static struct ralink_pmx_func pcie_rst_grp[] = { FUNC("pcie rst", MT7620_GPIO_MODE_PCIE_RST, 36, 1), FUNC("pcie refclk", MT7620_GPIO_MODE_PCIE_REF, 36, 1) }; -static struct rt2880_pmx_func nd_sd_grp[] = { +static struct ralink_pmx_func nd_sd_grp[] = { FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15), FUNC("sd", MT7620_GPIO_MODE_SD, 47, 13) };
-static struct rt2880_pmx_group mt7620a_pinmux_data[] = { +static struct ralink_pmx_group mt7620a_pinmux_data[] = { GRP("i2c", i2c_grp, 1, MT7620_GPIO_MODE_I2C), GRP("uartf", uartf_grp, MT7620_GPIO_MODE_UART0_MASK, MT7620_GPIO_MODE_UART0_SHIFT), @@ -112,262 +112,262 @@ static struct rt2880_pmx_group mt7620a_pinmux_data[] = { { 0 } };
-static struct rt2880_pmx_func pwm1_grp_mt7628[] = { +static struct ralink_pmx_func pwm1_grp_mt76x8[] = { FUNC("sdxc d6", 3, 19, 1), FUNC("utif", 2, 19, 1), FUNC("gpio", 1, 19, 1), FUNC("pwm1", 0, 19, 1), };
-static struct rt2880_pmx_func pwm0_grp_mt7628[] = { +static struct ralink_pmx_func pwm0_grp_mt76x8[] = { FUNC("sdxc d7", 3, 18, 1), FUNC("utif", 2, 18, 1), FUNC("gpio", 1, 18, 1), FUNC("pwm0", 0, 18, 1), };
-static struct rt2880_pmx_func uart2_grp_mt7628[] = { +static struct ralink_pmx_func uart2_grp_mt76x8[] = { FUNC("sdxc d5 d4", 3, 20, 2), FUNC("pwm", 2, 20, 2), FUNC("gpio", 1, 20, 2), FUNC("uart2", 0, 20, 2), };
-static struct rt2880_pmx_func uart1_grp_mt7628[] = { +static struct ralink_pmx_func uart1_grp_mt76x8[] = { FUNC("sw_r", 3, 45, 2), FUNC("pwm", 2, 45, 2), FUNC("gpio", 1, 45, 2), FUNC("uart1", 0, 45, 2), };
-static struct rt2880_pmx_func i2c_grp_mt7628[] = { +static struct ralink_pmx_func i2c_grp_mt76x8[] = { FUNC("-", 3, 4, 2), FUNC("debug", 2, 4, 2), FUNC("gpio", 1, 4, 2), FUNC("i2c", 0, 4, 2), };
-static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("refclk", 0, 37, 1) }; -static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 36, 1) }; -static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 38, 1) }; -static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) }; +static struct ralink_pmx_func refclk_grp_mt76x8[] = { FUNC("refclk", 0, 37, 1) }; +static struct ralink_pmx_func perst_grp_mt76x8[] = { FUNC("perst", 0, 36, 1) }; +static struct ralink_pmx_func wdt_grp_mt76x8[] = { FUNC("wdt", 0, 38, 1) }; +static struct ralink_pmx_func spi_grp_mt76x8[] = { FUNC("spi", 0, 7, 4) };
-static struct rt2880_pmx_func sd_mode_grp_mt7628[] = { +static struct ralink_pmx_func sd_mode_grp_mt76x8[] = { FUNC("jtag", 3, 22, 8), FUNC("utif", 2, 22, 8), FUNC("gpio", 1, 22, 8), FUNC("sdxc", 0, 22, 8), };
-static struct rt2880_pmx_func uart0_grp_mt7628[] = { +static struct ralink_pmx_func uart0_grp_mt76x8[] = { FUNC("-", 3, 12, 2), FUNC("-", 2, 12, 2), FUNC("gpio", 1, 12, 2), FUNC("uart0", 0, 12, 2), };
-static struct rt2880_pmx_func i2s_grp_mt7628[] = { +static struct ralink_pmx_func i2s_grp_mt76x8[] = { FUNC("antenna", 3, 0, 4), FUNC("pcm", 2, 0, 4), FUNC("gpio", 1, 0, 4), FUNC("i2s", 0, 0, 4), };
-static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = { +static struct ralink_pmx_func spi_cs1_grp_mt76x8[] = { FUNC("-", 3, 6, 1), FUNC("refclk", 2, 6, 1), FUNC("gpio", 1, 6, 1), FUNC("spi cs1", 0, 6, 1), };
-static struct rt2880_pmx_func spis_grp_mt7628[] = { +static struct ralink_pmx_func spis_grp_mt76x8[] = { FUNC("pwm_uart2", 3, 14, 4), FUNC("utif", 2, 14, 4), FUNC("gpio", 1, 14, 4), FUNC("spis", 0, 14, 4), };
-static struct rt2880_pmx_func gpio_grp_mt7628[] = { +static struct ralink_pmx_func gpio_grp_mt76x8[] = { FUNC("pcie", 3, 11, 1), FUNC("refclk", 2, 11, 1), FUNC("gpio", 1, 11, 1), FUNC("gpio", 0, 11, 1), };
-static struct rt2880_pmx_func p4led_kn_grp_mt7628[] = { +static struct ralink_pmx_func p4led_kn_grp_mt76x8[] = { FUNC("jtag", 3, 30, 1), FUNC("utif", 2, 30, 1), FUNC("gpio", 1, 30, 1), FUNC("p4led_kn", 0, 30, 1), };
-static struct rt2880_pmx_func p3led_kn_grp_mt7628[] = { +static struct ralink_pmx_func p3led_kn_grp_mt76x8[] = { FUNC("jtag", 3, 31, 1), FUNC("utif", 2, 31, 1), FUNC("gpio", 1, 31, 1), FUNC("p3led_kn", 0, 31, 1), };
-static struct rt2880_pmx_func p2led_kn_grp_mt7628[] = { +static struct ralink_pmx_func p2led_kn_grp_mt76x8[] = { FUNC("jtag", 3, 32, 1), FUNC("utif", 2, 32, 1), FUNC("gpio", 1, 32, 1), FUNC("p2led_kn", 0, 32, 1), };
-static struct rt2880_pmx_func p1led_kn_grp_mt7628[] = { +static struct ralink_pmx_func p1led_kn_grp_mt76x8[] = { FUNC("jtag", 3, 33, 1), FUNC("utif", 2, 33, 1), FUNC("gpio", 1, 33, 1), FUNC("p1led_kn", 0, 33, 1), };
-static struct rt2880_pmx_func p0led_kn_grp_mt7628[] = { +static struct ralink_pmx_func p0led_kn_grp_mt76x8[] = { FUNC("jtag", 3, 34, 1), FUNC("rsvd", 2, 34, 1), FUNC("gpio", 1, 34, 1), FUNC("p0led_kn", 0, 34, 1), };
-static struct rt2880_pmx_func wled_kn_grp_mt7628[] = { +static struct ralink_pmx_func wled_kn_grp_mt76x8[] = { FUNC("rsvd", 3, 35, 1), FUNC("rsvd", 2, 35, 1), FUNC("gpio", 1, 35, 1), FUNC("wled_kn", 0, 35, 1), };
-static struct rt2880_pmx_func p4led_an_grp_mt7628[] = { +static struct ralink_pmx_func p4led_an_grp_mt76x8[] = { FUNC("jtag", 3, 39, 1), FUNC("utif", 2, 39, 1), FUNC("gpio", 1, 39, 1), FUNC("p4led_an", 0, 39, 1), };
-static struct rt2880_pmx_func p3led_an_grp_mt7628[] = { +static struct ralink_pmx_func p3led_an_grp_mt76x8[] = { FUNC("jtag", 3, 40, 1), FUNC("utif", 2, 40, 1), FUNC("gpio", 1, 40, 1), FUNC("p3led_an", 0, 40, 1), };
-static struct rt2880_pmx_func p2led_an_grp_mt7628[] = { +static struct ralink_pmx_func p2led_an_grp_mt76x8[] = { FUNC("jtag", 3, 41, 1), FUNC("utif", 2, 41, 1), FUNC("gpio", 1, 41, 1), FUNC("p2led_an", 0, 41, 1), };
-static struct rt2880_pmx_func p1led_an_grp_mt7628[] = { +static struct ralink_pmx_func p1led_an_grp_mt76x8[] = { FUNC("jtag", 3, 42, 1), FUNC("utif", 2, 42, 1), FUNC("gpio", 1, 42, 1), FUNC("p1led_an", 0, 42, 1), };
-static struct rt2880_pmx_func p0led_an_grp_mt7628[] = { +static struct ralink_pmx_func p0led_an_grp_mt76x8[] = { FUNC("jtag", 3, 43, 1), FUNC("rsvd", 2, 43, 1), FUNC("gpio", 1, 43, 1), FUNC("p0led_an", 0, 43, 1), };
-static struct rt2880_pmx_func wled_an_grp_mt7628[] = { +static struct ralink_pmx_func wled_an_grp_mt76x8[] = { FUNC("rsvd", 3, 44, 1), FUNC("rsvd", 2, 44, 1), FUNC("gpio", 1, 44, 1), FUNC("wled_an", 0, 44, 1), };
-#define MT7628_GPIO_MODE_MASK 0x3 - -#define MT7628_GPIO_MODE_P4LED_KN 58 -#define MT7628_GPIO_MODE_P3LED_KN 56 -#define MT7628_GPIO_MODE_P2LED_KN 54 -#define MT7628_GPIO_MODE_P1LED_KN 52 -#define MT7628_GPIO_MODE_P0LED_KN 50 -#define MT7628_GPIO_MODE_WLED_KN 48 -#define MT7628_GPIO_MODE_P4LED_AN 42 -#define MT7628_GPIO_MODE_P3LED_AN 40 -#define MT7628_GPIO_MODE_P2LED_AN 38 -#define MT7628_GPIO_MODE_P1LED_AN 36 -#define MT7628_GPIO_MODE_P0LED_AN 34 -#define MT7628_GPIO_MODE_WLED_AN 32 -#define MT7628_GPIO_MODE_PWM1 30 -#define MT7628_GPIO_MODE_PWM0 28 -#define MT7628_GPIO_MODE_UART2 26 -#define MT7628_GPIO_MODE_UART1 24 -#define MT7628_GPIO_MODE_I2C 20 -#define MT7628_GPIO_MODE_REFCLK 18 -#define MT7628_GPIO_MODE_PERST 16 -#define MT7628_GPIO_MODE_WDT 14 -#define MT7628_GPIO_MODE_SPI 12 -#define MT7628_GPIO_MODE_SDMODE 10 -#define MT7628_GPIO_MODE_UART0 8 -#define MT7628_GPIO_MODE_I2S 6 -#define MT7628_GPIO_MODE_CS1 4 -#define MT7628_GPIO_MODE_SPIS 2 -#define MT7628_GPIO_MODE_GPIO 0 - -static struct rt2880_pmx_group mt7628an_pinmux_data[] = { - GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_PWM1), - GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_PWM0), - GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_UART2), - GRP_G("uart1", uart1_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_UART1), - GRP_G("i2c", i2c_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_I2C), - GRP("refclk", refclk_grp_mt7628, 1, MT7628_GPIO_MODE_REFCLK), - GRP("perst", perst_grp_mt7628, 1, MT7628_GPIO_MODE_PERST), - GRP("wdt", wdt_grp_mt7628, 1, MT7628_GPIO_MODE_WDT), - GRP("spi", spi_grp_mt7628, 1, MT7628_GPIO_MODE_SPI), - GRP_G("sdmode", sd_mode_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_SDMODE), - GRP_G("uart0", uart0_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_UART0), - GRP_G("i2s", i2s_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_I2S), - GRP_G("spi cs1", spi_cs1_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_CS1), - GRP_G("spis", spis_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_SPIS), - GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_GPIO), - GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_WLED_AN), - GRP_G("p0led_an", p0led_an_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P0LED_AN), - GRP_G("p1led_an", p1led_an_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P1LED_AN), - GRP_G("p2led_an", p2led_an_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P2LED_AN), - GRP_G("p3led_an", p3led_an_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P3LED_AN), - GRP_G("p4led_an", p4led_an_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P4LED_AN), - GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_WLED_KN), - GRP_G("p0led_kn", p0led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P0LED_KN), - GRP_G("p1led_kn", p1led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P1LED_KN), - GRP_G("p2led_kn", p2led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P2LED_KN), - GRP_G("p3led_kn", p3led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P3LED_KN), - GRP_G("p4led_kn", p4led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK, - 1, MT7628_GPIO_MODE_P4LED_KN), +#define MT76X8_GPIO_MODE_MASK 0x3 + +#define MT76X8_GPIO_MODE_P4LED_KN 58 +#define MT76X8_GPIO_MODE_P3LED_KN 56 +#define MT76X8_GPIO_MODE_P2LED_KN 54 
+#define MT76X8_GPIO_MODE_P1LED_KN 52 +#define MT76X8_GPIO_MODE_P0LED_KN 50 +#define MT76X8_GPIO_MODE_WLED_KN 48 +#define MT76X8_GPIO_MODE_P4LED_AN 42 +#define MT76X8_GPIO_MODE_P3LED_AN 40 +#define MT76X8_GPIO_MODE_P2LED_AN 38 +#define MT76X8_GPIO_MODE_P1LED_AN 36 +#define MT76X8_GPIO_MODE_P0LED_AN 34 +#define MT76X8_GPIO_MODE_WLED_AN 32 +#define MT76X8_GPIO_MODE_PWM1 30 +#define MT76X8_GPIO_MODE_PWM0 28 +#define MT76X8_GPIO_MODE_UART2 26 +#define MT76X8_GPIO_MODE_UART1 24 +#define MT76X8_GPIO_MODE_I2C 20 +#define MT76X8_GPIO_MODE_REFCLK 18 +#define MT76X8_GPIO_MODE_PERST 16 +#define MT76X8_GPIO_MODE_WDT 14 +#define MT76X8_GPIO_MODE_SPI 12 +#define MT76X8_GPIO_MODE_SDMODE 10 +#define MT76X8_GPIO_MODE_UART0 8 +#define MT76X8_GPIO_MODE_I2S 6 +#define MT76X8_GPIO_MODE_CS1 4 +#define MT76X8_GPIO_MODE_SPIS 2 +#define MT76X8_GPIO_MODE_GPIO 0 + +static struct ralink_pmx_group mt76x8_pinmux_data[] = { + GRP_G("pwm1", pwm1_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_PWM1), + GRP_G("pwm0", pwm0_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_PWM0), + GRP_G("uart2", uart2_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_UART2), + GRP_G("uart1", uart1_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_UART1), + GRP_G("i2c", i2c_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_I2C), + GRP("refclk", refclk_grp_mt76x8, 1, MT76X8_GPIO_MODE_REFCLK), + GRP("perst", perst_grp_mt76x8, 1, MT76X8_GPIO_MODE_PERST), + GRP("wdt", wdt_grp_mt76x8, 1, MT76X8_GPIO_MODE_WDT), + GRP("spi", spi_grp_mt76x8, 1, MT76X8_GPIO_MODE_SPI), + GRP_G("sdmode", sd_mode_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_SDMODE), + GRP_G("uart0", uart0_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_UART0), + GRP_G("i2s", i2s_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_I2S), + GRP_G("spi cs1", spi_cs1_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_CS1), + GRP_G("spis", spis_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_SPIS), + GRP_G("gpio", gpio_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_GPIO), + GRP_G("wled_an", wled_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_WLED_AN), + GRP_G("p0led_an", p0led_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P0LED_AN), + GRP_G("p1led_an", p1led_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P1LED_AN), + GRP_G("p2led_an", p2led_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P2LED_AN), + GRP_G("p3led_an", p3led_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P3LED_AN), + GRP_G("p4led_an", p4led_an_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P4LED_AN), + GRP_G("wled_kn", wled_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_WLED_KN), + GRP_G("p0led_kn", p0led_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P0LED_KN), + GRP_G("p1led_kn", p1led_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P1LED_KN), + GRP_G("p2led_kn", p2led_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P2LED_KN), + GRP_G("p3led_kn", p3led_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P3LED_KN), + GRP_G("p4led_kn", p4led_kn_grp_mt76x8, MT76X8_GPIO_MODE_MASK, + 1, MT76X8_GPIO_MODE_P4LED_KN), { 0 } };
static int mt7620_pinmux_probe(struct platform_device *pdev) { if (is_mt76x8()) - return rt2880_pinmux_init(pdev, mt7628an_pinmux_data); + return ralink_pinmux_init(pdev, mt76x8_pinmux_data); else - return rt2880_pinmux_init(pdev, mt7620a_pinmux_data); + return ralink_pinmux_init(pdev, mt7620a_pinmux_data); }
static const struct of_device_id mt7620_pinmux_match[] = { diff --git a/drivers/pinctrl/ralink/pinctrl-mt7621.c b/drivers/pinctrl/ralink/pinctrl-mt7621.c index 7d96144c474e..14b89cb43d4c 100644 --- a/drivers/pinctrl/ralink/pinctrl-mt7621.c +++ b/drivers/pinctrl/ralink/pinctrl-mt7621.c @@ -3,7 +3,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> -#include "pinmux.h" +#include "pinctrl-ralink.h"
#define MT7621_GPIO_MODE_UART1 1 #define MT7621_GPIO_MODE_I2C 2 @@ -34,40 +34,40 @@ #define MT7621_GPIO_MODE_SDHCI_SHIFT 18 #define MT7621_GPIO_MODE_SDHCI_GPIO 1
-static struct rt2880_pmx_func uart1_grp[] = { FUNC("uart1", 0, 1, 2) }; -static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 3, 2) }; -static struct rt2880_pmx_func uart3_grp[] = { +static struct ralink_pmx_func uart1_grp[] = { FUNC("uart1", 0, 1, 2) }; +static struct ralink_pmx_func i2c_grp[] = { FUNC("i2c", 0, 3, 2) }; +static struct ralink_pmx_func uart3_grp[] = { FUNC("uart3", 0, 5, 4), FUNC("i2s", 2, 5, 4), FUNC("spdif3", 3, 5, 4), }; -static struct rt2880_pmx_func uart2_grp[] = { +static struct ralink_pmx_func uart2_grp[] = { FUNC("uart2", 0, 9, 4), FUNC("pcm", 2, 9, 4), FUNC("spdif2", 3, 9, 4), }; -static struct rt2880_pmx_func jtag_grp[] = { FUNC("jtag", 0, 13, 5) }; -static struct rt2880_pmx_func wdt_grp[] = { +static struct ralink_pmx_func jtag_grp[] = { FUNC("jtag", 0, 13, 5) }; +static struct ralink_pmx_func wdt_grp[] = { FUNC("wdt rst", 0, 18, 1), FUNC("wdt refclk", 2, 18, 1), }; -static struct rt2880_pmx_func pcie_rst_grp[] = { +static struct ralink_pmx_func pcie_rst_grp[] = { FUNC("pcie rst", MT7621_GPIO_MODE_PCIE_RST, 19, 1), FUNC("pcie refclk", MT7621_GPIO_MODE_PCIE_REF, 19, 1) }; -static struct rt2880_pmx_func mdio_grp[] = { FUNC("mdio", 0, 20, 2) }; -static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 22, 12) }; -static struct rt2880_pmx_func spi_grp[] = { +static struct ralink_pmx_func mdio_grp[] = { FUNC("mdio", 0, 20, 2) }; +static struct ralink_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 22, 12) }; +static struct ralink_pmx_func spi_grp[] = { FUNC("spi", 0, 34, 7), FUNC("nand1", 2, 34, 7), }; -static struct rt2880_pmx_func sdhci_grp[] = { +static struct ralink_pmx_func sdhci_grp[] = { FUNC("sdhci", 0, 41, 8), FUNC("nand2", 2, 41, 8), }; -static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 49, 12) }; +static struct ralink_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 49, 12) };
-static struct rt2880_pmx_group mt7621_pinmux_data[] = { +static struct ralink_pmx_group mt7621_pinmux_data[] = { GRP("uart1", uart1_grp, 1, MT7621_GPIO_MODE_UART1), GRP("i2c", i2c_grp, 1, MT7621_GPIO_MODE_I2C), GRP_G("uart3", uart3_grp, MT7621_GPIO_MODE_UART3_MASK, @@ -92,7 +92,7 @@ static struct rt2880_pmx_group mt7621_pinmux_data[] = {
static int mt7621_pinmux_probe(struct platform_device *pdev) { - return rt2880_pinmux_init(pdev, mt7621_pinmux_data); + return ralink_pinmux_init(pdev, mt7621_pinmux_data); }
static const struct of_device_id mt7621_pinmux_match[] = { diff --git a/drivers/pinctrl/ralink/pinctrl-ralink.c b/drivers/pinctrl/ralink/pinctrl-ralink.c new file mode 100644 index 000000000000..3a8268a43d74 --- /dev/null +++ b/drivers/pinctrl/ralink/pinctrl-ralink.c @@ -0,0 +1,351 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2013 John Crispin blogic@openwrt.org + */ + +#include <linux/module.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/pinctrl/pinctrl.h> +#include <linux/pinctrl/pinconf.h> +#include <linux/pinctrl/pinconf-generic.h> +#include <linux/pinctrl/pinmux.h> +#include <linux/pinctrl/consumer.h> +#include <linux/pinctrl/machine.h> + +#include <asm/mach-ralink/ralink_regs.h> +#include <asm/mach-ralink/mt7620.h> + +#include "pinctrl-ralink.h" +#include "../core.h" +#include "../pinctrl-utils.h" + +#define SYSC_REG_GPIO_MODE 0x60 +#define SYSC_REG_GPIO_MODE2 0x64 + +struct ralink_priv { + struct device *dev; + + struct pinctrl_pin_desc *pads; + struct pinctrl_desc *desc; + + struct ralink_pmx_func **func; + int func_count; + + struct ralink_pmx_group *groups; + const char **group_names; + int group_count; + + u8 *gpio; + int max_pins; +}; + +static int ralink_get_group_count(struct pinctrl_dev *pctrldev) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + + return p->group_count; +} + +static const char *ralink_get_group_name(struct pinctrl_dev *pctrldev, + unsigned int group) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + + return (group >= p->group_count) ? NULL : p->group_names[group]; +} + +static int ralink_get_group_pins(struct pinctrl_dev *pctrldev, + unsigned int group, + const unsigned int **pins, + unsigned int *num_pins) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + + if (group >= p->group_count) + return -EINVAL; + + *pins = p->groups[group].func[0].pins; + *num_pins = p->groups[group].func[0].pin_count; + + return 0; +} + +static const struct pinctrl_ops ralink_pctrl_ops = { + .get_groups_count = ralink_get_group_count, + .get_group_name = ralink_get_group_name, + .get_group_pins = ralink_get_group_pins, + .dt_node_to_map = pinconf_generic_dt_node_to_map_all, + .dt_free_map = pinconf_generic_dt_free_map, +}; + +static int ralink_pmx_func_count(struct pinctrl_dev *pctrldev) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + + return p->func_count; +} + +static const char *ralink_pmx_func_name(struct pinctrl_dev *pctrldev, + unsigned int func) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + + return p->func[func]->name; +} + +static int ralink_pmx_group_get_groups(struct pinctrl_dev *pctrldev, + unsigned int func, + const char * const **groups, + unsigned int * const num_groups) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + + if (p->func[func]->group_count == 1) + *groups = &p->group_names[p->func[func]->groups[0]]; + else + *groups = p->group_names; + + *num_groups = p->func[func]->group_count; + + return 0; +} + +static int ralink_pmx_group_enable(struct pinctrl_dev *pctrldev, + unsigned int func, unsigned int group) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + u32 mode = 0; + u32 reg = SYSC_REG_GPIO_MODE; + int i; + int shift; + + /* dont allow double use */ + if (p->groups[group].enabled) { + dev_err(p->dev, "%s is already enabled\n", + p->groups[group].name); + return 0; + } + + p->groups[group].enabled = 1; + 
p->func[func]->enabled = 1; + + shift = p->groups[group].shift; + if (shift >= 32) { + shift -= 32; + reg = SYSC_REG_GPIO_MODE2; + } + mode = rt_sysc_r32(reg); + mode &= ~(p->groups[group].mask << shift); + + /* mark the pins as gpio */ + for (i = 0; i < p->groups[group].func[0].pin_count; i++) + p->gpio[p->groups[group].func[0].pins[i]] = 1; + + /* function 0 is gpio and needs special handling */ + if (func == 0) { + mode |= p->groups[group].gpio << shift; + } else { + for (i = 0; i < p->func[func]->pin_count; i++) + p->gpio[p->func[func]->pins[i]] = 0; + mode |= p->func[func]->value << shift; + } + rt_sysc_w32(mode, reg); + + return 0; +} + +static int ralink_pmx_group_gpio_request_enable(struct pinctrl_dev *pctrldev, + struct pinctrl_gpio_range *range, + unsigned int pin) +{ + struct ralink_priv *p = pinctrl_dev_get_drvdata(pctrldev); + + if (!p->gpio[pin]) { + dev_err(p->dev, "pin %d is not set to gpio mux\n", pin); + return -EINVAL; + } + + return 0; +} + +static const struct pinmux_ops ralink_pmx_group_ops = { + .get_functions_count = ralink_pmx_func_count, + .get_function_name = ralink_pmx_func_name, + .get_function_groups = ralink_pmx_group_get_groups, + .set_mux = ralink_pmx_group_enable, + .gpio_request_enable = ralink_pmx_group_gpio_request_enable, +}; + +static struct pinctrl_desc ralink_pctrl_desc = { + .owner = THIS_MODULE, + .name = "ralink-pinmux", + .pctlops = &ralink_pctrl_ops, + .pmxops = &ralink_pmx_group_ops, +}; + +static struct ralink_pmx_func gpio_func = { + .name = "gpio", +}; + +static int ralink_pinmux_index(struct ralink_priv *p) +{ + struct ralink_pmx_group *mux = p->groups; + int i, j, c = 0; + + /* count the mux functions */ + while (mux->name) { + p->group_count++; + mux++; + } + + /* allocate the group names array needed by the gpio function */ + p->group_names = devm_kcalloc(p->dev, p->group_count, + sizeof(char *), GFP_KERNEL); + if (!p->group_names) + return -ENOMEM; + + for (i = 0; i < p->group_count; i++) { + p->group_names[i] = p->groups[i].name; + p->func_count += p->groups[i].func_count; + } + + /* we have a dummy function[0] for gpio */ + p->func_count++; + + /* allocate our function and group mapping index buffers */ + p->func = devm_kcalloc(p->dev, p->func_count, + sizeof(*p->func), GFP_KERNEL); + gpio_func.groups = devm_kcalloc(p->dev, p->group_count, sizeof(int), + GFP_KERNEL); + if (!p->func || !gpio_func.groups) + return -ENOMEM; + + /* add a backpointer to the function so it knows its group */ + gpio_func.group_count = p->group_count; + for (i = 0; i < gpio_func.group_count; i++) + gpio_func.groups[i] = i; + + p->func[c] = &gpio_func; + c++; + + /* add remaining functions */ + for (i = 0; i < p->group_count; i++) { + for (j = 0; j < p->groups[i].func_count; j++) { + p->func[c] = &p->groups[i].func[j]; + p->func[c]->groups = devm_kzalloc(p->dev, sizeof(int), + GFP_KERNEL); + if (!p->func[c]->groups) + return -ENOMEM; + p->func[c]->groups[0] = i; + p->func[c]->group_count = 1; + c++; + } + } + return 0; +} + +static int ralink_pinmux_pins(struct ralink_priv *p) +{ + int i, j; + + /* + * loop over the functions and initialize the pins array. + * also work out the highest pin used. 
+ */ + for (i = 0; i < p->func_count; i++) { + int pin; + + if (!p->func[i]->pin_count) + continue; + + p->func[i]->pins = devm_kcalloc(p->dev, + p->func[i]->pin_count, + sizeof(int), + GFP_KERNEL); + if (!p->func[i]->pins) + return -ENOMEM; + for (j = 0; j < p->func[i]->pin_count; j++) + p->func[i]->pins[j] = p->func[i]->pin_first + j; + + pin = p->func[i]->pin_first + p->func[i]->pin_count; + if (pin > p->max_pins) + p->max_pins = pin; + } + + /* the buffer that tells us which pins are gpio */ + p->gpio = devm_kcalloc(p->dev, p->max_pins, sizeof(u8), GFP_KERNEL); + /* the pads needed to tell pinctrl about our pins */ + p->pads = devm_kcalloc(p->dev, p->max_pins, + sizeof(struct pinctrl_pin_desc), GFP_KERNEL); + if (!p->pads || !p->gpio) + return -ENOMEM; + + memset(p->gpio, 1, sizeof(u8) * p->max_pins); + for (i = 0; i < p->func_count; i++) { + if (!p->func[i]->pin_count) + continue; + + for (j = 0; j < p->func[i]->pin_count; j++) + p->gpio[p->func[i]->pins[j]] = 0; + } + + /* pin 0 is always a gpio */ + p->gpio[0] = 1; + + /* set the pads */ + for (i = 0; i < p->max_pins; i++) { + /* strlen("ioXY") + 1 = 5 */ + char *name = devm_kzalloc(p->dev, 5, GFP_KERNEL); + + if (!name) + return -ENOMEM; + snprintf(name, 5, "io%d", i); + p->pads[i].number = i; + p->pads[i].name = name; + } + p->desc->pins = p->pads; + p->desc->npins = p->max_pins; + + return 0; +} + +int ralink_pinmux_init(struct platform_device *pdev, + struct ralink_pmx_group *data) +{ + struct ralink_priv *p; + struct pinctrl_dev *dev; + int err; + + if (!data) + return -ENOTSUPP; + + /* setup the private data */ + p = devm_kzalloc(&pdev->dev, sizeof(struct ralink_priv), GFP_KERNEL); + if (!p) + return -ENOMEM; + + p->dev = &pdev->dev; + p->desc = &ralink_pctrl_desc; + p->groups = data; + platform_set_drvdata(pdev, p); + + /* init the device */ + err = ralink_pinmux_index(p); + if (err) { + dev_err(&pdev->dev, "failed to load index\n"); + return err; + } + + err = ralink_pinmux_pins(p); + if (err) { + dev_err(&pdev->dev, "failed to load pins\n"); + return err; + } + dev = pinctrl_register(p->desc, &pdev->dev, p); + + return PTR_ERR_OR_ZERO(dev); +} diff --git a/drivers/pinctrl/ralink/pinctrl-ralink.h b/drivers/pinctrl/ralink/pinctrl-ralink.h new file mode 100644 index 000000000000..134969409585 --- /dev/null +++ b/drivers/pinctrl/ralink/pinctrl-ralink.h @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 John Crispin john@phrozen.org + */ + +#ifndef _PINCTRL_RALINK_H__ +#define _PINCTRL_RALINK_H__ + +#define FUNC(name, value, pin_first, pin_count) \ + { name, value, pin_first, pin_count } + +#define GRP(_name, _func, _mask, _shift) \ + { .name = _name, .mask = _mask, .shift = _shift, \ + .func = _func, .gpio = _mask, \ + .func_count = ARRAY_SIZE(_func) } + +#define GRP_G(_name, _func, _mask, _gpio, _shift) \ + { .name = _name, .mask = _mask, .shift = _shift, \ + .func = _func, .gpio = _gpio, \ + .func_count = ARRAY_SIZE(_func) } + +struct ralink_pmx_group; + +struct ralink_pmx_func { + const char *name; + const char value; + + int pin_first; + int pin_count; + int *pins; + + int *groups; + int group_count; + + int enabled; +}; + +struct ralink_pmx_group { + const char *name; + int enabled; + + const u32 shift; + const char mask; + const char gpio; + + struct ralink_pmx_func *func; + int func_count; +}; + +int ralink_pinmux_init(struct platform_device *pdev, + struct ralink_pmx_group *data); + +#endif diff --git a/drivers/pinctrl/ralink/pinctrl-rt2880.c 
b/drivers/pinctrl/ralink/pinctrl-rt2880.c deleted file mode 100644 index 96fc06d1b8b9..000000000000 --- a/drivers/pinctrl/ralink/pinctrl-rt2880.c +++ /dev/null @@ -1,349 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Copyright (C) 2013 John Crispin blogic@openwrt.org - */ - -#include <linux/module.h> -#include <linux/device.h> -#include <linux/io.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/of.h> -#include <linux/pinctrl/pinctrl.h> -#include <linux/pinctrl/pinconf.h> -#include <linux/pinctrl/pinconf-generic.h> -#include <linux/pinctrl/pinmux.h> -#include <linux/pinctrl/consumer.h> -#include <linux/pinctrl/machine.h> - -#include <asm/mach-ralink/ralink_regs.h> -#include <asm/mach-ralink/mt7620.h> - -#include "pinmux.h" -#include "../core.h" -#include "../pinctrl-utils.h" - -#define SYSC_REG_GPIO_MODE 0x60 -#define SYSC_REG_GPIO_MODE2 0x64 - -struct rt2880_priv { - struct device *dev; - - struct pinctrl_pin_desc *pads; - struct pinctrl_desc *desc; - - struct rt2880_pmx_func **func; - int func_count; - - struct rt2880_pmx_group *groups; - const char **group_names; - int group_count; - - u8 *gpio; - int max_pins; -}; - -static int rt2880_get_group_count(struct pinctrl_dev *pctrldev) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - - return p->group_count; -} - -static const char *rt2880_get_group_name(struct pinctrl_dev *pctrldev, - unsigned int group) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - - return (group >= p->group_count) ? NULL : p->group_names[group]; -} - -static int rt2880_get_group_pins(struct pinctrl_dev *pctrldev, - unsigned int group, - const unsigned int **pins, - unsigned int *num_pins) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - - if (group >= p->group_count) - return -EINVAL; - - *pins = p->groups[group].func[0].pins; - *num_pins = p->groups[group].func[0].pin_count; - - return 0; -} - -static const struct pinctrl_ops rt2880_pctrl_ops = { - .get_groups_count = rt2880_get_group_count, - .get_group_name = rt2880_get_group_name, - .get_group_pins = rt2880_get_group_pins, - .dt_node_to_map = pinconf_generic_dt_node_to_map_all, - .dt_free_map = pinconf_generic_dt_free_map, -}; - -static int rt2880_pmx_func_count(struct pinctrl_dev *pctrldev) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - - return p->func_count; -} - -static const char *rt2880_pmx_func_name(struct pinctrl_dev *pctrldev, - unsigned int func) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - - return p->func[func]->name; -} - -static int rt2880_pmx_group_get_groups(struct pinctrl_dev *pctrldev, - unsigned int func, - const char * const **groups, - unsigned int * const num_groups) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - - if (p->func[func]->group_count == 1) - *groups = &p->group_names[p->func[func]->groups[0]]; - else - *groups = p->group_names; - - *num_groups = p->func[func]->group_count; - - return 0; -} - -static int rt2880_pmx_group_enable(struct pinctrl_dev *pctrldev, - unsigned int func, unsigned int group) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - u32 mode = 0; - u32 reg = SYSC_REG_GPIO_MODE; - int i; - int shift; - - /* dont allow double use */ - if (p->groups[group].enabled) { - dev_err(p->dev, "%s is already enabled\n", - p->groups[group].name); - return 0; - } - - p->groups[group].enabled = 1; - p->func[func]->enabled = 1; - - shift = p->groups[group].shift; - if (shift >= 32) { - shift -= 32; - reg = 
SYSC_REG_GPIO_MODE2; - } - mode = rt_sysc_r32(reg); - mode &= ~(p->groups[group].mask << shift); - - /* mark the pins as gpio */ - for (i = 0; i < p->groups[group].func[0].pin_count; i++) - p->gpio[p->groups[group].func[0].pins[i]] = 1; - - /* function 0 is gpio and needs special handling */ - if (func == 0) { - mode |= p->groups[group].gpio << shift; - } else { - for (i = 0; i < p->func[func]->pin_count; i++) - p->gpio[p->func[func]->pins[i]] = 0; - mode |= p->func[func]->value << shift; - } - rt_sysc_w32(mode, reg); - - return 0; -} - -static int rt2880_pmx_group_gpio_request_enable(struct pinctrl_dev *pctrldev, - struct pinctrl_gpio_range *range, - unsigned int pin) -{ - struct rt2880_priv *p = pinctrl_dev_get_drvdata(pctrldev); - - if (!p->gpio[pin]) { - dev_err(p->dev, "pin %d is not set to gpio mux\n", pin); - return -EINVAL; - } - - return 0; -} - -static const struct pinmux_ops rt2880_pmx_group_ops = { - .get_functions_count = rt2880_pmx_func_count, - .get_function_name = rt2880_pmx_func_name, - .get_function_groups = rt2880_pmx_group_get_groups, - .set_mux = rt2880_pmx_group_enable, - .gpio_request_enable = rt2880_pmx_group_gpio_request_enable, -}; - -static struct pinctrl_desc rt2880_pctrl_desc = { - .owner = THIS_MODULE, - .name = "rt2880-pinmux", - .pctlops = &rt2880_pctrl_ops, - .pmxops = &rt2880_pmx_group_ops, -}; - -static struct rt2880_pmx_func gpio_func = { - .name = "gpio", -}; - -static int rt2880_pinmux_index(struct rt2880_priv *p) -{ - struct rt2880_pmx_group *mux = p->groups; - int i, j, c = 0; - - /* count the mux functions */ - while (mux->name) { - p->group_count++; - mux++; - } - - /* allocate the group names array needed by the gpio function */ - p->group_names = devm_kcalloc(p->dev, p->group_count, - sizeof(char *), GFP_KERNEL); - if (!p->group_names) - return -ENOMEM; - - for (i = 0; i < p->group_count; i++) { - p->group_names[i] = p->groups[i].name; - p->func_count += p->groups[i].func_count; - } - - /* we have a dummy function[0] for gpio */ - p->func_count++; - - /* allocate our function and group mapping index buffers */ - p->func = devm_kcalloc(p->dev, p->func_count, - sizeof(*p->func), GFP_KERNEL); - gpio_func.groups = devm_kcalloc(p->dev, p->group_count, sizeof(int), - GFP_KERNEL); - if (!p->func || !gpio_func.groups) - return -ENOMEM; - - /* add a backpointer to the function so it knows its group */ - gpio_func.group_count = p->group_count; - for (i = 0; i < gpio_func.group_count; i++) - gpio_func.groups[i] = i; - - p->func[c] = &gpio_func; - c++; - - /* add remaining functions */ - for (i = 0; i < p->group_count; i++) { - for (j = 0; j < p->groups[i].func_count; j++) { - p->func[c] = &p->groups[i].func[j]; - p->func[c]->groups = devm_kzalloc(p->dev, sizeof(int), - GFP_KERNEL); - if (!p->func[c]->groups) - return -ENOMEM; - p->func[c]->groups[0] = i; - p->func[c]->group_count = 1; - c++; - } - } - return 0; -} - -static int rt2880_pinmux_pins(struct rt2880_priv *p) -{ - int i, j; - - /* - * loop over the functions and initialize the pins array. - * also work out the highest pin used. 
- */ - for (i = 0; i < p->func_count; i++) { - int pin; - - if (!p->func[i]->pin_count) - continue; - - p->func[i]->pins = devm_kcalloc(p->dev, - p->func[i]->pin_count, - sizeof(int), - GFP_KERNEL); - for (j = 0; j < p->func[i]->pin_count; j++) - p->func[i]->pins[j] = p->func[i]->pin_first + j; - - pin = p->func[i]->pin_first + p->func[i]->pin_count; - if (pin > p->max_pins) - p->max_pins = pin; - } - - /* the buffer that tells us which pins are gpio */ - p->gpio = devm_kcalloc(p->dev, p->max_pins, sizeof(u8), GFP_KERNEL); - /* the pads needed to tell pinctrl about our pins */ - p->pads = devm_kcalloc(p->dev, p->max_pins, - sizeof(struct pinctrl_pin_desc), GFP_KERNEL); - if (!p->pads || !p->gpio) - return -ENOMEM; - - memset(p->gpio, 1, sizeof(u8) * p->max_pins); - for (i = 0; i < p->func_count; i++) { - if (!p->func[i]->pin_count) - continue; - - for (j = 0; j < p->func[i]->pin_count; j++) - p->gpio[p->func[i]->pins[j]] = 0; - } - - /* pin 0 is always a gpio */ - p->gpio[0] = 1; - - /* set the pads */ - for (i = 0; i < p->max_pins; i++) { - /* strlen("ioXY") + 1 = 5 */ - char *name = devm_kzalloc(p->dev, 5, GFP_KERNEL); - - if (!name) - return -ENOMEM; - snprintf(name, 5, "io%d", i); - p->pads[i].number = i; - p->pads[i].name = name; - } - p->desc->pins = p->pads; - p->desc->npins = p->max_pins; - - return 0; -} - -int rt2880_pinmux_init(struct platform_device *pdev, - struct rt2880_pmx_group *data) -{ - struct rt2880_priv *p; - struct pinctrl_dev *dev; - int err; - - if (!data) - return -ENOTSUPP; - - /* setup the private data */ - p = devm_kzalloc(&pdev->dev, sizeof(struct rt2880_priv), GFP_KERNEL); - if (!p) - return -ENOMEM; - - p->dev = &pdev->dev; - p->desc = &rt2880_pctrl_desc; - p->groups = data; - platform_set_drvdata(pdev, p); - - /* init the device */ - err = rt2880_pinmux_index(p); - if (err) { - dev_err(&pdev->dev, "failed to load index\n"); - return err; - } - - err = rt2880_pinmux_pins(p); - if (err) { - dev_err(&pdev->dev, "failed to load pins\n"); - return err; - } - dev = pinctrl_register(p->desc, &pdev->dev, p); - - return PTR_ERR_OR_ZERO(dev); -} diff --git a/drivers/pinctrl/ralink/pinctrl-rt288x.c b/drivers/pinctrl/ralink/pinctrl-rt288x.c index 0744aebbace5..40c45140ff8a 100644 --- a/drivers/pinctrl/ralink/pinctrl-rt288x.c +++ b/drivers/pinctrl/ralink/pinctrl-rt288x.c @@ -4,7 +4,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> -#include "pinmux.h" +#include "pinctrl-ralink.h"
#define RT2880_GPIO_MODE_I2C BIT(0) #define RT2880_GPIO_MODE_UART0 BIT(1) @@ -15,15 +15,15 @@ #define RT2880_GPIO_MODE_SDRAM BIT(6) #define RT2880_GPIO_MODE_PCI BIT(7)
-static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; -static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; -static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) }; -static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; -static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; -static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) }; -static struct rt2880_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) }; +static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; +static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; +static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) }; +static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; +static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; +static struct ralink_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) }; +static struct ralink_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) };
-static struct rt2880_pmx_group rt2880_pinmux_data_act[] = { +static struct ralink_pmx_group rt2880_pinmux_data_act[] = { GRP("i2c", i2c_func, 1, RT2880_GPIO_MODE_I2C), GRP("spi", spi_func, 1, RT2880_GPIO_MODE_SPI), GRP("uartlite", uartlite_func, 1, RT2880_GPIO_MODE_UART0), @@ -36,7 +36,7 @@ static struct rt2880_pmx_group rt2880_pinmux_data_act[] = {
static int rt288x_pinmux_probe(struct platform_device *pdev) { - return rt2880_pinmux_init(pdev, rt2880_pinmux_data_act); + return ralink_pinmux_init(pdev, rt2880_pinmux_data_act); }
static const struct of_device_id rt288x_pinmux_match[] = { diff --git a/drivers/pinctrl/ralink/pinctrl-rt305x.c b/drivers/pinctrl/ralink/pinctrl-rt305x.c index 5d8fa156c003..25527ca1ccaa 100644 --- a/drivers/pinctrl/ralink/pinctrl-rt305x.c +++ b/drivers/pinctrl/ralink/pinctrl-rt305x.c @@ -5,7 +5,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> -#include "pinmux.h" +#include "pinctrl-ralink.h"
#define RT305X_GPIO_MODE_UART0_SHIFT 2 #define RT305X_GPIO_MODE_UART0_MASK 0x7 @@ -31,9 +31,9 @@ #define RT3352_GPIO_MODE_LNA 18 #define RT3352_GPIO_MODE_PA 20
-static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; -static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; -static struct rt2880_pmx_func uartf_func[] = { +static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; +static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; +static struct ralink_pmx_func uartf_func[] = { FUNC("uartf", RT305X_GPIO_MODE_UARTF, 7, 8), FUNC("pcm uartf", RT305X_GPIO_MODE_PCM_UARTF, 7, 8), FUNC("pcm i2s", RT305X_GPIO_MODE_PCM_I2S, 7, 8), @@ -42,28 +42,28 @@ static struct rt2880_pmx_func uartf_func[] = { FUNC("gpio uartf", RT305X_GPIO_MODE_GPIO_UARTF, 7, 4), FUNC("gpio i2s", RT305X_GPIO_MODE_GPIO_I2S, 7, 4), }; -static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; -static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; -static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; -static struct rt2880_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) }; -static struct rt2880_pmx_func rt5350_cs1_func[] = { +static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; +static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; +static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; +static struct ralink_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) }; +static struct ralink_pmx_func rt5350_cs1_func[] = { FUNC("spi_cs1", 0, 27, 1), FUNC("wdg_cs1", 1, 27, 1), }; -static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) }; -static struct rt2880_pmx_func rt3352_rgmii_func[] = { +static struct ralink_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) }; +static struct ralink_pmx_func rt3352_rgmii_func[] = { FUNC("rgmii", 0, 24, 12) }; -static struct rt2880_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) }; -static struct rt2880_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) }; -static struct rt2880_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) }; -static struct rt2880_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) }; -static struct rt2880_pmx_func rt3352_cs1_func[] = { +static struct ralink_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) }; +static struct ralink_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) }; +static struct ralink_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) }; +static struct ralink_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) }; +static struct ralink_pmx_func rt3352_cs1_func[] = { FUNC("spi_cs1", 0, 45, 1), FUNC("wdg_cs1", 1, 45, 1), };
-static struct rt2880_pmx_group rt3050_pinmux_data[] = { +static struct ralink_pmx_group rt3050_pinmux_data[] = { GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI), GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK, @@ -76,7 +76,7 @@ static struct rt2880_pmx_group rt3050_pinmux_data[] = { { 0 } };
-static struct rt2880_pmx_group rt3352_pinmux_data[] = { +static struct ralink_pmx_group rt3352_pinmux_data[] = { GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI), GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK, @@ -92,7 +92,7 @@ static struct rt2880_pmx_group rt3352_pinmux_data[] = { { 0 } };
-static struct rt2880_pmx_group rt5350_pinmux_data[] = { +static struct ralink_pmx_group rt5350_pinmux_data[] = { GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C), GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI), GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK, @@ -107,11 +107,11 @@ static struct rt2880_pmx_group rt5350_pinmux_data[] = { static int rt305x_pinmux_probe(struct platform_device *pdev) { if (soc_is_rt5350()) - return rt2880_pinmux_init(pdev, rt5350_pinmux_data); + return ralink_pinmux_init(pdev, rt5350_pinmux_data); else if (soc_is_rt305x() || soc_is_rt3350()) - return rt2880_pinmux_init(pdev, rt3050_pinmux_data); + return ralink_pinmux_init(pdev, rt3050_pinmux_data); else if (soc_is_rt3352()) - return rt2880_pinmux_init(pdev, rt3352_pinmux_data); + return ralink_pinmux_init(pdev, rt3352_pinmux_data); else return -EINVAL; } diff --git a/drivers/pinctrl/ralink/pinctrl-rt3883.c b/drivers/pinctrl/ralink/pinctrl-rt3883.c index 3e0e1b4caa64..0b8674dbe188 100644 --- a/drivers/pinctrl/ralink/pinctrl-rt3883.c +++ b/drivers/pinctrl/ralink/pinctrl-rt3883.c @@ -3,7 +3,7 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> -#include "pinmux.h" +#include "pinctrl-ralink.h"
#define RT3883_GPIO_MODE_UART0_SHIFT 2 #define RT3883_GPIO_MODE_UART0_MASK 0x7 @@ -39,9 +39,9 @@ #define RT3883_GPIO_MODE_LNA_G_GPIO 0x3 #define RT3883_GPIO_MODE_LNA_G _RT3883_GPIO_MODE_LNA_G(RT3883_GPIO_MODE_LNA_G_MASK)
-static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; -static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; -static struct rt2880_pmx_func uartf_func[] = { +static struct ralink_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) }; +static struct ralink_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) }; +static struct ralink_pmx_func uartf_func[] = { FUNC("uartf", RT3883_GPIO_MODE_UARTF, 7, 8), FUNC("pcm uartf", RT3883_GPIO_MODE_PCM_UARTF, 7, 8), FUNC("pcm i2s", RT3883_GPIO_MODE_PCM_I2S, 7, 8), @@ -50,21 +50,21 @@ static struct rt2880_pmx_func uartf_func[] = { FUNC("gpio uartf", RT3883_GPIO_MODE_GPIO_UARTF, 7, 4), FUNC("gpio i2s", RT3883_GPIO_MODE_GPIO_I2S, 7, 4), }; -static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; -static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; -static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; -static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; -static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) }; -static struct rt2880_pmx_func pci_func[] = { +static struct ralink_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) }; +static struct ralink_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) }; +static struct ralink_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) }; +static struct ralink_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) }; +static struct ralink_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) }; +static struct ralink_pmx_func pci_func[] = { FUNC("pci-dev", 0, 40, 32), FUNC("pci-host2", 1, 40, 32), FUNC("pci-host1", 2, 40, 32), FUNC("pci-fnc", 3, 40, 32) }; -static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; -static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) }; +static struct ralink_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) }; +static struct ralink_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
-static struct rt2880_pmx_group rt3883_pinmux_data[] = { +static struct ralink_pmx_group rt3883_pinmux_data[] = { GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C), GRP("spi", spi_func, 1, RT3883_GPIO_MODE_SPI), GRP("uartf", uartf_func, RT3883_GPIO_MODE_UART0_MASK, @@ -83,7 +83,7 @@ static struct rt2880_pmx_group rt3883_pinmux_data[] = {
static int rt3883_pinmux_probe(struct platform_device *pdev) { - return rt2880_pinmux_init(pdev, rt3883_pinmux_data); + return ralink_pinmux_init(pdev, rt3883_pinmux_data); }
static const struct of_device_id rt3883_pinmux_match[] = { diff --git a/drivers/pinctrl/ralink/pinmux.h b/drivers/pinctrl/ralink/pinmux.h deleted file mode 100644 index 0046abe3bcc7..000000000000 --- a/drivers/pinctrl/ralink/pinmux.h +++ /dev/null @@ -1,53 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2012 John Crispin john@phrozen.org - */ - -#ifndef _RT288X_PINMUX_H__ -#define _RT288X_PINMUX_H__ - -#define FUNC(name, value, pin_first, pin_count) \ - { name, value, pin_first, pin_count } - -#define GRP(_name, _func, _mask, _shift) \ - { .name = _name, .mask = _mask, .shift = _shift, \ - .func = _func, .gpio = _mask, \ - .func_count = ARRAY_SIZE(_func) } - -#define GRP_G(_name, _func, _mask, _gpio, _shift) \ - { .name = _name, .mask = _mask, .shift = _shift, \ - .func = _func, .gpio = _gpio, \ - .func_count = ARRAY_SIZE(_func) } - -struct rt2880_pmx_group; - -struct rt2880_pmx_func { - const char *name; - const char value; - - int pin_first; - int pin_count; - int *pins; - - int *groups; - int group_count; - - int enabled; -}; - -struct rt2880_pmx_group { - const char *name; - int enabled; - - const u32 shift; - const char mask; - const char gpio; - - struct rt2880_pmx_func *func; - int func_count; -}; - -int rt2880_pinmux_init(struct platform_device *pdev, - struct rt2880_pmx_group *data); - -#endif diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index f7c9459f6628..edd0d0af5c14 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -1299,15 +1299,17 @@ static int stm32_gpiolib_register_bank(struct stm32_pinctrl *pctl, bank->bank_ioport_nr = bank_ioport_nr; spin_lock_init(&bank->lock);
- /* create irq hierarchical domain */ - bank->fwnode = of_node_to_fwnode(np); + if (pctl->domain) { + /* create irq hierarchical domain */ + bank->fwnode = of_node_to_fwnode(np);
- bank->domain = irq_domain_create_hierarchy(pctl->domain, 0, - STM32_GPIO_IRQ_LINE, bank->fwnode, - &stm32_gpio_domain_ops, bank); + bank->domain = irq_domain_create_hierarchy(pctl->domain, 0, STM32_GPIO_IRQ_LINE, + bank->fwnode, &stm32_gpio_domain_ops, + bank);
- if (!bank->domain) - return -ENODEV; + if (!bank->domain) + return -ENODEV; + }
err = gpiochip_add_data(&bank->gpio_chip, bank); if (err) { @@ -1466,6 +1468,8 @@ int stm32_pctl_probe(struct platform_device *pdev) pctl->domain = stm32_pctrl_get_irq_domain(np); if (IS_ERR(pctl->domain)) return PTR_ERR(pctl->domain); + if (!pctl->domain) + dev_warn(dev, "pinctrl without interrupt support\n");
/* hwspinlock is optional */ hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0); diff --git a/drivers/pinctrl/sunplus/sppctl.c b/drivers/pinctrl/sunplus/sppctl.c index 3ba47040ac42..2b3335ab56c6 100644 --- a/drivers/pinctrl/sunplus/sppctl.c +++ b/drivers/pinctrl/sunplus/sppctl.c @@ -871,6 +871,9 @@ static int sppctl_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node }
*map = kcalloc(*num_maps + nmG, sizeof(**map), GFP_KERNEL); + if (*map == NULL) + return -ENOMEM; + for (i = 0; i < (*num_maps); i++) { dt_pin = be32_to_cpu(list[i]); pin_num = FIELD_GET(GENMASK(31, 24), dt_pin); diff --git a/drivers/power/reset/arm-versatile-reboot.c b/drivers/power/reset/arm-versatile-reboot.c index 08d0a07b58ef..c7624d7611a7 100644 --- a/drivers/power/reset/arm-versatile-reboot.c +++ b/drivers/power/reset/arm-versatile-reboot.c @@ -146,6 +146,7 @@ static int __init versatile_reboot_probe(void) versatile_reboot_type = (enum versatile_reboot)reboot_id->data;
syscon_regmap = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(syscon_regmap)) return PTR_ERR(syscon_regmap);
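The arm-versatile-reboot hunk above plugs an OF device-node reference leak: the matching node looked up earlier in versatile_reboot_probe() was never released, and syscon_node_to_regmap() takes its own reference rather than consuming the caller's, so the probe path must drop its reference once the regmap is resolved. A minimal sketch of the lookup/map/put pattern (the wrapper name is hypothetical):

#include <linux/err.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>

static struct regmap *example_syscon_lookup(const struct of_device_id *table)
{
        struct device_node *np;
        struct regmap *map;

        np = of_find_matching_node(NULL, table);  /* takes a reference on np */
        if (!np)
                return ERR_PTR(-ENODEV);

        map = syscon_node_to_regmap(np);  /* the regmap holds its own reference */
        of_node_put(np);                  /* so ours must be dropped on every path */

        return map;                       /* caller checks with IS_ERR() */
}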
diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index ec8a404d71b4..4339fa9ff009 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -3148,6 +3148,7 @@ static int ab8500_fg_probe(struct platform_device *pdev) ret = ab8500_fg_init_hw_registers(di); if (ret) { dev_err(dev, "failed to initialize registers\n"); + destroy_workqueue(di->fg_wq); return ret; }
@@ -3159,6 +3160,7 @@ static int ab8500_fg_probe(struct platform_device *pdev) di->fg_psy = devm_power_supply_register(dev, &ab8500_fg_desc, &psy_cfg); if (IS_ERR(di->fg_psy)) { dev_err(dev, "failed to register FG psy\n"); + destroy_workqueue(di->fg_wq); return PTR_ERR(di->fg_psy); }
@@ -3174,8 +3176,10 @@ static int ab8500_fg_probe(struct platform_device *pdev) /* Register primary interrupt handlers */ for (i = 0; i < ARRAY_SIZE(ab8500_fg_irq); i++) { irq = platform_get_irq_byname(pdev, ab8500_fg_irq[i].name); - if (irq < 0) + if (irq < 0) { + destroy_workqueue(di->fg_wq); return irq; + }
ret = devm_request_threaded_irq(dev, irq, NULL, ab8500_fg_irq[i].isr, @@ -3185,6 +3189,7 @@ static int ab8500_fg_probe(struct platform_device *pdev) if (ret != 0) { dev_err(dev, "failed to request %s IRQ %d: %d\n", ab8500_fg_irq[i].name, irq, ret); + destroy_workqueue(di->fg_wq); return ret; } dev_dbg(dev, "Requested %s IRQ %d: %d\n", @@ -3200,6 +3205,7 @@ static int ab8500_fg_probe(struct platform_device *pdev) ret = ab8500_fg_sysfs_init(di); if (ret) { dev_err(dev, "failed to create sysfs entry\n"); + destroy_workqueue(di->fg_wq); return ret; }
@@ -3207,6 +3213,7 @@ static int ab8500_fg_probe(struct platform_device *pdev) if (ret) { dev_err(dev, "failed to create FG psy\n"); ab8500_fg_sysfs_exit(di); + destroy_workqueue(di->fg_wq); return ret; }
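With the hunks above, every early return in ab8500_fg_probe() taken after the workqueue is created now destroys it, so a failed probe no longer leaks the workqueue. The repeated destroy_workqueue() calls could equally be folded into one unwind label, which is the more common kernel idiom; a condensed sketch under hypothetical names:

#include <linux/platform_device.h>
#include <linux/workqueue.h>

static int example_hw_init(void) { return 0; }  /* stand-in for the real init steps */

static int example_probe(struct platform_device *pdev)
{
        struct workqueue_struct *wq;
        int ret;

        wq = alloc_ordered_workqueue("example_fg", WQ_MEM_RECLAIM);
        if (!wq)
                return -ENOMEM;

        ret = example_hw_init();
        if (ret)
                goto err_destroy_wq;  /* one cleanup point instead of a copy per branch */

        return 0;

err_destroy_wq:
        destroy_workqueue(wq);
        return ret;
}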
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 775c0bf2f923..0933948d7df3 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -1138,10 +1138,14 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr, struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
/* if an error occurred and we have an active dma, then terminate */ - dmaengine_terminate_sync(ctlr->dma_tx); - bs->tx_dma_active = false; - dmaengine_terminate_sync(ctlr->dma_rx); - bs->rx_dma_active = false; + if (ctlr->dma_tx) { + dmaengine_terminate_sync(ctlr->dma_tx); + bs->tx_dma_active = false; + } + if (ctlr->dma_rx) { + dmaengine_terminate_sync(ctlr->dma_rx); + bs->rx_dma_active = false; + } bcm2835_spi_undo_prologue(bs);
/* and reset */ diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c index 5b485cd96c93..5298a3a43bc7 100644 --- a/fs/dlm/lock.c +++ b/fs/dlm/lock.c @@ -4085,13 +4085,14 @@ static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len) rv = _create_message(ls, sizeof(struct dlm_message) + len, dir_nodeid, DLM_MSG_REMOVE, &ms, &mh); if (rv) - return; + goto out;
memcpy(ms->m_extra, name, len); ms->m_hash = hash;
send_message(mh, ms);
+out: spin_lock(&ls->ls_remove_spin); ls->ls_remove_len = 0; memset(ls->ls_remove_name, 0, DLM_RESNAME_MAXLEN); diff --git a/fs/exfat/namei.c b/fs/exfat/namei.c index a02a04a993bf..c6eaf7e9ea74 100644 --- a/fs/exfat/namei.c +++ b/fs/exfat/namei.c @@ -1080,6 +1080,7 @@ static int exfat_rename_file(struct inode *inode, struct exfat_chain *p_dir,
exfat_remove_entries(inode, p_dir, oldentry, 0, num_old_entries); + ei->dir = *p_dir; ei->entry = newentry; } else { if (exfat_get_entry_type(epold) == TYPE_FILE) { @@ -1167,28 +1168,6 @@ static int exfat_move_file(struct inode *inode, struct exfat_chain *p_olddir, return 0; }
-static void exfat_update_parent_info(struct exfat_inode_info *ei, - struct inode *parent_inode) -{ - struct exfat_sb_info *sbi = EXFAT_SB(parent_inode->i_sb); - struct exfat_inode_info *parent_ei = EXFAT_I(parent_inode); - loff_t parent_isize = i_size_read(parent_inode); - - /* - * the problem that struct exfat_inode_info caches wrong parent info. - * - * because of flag-mismatch of ei->dir, - * there is abnormal traversing cluster chain. - */ - if (unlikely(parent_ei->flags != ei->dir.flags || - parent_isize != EXFAT_CLU_TO_B(ei->dir.size, sbi) || - parent_ei->start_clu != ei->dir.dir)) { - exfat_chain_set(&ei->dir, parent_ei->start_clu, - EXFAT_B_TO_CLU_ROUND_UP(parent_isize, sbi), - parent_ei->flags); - } -} - /* rename or move a old file into a new file */ static int __exfat_rename(struct inode *old_parent_inode, struct exfat_inode_info *ei, struct inode *new_parent_inode, @@ -1219,9 +1198,9 @@ static int __exfat_rename(struct inode *old_parent_inode, return -ENOENT; }
- exfat_update_parent_info(ei, old_parent_inode); - - exfat_chain_dup(&olddir, &ei->dir); + exfat_chain_set(&olddir, EXFAT_I(old_parent_inode)->start_clu, + EXFAT_B_TO_CLU_ROUND_UP(i_size_read(old_parent_inode), sbi), + EXFAT_I(old_parent_inode)->flags); dentry = ei->entry;
ep = exfat_get_dentry(sb, &olddir, dentry, &old_bh); @@ -1241,8 +1220,6 @@ static int __exfat_rename(struct inode *old_parent_inode, goto out; }
- exfat_update_parent_info(new_ei, new_parent_inode); - p_dir = &(new_ei->dir); new_entry = new_ei->entry; ep = exfat_get_dentry(sb, p_dir, new_entry, &new_bh); diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h index 944f83ef9f2e..dcd15e024976 100644 --- a/include/drm/gpu_scheduler.h +++ b/include/drm/gpu_scheduler.h @@ -28,7 +28,7 @@ #include <linux/dma-fence.h> #include <linux/completion.h> #include <linux/xarray.h> -#include <linux/irq_work.h> +#include <linux/workqueue.h>
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
@@ -294,7 +294,7 @@ struct drm_sched_job { */ union { struct dma_fence_cb finish_cb; - struct irq_work work; + struct work_struct work; };
uint64_t id; diff --git a/include/net/amt.h b/include/net/amt.h index 7a4db8b903ee..44acadf3a69e 100644 --- a/include/net/amt.h +++ b/include/net/amt.h @@ -78,6 +78,15 @@ enum amt_status {
#define AMT_STATUS_MAX (__AMT_STATUS_MAX - 1)
+/* Gateway events only */ +enum amt_event { + AMT_EVENT_NONE, + AMT_EVENT_RECEIVE, + AMT_EVENT_SEND_DISCOVERY, + AMT_EVENT_SEND_REQUEST, + __AMT_EVENT_MAX, +}; + struct amt_header { #if defined(__LITTLE_ENDIAN_BITFIELD) u8 type:4, @@ -292,6 +301,12 @@ struct amt_group_node { struct hlist_head sources[]; };
+#define AMT_MAX_EVENTS 16 +struct amt_events { + enum amt_event event; + struct sk_buff *skb; +}; + struct amt_dev { struct net_device *dev; struct net_device *stream_dev; @@ -308,6 +323,7 @@ struct amt_dev { struct delayed_work req_wq; /* Protected by RTNL */ struct delayed_work secret_wq; + struct work_struct event_wq; /* AMT status */ enum amt_status status; /* Generated key */ @@ -345,6 +361,10 @@ struct amt_dev { /* Used only in gateway mode */ u64 mac:48, reserved:16; + /* AMT gateway side message handler queue */ + struct amt_events events[AMT_MAX_EVENTS]; + u8 event_idx; + u8 nr_events; };
#define AMT_TOS 0xc0 diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 98e1ec1a14f0..749bb1e46087 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h @@ -207,7 +207,7 @@ static inline bool inet_sk_bound_dev_eq(struct net *net, int bound_dev_if, int dif, int sdif) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) - return inet_bound_dev_eq(!!net->ipv4.sysctl_tcp_l3mdev_accept, + return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept), bound_dev_if, dif, sdif); #else return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h index 48e4c59d85e2..6395f6b9a5d2 100644 --- a/include/net/inet_sock.h +++ b/include/net/inet_sock.h @@ -107,7 +107,8 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) { - if (!sk->sk_mark && sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept) + if (!sk->sk_mark && + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fwmark_accept)) return skb->mark;
return sk->sk_mark; @@ -116,14 +117,15 @@ static inline u32 inet_request_mark(const struct sock *sk, struct sk_buff *skb) static inline int inet_request_bound_dev_if(const struct sock *sk, struct sk_buff *skb) { + int bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); #ifdef CONFIG_NET_L3_MASTER_DEV struct net *net = sock_net(sk);
- if (!sk->sk_bound_dev_if && net->ipv4.sysctl_tcp_l3mdev_accept) + if (!bound_dev_if && READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept)) return l3mdev_master_ifindex_by_index(net, skb->skb_iif); #endif
- return sk->sk_bound_dev_if; + return bound_dev_if; }
static inline int inet_sk_bound_l3mdev(const struct sock *sk) @@ -131,7 +133,7 @@ static inline int inet_sk_bound_l3mdev(const struct sock *sk) #ifdef CONFIG_NET_L3_MASTER_DEV struct net *net = sock_net(sk);
- if (!net->ipv4.sysctl_tcp_l3mdev_accept) + if (!READ_ONCE(net->ipv4.sysctl_tcp_l3mdev_accept)) return l3mdev_master_ifindex_by_index(net, sk->sk_bound_dev_if); #endif @@ -373,7 +375,7 @@ static inline bool inet_get_convert_csum(struct sock *sk) static inline bool inet_can_nonlocal_bind(struct net *net, struct inet_sock *inet) { - return net->ipv4.sysctl_ip_nonlocal_bind || + return READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind) || inet->freebind || inet->transparent; }
diff --git a/include/net/ip.h b/include/net/ip.h index 26fffda78cca..1c979fd1904c 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -357,7 +357,7 @@ static inline bool sysctl_dev_name_is_allowed(const char *name)
static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port) { - return port < net->ipv4.sysctl_ip_prot_sock; + return port < READ_ONCE(net->ipv4.sysctl_ip_prot_sock); }
#else @@ -384,7 +384,7 @@ void ipfrag_init(void); void ip_static_sysctl_init(void);
#define IP4_REPLY_MARK(net, mark) \ - ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0) + (READ_ONCE((net)->ipv4.sysctl_fwmark_reflect) ? (mark) : 0)
static inline bool ip_is_fragment(const struct iphdr *iph) { @@ -446,7 +446,7 @@ static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst, struct net *net = dev_net(dst->dev); unsigned int mtu;
- if (net->ipv4.sysctl_ip_fwd_use_pmtu || + if (READ_ONCE(net->ipv4.sysctl_ip_fwd_use_pmtu) || ip_mtu_locked(dst) || !forwarding) { mtu = rt->rt_pmtu; diff --git a/include/net/protocol.h b/include/net/protocol.h index f51c06ae365f..6aef8cb11cc8 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h @@ -35,8 +35,6 @@
/* This is used to register protocols. */ struct net_protocol { - int (*early_demux)(struct sk_buff *skb); - int (*early_demux_handler)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb);
/* This returns an error if we weren't able to handle the error. */ @@ -52,8 +50,6 @@ struct net_protocol {
#if IS_ENABLED(CONFIG_IPV6) struct inet6_protocol { - void (*early_demux)(struct sk_buff *skb); - void (*early_demux_handler)(struct sk_buff *skb); int (*handler)(struct sk_buff *skb);
/* This returns an error if we weren't able to handle the error. */ diff --git a/include/net/route.h b/include/net/route.h index 25404fc2b483..08df79436485 100644 --- a/include/net/route.h +++ b/include/net/route.h @@ -361,7 +361,7 @@ static inline int ip4_dst_hoplimit(const struct dst_entry *dst) struct net *net = dev_net(dst->dev);
if (hoplimit == 0) - hoplimit = net->ipv4.sysctl_ip_default_ttl; + hoplimit = READ_ONCE(net->ipv4.sysctl_ip_default_ttl); return hoplimit; }
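The sysctl annotations in this and the surrounding hunks (inet_hashtables.h, inet_sock.h, ip.h, protocol.h, route.h, tcp.h, udp.h, plus the net/core changes further down) all follow one pattern: per-netns sysctls may be rewritten through /proc at any moment, so lockless readers use READ_ONCE(), paired with WRITE_ONCE() on the writer side, to get a single untorn load and to document the intentional data race. A minimal reader-side sketch (the helper name is hypothetical):

#include <linux/compiler.h>
#include <net/net_namespace.h>

static int example_default_ttl(const struct net *net)
{
        /*
         * One single-copy-atomic load; a plain read could be torn,
         * duplicated, or re-fetched by the compiler while a writer
         * concurrently updates the value.
         */
        return READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
}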
diff --git a/include/net/tcp.h b/include/net/tcp.h index 2d9a78b3beaa..4f5de382e192 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -932,7 +932,7 @@ extern const struct inet_connection_sock_af_ops ipv6_specific;
INDIRECT_CALLABLE_DECLARE(void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)); INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *skb)); -INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *skb)); +void tcp_v6_early_demux(struct sk_buff *skb);
#endif
@@ -1421,8 +1421,8 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk) struct tcp_sock *tp = tcp_sk(sk); s32 delta;
- if (!sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle || tp->packets_out || - ca_ops->cong_control) + if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) || + tp->packets_out || ca_ops->cong_control) return; delta = tcp_jiffies32 - tp->lsndtime; if (delta > inet_csk(sk)->icsk_rto) @@ -1511,21 +1511,24 @@ static inline int keepalive_intvl_when(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp);
- return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl; + return tp->keepalive_intvl ? : + READ_ONCE(net->ipv4.sysctl_tcp_keepalive_intvl); }
static inline int keepalive_time_when(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp);
- return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time; + return tp->keepalive_time ? : + READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time); }
static inline int keepalive_probes(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp);
- return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes; + return tp->keepalive_probes ? : + READ_ONCE(net->ipv4.sysctl_tcp_keepalive_probes); }
static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp) @@ -1538,7 +1541,8 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
static inline int tcp_fin_time(const struct sock *sk) { - int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout; + int fin_timeout = tcp_sk(sk)->linger2 ? : + READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fin_timeout); const int rto = inet_csk(sk)->icsk_rto;
if (fin_timeout < (rto << 2) - (rto >> 1)) @@ -2041,7 +2045,7 @@ void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr); static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp) { struct net *net = sock_net((struct sock *)tp); - return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat; + return tp->notsent_lowat ?: READ_ONCE(net->ipv4.sysctl_tcp_notsent_lowat); }
bool tcp_stream_memory_free(const struct sock *sk, int wake); diff --git a/include/net/udp.h b/include/net/udp.h index f1c2a88c9005..abe91ab9030d 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -167,7 +167,7 @@ static inline void udp_csum_pull_header(struct sk_buff *skb) typedef struct sock *(*udp_lookup_t)(const struct sk_buff *skb, __be16 sport, __be16 dport);
-INDIRECT_CALLABLE_DECLARE(void udp_v6_early_demux(struct sk_buff *)); +void udp_v6_early_demux(struct sk_buff *skb); INDIRECT_CALLABLE_DECLARE(int udpv6_rcv(struct sk_buff *));
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, @@ -238,7 +238,7 @@ static inline bool udp_sk_bound_dev_eq(struct net *net, int bound_dev_if, int dif, int sdif) { #if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV) - return inet_bound_dev_eq(!!net->ipv4.sysctl_udp_l3mdev_accept, + return inet_bound_dev_eq(!!READ_ONCE(net->ipv4.sysctl_udp_l3mdev_accept), bound_dev_if, dif, sdif); #else return inet_bound_dev_eq(true, bound_dev_if, dif, sdif); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 1e92b52fc814..3adff3831c04 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -68,11 +68,13 @@ void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, uns { u8 *ptr = NULL;
- if (k >= SKF_NET_OFF) + if (k >= SKF_NET_OFF) { ptr = skb_network_header(skb) + k - SKF_NET_OFF; - else if (k >= SKF_LL_OFF) + } else if (k >= SKF_LL_OFF) { + if (unlikely(!skb_mac_header_was_set(skb))) + return NULL; ptr = skb_mac_header(skb) + k - SKF_LL_OFF; - + } if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb)) return ptr;
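The bpf_internal_load_pointer_neg_helper() change guards the SKF_LL_OFF branch: for skbs that never had a link-layer header recorded, skb_mac_header() yields a meaningless pointer, so the helper must fail instead of computing an offset from it. Since the hunk is interleaved with context lines, here is the fixed control flow restated in one piece (the helper name is hypothetical):

#include <linux/filter.h>
#include <linux/skbuff.h>

static void *example_load_neg_ptr(const struct sk_buff *skb, int k,
                                  unsigned int size)
{
        u8 *ptr = NULL;

        if (k >= SKF_NET_OFF) {
                ptr = skb_network_header(skb) + k - SKF_NET_OFF;
        } else if (k >= SKF_LL_OFF) {
                if (unlikely(!skb_mac_header_was_set(skb)))
                        return NULL;  /* no L2 header to offset from */
                ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
        }
        /* a NULL ptr also fails this in-bounds check */
        if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
                return ptr;
        return NULL;
}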
diff --git a/kernel/events/core.c b/kernel/events/core.c index 950b25c3f210..82238406f5f5 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -6254,10 +6254,10 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
if (!atomic_inc_not_zero(&event->rb->mmap_count)) { /* - * Raced against perf_mmap_close() through - * perf_event_set_output(). Try again, hope for better - * luck. + * Raced against perf_mmap_close(); remove the + * event and try again. */ + ring_buffer_attach(event, NULL); mutex_unlock(&event->mmap_mutex); goto again; } @@ -11826,14 +11826,25 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr, goto out; }
+static void mutex_lock_double(struct mutex *a, struct mutex *b) +{ + if (b < a) + swap(a, b); + + mutex_lock(a); + mutex_lock_nested(b, SINGLE_DEPTH_NESTING); +} + static int perf_event_set_output(struct perf_event *event, struct perf_event *output_event) { struct perf_buffer *rb = NULL; int ret = -EINVAL;
- if (!output_event) + if (!output_event) { + mutex_lock(&event->mmap_mutex); goto set; + }
/* don't allow circular references */ if (event == output_event) @@ -11871,8 +11882,15 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event) event->pmu != output_event->pmu) goto out;
+ /* + * Hold both mmap_mutex to serialize against perf_mmap_close(). Since + * output_event is already on rb->event_list, and the list iteration + * restarts after every removal, it is guaranteed this new event is + * observed *OR* if output_event is already removed, it's guaranteed we + * observe !rb->mmap_count. + */ + mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex); set: - mutex_lock(&event->mmap_mutex); /* Can't redirect output if we've got an active mmap() */ if (atomic_read(&event->mmap_count)) goto unlock; @@ -11882,6 +11900,12 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event) rb = ring_buffer_get(output_event); if (!rb) goto unlock; + + /* did we race against perf_mmap_close() */ + if (!atomic_read(&rb->mmap_count)) { + ring_buffer_put(rb); + goto unlock; + } }
ring_buffer_attach(event, rb); @@ -11889,20 +11913,13 @@ perf_event_set_output(struct perf_event *event, struct perf_event *output_event) ret = 0; unlock: mutex_unlock(&event->mmap_mutex); + if (output_event) + mutex_unlock(&output_event->mmap_mutex);
out: return ret; }
-static void mutex_lock_double(struct mutex *a, struct mutex *b) -{ - if (b < a) - swap(a, b); - - mutex_lock(a); - mutex_lock_nested(b, SINGLE_DEPTH_NESTING); -} - static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) { bool nmi_safe = false; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index b61281d10458..10a916ec6482 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1669,7 +1669,10 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) * the throttle. */ p->dl.dl_throttled = 0; - BUG_ON(!is_dl_boosted(&p->dl) || flags != ENQUEUE_REPLENISH); + if (!(flags & ENQUEUE_REPLENISH)) + printk_deferred_once("sched: DL de-boosted task PID %d: REPLENISH flag missing\n", + task_pid_nr(p)); + return; }
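perf_event_set_output() now takes both events' mmap_mutex up front (which is why mutex_lock_double() is hoisted above it), so attaching a ring buffer can no longer race against perf_mmap_close() tearing it down; the added !rb->mmap_count re-check then catches a buffer that died before the locks were taken. Acquiring two mutexes of the same lock class is only deadlock-safe with a global acquisition order, here simply the object address; a minimal sketch of that discipline:

#include <linux/minmax.h>
#include <linux/mutex.h>

static void example_lock_pair(struct mutex *a, struct mutex *b)
{
        if (b < a)  /* canonical order: lower address first */
                swap(a, b);

        mutex_lock(a);
        /* same lock class twice; tell lockdep this nesting is intentional */
        mutex_lock_nested(b, SINGLE_DEPTH_NESTING);
}

The unlock order is irrelevant for deadlock avoidance, which is why the unlock path above can release event->mmap_mutex before output_event->mmap_mutex.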
diff --git a/kernel/watch_queue.c b/kernel/watch_queue.c index 230038d4f908..bb9962b33f95 100644 --- a/kernel/watch_queue.c +++ b/kernel/watch_queue.c @@ -34,6 +34,27 @@ MODULE_LICENSE("GPL"); #define WATCH_QUEUE_NOTE_SIZE 128 #define WATCH_QUEUE_NOTES_PER_PAGE (PAGE_SIZE / WATCH_QUEUE_NOTE_SIZE)
+/* + * This must be called under the RCU read-lock, which makes + * sure that the wqueue still exists. It can then take the lock, + * and check that the wqueue hasn't been destroyed, which in + * turn makes sure that the notification pipe still exists. + */ +static inline bool lock_wqueue(struct watch_queue *wqueue) +{ + spin_lock_bh(&wqueue->lock); + if (unlikely(wqueue->defunct)) { + spin_unlock_bh(&wqueue->lock); + return false; + } + return true; +} + +static inline void unlock_wqueue(struct watch_queue *wqueue) +{ + spin_unlock_bh(&wqueue->lock); +} + static void watch_queue_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { @@ -69,6 +90,10 @@ static const struct pipe_buf_operations watch_queue_pipe_buf_ops = {
/* * Post a notification to a watch queue. + * + * Must be called with the RCU lock for reading, and the + * watch_queue lock held, which guarantees that the pipe + * hasn't been released. */ static bool post_one_notification(struct watch_queue *wqueue, struct watch_notification *n) @@ -85,9 +110,6 @@ static bool post_one_notification(struct watch_queue *wqueue,
spin_lock_irq(&pipe->rd_wait.lock);
- if (wqueue->defunct) - goto out; - mask = pipe->ring_size - 1; head = pipe->head; tail = pipe->tail; @@ -203,7 +225,10 @@ void __post_watch_notification(struct watch_list *wlist, if (security_post_notification(watch->cred, cred, n) < 0) continue;
- post_one_notification(wqueue, n); + if (lock_wqueue(wqueue)) { + post_one_notification(wqueue, n); + unlock_wqueue(wqueue); + } }
rcu_read_unlock(); @@ -462,11 +487,12 @@ int add_watch_to_object(struct watch *watch, struct watch_list *wlist) return -EAGAIN; }
- spin_lock_bh(&wqueue->lock); - kref_get(&wqueue->usage); - kref_get(&watch->usage); - hlist_add_head(&watch->queue_node, &wqueue->watches); - spin_unlock_bh(&wqueue->lock); + if (lock_wqueue(wqueue)) { + kref_get(&wqueue->usage); + kref_get(&watch->usage); + hlist_add_head(&watch->queue_node, &wqueue->watches); + unlock_wqueue(wqueue); + }
hlist_add_head(&watch->list_node, &wlist->watchers); return 0; @@ -520,20 +546,15 @@ int remove_watch_from_object(struct watch_list *wlist, struct watch_queue *wq,
wqueue = rcu_dereference(watch->queue);
- /* We don't need the watch list lock for the next bit as RCU is - * protecting *wqueue from deallocation. - */ - if (wqueue) { + if (lock_wqueue(wqueue)) { post_one_notification(wqueue, &n.watch);
- spin_lock_bh(&wqueue->lock); - if (!hlist_unhashed(&watch->queue_node)) { hlist_del_init_rcu(&watch->queue_node); put_watch(watch); }
- spin_unlock_bh(&wqueue->lock); + unlock_wqueue(wqueue); }
if (wlist->release_watch) { diff --git a/mm/mempolicy.c b/mm/mempolicy.c index 8c74107a2b15..ea6dee61bc9d 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -350,7 +350,7 @@ static void mpol_rebind_preferred(struct mempolicy *pol, */ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask) { - if (!pol) + if (!pol || pol->mode == MPOL_LOCAL) return; if (!mpol_store_user_nodemask(pol) && nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) diff --git a/net/core/filter.c b/net/core/filter.c index 6391c1885bca..d0b0c163d3f3 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -7031,7 +7031,7 @@ BPF_CALL_5(bpf_tcp_check_syncookie, struct sock *, sk, void *, iph, u32, iph_len if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) return -EINVAL;
- if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies) + if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies)) return -EINVAL;
if (!th->ack || th->rst || th->syn) @@ -7106,7 +7106,7 @@ BPF_CALL_5(bpf_tcp_gen_syncookie, struct sock *, sk, void *, iph, u32, iph_len, if (sk->sk_protocol != IPPROTO_TCP || sk->sk_state != TCP_LISTEN) return -EINVAL;
- if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies) + if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies)) return -ENOENT;
if (!th->syn || th->ack || th->fin || th->rst) diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c index 5f85e01d4093..b0ff6153be62 100644 --- a/net/core/secure_seq.c +++ b/net/core/secure_seq.c @@ -64,7 +64,7 @@ u32 secure_tcpv6_ts_off(const struct net *net, .daddr = *(struct in6_addr *)daddr, };
- if (net->ipv4.sysctl_tcp_timestamps != 1) + if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1) return 0;
ts_secret_init(); @@ -120,7 +120,7 @@ EXPORT_SYMBOL(secure_ipv6_port_ephemeral); #ifdef CONFIG_INET u32 secure_tcp_ts_off(const struct net *net, __be32 saddr, __be32 daddr) { - if (net->ipv4.sysctl_tcp_timestamps != 1) + if (READ_ONCE(net->ipv4.sysctl_tcp_timestamps) != 1) return 0;
ts_secret_init(); diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c index 3f00a28fe762..5daa1fa54249 100644 --- a/net/core/sock_reuseport.c +++ b/net/core/sock_reuseport.c @@ -387,7 +387,7 @@ void reuseport_stop_listen_sock(struct sock *sk) prog = rcu_dereference_protected(reuse->prog, lockdep_is_held(&reuseport_lock));
- if (sock_net(sk)->ipv4.sysctl_tcp_migrate_req || + if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req) || (prog && prog->expected_attach_type == BPF_SK_REUSEPORT_SELECT_OR_MIGRATE)) { /* Migration capable, move sk from the listening section * to the closed section. @@ -545,7 +545,7 @@ struct sock *reuseport_migrate_sock(struct sock *sk, hash = migrating_sk->sk_hash; prog = rcu_dereference(reuse->prog); if (!prog || prog->expected_attach_type != BPF_SK_REUSEPORT_SELECT_OR_MIGRATE) { - if (sock_net(sk)->ipv4.sysctl_tcp_migrate_req) + if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_migrate_req)) goto select_by_hash; goto failure; } diff --git a/net/dsa/port.c b/net/dsa/port.c index bdccb613285d..7bc79e28d48e 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -242,6 +242,60 @@ void dsa_port_disable(struct dsa_port *dp) rtnl_unlock(); }
+static void dsa_port_reset_vlan_filtering(struct dsa_port *dp, + struct dsa_bridge bridge) +{ + struct netlink_ext_ack extack = {0}; + bool change_vlan_filtering = false; + struct dsa_switch *ds = dp->ds; + struct dsa_port *other_dp; + bool vlan_filtering; + int err; + + if (ds->needs_standalone_vlan_filtering && + !br_vlan_enabled(bridge.dev)) { + change_vlan_filtering = true; + vlan_filtering = true; + } else if (!ds->needs_standalone_vlan_filtering && + br_vlan_enabled(bridge.dev)) { + change_vlan_filtering = true; + vlan_filtering = false; + } + + /* If the bridge was vlan_filtering, the bridge core doesn't trigger an + * event for changing vlan_filtering setting upon slave ports leaving + * it. That is a good thing, because that lets us handle it and also + * handle the case where the switch's vlan_filtering setting is global + * (not per port). When that happens, the correct moment to trigger the + * vlan_filtering callback is only when the last port leaves the last + * VLAN-aware bridge. + */ + if (change_vlan_filtering && ds->vlan_filtering_is_global) { + dsa_switch_for_each_port(other_dp, ds) { + struct net_device *br = dsa_port_bridge_dev_get(other_dp); + + if (br && br_vlan_enabled(br)) { + change_vlan_filtering = false; + break; + } + } + } + + if (!change_vlan_filtering) + return; + + err = dsa_port_vlan_filtering(dp, vlan_filtering, &extack); + if (extack._msg) { + dev_err(ds->dev, "port %d: %s\n", dp->index, + extack._msg); + } + if (err && err != -EOPNOTSUPP) { + dev_err(ds->dev, + "port %d failed to reset VLAN filtering to %d: %pe\n", + dp->index, vlan_filtering, ERR_PTR(err)); + } +} + static int dsa_port_inherit_brport_flags(struct dsa_port *dp, struct netlink_ext_ack *extack) { @@ -313,7 +367,8 @@ static int dsa_port_switchdev_sync_attrs(struct dsa_port *dp, return 0; }
-static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
+static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp,
+					    struct dsa_bridge bridge)
 {
 	/* Configure the port for standalone mode (no address learning,
 	 * flood everything).
@@ -333,7 +388,7 @@ static void dsa_port_switchdev_unsync_attrs(struct dsa_port *dp)
 	 */
 	dsa_port_set_state_now(dp, BR_STATE_FORWARDING, true);

-	/* VLAN filtering is handled by dsa_switch_bridge_leave */
+	dsa_port_reset_vlan_filtering(dp, bridge);

 	/* Ageing time may be global to the switch chip, so don't change it
 	 * here because we have no good reason (or value) to change it to.
@@ -502,7 +557,7 @@ void dsa_port_bridge_leave(struct dsa_port *dp, struct net_device *br)
 			"port %d failed to notify DSA_NOTIFIER_BRIDGE_LEAVE: %pe\n",
 			dp->index, ERR_PTR(err));

-	dsa_port_switchdev_unsync_attrs(dp);
+	dsa_port_switchdev_unsync_attrs(dp, info.bridge);
 }

 int dsa_port_lag_change(struct dsa_port *dp,
@@ -752,7 +807,7 @@ int dsa_port_vlan_filtering(struct dsa_port *dp, bool vlan_filtering,
 	ds->vlan_filtering = vlan_filtering;

 	dsa_switch_for_each_user_port(other_dp, ds) {
-		struct net_device *slave = dp->slave;
+		struct net_device *slave = other_dp->slave;

 		/* We might be called in the unbind path, so not
 		 * all slave devices might still be registered.
diff --git a/net/dsa/switch.c b/net/dsa/switch.c
index d25cd1da3eb3..d8a80cf9742c 100644
--- a/net/dsa/switch.c
+++ b/net/dsa/switch.c
@@ -115,62 +115,10 @@ static int dsa_switch_bridge_join(struct dsa_switch *ds,
 	return 0;
 }
-static int dsa_switch_sync_vlan_filtering(struct dsa_switch *ds,
-					  struct dsa_notifier_bridge_info *info)
-{
-	struct netlink_ext_ack extack = {0};
-	bool change_vlan_filtering = false;
-	bool vlan_filtering;
-	struct dsa_port *dp;
-	int err;
-
-	if (ds->needs_standalone_vlan_filtering &&
-	    !br_vlan_enabled(info->bridge.dev)) {
-		change_vlan_filtering = true;
-		vlan_filtering = true;
-	} else if (!ds->needs_standalone_vlan_filtering &&
-		   br_vlan_enabled(info->bridge.dev)) {
-		change_vlan_filtering = true;
-		vlan_filtering = false;
-	}
-
-	/* If the bridge was vlan_filtering, the bridge core doesn't trigger an
-	 * event for changing vlan_filtering setting upon slave ports leaving
-	 * it. That is a good thing, because that lets us handle it and also
-	 * handle the case where the switch's vlan_filtering setting is global
-	 * (not per port). When that happens, the correct moment to trigger the
-	 * vlan_filtering callback is only when the last port leaves the last
-	 * VLAN-aware bridge.
-	 */
-	if (change_vlan_filtering && ds->vlan_filtering_is_global) {
-		dsa_switch_for_each_port(dp, ds) {
-			struct net_device *br = dsa_port_bridge_dev_get(dp);
-
-			if (br && br_vlan_enabled(br)) {
-				change_vlan_filtering = false;
-				break;
-			}
-		}
-	}
-
-	if (change_vlan_filtering) {
-		err = dsa_port_vlan_filtering(dsa_to_port(ds, info->port),
-					      vlan_filtering, &extack);
-		if (extack._msg)
-			dev_err(ds->dev, "port %d: %s\n", info->port,
-				extack._msg);
-		if (err && err != -EOPNOTSUPP)
-			return err;
-	}
-
-	return 0;
-}
-
 static int dsa_switch_bridge_leave(struct dsa_switch *ds,
 				   struct dsa_notifier_bridge_info *info)
 {
 	struct dsa_switch_tree *dst = ds->dst;
-	int err;

 	if (dst->index == info->tree_index && ds->index == info->sw_index &&
 	    ds->ops->port_bridge_leave)
@@ -182,12 +130,6 @@ static int dsa_switch_bridge_leave(struct dsa_switch *ds,
 						info->sw_index, info->port,
 						info->bridge);

-	if (ds->dst->index == info->tree_index && ds->index == info->sw_index) {
-		err = dsa_switch_sync_vlan_filtering(ds, info);
-		if (err)
-			return err;
-	}
-
 	return 0;
 }
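The two DSA sections above move the VLAN-filtering reset from the cross-chip DSA_NOTIFIER_BRIDGE_LEAVE handler in switch.c into per-port code in port.c, so it runs exactly once, on the switch that actually owns the leaving port. A hedged paraphrase of the resulting call order in dsa_port_bridge_leave() (simplified; error handling and the info fields are elided):

/* Sketch only: condensed from the hunks above, not a verbatim copy. */
void bridge_leave_sketch(struct dsa_port *dp, struct net_device *br)
{
	struct dsa_notifier_bridge_info info = { /* ... */ };

	/* Cross-chip notifier: no longer touches VLAN filtering. */
	dsa_broadcast(DSA_NOTIFIER_BRIDGE_LEAVE, &info);

	/* Per-port cleanup now owns the VLAN filtering reset. */
	dsa_port_switchdev_unsync_attrs(dp, info.bridge);
}

Passing the bridge by value matters here: by the time the unsync runs, dp is no longer bridged, so the bridge could not be looked up from the port anymore.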
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 98bc180563d1..5c207367b3b4 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -217,7 +217,7 @@ int inet_listen(struct socket *sock, int backlog)
 		 * because the socket was in TCP_LISTEN state previously but
 		 * was shutdown() rather than close().
 		 */
-		tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
+		tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
 		if ((tcp_fastopen & TFO_SERVER_WO_SOCKOPT1) &&
 		    (tcp_fastopen & TFO_SERVER_ENABLE) &&
 		    !inet_csk(sk)->icsk_accept_queue.fastopenq.max_qlen) {
@@ -335,7 +335,7 @@ static int inet_create(struct net *net, struct socket *sock, int protocol,
 			inet->hdrincl = 1;
 		}

-	if (net->ipv4.sysctl_ip_no_pmtu_disc)
+	if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
 		inet->pmtudisc = IP_PMTUDISC_DONT;
 	else
 		inet->pmtudisc = IP_PMTUDISC_WANT;
@@ -1711,24 +1711,14 @@ static const struct net_protocol igmp_protocol = {
 };
 #endif

-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct net_protocol tcp_protocol = {
-	.early_demux	=	tcp_v4_early_demux,
-	.early_demux_handler =	tcp_v4_early_demux,
+static const struct net_protocol tcp_protocol = {
 	.handler	=	tcp_v4_rcv,
 	.err_handler	=	tcp_v4_err,
 	.no_policy	=	1,
 	.icmp_strict_tag_validation = 1,
 };

-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct net_protocol udp_protocol = {
-	.early_demux =	udp_v4_early_demux,
-	.early_demux_handler =	udp_v4_early_demux,
+static const struct net_protocol udp_protocol = {
 	.handler =	udp_rcv,
 	.err_handler =	udp_err,
 	.no_policy =	1,
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 720f65f7bd0b..9f5c1c26c8f2 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -2216,7 +2216,7 @@ void fib_select_multipath(struct fib_result *res, int hash)
 	}

 	change_nexthops(fi) {
-		if (net->ipv4.sysctl_fib_multipath_use_neigh) {
+		if (READ_ONCE(net->ipv4.sysctl_fib_multipath_use_neigh)) {
 			if (!fib_good_nh(nexthop_nh))
 				continue;
 			if (!first) {
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index c13ceda9ce5d..d8cfa6241c04 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -878,7 +878,7 @@ static bool icmp_unreach(struct sk_buff *skb)
 			 * values please see
 			 * Documentation/networking/ip-sysctl.rst
 			 */
-			switch (net->ipv4.sysctl_ip_no_pmtu_disc) {
+			switch (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc)) {
 			default:
 				net_dbg_ratelimited("%pI4: fragmentation needed and DF set\n",
 						    &iph->daddr);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 1d9e6d5e9a76..0a0010f89627 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -467,7 +467,8 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
 	if (pmc->multiaddr == IGMP_ALL_HOSTS)
 		return skb;
-	if (ipv4_is_local_multicast(pmc->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
+	if (ipv4_is_local_multicast(pmc->multiaddr) &&
+	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
 		return skb;

 	mtu = READ_ONCE(dev->mtu);
@@ -593,7 +594,7 @@ static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
 		if (pmc->multiaddr == IGMP_ALL_HOSTS)
 			continue;
 		if (ipv4_is_local_multicast(pmc->multiaddr) &&
-		    !net->ipv4.sysctl_igmp_llm_reports)
+		    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
 			continue;
 		spin_lock_bh(&pmc->lock);
 		if (pmc->sfcount[MCAST_EXCLUDE])
@@ -736,7 +737,8 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
 	if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
 		return igmpv3_send_report(in_dev, pmc);

-	if (ipv4_is_local_multicast(group) && !net->ipv4.sysctl_igmp_llm_reports)
+	if (ipv4_is_local_multicast(group) &&
+	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
 		return 0;

 	if (type == IGMP_HOST_LEAVE_MESSAGE)
@@ -825,7 +827,7 @@ static void igmp_ifc_event(struct in_device *in_dev)
 	struct net *net = dev_net(in_dev->dev);

 	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
 		return;
-	WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv);
+	WRITE_ONCE(in_dev->mr_ifc_count, in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv));
 	igmp_ifc_start_timer(in_dev, 1);
 }
@@ -920,7 +922,8 @@ static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
 	if (group == IGMP_ALL_HOSTS)
 		return false;
-	if (ipv4_is_local_multicast(group) && !net->ipv4.sysctl_igmp_llm_reports)
+	if (ipv4_is_local_multicast(group) &&
+	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
 		return false;

 	rcu_read_lock();
@@ -1006,7 +1009,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
 		 * received value was zero, use the default or statically
 		 * configured value.
 		 */
-		in_dev->mr_qrv = ih3->qrv ?: net->ipv4.sysctl_igmp_qrv;
+		in_dev->mr_qrv = ih3->qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
 		in_dev->mr_qi = IGMPV3_QQIC(ih3->qqic)*HZ ?: IGMP_QUERY_INTERVAL;

 		/* RFC3376, 8.3. Query Response Interval:
@@ -1045,7 +1048,7 @@ static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
 		if (im->multiaddr == IGMP_ALL_HOSTS)
 			continue;
 		if (ipv4_is_local_multicast(im->multiaddr) &&
-		    !net->ipv4.sysctl_igmp_llm_reports)
+		    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
 			continue;
 		spin_lock_bh(&im->lock);
 		if (im->tm_running)
@@ -1186,7 +1189,7 @@ static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im,
 	pmc->interface = im->interface;
 	in_dev_hold(in_dev);
 	pmc->multiaddr = im->multiaddr;
-	pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+	pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
 	pmc->sfmode = im->sfmode;
 	if (pmc->sfmode == MCAST_INCLUDE) {
 		struct ip_sf_list *psf;
@@ -1237,9 +1240,11 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
 			swap(im->tomb, pmc->tomb);
 			swap(im->sources, pmc->sources);
 			for (psf = im->sources; psf; psf = psf->sf_next)
-				psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+				psf->sf_crcount = in_dev->mr_qrv ?:
+					READ_ONCE(net->ipv4.sysctl_igmp_qrv);
 		} else {
-			im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+			im->crcount = in_dev->mr_qrv ?:
+				READ_ONCE(net->ipv4.sysctl_igmp_qrv);
 		}
 		in_dev_put(pmc->interface);
 		kfree_pmc(pmc);
@@ -1296,7 +1301,8 @@ static void __igmp_group_dropped(struct ip_mc_list *im, gfp_t gfp)
 #ifdef CONFIG_IP_MULTICAST
 	if (im->multiaddr == IGMP_ALL_HOSTS)
 		return;
-	if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
+	if (ipv4_is_local_multicast(im->multiaddr) &&
+	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
 		return;

 	reporter = im->reporter;
@@ -1338,13 +1344,14 @@ static void igmp_group_added(struct ip_mc_list *im)
 #ifdef CONFIG_IP_MULTICAST
 	if (im->multiaddr == IGMP_ALL_HOSTS)
 		return;
-	if (ipv4_is_local_multicast(im->multiaddr) && !net->ipv4.sysctl_igmp_llm_reports)
+	if (ipv4_is_local_multicast(im->multiaddr) &&
+	    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
 		return;

 	if (in_dev->dead)
 		return;

-	im->unsolicit_count = net->ipv4.sysctl_igmp_qrv;
+	im->unsolicit_count = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
 	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
 		spin_lock_bh(&im->lock);
 		igmp_start_timer(im, IGMP_INITIAL_REPORT_DELAY);
@@ -1358,7 +1365,7 @@ static void igmp_group_added(struct ip_mc_list *im)
 	 * IN() to IN(A).
 	 */
 	if (im->sfmode == MCAST_EXCLUDE)
-		im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+		im->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);

 	igmp_ifc_event(in_dev);
 #endif
@@ -1642,7 +1649,7 @@ static void ip_mc_rejoin_groups(struct in_device *in_dev)
 		if (im->multiaddr == IGMP_ALL_HOSTS)
 			continue;
 		if (ipv4_is_local_multicast(im->multiaddr) &&
-		    !net->ipv4.sysctl_igmp_llm_reports)
+		    !READ_ONCE(net->ipv4.sysctl_igmp_llm_reports))
 			continue;

 		/* a failover is happening and switches
@@ -1749,7 +1756,7 @@ static void ip_mc_reset(struct in_device *in_dev)

 	in_dev->mr_qi = IGMP_QUERY_INTERVAL;
 	in_dev->mr_qri = IGMP_QUERY_RESPONSE_INTERVAL;
-	in_dev->mr_qrv = net->ipv4.sysctl_igmp_qrv;
+	in_dev->mr_qrv = READ_ONCE(net->ipv4.sysctl_igmp_qrv);
 }
 #else
 static void ip_mc_reset(struct in_device *in_dev)
@@ -1883,7 +1890,7 @@ static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
 #ifdef CONFIG_IP_MULTICAST
 		if (psf->sf_oldin &&
 		    !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
-			psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+			psf->sf_crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
 			psf->sf_next = pmc->tomb;
 			pmc->tomb = psf;
 			rv = 1;
@@ -1947,7 +1954,7 @@ static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 		/* filter mode change */
 		pmc->sfmode = MCAST_INCLUDE;
 #ifdef CONFIG_IP_MULTICAST
-		pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+		pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
 		WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
 		for (psf = pmc->sources; psf; psf = psf->sf_next)
 			psf->sf_crcount = 0;
@@ -2126,7 +2133,7 @@ static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
 #ifdef CONFIG_IP_MULTICAST
 		/* else no filters; keep old mode for reports */

-		pmc->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+		pmc->crcount = in_dev->mr_qrv ?: READ_ONCE(net->ipv4.sysctl_igmp_qrv);
 		WRITE_ONCE(in_dev->mr_ifc_count, pmc->crcount);
 		for (psf = pmc->sources; psf; psf = psf->sf_next)
 			psf->sf_crcount = 0;
@@ -2192,7 +2199,7 @@ static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
 		count++;
 	}
 	err = -ENOBUFS;
-	if (count >= net->ipv4.sysctl_igmp_max_memberships)
+	if (count >= READ_ONCE(net->ipv4.sysctl_igmp_max_memberships))
 		goto done;
 	iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
 	if (!iml)
@@ -2379,7 +2386,7 @@ int ip_mc_source(int add, int omode, struct sock *sk, struct
 	}
 	/* else, add a new source to the filter */

-	if (psl && psl->sl_count >= net->ipv4.sysctl_igmp_max_msf) {
+	if (psl && psl->sl_count >= READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
 		err = -ENOBUFS;
 		goto done;
 	}
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 1e5b53c2bb26..cdc750ced525 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -259,7 +259,7 @@ inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *
 		goto other_half_scan;
 	}
-	if (net->ipv4.sysctl_ip_autobind_reuse && !relax) {
+	if (READ_ONCE(net->ipv4.sysctl_ip_autobind_reuse) && !relax) {
 		/* We still have a chance to connect to different destinations */
 		relax = true;
 		goto ports_exhausted;
@@ -829,7 +829,8 @@ static void reqsk_timer_handler(struct timer_list *t)

 	icsk = inet_csk(sk_listener);
 	net = sock_net(sk_listener);
-	max_syn_ack_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
+	max_syn_ack_retries = icsk->icsk_syn_retries ? :
+		READ_ONCE(net->ipv4.sysctl_tcp_synack_retries);
 	/* Normally all the openreqs are young and become mature
 	 * (i.e. converted to established socket) for first timeout.
 	 * If synack was not acknowledged for 1 second, it means
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 92ba3350274b..03bb7c51b618 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -151,7 +151,7 @@ int ip_forward(struct sk_buff *skb)
 	    !skb_sec_path(skb))
 		ip_rt_send_redirect(skb);

-	if (net->ipv4.sysctl_ip_fwd_update_priority)
+	if (READ_ONCE(net->ipv4.sysctl_ip_fwd_update_priority))
 		skb->priority = rt_tos2priority(iph->tos);

 	return NF_HOOK(NFPROTO_IPV4, NF_INET_FORWARD,
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 95f7bb052784..f3fd6c398309 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -312,14 +312,13 @@ static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
 	       ip_hdr(hint)->tos == iph->tos;
 }

-INDIRECT_CALLABLE_DECLARE(int udp_v4_early_demux(struct sk_buff *));
-INDIRECT_CALLABLE_DECLARE(int tcp_v4_early_demux(struct sk_buff *));
+int tcp_v4_early_demux(struct sk_buff *skb);
+int udp_v4_early_demux(struct sk_buff *skb);
 static int ip_rcv_finish_core(struct net *net, struct sock *sk,
 			      struct sk_buff *skb, struct net_device *dev,
 			      const struct sk_buff *hint)
 {
 	const struct iphdr *iph = ip_hdr(skb);
-	int (*edemux)(struct sk_buff *skb);
 	int err, drop_reason;
 	struct rtable *rt;

@@ -332,21 +331,29 @@ static int ip_rcv_finish_core(struct net *net, struct sock *sk,
 			goto drop_error;
 	}

-	if (net->ipv4.sysctl_ip_early_demux &&
+	if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
 	    !skb_dst(skb) &&
 	    !skb->sk &&
 	    !ip_is_fragment(iph)) {
-		const struct net_protocol *ipprot;
-		int protocol = iph->protocol;
-
-		ipprot = rcu_dereference(inet_protos[protocol]);
-		if (ipprot && (edemux = READ_ONCE(ipprot->early_demux))) {
-			err = INDIRECT_CALL_2(edemux, tcp_v4_early_demux,
-					      udp_v4_early_demux, skb);
-			if (unlikely(err))
-				goto drop_error;
-			/* must reload iph, skb->head might have changed */
-			iph = ip_hdr(skb);
+		switch (iph->protocol) {
+		case IPPROTO_TCP:
+			if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) {
+				tcp_v4_early_demux(skb);
+
+				/* must reload iph, skb->head might have changed */
+				iph = ip_hdr(skb);
+			}
+			break;
+		case IPPROTO_UDP:
+			if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) {
+				err = udp_v4_early_demux(skb);
+				if (unlikely(err))
+					goto drop_error;
+
+				/* must reload iph, skb->head might have changed */
+				iph = ip_hdr(skb);
+			}
+			break;
+		}
 	}
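This hunk replaces the writable early_demux function pointer in struct net_protocol with a direct switch on the L4 protocol, consulting the tcp/udp early-demux sysctls per packet via READ_ONCE(). Removing the mutable pointer is what allows tcp_protocol, udp_protocol and their IPv6 counterparts to become const elsewhere in this patch. Condensed into one hypothetical helper for illustration (the real code stays inline in ip_rcv_finish_core(), and the IPv6 variant differs slightly):

/* Sketch: per-packet dispatch instead of a patched function pointer. */
static void early_demux_sketch(struct net *net, struct sk_buff *skb, u8 proto)
{
	if (!READ_ONCE(net->ipv4.sysctl_ip_early_demux))
		return;

	switch (proto) {
	case IPPROTO_TCP:
		if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
			tcp_v4_early_demux(skb);	/* direct call */
		break;
	case IPPROTO_UDP:
		if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
			udp_v4_early_demux(skb);	/* return value checked in the real hunk */
		break;
	}
}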
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 445a9ecaefa1..a8a323ecbb54 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -782,7 +782,7 @@ static int ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval, int optlen)
 	/* numsrc >= (4G-140)/128 overflow in 32 bits */
 	err = -ENOBUFS;
 	if (gsf->gf_numsrc >= 0x1ffffff ||
-	    gsf->gf_numsrc > sock_net(sk)->ipv4.sysctl_igmp_max_msf)
+	    gsf->gf_numsrc > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
 		goto out_free_gsf;

 	err = -EINVAL;
@@ -832,7 +832,7 @@ static int compat_ip_set_mcast_msfilter(struct sock *sk, sockptr_t optval,

 	/* numsrc >= (4G-140)/128 overflow in 32 bits */
 	err = -ENOBUFS;
-	if (n > sock_net(sk)->ipv4.sysctl_igmp_max_msf)
+	if (n > READ_ONCE(sock_net(sk)->ipv4.sysctl_igmp_max_msf))
 		goto out_free_gsf;
 	err = set_mcast_msfilter(sk, gf32->gf_interface, n, gf32->gf_fmode,
 				 &gf32->gf_group, gf32->gf_slist_flex);
@@ -1244,7 +1244,7 @@ static int do_ip_setsockopt(struct sock *sk, int level, int optname,
 		}
 		/* numsrc >= (1G-4) overflow in 32 bits */
 		if (msf->imsf_numsrc >= 0x3ffffffcU ||
-		    msf->imsf_numsrc > net->ipv4.sysctl_igmp_max_msf) {
+		    msf->imsf_numsrc > READ_ONCE(net->ipv4.sysctl_igmp_max_msf)) {
 			kfree(msf);
 			err = -ENOBUFS;
 			break;
@@ -1606,7 +1606,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
 	{
 		struct net *net = sock_net(sk);
 		val = (inet->uc_ttl == -1 ?
-		       net->ipv4.sysctl_ip_default_ttl :
+		       READ_ONCE(net->ipv4.sysctl_ip_default_ttl) :
 		       inet->uc_ttl);
 		break;
 	}
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index 4eed5afca392..f2edb40c0db0 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -62,7 +62,7 @@ struct sk_buff *nf_reject_skb_v4_tcp_reset(struct net *net,

 	skb_reserve(nskb, LL_MAX_HEADER);
 	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
-				   net->ipv4.sysctl_ip_default_ttl);
+				   READ_ONCE(net->ipv4.sysctl_ip_default_ttl));
 	nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
 	niph->tot_len = htons(nskb->len);
 	ip_send_check(niph);
@@ -115,7 +115,7 @@ struct sk_buff *nf_reject_skb_v4_unreach(struct net *net,

 	skb_reserve(nskb, LL_MAX_HEADER);
 	niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
-				   net->ipv4.sysctl_ip_default_ttl);
+				   READ_ONCE(net->ipv4.sysctl_ip_default_ttl));

 	skb_reset_transport_header(nskb);
 	icmph = skb_put_zero(nskb, sizeof(struct icmphdr));
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 28836071f0a6..0088a4c64d77 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -387,7 +387,7 @@ static int snmp_seq_show_ipstats(struct seq_file *seq, void *v)

 	seq_printf(seq, "\nIp: %d %d",
 		   IPV4_DEVCONF_ALL(net, FORWARDING) ? 1 : 2,
-		   net->ipv4.sysctl_ip_default_ttl);
+		   READ_ONCE(net->ipv4.sysctl_ip_default_ttl));

 	BUILD_BUG_ON(offsetof(struct ipstats_mib, mibs) != 0);
 	snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list,
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index ed01063d8f30..02a0a397a2f3 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1397,7 +1397,7 @@ u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
 	struct fib_info *fi = res->fi;
 	u32 mtu = 0;

-	if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
+	if (READ_ONCE(dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu) ||
 	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
 		mtu = fi->fib_mtu;
@@ -1928,7 +1928,7 @@ static u32 fib_multipath_custom_hash_outer(const struct net *net,
 					   const struct sk_buff *skb,
 					   bool *p_has_inner)
 {
-	u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
 	struct flow_keys keys, hash_keys;

 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
@@ -1957,7 +1957,7 @@ static u32 fib_multipath_custom_hash_inner(const struct net *net,
 					   const struct sk_buff *skb,
 					   bool has_inner)
 {
-	u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
 	struct flow_keys keys, hash_keys;

 	/* We assume the packet carries an encapsulation, but if none was
@@ -2017,7 +2017,7 @@ static u32 fib_multipath_custom_hash_skb(const struct net *net,
 static u32 fib_multipath_custom_hash_fl4(const struct net *net,
 					 const struct flowi4 *fl4)
 {
-	u32 hash_fields = net->ipv4.sysctl_fib_multipath_hash_fields;
+	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
 	struct flow_keys hash_keys;

 	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
@@ -2047,7 +2047,7 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
 	struct flow_keys hash_keys;
 	u32 mhash = 0;

-	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
+	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
 	case 0:
 		memset(&hash_keys, 0, sizeof(hash_keys));
 		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index b387c4835155..942d2dfa1115 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -247,12 +247,12 @@ bool cookie_timestamp_decode(const struct net *net,
 		return true;
 	}

-	if (!net->ipv4.sysctl_tcp_timestamps)
+	if (!READ_ONCE(net->ipv4.sysctl_tcp_timestamps))
 		return false;
tcp_opt->sack_ok = (options & TS_OPT_SACK) ? TCP_SACK_SEEN : 0;
-	if (tcp_opt->sack_ok && !net->ipv4.sysctl_tcp_sack)
+	if (tcp_opt->sack_ok && !READ_ONCE(net->ipv4.sysctl_tcp_sack))
 		return false;

 	if ((options & TS_OPT_WSCALE_MASK) == TS_OPT_WSCALE_MASK)
@@ -261,7 +261,7 @@ bool cookie_timestamp_decode(const struct net *net,
 	tcp_opt->wscale_ok = 1;
 	tcp_opt->snd_wscale = options & TS_OPT_WSCALE_MASK;

-	return net->ipv4.sysctl_tcp_window_scaling != 0;
+	return READ_ONCE(net->ipv4.sysctl_tcp_window_scaling) != 0;
 }
 EXPORT_SYMBOL(cookie_timestamp_decode);

@@ -340,7 +340,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	struct flowi4 fl4;
 	u32 tsoff = 0;

-	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
+	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) ||
+	    !th->ack || th->rst)
 		goto out;

 	if (tcp_synq_no_recent_overflow(sk))
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index ffe0264a51b8..344cdcd5a7d5 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -88,7 +88,7 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
 		 * port limit.
 		 */
 		if ((range[1] < range[0]) ||
-		    (range[0] < net->ipv4.sysctl_ip_prot_sock))
+		    (range[0] < READ_ONCE(net->ipv4.sysctl_ip_prot_sock)))
 			ret = -EINVAL;
 		else
 			set_local_port_range(net, range);
@@ -114,7 +114,7 @@ static int ipv4_privileged_ports(struct ctl_table *table, int write,
 		.extra2 = &ip_privileged_port_max,
 	};

-	pports = net->ipv4.sysctl_ip_prot_sock;
+	pports = READ_ONCE(net->ipv4.sysctl_ip_prot_sock);
ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
@@ -126,7 +126,7 @@ static int ipv4_privileged_ports(struct ctl_table *table, int write,
 		if (range[0] < pports)
 			ret = -EINVAL;
 		else
-			net->ipv4.sysctl_ip_prot_sock = pports;
+			WRITE_ONCE(net->ipv4.sysctl_ip_prot_sock, pports);
 	}

 	return ret;
@@ -354,61 +354,6 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
 	return ret;
 }

-static void proc_configure_early_demux(int enabled, int protocol)
-{
-	struct net_protocol *ipprot;
-#if IS_ENABLED(CONFIG_IPV6)
-	struct inet6_protocol *ip6prot;
-#endif
-
-	rcu_read_lock();
-
-	ipprot = rcu_dereference(inet_protos[protocol]);
-	if (ipprot)
-		ipprot->early_demux = enabled ? ipprot->early_demux_handler :
-						NULL;
-
-#if IS_ENABLED(CONFIG_IPV6)
-	ip6prot = rcu_dereference(inet6_protos[protocol]);
-	if (ip6prot)
-		ip6prot->early_demux = enabled ? ip6prot->early_demux_handler :
-						 NULL;
-#endif
-	rcu_read_unlock();
-}
-
-static int proc_tcp_early_demux(struct ctl_table *table, int write,
-				void *buffer, size_t *lenp, loff_t *ppos)
-{
-	int ret = 0;
-
-	ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos);
-
-	if (write && !ret) {
-		int enabled = init_net.ipv4.sysctl_tcp_early_demux;
-
-		proc_configure_early_demux(enabled, IPPROTO_TCP);
-	}
-
-	return ret;
-}
-
-static int proc_udp_early_demux(struct ctl_table *table, int write,
-				void *buffer, size_t *lenp, loff_t *ppos)
-{
-	int ret = 0;
-
-	ret = proc_dou8vec_minmax(table, write, buffer, lenp, ppos);
-
-	if (write && !ret) {
-		int enabled = init_net.ipv4.sysctl_udp_early_demux;
-
-		proc_configure_early_demux(enabled, IPPROTO_UDP);
-	}
-
-	return ret;
-}
-
 static int proc_tfo_blackhole_detect_timeout(struct ctl_table *table,
 					     int write, void *buffer,
 					     size_t *lenp, loff_t *ppos)
@@ -711,14 +656,14 @@ static struct ctl_table ipv4_net_table[] = {
 		.data = &init_net.ipv4.sysctl_udp_early_demux,
 		.maxlen = sizeof(u8),
 		.mode = 0644,
-		.proc_handler = proc_udp_early_demux
+		.proc_handler = proc_dou8vec_minmax,
 	},
 	{
 		.procname = "tcp_early_demux",
 		.data = &init_net.ipv4.sysctl_tcp_early_demux,
 		.maxlen = sizeof(u8),
 		.mode = 0644,
-		.proc_handler = proc_tcp_early_demux
+		.proc_handler = proc_dou8vec_minmax,
 	},
 	{
 		.procname = "nexthop_compat_mode",
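With the early-demux decision now made per packet, writing tcp_early_demux or udp_early_demux no longer has to patch inet_protos[]/inet6_protos[] at sysctl-write time, so the custom proc handlers are deleted and the knobs degrade to plain u8 toggles handled by the stock helper:

/* Shape of the entry after this patch; proc_dou8vec_minmax is the
 * generic u8 handler, with no side effects on write. */
{
	.procname	= "tcp_early_demux",
	.data		= &init_net.ipv4.sysctl_tcp_early_demux,
	.maxlen		= sizeof(u8),
	.mode		= 0644,
	.proc_handler	= proc_dou8vec_minmax,
},

As a side effect this removes a namespace wart visible in the deleted code: the old handlers always read init_net and flipped a global pointer, so a write in one namespace changed behavior for all of them, while the new per-packet checks honor each netns's own values.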
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index f2fd1779d925..28db838e604a 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -441,7 +441,7 @@ void tcp_init_sock(struct sock *sk)
 	tp->snd_cwnd_clamp = ~0;
 	tp->mss_cache = TCP_MSS_DEFAULT;

-	tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
+	tp->reordering = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering);
 	tcp_assign_congestion_control(sk);

 	tp->tsoffset = 0;
@@ -1151,7 +1151,8 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
 	struct sockaddr *uaddr = msg->msg_name;
 	int err, flags;

-	if (!(sock_net(sk)->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) ||
+	if (!(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) &
+	      TFO_CLIENT_ENABLE) ||
 	    (uaddr && msg->msg_namelen >= sizeof(uaddr->sa_family) &&
 	     uaddr->sa_family == AF_UNSPEC))
 		return -EOPNOTSUPP;
@@ -3638,7 +3639,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level, int optname,
 	case TCP_FASTOPEN_CONNECT:
 		if (val > 1 || val < 0) {
 			err = -EINVAL;
-		} else if (net->ipv4.sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) {
+		} else if (READ_ONCE(net->ipv4.sysctl_tcp_fastopen) &
+			   TFO_CLIENT_ENABLE) {
 			if (sk->sk_state == TCP_CLOSE)
 				tp->fastopen_connect = val;
 			else
@@ -3988,12 +3990,13 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		val = keepalive_probes(tp);
 		break;
 	case TCP_SYNCNT:
-		val = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
+		val = icsk->icsk_syn_retries ? :
+			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
 		break;
 	case TCP_LINGER2:
 		val = tp->linger2;
 		if (val >= 0)
-			val = (val ? : net->ipv4.sysctl_tcp_fin_timeout) / HZ;
+			val = (val ? : READ_ONCE(net->ipv4.sysctl_tcp_fin_timeout)) / HZ;
 		break;
 	case TCP_DEFER_ACCEPT:
 		val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index fdbcf2a6d08e..825b216d11f5 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -332,7 +332,7 @@ static bool tcp_fastopen_no_cookie(const struct sock *sk,
 				   const struct dst_entry *dst,
 				   int flag)
 {
-	return (sock_net(sk)->ipv4.sysctl_tcp_fastopen & flag) ||
+	return (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen) & flag) ||
 	       tcp_sk(sk)->fastopen_no_cookie ||
 	       (dst && dst_metric(dst, RTAX_FASTOPEN_NO_COOKIE));
 }
@@ -347,7 +347,7 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
 			      const struct dst_entry *dst)
 {
 	bool syn_data = TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1;
-	int tcp_fastopen = sock_net(sk)->ipv4.sysctl_tcp_fastopen;
+	int tcp_fastopen = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen);
 	struct tcp_fastopen_cookie valid_foc = { .len = -1 };
 	struct sock *child;
 	int ret = 0;
@@ -489,7 +489,7 @@ void tcp_fastopen_active_disable(struct sock *sk)
 {
 	struct net *net = sock_net(sk);

-	if (!sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout)
+	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout))
 		return;

 	/* Paired with READ_ONCE() in tcp_fastopen_active_should_disable() */
@@ -510,7 +510,8 @@ void tcp_fastopen_active_disable(struct sock *sk)
  */
 bool tcp_fastopen_active_should_disable(struct sock *sk)
 {
-	unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
+	unsigned int tfo_bh_timeout =
+		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout);
 	unsigned long timeout;
 	int tfo_da_times;
 	int multiplier;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2d71bcfcc759..19b186a4a8e8 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1051,7 +1051,7 @@ static void tcp_check_sack_reordering(struct sock *sk, const u32 low_seq,
 			 tp->undo_marker ? tp->undo_retrans : 0);
 #endif
 		tp->reordering = min_t(u32, (metric + mss - 1) / mss,
-				       sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
+				       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
 	}

 	/* This exciting event is worth to be remembered. 8) */
@@ -2030,7 +2030,7 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 		return;

 	tp->reordering = min_t(u32, tp->packets_out + addend,
-			       sock_net(sk)->ipv4.sysctl_tcp_max_reordering);
+			       READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_max_reordering));
 	tp->reord_seen++;
 	NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRENOREORDER);
 }
@@ -2095,7 +2095,8 @@ static inline void tcp_init_undo(struct tcp_sock *tp)

 static bool tcp_is_rack(const struct sock *sk)
 {
-	return sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION;
+	return READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
+	       TCP_RACK_LOSS_DETECTION;
 }

 /* If we detect SACK reneging, forget all SACK information
@@ -2139,6 +2140,7 @@ void tcp_enter_loss(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct net *net = sock_net(sk);
 	bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
+	u8 reordering;
tcp_timeout_mark_lost(sk);
@@ -2159,10 +2161,12 @@ void tcp_enter_loss(struct sock *sk)
 	/* Timeout in disordered state after receiving substantial DUPACKs
 	 * suggests that the degree of reordering is over-estimated.
 	 */
+	reordering = READ_ONCE(net->ipv4.sysctl_tcp_reordering);
 	if (icsk->icsk_ca_state <= TCP_CA_Disorder &&
-	    tp->sacked_out >= net->ipv4.sysctl_tcp_reordering)
+	    tp->sacked_out >= reordering)
 		tp->reordering = min_t(unsigned int, tp->reordering,
-				       net->ipv4.sysctl_tcp_reordering);
+				       reordering);
+
 	tcp_set_ca_state(sk, TCP_CA_Loss);
 	tp->high_seq = tp->snd_nxt;
 	tcp_ecn_queue_cwr(tp);
@@ -3464,7 +3468,8 @@ static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
 	 * new SACK or ECE mark may first advance cwnd here and later reduce
 	 * cwnd in tcp_fastretrans_alert() based on more states.
 	 */
-	if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
+	if (tcp_sk(sk)->reordering >
+	    READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_reordering))
 		return flag & FLAG_FORWARD_PROGRESS;

 	return flag & FLAG_DATA_ACKED;
@@ -4056,7 +4061,7 @@ void tcp_parse_options(const struct net *net,
 			break;
 		case TCPOPT_WINDOW:
 			if (opsize == TCPOLEN_WINDOW && th->syn &&
-			    !estab && net->ipv4.sysctl_tcp_window_scaling) {
+			    !estab && READ_ONCE(net->ipv4.sysctl_tcp_window_scaling)) {
 				__u8 snd_wscale = *(__u8 *)ptr;
 				opt_rx->wscale_ok = 1;
 				if (snd_wscale > TCP_MAX_WSCALE) {
@@ -4072,7 +4077,7 @@ void tcp_parse_options(const struct net *net,
 		case TCPOPT_TIMESTAMP:
 			if ((opsize == TCPOLEN_TIMESTAMP) &&
 			    ((estab && opt_rx->tstamp_ok) ||
-			     (!estab && net->ipv4.sysctl_tcp_timestamps))) {
+			     (!estab && READ_ONCE(net->ipv4.sysctl_tcp_timestamps)))) {
 				opt_rx->saw_tstamp = 1;
 				opt_rx->rcv_tsval = get_unaligned_be32(ptr);
 				opt_rx->rcv_tsecr = get_unaligned_be32(ptr + 4);
@@ -4080,7 +4085,7 @@ void tcp_parse_options(const struct net *net,
 			break;
 		case TCPOPT_SACK_PERM:
 			if (opsize == TCPOLEN_SACK_PERM && th->syn &&
-			    !estab && net->ipv4.sysctl_tcp_sack) {
+			    !estab && READ_ONCE(net->ipv4.sysctl_tcp_sack)) {
 				opt_rx->sack_ok = TCP_SACK_SEEN;
 				tcp_sack_reset(opt_rx);
 			}
@@ -5571,7 +5576,7 @@ static void tcp_check_urg(struct sock *sk, const struct tcphdr *th)
 	struct tcp_sock *tp = tcp_sk(sk);
 	u32 ptr = ntohs(th->urg_ptr);

-	if (ptr && !sock_net(sk)->ipv4.sysctl_tcp_stdurg)
+	if (ptr && !READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_stdurg))
 		ptr--;
 	ptr += ntohl(th->seq);

@@ -6780,11 +6785,14 @@ static bool tcp_syn_flood_action(const struct sock *sk, const char *proto)
 {
 	struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
 	const char *msg = "Dropping request";
-	bool want_cookie = false;
 	struct net *net = sock_net(sk);
+	bool want_cookie = false;
+	u8 syncookies;
+
+	syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);

 #ifdef CONFIG_SYN_COOKIES
-	if (net->ipv4.sysctl_tcp_syncookies) {
+	if (syncookies) {
 		msg = "Sending cookies";
 		want_cookie = true;
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
@@ -6792,8 +6800,7 @@ static bool tcp_syn_flood_action(const struct sock *sk, const char *proto)
 #endif
 	__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);

-	if (!queue->synflood_warned &&
-	    net->ipv4.sysctl_tcp_syncookies != 2 &&
+	if (!queue->synflood_warned && syncookies != 2 &&
 	    xchg(&queue->synflood_warned, 1) == 0)
 		net_info_ratelimited("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
 				    proto, sk->sk_num, msg);
@@ -6842,7 +6849,7 @@ u16 tcp_get_syncookie_mss(struct request_sock_ops *rsk_ops,
 	struct tcp_sock *tp = tcp_sk(sk);
 	u16 mss;

-	if (sock_net(sk)->ipv4.sysctl_tcp_syncookies != 2 &&
+	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) != 2 &&
 	    !inet_csk_reqsk_queue_is_full(sk))
 		return 0;

@@ -6876,13 +6883,15 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	bool want_cookie = false;
 	struct dst_entry *dst;
 	struct flowi fl;
+	u8 syncookies;
+
+	syncookies = READ_ONCE(net->ipv4.sysctl_tcp_syncookies);

 	/* TW buckets are converted to open requests without
 	 * limitations, they conserve resources and peer is
 	 * evidently real one.
 	 */
-	if ((net->ipv4.sysctl_tcp_syncookies == 2 ||
-	     inet_csk_reqsk_queue_is_full(sk)) && !isn) {
+	if ((syncookies == 2 || inet_csk_reqsk_queue_is_full(sk)) && !isn) {
 		want_cookie = tcp_syn_flood_action(sk, rsk_ops->slab_name);
 		if (!want_cookie)
 			goto drop;
@@ -6931,10 +6940,12 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		tcp_rsk(req)->ts_off = af_ops->init_ts_off(net, skb);

 	if (!want_cookie && !isn) {
+		int max_syn_backlog = READ_ONCE(net->ipv4.sysctl_max_syn_backlog);
+
 		/* Kill the following clause, if you dislike this way. */
-		if (!net->ipv4.sysctl_tcp_syncookies &&
-		    (net->ipv4.sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-		     (net->ipv4.sysctl_max_syn_backlog >> 2)) &&
+		if (!syncookies &&
+		    (max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+		     (max_syn_backlog >> 2)) &&
 		    !tcp_peer_is_proven(req, dst)) {
 			/* Without syncookies last quarter of
 			 * backlog is filled with destinations,
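Snapshotting the backlog limit into max_syn_backlog guarantees both arithmetic uses in the condition see the same value even if the sysctl changes mid-expression. The heuristic itself is unchanged: once less than a quarter of the SYN backlog remains, peers without a proven history are dropped. Worked through with an assumed limit of 1024 (values illustrative only):

#include <stdbool.h>

/* 1024 - 800 = 224 free slots; 1024 >> 2 = 256. Since 224 < 256,
 * an unproven peer is dropped. */
static bool drops_unproven_peer(int max_syn_backlog, int queue_len)
{
	return (max_syn_backlog - queue_len) < (max_syn_backlog >> 2);
}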
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index cd78b4fc334f..a57f96b86874 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -108,10 +108,10 @@ static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)

 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
 {
+	int reuse = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tw_reuse);
 	const struct inet_timewait_sock *tw = inet_twsk(sktw);
 	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
 	struct tcp_sock *tp = tcp_sk(sk);
-	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

 	if (reuse == 2) {
 		/* Still does not detect *everything* that goes through
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index 7029b0e98edb..a501150deaa3 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -428,7 +428,8 @@ void tcp_update_metrics(struct sock *sk)
 		if (!tcp_metric_locked(tm, TCP_METRIC_REORDERING)) {
 			val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
 			if (val < tp->reordering &&
-			    tp->reordering != net->ipv4.sysctl_tcp_reordering)
+			    tp->reordering !=
+			    READ_ONCE(net->ipv4.sysctl_tcp_reordering))
 				tcp_metric_set(tm, TCP_METRIC_REORDERING,
 					       tp->reordering);
 		}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 6854bb1fb32b..cb95d88497ae 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -173,7 +173,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 		 * Oh well... nobody has a sufficient solution to this
 		 * protocol bug yet.
 		 */
-		if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
+		if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
 kill:
 			inet_twsk_deschedule_put(tw);
 			return TCP_TW_SUCCESS;
@@ -781,7 +781,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
 		if (sk != req->rsk_listener)
 			__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

-	if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
+	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
 		inet_rsk(req)->acked = 1;
 		return NULL;
 	}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 34249469e361..3554a4c1e1b8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -789,18 +789,18 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 	opts->mss = tcp_advertise_mss(sk);
 	remaining -= TCPOLEN_MSS_ALIGNED;

-	if (likely(sock_net(sk)->ipv4.sysctl_tcp_timestamps && !*md5)) {
+	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps) && !*md5)) {
 		opts->options |= OPTION_TS;
 		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
 		opts->tsecr = tp->rx_opt.ts_recent;
 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 	}
-	if (likely(sock_net(sk)->ipv4.sysctl_tcp_window_scaling)) {
+	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling))) {
 		opts->ws = tp->rx_opt.rcv_wscale;
 		opts->options |= OPTION_WSCALE;
 		remaining -= TCPOLEN_WSCALE_ALIGNED;
 	}
-	if (likely(sock_net(sk)->ipv4.sysctl_tcp_sack)) {
+	if (likely(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_sack))) {
 		opts->options |= OPTION_SACK_ADVERTISE;
 		if (unlikely(!(OPTION_TS & opts->options)))
 			remaining -= TCPOLEN_SACKPERM_ALIGNED;
@@ -1717,7 +1717,8 @@ static inline int __tcp_mtu_to_mss(struct sock *sk, int pmtu)
 	mss_now -= icsk->icsk_ext_hdr_len;

 	/* Then reserve room for full set of TCP options and 8 bytes of data */
-	mss_now = max(mss_now, sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss);
+	mss_now = max(mss_now,
+		      READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_min_snd_mss));
 	return mss_now;
 }
@@ -1760,10 +1761,10 @@ void tcp_mtup_init(struct sock *sk)
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct net *net = sock_net(sk);

-	icsk->icsk_mtup.enabled = net->ipv4.sysctl_tcp_mtu_probing > 1;
+	icsk->icsk_mtup.enabled = READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing) > 1;
 	icsk->icsk_mtup.search_high = tp->rx_opt.mss_clamp + sizeof(struct tcphdr) +
 			       icsk->icsk_af_ops->net_header_len;
-	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
+	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, READ_ONCE(net->ipv4.sysctl_tcp_base_mss));
 	icsk->icsk_mtup.probe_size = 0;
 	if (icsk->icsk_mtup.enabled)
 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
@@ -1895,7 +1896,7 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
 	if (tp->packets_out > tp->snd_cwnd_used)
 		tp->snd_cwnd_used = tp->packets_out;

-	if (sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle &&
+	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_slow_start_after_idle) &&
 	    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
 	    !ca_ops->cong_control)
 		tcp_cwnd_application_limited(sk);
@@ -2280,7 +2281,7 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
 	u32 interval;
 	s32 delta;

-	interval = net->ipv4.sysctl_tcp_probe_interval;
+	interval = READ_ONCE(net->ipv4.sysctl_tcp_probe_interval);
 	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
 	if (unlikely(delta >= interval * HZ)) {
 		int mss = tcp_current_mss(sk);
@@ -2364,7 +2365,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	 * probing process by not resetting search range to its orignal.
 	 */
 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high) ||
-	    interval < net->ipv4.sysctl_tcp_probe_threshold) {
+	    interval < READ_ONCE(net->ipv4.sysctl_tcp_probe_threshold)) {
 		/* Check whether enough time has elaplased for
 		 * another round of probing.
 		 */
@@ -2738,7 +2739,7 @@ bool tcp_schedule_loss_probe(struct sock *sk, bool advancing_rto)
 	if (rcu_access_pointer(tp->fastopen_rsk))
 		return false;

-	early_retrans = sock_net(sk)->ipv4.sysctl_tcp_early_retrans;
+	early_retrans = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_early_retrans);
 	/* Schedule a loss probe in 2*RTT for SACK capable connections
 	 * not in loss recovery, that are either limited by cwnd or application.
 	 */
@@ -3102,7 +3103,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *to,
 	struct sk_buff *skb = to, *tmp;
 	bool first = true;

-	if (!sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse)
+	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retrans_collapse))
 		return;
 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
 		return;
@@ -3644,7 +3645,7 @@ static void tcp_connect_init(struct sock *sk)
 	 * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
 	 */
 	tp->tcp_header_len = sizeof(struct tcphdr);
-	if (sock_net(sk)->ipv4.sysctl_tcp_timestamps)
+	if (READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_timestamps))
 		tp->tcp_header_len += TCPOLEN_TSTAMP_ALIGNED;

 #ifdef CONFIG_TCP_MD5SIG
@@ -3680,7 +3681,7 @@ static void tcp_connect_init(struct sock *sk)
 				  tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0),
 				  &tp->rcv_wnd,
 				  &tp->window_clamp,
-				  sock_net(sk)->ipv4.sysctl_tcp_window_scaling,
+				  READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_window_scaling),
 				  &rcv_wscale,
 				  rcv_wnd);
@@ -4087,7 +4088,7 @@ void tcp_send_probe0(struct sock *sk)
 	icsk->icsk_probes_out++;
 	if (err <= 0) {
-		if (icsk->icsk_backoff < net->ipv4.sysctl_tcp_retries2)
+		if (icsk->icsk_backoff < READ_ONCE(net->ipv4.sysctl_tcp_retries2))
 			icsk->icsk_backoff++;
 		timeout = tcp_probe0_when(sk, TCP_RTO_MAX);
 	} else {
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index fd113f6226ef..ac14216f6204 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -19,7 +19,8 @@ static u32 tcp_rack_reo_wnd(const struct sock *sk)
 			return 0;

 		if (tp->sacked_out >= tp->reordering &&
-		    !(sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_NO_DUPTHRESH))
+		    !(READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
+		      TCP_RACK_NO_DUPTHRESH))
 			return 0;
 	}
@@ -192,7 +193,8 @@ void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
 {
 	struct tcp_sock *tp = tcp_sk(sk);

-	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
+	if ((READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_recovery) &
+	     TCP_RACK_STATIC_REO_WND) ||
 	    !rs->prior_delivered)
 		return;
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 20cf4a98c69d..50bba370486e 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -143,7 +143,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
  */
 static int tcp_orphan_retries(struct sock *sk, bool alive)
 {
-	int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */
+	int retries = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_orphan_retries); /* May be zero. */

 	/* We know from an ICMP that something is wrong. */
 	if (sk->sk_err_soft && !alive)
@@ -163,7 +163,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
 	int mss;

 	/* Black hole detection */
-	if (!net->ipv4.sysctl_tcp_mtu_probing)
+	if (!READ_ONCE(net->ipv4.sysctl_tcp_mtu_probing))
 		return;

 	if (!icsk->icsk_mtup.enabled) {
@@ -171,9 +171,9 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
 		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
 	} else {
 		mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
-		mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
-		mss = max(mss, net->ipv4.sysctl_tcp_mtu_probe_floor);
-		mss = max(mss, net->ipv4.sysctl_tcp_min_snd_mss);
+		mss = min(READ_ONCE(net->ipv4.sysctl_tcp_base_mss), mss);
+		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_mtu_probe_floor));
+		mss = max(mss, READ_ONCE(net->ipv4.sysctl_tcp_min_snd_mss));
 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
 	}
 	tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
@@ -239,17 +239,18 @@ static int tcp_write_timeout(struct sock *sk)
 	if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
 		if (icsk->icsk_retransmits)
 			__dst_negative_advice(sk);
-		retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
+		retry_until = icsk->icsk_syn_retries ? :
+			READ_ONCE(net->ipv4.sysctl_tcp_syn_retries);
 		expired = icsk->icsk_retransmits >= retry_until;
 	} else {
-		if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
+		if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1), 0)) {
 			/* Black hole detection */
 			tcp_mtu_probing(icsk, sk);

 			__dst_negative_advice(sk);
 		}

-		retry_until = net->ipv4.sysctl_tcp_retries2;
+		retry_until = READ_ONCE(net->ipv4.sysctl_tcp_retries2);
 		if (sock_flag(sk, SOCK_DEAD)) {
 			const bool alive = icsk->icsk_rto < TCP_RTO_MAX;
@@ -380,7 +381,7 @@ static void tcp_probe_timer(struct sock *sk)
 		    msecs_to_jiffies(icsk->icsk_user_timeout))
 			goto abort;

-	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
+	max_probes = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_retries2);
 	if (sock_flag(sk, SOCK_DEAD)) {
 		const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;
@@ -406,12 +407,15 @@ abort:	tcp_write_err(sk);
 static void tcp_fastopen_synack_timer(struct sock *sk, struct request_sock *req)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	int max_retries = icsk->icsk_syn_retries ? :
-	    sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
 	struct tcp_sock *tp = tcp_sk(sk);
+	int max_retries;

 	req->rsk_ops->syn_ack_timeout(req);
req->rsk_ops->syn_ack_timeout(req);
+	/* add one more retry for fastopen */
+	max_retries = icsk->icsk_syn_retries ? :
+		READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_synack_retries) + 1;
+
 	if (req->num_timeout >= max_retries) {
 		tcp_write_err(sk);
 		return;
@@ -574,7 +578,7 @@ void tcp_retransmit_timer(struct sock *sk)
 	 * linear-timeout retransmissions into a black hole
 	 */
 	if (sk->sk_state == TCP_ESTABLISHED &&
-	    (tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) &&
+	    (tp->thin_lto || READ_ONCE(net->ipv4.sysctl_tcp_thin_linear_timeouts)) &&
 	    tcp_stream_is_thin(tp) &&
 	    icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
 		icsk->icsk_backoff = 0;
@@ -585,7 +589,7 @@ void tcp_retransmit_timer(struct sock *sk)
 	}
 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 				  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
-	if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
+	if (retransmits_timed_out(sk, READ_ONCE(net->ipv4.sysctl_tcp_retries1) + 1, 0))
 		__sk_dst_reset(sk);

 out:;
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 7d7b7523d126..ef1e6545d869 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -226,7 +226,7 @@ static int inet6_create(struct net *net, struct socket *sock, int protocol,
 	RCU_INIT_POINTER(inet->mc_list, NULL);
 	inet->rcv_tos = 0;

-	if (net->ipv4.sysctl_ip_no_pmtu_disc)
+	if (READ_ONCE(net->ipv4.sysctl_ip_no_pmtu_disc))
 		inet->pmtudisc = IP_PMTUDISC_DONT;
 	else
 		inet->pmtudisc = IP_PMTUDISC_WANT;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 5b5ea35635f9..caba03e551ef 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -45,20 +45,23 @@
 #include <net/inet_ecn.h>
 #include <net/dst_metadata.h>

-INDIRECT_CALLABLE_DECLARE(void tcp_v6_early_demux(struct sk_buff *));
 static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
 				struct sk_buff *skb)
 {
-	void (*edemux)(struct sk_buff *skb);
-
-	if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
-		const struct inet6_protocol *ipprot;
-
-		ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
-		if (ipprot && (edemux = READ_ONCE(ipprot->early_demux)))
-			INDIRECT_CALL_2(edemux, tcp_v6_early_demux,
-					udp_v6_early_demux, skb);
+	if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
+	    !skb_dst(skb) && !skb->sk) {
+		switch (ipv6_hdr(skb)->nexthdr) {
+		case IPPROTO_TCP:
+			if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
+				tcp_v6_early_demux(skb);
+			break;
+		case IPPROTO_UDP:
+			if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
+				udp_v6_early_demux(skb);
+			break;
+		}
 	}
+
 	if (!skb_valid_dst(skb))
 		ip6_route_input(skb);
 }
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index 9cc123f000fb..5014aa663452 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -141,7 +141,8 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	__u8 rcv_wscale;
 	u32 tsoff = 0;

-	if (!sock_net(sk)->ipv4.sysctl_tcp_syncookies || !th->ack || th->rst)
+	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_syncookies) ||
+	    !th->ack || th->rst)
 		goto out;

 	if (tcp_synq_no_recent_overflow(sk))
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index cbc5fff3d846..5185c11dc444 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1822,7 +1822,7 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
 	goto discard_it;
 }

-INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
+void tcp_v6_early_demux(struct sk_buff *skb)
 {
 	const struct ipv6hdr *hdr;
 	const struct tcphdr *th;
@@ -2176,12 +2176,7 @@ struct proto tcpv6_prot = {
 };
 EXPORT_SYMBOL_GPL(tcpv6_prot);

-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct inet6_protocol tcpv6_protocol = {
-	.early_demux	=	tcp_v6_early_demux,
-	.early_demux_handler =	tcp_v6_early_demux,
+static const struct inet6_protocol tcpv6_protocol = {
 	.handler	=	tcp_v6_rcv,
 	.err_handler	=	tcp_v6_err,
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index a535c3f2e4af..aea28bf701be 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1052,7 +1052,7 @@ static struct sock *__udp6_lib_demux_lookup(struct net *net,
 	return NULL;
 }

-INDIRECT_CALLABLE_SCOPE void udp_v6_early_demux(struct sk_buff *skb)
+void udp_v6_early_demux(struct sk_buff *skb)
 {
 	struct net *net = dev_net(skb->dev);
 	const struct udphdr *uh;
@@ -1660,12 +1660,7 @@ int udpv6_getsockopt(struct sock *sk, int level, int optname,
 	return ipv6_getsockopt(sk, level, optname, optval, optlen);
 }

-/* thinking of making this const? Don't.
- * early_demux can change based on sysctl.
- */
-static struct inet6_protocol udpv6_protocol = {
-	.early_demux	=	udp_v6_early_demux,
-	.early_demux_handler =	udp_v6_early_demux,
+static const struct inet6_protocol udpv6_protocol = {
 	.handler	=	udpv6_rcv,
 	.err_handler	=	udpv6_err,
 	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index e479dd0561c5..16915f8eef2b 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -405,7 +405,7 @@ synproxy_build_ip(struct net *net, struct sk_buff *skb, __be32 saddr,
 	iph->tos	= 0;
 	iph->id		= 0;
 	iph->frag_off	= htons(IP_DF);
-	iph->ttl	= net->ipv4.sysctl_ip_default_ttl;
+	iph->ttl	= READ_ONCE(net->ipv4.sysctl_ip_default_ttl);
 	iph->protocol	= IPPROTO_TCP;
 	iph->check	= 0;
 	iph->saddr	= saddr;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 2d4dc1468a9a..6fd33c75d6bb 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -3531,7 +3531,7 @@ int tc_setup_action(struct flow_action *flow_action,
 		    struct tc_action *actions[],
 		    struct netlink_ext_ack *extack)
 {
-	int i, j, index, err = 0;
+	int i, j, k, index, err = 0;
 	struct tc_action *act;

 	BUILD_BUG_ON(TCA_ACT_HW_STATS_ANY != FLOW_ACTION_HW_STATS_ANY);
@@ -3551,14 +3551,18 @@ int tc_setup_action(struct flow_action *flow_action,
 		if (err)
 			goto err_out_locked;

-		entry->hw_stats = tc_act_hw_stats(act->hw_stats);
-		entry->hw_index = act->tcfa_index;
 		index = 0;
 		err = tc_setup_offload_act(act, entry, &index, extack);
-		if (!err)
-			j += index;
-		else
+		if (err)
 			goto err_out_locked;
+
+		for (k = 0; k < index ; k++) {
+			entry[k].hw_stats = tc_act_hw_stats(act->hw_stats);
+			entry[k].hw_index = act->tcfa_index;
+		}
+
+		j += index;
+
 		spin_unlock_bh(&act->tcfa_lock);
 	}
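tc_setup_offload_act() can expand a single tc action into several consecutive flow_action entries, reporting the count through index. The old code stamped hw_stats and hw_index only on entry[0], leaving the expanded entries uninitialized; the loop added above stamps every entry the action produced. The pattern, isolated into a hypothetical helper (types abbreviated; hw_stats is really an enum in the kernel struct):

/* Sketch: copy shared action metadata onto all n expanded entries. */
static void stamp_expanded_entries(struct flow_action_entry *entry, int n,
				   u8 hw_stats, u32 hw_index)
{
	int k;

	for (k = 0; k < n; k++) {
		entry[k].hw_stats = hw_stats;	/* same stats mode ... */
		entry[k].hw_index = hw_index;	/* ... and action index */
	}
}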
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c index 35928fefae33..1a094b087d88 100644 --- a/net/sctp/protocol.c +++ b/net/sctp/protocol.c @@ -358,7 +358,7 @@ static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp) if (addr->v4.sin_addr.s_addr != htonl(INADDR_ANY) && ret != RTN_LOCAL && !sp->inet.freebind && - !net->ipv4.sysctl_ip_nonlocal_bind) + !READ_ONCE(net->ipv4.sysctl_ip_nonlocal_bind)) return 0;
if (ipv6_only_sock(sctp_opt2sk(sp))) diff --git a/net/smc/smc_llc.c b/net/smc/smc_llc.c index c4d057b2941d..0bde36b56472 100644 --- a/net/smc/smc_llc.c +++ b/net/smc/smc_llc.c @@ -2122,7 +2122,7 @@ void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc) init_waitqueue_head(&lgr->llc_flow_waiter); init_waitqueue_head(&lgr->llc_msg_waiter); mutex_init(&lgr->llc_conf_mutex); - lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time; + lgr->llc_testlink_time = READ_ONCE(net->ipv4.sysctl_tcp_keepalive_time); }
/* called after lgr was removed from lgr_list */ diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 3a61bb594544..9c3933781ad4 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -97,13 +97,16 @@ static void tls_device_queue_ctx_destruction(struct tls_context *ctx) unsigned long flags;
spin_lock_irqsave(&tls_device_lock, flags); + if (unlikely(!refcount_dec_and_test(&ctx->refcount))) + goto unlock; + list_move_tail(&ctx->list, &tls_device_gc_list);
/* schedule_work inside the spinlock * to make sure tls_device_down waits for that work. */ schedule_work(&tls_device_gc_work); - +unlock: spin_unlock_irqrestore(&tls_device_lock, flags); }
@@ -194,8 +197,7 @@ void tls_device_sk_destruct(struct sock *sk) clean_acked_data_disable(inet_csk(sk)); }
- if (refcount_dec_and_test(&tls_ctx->refcount)) - tls_device_queue_ctx_destruction(tls_ctx); + tls_device_queue_ctx_destruction(tls_ctx); } EXPORT_SYMBOL_GPL(tls_device_sk_destruct);
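The two tls_device hunks move the final refcount_dec_and_test() under tls_device_lock, so a concurrent tls_device_down() that holds the lock either sees the context still referenced or already moved to the gc list, never half-torn-down. A userspace sketch of the dec-and-test-under-lock pattern (hypothetical names, a pthread mutex standing in for the spinlock):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

struct ctx { _Atomic int refcount; bool on_gc_list; };

static void queue_ctx_destruction(struct ctx *c)
{
        pthread_mutex_lock(&dev_lock);
        /* dec-and-test inside the lock, as in the first hunk above */
        if (atomic_fetch_sub(&c->refcount, 1) - 1 != 0)
                goto unlock;
        c->on_gc_list = true;           /* stands in for list_move_tail() */
unlock:
        pthread_mutex_unlock(&dev_lock);
}

int main(void)
{
        struct ctx c = { .refcount = 1, .on_gc_list = false };

        queue_ctx_destruction(&c);
        printf("queued=%d\n", c.on_gc_list);
        return 0;
}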
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index f1876ea61fdc..f1a0bab920a5 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -2678,8 +2678,10 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family, *num_xfrms = 0; return 0; } - if (IS_ERR(pols[0])) + if (IS_ERR(pols[0])) { + *num_pols = 0; return PTR_ERR(pols[0]); + }
*num_xfrms = pols[0]->xfrm_nr;
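Both xfrm_expand_policies() hunks, the one above and the one below, enforce the same rule: an error path that has dropped (or never took) its policy references must also zero the count reported through *num_pols, otherwise the caller's cleanup will put references a second time. A minimal sketch of that bug class (hypothetical names):

#include <stdio.h>

static int lookup(int which) { return which ? -1 : 0; } /* 2nd one fails */
static void put_refs(int n)  { printf("put %d refs\n", n); }

static int expand(int *num_pols)
{
        *num_pols = 0;
        if (lookup(0) < 0)
                return -1;              /* *num_pols is already 0 */
        (*num_pols)++;
        if (lookup(1) < 0) {
                put_refs(*num_pols);    /* undo our own references */
                *num_pols = 0;          /* the fix: report none held */
                return -1;
        }
        (*num_pols)++;
        return 0;
}

int main(void)
{
        int n;
        int ret = expand(&n);

        printf("ret=%d num=%d\n", ret, n);   /* num is 0 on failure */
        return 0;
}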
@@ -2694,6 +2696,7 @@ static int xfrm_expand_policies(const struct flowi *fl, u16 family, if (pols[1]) { if (IS_ERR(pols[1])) { xfrm_pols_put(pols, *num_pols); + *num_pols = 0; return PTR_ERR(pols[1]); } (*num_pols)++; diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index b749935152ba..b4ce16a934a2 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -2620,7 +2620,7 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload) int err;
if (family == AF_INET && - xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc) + READ_ONCE(xs_net(x)->ipv4.sysctl_ip_no_pmtu_disc)) x->props.flags |= XFRM_STATE_NOPMTUDISC;
err = -EPROTONOSUPPORT; diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index eea6e92500b8..50ebad1e3abb 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -2181,6 +2181,10 @@ bool ima_appraise_signature(enum kernel_read_file_id id) if (id >= READING_MAX_ID) return false;
+ if (id == READING_KEXEC_IMAGE && !(ima_appraise & IMA_APPRAISE_ENFORCE) + && security_locked_down(LOCKDOWN_KEXEC)) + return false; + func = read_idmap[id] ?: FILE_CHECK;
rcu_read_lock(); diff --git a/sound/soc/sof/intel/hda-loader.c b/sound/soc/sof/intel/hda-loader.c index 88d23924e1bf..87313145d14f 100644 --- a/sound/soc/sof/intel/hda-loader.c +++ b/sound/soc/sof/intel/hda-loader.c @@ -397,7 +397,8 @@ int hda_dsp_cl_boot_firmware(struct snd_sof_dev *sdev) struct firmware stripped_firmware; int ret, ret1, i;
- if ((sdev->fw_ready.flags & SOF_IPC_INFO_D3_PERSISTENT) && + if ((sdev->system_suspend_target < SOF_SUSPEND_S4) && + (sdev->fw_ready.flags & SOF_IPC_INFO_D3_PERSISTENT) && !(sof_debug_check_flag(SOF_DBG_IGNORE_D3_PERSISTENT)) && !sdev->first_boot) { dev_dbg(sdev->dev, "IMR restore supported, booting from IMR directly\n"); diff --git a/sound/soc/sof/pm.c b/sound/soc/sof/pm.c index 1c319582ca6f..76351f7f3243 100644 --- a/sound/soc/sof/pm.c +++ b/sound/soc/sof/pm.c @@ -23,6 +23,9 @@ static u32 snd_sof_dsp_power_target(struct snd_sof_dev *sdev) u32 target_dsp_state;
switch (sdev->system_suspend_target) { + case SOF_SUSPEND_S5: + case SOF_SUSPEND_S4: + /* DSP should be in D3 if the system is suspending to S3+ */ case SOF_SUSPEND_S3: /* DSP should be in D3 if the system is suspending to S3 */ target_dsp_state = SOF_DSP_PM_D3; @@ -327,8 +330,24 @@ int snd_sof_prepare(struct device *dev) return 0;
#if defined(CONFIG_ACPI) - if (acpi_target_system_state() == ACPI_STATE_S0) + switch (acpi_target_system_state()) { + case ACPI_STATE_S0: sdev->system_suspend_target = SOF_SUSPEND_S0IX; + break; + case ACPI_STATE_S1: + case ACPI_STATE_S2: + case ACPI_STATE_S3: + sdev->system_suspend_target = SOF_SUSPEND_S3; + break; + case ACPI_STATE_S4: + sdev->system_suspend_target = SOF_SUSPEND_S4; + break; + case ACPI_STATE_S5: + sdev->system_suspend_target = SOF_SUSPEND_S5; + break; + default: + break; + } #endif
return 0; diff --git a/sound/soc/sof/sof-priv.h b/sound/soc/sof/sof-priv.h index 0d9b640ae24c..c856f0d84e49 100644 --- a/sound/soc/sof/sof-priv.h +++ b/sound/soc/sof/sof-priv.h @@ -85,6 +85,8 @@ enum sof_system_suspend_state { SOF_SUSPEND_NONE = 0, SOF_SUSPEND_S0IX, SOF_SUSPEND_S3, + SOF_SUSPEND_S4, + SOF_SUSPEND_S5, };
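Taken together, the sof-priv.h, pm.c and hda-loader.c changes teach SOF about S4/S5 targets: the DSP is put in D3 for anything deeper than S3, and the fast IMR boot path is refused when heading to hibernate or poweroff, since IMR contents do not survive those states. A compilable sketch of the mapping (enums hypothetical, mirroring the hunks):

#include <stdbool.h>
#include <stdio.h>

enum acpi_state { S0, S1, S2, S3, S4, S5 };
enum sof_state  { SUSPEND_NONE, SUSPEND_S0IX, SUSPEND_S3,
                  SUSPEND_S4, SUSPEND_S5 };

static enum sof_state map_target(enum acpi_state s)
{
        switch (s) {
        case S0:           return SUSPEND_S0IX;
        case S1: case S2:
        case S3:           return SUSPEND_S3;
        case S4:           return SUSPEND_S4;
        case S5:           return SUSPEND_S5;
        }
        return SUSPEND_NONE;
}

static bool can_boot_from_imr(enum sof_state target)
{
        return target < SUSPEND_S4;     /* the check added in hda-loader.c */
}

int main(void)
{
        printf("S3: imr=%d\n", can_boot_from_imr(map_target(S3)));
        printf("S4: imr=%d\n", can_boot_from_imr(map_target(S4)));
        return 0;
}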
enum sof_dfsentry_type { diff --git a/tools/perf/tests/perf-time-to-tsc.c b/tools/perf/tests/perf-time-to-tsc.c index 4ad0dfbc8b21..7c7d20fc503a 100644 --- a/tools/perf/tests/perf-time-to-tsc.c +++ b/tools/perf/tests/perf-time-to-tsc.c @@ -20,8 +20,6 @@ #include "tsc.h" #include "mmap.h" #include "tests.h" -#include "pmu.h" -#include "pmu-hybrid.h"
/* * Except x86_64/i386 and Arm64, other archs don't support TSC in perf. Just @@ -106,28 +104,21 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su
evlist__config(evlist, &opts, NULL);
- evsel = evlist__first(evlist); - - evsel->core.attr.comm = 1; - evsel->core.attr.disabled = 1; - evsel->core.attr.enable_on_exec = 0; - - /* - * For hybrid "cycles:u", it creates two events. - * Init the second evsel here. - */ - if (perf_pmu__has_hybrid() && perf_pmu__hybrid_mounted("cpu_atom")) { - evsel = evsel__next(evsel); + /* For hybrid "cycles:u", it creates two events */ + evlist__for_each_entry(evlist, evsel) { evsel->core.attr.comm = 1; evsel->core.attr.disabled = 1; evsel->core.attr.enable_on_exec = 0; }
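The loop above replaces the cpu_atom special case: on hybrid systems "cycles:u" may create one event per PMU, so the test now configures every evsel in the list rather than assuming exactly one or two. A trivial sketch of the shape (hypothetical types):

#include <stdio.h>

struct evsel { int comm, disabled, enable_on_exec; };

int main(void)
{
        struct evsel evlist[3];         /* however many the parser created */

        for (unsigned i = 0; i < sizeof(evlist) / sizeof(evlist[0]); i++) {
                evlist[i].comm = 1;
                evlist[i].disabled = 1;
                evlist[i].enable_on_exec = 0;
        }
        printf("configured %zu events\n", sizeof(evlist) / sizeof(evlist[0]));
        return 0;
}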
- if (evlist__open(evlist) == -ENOENT) { - err = TEST_SKIP; + ret = evlist__open(evlist); + if (ret < 0) { + if (ret == -ENOENT) + err = TEST_SKIP; + else + pr_debug("evlist__open() failed\n"); goto out_err; } - CHECK__(evlist__open(evlist));
CHECK__(evlist__mmap(evlist, UINT_MAX));
@@ -167,10 +158,12 @@ static int test__perf_time_to_tsc(struct test_suite *test __maybe_unused, int su goto next_event;
if (strcmp(event->comm.comm, comm1) == 0) { + CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event)); CHECK__(evsel__parse_sample(evsel, event, &sample)); comm1_time = sample.time; } if (strcmp(event->comm.comm, comm2) == 0) { + CHECK_NOT_NULL__(evsel = evlist__event2evsel(evlist, event)); CHECK__(evsel__parse_sample(evsel, event, &sample)); comm2_time = sample.time; } diff --git a/tools/testing/selftests/gpio/Makefile b/tools/testing/selftests/gpio/Makefile index 71b306602368..616ed4019655 100644 --- a/tools/testing/selftests/gpio/Makefile +++ b/tools/testing/selftests/gpio/Makefile @@ -3,6 +3,6 @@ TEST_PROGS := gpio-mockup.sh gpio-sim.sh TEST_FILES := gpio-mockup-sysfs.sh TEST_GEN_PROGS_EXTENDED := gpio-mockup-cdev gpio-chip-info gpio-line-name -CFLAGS += -O2 -g -Wall -I../../../../usr/include/ +CFLAGS += -O2 -g -Wall -I../../../../usr/include/ $(KHDR_INCLUDES)
include ../lib.mk diff --git a/tools/testing/selftests/kvm/rseq_test.c b/tools/testing/selftests/kvm/rseq_test.c index 4158da0da2bb..2237d1aac801 100644 --- a/tools/testing/selftests/kvm/rseq_test.c +++ b/tools/testing/selftests/kvm/rseq_test.c @@ -82,8 +82,9 @@ static int next_cpu(int cpu) return cpu; }
-static void *migration_worker(void *ign) +static void *migration_worker(void *__rseq_tid) { + pid_t rseq_tid = (pid_t)(unsigned long)__rseq_tid; cpu_set_t allowed_mask; int r, i, cpu;
@@ -106,7 +107,7 @@ static void *migration_worker(void *ign) * stable, i.e. while changing affinity is in-progress. */ smp_wmb(); - r = sched_setaffinity(0, sizeof(allowed_mask), &allowed_mask); + r = sched_setaffinity(rseq_tid, sizeof(allowed_mask), &allowed_mask); TEST_ASSERT(!r, "sched_setaffinity failed, errno = %d (%s)", errno, strerror(errno)); smp_wmb(); @@ -231,7 +232,8 @@ int main(int argc, char *argv[]) vm = vm_create_default(VCPU_ID, 0, guest_code); ucall_init(vm, NULL);
- pthread_create(&migration_thread, NULL, migration_worker, 0); + pthread_create(&migration_thread, NULL, migration_worker, + (void *)(unsigned long)gettid());
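The rseq_test fix hinges on sched_setaffinity() semantics: a pid of 0 names the calling thread, so the migration worker was migrating itself rather than the thread running the vCPU; the main thread's tid is now passed in explicitly. A small standalone demo of those semantics (glibc wrapper, pinning the caller only as an example):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static void migrate(pid_t target_tid, int cpu)
{
        cpu_set_t mask;

        CPU_ZERO(&mask);
        CPU_SET(cpu, &mask);
        /* 0 here would pin the *caller*; target_tid pins the right task */
        if (sched_setaffinity(target_tid, sizeof(mask), &mask))
                perror("sched_setaffinity");
}

int main(void)
{
        pid_t self = (pid_t)syscall(SYS_gettid);

        migrate(self, 0);               /* pin ourselves to CPU 0 as a demo */
        printf("pinned tid %d\n", (int)self);
        return 0;
}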
for (i = 0; !done; i++) { vcpu_run(vm, VCPU_ID); diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 5ab12214e18d..24cb37d19c63 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -4299,8 +4299,11 @@ static int kvm_ioctl_create_device(struct kvm *kvm, kvm_put_kvm_no_destroy(kvm); mutex_lock(&kvm->lock); list_del(&dev->vm_node); + if (ops->release) + ops->release(dev); mutex_unlock(&kvm->lock); - ops->destroy(dev); + if (ops->destroy) + ops->destroy(dev); return ret; }
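Finally, the kvm_ioctl_create_device() error path now mirrors normal device teardown: ops->release (if present) runs under kvm->lock together with the list removal, and ops->destroy (also now optional) runs after the lock is dropped. A userspace sketch of that ordering (hypothetical ops structure, a pthread mutex standing in for kvm->lock):

#include <pthread.h>
#include <stdio.h>

struct dev;
struct dev_ops {
        void (*release)(struct dev *d); /* optional, called locked */
        void (*destroy)(struct dev *d); /* optional, called unlocked */
};
struct dev { const struct dev_ops *ops; };

static pthread_mutex_t vm_lock = PTHREAD_MUTEX_INITIALIZER;

static void undo_create(struct dev *d)
{
        pthread_mutex_lock(&vm_lock);
        /* list_del(&dev->vm_node) would go here */
        if (d->ops->release)
                d->ops->release(d);
        pthread_mutex_unlock(&vm_lock);
        if (d->ops->destroy)
                d->ops->destroy(d);
}

static void rel(struct dev *d) { (void)d; printf("release (locked)\n"); }
static void des(struct dev *d) { (void)d; printf("destroy (unlocked)\n"); }

int main(void)
{
        struct dev d = { .ops = &(const struct dev_ops){ rel, des } };

        undo_create(&d);
        return 0;
}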