From: Bartosz Szczepanek <bsz@semihalf.com>
This patch introduces an OS-independent library with the configuration routines for the new PP2 NIC, whose support will be added in the following commits. The library was obtained from Marvell.
Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Bartosz Szczepanek <bsz@semihalf.com>
Signed-off-by: Marcin Wojtas <mw@semihalf.com>
---
 Drivers/Net/Pp2Dxe/mvpp2_lib.c | 4299 ++++++++++++++++++++++++++++++++++++++++
 Drivers/Net/Pp2Dxe/mvpp2_lib.h | 2362 ++++++++++++++++++++++
 2 files changed, 6661 insertions(+)
 create mode 100644 Drivers/Net/Pp2Dxe/mvpp2_lib.c
 create mode 100644 Drivers/Net/Pp2Dxe/mvpp2_lib.h
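Note for reviewers: the library deliberately avoids direct MMIO and allocator calls; all hardware and memory access goes through thin mvpp2_read()/mvpp2_write()/mvpp2_alloc()/mvpp2_free()/mvpp2_memset() wrappers, which is what keeps the file OS-independent. Those wrappers are expected to come from Pp2Dxe.h, which is not part of this patch. Purely as an illustration (the definitions below are an assumption, not the actual Pp2Dxe.h contents), the UEFI-side glue could look roughly like this, built on edk2's IoLib, MemoryAllocationLib and BaseMemoryLib:

/* Hypothetical sketch of the OS glue this library expects; the real
 * definitions live in Pp2Dxe.h, introduced by a follow-up patch.
 * Assumes struct mvpp2 carries the controller's MMIO base address
 * in a 'base' field, as the register accessors below imply.
 */
#include <Library/IoLib.h>
#include <Library/MemoryAllocationLib.h>
#include <Library/BaseMemoryLib.h>

static inline MV_U32 mvpp2_read(struct mvpp2 *priv, MV_U32 offset)
{
  return MmioRead32(priv->base + offset);
}

static inline MV_VOID mvpp2_write(struct mvpp2 *priv, MV_U32 offset,
                                  MV_U32 data)
{
  MmioWrite32(priv->base + offset, data);
}

/* Note the argument order: edk2's SetMem takes (Buffer, Length, Value) */
#define mvpp2_alloc(size)          AllocateZeroPool(size)
#define mvpp2_free(ptr)            FreePool(ptr)
#define mvpp2_memset(ptr, val, n)  SetMem(ptr, n, val)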
diff --git a/Drivers/Net/Pp2Dxe/mvpp2_lib.c b/Drivers/Net/Pp2Dxe/mvpp2_lib.c
new file mode 100644
index 0000000..e18158d
--- /dev/null
+++ b/Drivers/Net/Pp2Dxe/mvpp2_lib.c
@@ -0,0 +1,4299 @@
+/*******************************************************************************
+Copyright (C) 2016 Marvell International Ltd.
+
+This software file (the "File") is owned and distributed by Marvell
+International Ltd. and/or its affiliates ("Marvell") under the following
+alternative licensing terms. Once you have made an election to distribute the
+File under one of the following license alternatives, please (i) delete this
+introductory statement regarding license alternatives, (ii) delete the three
+license alternatives that you have not elected to use and (iii) preserve the
+Marvell copyright notice above.
+
+********************************************************************************
+Marvell Commercial License Option
+
+If you received this File from Marvell and you have entered into a commercial
+license agreement (a "Commercial License") with Marvell, the File is licensed
+to you under the terms of the applicable Commercial License.
+
+********************************************************************************
+Marvell GPL License Option
+
+This program is free software: you can redistribute it and/or modify it
+under the terms of the GNU General Public License as published by the Free
+Software Foundation, either version 2 of the License, or any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see http://www.gnu.org/licenses/.
+
+********************************************************************************
+Marvell GNU General Public License FreeRTOS Exception
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File in accordance with the terms and conditions of the Lesser
+General Public License Version 2.1 plus the following FreeRTOS exception.
+An independent module is a module which is not derived from or based on
+FreeRTOS.
+Clause 1:
+Linking FreeRTOS statically or dynamically with other modules is making a
+combined work based on FreeRTOS. Thus, the terms and conditions of the GNU
+General Public License cover the whole combination.
+As a special exception, the copyright holder of FreeRTOS gives you permission
+to link FreeRTOS with independent modules that communicate with FreeRTOS solely
+through the FreeRTOS API interface, regardless of the license terms of these
+independent modules, and to copy and distribute the resulting combined work
+under terms of your choice, provided that:
+1. Every copy of the combined work is accompanied by a written statement that
+details to the recipient the version of FreeRTOS used and an offer by yourself
+to provide the FreeRTOS source code (including any modifications you may have
+made) should the recipient request it.
+2. The combined work is not itself an RTOS, scheduler, kernel or related
+product.
+3. The independent modules add significant and primary functionality to
+FreeRTOS and do not merely extend the existing functionality already present in
+FreeRTOS.
+Clause 2:
+FreeRTOS may not be used for any competitive or comparative purpose, including
+the publication of any form of run time or compile time metric, without the
+express permission of Real Time Engineers Ltd. (this is the norm within the
+industry and is intended to ensure information accuracy).
+
+********************************************************************************
+Marvell BSD License Option
+
+If you received this File from Marvell, you may opt to use, redistribute and/or
+modify this File under the following licensing terms.
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+	* Redistributions of source code must retain the above copyright notice,
+	  this list of conditions and the following disclaimer.
+
+	* Redistributions in binary form must reproduce the above copyright
+	  notice, this list of conditions and the following disclaimer in the
+	  documentation and/or other materials provided with the distribution.
+
+	* Neither the name of Marvell nor the names of its contributors may be
+	  used to endorse or promote products derived from this software without
+	  specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+*******************************************************************************/
+
+#include "Pp2Dxe.h"
+#include "mvpp2_lib.h"
+
+/* Parser configuration routines */
+
+/* Update parser tcam and sram hw entries */
+static MV_32 mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
+{
+	MV_32 i;
+
+	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+		return MVPP2_EINVAL;
+
+	/* Clear entry invalidation bit */
+	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+
+	/* Write tcam index - indirect access */
+	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+		mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
+
+	/* Write sram index - indirect access */
+	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+		mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
+
+	return 0;
+}
+
+/* Read tcam entry from hw */
+static MV_32 mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
+{
+	MV_32 i;
+
+	if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+		return MVPP2_EINVAL;
+
+	/* Write tcam index - indirect access */
+	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+
+	pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
+			MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
+	if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
+		return MVPP2_PRS_TCAM_ENTRY_INVALID;
+
+	for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+		pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
+
+	/* Write sram index - indirect access */
+	mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+	for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+		pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
+
+	return 0;
+}
+
+/* Invalidate tcam hw entry */
+static MV_VOID mvpp2_prs_hw_inv(struct mvpp2 *priv, MV_32 index)
+{
+	/* Write index - indirect access */
+	mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
+	mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
+		    MVPP2_PRS_TCAM_INV_MASK);
+}
+
+/* Enable shadow table entry and set its lookup ID */
+static MV_VOID mvpp2_prs_shadow_set(struct mvpp2 *priv, MV_32 index, MV_32 lu)
+{
+	priv->prs_shadow[index].valid = MV_TRUE;
+	priv->prs_shadow[index].lu = lu;
+}
+
+/* Update ri fields in shadow table entry */
+static MV_VOID mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, MV_32 index,
+				       MV_U32 ri, MV_U32 ri_mask)
+{
+	priv->prs_shadow[index].ri_mask = ri_mask;
+	priv->prs_shadow[index].ri = ri;
+}
+
+/* Update lookup field in tcam sw entry */
+static MV_VOID mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, MV_U32 lu)
+{
+	MV_32 enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
+
+	pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
+	pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
+}
+
+/* Update mask for single port in tcam sw entry */
+static MV_VOID mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
+				       MV_U32 port, MV_BOOL add)
+{
+	MV_32 enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+	if (add)
+		pe->tcam.byte[enable_off] &= ~(1 << port);
+	else
+		pe->tcam.byte[enable_off] |= 1 << port;
+}
+
+/* Update port map in tcam sw entry */
+static MV_VOID mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
+					   MV_U32 ports)
+{
+	MV_U8 port_mask = MVPP2_PRS_PORT_MASK;
+	MV_32 enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+	pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
+	pe->tcam.byte[enable_off] &= ~port_mask;
+	pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
+}
+
+/* Obtain port map from tcam sw entry */
+static MV_U32 mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
+{
+	MV_32 enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+	return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
+}
+
+/* Set byte of data and its enable bits in tcam sw entry */
+static MV_VOID mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
+					    MV_U32 offs, MV_U8 byte,
+					    MV_U8 enable)
+{
+	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
+	pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
+}
+
+/* Get byte of data and its enable bits from tcam sw entry */
+static MV_VOID mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+					    MV_U32 offs, MV_U8 *byte,
+					    MV_U8 *enable)
+{
+	*byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
+	*enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
+}
+
+/* Compare tcam data bytes with a pattern */
+static MV_BOOL mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, MV_32 offs,
+				       MV_U16 data)
+{
+	MV_32 off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
+	MV_U16 tcam_data;
+
+	tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
+	if (tcam_data != data)
+		return MV_FALSE;
+	return MV_TRUE;
+}
+
+/* Update ai bits in tcam sw entry */
+static MV_VOID mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
+					MV_U32 bits, MV_U32 enable)
+{
+	MV_32 i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
+
+	for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
+
+		if (!(enable & BIT(i)))
+			continue;
+
+		if (bits & BIT(i))
+			pe->tcam.byte[ai_idx] |= 1 << i;
+		else
+			pe->tcam.byte[ai_idx] &= ~(1 << i);
+	}
+
+	pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
+}
+
+/* Get ai bits from tcam sw entry */
+static MV_32 mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
+{
+	return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
+}
+
+/* Get dword of data and its enable bits from tcam sw entry */
+static MV_VOID mvpp2_prs_tcam_data_dword_get(struct mvpp2_prs_entry *pe,
+					     MV_U32 offs, MV_U32 *word,
+					     MV_U32 *enable)
+{
+	MV_32 index, offset;
+	MV_U8 byte, mask;
+
+	for (index = 0; index < 4; index++) {
+		offset = (offs * 4) + index;
+		mvpp2_prs_tcam_data_byte_get(pe, offset, &byte, &mask);
+		((MV_U8 *)word)[index] = byte;
+		((MV_U8 *)enable)[index] = mask;
+	}
+}
+
+/* Set ethertype in tcam sw entry */
+static MV_VOID mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, MV_32 offset,
+				     MV_U16 ethertype)
+{
+	mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
+	mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
+}
+
+/* Set bits in sram sw entry */
+static MV_VOID mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe,
+				       MV_32 bit_num, MV_32 val)
+{
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
+}
+
+/* Clear bits in sram sw entry */
+static MV_VOID mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe,
+					 MV_32 bit_num, MV_32 val)
+{
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
+}
+
+/* Update ri bits in sram sw entry */
+static MV_VOID mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
+					MV_U32 bits, MV_U32 mask)
+{
+	MV_U32 i;
+
+	for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
+		MV_32 ri_off = MVPP2_PRS_SRAM_RI_OFFS;
+
+		if (!(mask & BIT(i)))
+			continue;
+
+		if (bits & BIT(i))
+			mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
+		else
+			mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
+
+		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
+	}
+}
+
+/* Obtain ri bits from sram sw entry */
+static MV_32 mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
+{
+	return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
+}
+
+/* Update ai bits in sram sw entry */
+static MV_VOID mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
+					MV_U32 bits, MV_U32 mask)
+{
+	MV_U32 i;
+	MV_32 ai_off = MVPP2_PRS_SRAM_AI_OFFS;
+
+	for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
+
+		if (!(mask & BIT(i)))
+			continue;
+
+		if (bits & BIT(i))
+			mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
+		else
+			mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
+
+		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
+	}
+}
+
+/* Read ai bits from sram sw entry */
+static MV_32 mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
+{
+	MV_U8 bits;
+	MV_32 ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
+	MV_32 ai_en_off = ai_off + 1;
+	MV_32 ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
+
+	bits = (pe->sram.byte[ai_off] >> ai_shift) |
+	       (pe->sram.byte[ai_en_off] << (8 - ai_shift));
+
+	return bits;
+}
+
+/* In sram sw entry set lookup ID field of the tcam key to be used in the next
+ * lookup iteration
+ */
+static MV_VOID mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
+					  MV_U32 lu)
+{
+	MV_32 sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
+
+	mvpp2_prs_sram_bits_clear(pe, sram_next_off,
+				  MVPP2_PRS_SRAM_NEXT_LU_MASK);
+	mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
+}
+
+/* In the sram sw entry set sign and value of the next lookup offset
+ * and the offset value generated to the classifier
+ */
+static MV_VOID mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, MV_32 shift,
+					MV_U32 op)
+{
+	/* Set sign */
+	if (shift < 0) {
+		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+		shift = 0 - shift;
+	} else {
+		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+	}
+
+	/* Set value */
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
+		(MV_U8)shift;
+
+	/* Reset and set operation */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
+				  MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
+	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
+
+	/* Set base offset as current */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* In the sram sw entry set sign and value of the user defined offset
+ * generated to the classifier
+ */
+static MV_VOID mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
+					 MV_U32 type, MV_32 offset,
+					 MV_U32 op)
+{
+	/* Set sign */
+	if (offset < 0) {
+		mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+		offset = 0 - offset;
+	} else {
+		mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+	}
+
+	/* Set value */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+				  MVPP2_PRS_SRAM_UDF_MASK);
+	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+					MVPP2_PRS_SRAM_UDF_BITS)] &=
+		~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+					MVPP2_PRS_SRAM_UDF_BITS)] |=
+		(offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+
+	/* Set offset type */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
+				  MVPP2_PRS_SRAM_UDF_TYPE_MASK);
+	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
+
+	/* Set offset operation */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
+	mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
+
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
+		~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
+		  (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+	pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+					MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
+		(op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+	/* Set base offset as current */
+	mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* Find parser flow entry */
+static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv,
+						   MV_32 flow)
+{
+	struct mvpp2_prs_entry *pe;
+	MV_32 tid;
+	MV_U32 dword, enable;
+
+	pe = mvpp2_alloc(sizeof(*pe));
+	if (!pe)
+		return MVPP2_NULL;
+	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
+
+	/* Go through all the entries with MVPP2_PRS_LU_FLOWS */
+	for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
+		MV_U8 bits;
+
+		if (!priv->prs_shadow[tid].valid ||
+		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
+			continue;
+
+		pe->index = tid;
+		mvpp2_prs_hw_read(priv, pe);
+
+		/* Check result info, because there may be
+		 * several TCAM lines to generate the same flow */
+		mvpp2_prs_tcam_data_dword_get(pe, 0, &dword, &enable);
+		if ((dword != 0) || (enable != 0))
+			continue;
+
+		bits = mvpp2_prs_sram_ai_get(pe);
+
+		/* Sram stores the classification lookup ID in AI bits [5:0] */
+		if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
+			return pe;
+	}
+	mvpp2_free(pe);
+
+	return MVPP2_NULL;
+}
+
+/* Return first free tcam index, seeking from start to end */
+static MV_32 mvpp2_prs_tcam_first_free(struct mvpp2 *priv, MV_U8 start,
+				       MV_U8 end)
+{
+	MV_32 tid;
+
+	if (start > end)
+		mvpp2_swap(start, end);
+
+	if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
+		end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
+
+	for (tid = start; tid <= end; tid++) {
+		if (!priv->prs_shadow[tid].valid)
+			return tid;
+	}
+
+	return MVPP2_EINVAL;
+}
+
+/* Enable/disable dropping all mac da's */
+static MV_VOID mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, MV_32 port,
+					  MV_BOOL add)
+{
+	struct mvpp2_prs_entry pe;
+
+	if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
+		/* Entry exists - update port only */
+		pe.index = MVPP2_PE_DROP_ALL;
+		mvpp2_prs_hw_read(priv, &pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+		pe.index = MVPP2_PE_DROP_ALL;
+
+		/* Non-promiscuous mode for all ports - DROP unknown packets */
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+					 MVPP2_PRS_RI_DROP_MASK);
+
+		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(&pe, 0);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(&pe, port, add);
+
+	mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set port to promiscuous mode */
+MV_VOID mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, MV_32 port, MV_BOOL add)
+{
+	struct mvpp2_prs_entry pe;
+
+	/* Promiscuous mode - Accept unknown packets */
+
+	if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
+		/* Entry exists - update port only */
+		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
+		mvpp2_prs_hw_read(priv, &pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+		pe.index = MVPP2_PE_MAC_PROMISCUOUS;
+
+		/* Continue - set next lookup */
+		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+		/* Set result info bits */
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
+					 MVPP2_PRS_RI_L2_CAST_MASK);
+
+		/* Shift to ethertype */
+		mvpp2_prs_sram_shift_set(&pe, 2 * MV_ETH_ALEN,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(&pe, port, add);
+
+	mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Accept multicast */
+MV_VOID mvpp2_prs_mac_multi_set(struct mvpp2 *priv, MV_32 port, MV_32 index,
+				MV_BOOL add)
+{
+	struct mvpp2_prs_entry pe;
+	MV_U8 da_mc;
+
+	/* Ethernet multicast address first byte is
+	 * 0x01 for IPv4 and 0x33 for IPv6
+	 */
+	da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
+
+	if (priv->prs_shadow[index].valid) {
+		/* Entry exists - update port only */
+		pe.index = index;
+		mvpp2_prs_hw_read(priv, &pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+		pe.index = index;
+
+		/* Continue - set next lookup */
+		mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+		/* Set result info bits */
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
+					 MVPP2_PRS_RI_L2_CAST_MASK);
+
+		/* Update tcam entry data first byte */
+		mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
+
+		/* Shift to ethertype */
+		mvpp2_prs_sram_shift_set(&pe, 2 * MV_ETH_ALEN,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(&pe, port, add);
+
+	mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set entry for dsa packets */
+static MV_VOID mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, MV_32 port,
+				     MV_BOOL add, MV_BOOL tagged,
+				     MV_BOOL extend)
+{
+	struct mvpp2_prs_entry pe;
+	MV_32 tid, shift;
+
+	if (extend) {
+		tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
+		shift = 8;
+	} else {
+		tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
+		shift = 4;
+	}
+
+	if (priv->prs_shadow[tid].valid) {
+		/* Entry exists - update port only */
+		pe.index = tid;
+		mvpp2_prs_hw_read(priv, &pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+		pe.index = tid;
+
+		/* Shift 4 bytes if DSA tag or 8 bytes in case of EDSA tag */
+		mvpp2_prs_sram_shift_set(&pe, shift,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
+
+		if (tagged) {
+			/* Set tagged bit in DSA tag */
+			mvpp2_prs_tcam_data_byte_set(&pe, 0,
+					MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+					MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+			/* Clear all ai bits for next iteration */
+			mvpp2_prs_sram_ai_update(&pe, 0,
+						 MVPP2_PRS_SRAM_AI_MASK);
+			/* If packet is tagged continue check vlans */
+			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+		} else {
+			/* Set result info bits to 'no vlans' */
+			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+						 MVPP2_PRS_RI_VLAN_MASK);
+			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+		}
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(&pe, 0);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(&pe, port, add);
+
+	mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set entry for dsa ethertype */
+static MV_VOID mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, MV_32 port,
+					       MV_BOOL add, MV_BOOL tagged,
+					       MV_BOOL extend)
+{
+	struct mvpp2_prs_entry pe;
+	MV_32 tid, shift, port_mask;
+
+	if (extend) {
+		tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
+			       MVPP2_PE_ETYPE_EDSA_UNTAGGED;
+		port_mask = 0;
+		shift = 8;
+	} else {
+		tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
+			       MVPP2_PE_ETYPE_DSA_UNTAGGED;
+		port_mask = MVPP2_PRS_PORT_MASK;
+		shift = 4;
+	}
+
+	if (priv->prs_shadow[tid].valid) {
+		/* Entry exists - update port only */
+		pe.index = tid;
+		mvpp2_prs_hw_read(priv, &pe);
+	} else {
+		/* Entry doesn't exist - create new */
+		mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+		pe.index = tid;
+
+		/* Set ethertype */
+		mvpp2_prs_match_etype(&pe, 0, MV_ETH_P_EDSA);
+		mvpp2_prs_match_etype(&pe, 2, 0);
+
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
+					 MVPP2_PRS_RI_DSA_MASK);
+		/* Shift ethertype + 2 byte reserved + tag */
+		mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+		/* Update shadow table */
+		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
+
+		if (tagged) {
+			/* Set tagged bit in DSA tag */
+			mvpp2_prs_tcam_data_byte_set(&pe,
+					MVPP2_ETH_TYPE_LEN + 2 + 3,
+					MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+					MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+			/* Clear all ai bits for next iteration */
+			mvpp2_prs_sram_ai_update(&pe, 0,
+						 MVPP2_PRS_SRAM_AI_MASK);
+			/* If packet is tagged continue check vlans */
+			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+		} else {
+			/* Set result info bits to 'no vlans' */
+			mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+						 MVPP2_PRS_RI_VLAN_MASK);
+			mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+		}
+		/* Mask/unmask all ports, depending on dsa type */
+		mvpp2_prs_tcam_port_map_set(&pe, port_mask);
+	}
+
+	/* Update port mask */
+	mvpp2_prs_tcam_port_set(&pe, port, add);
+
+	mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Search for existing single/triple vlan entry */
+static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
+						   MV_U16 tpid, MV_32 ai)
+{
+	struct mvpp2_prs_entry *pe;
+	MV_32 tid;
+
+	pe = mvpp2_alloc(sizeof(*pe));
+	if (!pe)
+		return MVPP2_NULL;
+	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+
+	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
+	for (tid = MVPP2_PE_FIRST_FREE_TID;
+	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+		MV_U32 ri_bits, ai_bits;
+		MV_BOOL match;
+
+		if (!priv->prs_shadow[tid].valid ||
+		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+			continue;
+
+		pe->index = tid;
+
+		mvpp2_prs_hw_read(priv, pe);
+		match = mvpp2_prs_tcam_data_cmp(pe, 0, mvpp2_swab16(tpid));
+		if (!match)
+			continue;
+
+		/* Get vlan type */
+		ri_bits = mvpp2_prs_sram_ri_get(pe);
+		ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+
+		/* Get current ai value from tcam */
+		ai_bits = mvpp2_prs_tcam_ai_get(pe);
+		/* Clear double vlan bit */
+		ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
+
+		if (ai != ai_bits)
+			continue;
+
+		if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+		    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+			return pe;
+	}
+	mvpp2_free(pe);
+
+	return MVPP2_NULL;
+}
+
+/* Add/update single/triple vlan entry */
+MV_32 mvpp2_prs_vlan_add(struct mvpp2 *priv, MV_U16 tpid, MV_32 ai,
+			 MV_U32 port_map)
+{
+	struct mvpp2_prs_entry *pe;
+	MV_32 tid_aux, tid;
+	MV_32 ret = 0;
+
+	pe = mvpp2_prs_vlan_find(priv, tpid, ai);
+
+	if (!pe) {
+		/* Create new tcam entry */
+		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
+						MVPP2_PE_FIRST_FREE_TID);
+		if (tid < 0)
+			return tid;
+
+		pe = mvpp2_alloc(sizeof(*pe));
+		if (!pe)
+			return MVPP2_ENOMEM;
+
+		/* Get last double vlan tid */
+		for (tid_aux = MVPP2_PE_LAST_FREE_TID;
+		     tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
+			MV_U32 ri_bits;
+
+			if (!priv->prs_shadow[tid_aux].valid ||
+			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+				continue;
+
+			pe->index = tid_aux;
+			mvpp2_prs_hw_read(priv, pe);
+			ri_bits = mvpp2_prs_sram_ri_get(pe);
+			if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
+			    MVPP2_PRS_RI_VLAN_DOUBLE)
+				break;
+		}
+
+		if (tid <= tid_aux) {
+			ret = MVPP2_EINVAL;
+			goto error;
+		}
+
+		mvpp2_memset(pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+		pe->index = tid;
+
+		mvpp2_prs_match_etype(pe, 0, tpid);
+
+		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
+		/* Shift 4 bytes - skip 1 vlan tag */
+		mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+		/* Clear all ai bits for next iteration */
+		mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+		if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
+			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
+						 MVPP2_PRS_RI_VLAN_MASK);
+		} else {
+			ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
+			mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
+						 MVPP2_PRS_RI_VLAN_MASK);
+		}
+		mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
+
+		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
+	}
+	/* Update ports' mask */
+	mvpp2_prs_tcam_port_map_set(pe, port_map);
+
+	mvpp2_prs_hw_write(priv, pe);
+
+error:
+	mvpp2_free(pe);
+
+	return ret;
+}
+
+/* Get first free double vlan ai number */
+MV_32 mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
+{
+	MV_32 i;
+
+	for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
+		if (!priv->prs_double_vlans[i])
+			return i;
+	}
+
+	return MVPP2_EINVAL;
+}
+
+/* Search for existing double vlan entry */
+struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
+						   MV_U16 tpid1,
+						   MV_U16 tpid2)
+{
+	struct mvpp2_prs_entry *pe;
+	MV_32 tid;
+
+	pe = mvpp2_alloc(sizeof(*pe));
+	if (!pe)
+		return MVPP2_NULL;
+	mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+
+	/* Go through all the entries with MVPP2_PRS_LU_VLAN */
+	for (tid = MVPP2_PE_FIRST_FREE_TID;
+	     tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+		MV_U32 ri_mask;
+		MV_BOOL match;
+
+		if (!priv->prs_shadow[tid].valid ||
+		    priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+			continue;
+
+		pe->index = tid;
+		mvpp2_prs_hw_read(priv, pe);
+
+		match = mvpp2_prs_tcam_data_cmp(pe, 0, mvpp2_swab16(tpid1))
+			&& mvpp2_prs_tcam_data_cmp(pe, 4, mvpp2_swab16(tpid2));
+
+		if (!match)
+			continue;
+
+		ri_mask = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
+		if (ri_mask == MVPP2_PRS_RI_VLAN_DOUBLE)
+			return pe;
+	}
+	mvpp2_free(pe);
+
+	return MVPP2_NULL;
+}
+
+/* Add or update double vlan entry */
+MV_32 mvpp2_prs_double_vlan_add(struct mvpp2 *priv, MV_U16 tpid1,
+				MV_U16 tpid2,
+				MV_U32 port_map)
+{
+	struct mvpp2_prs_entry *pe;
+	MV_32 tid_aux, tid, ai, ret = 0;
+
+	pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
+
+	if (!pe) {
+		/* Create new tcam entry */
+		tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+						MVPP2_PE_LAST_FREE_TID);
+		if (tid < 0)
+			return tid;
+
+		pe = mvpp2_alloc(sizeof(*pe));
+		if (!pe)
+			return MVPP2_ENOMEM;
+
+		/* Set ai value for new double vlan entry */
+		ai = mvpp2_prs_double_vlan_ai_free_get(priv);
+		if (ai < 0) {
+			ret = ai;
+			goto error;
+		}
+
+		/* Get first single/triple vlan tid */
+		for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
+		     tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
+			MV_U32 ri_bits;
+
+			if (!priv->prs_shadow[tid_aux].valid ||
+			    priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+				continue;
+
+			pe->index = tid_aux;
+			mvpp2_prs_hw_read(priv, pe);
+			ri_bits = mvpp2_prs_sram_ri_get(pe);
+			ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+			if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+			    ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+				break;
+		}
+
+		if (tid >= tid_aux) {
+			ret = MVPP2_ERANGE;
+			goto error;
+		}
+
+		mvpp2_memset(pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+		pe->index = tid;
+
+		priv->prs_double_vlans[ai] = MV_TRUE;
+
+		mvpp2_prs_match_etype(pe, 0, tpid1);
+		mvpp2_prs_match_etype(pe, 4, tpid2);
+
+		mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
+		/* Shift 8 bytes - skip 2 vlan tags */
+		mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
+					 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+		mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+					 MVPP2_PRS_RI_VLAN_MASK);
+		mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
+					 MVPP2_PRS_SRAM_AI_MASK);
+
+		mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
+	}
+
+	/* Update ports' mask */
+	mvpp2_prs_tcam_port_map_set(pe, port_map);
+	mvpp2_prs_hw_write(priv, pe);
+
+error:
+	mvpp2_free(pe);
+	return ret;
+}
+
+/* IPv4 header parsing for fragmentation and L4 offset */
+static MV_32 mvpp2_prs_ip4_proto(struct mvpp2 *priv, MV_U16 proto,
+				 MV_U32 ri, MV_U32 ri_mask)
+{
+	struct mvpp2_prs_entry pe;
+	MV_32 tid;
+
+	if ((proto != MV_IPPR_TCP) && (proto != MV_IPPR_UDP) &&
+	    (proto != MV_IPPR_IGMP))
+		return MVPP2_EINVAL;
+
+	/* Fragmented packet */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	pe.index = tid;
+
+	/* Set next lu to IPv4 */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	/* Set L4 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+				  sizeof(mvpp2_iphdr) - 4,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+				 MVPP2_PRS_IPV4_DIP_AI_BIT);
+	mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
+				 ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Not fragmented packet */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	pe.index = tid;
+	/* Clear ri before updating */
+	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
+	mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	return 0;
+}
+
+/* IPv4 L3 multicast or broadcast */
+static MV_32 mvpp2_prs_ip4_cast(struct mvpp2 *priv, MV_U16 l3_cast)
+{
+	struct mvpp2_prs_entry pe;
+	MV_32 mask, tid;
+
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	pe.index = tid;
+
+	switch (l3_cast) {
+	case MVPP2_PRS_L3_MULTI_CAST:
+		mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
+					     MVPP2_PRS_IPV4_MC_MASK);
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+					 MVPP2_PRS_RI_L3_ADDR_MASK);
+		break;
+	case MVPP2_PRS_L3_BROAD_CAST:
+		mask = MVPP2_PRS_IPV4_BC_MASK;
+		mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
+		mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
+		mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
+		mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
+		mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
+					 MVPP2_PRS_RI_L3_ADDR_MASK);
+		break;
+	default:
+		return MVPP2_EINVAL;
+	}
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+				 MVPP2_PRS_IPV4_DIP_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	return 0;
+}
+
+/* Set entries for protocols over IPv6 */
+static MV_32 mvpp2_prs_ip6_proto(struct mvpp2 *priv, MV_U16 proto,
+				 MV_U32 ri, MV_U32 ri_mask)
+{
+	struct mvpp2_prs_entry pe;
+	MV_32 tid;
+
+	if ((proto != MV_IPPR_TCP) && (proto != MV_IPPR_UDP) &&
+	    (proto != MV_IPPR_ICMPV6) && (proto != MV_IPPR_IPIP))
+		return MVPP2_EINVAL;
+
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = tid;
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+				  sizeof(mvpp2_ipv6hdr) - 6,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Write HW */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	return 0;
+}
+
+/* IPv6 L3 multicast entry */
+static MV_32 mvpp2_prs_ip6_cast(struct mvpp2 *priv, MV_U16 l3_cast)
+{
+	struct mvpp2_prs_entry pe;
+	MV_32 tid;
+
+	if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
+		return MVPP2_EINVAL;
+
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	pe.index = tid;
+
+	/* Finished: go to flowid generation */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+				 MVPP2_PRS_RI_L3_ADDR_MASK);
+	mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+				 MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Shift back to IPv6 NH */
+	mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+	mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
+				     MVPP2_PRS_IPV6_MC_MASK);
+	mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	return 0;
+}
+
+/* Parser per-port initialization */
+static MV_VOID mvpp2_prs_hw_port_init(struct mvpp2 *priv, MV_32 port,
+				      MV_32 lu_first, MV_32 lu_max,
+				      MV_32 offset)
+{
+	MV_U32 val;
+
+	/* Set lookup ID */
+	val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
+	val &= ~MVPP2_PRS_PORT_LU_MASK(port);
+	val |= MVPP2_PRS_PORT_LU_VAL(port, lu_first);
+	mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
+
+	/* Set maximum number of loops for packet received from port */
+	val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
+	val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
+	val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
+	mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
+
+	/* Set initial offset for packet header extraction for the first
+	 * searching loop
+	 */
+	val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
+	val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
+	val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
+	mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
+}
+
+/* Default flow entries initialization for all ports */
+static MV_VOID mvpp2_prs_def_flow_init(struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+	MV_32 port;
+
+	for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+		mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+		mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+		pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
+
+		/* Mask all ports */
+		mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+		/* Set flow ID */
+		mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
+		mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+		/* Update shadow table and hw entry */
+		mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
+		mvpp2_prs_hw_write(priv, &pe);
+	}
+}
+
+/* Set default entry for Marvell Header field */
+static MV_VOID mvpp2_prs_mh_init(struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+
+	pe.index = MVPP2_PE_MH_DEFAULT;
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
+	mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set default entries (place holder) for promiscuous, non-promiscuous and
+ * multicast MAC addresses
+ */
+static MV_VOID mvpp2_prs_mac_init(struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+
+	/* Non-promiscuous mode for all ports - DROP unknown packets */
+	pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+				 MVPP2_PRS_RI_DROP_MASK);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* place holders only - no ports */
+	mvpp2_prs_mac_drop_all_set(priv, 0, MV_FALSE);
+	mvpp2_prs_mac_promisc_set(priv, 0, MV_FALSE);
+	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, MV_FALSE);
+	mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, MV_FALSE);
+}
+
+/* Set default entries for various types of dsa packets */
+static MV_VOID mvpp2_prs_dsa_init(struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+
+	/* Non-tagged EDSA entry - place holder */
+	mvpp2_prs_dsa_tag_set(priv, 0, MV_FALSE, MVPP2_PRS_UNTAGGED,
+			      MVPP2_PRS_EDSA);
+
+	/* Tagged EDSA entry - place holder */
+	mvpp2_prs_dsa_tag_set(priv, 0, MV_FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+	/* Non-tagged DSA entry - place holder */
+	mvpp2_prs_dsa_tag_set(priv, 0, MV_FALSE, MVPP2_PRS_UNTAGGED,
+			      MVPP2_PRS_DSA);
+
+	/* Tagged DSA entry - place holder */
+	mvpp2_prs_dsa_tag_set(priv, 0, MV_FALSE, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+	/* Non-tagged EDSA ethertype entry - place holder */
+	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, MV_FALSE,
+					MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+
+	/* Tagged EDSA ethertype entry - place holder */
+	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, MV_FALSE,
+					MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+	/* Non-tagged DSA ethertype entry */
+	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, MV_TRUE,
+					MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+
+	/* Tagged DSA ethertype entry */
+	mvpp2_prs_dsa_tag_ethertype_set(priv, 0, MV_TRUE,
+					MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+	/* Set default entry, in case DSA or EDSA tag not found */
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+	pe.index = MVPP2_PE_DSA_DEFAULT;
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+
+	/* Shift 0 bytes */
+	mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+
+	/* Clear all sram ai bits for next iteration */
+	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Match basic ethertypes */
+static MV_32 mvpp2_prs_etype_init(struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+	MV_32 tid;
+
+	/* Ethertype: PPPoE */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, MV_ETH_P_PPP_SES);
+
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
+				 MVPP2_PRS_RI_PPPOE_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	priv->prs_shadow[pe.index].finish = MV_FALSE;
+	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
+				MVPP2_PRS_RI_PPPOE_MASK);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Ethertype: ARP */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, MV_ETH_P_ARP);
+
+	/* Generate flow in the next iteration */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	priv->prs_shadow[pe.index].finish = MV_TRUE;
+	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
+				MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Ethertype: LBTD */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
+
+	/* Generate flow in the next iteration */
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+				 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+				 MVPP2_PRS_RI_CPU_CODE_MASK |
+				 MVPP2_PRS_RI_UDF3_MASK);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	priv->prs_shadow[pe.index].finish = MV_TRUE;
+	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+				MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+				MVPP2_PRS_RI_CPU_CODE_MASK |
+				MVPP2_PRS_RI_UDF3_MASK);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Ethertype: IPv4 without options */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, MV_ETH_P_IP);
+	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+				     MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+				     MVPP2_PRS_IPV4_HEAD_MASK |
+				     MVPP2_PRS_IPV4_IHL_MASK);
+
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Skip eth_type + 4 bytes of IP header */
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	priv->prs_shadow[pe.index].finish = MV_FALSE;
+	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
+				MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Ethertype: IPv4 with options */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	pe.index = tid;
+
+	/* Clear tcam data before updating */
+	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
+	pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
+
+	mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+				     MVPP2_PRS_IPV4_HEAD,
+				     MVPP2_PRS_IPV4_HEAD_MASK);
+
+	/* Clear ri before updating */
+	pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+	pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	priv->prs_shadow[pe.index].finish = MV_FALSE;
+	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
+				MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Ethertype: IPv6 without options */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, MV_ETH_P_IPV6);
+
+	/* Skip DIP of IPV6 header */
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+				 MVPP2_MAX_L3_ADDR_SIZE,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	priv->prs_shadow[pe.index].finish = MV_FALSE;
+	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
+				MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+	pe.index = MVPP2_PE_ETH_TYPE_UN;
+
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Generate flow in the next iteration */
+	mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Set L3 offset even if it's an unknown L3 */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+	priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+	priv->prs_shadow[pe.index].finish = MV_TRUE;
+	mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
+				MVPP2_PRS_RI_L3_PROTO_MASK);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	return 0;
+}
+
+/* Configure vlan entries and detect up to 2 successive VLAN tags.
+ * Possible options:
+ * 0x8100, 0x88A8
+ * 0x8100, 0x8100
+ * 0x8100
+ * 0x88A8
+ */
+static MV_32 mvpp2_prs_vlan_init(struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+	MV_32 err;
+
+	/* Double VLAN: 0x8100, 0x88A8 */
+	err = mvpp2_prs_double_vlan_add(priv, MV_ETH_P_8021Q, MV_ETH_P_8021AD,
+					MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Double VLAN: 0x8100, 0x8100 */
+	err = mvpp2_prs_double_vlan_add(priv, MV_ETH_P_8021Q, MV_ETH_P_8021Q,
+					MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Single VLAN: 0x88a8 */
+	err = mvpp2_prs_vlan_add(priv, MV_ETH_P_8021AD,
+				 MVPP2_PRS_SINGLE_VLAN_AI,
+				 MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Single VLAN: 0x8100 */
+	err = mvpp2_prs_vlan_add(priv, MV_ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
+				 MVPP2_PRS_PORT_MASK);
+	if (err)
+		return err;
+
+	/* Set default double vlan entry */
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+	pe.index = MVPP2_PE_VLAN_DBL;
+
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+	/* Clear ai for next iterations */
+	mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+				 MVPP2_PRS_RI_VLAN_MASK);
+
+	mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
+				 MVPP2_PRS_DBL_VLAN_AI_BIT);
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* Set default vlan none entry */
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+	pe.index = MVPP2_PE_VLAN_NONE;
+
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+				 MVPP2_PRS_RI_VLAN_MASK);
+
+	/* Unmask all ports */
+	mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	return 0;
+}
+
+/* Set entries for PPPoE ethertype */
+static MV_32 mvpp2_prs_pppoe_init(struct mvpp2 *priv)
+{
+	struct mvpp2_prs_entry pe;
+	MV_32 tid;
+
+	/* IPv4 over PPPoE with options */
+	tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+					MVPP2_PE_LAST_FREE_TID);
+	if (tid < 0)
+		return tid;
+
+	mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+	mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+	pe.index = tid;
+
+	mvpp2_prs_match_etype(&pe, 0, MV_PPP_IP);
+
+	mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+	mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+				 MVPP2_PRS_RI_L3_PROTO_MASK);
+	/* Skip eth_type + 4 bytes of IP header */
+	mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+				 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+	/* Set L3 offset */
+	mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+				  MVPP2_ETH_TYPE_LEN,
+				  MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+	/* Update shadow table and hw entry */
+	mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+	mvpp2_prs_hw_write(priv, &pe);
+
+	/* IPv4 over PPPoE without options */
options */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + pe.index = tid; + + mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL, + MVPP2_PRS_IPV4_HEAD_MASK | + MVPP2_PRS_IPV4_IHL_MASK); + + /* Clear ri before updating */ + pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0; + pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0; + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* IPv6 over PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_match_etype(&pe, 0, MV_PPP_IPV6); + + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6, + MVPP2_PRS_RI_L3_PROTO_MASK); + /* Skip eth_type + 4 bytes of IPv6 header */ + mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L3 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + /* Non-IP over PPPoE */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE); + pe.index = tid; + + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN, + MVPP2_PRS_RI_L3_PROTO_MASK); + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + /* Set L3 offset even if it's unknown L3 */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3, + MVPP2_ETH_TYPE_LEN, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Initialize entries for IPv4 */ +static MV_32 mvpp2_prs_ip4_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + MV_32 err; + + /* Set entries for TCP, UDP and IGMP over IPv4 */ + err = mvpp2_prs_ip4_proto(priv, MV_IPPR_TCP, MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip4_proto(priv, MV_IPPR_UDP, MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip4_proto(priv, MV_IPPR_IGMP, + MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + if (err) + return err; + + /* IPv4 Broadcast */ + err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST); + if (err) + return err; + + /* IPv4 Multicast */ + err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST); + if (err) + return err; + + /* Default IPv4 entry for unknown protocols */ + mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = MVPP2_PE_IP4_PROTO_UN; + + /* Set next lu to IPv4 */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4); + mvpp2_prs_sram_shift_set(&pe, 12, 
MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + /* Set L4 offset */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(mvpp2_iphdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv4 entry for unicast address */ + mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4); + pe.index = MVPP2_PE_IP4_ADDR_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT, + MVPP2_PRS_IPV4_DIP_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Initialize entries for IPv6 */ +static MV_32 mvpp2_prs_ip6_init(struct mvpp2 *priv) +{ + struct mvpp2_prs_entry pe; + MV_32 tid, err; + + /* Set entries for TCP, UDP and ICMP over IPv6 */ + err = mvpp2_prs_ip6_proto(priv, MV_IPPR_TCP, + MVPP2_PRS_RI_L4_TCP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip6_proto(priv, MV_IPPR_UDP, + MVPP2_PRS_RI_L4_UDP, + MVPP2_PRS_RI_L4_PROTO_MASK); + if (err) + return err; + + err = mvpp2_prs_ip6_proto(priv, MV_IPPR_ICMPV6, + MVPP2_PRS_RI_CPU_CODE_RX_SPEC | + MVPP2_PRS_RI_UDF3_RX_SPECIAL, + MVPP2_PRS_RI_CPU_CODE_MASK | + MVPP2_PRS_RI_UDF3_MASK); + if (err) + return err; + + /* IPv4 is the last header. 
This is a similar case to 6-TCP or 17-UDP */ + /* Result Info: UDF7=1, DS lite */ + err = mvpp2_prs_ip6_proto(priv, MV_IPPR_IPIP, + MVPP2_PRS_RI_UDF7_IP6_LITE, + MVPP2_PRS_RI_UDF7_MASK); + if (err) + return err; + + /* IPv6 multicast */ + err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST); + if (err) + return err; + + /* Entry for checking hop limit */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + MVPP2_PE_LAST_FREE_TID); + if (tid < 0) + return tid; + + mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = tid; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN | + MVPP2_PRS_RI_DROP_MASK, + MVPP2_PRS_RI_L3_PROTO_MASK | + MVPP2_PRS_RI_DROP_MASK); + + mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK); + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unknown protocols */ + mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_PROTO_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + /* Set L4 offset relative to our current place */ + mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4, + sizeof(mvpp2_ipv6hdr) - 4, + MVPP2_PRS_SRAM_OP_SEL_UDF_ADD); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unknown ext protocols */ + mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_EXT_PROTO_UN; + + /* Finished: go to flowid generation */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS); + mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER, + MVPP2_PRS_RI_L4_PROTO_MASK); + + mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT, + MVPP2_PRS_IPV6_EXT_AI_BIT); + /* Unmask all ports */ + mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4); + mvpp2_prs_hw_write(priv, &pe); + + /* Default IPv6 entry for unicast address */ + mvpp2_memset(&pe, 0, sizeof(struct mvpp2_prs_entry)); + mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6); + pe.index = MVPP2_PE_IP6_ADDR_UN; + + /* Finished: go to IPv6 again */ + mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6); + mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST, + MVPP2_PRS_RI_L3_ADDR_MASK); + mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT, + MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Shift back to IPV6 NH */ + mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT); + /* Unmask all ports */ +
mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK); + + /* Update shadow table and hw entry */ + mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6); + mvpp2_prs_hw_write(priv, &pe); + + return 0; +} + +/* Parser default initialization */ +MV_32 mvpp2_prs_default_init(struct mvpp2 *priv) +{ + MV_32 err, index, i; + + /* Enable tcam table */ + mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK); + + /* Clear all tcam and sram entries */ + for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) { + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index); + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0); + + mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index); + for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0); + } + + /* Invalidate all tcam entries */ + for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) + mvpp2_prs_hw_inv(priv, index); + + /* Always start from lookup = 0 */ + for (index = 0; index < MVPP2_MAX_PORTS; index++) + mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH, + MVPP2_PRS_PORT_LU_MAX, 0); + + mvpp2_prs_def_flow_init(priv); + + mvpp2_prs_mh_init(priv); + + mvpp2_prs_mac_init(priv); + + mvpp2_prs_dsa_init(priv); + + err = mvpp2_prs_etype_init(priv); + if (err) + return err; + + err = mvpp2_prs_vlan_init(priv); + if (err) + return err; + + err = mvpp2_prs_pppoe_init(priv); + if (err) + return err; + + err = mvpp2_prs_ip6_init(priv); + if (err) + return err; + + err = mvpp2_prs_ip4_init(priv); + if (err) + return err; + + return 0; +} + +/* Compare MAC DA with tcam entry data */ +static MV_BOOL mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe, + const MV_U8 *da, MV_U8 *mask) +{ + MV_U8 tcam_byte, tcam_mask; + MV_32 index; + + for (index = 0; index < MV_ETH_ALEN; index++) { + mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask); + if (tcam_mask != mask[index]) + return MV_FALSE; + + if ((tcam_mask & tcam_byte) != (da[index] & mask[index])) + return MV_FALSE; + } + + return MV_TRUE; +} + +/* Find tcam entry with matched pair <MAC DA, port> */ +static struct mvpp2_prs_entry * +mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, MV_32 pmap, const MV_U8 *da, + MV_U8 *mask, MV_32 udf_type) +{ + struct mvpp2_prs_entry *pe; + MV_32 tid; + + pe = mvpp2_alloc(sizeof(*pe)); + if (!pe) + return MVPP2_NULL; + mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); + + /* Go through all the entries with MVPP2_PRS_LU_MAC */ + for (tid = MVPP2_PE_FIRST_FREE_TID; + tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + MV_U32 entry_pmap; + + if (!priv->prs_shadow[tid].valid || + (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || + (priv->prs_shadow[tid].udf != udf_type)) + continue; + + pe->index = tid; + mvpp2_prs_hw_read(priv, pe); + entry_pmap = mvpp2_prs_tcam_port_map_get(pe); + + if (mvpp2_prs_mac_range_equals(pe, da, mask) && + entry_pmap == pmap) + return pe; + } + mvpp2_free(pe); + + return MVPP2_NULL; +} + +/* Update parser's mac da entry */ +MV_32 mvpp2_prs_mac_da_accept(struct mvpp2 *priv, MV_32 port, + const MV_U8 *da, MV_BOOL add) +{ + struct mvpp2_prs_entry *pe; + MV_U32 pmap, len, ri; + MV_U8 mask[MV_ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; + MV_32 tid; + + /* Scan TCAM and see if entry with this <MAC DA, port> already exists */ + pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask, + MVPP2_PRS_UDF_MAC_DEF); + + /* No such entry */ + if (!pe) { + if (!add) + return 0; + + /* Create new TCAM entry */ + /* Find first range mac entry */ + for (tid =
MVPP2_PE_FIRST_FREE_TID; + tid <= MVPP2_PE_LAST_FREE_TID; tid++) + if (priv->prs_shadow[tid].valid && + (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) && + (priv->prs_shadow[tid].udf == + MVPP2_PRS_UDF_MAC_RANGE)) + break; + + /* Go through all the entries from first to last */ + tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID, + tid - 1); + if (tid < 0) + return tid; + + pe = mvpp2_alloc(sizeof(*pe)); + if (!pe) + return -1; + mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC); + pe->index = tid; + + /* Mask all ports */ + mvpp2_prs_tcam_port_map_set(pe, 0); + } + + /* Update port mask */ + mvpp2_prs_tcam_port_set(pe, port, add); + + /* Invalidate the entry if no ports are left enabled */ + pmap = mvpp2_prs_tcam_port_map_get(pe); + if (pmap == 0) { + if (add) { + mvpp2_free(pe); + return -1; + } + mvpp2_prs_hw_inv(priv, pe->index); + priv->prs_shadow[pe->index].valid = MV_FALSE; + mvpp2_free(pe); + return 0; + } + + /* Continue - set next lookup */ + mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA); + + /* Set match on DA */ + len = MV_ETH_ALEN; + while (len--) + mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff); + + /* Set result info bits */ + if (mvpp2_is_broadcast_ether_addr(da)) + ri = MVPP2_PRS_RI_L2_BCAST; + else if (mvpp2_is_multicast_ether_addr(da)) + ri = MVPP2_PRS_RI_L2_MCAST; + else + ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK; + + mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK | + MVPP2_PRS_RI_MAC_ME_MASK); + mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK | + MVPP2_PRS_RI_MAC_ME_MASK); + + /* Shift to ethertype */ + mvpp2_prs_sram_shift_set(pe, 2 * MV_ETH_ALEN, + MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD); + + /* Update shadow table and hw entry */ + priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF; + mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC); + mvpp2_prs_hw_write(priv, pe); + + mvpp2_free(pe); + + return 0; +} + +/* Delete all the port's simple (not range) multicast entries */ +MV_VOID mvpp2_prs_mcast_del_all(struct mvpp2 *priv, MV_32 port) +{ + struct mvpp2_prs_entry pe; + MV_32 index, tid; + + for (tid = MVPP2_PE_FIRST_FREE_TID; + tid <= MVPP2_PE_LAST_FREE_TID; tid++) { + MV_U8 da[MV_ETH_ALEN], da_mask[MV_ETH_ALEN]; + + if (!priv->prs_shadow[tid].valid || + (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) || + (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF)) + continue; + + /* Only simple mac entries */ + pe.index = tid; + mvpp2_prs_hw_read(priv, &pe); + + /* Read mac addr from entry */ + for (index = 0; index < MV_ETH_ALEN; index++) + mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index], + &da_mask[index]); + + if (mvpp2_is_multicast_ether_addr(da) && + !mvpp2_is_broadcast_ether_addr(da)) + /* Delete this entry */ + mvpp2_prs_mac_da_accept(priv, port, da, MV_FALSE); + } +} + +MV_32 mvpp2_prs_tag_mode_set(struct mvpp2 *priv, MV_32 port, MV_32 type) +{ + switch (type) { + case MVPP2_TAG_TYPE_EDSA: + /* Add port to EDSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, MV_TRUE, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, MV_TRUE, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + /* Remove port from DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, MV_FALSE, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, MV_FALSE, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + break; + + case MVPP2_TAG_TYPE_DSA: + /* Add port to DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, MV_TRUE, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, MV_TRUE, + MVPP2_PRS_UNTAGGED,
MVPP2_PRS_DSA); + /* Remove port from EDSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, MV_FALSE, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, MV_FALSE, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + break; + + case MVPP2_TAG_TYPE_MH: + case MVPP2_TAG_TYPE_NONE: + /* Remove port from EDSA and DSA entries */ + mvpp2_prs_dsa_tag_set(priv, port, MV_FALSE, + MVPP2_PRS_TAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, MV_FALSE, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA); + mvpp2_prs_dsa_tag_set(priv, port, MV_FALSE, + MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA); + mvpp2_prs_dsa_tag_set(priv, port, MV_FALSE, + MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA); + break; + + default: + if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA)) + return MVPP2_EINVAL; + } + + return 0; +} + +/* Set prs flow for the port */ +MV_32 mvpp2_prs_def_flow(struct mvpp2_port *port) +{ + struct mvpp2_prs_entry *pe; + MV_32 tid; + + pe = mvpp2_prs_flow_find(port->priv, port->id); + + /* No such entry exists */ + if (!pe) { + /* Go through all the entries from last to first */ + tid = mvpp2_prs_tcam_first_free(port->priv, + MVPP2_PE_LAST_FREE_TID, + MVPP2_PE_FIRST_FREE_TID); + if (tid < 0) + return tid; + + pe = mvpp2_alloc(sizeof(*pe)); + if (!pe) + return MVPP2_ENOMEM; + + mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS); + pe->index = tid; + + /* Set flow ID */ + mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK); + mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1); + + /* Update shadow table */ + mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS); + } + + mvpp2_prs_tcam_port_map_set(pe, (1 << port->id)); + mvpp2_prs_hw_write(port->priv, pe); + mvpp2_free(pe); + + return 0; +} + +/* Classifier configuration routines */ + +/* Update classification flow table registers */ +static MV_VOID mvpp2_cls_flow_write(struct mvpp2 *priv, + struct mvpp2_cls_flow_entry *fe) +{ + mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG, fe->data[0]); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG, fe->data[1]); + mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG, fe->data[2]); +} + +/* Update classification lookup table register */ +MV_VOID mvpp2_cls_lookup_write(struct mvpp2 *priv, + struct mvpp2_cls_lookup_entry *le) +{ + MV_U32 val; + + val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid; + mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val); + mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data); +} + +/* Classifier default initialization */ +MV_VOID mvpp2_cls_init(struct mvpp2 *priv) +{ + struct mvpp2_cls_lookup_entry le; + struct mvpp2_cls_flow_entry fe; + MV_32 index; + + /* Enable classifier */ + mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK); + + /* Clear classifier flow table */ + mvpp2_memset(&fe.data, 0, MVPP2_CLS_FLOWS_TBL_DATA_WORDS); + for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) { + fe.index = index; + mvpp2_cls_flow_write(priv, &fe); + } + + /* Clear classifier lookup table */ + le.data = 0; + for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) { + le.lkpid = index; + le.way = 0; + mvpp2_cls_lookup_write(priv, &le); + + le.way = 1; + mvpp2_cls_lookup_write(priv, &le); + } +} + +MV_VOID mvpp2_cls_port_config(struct mvpp2_port *port) +{ + struct mvpp2_cls_lookup_entry le; + MV_U32 val; + + /* Set way for the port */ + val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG); + val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id); + mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val); + + /* Pick the entry to be accessed in
lookup ID decoding table + * according to the way and lkpid. + */ + le.lkpid = port->id; + le.way = 0; + le.data = 0; + + /* Set initial CPU queue for receiving packets */ + le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK; + le.data |= port->first_rxq; + + /* Disable classification engines */ + le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK; + + /* Update lookup ID table entry */ + mvpp2_cls_lookup_write(port->priv, &le); +} + +/* Set CPU queue number for oversize packets */ +MV_VOID mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port) +{ + + mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id), + port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK); + +#ifdef MVPP2_V1 + MV_U32 val; + + mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id), + (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS)); + + val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG); + val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id); + mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val); +#endif +} + +/* BM helper routines */ + + +MV_VOID mvpp2_bm_pool_hw_create(struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool, MV_32 size) +{ +#ifdef MVPP2_V1 + MV_U32 val; + + mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), + bm_pool->phys_addr); + mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size); + + val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id)); + val |= MVPP2_BM_START_MASK; + mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val); + + bm_pool->type = MVPP2_BM_FREE; + bm_pool->size = size; + bm_pool->pkt_size = 0; + bm_pool->buf_num = 0; +#else + bm_pool->size = size; + + mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id), + lower_32_bits(bm_pool->phys_addr)); + + mvpp2_write(priv, MVPP22_BM_POOL_BASE_HIGH_REG, + (upper_32_bits(bm_pool->phys_addr) & + MVPP22_BM_POOL_BASE_HIGH_REG)); + mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), + bm_pool->size); +#endif +} + +/* Set pool buffer size */ +MV_VOID mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv, + struct mvpp2_bm_pool *bm_pool, + MV_32 buf_size) +{ + MV_U32 val; + + bm_pool->buf_size = buf_size; + + val = MVPP2_ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET); + mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val); +} + +MV_VOID mvpp2_bm_stop(struct mvpp2 *priv, MV_32 pool) +{ + MV_U32 val, i; + + for (i = 0; i < MVPP2_BM_SIZE; i++) + mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(0)); + + val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool)); + val |= MVPP2_BM_STOP_MASK; + mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool), val); + +} + +MV_VOID mvpp2_bm_irq_clear(struct mvpp2 *priv, MV_32 pool) +{ + /* Mask all BM interrupts */ + mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(pool), 0); + /* Clear BM cause register */ + mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(pool), 0); +} + +/* Attach long pool to rxq */ +MV_VOID mvpp2_rxq_long_pool_set(struct mvpp2_port *port, + MV_32 lrxq, MV_32 long_pool) +{ + MV_U32 val; + MV_32 prxq; + + /* Get queue physical ID */ + prxq = port->rxqs[lrxq].id; + + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~MVPP2_RXQ_POOL_LONG_MASK; + val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) & + MVPP2_RXQ_POOL_LONG_MASK); + + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); +} + +/* Attach short pool to rxq */ +MV_VOID mvpp2_rxq_short_pool_set(struct mvpp2_port *port, + MV_32 lrxq, MV_32 short_pool) +{ + MV_U32 val; + MV_32 prxq; + + /* Get queue physical ID */ + prxq = port->rxqs[lrxq].id; + + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~MVPP2_RXQ_POOL_SHORT_MASK; + val |=
((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) & + MVPP2_RXQ_POOL_SHORT_MASK); + + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); +} + +/* Release multicast buffer */ +MV_VOID mvpp2_bm_pool_mc_put(struct mvpp2_port *port, MV_32 pool, + MV_U32 buf_phys_addr, MV_U32 buf_virt_addr, + MV_32 mc_id) +{ + MV_U32 val = 0; + + val |= (mc_id & MVPP2_BM_MC_ID_MASK); + mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val); + + mvpp2_bm_pool_put(port->priv, pool, + buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK, + buf_virt_addr); +} + +/* Refill BM pool */ +MV_VOID mvpp2_pool_refill(struct mvpp2_port *port, MV_U32 bm, + MV_U32 phys_addr, MV_U32 cookie) +{ + MV_32 pool = mvpp2_bm_cookie_pool_get(bm); + + mvpp2_bm_pool_put(port->priv, pool, phys_addr, cookie); +} + +/* Mask the current CPU's Rx/Tx interrupts */ +MV_VOID mvpp2_interrupts_mask(MV_VOID *arg) +{ + struct mvpp2_port *port = arg; + + mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0); +} + +/* Unmask the current CPU's Rx/Tx interrupts */ +MV_VOID mvpp2_interrupts_unmask(MV_VOID *arg) +{ + struct mvpp2_port *port = arg; + + mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), + (MVPP2_CAUSE_MISC_SUM_MASK | + MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); +} + +/* Port configuration routines */ + +static MV_VOID mvpp2_port_mii_set(struct mvpp2_port *port) +{ + MV_U32 val; + + val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_2_REG); + + switch (port->phy_interface) { + case MV_MODE_SGMII: + val |= MVPP2_GMAC_INBAND_AN_MASK; + break; + case MV_MODE_RGMII: + val |= MVPP2_GMAC_PORT_RGMII_MASK; + /* fall through */ + default: + val &= ~MVPP2_GMAC_PCS_ENABLE_MASK; + } + + mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_2_REG, val); +} + +static MV_VOID mvpp2_port_fc_adv_enable(struct mvpp2_port *port) +{ + MV_U32 val; + + val = mvpp2_gmac_read(port, MVPP2_GMAC_AUTONEG_CONFIG); + val |= MVPP2_GMAC_FC_ADV_EN; + mvpp2_gmac_write(port, MVPP2_GMAC_AUTONEG_CONFIG, val); +} + +MV_VOID mvpp2_port_enable(struct mvpp2_port *port) +{ + MV_U32 val; + + val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG); + val |= MVPP2_GMAC_PORT_EN_MASK; + val |= MVPP2_GMAC_MIB_CNTR_EN_MASK; + mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val); +} + +MV_VOID mvpp2_port_disable(struct mvpp2_port *port) +{ + MV_U32 val; + + val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG); + val &= ~(MVPP2_GMAC_PORT_EN_MASK); + mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val); +} + +/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */ +static MV_VOID mvpp2_port_periodic_xon_disable(struct mvpp2_port *port) +{ + MV_U32 val; + + val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_1_REG) & + ~MVPP2_GMAC_PERIODIC_XON_EN_MASK; + mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_1_REG, val); +} + +/* Configure loopback port */ +#ifdef MVPP2_V1 +static MV_VOID mvpp2_port_loopback_set(struct mvpp2_port *port) +{ + MV_U32 val; + + val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_1_REG); + + if (port->speed == SPEED_1000) + val |= MVPP2_GMAC_GMII_LB_EN_MASK; + else + val &= ~MVPP2_GMAC_GMII_LB_EN_MASK; + + if (port->phy_interface == MV_MODE_SGMII) + val |= MVPP2_GMAC_PCS_LB_EN_MASK; + else + val &= ~MVPP2_GMAC_PCS_LB_EN_MASK; + + mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_1_REG, val); +} +#endif + +static MV_VOID mvpp2_port_reset(struct mvpp2_port *port) +{ + MV_U32 val; + + val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_2_REG) & + ~MVPP2_GMAC_PORT_RESET_MASK; + mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_2_REG, val); + + while (mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_2_REG) & + MVPP2_GMAC_PORT_RESET_MASK) + continue; +} + +/* Set defaults to the MVPP2
port */ +MV_VOID mvpp2_defaults_set(struct mvpp2_port *port) +{ + MV_32 tx_port_num, val, queue, ptxq; + +#ifdef MVPP2_V1 + /* Configure port to loopback if needed */ + if (port->flags & MVPP2_F_LOOPBACK) + mvpp2_port_loopback_set(port); + + /* Update TX FIFO MIN Threshold */ + val = mvpp2_gmac_read(port, MVPP2_GMAC_PORT_FIFO_CFG_1_REG); + val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK; + /* Min. TX threshold must be less than minimal packet length */ + val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2); + mvpp2_gmac_write(port, MVPP2_GMAC_PORT_FIFO_CFG_1_REG, val); +#endif + + /* Disable Legacy WRR, Disable EJP, Release from reset */ + tx_port_num = mvpp2_egress_port(port); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, + tx_port_num); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0); + + /* Close bandwidth for all queues */ + for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) { + ptxq = mvpp2_txq_phys(port->id, queue); + mvpp2_write(port->priv, + MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0); + } + + /* Set refill period to 1 usec, refill tokens + * and bucket size to maximum + */ + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG, + port->priv->tclk / MVPP2_USEC_PER_SEC); + val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG); + val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK; + val |= MVPP2_TXP_REFILL_PERIOD_MASK(1); + val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val); + val = MVPP2_TXP_TOKEN_SIZE_MAX; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); + + /* Set MaximumLowLatencyPacketSize value to 256 */ + mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id), + MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK | + MVPP2_RX_LOW_LATENCY_PKT_SIZE(256)); + +#ifdef MVPP2_V1 + /* Enable Rx cache snoop */ + MV_32 lrxq; + for (lrxq = 0; lrxq < rxq_number; lrxq++) { + queue = port->rxqs[lrxq].id; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); + val |= MVPP2_SNOOP_PKT_SIZE_MASK | + MVPP2_SNOOP_BUF_HDR_MASK; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); + } +#else + /* Mask all interrupts to all present CPUs */ + mvpp2_interrupts_disable(port, 0x1); +#endif + +} + +/* Enable/disable receiving packets */ +MV_VOID mvpp2_ingress_enable(struct mvpp2_port *port) +{ + MV_U32 val; + MV_32 lrxq, queue; + + for (lrxq = 0; lrxq < rxq_number; lrxq++) { + queue = port->rxqs[lrxq].id; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); + val &= ~MVPP2_RXQ_DISABLE_MASK; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); + } +} + +MV_VOID mvpp2_ingress_disable(struct mvpp2_port *port) +{ + MV_U32 val; + MV_32 lrxq, queue; + + for (lrxq = 0; lrxq < rxq_number; lrxq++) { + queue = port->rxqs[lrxq].id; + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue)); + val |= MVPP2_RXQ_DISABLE_MASK; + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val); + } +} + +/* Enable transmit via physical egress queue + * - HW starts to take descriptors from DRAM + */ +MV_VOID mvpp2_egress_enable(struct mvpp2_port *port) +{ + MV_U32 qmap; + MV_32 queue; + MV_32 tx_port_num = mvpp2_egress_port(port); + + /* Enable all initialized TXs.
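+ * (A TXQ counts as initialized here once its descriptor ring has been + * allocated, i.e. txq->descs != MVPP2_NULL, which is exactly what the + * loop below checks when building the enable bitmap.)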
*/ + qmap = 0; + for (queue = 0; queue < txq_number; queue++) { + struct mvpp2_tx_queue *txq = &port->txqs[queue]; + + if (txq->descs != MVPP2_NULL) + qmap |= (1 << queue); + } + + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap); +} + +/* Disable transmit via physical egress queue + * - HW doesn't take descriptors from DRAM + */ +MV_VOID mvpp2_egress_disable(struct mvpp2_port *port) +{ + MV_U32 reg_data; + MV_32 delay; + MV_32 tx_port_num = mvpp2_egress_port(port); + + /* Issue stop command for active channels only */ + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) & + MVPP2_TXP_SCHED_ENQ_MASK; + if (reg_data != 0) + mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, + (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET)); + + /* Wait for all Tx activity to terminate. */ + delay = 0; + do { + if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) { + mvpp2_printf("Tx stop timed out, status=0x%08x\n", + reg_data); + break; + } + mvpp2_mdelay(1); + delay++; + + /* Check port TX Command register that all + * Tx queues are stopped + */ + reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG); + } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK); +} + +/* Rx descriptors helper methods */ + +/* Set rx queue offset */ +static MV_VOID mvpp2_rxq_offset_set(struct mvpp2_port *port, + MV_32 prxq, MV_32 offset) +{ + MV_U32 val; + + /* Convert offset from bytes to units of 32 bytes */ + offset = offset >> 5; + + val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq)); + val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK; + + /* Offset is in units of 32 bytes */ + val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) & + MVPP2_RXQ_PACKET_OFFSET_MASK); + + mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val); +} + +/* Obtain BM cookie information from descriptor */ +MV_U32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc, MV_32 cpu) +{ + MV_32 pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >> + MVPP2_RXD_BM_POOL_ID_OFFS; + + return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) | + ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS); +} + +/* Tx descriptors helper methods */ + +MV_32 mvpp2_txq_drain_set(struct mvpp2_port *port, MV_32 txq, MV_BOOL en) +{ + MV_U32 reg_val; + MV_32 ptxq = mvpp2_txq_phys(port->id, txq); + + mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, ptxq); + reg_val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG); + + if (en) + reg_val |= MVPP2_TXQ_DRAIN_EN_MASK; + else + reg_val &= ~MVPP2_TXQ_DRAIN_EN_MASK; + + mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, reg_val); + + return 0; +} + +/* Get number of Tx descriptors waiting to be transmitted by HW */ +MV_32 mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + MV_U32 val; + + mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); + val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); + + return val & MVPP2_TXQ_PENDING_MASK; +} + +/* Get number of occupied aggregated Tx descriptors */ +MV_U32 mvpp2_aggr_txq_pend_desc_num_get(struct mvpp2 *pp2, int cpu) +{ + MV_U32 reg_val; + + reg_val = mvpp2_read(pp2, MVPP2_AGGR_TXQ_STATUS_REG(cpu)); + + return reg_val & MVPP2_AGGR_TXQ_PENDING_MASK; +} + +/* Get pointer to next Tx descriptor to be processed (sent) by HW */ +struct mvpp2_tx_desc * +mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq) +{ + MV_32 tx_desc = txq->next_desc_to_proc; + + txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc); + return txq->descs + tx_desc; +} + +/* Update HW
with number of aggregated Tx descriptors to be sent */ +MV_VOID mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, MV_32 pending) +{ + /* aggregated access - relevant TXQ number is written in TX desc */ + mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending); +} + +/* Check if there are enough free descriptors in aggregated txq. + * If not, update the number of occupied descriptors and repeat the check. + */ +MV_32 mvpp2_aggr_desc_num_check(struct mvpp2 *priv, + struct mvpp2_tx_queue *aggr_txq, MV_32 num, + MV_32 cpu) +{ + if ((aggr_txq->count + num) > aggr_txq->size) { + /* Update number of occupied aggregated Tx descriptors */ + MV_U32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu)); + + aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK; + } + + if ((aggr_txq->count + num) > aggr_txq->size) + return MVPP2_ENOMEM; + + return 0; +} + +/* Reserved Tx descriptors allocation request */ +MV_32 mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv, + struct mvpp2_tx_queue *txq, MV_32 num) +{ + MV_U32 val; + + val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num; + mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val); + + val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG); + + return val & MVPP2_TXQ_RSVD_RSLT_MASK; +} + +/* Release the last allocated Tx descriptor. Useful to handle DMA + * mapping failures in the Tx path. + */ +MV_VOID mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq) +{ + if (txq->next_desc_to_proc == 0) + txq->next_desc_to_proc = txq->last_desc - 1; + else + txq->next_desc_to_proc--; +} + +/* Set Tx descriptors fields relevant for CSUM calculation */ +MV_U32 mvpp2_txq_desc_csum(MV_32 l3_offs, MV_32 l3_proto, + MV_32 ip_hdr_len, MV_32 l4_proto) +{ + MV_U32 command; + + /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk, + * G_L4_chk, L4_type required only for checksum calculation + */ + command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT); + command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT); + command |= MVPP2_TXD_IP_CSUM_DISABLE; + + if (l3_proto == mvpp2_swab16(MV_ETH_P_IP)) { + command &= ~MVPP2_TXD_IP_CSUM_DISABLE; /* enable IPv4 csum */ + command &= ~MVPP2_TXD_L3_IP6; /* enable IPv4 */ + } else { + command |= MVPP2_TXD_L3_IP6; /* enable IPv6 */ + } + + if (l4_proto == MV_IPPR_TCP) { + command &= ~MVPP2_TXD_L4_UDP; /* enable TCP */ + command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ + } else if (l4_proto == MV_IPPR_UDP) { + command |= MVPP2_TXD_L4_UDP; /* enable UDP */ + command &= ~MVPP2_TXD_L4_CSUM_FRAG; /* generate L4 csum */ + } else { + command |= MVPP2_TXD_L4_CSUM_NOT; + } + + return command; +} + +MV_VOID mvpp2_txq_sent_counter_clear(MV_VOID *arg) +{ + struct mvpp2_port *port = arg; + MV_32 queue; + + for (queue = 0; queue < txq_number; queue++) { + MV_32 id = port->txqs[queue].id; + + mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id)); + } +} + +/* Change maximum receive size of the port */ +MV_VOID mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port) +{ + MV_U32 val; + + val = mvpp2_gmac_read(port, MVPP2_GMAC_CTRL_0_REG); + val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK; + val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) << + MVPP2_GMAC_MAX_RX_SIZE_OFFS); + mvpp2_gmac_write(port, MVPP2_GMAC_CTRL_0_REG, val); +} + +/* Set max sizes for Tx queues */ +MV_VOID mvpp2_txp_max_tx_size_set(struct mvpp2_port *port) +{ + MV_U32 val, size, mtu; + MV_32 txq, tx_port_num; + + mtu = port->pkt_size * 8; + if (mtu > MVPP2_TXP_MTU_MAX) + mtu = MVPP2_TXP_MTU_MAX; + + /* WA for wrong Token bucket update: Set MTU value = 3*real MTU value */ + mtu = 3 * mtu; + + /* Indirect access to registers */ 
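+ /* The TXP scheduler registers are banked per egress port: writing the + * port number to MVPP2_TXP_SCHED_PORT_INDEX_REG selects which port's + * MTU and token-size registers the reads/writes below operate on. + */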
+ tx_port_num = mvpp2_egress_port(port); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + + /* Set MTU */ + val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG); + val &= ~MVPP2_TXP_MTU_MAX; + val |= mtu; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val); + + /* TXP token size and all TXQs token size must be larger than the MTU */ + val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG); + size = val & MVPP2_TXP_TOKEN_SIZE_MAX; + if (size < mtu) { + size = mtu; + val &= ~MVPP2_TXP_TOKEN_SIZE_MAX; + val |= size; + mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val); + } + + for (txq = 0; txq < txq_number; txq++) { + val = mvpp2_read(port->priv, + MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq)); + size = val & MVPP2_TXQ_TOKEN_SIZE_MAX; + + if (size < mtu) { + size = mtu; + val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX; + val |= size; + mvpp2_write(port->priv, + MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq), + val); + } + } +} + +/* Set the number of packets that will be received before Rx interrupt + * will be generated by HW. + */ +MV_VOID mvpp2_rx_pkts_coal_set(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq, MV_U32 pkts) +{ + MV_U32 val; + + val = (pkts & MVPP2_OCCUPIED_THRESH_MASK); + mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val); + + rxq->pkts_coal = pkts; +} + +/* Set the time delay in usec before Rx interrupt */ +MV_VOID mvpp2_rx_time_coal_set(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq, MV_U32 usec) +{ + MV_U32 val; + + val = (port->priv->tclk / MVPP2_USEC_PER_SEC) * usec; + mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val); + + rxq->time_coal = usec; +} + +/* Rx/Tx queue initialization/cleanup methods */ + +MV_VOID mvpp2_rxq_hw_init(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + rxq->last_desc = rxq->size - 1; + + /* Zero occupied and non-occupied counters - direct access */ + mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); + + /* Set Rx descriptors queue starting address - indirect access */ + mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); +#ifdef MVPP2_V1 + mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, + rxq->descs_phys >> MVPP21_DESC_ADDR_SHIFT); +#else + mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, + rxq->descs_phys >> MVPP22_DESC_ADDR_SHIFT); +#endif + mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size); + mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0); + + /* Set Offset */ + mvpp2_rxq_offset_set(port, rxq->id, MVPP2_RXQ_OFFSET); + + /* Set coalescing pkts and time */ + mvpp2_rx_pkts_coal_set(port, rxq, MVPP2_RX_COAL_PKTS); + mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal); + + /* Add number of descriptors ready for receiving packets */ + mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size); +} + +/* Push packets received by the RXQ to BM pool */ +MV_VOID mvpp2_rxq_drop_pkts(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq, + MV_32 cpu) +{ + MV_32 rx_received; + + rx_received = mvpp2_rxq_received(port, rxq->id); + if (!rx_received) + return; + +#ifdef MVPP2_V1 + MV_32 i; + for (i = 0; i < rx_received; i++) { + struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq); + MV_U32 bm = mvpp2_bm_cookie_build(rx_desc, cpu); + + mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr, + rx_desc->buf_cookie); + } +#endif + mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received); +} + +MV_VOID mvpp2_rxq_hw_deinit(struct mvpp2_port *port, + struct mvpp2_rx_queue *rxq) +{ + rxq->descs = MVPP2_NULL; + rxq->last_desc = 0; +
rxq->next_desc_to_proc = 0; + rxq->descs_phys = 0; + + /* Clear Rx descriptors queue starting address and size; + * free descriptor number + */ + mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0); + mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id); + mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0); + mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0); +} + +MV_VOID mvpp2_txq_hw_init(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + MV_32 desc, desc_per_txq, tx_port_num; + MV_U32 val; + + txq->last_desc = txq->size - 1; + + /* Set Tx descriptors queue starting address - indirect access */ + mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys); + mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size & + MVPP2_TXQ_DESC_SIZE_MASK); + mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0); + mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG, + txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET); + val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG); + val &= ~MVPP2_TXQ_PENDING_MASK; + mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val); + + /* Calculate base address in prefetch buffer. We reserve 16 descriptors + * for each existing TXQ. + * TCONTS for PON port must be continuous from 0 to MVPP2_MAX_TCONT + * GBE ports assumed to be continuous from 0 to MVPP2_MAX_PORTS + */ + desc_per_txq = 16; + desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) + + (txq->log_id * desc_per_txq); + + mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, + MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 | + MVPP2_PREF_BUF_THRESH(desc_per_txq/2)); + + /* WRR / EJP configuration - indirect access */ + tx_port_num = mvpp2_egress_port(port); + mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num); + + val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id)); + val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK; + val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1); + val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK; + mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val); + + val = MVPP2_TXQ_TOKEN_SIZE_MAX; + mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id), + val); +} + +MV_VOID mvpp2_txq_hw_deinit(struct mvpp2_port *port, + struct mvpp2_tx_queue *txq) +{ + txq->descs = MVPP2_NULL; + txq->last_desc = 0; + txq->next_desc_to_proc = 0; + txq->descs_phys = 0; + + /* Set minimum bandwidth for disabled TXQs */ + mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0); + + /* Set Tx descriptors queue starting address and size */ + mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id); + mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0); + mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0); +} + +/* Allocate and initialize descriptors for aggr TXQ */ +MV_VOID mvpp2_aggr_txq_hw_init(struct mvpp2_tx_queue *aggr_txq, + MV_32 desc_num, MV_32 cpu, + struct mvpp2 *priv) +{ + aggr_txq->last_desc = aggr_txq->size - 1; + + /* WA: the aggregated TXQ is not reset by HW, so resume from the + * current HW index + */ + aggr_txq->next_desc_to_proc = mvpp2_read(priv, + MVPP2_AGGR_TXQ_INDEX_REG(cpu)); + + /* Set Tx descriptors queue starting address */ + /* indirect access */ +#ifndef MVPP2_V1 + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), aggr_txq->descs_phys + >> MVPP22_DESC_ADDR_SHIFT); +#else + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu), aggr_txq->descs_phys + >> MVPP21_DESC_ADDR_SHIFT); +#endif + mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num + & MVPP2_AGGR_TXQ_DESC_SIZE_MASK); + +} + +/* Enable gmac */ +MV_VOID mvpp2_port_power_up(struct mvpp2_port
*port) +{ + mvpp2_port_mii_set(port); + mvpp2_port_periodic_xon_disable(port); + mvpp2_port_fc_adv_enable(port); + mvpp2_port_reset(port); +} + +/* Initialize Rx FIFOs */ +MV_VOID mvpp2_rx_fifo_init(struct mvpp2 *priv) +{ + MV_32 port; + + for (port = 0; port < MVPP2_MAX_PORTS; port++) { + mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_DATA_SIZE); + mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port), + MVPP2_RX_FIFO_PORT_ATTR_SIZE); + } + + mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG, + MVPP2_RX_FIFO_PORT_MIN_PKT); + mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1); +} + +MV_VOID mv_gop110_netc_active_port(struct mvpp2_port *pp2_port, MV_U32 port, MV_U32 val) +{ + MV_U32 reg; + + reg = mvpp2_rfu1_read(pp2_port->priv, MV_NETCOMP_PORTS_CONTROL_1); + reg &= ~(NETC_PORTS_ACTIVE_MASK(port)); + + val <<= NETC_PORTS_ACTIVE_OFFSET(port); + val &= NETC_PORTS_ACTIVE_MASK(port); + + reg |= val; + + mvpp2_rfu1_write(pp2_port->priv, MV_NETCOMP_PORTS_CONTROL_1, reg); +} + +static MV_VOID mv_gop110_netc_xaui_enable(struct mvpp2_port *pp2_port, MV_U32 port, MV_U32 val) +{ + MV_U32 reg; + + reg = mvpp2_rfu1_read(pp2_port->priv, SD1_CONTROL_1_REG); + reg &= ~SD1_CONTROL_XAUI_EN_MASK; + + val <<= SD1_CONTROL_XAUI_EN_OFFSET; + val &= SD1_CONTROL_XAUI_EN_MASK; + + reg |= val; + + mvpp2_rfu1_write(pp2_port->priv, SD1_CONTROL_1_REG, reg); +} + +static MV_VOID mv_gop110_netc_rxaui0_enable(struct mvpp2_port *pp2_port, MV_U32 port, MV_U32 val) +{ + MV_U32 reg; + + reg = mvpp2_rfu1_read(pp2_port->priv, SD1_CONTROL_1_REG); + reg &= ~SD1_CONTROL_RXAUI0_L23_EN_MASK; + + val <<= SD1_CONTROL_RXAUI0_L23_EN_OFFSET; + val &= SD1_CONTROL_RXAUI0_L23_EN_MASK; + + reg |= val; + + mvpp2_rfu1_write(pp2_port->priv, SD1_CONTROL_1_REG, reg); +} + +static MV_VOID mv_gop110_netc_rxaui1_enable(struct mvpp2_port *pp2_port, MV_U32 port, MV_U32 val) +{ + MV_U32 reg; + + reg = mvpp2_rfu1_read(pp2_port->priv, SD1_CONTROL_1_REG); + reg &= ~SD1_CONTROL_RXAUI1_L45_EN_MASK; + + val <<= SD1_CONTROL_RXAUI1_L45_EN_OFFSET; + val &= SD1_CONTROL_RXAUI1_L45_EN_MASK; + + reg |= val; + + mvpp2_rfu1_write(pp2_port->priv, SD1_CONTROL_1_REG, reg); +} + +static MV_VOID mv_gop110_netc_mii_mode(struct mvpp2_port *pp2_port, MV_U32 port, MV_U32 val) +{ + MV_U32 reg; + + reg = mvpp2_rfu1_read(pp2_port->priv, MV_NETCOMP_CONTROL_0); + reg &= ~NETC_GBE_PORT1_MII_MODE_MASK; + + val <<= NETC_GBE_PORT1_MII_MODE_OFFSET; + val &= NETC_GBE_PORT1_MII_MODE_MASK; + + reg |= val; + + mvpp2_rfu1_write(pp2_port->priv, MV_NETCOMP_CONTROL_0, reg); +} + +static MV_VOID mv_gop110_netc_gop_reset(struct mvpp2_port *pp2_port, MV_U32 val) +{ + MV_U32 reg; + + reg = mvpp2_rfu1_read(pp2_port->priv, MV_GOP_SOFT_RESET_1_REG); + reg &= ~NETC_GOP_SOFT_RESET_MASK; + + val <<= NETC_GOP_SOFT_RESET_OFFSET; + val &= NETC_GOP_SOFT_RESET_MASK; + + reg |= val; + + mvpp2_rfu1_write(pp2_port->priv, MV_GOP_SOFT_RESET_1_REG, reg); +} + +static MV_VOID mv_gop110_netc_gop_clock_logic_set(struct mvpp2_port *pp2_port, MV_U32 val) +{ + MV_U32 reg; + + reg = mvpp2_rfu1_read(pp2_port->priv, MV_NETCOMP_PORTS_CONTROL_0); + reg &= ~NETC_CLK_DIV_PHASE_MASK; + + val <<= NETC_CLK_DIV_PHASE_OFFSET; + val &= NETC_CLK_DIV_PHASE_MASK; + + reg |= val; + + mvpp2_rfu1_write(pp2_port->priv, MV_NETCOMP_PORTS_CONTROL_0, reg); +} + +static MV_VOID mv_gop110_netc_port_rf_reset(struct mvpp2_port *pp2_port, MV_U32 port, MV_U32 val) +{ + MV_U32 reg; + + reg = mvpp2_rfu1_read(pp2_port->priv, MV_NETCOMP_PORTS_CONTROL_1); + reg &= ~(NETC_PORT_GIG_RF_RESET_MASK(port)); + + val <<=
NETC_PORT_GIG_RF_RESET_OFFSET(port); + val &= NETC_PORT_GIG_RF_RESET_MASK(port); + + reg |= val; + + mvpp2_rfu1_write(pp2_port->priv, MV_NETCOMP_PORTS_CONTROL_1, reg); +} + +static MV_VOID mv_gop110_netc_gbe_sgmii_mode_select(struct mvpp2_port *pp2_port, MV_U32 port, + MV_U32 val) +{ + MV_U32 reg, mask, offset; + + if (port == 2) { + mask = NETC_GBE_PORT0_SGMII_MODE_MASK; + offset = NETC_GBE_PORT0_SGMII_MODE_OFFSET; + } else { + mask = NETC_GBE_PORT1_SGMII_MODE_MASK; + offset = NETC_GBE_PORT1_SGMII_MODE_OFFSET; + } + reg = mvpp2_rfu1_read(pp2_port->priv, MV_NETCOMP_CONTROL_0); + reg &= ~mask; + + val <<= offset; + val &= mask; + + reg |= val; + + mvpp2_rfu1_write(pp2_port->priv, MV_NETCOMP_CONTROL_0, reg); +} + +static MV_VOID mv_gop110_netc_bus_width_select(struct mvpp2_port *pp2_port, MV_U32 val) +{ + MV_U32 reg; + + reg = mvpp2_rfu1_read(pp2_port->priv, MV_NETCOMP_PORTS_CONTROL_0); + reg &= ~NETC_BUS_WIDTH_SELECT_MASK; + + val <<= NETC_BUS_WIDTH_SELECT_OFFSET; + val &= NETC_BUS_WIDTH_SELECT_MASK; + + reg |= val; + + mvpp2_rfu1_write(pp2_port->priv, MV_NETCOMP_PORTS_CONTROL_0, reg); +} + +static MV_VOID mv_gop110_netc_sample_stages_timing(struct mvpp2_port *pp2_port, MV_U32 val) +{ + MV_U32 reg; + + reg = mvpp2_rfu1_read(pp2_port->priv, MV_NETCOMP_PORTS_CONTROL_0); + reg &= ~NETC_GIG_RX_DATA_SAMPLE_MASK; + + val <<= NETC_GIG_RX_DATA_SAMPLE_OFFSET; + val &= NETC_GIG_RX_DATA_SAMPLE_MASK; + + reg |= val; + + mvpp2_rfu1_write(pp2_port->priv, MV_NETCOMP_PORTS_CONTROL_0, reg); +} + +static MV_VOID mv_gop110_netc_mac_to_xgmii(struct mvpp2_port *pp2_port, MV_U32 port, + enum mv_netc_phase phase) +{ + switch (phase) { + case MV_NETC_FIRST_PHASE: + /* Set Bus Width to HB mode = 1 */ + mv_gop110_netc_bus_width_select(pp2_port, 1); + /* Select RGMII mode */ + mv_gop110_netc_gbe_sgmii_mode_select(pp2_port, port, + MV_NETC_GBE_XMII); + break; + case MV_NETC_SECOND_PHASE: + /* De-assert the relevant port HB reset */ + mv_gop110_netc_port_rf_reset(pp2_port, port, 1); + break; + } +} + +static MV_VOID mv_gop110_netc_mac_to_sgmii(struct mvpp2_port *pp2_port, MV_U32 port, + enum mv_netc_phase phase) +{ + switch (phase) { + case MV_NETC_FIRST_PHASE: + /* Set Bus Width to HB mode = 1 */ + mv_gop110_netc_bus_width_select(pp2_port, 1); + /* Select SGMII mode */ + if (port >= 1) + mv_gop110_netc_gbe_sgmii_mode_select(pp2_port, port, + MV_NETC_GBE_SGMII); + + /* Configure the sample stages */ + mv_gop110_netc_sample_stages_timing(pp2_port, 0); + /* Configure the ComPhy Selector */ + /* mv_gop110_netc_com_phy_selector_config(netComplex); */ + break; + case MV_NETC_SECOND_PHASE: + /* De-assert the relevant port HB reset */ + mv_gop110_netc_port_rf_reset(pp2_port, port, 1); + break; + } +} + +static MV_VOID mv_gop110_netc_mac_to_rxaui(struct mvpp2_port *pp2_port, MV_U32 port, + enum mv_netc_phase phase, + enum mv_netc_lanes lanes) +{ + /* Currently only RXAUI0 supported */ + if (port != 0) + return; + + switch (phase) { + case MV_NETC_FIRST_PHASE: + /* RXAUI Serdes/s Clock alignment */ + if (lanes == MV_NETC_LANE_23) + mv_gop110_netc_rxaui0_enable(pp2_port, port, 1); + else + mv_gop110_netc_rxaui1_enable(pp2_port, port, 1); + break; + case MV_NETC_SECOND_PHASE: + /* De-assert the relevant port HB reset */ + mv_gop110_netc_port_rf_reset(pp2_port, port, 1); + break; + } +} + +static MV_VOID mv_gop110_netc_mac_to_xaui(struct mvpp2_port *pp2_port, MV_U32 port, + enum mv_netc_phase phase) +{ + switch (phase) { + case MV_NETC_FIRST_PHASE: + /* XAUI Serdes/s Clock alignment */ + mv_gop110_netc_xaui_enable(pp2_port, port,
1); + break; + case MV_NETC_SECOND_PHASE: + /* De-assert the relevant port HB reset */ + mv_gop110_netc_port_rf_reset(pp2_port, port, 1); + break; + } +} + +MV_32 mv_gop110_netc_init(struct mvpp2_port *pp2_port, + MV_U32 net_comp_config, enum mv_netc_phase phase) +{ + MV_U32 c = net_comp_config; + + if (c & MV_NETC_GE_MAC0_RXAUI_L23) + mv_gop110_netc_mac_to_rxaui(pp2_port, 0, phase, MV_NETC_LANE_23); + + if (c & MV_NETC_GE_MAC0_RXAUI_L45) + mv_gop110_netc_mac_to_rxaui(pp2_port, 0, phase, MV_NETC_LANE_45); + + if (c & MV_NETC_GE_MAC0_XAUI) + mv_gop110_netc_mac_to_xaui(pp2_port, 0, phase); + + if (c & MV_NETC_GE_MAC2_SGMII) + mv_gop110_netc_mac_to_sgmii(pp2_port, 2, phase); + else + mv_gop110_netc_mac_to_xgmii(pp2_port, 2, phase); + if (c & MV_NETC_GE_MAC3_SGMII) + mv_gop110_netc_mac_to_sgmii(pp2_port, 3, phase); + else { + mv_gop110_netc_mac_to_xgmii(pp2_port, 3, phase); + if (c & MV_NETC_GE_MAC3_RGMII) + mv_gop110_netc_mii_mode(pp2_port, 3, MV_NETC_GBE_RGMII); + else + mv_gop110_netc_mii_mode(pp2_port, 3, MV_NETC_GBE_MII); + } + + /* Activate gop ports 0, 2, 3 */ + mv_gop110_netc_active_port(pp2_port, 0, 1); + mv_gop110_netc_active_port(pp2_port, 2, 1); + mv_gop110_netc_active_port(pp2_port, 3, 1); + + if (phase == MV_NETC_SECOND_PHASE) { + /* Enable the GOP internal clock logic */ + mv_gop110_netc_gop_clock_logic_set(pp2_port, 1); + /* De-assert GOP unit reset */ + mv_gop110_netc_gop_reset(pp2_port, 1); + } + return 0; +} +MV_U32 mvp_pp2x_gop110_netc_cfg_create(struct mvpp2_port *pp2_port) +{ + MV_U32 val = 0; + + if (pp2_port->gop_index == 0) { + if (pp2_port->phy_interface == + MV_MODE_XAUI) + val |= MV_NETC_GE_MAC0_XAUI; + else if (pp2_port->phy_interface == + MV_MODE_RXAUI) + val |= MV_NETC_GE_MAC0_RXAUI_L23; + } + if (pp2_port->gop_index == 2) { + if (pp2_port->phy_interface == + MV_MODE_SGMII) + val |= MV_NETC_GE_MAC2_SGMII; + } + if (pp2_port->gop_index == 3) { + if (pp2_port->phy_interface == + MV_MODE_SGMII) + val |= MV_NETC_GE_MAC3_SGMII; + else if (pp2_port->phy_interface == + MV_MODE_RGMII) + val |= MV_NETC_GE_MAC3_RGMII; + } + + return val; +} + +/* +* mv_port_init +* Init physical port. Configures the port mode and all its elements +* accordingly. +* Does not verify that the selected mode/port number is valid at the +* core level.
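+* +* Illustrative usage sketch (an assumption about the calling driver, not an +* API defined in this file): once board code has filled in +* pp2_port->phy_interface, bring-up would typically look like: +* +* mv_gop110_netc_init(pp2_port, mvp_pp2x_gop110_netc_cfg_create(pp2_port), MV_NETC_FIRST_PHASE); +* if (mv_gop110_port_init(pp2_port) < 0) +* mvpp2_printf("unsupported phy_interface\n"); +*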
+*/ +MV_32 mv_gop110_port_init(struct mvpp2_port *pp2_port) +{ + + switch (pp2_port->phy_interface) { + case MV_MODE_RGMII: + mv_gop110_gmac_reset(pp2_port, RESET); + /* configure PCS */ + mv_gop110_gpcs_mode_cfg(pp2_port, MV_FALSE); + mv_gop110_bypass_clk_cfg(pp2_port, MV_TRUE); + + /* configure MAC */ + mv_gop110_gmac_mode_cfg(pp2_port); + /* pcs unreset */ + mv_gop110_gpcs_reset(pp2_port, UNRESET); + /* mac unreset */ + mv_gop110_gmac_reset(pp2_port, UNRESET); + break; + case MV_MODE_SGMII: + case MV_MODE_QSGMII: + /* configure PCS */ + mv_gop110_gpcs_mode_cfg(pp2_port, MV_TRUE); + + /* configure MAC */ + mv_gop110_gmac_mode_cfg(pp2_port); + /* select proper Mac mode */ + mv_gop110_xlg_2_gig_mac_cfg(pp2_port); + + /* pcs unreset */ + mv_gop110_gpcs_reset(pp2_port, UNRESET); + /* mac unreset */ + mv_gop110_gmac_reset(pp2_port, UNRESET); + break; + default: + return -1; + } + + return 0; +} + +/* Set the MAC to reset or exit from reset */ +MV_32 mv_gop110_gmac_reset(struct mvpp2_port *pp2_port, enum mv_reset reset) +{ + MV_U32 reg_addr; + MV_U32 val; + + reg_addr = MVPP2_PORT_CTRL2_REG; + + /* read - modify - write */ + val = mv_gop110_gmac_read(pp2_port, reg_addr); + if (reset == RESET) + val |= MVPP2_PORT_CTRL2_PORTMACRESET_MASK; + else + val &= ~MVPP2_PORT_CTRL2_PORTMACRESET_MASK; + mv_gop110_gmac_write(pp2_port, reg_addr, val); + + return 0; +} +/* +* mv_gop110_gpcs_mode_cfg +* Configure the port to work with the Gig PCS or not. +*/ +MV_32 mv_gop110_gpcs_mode_cfg(struct mvpp2_port *pp2_port, MV_BOOL en) +{ + MV_U32 val; + + val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL2_REG); + + if (en) + val |= MVPP2_PORT_CTRL2_PCS_EN_MASK; + else + val &= ~MVPP2_PORT_CTRL2_PCS_EN_MASK; + + /* enable / disable PCS on this port */ + mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL2_REG, val); + + return 0; +} + +MV_32 mv_gop110_bypass_clk_cfg(struct mvpp2_port *pp2_port, MV_BOOL en) +{ + MV_U32 val; + + val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL2_REG); + + if (en) + val |= MVPP2_PORT_CTRL2_CLK_125_BYPS_EN_MASK; + else + val &= ~MVPP2_PORT_CTRL2_CLK_125_BYPS_EN_MASK; + + /* enable / disable the 125MHz clock bypass on this port */ + mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL2_REG, val); + + return 0; +} + +MV_32 mv_gop110_gpcs_reset(struct mvpp2_port *pp2_port, enum mv_reset act) +{ + MV_U32 reg_data; + + reg_data = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL2_REG); + if (act == RESET) + U32_SET_FIELD(reg_data, MVPP2_PORT_CTRL2_SGMII_MODE_MASK, 0); + else + U32_SET_FIELD(reg_data, MVPP2_PORT_CTRL2_SGMII_MODE_MASK, + 1 << MVPP2_PORT_CTRL2_SGMII_MODE_OFFS); + + mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL2_REG, reg_data); + return 0; +} + +MV_VOID mv_gop110_xlg_2_gig_mac_cfg(struct mvpp2_port *pp2_port) +{ + MV_U32 reg_val; + + /* relevant only for MAC0 (XLG0 and GMAC0) */ + if (pp2_port->gop_index > 0) + return; + + /* configure 1Gig MAC mode */ + reg_val = mvpp2_xlg_read(pp2_port, + MV_XLG_PORT_MAC_CTRL3_REG); + U32_SET_FIELD(reg_val, MV_XLG_MAC_CTRL3_MACMODESELECT_MASK, + (0 << MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS)); + mvpp2_xlg_write(pp2_port, MV_XLG_PORT_MAC_CTRL3_REG, + reg_val); +} +/* Set the internal muxes to the required MAC in the GOP */ +MV_32 mv_gop110_gmac_mode_cfg(struct mvpp2_port *pp2_port) +{ + MV_U32 reg_addr; + MV_U32 val; + + /* Set TX FIFO thresholds */ + switch (pp2_port->phy_interface) { + case MV_MODE_SGMII: + if (pp2_port->speed == MV_PORT_SPEED_2500) + mv_gop110_gmac_sgmii2_5_cfg(pp2_port); + else + mv_gop110_gmac_sgmii_cfg(pp2_port); + break; + case MV_MODE_RGMII: +
mv_gop110_gmac_rgmii_cfg(pp2_port); + break; + case MV_MODE_QSGMII: + mv_gop110_gmac_qsgmii_cfg(pp2_port); + break; + default: + return -1; + } + + /* Jumbo frame support - 0x1400*2 = 0x2800 bytes */ + val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL0_REG); + U32_SET_FIELD(val, MVPP2_PORT_CTRL0_FRAMESIZELIMIT_MASK, + (0x1400 << MVPP2_PORT_CTRL0_FRAMESIZELIMIT_OFFS)); + mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL0_REG, val); + + /* PeriodicXonEn disable */ + reg_addr = MVPP2_PORT_CTRL1_REG; + val = mv_gop110_gmac_read(pp2_port, reg_addr); + val &= ~MVPP2_PORT_CTRL1_EN_PERIODIC_FC_XON_MASK; + mv_gop110_gmac_write(pp2_port, reg_addr, val); + + /* mask all port interrupts */ + mv_gop110_gmac_port_link_event_mask(pp2_port); + +#if MV_PP2x_INTERRUPT + /* unmask link change interrupt */ + val = mv_gop110_gmac_read(pp2_port, MVPP2_INTERRUPT_MASK_REG); + val |= MVPP2_INTERRUPT_CAUSE_LINK_CHANGE_MASK; + val |= 1; /* unmask summary bit */ + mv_gop110_gmac_write(pp2_port, MVPP2_INTERRUPT_MASK_REG, val); +#endif + return 0; +} + +MV_VOID mv_gop110_gmac_rgmii_cfg(struct mvpp2_port *pp2_port) +{ + MV_U32 val, thresh, an; + + /* Configure the minimal level of the Tx FIFO before the lower part starts to read a packet */ + thresh = MV_RGMII_TX_FIFO_MIN_TH; + val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_FIFO_CFG_1_REG); + U32_SET_FIELD(val, MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK, + (thresh << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS)); + mv_gop110_gmac_write(pp2_port, MVPP2_PORT_FIFO_CFG_1_REG, val); + + /* Disable bypass of sync module */ + val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL4_REG); + val |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK; + /* configure DP clock select according to mode */ + val &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK; + val |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK; + val |= MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK; + mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL4_REG, val); + + val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL2_REG); + val &= ~MVPP2_PORT_CTRL2_DIS_PADING_OFFS; + mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL2_REG, val); + + val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL0_REG); + /* configure GIG MAC to SGMII mode */ + val &= ~MVPP2_PORT_CTRL0_PORTTYPE_MASK; + mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL0_REG, val); + + /* configure AN 0xb8e8 */ + an = MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK | + MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK | + MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK | + MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK | + MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK; + mv_gop110_gmac_write(pp2_port, MVPP2_PORT_AUTO_NEG_CFG_REG, an); +} +MV_VOID mv_gop110_gmac_sgmii2_5_cfg(struct mvpp2_port *pp2_port) +{ + MV_U32 val, thresh, an; + + /* Configure the minimal level of the Tx FIFO before the lower part starts to read a packet */ + thresh = MV_SGMII2_5_TX_FIFO_MIN_TH; + val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_FIFO_CFG_1_REG); + U32_SET_FIELD(val, MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK, + (thresh << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS)); + mv_gop110_gmac_write(pp2_port, MVPP2_PORT_FIFO_CFG_1_REG, val); + + /* Disable bypass of sync module */ + val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL4_REG); + val |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK; + /* configure DP clock select according to mode */ + val |= MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK; + /* configure QSGMII bypass according to mode */ + val |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK; + mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL4_REG, val); + + val = mv_gop110_gmac_read(pp2_port,
+
+MV_VOID mv_gop110_gmac_sgmii2_5_cfg(struct mvpp2_port *pp2_port)
+{
+  MV_U32 val, thresh, an;
+
+  /* Configure the minimal fill level of the Tx FIFO before the lower
+   * part starts to read a packet
+   */
+  thresh = MV_SGMII2_5_TX_FIFO_MIN_TH;
+  val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_FIFO_CFG_1_REG);
+  U32_SET_FIELD(val, MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK,
+                (thresh << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS));
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_FIFO_CFG_1_REG, val);
+
+  /* Disable bypass of sync module */
+  val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL4_REG);
+  val |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
+  /* configure DP clock select according to mode */
+  val |= MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
+  /* configure QSGMII bypass according to mode */
+  val |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL4_REG, val);
+
+  /* disable padding of short frames */
+  val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL2_REG);
+  val |= MVPP2_PORT_CTRL2_DIS_PADING_MASK;
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL2_REG, val);
+
+  val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL0_REG);
+  /* configure GIG MAC to 1000Base-X mode connected to a fiber
+   * transceiver
+   */
+  val |= MVPP2_PORT_CTRL0_PORTTYPE_MASK;
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL0_REG, val);
+
+  /* configure AN */
+  an = MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_ADV_PAUSE_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK;
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_AUTO_NEG_CFG_REG, an);
+}
+
+MV_VOID mv_gop110_gmac_sgmii_cfg(struct mvpp2_port *pp2_port)
+{
+  MV_U32 val, thresh, an;
+
+  /* Configure the minimal fill level of the Tx FIFO before the lower
+   * part starts to read a packet
+   */
+  thresh = MV_SGMII_TX_FIFO_MIN_TH;
+  val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_FIFO_CFG_1_REG);
+  U32_SET_FIELD(val, MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK,
+                (thresh << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS));
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_FIFO_CFG_1_REG, val);
+
+  /* Disable bypass of sync module */
+  val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL4_REG);
+  val |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
+  /* configure DP clock select according to mode */
+  val &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
+  /* configure QSGMII bypass according to mode */
+  val |= MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL4_REG, val);
+
+  /* disable padding of short frames */
+  val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL2_REG);
+  val |= MVPP2_PORT_CTRL2_DIS_PADING_MASK;
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL2_REG, val);
+
+  val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL0_REG);
+  /* configure GIG MAC to SGMII mode */
+  val &= ~MVPP2_PORT_CTRL0_PORTTYPE_MASK;
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL0_REG, val);
+
+  /* configure AN */
+  an = MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK;
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_AUTO_NEG_CFG_REG, an);
+}
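+
+/*
+ * The 1G and 2.5G SGMII setups above differ in the Tx FIFO minimal
+ * threshold, the data-path clock selection, the port type (2.5G runs
+ * the GIG MAC in 1000Base-X mode) and the auto-negotiation fields: at
+ * 2.5G the MII/GMII speed and full duplex are forced directly instead
+ * of enabling speed/duplex auto-negotiation.
+ */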
+
+MV_VOID mv_gop110_gmac_qsgmii_cfg(struct mvpp2_port *pp2_port)
+{
+  MV_U32 val, thresh, an;
+
+  /* Configure the minimal fill level of the Tx FIFO before the lower
+   * part starts to read a packet
+   */
+  thresh = MV_SGMII_TX_FIFO_MIN_TH;
+  val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_FIFO_CFG_1_REG);
+  U32_SET_FIELD(val, MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK,
+                (thresh << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS));
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_FIFO_CFG_1_REG, val);
+
+  /* Disable bypass of sync module */
+  val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL4_REG);
+  val |= MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK;
+  /* configure DP clock select according to mode */
+  val &= ~MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK;
+  val &= ~MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK;
+  /* configure QSGMII bypass according to mode */
+  val &= ~MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK;
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL4_REG, val);
+
+  /* keep padding of short frames enabled */
+  val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL2_REG);
+  val &= ~MVPP2_PORT_CTRL2_DIS_PADING_MASK;
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL2_REG, val);
+
+  val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL0_REG);
+  /* configure GIG MAC to SGMII mode */
+  val &= ~MVPP2_PORT_CTRL0_PORTTYPE_MASK;
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL0_REG, val);
+
+  /* configure AN */
+  an = MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK |
+       MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK;
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_AUTO_NEG_CFG_REG, an);
+}
+
+/*
+ * mvpp2_smi_phy_addr_cfg
+ * Set the PHY address for the given SMI port.
+ */
+MV_32 mvpp2_smi_phy_addr_cfg(struct mvpp2_port *pp2_port, MV_32 port,
+                             MV_32 addr)
+{
+  mvpp2_smi_write(pp2_port->priv, MV_SMI_PHY_ADDRESS_REG(port), addr);
+
+  return 0;
+}
+
+MV_BOOL mv_gop110_port_is_link_up(struct mvpp2_port *pp2_port)
+{
+  switch (pp2_port->phy_interface) {
+  case MV_MODE_RGMII:
+  case MV_MODE_SGMII:
+  case MV_MODE_QSGMII:
+    return mv_gop110_gmac_link_status_get(pp2_port);
+  case MV_MODE_XAUI:
+  case MV_MODE_RXAUI:
+    gBS->Stall(1000);
+    /* return mv_gop110_xlg_mac_link_status_get(pp2_port); */
+    return MV_FALSE;
+  default:
+    return MV_FALSE;
+  }
+}
+
+/* Get MAC link status */
+MV_BOOL mv_gop110_gmac_link_status_get(struct mvpp2_port *pp2_port)
+{
+  MV_U32 reg_addr;
+  MV_U32 val;
+
+  reg_addr = MVPP2_PORT_STATUS0_REG;
+
+  val = mv_gop110_gmac_read(pp2_port, reg_addr);
+  return (val & MVPP2_PORT_STATUS0_LINKUP_MASK) ? MV_TRUE : MV_FALSE;
+}
+
+/* BM */
+INTN mvpp2_bm_pool_ctrl(struct mvpp2 *pp2, INTN pool, enum mvpp2_command cmd)
+{
+  MV_U32 reg_val;
+
+  reg_val = mvpp2_read(pp2, MVPP2_BM_POOL_CTRL_REG(pool));
+
+  switch (cmd) {
+  case MVPP2_START:
+    reg_val |= MVPP2_BM_START_MASK;
+    break;
+
+  case MVPP2_STOP:
+    reg_val |= MVPP2_BM_STOP_MASK;
+    break;
+
+  default:
+    return -1;
+  }
+  mvpp2_write(pp2, MVPP2_BM_POOL_CTRL_REG(pool), reg_val);
+
+  return 0;
+}
+
+MV_VOID mv_gop110_port_disable(struct mvpp2_port *pp2_port)
+{
+  switch (pp2_port->phy_interface) {
+  case MV_MODE_RGMII:
+  case MV_MODE_SGMII:
+  case MV_MODE_QSGMII:
+    mv_gop110_gmac_port_disable(pp2_port);
+    break;
+/*
+  case MV_MODE_XAUI:
+  case MV_MODE_RXAUI:
+    mv_gop110_xlg_mac_port_disable(gop, port_num);
+    break;
+ */
+  default:
+    return;
+  }
+}
+
+MV_VOID mv_gop110_port_enable(struct mvpp2_port *pp2_port)
+{
+  switch (pp2_port->phy_interface) {
+  case MV_MODE_RGMII:
+  case MV_MODE_SGMII:
+  case MV_MODE_QSGMII:
+    mv_gop110_gmac_port_enable(pp2_port);
+    break;
+/*
+  case MV_MODE_XAUI:
+  case MV_MODE_RXAUI:
+    mv_gop110_xlg_mac_port_disable(gop, port_num);
+    break;
+ */
+  default:
+    return;
+  }
+}
+
+/* Enable port and MIB counters */
+MV_VOID mv_gop110_gmac_port_enable(struct mvpp2_port *pp2_port)
+{
+  MV_U32 reg_val;
+
+  reg_val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL0_REG);
+  reg_val |= MVPP2_PORT_CTRL0_PORTEN_MASK;
+  reg_val |= MVPP2_PORT_CTRL0_COUNT_EN_MASK;
+
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL0_REG, reg_val);
+}
+
+/* Disable port */
+MV_VOID mv_gop110_gmac_port_disable(struct mvpp2_port *pp2_port)
+{
+  MV_U32 reg_val;
+
+  /* mask all port interrupts */
+  mv_gop110_gmac_port_link_event_mask(pp2_port);
+
+  reg_val = mv_gop110_gmac_read(pp2_port, MVPP2_PORT_CTRL0_REG);
+  reg_val &= ~MVPP2_PORT_CTRL0_PORTEN_MASK;
+
+  mv_gop110_gmac_write(pp2_port, MVPP2_PORT_CTRL0_REG, reg_val);
+}
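+
+/*
+ * Illustrative bring-up and link-poll sequence (hypothetical caller,
+ * not part of this library): with link-change interrupts masked, a
+ * driver is expected to poll for link, e.g.
+ *
+ *   mv_gop110_port_init(pp2_port);
+ *   mv_gop110_fl_cfg(pp2_port);
+ *   mv_gop110_port_enable(pp2_port);
+ *   timeout = 5000;
+ *   while (!mv_gop110_port_is_link_up(pp2_port) && timeout--)
+ *     gBS->Stall(1000);
+ */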
+
+MV_VOID mv_gop110_gmac_port_link_event_mask(struct mvpp2_port *pp2_port)
+{
+  MV_U32 reg_val;
+
+  reg_val = mv_gop110_gmac_read(pp2_port, MV_GMAC_INTERRUPT_SUM_MASK_REG);
+  reg_val &= ~MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_MASK;
+  mv_gop110_gmac_write(pp2_port, MV_GMAC_INTERRUPT_SUM_MASK_REG, reg_val);
+}
+
+MV_32 mv_gop110_port_events_mask(struct mvpp2_port *pp2_port)
+{
+  switch (pp2_port->phy_interface) {
+  case MV_MODE_RGMII:
+  case MV_MODE_SGMII:
+  case MV_MODE_QSGMII:
+    mv_gop110_gmac_port_link_event_mask(pp2_port);
+    break;
+  default:
+    return -1;
+  }
+  return 0;
+}
+
+MV_32 mv_gop110_fl_cfg(struct mvpp2_port *pp2_port)
+{
+  switch (pp2_port->phy_interface) {
+  case MV_MODE_RGMII:
+  case MV_MODE_SGMII:
+  case MV_MODE_QSGMII:
+    /* force speed and full duplex (disables AN) */
+    mv_gop110_speed_duplex_set(pp2_port, pp2_port->speed,
+                               MV_PORT_DUPLEX_FULL);
+    break;
+
+  case MV_MODE_XAUI:
+  case MV_MODE_RXAUI:
+    return 0;
+
+  default:
+    return -1;
+  }
+  return 0;
+}
+
+/* set port speed and duplex */
+MV_32 mv_gop110_speed_duplex_set(struct mvpp2_port *pp2_port,
+                                 MV_32 speed, enum mv_port_duplex duplex)
+{
+  switch (pp2_port->phy_interface) {
+  case MV_MODE_RGMII:
+  case MV_MODE_SGMII:
+  case MV_MODE_QSGMII:
+    mv_gop110_gmac_speed_duplex_set(pp2_port, speed, duplex);
+    break;
+
+  case MV_MODE_XAUI:
+  case MV_MODE_RXAUI:
+    break;
+
+  default:
+    return -1;
+  }
+  return 0;
+}
+
+/* Sets port speed to Auto Negotiation / 1000 / 100 / 10 Mbps.
+ * Sets port duplex to Auto Negotiation / Full / Half Duplex.
+ */
+MV_32 mv_gop110_gmac_speed_duplex_set(struct mvpp2_port *pp2_port,
+                                      MV_32 speed, enum mv_port_duplex duplex)
+{
+  MV_U32 reg_val;
+
+  reg_val = mvpp2_gmac_read(pp2_port, MVPP2_PORT_AUTO_NEG_CFG_REG);
+
+  switch (speed) {
+  case MV_PORT_SPEED_2500:
+  case MV_PORT_SPEED_1000:
+    reg_val &= ~MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK;
+    reg_val |= MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK;
+    /* the 100/10 bit doesn't matter in this case */
+    break;
+  case MV_PORT_SPEED_100:
+    reg_val &= ~MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK;
+    reg_val &= ~MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK;
+    reg_val |= MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK;
+    break;
+  case MV_PORT_SPEED_10:
+    reg_val &= ~MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK;
+    reg_val &= ~MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK;
+    reg_val &= ~MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK;
+    break;
+  default:
+    return MVPP2_EINVAL;
+  }
+
+  switch (duplex) {
+  case MV_PORT_DUPLEX_AN:
+    reg_val |= MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK;
+    /* the other bits don't matter in this case */
+    break;
+  case MV_PORT_DUPLEX_HALF:
+    reg_val &= ~MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK;
+    reg_val &= ~MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK;
+    break;
+  case MV_PORT_DUPLEX_FULL:
+    reg_val &= ~MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK;
+    reg_val |= MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK;
+    break;
+  default:
+    return MVPP2_EINVAL;
+  }
+
+  mvpp2_gmac_write(pp2_port, MVPP2_PORT_AUTO_NEG_CFG_REG, reg_val);
+  return 0;
+}
+
+MV_VOID mvpp2_axi_config(struct mvpp2 *pp2)
+{
+  /* Configure AXI read & write attributes: normal and snoop mode */
+  mvpp2_write(pp2, MVPP22_AXI_BM_WR_ATTR_REG,
+              MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT);
+  mvpp2_write(pp2, MVPP22_AXI_BM_RD_ATTR_REG,
+              MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT);
+  mvpp2_write(pp2, MVPP22_AXI_AGGRQ_DESCR_RD_ATTR_REG,
+              MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT);
+  mvpp2_write(pp2, MVPP22_AXI_TXQ_DESCR_WR_ATTR_REG,
+              MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT);
+  mvpp2_write(pp2, MVPP22_AXI_TXQ_DESCR_RD_ATTR_REG,
+              MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT);
+  mvpp2_write(pp2, MVPP22_AXI_RXQ_DESCR_WR_ATTR_REG,
+              MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT);
+  mvpp2_write(pp2, MVPP22_AXI_RX_DATA_WR_ATTR_REG,
+              MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT);
+  mvpp2_write(pp2,
MVPP22_AXI_TX_DATA_RD_ATTR_REG, + MVPP22_AXI_ATTR_SNOOP_CNTRL_BIT); +} + +/* Cleanup Tx ports */ +MV_VOID mvpp2_txp_clean(struct mvpp2_port *pp, MV_32 txp, + struct mvpp2_tx_queue *txq) +{ + MV_32 delay, pending; + MV_U32 reg_val; + + mvpp2_write(pp->priv, MVPP2_TXQ_NUM_REG, txq->id); + reg_val = mvpp2_read(pp->priv, MVPP2_TXQ_PREF_BUF_REG); + reg_val |= MVPP2_TXQ_DRAIN_EN_MASK; + mvpp2_write(pp->priv, MVPP2_TXQ_PREF_BUF_REG, reg_val); + + /* The napi queue has been stopped so wait for all packets + * to be transmitted. + */ + delay = 0; + do { + if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) { + mvpp2_printf("port %d: cleaning queue %d timed out\n", + pp->id, txq->log_id); + break; + } + mvpp2_mdelay(1); + delay++; + + pending = mvpp2_txq_pend_desc_num_get(pp, txq); + } while (pending); + + reg_val &= ~MVPP2_TXQ_DRAIN_EN_MASK; + mvpp2_write(pp->priv, MVPP2_TXQ_PREF_BUF_REG, reg_val); +} + +/* Cleanup all Tx queues */ +MV_VOID mvpp2_cleanup_txqs(struct mvpp2_port *pp) +{ + struct mvpp2_tx_queue *txq; + MV_32 txp, queue; + MV_U32 reg_val; + + reg_val = mvpp2_read(pp->priv, MVPP2_TX_PORT_FLUSH_REG); + + /* Reset Tx ports and delete Tx queues */ + for (txp = 0; txp < pp->txp_num; txp++) { + reg_val |= MVPP2_TX_PORT_FLUSH_MASK(pp->id); + mvpp2_write(pp->priv, MVPP2_TX_PORT_FLUSH_REG, reg_val); + + for (queue = 0; queue < txq_number; queue++) { + txq = &pp->txqs[txp * txq_number + queue]; + mvpp2_txp_clean(pp, txp, txq); + mvpp2_txq_hw_deinit(pp, txq); + } + + reg_val &= ~MVPP2_TX_PORT_FLUSH_MASK(pp->id); + mvpp2_write(pp->priv, MVPP2_TX_PORT_FLUSH_REG, reg_val); + } +} + +/* Cleanup all Rx queues */ +MV_VOID mvpp2_cleanup_rxqs(struct mvpp2_port *pp) +{ + MV_32 queue; + + for (queue = 0; queue < rxq_number; queue++) + mvpp2_rxq_hw_deinit(pp, &pp->rxqs[queue]); +} diff --git a/Drivers/Net/Pp2Dxe/mvpp2_lib.h b/Drivers/Net/Pp2Dxe/mvpp2_lib.h new file mode 100644 index 0000000..907b067 --- /dev/null +++ b/Drivers/Net/Pp2Dxe/mvpp2_lib.h @@ -0,0 +1,2362 @@ +/******************************************************************************* +Copyright (C) 2016 Marvell International Ltd. + +This software file (the "File") is owned and distributed by Marvell +International Ltd. and/or its affiliates ("Marvell") under the following +alternative licensing terms. Once you have made an election to distribute the +File under one of the following license alternatives, please (i) delete this +introductory statement regarding license alternatives, (ii) delete the three +license alternatives that you have not elected to use and (iii) preserve the +Marvell copyright notice above. + +******************************************************************************** +Marvell Commercial License Option + +If you received this File from Marvell and you have entered into a commercial +license agreement (a "Commercial License") with Marvell, the File is licensed +to you under the terms of the applicable Commercial License. + +******************************************************************************** +Marvell GPL License Option + +This program is free software: you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the Free +Software Foundation, either version 2 of the License, or any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program. If not, see http://www.gnu.org/licenses/. + +******************************************************************************** +Marvell GNU General Public License FreeRTOS Exception + +If you received this File from Marvell, you may opt to use, redistribute and/or +modify this File in accordance with the terms and conditions of the Lesser +General Public License Version 2.1 plus the following FreeRTOS exception. +An independent module is a module which is not derived from or based on +FreeRTOS. +Clause 1: +Linking FreeRTOS statically or dynamically with other modules is making a +combined work based on FreeRTOS. Thus, the terms and conditions of the GNU +General Public License cover the whole combination. +As a special exception, the copyright holder of FreeRTOS gives you permission +to link FreeRTOS with independent modules that communicate with FreeRTOS solely +through the FreeRTOS API interface, regardless of the license terms of these +independent modules, and to copy and distribute the resulting combined work +under terms of your choice, provided that: +1. Every copy of the combined work is accompanied by a written statement that +details to the recipient the version of FreeRTOS used and an offer by yourself +to provide the FreeRTOS source code (including any modifications you may have +made) should the recipient request it. +2. The combined work is not itself an RTOS, scheduler, kernel or related +product. +3. The independent modules add significant and primary functionality to +FreeRTOS and do not merely extend the existing functionality already present in +FreeRTOS. +Clause 2: +FreeRTOS may not be used for any competitive or comparative purpose, including +the publication of any form of run time or compile time metric, without the +express permission of Real Time Engineers Ltd. (this is the norm within the +industry and is intended to ensure information accuracy). + +******************************************************************************** +Marvell BSD License Option + +If you received this File from Marvell, you may opt to use, redistribute and/or +modify this File under the following licensing terms. +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + * Neither the name of Marvell nor the names of its contributors may be + used to endorse or promote products derived from this software without + specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +*******************************************************************************/ + +#ifndef _MVPP_LIB_H_ +#define _MVPP_LIB_H_ + +#ifndef BIT +#define BIT(nr) (1 << (nr)) +#endif + +/* RX Fifo Registers */ +#define MVPP2_RX_DATA_FIFO_SIZE_REG(port) (0x00 + 4 * (port)) +#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port) (0x20 + 4 * (port)) +#define MVPP2_RX_MIN_PKT_SIZE_REG 0x60 +#define MVPP2_RX_FIFO_INIT_REG 0x64 + +/* RX DMA Top Registers */ +#define MVPP2_RX_CTRL_REG(port) (0x140 + 4 * (port)) +#define MVPP2_RX_LOW_LATENCY_PKT_SIZE(s) (((s) & 0xfff) << 16) +#define MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK BIT(31) +#define MVPP2_POOL_BUF_SIZE_REG(pool) (0x180 + 4 * (pool)) +#define MVPP2_POOL_BUF_SIZE_OFFSET 5 +#define MVPP2_RXQ_CONFIG_REG(rxq) (0x800 + 4 * (rxq)) +#define MVPP2_SNOOP_PKT_SIZE_MASK 0x1ff +#define MVPP2_SNOOP_BUF_HDR_MASK BIT(9) +#define MVPP2_RXQ_POOL_SHORT_OFFS 20 +#define MVPP2_RXQ_POOL_SHORT_MASK 0x700000 +#define MVPP2_RXQ_POOL_LONG_OFFS 24 +#define MVPP2_RXQ_POOL_LONG_MASK 0x7000000 +#define MVPP2_RXQ_PACKET_OFFSET_OFFS 28 +#define MVPP2_RXQ_PACKET_OFFSET_MASK 0x70000000 +#define MVPP2_RXQ_DISABLE_MASK BIT(31) + +/* Parser Registers */ +#define MVPP2_PRS_INIT_LOOKUP_REG 0x1000 +#define MVPP2_PRS_PORT_LU_MAX 0xf +#define MVPP2_PRS_PORT_LU_MASK(port) (0xff << ((port) * 4)) +#define MVPP2_PRS_PORT_LU_VAL(port, val) ((val) << ((port) * 4)) +#define MVPP2_PRS_INIT_OFFS_REG(port) (0x1004 + ((port) & 4)) +#define MVPP2_PRS_INIT_OFF_MASK(port) (0x3f << (((port) % 4) * 8)) +#define MVPP2_PRS_INIT_OFF_VAL(port, val) ((val) << (((port) % 4) * 8)) +#define MVPP2_PRS_MAX_LOOP_REG(port) (0x100c + ((port) & 4)) +#define MVPP2_PRS_MAX_LOOP_MASK(port) (0xff << (((port) % 4) * 8)) +#define MVPP2_PRS_MAX_LOOP_VAL(port, val) ((val) << (((port) % 4) * 8)) +#define MVPP2_PRS_TCAM_IDX_REG 0x1100 +#define MVPP2_PRS_TCAM_DATA_REG(idx) (0x1104 + (idx) * 4) +#define MVPP2_PRS_TCAM_INV_MASK BIT(31) +#define MVPP2_PRS_SRAM_IDX_REG 0x1200 +#define MVPP2_PRS_SRAM_DATA_REG(idx) (0x1204 + (idx) * 4) +#define MVPP2_PRS_TCAM_CTRL_REG 0x1230 +#define MVPP2_PRS_TCAM_EN_MASK BIT(0) + +/* Classifier Registers */ +#define MVPP2_CLS_MODE_REG 0x1800 +#define MVPP2_CLS_MODE_ACTIVE_MASK BIT(0) +#define MVPP2_CLS_PORT_WAY_REG 0x1810 +#define MVPP2_CLS_PORT_WAY_MASK(port) (1 << (port)) +#define MVPP2_CLS_LKP_INDEX_REG 0x1814 +#define MVPP2_CLS_LKP_INDEX_WAY_OFFS 6 +#define MVPP2_CLS_LKP_TBL_REG 0x1818 +#define MVPP2_CLS_LKP_TBL_RXQ_MASK 0xff +#define MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK BIT(25) +#define MVPP2_CLS_FLOW_INDEX_REG 0x1820 +#define MVPP2_CLS_FLOW_TBL0_REG 0x1824 +#define MVPP2_CLS_FLOW_TBL1_REG 0x1828 +#define MVPP2_CLS_FLOW_TBL2_REG 0x182c +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port) (0x1980 + ((port) * 4)) +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS 3 +#define MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK 0x7 +#define MVPP2_CLS_SWFWD_P2HQ_REG(port) (0x19b0 + ((port) * 4)) +#define MVPP2_CLS_SWFWD_PCTRL_REG 0x19d0 +#define MVPP2_CLS_SWFWD_PCTRL_MASK(port) (1 << (port)) + +/* Descriptor Manager Top Registers */ +#define 
MVPP2_RXQ_NUM_REG			0x2040
+#define MVPP2_RXQ_DESC_ADDR_REG			0x2044
+#define MVPP2_RXQ_DESC_SIZE_REG			0x2048
+#define MVPP2_RXQ_DESC_SIZE_MASK		0x3ff0
+#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)	(0x3000 + 4 * (rxq))
+#define MVPP2_RXQ_NUM_PROCESSED_OFFSET		0
+#define MVPP2_RXQ_NUM_NEW_OFFSET		16
+#define MVPP2_RXQ_STATUS_REG(rxq)		(0x3400 + 4 * (rxq))
+#define MVPP2_RXQ_OCCUPIED_MASK			0x3fff
+#define MVPP2_RXQ_NON_OCCUPIED_OFFSET		16
+#define MVPP2_RXQ_NON_OCCUPIED_MASK		0x3fff0000
+#define MVPP2_RXQ_THRESH_REG			0x204c
+#define MVPP2_OCCUPIED_THRESH_OFFSET		0
+#define MVPP2_OCCUPIED_THRESH_MASK		0x3fff
+#define MVPP2_RXQ_INDEX_REG			0x2050
+#define MVPP2_TXQ_NUM_REG			0x2080
+#define MVPP2_TXQ_DESC_ADDR_REG			0x2084
+#define MVPP22_TXQ_DESC_ADDR_HIGH_REG		0x20a8
+#define MVPP22_TXQ_DESC_ADDR_HIGH_MASK		0xff
+#define MVPP2_TXQ_DESC_SIZE_REG			0x2088
+#define MVPP2_TXQ_DESC_SIZE_MASK		0x3ff0
+#define MVPP2_AGGR_TXQ_UPDATE_REG		0x2090
+#define MVPP2_TXQ_THRESH_REG			0x2094
+#define MVPP2_TRANSMITTED_THRESH_OFFSET		16
+#define MVPP2_TRANSMITTED_THRESH_MASK		0x3fff0000
+#define MVPP2_TXQ_INDEX_REG			0x2098
+#define MVPP2_TXQ_PREF_BUF_REG			0x209c
+#define MVPP2_PREF_BUF_PTR(desc)		((desc) & 0xfff)
+#define MVPP2_PREF_BUF_SIZE_4			(BIT(12) | BIT(13))
+#define MVPP2_PREF_BUF_SIZE_16			(BIT(12) | BIT(14))
+#define MVPP2_PREF_BUF_THRESH(val)		((val) << 17)
+#define MVPP2_TXQ_DRAIN_EN_MASK			BIT(31)
+#define MVPP2_TXQ_PENDING_REG			0x20a0
+#define MVPP2_TXQ_PENDING_MASK			0x3fff
+#define MVPP2_TXQ_INT_STATUS_REG		0x20a4
+#define MVPP2_TXQ_SENT_REG(txq)			(0x3c00 + 4 * (txq))
+#define MVPP22_TXQ_SENT_REG(txq)		(0x3e00 + 4 * ((txq) - 128))
+#define MVPP2_TRANSMITTED_COUNT_OFFSET		16
+#define MVPP2_TRANSMITTED_COUNT_MASK		0x3fff0000
+#define MVPP2_TXQ_RSVD_REQ_REG			0x20b0
+#define MVPP2_TXQ_RSVD_REQ_Q_OFFSET		16
+#define MVPP2_TXQ_RSVD_RSLT_REG			0x20b4
+#define MVPP2_TXQ_RSVD_RSLT_MASK		0x3fff
+#define MVPP2_TXQ_RSVD_CLR_REG			0x20b8
+#define MVPP2_TXQ_RSVD_CLR_OFFSET		16
+#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)	(0x2100 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)	(0x2140 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_DESC_SIZE_MASK		0x3ff0
+#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)		(0x2180 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_PENDING_MASK		0x3fff
+#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)		(0x21c0 + 4 * (cpu))
+
+/* MBUS bridge registers */
+#define MVPP2_WIN_BASE(w)			(0x4000 + ((w) << 2))
+#define MVPP2_WIN_SIZE(w)			(0x4020 + ((w) << 2))
+#define MVPP2_WIN_REMAP(w)			(0x4040 + ((w) << 2))
+#define MVPP2_BASE_ADDR_ENABLE			0x4060
+
+/* Interrupt Cause and Mask registers */
+#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)		(0x5200 + 4 * (rxq))
+#define MVPP2_ISR_RXQ_GROUP_REG(rxq)		(0x5400 + 4 * (rxq))
+#define MVPP2_ISR_ENABLE_REG(port)		(0x5420 + 4 * (port))
+#define MVPP2_ISR_ENABLE_INTERRUPT(mask)	((mask) & 0xffff)
+#define MVPP2_ISR_DISABLE_INTERRUPT(mask)	(((mask) << 16) & 0xffff0000)
+#define MVPP2_ISR_RX_TX_CAUSE_REG(port)		(0x5480 + 4 * (port))
+#define MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
+#define MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK	0xff0000
+#define MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK	BIT(24)
+#define MVPP2_CAUSE_FCS_ERR_MASK		BIT(25)
+#define MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK	BIT(26)
+#define MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK	BIT(29)
+#define MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK	BIT(30)
+#define MVPP2_CAUSE_MISC_SUM_MASK		BIT(31)
+#define MVPP2_ISR_RX_TX_MASK_REG(port)		(0x54a0 + 4 * (port))
+#define MVPP2_ISR_PON_RX_TX_MASK_REG		0x54bc
+#define MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK	0xffff
+#define MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK	0x3fc00000
+#define
MVPP2_PON_CAUSE_MISC_SUM_MASK BIT(31) +#define MVPP2_ISR_MISC_CAUSE_REG 0x55b0 + +/* Buffer Manager registers */ +#define MVPP2_BM_POOL_BASE_REG(pool) (0x6000 + ((pool) * 4)) +#define MVPP2_BM_POOL_BASE_ADDR_MASK 0xfffff80 +#define MVPP2_BM_POOL_SIZE_REG(pool) (0x6040 + ((pool) * 4)) +#define MVPP2_BM_POOL_SIZE_MASK 0xfff0 +#define MVPP2_BM_POOL_READ_PTR_REG(pool) (0x6080 + ((pool) * 4)) +#define MVPP2_BM_POOL_GET_READ_PTR_MASK 0xfff0 +#define MVPP2_BM_POOL_PTRS_NUM_REG(pool) (0x60c0 + ((pool) * 4)) +#define MVPP2_BM_POOL_PTRS_NUM_MASK 0xfff0 +#define MVPP2_BM_BPPI_READ_PTR_REG(pool) (0x6100 + ((pool) * 4)) +#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool) (0x6140 + ((pool) * 4)) +#define MVPP2_BM_BPPI_PTR_NUM_MASK 0x7ff +#define MVPP2_BM_BPPI_PREFETCH_FULL_MASK BIT(16) +#define MVPP2_BM_POOL_CTRL_REG(pool) (0x6200 + ((pool) * 4)) +#define MVPP2_BM_START_MASK BIT(0) +#define MVPP2_BM_STOP_MASK BIT(1) +#define MVPP2_BM_STATE_MASK BIT(4) +#define MVPP2_BM_LOW_THRESH_OFFS 8 +#define MVPP2_BM_LOW_THRESH_MASK 0x7f00 +#define MVPP2_BM_LOW_THRESH_VALUE(val) ((val) << \ + MVPP2_BM_LOW_THRESH_OFFS) +#define MVPP2_BM_HIGH_THRESH_OFFS 16 +#define MVPP2_BM_HIGH_THRESH_MASK 0x7f0000 +#define MVPP2_BM_HIGH_THRESH_VALUE(val) ((val) << \ + MVPP2_BM_HIGH_THRESH_OFFS) +#define MVPP2_BM_INTR_CAUSE_REG(pool) (0x6240 + ((pool) * 4)) +#define MVPP2_BM_RELEASED_DELAY_MASK BIT(0) +#define MVPP2_BM_ALLOC_FAILED_MASK BIT(1) +#define MVPP2_BM_BPPE_EMPTY_MASK BIT(2) +#define MVPP2_BM_BPPE_FULL_MASK BIT(3) +#define MVPP2_BM_AVAILABLE_BP_LOW_MASK BIT(4) +#define MVPP2_BM_INTR_MASK_REG(pool) (0x6280 + ((pool) * 4)) +#define MVPP2_BM_PHY_ALLOC_REG(pool) (0x6400 + ((pool) * 4)) +#define MVPP2_BM_PHY_ALLOC_GRNTD_MASK BIT(0) +#define MVPP2_BM_VIRT_ALLOC_REG 0x6440 +#define MVPP2_BM_PHY_RLS_REG(pool) (0x6480 + ((pool) * 4)) +#define MVPP2_BM_PHY_RLS_MC_BUFF_MASK BIT(0) +#define MVPP2_BM_PHY_RLS_PRIO_EN_MASK BIT(1) +#define MVPP2_BM_PHY_RLS_GRNTD_MASK BIT(2) +#define MVPP2_BM_VIRT_RLS_REG 0x64c0 +#define MVPP2_BM_MC_RLS_REG 0x64c4 +#define MVPP2_BM_MC_ID_MASK 0xfff +#define MVPP2_BM_FORCE_RELEASE_MASK BIT(12) + +#define MVPP22_BM_PHY_VIRT_HIGH_ALLOC_REG 0x6444 +#define MVPP22_BM_PHY_HIGH_ALLOC_OFFSET 0 +#define MVPP22_BM_VIRT_HIGH_ALLOC_OFFSET 8 +#define MVPP22_BM_VIRT_HIGH_ALLOC_MASK 0xff00 + +#define MVPP22_BM_PHY_VIRT_HIGH_RLS_REG 0x64c4 /* Not a mixup */ + +#define MVPP22_BM_PHY_HIGH_RLS_OFFSET 0 +#define MVPP22_BM_VIRT_HIGH_RLS_OFFST 8 + +#define MVPP22_BM_POOL_BASE_HIGH_REG 0x6310 +#define MVPP22_BM_POOL_BASE_HIGH_MASK 0xff +#define MVPP2_BM_PRIO_CTRL_REG 0x6800 + + +/* TX Scheduler registers */ +#define MVPP2_TXP_SCHED_PORT_INDEX_REG 0x8000 +#define MVPP2_TXP_SCHED_Q_CMD_REG 0x8004 +#define MVPP2_TXP_SCHED_ENQ_MASK 0xff +#define MVPP2_TXP_SCHED_DISQ_OFFSET 8 +#define MVPP2_TXP_SCHED_CMD_1_REG 0x8010 +#define MVPP2_TXP_SCHED_PERIOD_REG 0x8018 +#define MVPP2_TXP_SCHED_MTU_REG 0x801c +#define MVPP2_TXP_MTU_MAX 0x7FFFF +#define MVPP2_TXP_SCHED_REFILL_REG 0x8020 +#define MVPP2_TXP_REFILL_TOKENS_ALL_MASK 0x7ffff +#define MVPP2_TXP_REFILL_PERIOD_ALL_MASK 0x3ff00000 +#define MVPP2_TXP_REFILL_PERIOD_MASK(v) ((v) << 20) +#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG 0x8024 +#define MVPP2_TXP_TOKEN_SIZE_MAX 0xffffffff +#define MVPP2_TXQ_SCHED_REFILL_REG(q) (0x8040 + ((q) << 2)) +#define MVPP2_TXQ_REFILL_TOKENS_ALL_MASK 0x7ffff +#define MVPP2_TXQ_REFILL_PERIOD_ALL_MASK 0x3ff00000 +#define MVPP2_TXQ_REFILL_PERIOD_MASK(v) ((v) << 20) +#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q) (0x8060 + ((q) << 2)) +#define MVPP2_TXQ_TOKEN_SIZE_MAX 0x7fffffff 
+#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q) (0x8080 + ((q) << 2)) +#define MVPP2_TXQ_TOKEN_CNTR_MAX 0xffffffff + +/* TX general registers */ +#define MVPP2_TX_SNOOP_REG 0x8800 +#define MVPP2_TX_PORT_FLUSH_REG 0x8810 +#define MVPP2_TX_PORT_FLUSH_MASK(port) (1 << (port)) + +/* LMS registers */ +#define MVPP2_SRC_ADDR_MIDDLE 0x24 +#define MVPP2_SRC_ADDR_HIGH 0x28 +#define MVPP2_PHY_AN_CFG0_REG 0x34 +#define MVPP2_PHY_AN_STOP_SMI0_MASK BIT(7) +#define MVPP2_MIB_COUNTERS_BASE(port) (0x1000 + ((port) >> 1) * \ + 0x400 + (port) * 0x400) +#define MVPP2_MIB_LATE_COLLISION 0x7c +#define MVPP2_ISR_SUM_MASK_REG 0x220c +#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG 0x305c +#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT 0x27 + +/* Per-port registers */ +#define MVPP2_GMAC_CTRL_0_REG 0x0 +#define MVPP2_GMAC_PORT_EN_MASK BIT(0) +#define MVPP2_GMAC_MAX_RX_SIZE_OFFS 2 +#define MVPP2_GMAC_MAX_RX_SIZE_MASK 0x7ffc +#define MVPP2_GMAC_MIB_CNTR_EN_MASK BIT(15) +#define MVPP2_GMAC_CTRL_1_REG 0x4 +#define MVPP2_GMAC_PERIODIC_XON_EN_MASK BIT(1) +#define MVPP2_GMAC_GMII_LB_EN_MASK BIT(5) +#define MVPP2_GMAC_PCS_LB_EN_BIT 6 +#define MVPP2_GMAC_PCS_LB_EN_MASK BIT(6) +#define MVPP2_GMAC_SA_LOW_OFFS 7 +#define MVPP2_GMAC_CTRL_2_REG 0x8 +#define MVPP2_GMAC_INBAND_AN_MASK BIT(0) +#define MVPP2_GMAC_PCS_ENABLE_MASK BIT(3) +#define MVPP2_GMAC_PORT_RGMII_MASK BIT(4) +#define MVPP2_GMAC_PORT_RESET_MASK BIT(6) +#define MVPP2_GMAC_AUTONEG_CONFIG 0xc +#define MVPP2_GMAC_FORCE_LINK_DOWN BIT(0) +#define MVPP2_GMAC_FORCE_LINK_PASS BIT(1) +#define MVPP2_GMAC_CONFIG_MII_SPEED BIT(5) +#define MVPP2_GMAC_CONFIG_GMII_SPEED BIT(6) +#define MVPP2_GMAC_AN_SPEED_EN BIT(7) +#define MVPP2_GMAC_FC_ADV_EN BIT(9) +#define MVPP2_GMAC_CONFIG_FULL_DUPLEX BIT(12) +#define MVPP2_GMAC_AN_DUPLEX_EN BIT(13) +#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG 0x1c +#define MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS 6 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK 0x1fc0 +#define MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \ + MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK) +/* Port Interrupt Cause */ +#define MV_GMAC_INTERRUPT_CAUSE_REG (0x0020) +/* Port Interrupt Mask */ +#define MV_GMAC_INTERRUPT_MASK_REG (0x0024) +#define MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_OFFS 1 +#define MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_MASK \ + (0x1 << MV_GMAC_INTERRUPT_CAUSE_LINK_CHANGE_OFFS) + +/* Port Interrupt Summary Cause */ +#define MV_GMAC_INTERRUPT_SUM_CAUSE_REG (0x00A0) +/* Port Interrupt Summary Mask */ +#define MV_GMAC_INTERRUPT_SUM_MASK_REG (0x00A4) +#define MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_OFFS 1 +#define MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_MASK \ + (0x1 << MV_GMAC_INTERRUPT_SUM_CAUSE_LINK_CHANGE_OFFS) + +/* Port Mac Control0 */ +#define MVPP2_PORT_CTRL0_REG (0x0000) +#define MVPP2_PORT_CTRL0_PORTEN_OFFS 0 +#define MVPP2_PORT_CTRL0_PORTEN_MASK \ + (0x00000001 << MVPP2_PORT_CTRL0_PORTEN_OFFS) + +#define MVPP2_PORT_CTRL0_PORTTYPE_OFFS 1 +#define MVPP2_PORT_CTRL0_PORTTYPE_MASK \ + (0x00000001 << MVPP2_PORT_CTRL0_PORTTYPE_OFFS) + +#define MVPP2_PORT_CTRL0_FRAMESIZELIMIT_OFFS 2 +#define MVPP2_PORT_CTRL0_FRAMESIZELIMIT_MASK \ + (0x00001fff << MVPP2_PORT_CTRL0_FRAMESIZELIMIT_OFFS) + +#define MVPP2_PORT_CTRL0_COUNT_EN_OFFS 15 +#define MVPP2_PORT_CTRL0_COUNT_EN_MASK \ + (0x00000001 << MVPP2_PORT_CTRL0_COUNT_EN_OFFS) + + +/* Port Mac Control1 */ +#define MVPP2_PORT_CTRL1_REG (0x0004) +#define MVPP2_PORT_CTRL1_EN_RX_CRC_CHECK_OFFS 0 +#define MVPP2_PORT_CTRL1_EN_RX_CRC_CHECK_MASK \ + (0x00000001 << MVPP2_PORT_CTRL1_EN_RX_CRC_CHECK_OFFS) + +#define MVPP2_PORT_CTRL1_EN_PERIODIC_FC_XON_OFFS 1 +#define 
MVPP2_PORT_CTRL1_EN_PERIODIC_FC_XON_MASK \ + (0x00000001 << MVPP2_PORT_CTRL1_EN_PERIODIC_FC_XON_OFFS) + +#define MVPP2_PORT_CTRL1_MGMII_MODE_OFFS 2 +#define MVPP2_PORT_CTRL1_MGMII_MODE_MASK \ + (0x00000001 << MVPP2_PORT_CTRL1_MGMII_MODE_OFFS) + +#define MVPP2_PORT_CTRL1_PFC_CASCADE_PORT_ENABLE_OFFS 3 +#define MVPP2_PORT_CTRL1_PFC_CASCADE_PORT_ENABLE_MASK \ + (0x00000001 << MVPP2_PORT_CTRL1_PFC_CASCADE_PORT_ENABLE_OFFS) + +#define MVPP2_PORT_CTRL1_DIS_EXCESSIVE_COL_OFFS 4 +#define MVPP2_PORT_CTRL1_DIS_EXCESSIVE_COL_MASK \ + (0x00000001 << MVPP2_PORT_CTRL1_DIS_EXCESSIVE_COL_OFFS) + +#define MVPP2_PORT_CTRL1_GMII_LOOPBACK_OFFS 5 +#define MVPP2_PORT_CTRL1_GMII_LOOPBACK_MASK \ + (0x00000001 << MVPP2_PORT_CTRL1_GMII_LOOPBACK_OFFS) + +#define MVPP2_PORT_CTRL1_PCS_LOOPBACK_OFFS 6 +#define MVPP2_PORT_CTRL1_PCS_LOOPBACK_MASK \ + (0x00000001 << MVPP2_PORT_CTRL1_PCS_LOOPBACK_OFFS) + +#define MVPP2_PORT_CTRL1_FC_SA_ADDR_LO_OFFS 7 +#define MVPP2_PORT_CTRL1_FC_SA_ADDR_LO_MASK \ + (0x000000ff << MVPP2_PORT_CTRL1_FC_SA_ADDR_LO_OFFS) + +#define MVPP2_PORT_CTRL1_EN_SHORT_PREAMBLE_OFFS 15 +#define MVPP2_PORT_CTRL1_EN_SHORT_PREAMBLE_MASK \ + (0x00000001 << MVPP2_PORT_CTRL1_EN_SHORT_PREAMBLE_OFFS) + + +/* Port Mac Control2 */ +#define MVPP2_PORT_CTRL2_REG (0x0008) +#define MVPP2_PORT_CTRL2_SGMII_MODE_OFFS 0 +#define MVPP2_PORT_CTRL2_SGMII_MODE_MASK \ + (0x00000001 << MVPP2_PORT_CTRL2_SGMII_MODE_OFFS) + +#define MVPP2_PORT_CTRL2_FC_MODE_OFFS 1 +#define MVPP2_PORT_CTRL2_FC_MODE_MASK \ + (0x00000003 << MVPP2_PORT_CTRL2_FC_MODE_OFFS) + +#define MVPP2_PORT_CTRL2_PCS_EN_OFFS 3 +#define MVPP2_PORT_CTRL2_PCS_EN_MASK \ + (0x00000001 << MVPP2_PORT_CTRL2_PCS_EN_OFFS) + +#define MVPP2_PORT_CTRL2_RGMII_MODE_OFFS 4 +#define MVPP2_PORT_CTRL2_RGMII_MODE_MASK \ + (0x00000001 << MVPP2_PORT_CTRL2_RGMII_MODE_OFFS) + +#define MVPP2_PORT_CTRL2_DIS_PADING_OFFS 5 +#define MVPP2_PORT_CTRL2_DIS_PADING_MASK \ + (0x00000001 << MVPP2_PORT_CTRL2_DIS_PADING_OFFS) + +#define MVPP2_PORT_CTRL2_PORTMACRESET_OFFS 6 +#define MVPP2_PORT_CTRL2_PORTMACRESET_MASK \ + (0x00000001 << MVPP2_PORT_CTRL2_PORTMACRESET_OFFS) + +#define MVPP2_PORT_CTRL2_TX_DRAIN_OFFS 7 +#define MVPP2_PORT_CTRL2_TX_DRAIN_MASK \ + (0x00000001 << MVPP2_PORT_CTRL2_TX_DRAIN_OFFS) + +#define MVPP2_PORT_CTRL2_EN_MII_ODD_PRE_OFFS 8 +#define MVPP2_PORT_CTRL2_EN_MII_ODD_PRE_MASK \ + (0x00000001 << MVPP2_PORT_CTRL2_EN_MII_ODD_PRE_OFFS) + +#define MVPP2_PORT_CTRL2_CLK_125_BYPS_EN_OFFS 9 +#define MVPP2_PORT_CTRL2_CLK_125_BYPS_EN_MASK \ + (0x00000001 << MVPP2_PORT_CTRL2_CLK_125_BYPS_EN_OFFS) + +#define MVPP2_PORT_CTRL2_PRBS_CHECK_EN_OFFS 10 +#define MVPP2_PORT_CTRL2_PRBS_CHECK_EN_MASK \ + (0x00000001 << MVPP2_PORT_CTRL2_PRBS_CHECK_EN_OFFS) + +#define MVPP2_PORT_CTRL2_PRBS_GEN_EN_OFFS 11 +#define MVPP2_PORT_CTRL2_PRBS_GEN_EN_MASK \ + (0x00000001 << MVPP2_PORT_CTRL2_PRBS_GEN_EN_OFFS) + +#define MVPP2_PORT_CTRL2_SELECT_DATA_TO_TX_OFFS 12 +#define MVPP2_PORT_CTRL2_SELECT_DATA_TO_TX_MASK \ + (0x00000003 << MVPP2_PORT_CTRL2_SELECT_DATA_TO_TX_OFFS) + +#define MVPP2_PORT_CTRL2_EN_COL_ON_BP_OFFS 14 +#define MVPP2_PORT_CTRL2_EN_COL_ON_BP_MASK \ + (0x00000001 << MVPP2_PORT_CTRL2_EN_COL_ON_BP_OFFS) + +#define MVPP2_PORT_CTRL2_EARLY_REJECT_MODE_OFFS 15 +#define MVPP2_PORT_CTRL2_EARLY_REJECT_MODE_MASK \ + (0x00000001 << MVPP2_PORT_CTRL2_EARLY_REJECT_MODE_OFFS) + + +/* Port Auto-negotiation Configuration */ +#define MVPP2_PORT_AUTO_NEG_CFG_REG (0x000c) +#define MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_OFFS 0 +#define MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_MASK \ + (0x00000001 << 
MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_DOWN_OFFS) + +#define MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_OFFS 1 +#define MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_MASK \ + (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_FORCE_LINK_UP_OFFS) + +#define MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_OFFS 2 +#define MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_MASK \ + (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_EN_PCS_AN_OFFS) + +#define MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_OFFS 3 +#define MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_MASK \ + (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_AN_BYPASS_EN_OFFS) + +#define MVPP2_PORT_AUTO_NEG_CFG_INBAND_RESTARTAN_OFFS 4 +#define MVPP2_PORT_AUTO_NEG_CFG_INBAND_RESTARTAN_MASK \ + (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_INBAND_RESTARTAN_OFFS) + +#define MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_OFFS 5 +#define MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_MASK \ + (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_SET_MII_SPEED_OFFS) + +#define MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_OFFS 6 +#define MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_MASK \ + (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_SET_GMII_SPEED_OFFS) + +#define MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_OFFS 7 +#define MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_MASK \ + (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_EN_AN_SPEED_OFFS) + +#define MVPP2_PORT_AUTO_NEG_CFG_ADV_PAUSE_OFFS 9 +#define MVPP2_PORT_AUTO_NEG_CFG_ADV_PAUSE_MASK \ + (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_ADV_PAUSE_OFFS) + +#define MVPP2_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_OFFS 10 +#define MVPP2_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_MASK \ + (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_ADV_ASM_PAUSE_OFFS) + +#define MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_OFFS 11 +#define MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_MASK \ + (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_EN_FC_AN_OFFS) + +#define MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_OFFS 12 +#define MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_MASK \ + (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_SET_FULL_DX_OFFS) + +#define MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_OFFS 13 +#define MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_MASK \ + (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_EN_FDX_AN_OFFS) + +#define MVPP2_PORT_AUTO_NEG_CFG_PHY_MODE_OFFS 14 +#define MVPP2_PORT_AUTO_NEG_CFG_PHY_MODE_MASK \ + (0x00000001 << MVPP2_PORT_AUTO_NEG_CFG_PHY_MODE_OFFS) + +#define MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_OFFS 15 +#define MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_MASK \ + (0x00000001 << \ + MVPP2_PORT_AUTO_NEG_CFG_CHOOSE_SAMPLE_TX_CONFIG_OFFS) + +/* Port Status0 */ +#define MVPP2_PORT_STATUS0_REG (0x0010) +#define MVPP2_PORT_STATUS0_LINKUP_OFFS 0 +#define MVPP2_PORT_STATUS0_LINKUP_MASK \ + (0x00000001 << MVPP2_PORT_STATUS0_LINKUP_OFFS) + +#define MVPP2_PORT_STATUS0_GMIISPEED_OFFS 1 +#define MVPP2_PORT_STATUS0_GMIISPEED_MASK \ + (0x00000001 << MVPP2_PORT_STATUS0_GMIISPEED_OFFS) + +#define MVPP2_PORT_STATUS0_MIISPEED_OFFS 2 +#define MVPP2_PORT_STATUS0_MIISPEED_MASK \ + (0x00000001 << MVPP2_PORT_STATUS0_MIISPEED_OFFS) + +#define MVPP2_PORT_STATUS0_FULLDX_OFFS 3 +#define MVPP2_PORT_STATUS0_FULLDX_MASK \ + (0x00000001 << MVPP2_PORT_STATUS0_FULLDX_OFFS) + +#define MVPP2_PORT_STATUS0_RXFCEN_OFFS 4 +#define MVPP2_PORT_STATUS0_RXFCEN_MASK \ + (0x00000001 << MVPP2_PORT_STATUS0_RXFCEN_OFFS) + +#define MVPP2_PORT_STATUS0_TXFCEN_OFFS 5 +#define MVPP2_PORT_STATUS0_TXFCEN_MASK \ + (0x00000001 << MVPP2_PORT_STATUS0_TXFCEN_OFFS) + +#define MVPP2_PORT_STATUS0_PORTRXPAUSE_OFFS 6 +#define MVPP2_PORT_STATUS0_PORTRXPAUSE_MASK \ + (0x00000001 << MVPP2_PORT_STATUS0_PORTRXPAUSE_OFFS) + +#define MVPP2_PORT_STATUS0_PORTTXPAUSE_OFFS 7 +#define MVPP2_PORT_STATUS0_PORTTXPAUSE_MASK \ + 
(0x00000001 << MVPP2_PORT_STATUS0_PORTTXPAUSE_OFFS) + +#define MVPP2_PORT_STATUS0_PORTIS_DOINGPRESSURE_OFFS 8 +#define MVPP2_PORT_STATUS0_PORTIS_DOINGPRESSURE_MASK \ + (0x00000001 << MVPP2_PORT_STATUS0_PORTIS_DOINGPRESSURE_OFFS) + +#define MVPP2_PORT_STATUS0_PORTBUFFULL_OFFS 9 +#define MVPP2_PORT_STATUS0_PORTBUFFULL_MASK \ + (0x00000001 << MVPP2_PORT_STATUS0_PORTBUFFULL_OFFS) + +#define MVPP2_PORT_STATUS0_SYNCFAIL10MS_OFFS 10 +#define MVPP2_PORT_STATUS0_SYNCFAIL10MS_MASK \ + (0x00000001 << MVPP2_PORT_STATUS0_SYNCFAIL10MS_OFFS) + +#define MVPP2_PORT_STATUS0_ANDONE_OFFS 11 +#define MVPP2_PORT_STATUS0_ANDONE_MASK \ + (0x00000001 << MVPP2_PORT_STATUS0_ANDONE_OFFS) + +#define MVPP2_PORT_STATUS0_INBAND_AUTONEG_BYPASSACT_OFFS 12 +#define MVPP2_PORT_STATUS0_INBAND_AUTONEG_BYPASSACT_MASK \ + (0x00000001 << \ + MVPP2_PORT_STATUS0_INBAND_AUTONEG_BYPASSACT_OFFS) + +#define MVPP2_PORT_STATUS0_SERDESPLL_LOCKED_OFFS 13 +#define MVPP2_PORT_STATUS0_SERDESPLL_LOCKED_MASK \ + (0x00000001 << MVPP2_PORT_STATUS0_SERDESPLL_LOCKED_OFFS) + +#define MVPP2_PORT_STATUS0_SYNCOK_OFFS 14 +#define MVPP2_PORT_STATUS0_SYNCOK_MASK \ + (0x00000001 << MVPP2_PORT_STATUS0_SYNCOK_OFFS) + +#define MVPP2_PORT_STATUS0_SQUELCHNOT_DETECTED_OFFS 15 +#define MVPP2_PORT_STATUS0_SQUELCHNOT_DETECTED_MASK \ + (0x00000001 << MVPP2_PORT_STATUS0_SQUELCHNOT_DETECTED_OFFS) + + +/* Port Serial Parameters Configuration */ +#define MVPP2_PORT_SERIAL_PARAM_CFG_REG (0x0014) +#define MVPP2_PORT_SERIAL_PARAM_CFG_UNIDIRECTIONAL_ENABLE_OFFS 0 +#define MVPP2_PORT_SERIAL_PARAM_CFG_UNIDIRECTIONAL_ENABLE_MASK \ + (0x00000001 << \ + MVPP2_PORT_SERIAL_PARAM_CFG_UNIDIRECTIONAL_ENABLE_OFFS) + +#define MVPP2_PORT_SERIAL_PARAM_CFG_RETRANSMIT_COLLISION_DOMAIN_OFFS 1 +#define MVPP2_PORT_SERIAL_PARAM_CFG_RETRANSMIT_COLLISION_DOMAIN_MASK \ + (0x00000001 << \ + MVPP2_PORT_SERIAL_PARAM_CFG_RETRANSMIT_COLLISION_DOMAIN_OFFS) + +#define MVPP2_PORT_SERIAL_PARAM_CFG_PUMA2_BTS1444_EN_OFFS 2 +#define MVPP2_PORT_SERIAL_PARAM_CFG_PUMA2_BTS1444_EN_MASK \ + (0x00000001 << \ + MVPP2_PORT_SERIAL_PARAM_CFG_PUMA2_BTS1444_EN_OFFS) + +#define MVPP2_PORT_SERIAL_PARAM_CFG_FORWARD_802_3X_FC_EN_OFFS 3 +#define MVPP2_PORT_SERIAL_PARAM_CFG_FORWARD_802_3X_FC_EN_MASK \ + (0x00000001 << \ + MVPP2_PORT_SERIAL_PARAM_CFG_FORWARD_802_3X_FC_EN_OFFS) + +#define MVPP2_PORT_SERIAL_PARAM_CFG_BP_EN_OFFS 4 +#define MVPP2_PORT_SERIAL_PARAM_CFG_BP_EN_MASK \ + (0x00000001 << MVPP2_PORT_SERIAL_PARAM_CFG_BP_EN_OFFS) + +#define MVPP2_PORT_SERIAL_PARAM_CFG_RX_NEGEDGE_SAMPLE_EN_OFFS 5 +#define MVPP2_PORT_SERIAL_PARAM_CFG_RX_NEGEDGE_SAMPLE_EN_MASK \ + (0x00000001 << \ + MVPP2_PORT_SERIAL_PARAM_CFG_RX_NEGEDGE_SAMPLE_EN_OFFS) + +#define MVPP2_PORT_SERIAL_PARAM_CFG_COL_DOMAIN_LIMIT_OFFS 6 +#define MVPP2_PORT_SERIAL_PARAM_CFG_COL_DOMAIN_LIMIT_MASK \ + (0x0000003f << \ + MVPP2_PORT_SERIAL_PARAM_CFG_COL_DOMAIN_LIMIT_OFFS) + +#define MVPP2_PORT_SERIAL_PARAM_CFG_PERIODIC_TYPE_SELECT_OFFS 12 +#define MVPP2_PORT_SERIAL_PARAM_CFG_PERIODIC_TYPE_SELECT_MASK \ + (0x00000001 << \ + MVPP2_PORT_SERIAL_PARAM_CFG_PERIODIC_TYPE_SELECT_OFFS) + +#define MVPP2_PORT_SERIAL_PARAM_CFG_PER_PRIORITY_FC_EN_OFFS 13 +#define MVPP2_PORT_SERIAL_PARAM_CFG_PER_PRIORITY_FC_EN_MASK \ + (0x00000001 << \ + MVPP2_PORT_SERIAL_PARAM_CFG_PER_PRIORITY_FC_EN_OFFS) + +#define MVPP2_PORT_SERIAL_PARAM_CFG_TX_STANDARD_PRBS7_OFFS 14 +#define MVPP2_PORT_SERIAL_PARAM_CFG_TX_STANDARD_PRBS7_MASK \ + (0x00000001 << \ + MVPP2_PORT_SERIAL_PARAM_CFG_TX_STANDARD_PRBS7_OFFS) + +#define MVPP2_PORT_SERIAL_PARAM_CFG_REVERSE_PRBS_RX_OFFS 15 +#define 
MVPP2_PORT_SERIAL_PARAM_CFG_REVERSE_PRBS_RX_MASK \ + (0x00000001 << \ + MVPP2_PORT_SERIAL_PARAM_CFG_REVERSE_PRBS_RX_OFFS) + + +/* Port Fifo Configuration 0 */ +#define MVPP2_PORT_FIFO_CFG_0_REG (0x0018) +#define MVPP2_PORT_FIFO_CFG_0_TX_FIFO_HIGH_WM_OFFS 0 +#define MVPP2_PORT_FIFO_CFG_0_TX_FIFO_HIGH_WM_MASK \ + (0x000000ff << MVPP2_PORT_FIFO_CFG_0_TX_FIFO_HIGH_WM_OFFS) + +#define MVPP2_PORT_FIFO_CFG_0_TX_FIFO_LOW_WM_OFFS 8 +#define MVPP2_PORT_FIFO_CFG_0_TX_FIFO_LOW_WM_MASK \ + (0x000000ff << MVPP2_PORT_FIFO_CFG_0_TX_FIFO_LOW_WM_OFFS) + + +/* Port Fifo Configuration 1 */ +#define MVPP2_PORT_FIFO_CFG_1_REG (0x001c) +#define MVPP2_PORT_FIFO_CFG_1_RX_FIFO_MAX_TH_OFFS 0 +#define MVPP2_PORT_FIFO_CFG_1_RX_FIFO_MAX_TH_MASK \ + (0x0000003f << MVPP2_PORT_FIFO_CFG_1_RX_FIFO_MAX_TH_OFFS) + +#define MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS 6 +#define MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_MASK \ + (0x000000ff << MVPP2_PORT_FIFO_CFG_1_TX_FIFO_MIN_TH_OFFS) + +#define MVPP2_PORT_FIFO_CFG_1_PORT_EN_FIX_EN_OFFS 15 +#define MVPP2_PORT_FIFO_CFG_1_PORT_EN_FIX_EN_MASK \ + (0x00000001 << MVPP2_PORT_FIFO_CFG_1_PORT_EN_FIX_EN_OFFS) + + +/* Port Serdes Configuration0 */ +#define MVPP2_PORT_SERDES_CFG0_REG (0x0028) +#define MVPP2_PORT_SERDES_CFG0_SERDESRESET_OFFS 0 +#define MVPP2_PORT_SERDES_CFG0_SERDESRESET_MASK \ + (0x00000001 << MVPP2_PORT_SERDES_CFG0_SERDESRESET_OFFS) + +#define MVPP2_PORT_SERDES_CFG0_PU_TX_OFFS 1 +#define MVPP2_PORT_SERDES_CFG0_PU_TX_MASK \ + (0x00000001 << MVPP2_PORT_SERDES_CFG0_PU_TX_OFFS) + +#define MVPP2_PORT_SERDES_CFG0_PU_RX_OFFS 2 +#define MVPP2_PORT_SERDES_CFG0_PU_RX_MASK \ + (0x00000001 << MVPP2_PORT_SERDES_CFG0_PU_RX_OFFS) + +#define MVPP2_PORT_SERDES_CFG0_PU_PLL_OFFS 3 +#define MVPP2_PORT_SERDES_CFG0_PU_PLL_MASK \ + (0x00000001 << MVPP2_PORT_SERDES_CFG0_PU_PLL_OFFS) + +#define MVPP2_PORT_SERDES_CFG0_PU_IVREF_OFFS 4 +#define MVPP2_PORT_SERDES_CFG0_PU_IVREF_MASK \ + (0x00000001 << MVPP2_PORT_SERDES_CFG0_PU_IVREF_OFFS) + +#define MVPP2_PORT_SERDES_CFG0_TESTEN_OFFS 5 +#define MVPP2_PORT_SERDES_CFG0_TESTEN_MASK \ + (0x00000001 << MVPP2_PORT_SERDES_CFG0_TESTEN_OFFS) + +#define MVPP2_PORT_SERDES_CFG0_DPHER_EN_OFFS 6 +#define MVPP2_PORT_SERDES_CFG0_DPHER_EN_MASK \ + (0x00000001 << MVPP2_PORT_SERDES_CFG0_DPHER_EN_OFFS) + +#define MVPP2_PORT_SERDES_CFG0_RUDI_INVALID_ENABLE_OFFS 7 +#define MVPP2_PORT_SERDES_CFG0_RUDI_INVALID_ENABLE_MASK \ + (0x00000001 << \ + MVPP2_PORT_SERDES_CFG0_RUDI_INVALID_ENABLE_OFFS) + +#define MVPP2_PORT_SERDES_CFG0_ACK_OVERRIDE_ENABLE_OFFS 8 +#define MVPP2_PORT_SERDES_CFG0_ACK_OVERRIDE_ENABLE_MASK \ + (0x00000001 << \ + MVPP2_PORT_SERDES_CFG0_ACK_OVERRIDE_ENABLE_OFFS) + +#define MVPP2_PORT_SERDES_CFG0_CONFIG_WORD_ENABLE_OFFS 9 +#define MVPP2_PORT_SERDES_CFG0_CONFIG_WORD_ENABLE_MASK \ + (0x00000001 << MVPP2_PORT_SERDES_CFG0_CONFIG_WORD_ENABLE_OFFS) + +#define MVPP2_PORT_SERDES_CFG0_SYNC_FAIL_INT_ENABLE_OFFS 10 +#define MVPP2_PORT_SERDES_CFG0_SYNC_FAIL_INT_ENABLE_MASK \ + (0x00000001 << \ + MVPP2_PORT_SERDES_CFG0_SYNC_FAIL_INT_ENABLE_OFFS) + +#define MVPP2_PORT_SERDES_CFG0_MASTER_MODE_ENABLE_OFFS 11 +#define MVPP2_PORT_SERDES_CFG0_MASTER_MODE_ENABLE_MASK \ + (0x00000001 << MVPP2_PORT_SERDES_CFG0_MASTER_MODE_ENABLE_OFFS) + +#define MVPP2_PORT_SERDES_CFG0_TERM75_TX_OFFS 12 +#define MVPP2_PORT_SERDES_CFG0_TERM75_TX_MASK \ + (0x00000001 << MVPP2_PORT_SERDES_CFG0_TERM75_TX_OFFS) + +#define MVPP2_PORT_SERDES_CFG0_OUTAMP_OFFS 13 +#define MVPP2_PORT_SERDES_CFG0_OUTAMP_MASK \ + (0x00000001 << MVPP2_PORT_SERDES_CFG0_OUTAMP_OFFS) + +#define MVPP2_PORT_SERDES_CFG0_BTS712_FIX_EN_OFFS 
14
+#define MVPP2_PORT_SERDES_CFG0_BTS712_FIX_EN_MASK \
+		(0x00000001 << MVPP2_PORT_SERDES_CFG0_BTS712_FIX_EN_OFFS)
+
+#define MVPP2_PORT_SERDES_CFG0_BTS156_FIX_EN_OFFS	15
+#define MVPP2_PORT_SERDES_CFG0_BTS156_FIX_EN_MASK \
+		(0x00000001 << MVPP2_PORT_SERDES_CFG0_BTS156_FIX_EN_OFFS)
+
+/* Port Serdes Configuration1 */
+#define MVPP2_PORT_SERDES_CFG1_REG		(0x002c)
+#define MVPP2_PORT_SERDES_CFG1_SMII_RX_10MB_CLK_EDGE_SEL_OFFS	0
+#define MVPP2_PORT_SERDES_CFG1_SMII_RX_10MB_CLK_EDGE_SEL_MASK \
+		(0x00000001 << \
+		MVPP2_PORT_SERDES_CFG1_SMII_RX_10MB_CLK_EDGE_SEL_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_SMII_TX_10MB_CLK_EDGE_SEL_OFFS	1
+#define MVPP2_GMAC_PORT_SERDES_CFG1_SMII_TX_10MB_CLK_EDGE_SEL_MASK \
+		(0x00000001 << \
+		MVPP2_GMAC_PORT_SERDES_CFG1_SMII_TX_10MB_CLK_EDGE_SEL_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_MEN_OFFS	2
+#define MVPP2_GMAC_PORT_SERDES_CFG1_MEN_MASK \
+		(0x00000003 << MVPP2_GMAC_PORT_SERDES_CFG1_MEN_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_VCMS_OFFS	4
+#define MVPP2_GMAC_PORT_SERDES_CFG1_VCMS_MASK \
+		(0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_VCMS_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_USE_SIGDET_OFFS	5
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_USE_SIGDET_MASK \
+		(0x00000001 << \
+		MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_USE_SIGDET_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_EN_CRS_MASK_TX_OFFS	6
+#define MVPP2_GMAC_PORT_SERDES_CFG1_EN_CRS_MASK_TX_MASK \
+		(0x00000001 << \
+		MVPP2_GMAC_PORT_SERDES_CFG1_EN_CRS_MASK_TX_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_ENABLE_OFFS	7
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_ENABLE_MASK \
+		(0x00000001 << MVPP2_GMAC_PORT_SERDES_CFG1_100FX_ENABLE_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_PHY_ADDRESS_OFFS	8
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_PHY_ADDRESS_MASK \
+		(0x0000001f << \
+		MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_PHY_ADDRESS_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SIGDET_POLARITY_OFFS	13
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SIGDET_POLARITY_MASK \
+		(0x00000001 << \
+		MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SIGDET_POLARITY_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_INTERRUPT_POLARITY_OFFS	14
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_INTERRUPT_POLARITY_MASK \
+		(0x00000001 << \
+		MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_INTERRUPT_POLARITY_OFFS)
+
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SERDES_POLARITY_OFFS	15
+#define MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SERDES_POLARITY_MASK \
+		(0x00000001 << \
+		MVPP2_GMAC_PORT_SERDES_CFG1_100FX_PCS_SERDES_POLARITY_OFFS)
+
+/* Port Serdes Configuration2 */
+#define MVPP2_PORT_SERDES_CFG2_REG		(0x0030)
+#define MVPP2_PORT_SERDES_CFG2_AN_ADV_CONFIGURATION_OFFS	0
+#define MVPP2_PORT_SERDES_CFG2_AN_ADV_CONFIGURATION_MASK \
+		(0x0000ffff << \
+		MVPP2_PORT_SERDES_CFG2_AN_ADV_CONFIGURATION_OFFS)
+
+/* Port Serdes Configuration3 */
+#define MVPP2_PORT_SERDES_CFG3_REG		(0x0034)
+#define MVPP2_PORT_SERDES_CFG3_ABILITY_MATCH_STATUS_OFFS	0
+#define MVPP2_PORT_SERDES_CFG3_ABILITY_MATCH_STATUS_MASK \
+		(0x0000ffff << \
+		MVPP2_PORT_SERDES_CFG3_ABILITY_MATCH_STATUS_OFFS)
+
+/* Port Prbs Status */
+#define MVPP2_PORT_PRBS_STATUS_REG		(0x0038)
+#define MVPP2_PORT_PRBS_STATUS_PRBSCHECK_LOCKED_OFFS	0
+#define MVPP2_PORT_PRBS_STATUS_PRBSCHECK_LOCKED_MASK \
+		(0x00000001 << MVPP2_PORT_PRBS_STATUS_PRBSCHECK_LOCKED_OFFS)
+
+#define MVPP2_PORT_PRBS_STATUS_PRBSCHECKRDY_OFFS	1
+#define MVPP2_PORT_PRBS_STATUS_PRBSCHECKRDY_MASK \
+		(0x00000001 << MVPP2_PORT_PRBS_STATUS_PRBSCHECKRDY_OFFS)
+
+/* Port Prbs
Error Counter */ +#define MVPP2_PORT_PRBS_ERR_CNTR_REG (0x003c) +#define MVPP2_PORT_PRBS_ERR_CNTR_PRBSBITERRCNT_OFFS 0 +#define MVPP2_PORT_PRBS_ERR_CNTR_PRBSBITERRCNT_MASK \ + (0x0000ffff << MVPP2_PORT_PRBS_ERR_CNTR_PRBSBITERRCNT_OFFS) + + +/* Port Status1 */ +#define MVPP2_PORT_STATUS1_REG (0x0040) +#define MVPP2_PORT_STATUS1_MEDIAACTIVE_OFFS 0 +#define MVPP2_PORT_STATUS1_MEDIAACTIVE_MASK \ + (0x00000001 << MVPP2_PORT_STATUS1_MEDIAACTIVE_OFFS) + + +/* Port Mib Counters Control */ +#define MVPP2_PORT_MIB_CNTRS_CTRL_REG (0x0044) +#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_COPY_TRIGGER_OFFS 0 +#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_COPY_TRIGGER_MASK \ + (0x00000001 << \ + MVPP2_PORT_MIB_CNTRS_CTRL_MIB_COPY_TRIGGER_OFFS) + +#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_CLEAR_ON_READ__OFFS 1 +#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_CLEAR_ON_READ__MASK \ + (0x00000001 << \ + MVPP2_PORT_MIB_CNTRS_CTRL_MIB_CLEAR_ON_READ__OFFS) + +#define MVPP2_PORT_MIB_CNTRS_CTRL_RX_HISTOGRAM_EN_OFFS 2 +#define MVPP2_PORT_MIB_CNTRS_CTRL_RX_HISTOGRAM_EN_MASK \ + (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_RX_HISTOGRAM_EN_OFFS) + +#define MVPP2_PORT_MIB_CNTRS_CTRL_TX_HISTOGRAM_EN_OFFS 3 +#define MVPP2_PORT_MIB_CNTRS_CTRL_TX_HISTOGRAM_EN_MASK \ + (0x00000001 << MVPP2_PORT_MIB_CNTRS_CTRL_TX_HISTOGRAM_EN_OFFS) + +#define MVPP2_PORT_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS 4 +#define MVPP2_PORT_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__MASK \ + (0x00000001 << \ + MVPP2_PORT_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS) + +#define MVPP2_PORT_MIB_CNTRS_CTRL_XCAT_BTS_340_EN__OFFS 5 +#define MVPP2_PORT_MIB_CNTRS_CTRL_XCAT_BTS_340_EN__MASK \ + (0x00000001 << \ + MVPP2_PORT_MIB_CNTRS_CTRL_XCAT_BTS_340_EN__OFFS) + +#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS 6 +#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_MASK \ + (0x00000001 << \ + MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS) + +#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS 7 +#define MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_MASK \ + (0x00000001 << \ + MVPP2_PORT_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS) + + +/* Port Mac Control3 */ +#define MVPP2_PORT_CTRL3_REG (0x0048) +#define MVPP2_PORT_CTRL3_BUF_SIZE_OFFS 0 +#define MVPP2_PORT_CTRL3_BUF_SIZE_MASK \ + (0x0000003f << MVPP2_PORT_CTRL3_BUF_SIZE_OFFS) + +#define MVPP2_PORT_CTRL3_IPG_DATA_OFFS 6 +#define MVPP2_PORT_CTRL3_IPG_DATA_MASK \ + (0x000001ff << MVPP2_PORT_CTRL3_IPG_DATA_OFFS) + +#define MVPP2_PORT_CTRL3_LLFC_GLOBAL_FC_ENABLE_OFFS 15 +#define MVPP2_PORT_CTRL3_LLFC_GLOBAL_FC_ENABLE_MASK \ + (0x00000001 << MVPP2_PORT_CTRL3_LLFC_GLOBAL_FC_ENABLE_OFFS) +#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK 0xff + +/* Port Mac Control4 */ +#define MVPP2_PORT_CTRL4_REG (0x0090) +#define MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_OFFS 0 +#define MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_MASK \ + (0x00000001 << MVPP2_PORT_CTRL4_EXT_PIN_GMII_SEL_OFFS) + +#define MVPP2_PORT_CTRL4_PREAMBLE_FIX_OFFS 1 +#define MVPP2_PORT_CTRL4_PREAMBLE_FIX_MASK \ + (0x00000001 << MVPP2_PORT_CTRL4_PREAMBLE_FIX_OFFS) + +#define MVPP2_PORT_CTRL4_SQ_DETECT_FIX_EN_OFFS 2 +#define MVPP2_PORT_CTRL4_SQ_DETECT_FIX_EN_MASK \ + (0x00000001 << MVPP2_PORT_CTRL4_SQ_DETECT_FIX_EN_OFFS) + +#define MVPP2_PORT_CTRL4_FC_EN_RX_OFFS 3 +#define MVPP2_PORT_CTRL4_FC_EN_RX_MASK \ + (0x00000001 << MVPP2_PORT_CTRL4_FC_EN_RX_OFFS) + +#define MVPP2_PORT_CTRL4_FC_EN_TX_OFFS 4 +#define MVPP2_PORT_CTRL4_FC_EN_TX_MASK \ + (0x00000001 << MVPP2_PORT_CTRL4_FC_EN_TX_OFFS) + +#define MVPP2_PORT_CTRL4_DP_CLK_SEL_OFFS 5 +#define MVPP2_PORT_CTRL4_DP_CLK_SEL_MASK \ + (0x00000001 << 
MVPP2_PORT_CTRL4_DP_CLK_SEL_OFFS)
+
+#define MVPP2_PORT_CTRL4_SYNC_BYPASS_OFFS	6
+#define MVPP2_PORT_CTRL4_SYNC_BYPASS_MASK \
+		(0x00000001 << MVPP2_PORT_CTRL4_SYNC_BYPASS_OFFS)
+
+#define MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_OFFS	7
+#define MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_MASK \
+		(0x00000001 << MVPP2_PORT_CTRL4_QSGMII_BYPASS_ACTIVE_OFFS)
+
+#define MVPP2_PORT_CTRL4_COUNT_EXTERNAL_FC_EN_OFFS	8
+#define MVPP2_PORT_CTRL4_COUNT_EXTERNAL_FC_EN_MASK \
+		(0x00000001 << MVPP2_PORT_CTRL4_COUNT_EXTERNAL_FC_EN_OFFS)
+
+#define MVPP2_PORT_CTRL4_MARVELL_HEADER_EN_OFFS	9
+#define MVPP2_PORT_CTRL4_MARVELL_HEADER_EN_MASK \
+		(0x00000001 << MVPP2_PORT_CTRL4_MARVELL_HEADER_EN_OFFS)
+
+#define MVPP2_PORT_CTRL4_LEDS_NUMBER_OFFS	10
+#define MVPP2_PORT_CTRL4_LEDS_NUMBER_MASK \
+		(0x0000003f << MVPP2_PORT_CTRL4_LEDS_NUMBER_OFFS)
+
+/* Descriptor ring Macros */
+#define MVPP2_QUEUE_NEXT_DESC(q, index) \
+	(((index) < (q)->last_desc) ? ((index) + 1) : 0)
+
+/* Various constants */
+
+/* Coalescing */
+#define MVPP2_TXDONE_COAL_PKTS_THRESH	15
+#define MVPP2_TXDONE_HRTIMER_PERIOD_NS	1000000UL
+#define MVPP2_RX_COAL_PKTS		32
+#define MVPP2_RX_COAL_USEC		100
+
+/* The two bytes Marvell header. Either contains a special value used
+ * by Marvell switches when a specific hardware mode is enabled (not
+ * supported by this driver) or is filled automatically with zeroes on
+ * the RX side. Since those two bytes sit at the front of the Ethernet
+ * header, they leave the IP header aligned on a 4-byte boundary
+ * automatically: the hardware skips them on its own.
+ */
+#define MVPP2_MH_SIZE			2
+#define MVPP2_ETH_TYPE_LEN		2
+#define MVPP2_PPPOE_HDR_SIZE		8
+#define MVPP2_VLAN_TAG_LEN		4
+
+/* Lbtd 802.3 type */
+#define MVPP2_IP_LBDT_TYPE		0xfffa
+
+#define MVPP2_CPU_D_CACHE_LINE_SIZE	32
+#define MVPP2_TX_CSUM_MAX_SIZE		9800
+
+/* Timeout constants */
+#define MVPP2_TX_DISABLE_TIMEOUT_MSEC	1000
+#define MVPP2_TX_PENDING_TIMEOUT_MSEC	1000
+
+#define MVPP2_TX_MTU_MAX		0x7ffff
+
+/* Maximum number of T-CONTs of PON port */
+#define MVPP2_MAX_TCONT			16
+
+/* Maximum number of supported ports */
+#define MVPP2_MAX_PORTS			4
+
+/* Maximum number of TXQs used by a single port */
+#define MVPP2_MAX_TXQ			8
+
+/* Maximum number of RXQs used by a single port */
+#define MVPP2_MAX_RXQ			8
+
+/* Default number of RXQs in use */
+#define MVPP2_DEFAULT_RXQ		4
+
+/* Total number of RXQs available to all ports */
+#define MVPP2_RXQ_TOTAL_NUM		(MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
+
+/* Max number of Rx descriptors */
+#define MVPP2_MAX_RXD			32
+
+/* Max number of Tx descriptors */
+#define MVPP2_MAX_TXD			32
+
+/* Number of Tx descriptors that can be reserved at once by the CPU */
+#define MVPP2_CPU_DESC_CHUNK		64
+
+/* Max number of Tx descriptors in each aggregated queue */
+#define MVPP2_AGGR_TXQ_SIZE		256
+
+/* Descriptor aligned size */
+#define MVPP2_DESC_ALIGNED_SIZE		32
+
+/* Descriptor alignment mask */
+#define MVPP2_TX_DESC_ALIGN		(MVPP2_DESC_ALIGNED_SIZE - 1)
+
+/* RX FIFO constants */
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE	0x2000
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE	0x80
+#define MVPP2_RX_FIFO_PORT_MIN_PKT	0x80
+
+#define MVPP2_BIT_TO_BYTE(bit)		((bit) / 8)
+
+/* IPv6 max L3 address size */
+#define MVPP2_MAX_L3_ADDR_SIZE		16
+
+/* Port flags */
+#define MVPP2_F_LOOPBACK		BIT(0)
+
+/* Marvell tag types */
+enum mvpp2_tag_type {
+	MVPP2_TAG_TYPE_NONE = 0,
+	MVPP2_TAG_TYPE_MH = 1,
+	MVPP2_TAG_TYPE_DSA = 2,
+	MVPP2_TAG_TYPE_EDSA = 3,
+	MVPP2_TAG_TYPE_VLAN = 4,
+	MVPP2_TAG_TYPE_LAST = 5
+};
+
+/* Parser constants */
+#define
MVPP2_PRS_TCAM_SRAM_SIZE 256 +#define MVPP2_PRS_TCAM_WORDS 6 +#define MVPP2_PRS_SRAM_WORDS 4 +#define MVPP2_PRS_FLOW_ID_SIZE 64 +#define MVPP2_PRS_FLOW_ID_MASK 0x3f +#define MVPP2_PRS_TCAM_ENTRY_INVALID 1 +#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT BIT(5) +#define MVPP2_PRS_IPV4_HEAD 0x40 +#define MVPP2_PRS_IPV4_HEAD_MASK 0xf0 +#define MVPP2_PRS_IPV4_MC 0xe0 +#define MVPP2_PRS_IPV4_MC_MASK 0xf0 +#define MVPP2_PRS_IPV4_BC_MASK 0xff +#define MVPP2_PRS_IPV4_IHL 0x5 +#define MVPP2_PRS_IPV4_IHL_MASK 0xf +#define MVPP2_PRS_IPV6_MC 0xff +#define MVPP2_PRS_IPV6_MC_MASK 0xff +#define MVPP2_PRS_IPV6_HOP_MASK 0xff +#define MVPP2_PRS_TCAM_PROTO_MASK 0xff +#define MVPP2_PRS_TCAM_PROTO_MASK_L 0x3f +#define MVPP2_PRS_DBL_VLANS_MAX 100 + +/* Tcam structure: + * - lookup ID - 4 bits + * - port ID - 1 byte + * - additional information - 1 byte + * - header data - 8 bytes + * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0). + */ +#define MVPP2_PRS_AI_BITS 8 +#define MVPP2_PRS_PORT_MASK 0xff +#define MVPP2_PRS_LU_MASK 0xf +#define MVPP2_PRS_TCAM_DATA_BYTE(offs) \ + (((offs) - ((offs) % 2)) * 2 + ((offs) % 2)) +#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs) \ + (((offs) * 2) - ((offs) % 2) + 2) +#define MVPP2_PRS_TCAM_AI_BYTE 16 +#define MVPP2_PRS_TCAM_PORT_BYTE 17 +#define MVPP2_PRS_TCAM_LU_BYTE 20 +#define MVPP2_PRS_TCAM_EN_OFFS(offs) ((offs) + 2) +#define MVPP2_PRS_TCAM_INV_WORD 5 +/* Tcam entries ID */ +#define MVPP2_PE_DROP_ALL 0 +#define MVPP2_PE_FIRST_FREE_TID 1 +#define MVPP2_PE_LAST_FREE_TID (MVPP2_PRS_TCAM_SRAM_SIZE - 31) +#define MVPP2_PE_IP6_EXT_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 30) +#define MVPP2_PE_MAC_MC_IP6 (MVPP2_PRS_TCAM_SRAM_SIZE - 29) +#define MVPP2_PE_IP6_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 28) +#define MVPP2_PE_IP4_ADDR_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 27) +#define MVPP2_PE_LAST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 26) +#define MVPP2_PE_FIRST_DEFAULT_FLOW (MVPP2_PRS_TCAM_SRAM_SIZE - 19) +#define MVPP2_PE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 18) +#define MVPP2_PE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 17) +#define MVPP2_PE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 16) +#define MVPP2_PE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 15) +#define MVPP2_PE_ETYPE_EDSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 14) +#define MVPP2_PE_ETYPE_EDSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 13) +#define MVPP2_PE_ETYPE_DSA_TAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 12) +#define MVPP2_PE_ETYPE_DSA_UNTAGGED (MVPP2_PRS_TCAM_SRAM_SIZE - 11) +#define MVPP2_PE_MH_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 10) +#define MVPP2_PE_DSA_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 9) +#define MVPP2_PE_IP6_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 8) +#define MVPP2_PE_IP4_PROTO_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 7) +#define MVPP2_PE_ETH_TYPE_UN (MVPP2_PRS_TCAM_SRAM_SIZE - 6) +#define MVPP2_PE_VLAN_DBL (MVPP2_PRS_TCAM_SRAM_SIZE - 5) +#define MVPP2_PE_VLAN_NONE (MVPP2_PRS_TCAM_SRAM_SIZE - 4) +#define MVPP2_PE_MAC_MC_ALL (MVPP2_PRS_TCAM_SRAM_SIZE - 3) +#define MVPP2_PE_MAC_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 2) +#define MVPP2_PE_MAC_NON_PROMISCUOUS (MVPP2_PRS_TCAM_SRAM_SIZE - 1) + +/* Sram structure + * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0). 
+ */ +#define MVPP2_PRS_SRAM_RI_OFFS 0 +#define MVPP2_PRS_SRAM_RI_WORD 0 +#define MVPP2_PRS_SRAM_RI_CTRL_OFFS 32 +#define MVPP2_PRS_SRAM_RI_CTRL_WORD 1 +#define MVPP2_PRS_SRAM_RI_CTRL_BITS 32 +#define MVPP2_PRS_SRAM_SHIFT_OFFS 64 +#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT 72 +#define MVPP2_PRS_SRAM_UDF_OFFS 73 +#define MVPP2_PRS_SRAM_UDF_BITS 8 +#define MVPP2_PRS_SRAM_UDF_MASK 0xff +#define MVPP2_PRS_SRAM_UDF_SIGN_BIT 81 +#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS 82 +#define MVPP2_PRS_SRAM_UDF_TYPE_MASK 0x7 +#define MVPP2_PRS_SRAM_UDF_TYPE_L3 1 +#define MVPP2_PRS_SRAM_UDF_TYPE_L4 4 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS 85 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK 0x3 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD 1 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD 2 +#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD 3 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS 87 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS 2 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK 0x3 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD 0 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD 2 +#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD 3 +#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS 89 +#define MVPP2_PRS_SRAM_AI_OFFS 90 +#define MVPP2_PRS_SRAM_AI_CTRL_OFFS 98 +#define MVPP2_PRS_SRAM_AI_CTRL_BITS 8 +#define MVPP2_PRS_SRAM_AI_MASK 0xff +#define MVPP2_PRS_SRAM_NEXT_LU_OFFS 106 +#define MVPP2_PRS_SRAM_NEXT_LU_MASK 0xf +#define MVPP2_PRS_SRAM_LU_DONE_BIT 110 +#define MVPP2_PRS_SRAM_LU_GEN_BIT 111 + +/* Sram result info bits assignment */ +#define MVPP2_PRS_RI_MAC_ME_MASK 0x1 +#define MVPP2_PRS_RI_DSA_MASK 0x2 +#define MVPP2_PRS_RI_VLAN_MASK 0xc +#define MVPP2_PRS_RI_VLAN_NONE ~(BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_VLAN_SINGLE BIT(2) +#define MVPP2_PRS_RI_VLAN_DOUBLE BIT(3) +#define MVPP2_PRS_RI_VLAN_TRIPLE (BIT(2) | BIT(3)) +#define MVPP2_PRS_RI_CPU_CODE_MASK 0x70 +#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC BIT(4) +#define MVPP2_PRS_RI_L2_CAST_MASK 0x600 +#define MVPP2_PRS_RI_L2_UCAST ~(BIT(9) | BIT(10)) +#define MVPP2_PRS_RI_L2_MCAST BIT(9) +#define MVPP2_PRS_RI_L2_BCAST BIT(10) +#define MVPP2_PRS_RI_PPPOE_MASK 0x800 +#define MVPP2_PRS_RI_L3_PROTO_MASK 0x7000 +#define MVPP2_PRS_RI_L3_UN ~(BIT(12) | BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_IP4 BIT(12) +#define MVPP2_PRS_RI_L3_IP4_OPT BIT(13) +#define MVPP2_PRS_RI_L3_IP4_OTHER (BIT(12) | BIT(13)) +#define MVPP2_PRS_RI_L3_IP6 BIT(14) +#define MVPP2_PRS_RI_L3_IP6_EXT (BIT(12) | BIT(14)) +#define MVPP2_PRS_RI_L3_ARP (BIT(13) | BIT(14)) +#define MVPP2_PRS_RI_L3_ADDR_MASK 0x18000 +#define MVPP2_PRS_RI_L3_UCAST ~(BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_L3_MCAST BIT(15) +#define MVPP2_PRS_RI_L3_BCAST (BIT(15) | BIT(16)) +#define MVPP2_PRS_RI_IP_FRAG_MASK 0x20000 +#define MVPP2_PRS_RI_UDF3_MASK 0x300000 +#define MVPP2_PRS_RI_UDF3_RX_SPECIAL BIT(21) +#define MVPP2_PRS_RI_L4_PROTO_MASK 0x1c00000 +#define MVPP2_PRS_RI_L4_TCP BIT(22) +#define MVPP2_PRS_RI_L4_UDP BIT(23) +#define MVPP2_PRS_RI_L4_OTHER (BIT(22) | BIT(23)) +#define MVPP2_PRS_RI_UDF7_MASK 0x60000000 +#define MVPP2_PRS_RI_UDF7_IP6_LITE BIT(29) +#define MVPP2_PRS_RI_DROP_MASK 0x80000000 + +/* Sram additional info bits assignment */ +#define MVPP2_PRS_IPV4_DIP_AI_BIT BIT(0) +#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT BIT(0) +#define MVPP2_PRS_IPV6_EXT_AI_BIT BIT(1) +#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT BIT(2) +#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT BIT(3) +#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT BIT(4) +#define MVPP2_PRS_SINGLE_VLAN_AI 0 +#define MVPP2_PRS_DBL_VLAN_AI_BIT BIT(7) + +/* DSA/EDSA type */ +#define MVPP2_PRS_TAGGED MV_TRUE +#define MVPP2_PRS_UNTAGGED MV_FALSE +#define 
MVPP2_PRS_EDSA MV_TRUE +#define MVPP2_PRS_DSA MV_FALSE + +/* MAC entries, shadow udf */ +enum mvpp2_prs_udf { + MVPP2_PRS_UDF_MAC_DEF, + MVPP2_PRS_UDF_MAC_RANGE, + MVPP2_PRS_UDF_L2_DEF, + MVPP2_PRS_UDF_L2_DEF_COPY, + MVPP2_PRS_UDF_L2_USER, +}; + +/* Lookup ID */ +enum mvpp2_prs_lookup { + MVPP2_PRS_LU_MH, + MVPP2_PRS_LU_MAC, + MVPP2_PRS_LU_DSA, + MVPP2_PRS_LU_VLAN, + MVPP2_PRS_LU_L2, + MVPP2_PRS_LU_PPPOE, + MVPP2_PRS_LU_IP4, + MVPP2_PRS_LU_IP6, + MVPP2_PRS_LU_FLOWS, + MVPP2_PRS_LU_LAST, +}; + +/* L3 cast enum */ +enum mvpp2_prs_l3_cast { + MVPP2_PRS_L3_UNI_CAST, + MVPP2_PRS_L3_MULTI_CAST, + MVPP2_PRS_L3_BROAD_CAST +}; +/* Classifier constants */ +#define MVPP2_CLS_FLOWS_TBL_SIZE 512 +#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3 +#define MVPP2_CLS_LKP_TBL_SIZE 64 + +/* BM constants */ +#define MVPP2_BM_POOLS_NUM 8 +#define MVPP2_BM_LONG_BUF_NUM 1024 +#define MVPP2_BM_SHORT_BUF_NUM 2048 +#define MVPP2_BM_POOL_SIZE_MAX (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4) +#define MVPP2_BM_POOL_PTR_ALIGN 128 +#define MVPP2_BM_SWF_LONG_POOL(port) ((port > 2) ? 2 : port) +#define MVPP2_BM_SWF_SHORT_POOL 3 + +/* BM cookie (32 bits) definition */ +#define MVPP2_BM_COOKIE_POOL_OFFS 8 +#define MVPP2_BM_COOKIE_CPU_OFFS 24 + +/* BM short pool packet size + * These value assure that for SWF the total number + * of bytes allocated for each buffer will be 512 + */ +#define MVPP2_BM_SHORT_PKT_SIZE MVPP2_RX_MAX_PKT_SIZE(512) + +/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the + * layout of the transmit and reception DMA descriptors, and their + * layout is therefore defined by the hardware design + */ + +#define MVPP2_TXD_L3_OFF_SHIFT 0 +#define MVPP2_TXD_IP_HLEN_SHIFT 8 +#define MVPP2_TXD_L4_CSUM_FRAG BIT(13) +#define MVPP2_TXD_L4_CSUM_NOT BIT(14) +#define MVPP2_TXD_IP_CSUM_DISABLE BIT(15) +#define MVPP2_TXD_PADDING_DISABLE BIT(23) +#define MVPP2_TXD_L4_UDP BIT(24) +#define MVPP2_TXD_L3_IP6 BIT(26) +#define MVPP2_TXD_L_DESC BIT(28) +#define MVPP2_TXD_F_DESC BIT(29) + +#define MVPP2_RXD_ERR_SUMMARY BIT(15) +#define MVPP2_RXD_ERR_CODE_MASK (BIT(13) | BIT(14)) +#define MVPP2_RXD_ERR_CRC 0x0 +#define MVPP2_RXD_ERR_OVERRUN BIT(13) +#define MVPP2_RXD_ERR_RESOURCE (BIT(13) | BIT(14)) +#define MVPP2_RXD_BM_POOL_ID_OFFS 16 +#define MVPP2_RXD_BM_POOL_ID_MASK (BIT(16) | BIT(17) | BIT(18)) +#define MVPP2_RXD_HWF_SYNC BIT(21) +#define MVPP2_RXD_L4_CSUM_OK BIT(22) +#define MVPP2_RXD_IP4_HEADER_ERR BIT(24) +#define MVPP2_RXD_L4_TCP BIT(25) +#define MVPP2_RXD_L4_UDP BIT(26) +#define MVPP2_RXD_L3_IP4 BIT(28) +#define MVPP2_RXD_L3_IP6 BIT(30) +#define MVPP2_RXD_BUF_HDR BIT(31) + +struct mvpp2_tx_desc { + MV_U32 command; /* Options used by HW for packet transmitting.*/ + MV_U8 packet_offset; /* the offset from the buffer beginning */ + MV_U8 phys_txq; /* destination queue ID */ + MV_U16 data_size; /* data size of transmitted packet in bytes */ +#ifdef MVPP2_V1 + MV_U32 buf_phys_addr; /* physical addr of transmitted buffer */ + MV_U32 buf_cookie; /* cookie for access to TX buffer in tx path */ + MV_U32 reserved1[3]; /* hw_cmd (for future use, BM, PON, PNC) */ + MV_U32 reserved2; /* reserved (for future use) */ +#else + MV_U64 rsrvd_hw_cmd1; /* hw_cmd (BM, PON, PNC) */ + MV_U64 buf_phys_addr_hw_cmd2; + MV_U64 buf_cookie_bm_qset_hw_cmd3; +#endif +}; + +struct mvpp2_rx_desc { + MV_U32 status; /* info about received packet */ + MV_U16 reserved1; /* parser_info (for future use, PnC) */ + MV_U16 data_size; /* size of received packet in bytes */ +#ifdef MVPP2_V1 + MV_U32 buf_phys_addr; /* physical address of the buffer */ + MV_U32 
buf_cookie; /* cookie for access to RX buffer in rx path */ + MV_U16 reserved2; /* gem_port_id (for future use, PON) */ + MV_U16 reserved3; /* csum_l4 (for future use, PnC) */ + MV_U8 reserved4; /* bm_qset (for future use, BM) */ + MV_U8 reserved5; + MV_U16 reserved6; /* classify_info (for future use, PnC) */ + MV_U32 reserved7; /* flow_id (for future use, PnC) */ + MV_U32 reserved8; +#else + MV_U16 rsrvd_gem; /* gem_port_id (for future use, PON) */ + MV_U16 rsrvd_l4csum; /* csum_l4 (for future use, PnC) */ + MV_U32 rsrvd_timestamp; + MV_U64 buf_phys_addr_key_hash; + MV_U64 buf_cookie_bm_qset_cls_info; +#endif +}; + +union mvpp2_prs_tcam_entry { + MV_U32 word[MVPP2_PRS_TCAM_WORDS]; + MV_U8 byte[MVPP2_PRS_TCAM_WORDS * 4]; +}; + +union mvpp2_prs_sram_entry { + MV_U32 word[MVPP2_PRS_SRAM_WORDS]; + MV_U8 byte[MVPP2_PRS_SRAM_WORDS * 4]; +}; + +struct mvpp2_prs_entry { + MV_U32 index; + union mvpp2_prs_tcam_entry tcam; + union mvpp2_prs_sram_entry sram; +}; + +struct mvpp2_prs_shadow { + MV_BOOL valid; + MV_BOOL finish; + + /* Lookup ID */ + MV_32 lu; + + /* User defined offset */ + MV_32 udf; + + /* Result info */ + MV_U32 ri; + MV_U32 ri_mask; +}; + +struct mvpp2_cls_flow_entry { + MV_U32 index; + MV_U32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS]; +}; + +struct mvpp2_cls_lookup_entry { + MV_U32 lkpid; + MV_U32 way; + MV_U32 data; +}; + +struct mvpp2_buff_hdr { + MV_U32 next_buff_phys_addr; + MV_U32 next_buff_virt_addr; + MV_U16 byte_count; + MV_U16 info; + MV_U8 reserved1; /* bm_qset (for future use, BM) */ +}; + +/* Buffer header info bits */ +#define MVPP2_B_HDR_INFO_MC_ID_MASK 0xfff +#define MVPP2_B_HDR_INFO_MC_ID(info) ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK) +#define MVPP2_B_HDR_INFO_LAST_OFFS 12 +#define MVPP2_B_HDR_INFO_LAST_MASK BIT(12) +#define MVPP2_B_HDR_INFO_IS_LAST(info) \ + ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS) + +/* SD1 Control1 */ +#define SD1_CONTROL_1_REG (0x148) + +#define SD1_CONTROL_XAUI_EN_OFFSET 28 +#define SD1_CONTROL_XAUI_EN_MASK (0x1 << SD1_CONTROL_XAUI_EN_OFFSET) + +#define SD1_CONTROL_RXAUI0_L23_EN_OFFSET 27 +#define SD1_CONTROL_RXAUI0_L23_EN_MASK (0x1 << \ + SD1_CONTROL_RXAUI0_L23_EN_OFFSET) + +#define SD1_CONTROL_RXAUI1_L45_EN_OFFSET 26 +#define SD1_CONTROL_RXAUI1_L45_EN_MASK (0x1 << \ + SD1_CONTROL_RXAUI1_L45_EN_OFFSET) +/* System Soft Reset 1 */ +#define MV_GOP_SOFT_RESET_1_REG 0x108 + +#define NETC_GOP_SOFT_RESET_OFFSET 6 +#define NETC_GOP_SOFT_RESET_MASK (0x1 << NETC_GOP_SOFT_RESET_OFFSET) + +/* Ports Control 0 */ +#define MV_NETCOMP_PORTS_CONTROL_0 (0x110) + +#define NETC_CLK_DIV_PHASE_OFFSET 31 +#define NETC_CLK_DIV_PHASE_MASK (0x1 << NETC_CLK_DIV_PHASE_OFFSET) + +#define NETC_GIG_RX_DATA_SAMPLE_OFFSET 29 +#define NETC_GIG_RX_DATA_SAMPLE_MASK (0x1 << NETC_GIG_RX_DATA_SAMPLE_OFFSET) + +#define NETC_BUS_WIDTH_SELECT_OFFSET 1 +#define NETC_BUS_WIDTH_SELECT_MASK (0x1 << NETC_BUS_WIDTH_SELECT_OFFSET) + +#define NETC_GOP_ENABLE_OFFSET 0 +#define NETC_GOP_ENABLE_MASK (0x1 << NETC_GOP_ENABLE_OFFSET) + +/* Ports Control 1 */ +#define MV_NETCOMP_PORTS_CONTROL_1 (0x114) + +#define NETC_PORT_GIG_RF_RESET_OFFSET(port) (28 + port) +#define NETC_PORT_GIG_RF_RESET_MASK(port) \ + (0x1 << NETC_PORT_GIG_RF_RESET_OFFSET(port)) + +#define NETC_PORTS_ACTIVE_OFFSET(port) (0 + port) +#define NETC_PORTS_ACTIVE_MASK(port) (0x1 << NETC_PORTS_ACTIVE_OFFSET(port)) + +/* Ports Status */ +#define MV_NETCOMP_PORTS_STATUS (0x11C) +#define NETC_PORTS_STATUS_OFFSET(port) (0 + port) +#define NETC_PORTS_STATUS_MASK(port) (0x1 << NETC_PORTS_STATUS_OFFSET(port)) + +/* 
Networking Complex Control 0 */ +#define MV_NETCOMP_CONTROL_0 (0x120) + +#define NETC_GBE_PORT1_MII_MODE_OFFSET 2 +#define NETC_GBE_PORT1_MII_MODE_MASK \ + (0x1 << NETC_GBE_PORT1_MII_MODE_OFFSET) + +#define NETC_GBE_PORT1_SGMII_MODE_OFFSET 1 +#define NETC_GBE_PORT1_SGMII_MODE_MASK \ + (0x1 << NETC_GBE_PORT1_SGMII_MODE_OFFSET) + +#define NETC_GBE_PORT0_SGMII_MODE_OFFSET 0 +#define NETC_GBE_PORT0_SGMII_MODE_MASK \ + (0x1 << NETC_GBE_PORT0_SGMII_MODE_OFFSET) + +/* Port Mac Control0 */ +#define MV_XLG_PORT_MAC_CTRL0_REG (0x0000) +#define MV_XLG_MAC_CTRL0_PORTEN_OFFS 0 +#define MV_XLG_MAC_CTRL0_PORTEN_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL0_PORTEN_OFFS) + +#define MV_XLG_MAC_CTRL0_MACRESETN_OFFS 1 +#define MV_XLG_MAC_CTRL0_MACRESETN_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL0_MACRESETN_OFFS) + +#define MV_XLG_MAC_CTRL0_FORCELINKDOWN_OFFS 2 +#define MV_XLG_MAC_CTRL0_FORCELINKDOWN_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL0_FORCELINKDOWN_OFFS) + +#define MV_XLG_MAC_CTRL0_FORCELINKPASS_OFFS 3 +#define MV_XLG_MAC_CTRL0_FORCELINKPASS_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL0_FORCELINKPASS_OFFS) + +#define MV_XLG_MAC_CTRL0_TXIPGMODE_OFFS 5 +#define MV_XLG_MAC_CTRL0_TXIPGMODE_MASK \ + (0x00000003 << MV_XLG_MAC_CTRL0_TXIPGMODE_OFFS) + +#define MV_XLG_MAC_CTRL0_RXFCEN_OFFS 7 +#define MV_XLG_MAC_CTRL0_RXFCEN_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL0_RXFCEN_OFFS) + +#define MV_XLG_MAC_CTRL0_TXFCEN_OFFS 8 +#define MV_XLG_MAC_CTRL0_TXFCEN_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL0_TXFCEN_OFFS) + +#define MV_XLG_MAC_CTRL0_RXCRCCHECKEN_OFFS 9 +#define MV_XLG_MAC_CTRL0_RXCRCCHECKEN_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL0_RXCRCCHECKEN_OFFS) + +#define MV_XLG_MAC_CTRL0_PERIODICXONEN_OFFS 10 +#define MV_XLG_MAC_CTRL0_PERIODICXONEN_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL0_PERIODICXONEN_OFFS) + +#define MV_XLG_MAC_CTRL0_RXCRCSTRIPEN_OFFS 11 +#define MV_XLG_MAC_CTRL0_RXCRCSTRIPEN_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL0_RXCRCSTRIPEN_OFFS) + +#define MV_XLG_MAC_CTRL0_PADDINGDIS_OFFS 13 +#define MV_XLG_MAC_CTRL0_PADDINGDIS_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL0_PADDINGDIS_OFFS) + +#define MV_XLG_MAC_CTRL0_MIBCNTDIS_OFFS 14 +#define MV_XLG_MAC_CTRL0_MIBCNTDIS_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL0_MIBCNTDIS_OFFS) + +#define MV_XLG_MAC_CTRL0_PFC_CASCADE_PORT_ENABLE_OFFS 15 +#define MV_XLG_MAC_CTRL0_PFC_CASCADE_PORT_ENABLE_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL0_PFC_CASCADE_PORT_ENABLE_OFFS) + + +/* Port Mac Control1 */ +#define MV_XLG_PORT_MAC_CTRL1_REG (0x0004) +#define MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS 0 +#define MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_MASK \ + (0x00001fff << MV_XLG_MAC_CTRL1_FRAMESIZELIMIT_OFFS) + +#define MV_XLG_MAC_CTRL1_MACLOOPBACKEN_OFFS 13 +#define MV_XLG_MAC_CTRL1_MACLOOPBACKEN_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL1_MACLOOPBACKEN_OFFS) + +#define MV_XLG_MAC_CTRL1_XGMIILOOPBACKEN_OFFS 14 +#define MV_XLG_MAC_CTRL1_XGMIILOOPBACKEN_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL1_XGMIILOOPBACKEN_OFFS) + +#define MV_XLG_MAC_CTRL1_LOOPBACKCLOCKSELECT_OFFS 15 +#define MV_XLG_MAC_CTRL1_LOOPBACKCLOCKSELECT_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL1_LOOPBACKCLOCKSELECT_OFFS) + + +/* Port Mac Control2 */ +#define MV_XLG_PORT_MAC_CTRL2_REG (0x0008) +#define MV_XLG_MAC_CTRL2_SALOW_7_0_OFFS 0 +#define MV_XLG_MAC_CTRL2_SALOW_7_0_MASK \ + (0x000000ff << MV_XLG_MAC_CTRL2_SALOW_7_0_OFFS) + +#define MV_XLG_MAC_CTRL2_UNIDIRECTIONALEN_OFFS 8 +#define MV_XLG_MAC_CTRL2_UNIDIRECTIONALEN_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL2_UNIDIRECTIONALEN_OFFS) + +#define MV_XLG_MAC_CTRL2_FIXEDIPGBASE_OFFS 9 +#define 
MV_XLG_MAC_CTRL2_FIXEDIPGBASE_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL2_FIXEDIPGBASE_OFFS) + +#define MV_XLG_MAC_CTRL2_PERIODICXOFFEN_OFFS 10 +#define MV_XLG_MAC_CTRL2_PERIODICXOFFEN_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL2_PERIODICXOFFEN_OFFS) + +#define MV_XLG_MAC_CTRL2_SIMPLEXMODEEN_OFFS 13 +#define MV_XLG_MAC_CTRL2_SIMPLEXMODEEN_MASK \ + (0x00000001 << MV_XLG_MAC_CTRL2_SIMPLEXMODEEN_OFFS) + +#define MV_XLG_MAC_CTRL2_FC_MODE_OFFS 14 +#define MV_XLG_MAC_CTRL2_FC_MODE_MASK \ + (0x00000003 << MV_XLG_MAC_CTRL2_FC_MODE_OFFS) + + +/* Port Status */ +#define MV_XLG_MAC_PORT_STATUS_REG (0x000c) +#define MV_XLG_MAC_PORT_STATUS_LINKSTATUS_OFFS 0 +#define MV_XLG_MAC_PORT_STATUS_LINKSTATUS_MASK \ + (0x00000001 << MV_XLG_MAC_PORT_STATUS_LINKSTATUS_OFFS) + +#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULT_OFFS 1 +#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULT_MASK \ + (0x00000001 << MV_XLG_MAC_PORT_STATUS_REMOTEFAULT_OFFS) + +#define MV_XLG_MAC_PORT_STATUS_LOCALFAULT_OFFS 2 +#define MV_XLG_MAC_PORT_STATUS_LOCALFAULT_MASK \ + (0x00000001 << MV_XLG_MAC_PORT_STATUS_LOCALFAULT_OFFS) + +#define MV_XLG_MAC_PORT_STATUS_LINKSTATUSCLEAN_OFFS 3 +#define MV_XLG_MAC_PORT_STATUS_LINKSTATUSCLEAN_MASK \ + (0x00000001 << MV_XLG_MAC_PORT_STATUS_LINKSTATUSCLEAN_OFFS) + +#define MV_XLG_MAC_PORT_STATUS_LOCALFAULTCLEAN_OFFS 4 +#define MV_XLG_MAC_PORT_STATUS_LOCALFAULTCLEAN_MASK \ + (0x00000001 << MV_XLG_MAC_PORT_STATUS_LOCALFAULTCLEAN_OFFS) + +#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULTCLEAN_OFFS 5 +#define MV_XLG_MAC_PORT_STATUS_REMOTEFAULTCLEAN_MASK \ + (0x00000001 << MV_XLG_MAC_PORT_STATUS_REMOTEFAULTCLEAN_OFFS) + +#define MV_XLG_MAC_PORT_STATUS_PORTRXPAUSE_OFFS 6 +#define MV_XLG_MAC_PORT_STATUS_PORTRXPAUSE_MASK \ + (0x00000001 << MV_XLG_MAC_PORT_STATUS_PORTRXPAUSE_OFFS) + +#define MV_XLG_MAC_PORT_STATUS_PORTTXPAUSE_OFFS 7 +#define MV_XLG_MAC_PORT_STATUS_PORTTXPAUSE_MASK \ + (0x00000001 << MV_XLG_MAC_PORT_STATUS_PORTTXPAUSE_OFFS) + +#define MV_XLG_MAC_PORT_STATUS_PFC_SYNC_FIFO_FULL_OFFS 8 +#define MV_XLG_MAC_PORT_STATUS_PFC_SYNC_FIFO_FULL_MASK \ + (0x00000001 << MV_XLG_MAC_PORT_STATUS_PFC_SYNC_FIFO_FULL_OFFS) + + +/* Port Fifos Thresholds Configuration */ +#define MV_XLG_PORT_FIFOS_THRS_CFG_REG (0x001) +#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_RXFULLTHR_OFFS 0 +#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_RXFULLTHR_MASK \ + (0x0000001f << MV_XLG_MAC_PORT_FIFOS_THRS_CFG_RXFULLTHR_OFFS) + +#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXFIFOSIZE_OFFS 5 +#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXFIFOSIZE_MASK \ + (0x0000003f << MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXFIFOSIZE_OFFS) + +#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXRDTHR_OFFS 11 +#define MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXRDTHR_MASK \ + (0x0000001f << MV_XLG_MAC_PORT_FIFOS_THRS_CFG_TXRDTHR_OFFS) + + +/* Port Mac Control3 */ +#define MV_XLG_PORT_MAC_CTRL3_REG (0x001c) +#define MV_XLG_MAC_CTRL3_BUFSIZE_OFFS 0 +#define MV_XLG_MAC_CTRL3_BUFSIZE_MASK \ + (0x0000003f << MV_XLG_MAC_CTRL3_BUFSIZE_OFFS) + +#define MV_XLG_MAC_CTRL3_XTRAIPG_OFFS 6 +#define MV_XLG_MAC_CTRL3_XTRAIPG_MASK \ + (0x0000007f << MV_XLG_MAC_CTRL3_XTRAIPG_OFFS) + +#define MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS 13 +#define MV_XLG_MAC_CTRL3_MACMODESELECT_MASK \ + (0x00000007 << MV_XLG_MAC_CTRL3_MACMODESELECT_OFFS) + + +/* Port Per Prio Flow Control Status */ +#define MV_XLG_PORT_PER_PRIO_FLOW_CTRL_STATUS_REG (0x0020) +#define MV_XLG_MAC_PORT_PER_PRIO_FLOW_CTRL_STATUS_PRIONSTATUS_OFFS 0 +#define MV_XLG_MAC_PORT_PER_PRIO_FLOW_CTRL_STATUS_PRIONSTATUS_MASK \ + (0x00000001 << \ + MV_XLG_MAC_PORT_PER_PRIO_FLOW_CTRL_STATUS_PRIONSTATUS_OFFS) + + 
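+/* Editorial usage sketch -- not part of the Marvell library. Every field
+ * above is described by an OFFS/MASK pair, so testing or extracting a
+ * field is always the same two operations. For example, link state could
+ * be read as follows, assuming a hypothetical xlg_read() accessor for
+ * the XLG MAC register space:
+ *
+ * MV_U32 reg = xlg_read(pp2_port, MV_XLG_MAC_PORT_STATUS_REG);
+ * MV_BOOL link_up = (reg & MV_XLG_MAC_PORT_STATUS_LINKSTATUS_MASK) >>
+ * MV_XLG_MAC_PORT_STATUS_LINKSTATUS_OFFS;
+ */
+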
+/* Debug Bus Status */
+#define MV_XLG_DEBUG_BUS_STATUS_REG (0x0024)
+#define MV_XLG_MAC_DEBUG_BUS_STATUS_DEBUG_BUS_OFFS 0
+#define MV_XLG_MAC_DEBUG_BUS_STATUS_DEBUG_BUS_MASK \
+ (0x0000ffff << MV_XLG_MAC_DEBUG_BUS_STATUS_DEBUG_BUS_OFFS)
+
+
+/* Port Metal Fix */
+#define MV_XLG_PORT_METAL_FIX_REG (0x002c)
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_EOP_IN_FIFO__OFFS 0
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_EOP_IN_FIFO__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_EOP_IN_FIFO__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LTF_FIX__OFFS 1
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LTF_FIX__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_LTF_FIX__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_HOLD_FIX__OFFS 2
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_HOLD_FIX__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_HOLD_FIX__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LED_FIX__OFFS 3
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_LED_FIX__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_LED_FIX__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PAD_PROTECT__OFFS 4
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PAD_PROTECT__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_PAD_PROTECT__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS44__OFFS 5
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS44__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS44__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS42__OFFS 6
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS42__MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_NX_BTS42__OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_FLUSH_FIX_OFFS 7
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_FLUSH_FIX_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_FLUSH_FIX_OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PORT_EN_FIX_OFFS 8
+#define MV_XLG_MAC_PORT_METAL_FIX_EN_PORT_EN_FIX_MASK \
+ (0x00000001 << MV_XLG_MAC_PORT_METAL_FIX_EN_PORT_EN_FIX_OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF0_BITS_OFFS 9
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF0_BITS_MASK \
+ (0x0000000f << MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF0_BITS_OFFS)
+
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF1_BITS_OFFS 13
+#define MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF1_BITS_MASK \
+ (0x00000007 << MV_XLG_MAC_PORT_METAL_FIX_SPARE_DEF1_BITS_OFFS)
+
+/* Xg Mib Counters Control */
+#define MV_XLG_MIB_CNTRS_CTRL_REG (0x0030)
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGCAPTURETRIGGER_OFFS 0
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGCAPTURETRIGGER_MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGCAPTURETRIGGER_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGDONTCLEARAFTERREAD_OFFS 1
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGDONTCLEARAFTERREAD_MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGDONTCLEARAFTERREAD_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGRXHISTOGRAMEN_OFFS 2
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGRXHISTOGRAMEN_MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGRXHISTOGRAMEN_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGTXHISTOGRAMEN_OFFS 3
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGTXHISTOGRAMEN_MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_XG_MIB_CNTRS_CTRL_XGTXHISTOGRAMEN_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS 4
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MFA1_BTT940_FIX_ENABLE__OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_LEDS_NUMBER_OFFS 5
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_LEDS_NUMBER_MASK \
+ (0x0000003f << \
+ MV_XLG_MAC_XG_MIB_CNTRS_CTRL_LEDS_NUMBER_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS 11
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_COUNT_HIST_OFFS)
+
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS 12
+#define MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_XG_MIB_CNTRS_CTRL_MIB_4_LIMIT_1518_1522_OFFS)
+
+/* Cn/ccfc Timer%i */
+#define MV_XLG_CNCCFC_TIMERI_REG(t) ((0x0038 + t*4))
+#define MV_XLG_MAC_CNCCFC_TIMERI_PORTSPEEDTIMER_OFFS 0
+#define MV_XLG_MAC_CNCCFC_TIMERI_PORTSPEEDTIMER_MASK \
+ (0x0000ffff << MV_XLG_MAC_CNCCFC_TIMERI_PORTSPEEDTIMER_OFFS)
+
+/* Ppfc Control */
+#define MV_XLG_MAC_PPFC_CTRL_REG (0x0060)
+#define MV_XLG_MAC_PPFC_CTRL_GLOBAL_PAUSE_ENI_OFFS 0
+#define MV_XLG_MAC_PPFC_CTRL_GLOBAL_PAUSE_ENI_MASK \
+ (0x00000001 << MV_XLG_MAC_PPFC_CTRL_GLOBAL_PAUSE_ENI_OFFS)
+
+#define MV_XLG_MAC_PPFC_CTRL_DIP_BTS_677_EN_OFFS 9
+#define MV_XLG_MAC_PPFC_CTRL_DIP_BTS_677_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_PPFC_CTRL_DIP_BTS_677_EN_OFFS)
+
+/* Fc Dsa Tag 0 */
+#define MV_XLG_MAC_FC_DSA_TAG_0_REG (0x0068)
+#define MV_XLG_MAC_FC_DSA_TAG_0_DSATAGREG0_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_0_DSATAGREG0_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_0_DSATAGREG0_OFFS)
+
+/* Fc Dsa Tag 1 */
+#define MV_XLG_MAC_FC_DSA_TAG_1_REG (0x006c)
+#define MV_XLG_MAC_FC_DSA_TAG_1_DSATAGREG1_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_1_DSATAGREG1_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_1_DSATAGREG1_OFFS)
+
+/* Fc Dsa Tag 2 */
+#define MV_XLG_MAC_FC_DSA_TAG_2_REG (0x0070)
+#define MV_XLG_MAC_FC_DSA_TAG_2_DSATAGREG2_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_2_DSATAGREG2_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_2_DSATAGREG2_OFFS)
+
+/* Fc Dsa Tag 3 */
+#define MV_XLG_MAC_FC_DSA_TAG_3_REG (0x0074)
+#define MV_XLG_MAC_FC_DSA_TAG_3_DSATAGREG3_OFFS 0
+#define MV_XLG_MAC_FC_DSA_TAG_3_DSATAGREG3_MASK \
+ (0x0000ffff << MV_XLG_MAC_FC_DSA_TAG_3_DSATAGREG3_OFFS)
+
+/* Dic Budget Compensation */
+#define MV_XLG_MAC_DIC_BUDGET_COMPENSATION_REG (0x0080)
+#define MV_XLG_MAC_DIC_BUDGET_COMPENSATION_DIC_COUNTER_TO_ADD_8BYTES_OFFS 0
+#define MV_XLG_MAC_DIC_BUDGET_COMPENSATION_DIC_COUNTER_TO_ADD_8BYTES_MASK \
+ (0x0000ffff << \
+ MV_XLG_MAC_DIC_BUDGET_COMPENSATION_DIC_COUNTER_TO_ADD_8BYTES_OFFS)
+
+/* Port Mac Control4 */
+#define MV_XLG_PORT_MAC_CTRL4_REG (0x0084)
+#define MV_XLG_MAC_CTRL4_LLFC_GLOBAL_FC_ENABLE_OFFS 0
+#define MV_XLG_MAC_CTRL4_LLFC_GLOBAL_FC_ENABLE_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_LLFC_GLOBAL_FC_ENABLE_OFFS)
+
+#define MV_XLG_MAC_CTRL4_LED_STREAM_SELECT_OFFS 1
+#define MV_XLG_MAC_CTRL4_LED_STREAM_SELECT_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_LED_STREAM_SELECT_OFFS)
+
+#define MV_XLG_MAC_CTRL4_DEBUG_BUS_SELECT_OFFS 2
+#define MV_XLG_MAC_CTRL4_DEBUG_BUS_SELECT_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_DEBUG_BUS_SELECT_OFFS)
+
+#define MV_XLG_MAC_CTRL4_MASK_PCS_RESET_OFFS 3
+#define MV_XLG_MAC_CTRL4_MASK_PCS_RESET_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_MASK_PCS_RESET_OFFS)
+
+#define MV_XLG_MAC_CTRL4_ENABLE_SHORT_PREAMBLE_FOR_XLG_OFFS 4
+#define MV_XLG_MAC_CTRL4_ENABLE_SHORT_PREAMBLE_FOR_XLG_MASK \
+ (0x00000001 << \
+ MV_XLG_MAC_CTRL4_ENABLE_SHORT_PREAMBLE_FOR_XLG_OFFS)
+
+#define MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_OFFS 5
+#define MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_FORWARD_802_3X_FC_EN_OFFS)
+
+#define MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_OFFS 6
+#define MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_FORWARD_PFC_EN_OFFS)
+
+#define MV_XLG_MAC_CTRL4_FORWARD_UNKNOWN_FC_EN_OFFS 7
+#define MV_XLG_MAC_CTRL4_FORWARD_UNKNOWN_FC_EN_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_FORWARD_UNKNOWN_FC_EN_OFFS)
+
+#define MV_XLG_MAC_CTRL4_USE_XPCS_OFFS 8
+#define MV_XLG_MAC_CTRL4_USE_XPCS_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_USE_XPCS_OFFS)
+
+#define MV_XLG_MAC_CTRL4_DMA_INTERFACE_IS_64_BIT_OFFS 9
+#define MV_XLG_MAC_CTRL4_DMA_INTERFACE_IS_64_BIT_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_DMA_INTERFACE_IS_64_BIT_OFFS)
+
+#define MV_XLG_MAC_CTRL4_TX_DMA_INTERFACE_BITS_OFFS 10
+#define MV_XLG_MAC_CTRL4_TX_DMA_INTERFACE_BITS_MASK \
+ (0x00000003 << MV_XLG_MAC_CTRL4_TX_DMA_INTERFACE_BITS_OFFS)
+
+#define MV_XLG_MAC_CTRL4_MAC_MODE_DMA_1G_OFFS 12
+#define MV_XLG_MAC_CTRL4_MAC_MODE_DMA_1G_MASK \
+ (0x00000001 << MV_XLG_MAC_CTRL4_MAC_MODE_DMA_1G_OFFS)
+
+
+/* Port Mac Control5 */
+#define MV_XLG_PORT_MAC_CTRL5_REG (0x0088)
+#define MV_XLG_MAC_CTRL5_TXIPGLENGTH_OFFS 0
+#define MV_XLG_MAC_CTRL5_TXIPGLENGTH_MASK \
+ (0x0000000f << MV_XLG_MAC_CTRL5_TXIPGLENGTH_OFFS)
+
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHTX_OFFS 4
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHTX_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_PREAMBLELENGTHTX_OFFS)
+
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHRX_OFFS 7
+#define MV_XLG_MAC_CTRL5_PREAMBLELENGTHRX_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_PREAMBLELENGTHRX_OFFS)
+
+#define MV_XLG_MAC_CTRL5_TXNUMCRCBYTES_OFFS 10
+#define MV_XLG_MAC_CTRL5_TXNUMCRCBYTES_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_TXNUMCRCBYTES_OFFS)
+
+#define MV_XLG_MAC_CTRL5_RXNUMCRCBYTES_OFFS 13
+#define MV_XLG_MAC_CTRL5_RXNUMCRCBYTES_MASK \
+ (0x00000007 << MV_XLG_MAC_CTRL5_RXNUMCRCBYTES_OFFS)
+
+
+/* External Control */
+#define MV_XLG_MAC_EXT_CTRL_REG (0x0090)
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL0_OFFS 0
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL0_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL0_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL1_OFFS 1
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL1_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL1_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL2_OFFS 2
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL2_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL2_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL3_OFFS 3
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL3_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL3_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL4_OFFS 4
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL4_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL4_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL5_OFFS 5
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL5_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL5_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL6_OFFS 6
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL6_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL6_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL7_OFFS 7
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL7_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL7_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL8_OFFS 8
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL8_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL8_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL9_OFFS 9
+#define MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL9_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXTERNAL_CTRL9_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_10_OFFS 10
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_10_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_10_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_11_OFFS 11
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_11_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_11_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_12_OFFS 12
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_12_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_12_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_13_OFFS 13
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_13_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_13_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_14_OFFS 14
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_14_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_14_OFFS)
+
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_15_OFFS 15
+#define MV_XLG_MAC_EXT_CTRL_EXT_CTRL_15_MASK \
+ (0x00000001 << MV_XLG_MAC_EXT_CTRL_EXT_CTRL_15_OFFS)
+
+
+/* Macro Control */
+#define MV_XLG_MAC_MACRO_CTRL_REG (0x0094)
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_0_OFFS 0
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_0_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_0_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_1_OFFS 1
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_1_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_1_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_2_OFFS 2
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_2_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_2_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_3_OFFS 3
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_3_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_3_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_4_OFFS 4
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_4_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_4_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_5_OFFS 5
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_5_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_5_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_6_OFFS 6
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_6_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_6_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_7_OFFS 7
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_7_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_7_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_8_OFFS 8
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_8_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_8_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_9_OFFS 9
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_9_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_9_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_10_OFFS 10
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_10_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_10_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_11_OFFS 11
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_11_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_11_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_12_OFFS 12
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_12_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_12_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_13_OFFS 13
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_13_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_13_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_14_OFFS 14
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_14_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_14_OFFS)
+
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_15_OFFS 15
+#define MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_15_MASK \
+ (0x00000001 << MV_XLG_MAC_MACRO_CTRL_MACRO_CTRL_15_OFFS)
+
+#define MV_XLG_MAC_DIC_PPM_IPG_REDUCE_REG (0x0094)
+
+/* Port Interrupt Cause */
+#define MV_XLG_INTERRUPT_CAUSE_REG (0x0014)
+/* Port Interrupt Mask */
+#define MV_XLG_INTERRUPT_MASK_REG (0x0018)
+#define MV_XLG_INTERRUPT_LINK_CHANGE_OFFS 1
+#define MV_XLG_INTERRUPT_LINK_CHANGE_MASK \
+ (0x1 << MV_XLG_INTERRUPT_LINK_CHANGE_OFFS)
+
+/* Port Interrupt Summary Cause */
+#define MV_XLG_EXTERNAL_INTERRUPT_CAUSE_REG (0x0058)
+/* Port Interrupt Summary Mask */
+#define MV_XLG_EXTERNAL_INTERRUPT_MASK_REG (0x005C)
+#define MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_OFFS 1
+#define MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_MASK \
+ (0x1 << MV_XLG_EXTERNAL_INTERRUPT_LINK_CHANGE_OFFS)
+/* port related */
+enum mv_reset {RESET, UNRESET};
+
+/* All PPv22 addresses are 40-bit */
+#define MVPP22_ADDR_HIGH_SIZE 8
+#define MVPP22_ADDR_HIGH_MASK ((1<<MVPP22_ADDR_HIGH_SIZE) - 1)
+#define MVPP22_ADDR_MASK (0xFFFFFFFFFF)
+/* Desc addr shift */
+#define MVPP21_DESC_ADDR_SHIFT 0 /* Applies to RXQ, AGGR_TXQ */
+#define MVPP22_DESC_ADDR_SHIFT (9-1) /* Applies to RXQ, AGGR_TXQ */
+
+/* Net Complex */
+enum mv_netc_topology {
+ MV_NETC_GE_MAC0_RXAUI_L23 = BIT(0),
+ MV_NETC_GE_MAC0_RXAUI_L45 = BIT(1),
+ MV_NETC_GE_MAC0_XAUI = BIT(2),
+ MV_NETC_GE_MAC2_SGMII = BIT(3),
+ MV_NETC_GE_MAC3_SGMII = BIT(4),
+ MV_NETC_GE_MAC3_RGMII = BIT(5),
+};
+
+enum mv_netc_phase {
+ MV_NETC_FIRST_PHASE,
+ MV_NETC_SECOND_PHASE,
+};
+
+enum mv_netc_sgmii_xmi_mode {
+ MV_NETC_GBE_SGMII,
+ MV_NETC_GBE_XMII,
+};
+
+enum mv_netc_mii_mode {
+ MV_NETC_GBE_RGMII,
+ MV_NETC_GBE_MII,
+};
+
+enum mv_netc_lanes {
+ MV_NETC_LANE_23,
+ MV_NETC_LANE_45,
+};
+
+/* PHY_ADDRESS_REGISTER0 Register */
+#define MV_SMI_PHY_ADDRESS_REG(n) (0xC + 0x4*n)
+#define MV_SMI_PHY_ADDRESS_PHYAD_OFFS 0
+#define MV_SMI_PHY_ADDRESS_PHYAD_MASK \
+ (0x1F << MV_SMI_PHY_ADDRESS_PHYAD_OFFS)
+
+enum mv_port_duplex {
+ MV_PORT_DUPLEX_AN,
+ MV_PORT_DUPLEX_HALF,
+ MV_PORT_DUPLEX_FULL
+};
+
+/* Static declarations */
+
+/* Number of RXQs used by a single port */
+static MV_32 rxq_number = 1;
+/* Number of TXQs used by a single port */
+static MV_32 txq_number = 1;
+
+MV_VOID mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, MV_32 port, MV_BOOL add);
+MV_VOID mvpp2_prs_mac_multi_set(struct mvpp2 *priv, MV_32 port, MV_32 index,
+ MV_BOOL add);
+MV_32 mvpp2_prs_default_init(struct mvpp2 *priv);
+MV_32 mvpp2_prs_mac_da_accept(struct mvpp2 *priv, MV_32 port,
+ const MV_U8 *da, MV_BOOL add);
+MV_VOID mvpp2_prs_mcast_del_all(struct mvpp2 *priv, MV_32 port);
+MV_32 mvpp2_prs_tag_mode_set(struct mvpp2 *priv, MV_32 port, MV_32 type);
+MV_32 mvpp2_prs_def_flow(struct mvpp2_port *port);
+MV_VOID mvpp2_cls_init(struct mvpp2 *priv);
+MV_VOID mvpp2_cls_port_config(struct mvpp2_port *port);
+MV_VOID mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port);
+MV_VOID mvpp2_bm_pool_hw_create(struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool, MV_32 size);
+MV_VOID mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
+ struct mvpp2_bm_pool *bm_pool,
+ MV_32 buf_size);
+MV_VOID mvpp2_bm_stop(struct mvpp2 *priv, MV_32 pool);
+MV_VOID mvpp2_bm_irq_clear(struct mvpp2 *priv, MV_32 pool);
+MV_VOID mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
+ MV_32 lrxq, MV_32 long_pool);
+MV_VOID mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
+ MV_32 lrxq, MV_32 short_pool);
+MV_VOID mvpp2_bm_pool_mc_put(struct mvpp2_port *port, MV_32 pool,
+ MV_U32 buf_phys_addr, MV_U32 buf_virt_addr,
+ MV_32 mc_id);
+MV_VOID mvpp2_pool_refill(struct mvpp2_port *port, MV_U32 bm,
+ MV_U32 phys_addr, MV_U32 cookie);
+MV_VOID mvpp2_interrupts_mask(MV_VOID *arg);
+MV_VOID mvpp2_interrupts_unmask(MV_VOID *arg);
+MV_VOID mvpp2_port_enable(struct mvpp2_port *port);
+MV_VOID mvpp2_port_disable(struct mvpp2_port *port);
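+/* Editorial worked example -- not part of the Marvell library. A PPv22
+ * 40-bit buffer address such as 0x0980000000 is always programmed in two
+ * pieces, which is what mvpp2_bm_pool_put() further below does for the
+ * BM release registers:
+ *
+ * low 32 bits: (MV_U32)0x0980000000 = 0x80000000
+ * high 8 bits: upper_32_bits(0x0980000000) & MVPP22_ADDR_HIGH_MASK = 0x09
+ */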
+MV_VOID mvpp2_defaults_set(struct mvpp2_port *port);
+MV_VOID mvpp2_ingress_enable(struct mvpp2_port *port);
+MV_VOID mvpp2_ingress_disable(struct mvpp2_port *port);
+MV_VOID mvpp2_egress_enable(struct mvpp2_port *port);
+MV_VOID mvpp2_egress_disable(struct mvpp2_port *port);
+MV_U32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc, MV_32 cpu);
+MV_32 mvpp2_txq_drain_set(struct mvpp2_port *port, MV_32 txq, MV_BOOL en);
+MV_32 mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq);
+MV_U32 mvpp2_aggr_txq_pend_desc_num_get(struct mvpp2 *pp2, int cpu);
+struct mvpp2_tx_desc *
+mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq);
+MV_VOID mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, MV_32 pending);
+MV_32 mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
+ struct mvpp2_tx_queue *aggr_txq,
+ MV_32 num, MV_32 cpu);
+MV_32 mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
+ struct mvpp2_tx_queue *txq, MV_32 num);
+MV_VOID mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq);
+MV_U32 mvpp2_txq_desc_csum(MV_32 l3_offs, MV_32 l3_proto,
+ MV_32 ip_hdr_len, MV_32 l4_proto);
+MV_VOID mvpp2_txq_sent_counter_clear(MV_VOID *arg);
+MV_VOID mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port);
+MV_VOID mvpp2_txp_max_tx_size_set(struct mvpp2_port *port);
+MV_VOID mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq, MV_U32 pkts);
+MV_VOID mvpp2_rx_time_coal_set(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq, MV_U32 usec);
+MV_VOID mvpp2_aggr_txq_hw_init(struct mvpp2_tx_queue *aggr_txq,
+ MV_32 desc_num, MV_32 cpu,
+ struct mvpp2 *priv);
+MV_VOID mvpp2_rxq_hw_init(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq);
+MV_VOID mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq,
+ MV_32 cpu);
+MV_VOID mvpp2_txq_hw_init(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq);
+MV_VOID mvpp2_txq_hw_deinit(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq);
+MV_VOID mvpp2_port_power_up(struct mvpp2_port *port);
+MV_VOID mvpp2_rx_fifo_init(struct mvpp2 *priv);
+MV_VOID mvpp2_rxq_hw_deinit(struct mvpp2_port *port,
+ struct mvpp2_rx_queue *rxq);
+MV_U32 mvp_pp2x_gop110_netc_cfg_create(struct mvpp2_port *pp2_port);
+MV_32 mv_gop110_netc_init(struct mvpp2_port *mvport,
+ MV_U32 net_comp_config, enum mv_netc_phase phase);
+
+MV_32 mv_gop110_port_init(struct mvpp2_port *pp2_port);
+MV_32 mv_gop110_gmac_reset(struct mvpp2_port *pp2_port, enum mv_reset reset);
+MV_32 mv_gop110_gpcs_mode_cfg(struct mvpp2_port *pp2_port, MV_BOOL en);
+MV_32 mv_gop110_bypass_clk_cfg(struct mvpp2_port *pp2_port, MV_BOOL en);
+MV_32 mv_gop110_gpcs_reset(struct mvpp2_port *pp2_port, enum mv_reset act);
+MV_VOID mv_gop110_xlg_2_gig_mac_cfg(struct mvpp2_port *pp2_port);
+MV_32 mv_gop110_gmac_mode_cfg(struct mvpp2_port *pp2_port);
+MV_VOID mv_gop110_gmac_rgmii_cfg(struct mvpp2_port *pp2_port);
+MV_VOID mv_gop110_gmac_sgmii2_5_cfg(struct mvpp2_port *pp2_port);
+MV_VOID mv_gop110_gmac_sgmii_cfg(struct mvpp2_port *pp2_port);
+MV_VOID mv_gop110_gmac_qsgmii_cfg(struct mvpp2_port *pp2_port);
+MV_32 mvpp2_smi_phy_addr_cfg(struct mvpp2_port *pp2_port, MV_32 port, MV_32 addr);
+MV_BOOL mv_gop110_port_is_link_up(struct mvpp2_port *pp2_port);
+MV_BOOL mv_gop110_gmac_link_status_get(struct mvpp2_port *pp2_port);
+INTN mvpp2_bm_pool_ctrl(struct mvpp2 *pp2, INTN pool,
+ enum mvpp2_command cmd);
+MV_VOID mv_gop110_port_disable(struct mvpp2_port *pp2_port);
+MV_VOID mv_gop110_port_enable(struct mvpp2_port *pp2_port);
+MV_VOID mv_gop110_gmac_port_enable(struct mvpp2_port *pp2_port);
+MV_VOID mv_gop110_gmac_port_disable(struct mvpp2_port *pp2_port);
+MV_VOID mv_gop110_gmac_port_link_event_mask(struct mvpp2_port *pp2_port);
+MV_32 mv_gop110_port_events_mask(struct mvpp2_port *pp2_port);
+MV_32 mv_gop110_fl_cfg(struct mvpp2_port *pp2_port);
+MV_32 mv_gop110_speed_duplex_set(struct mvpp2_port *pp2_port,
+ MV_32 speed, enum mv_port_duplex duplex);
+MV_32 mv_gop110_gmac_speed_duplex_set(struct mvpp2_port *pp2_port,
+ MV_32 speed, enum mv_port_duplex duplex);
+MV_VOID mvpp2_axi_config(struct mvpp2 *pp2);
+MV_VOID mvpp2_txp_clean(struct mvpp2_port *pp, MV_32 txp,
+ struct mvpp2_tx_queue *txq);
+MV_VOID mvpp2_cleanup_txqs(struct mvpp2_port *pp);
+MV_VOID mvpp2_cleanup_rxqs(struct mvpp2_port *pp);
+
+/* Get number of physical egress port */
+static inline MV_32 mvpp2_egress_port(struct mvpp2_port *port)
+{
+ return MVPP2_MAX_TCONT + port->id;
+}
+
+/* Get number of physical TXQ */
+static inline MV_32 mvpp2_txq_phys(MV_32 port, MV_32 txq)
+{
+ return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
+}
+
+/* Set pool number in a BM cookie */
+static inline MV_U32 mvpp2_bm_cookie_pool_set(MV_U32 cookie, MV_32 pool)
+{
+ MV_U32 bm;
+
+ bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
+ bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
+
+ return bm;
+}
+
+/* Get pool number from a BM cookie */
+static inline MV_32 mvpp2_bm_cookie_pool_get(MV_U32 cookie)
+{
+ return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
+}
+
+#ifdef MVPP2_V1
+/* Release buffer to BM */
+static inline MV_VOID mvpp2_bm_pool_put(struct mvpp2 *pp2, MV_32 pool,
+ MV_U32 buf_phys_addr, MV_U32 buf_virt_addr)
+{
+ mvpp2_write(pp2, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
+ mvpp2_write(pp2, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
+}
+#else
+static inline MV_VOID mvpp2_bm_pool_put(struct mvpp2 *pp2, MV_32 pool,
+ MV_U64 buf_phys_addr, MV_U64 buf_virt_addr)
+{
+ MV_U32 val = 0;
+
+ val = (upper_32_bits(buf_virt_addr) & MVPP22_ADDR_HIGH_MASK)
+ << MVPP22_BM_VIRT_HIGH_RLS_OFFST;
+ val |= (upper_32_bits(buf_phys_addr) & MVPP22_ADDR_HIGH_MASK)
+ << MVPP22_BM_PHY_HIGH_RLS_OFFSET;
+ mvpp2_write(pp2, MVPP22_BM_PHY_VIRT_HIGH_RLS_REG, val);
+ mvpp2_write(pp2, MVPP2_BM_VIRT_RLS_REG, (MV_U32)buf_virt_addr);
+ mvpp2_write(pp2, MVPP2_BM_PHY_RLS_REG(pool), (MV_U32)buf_phys_addr);
+}
+#endif
+
+static inline MV_VOID mvpp2_interrupts_enable(struct mvpp2_port *port,
+ MV_32 cpu_mask)
+{
+ mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+ MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
+}
+
+static inline MV_VOID mvpp2_interrupts_disable(struct mvpp2_port *port,
+ MV_32 cpu_mask)
+{
+ mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+ MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
+}
+
+/* Get number of Rx descriptors occupied by received packets */
+static inline MV_32
+mvpp2_rxq_received(struct mvpp2_port *port, MV_32 rxq_id)
+{
+ MV_U32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
+
+ return val & MVPP2_RXQ_OCCUPIED_MASK;
+}
+
+/* Update Rx queue status with the number of occupied and available
+ * Rx descriptor slots.
+ */
+static inline MV_VOID
+mvpp2_rxq_status_update(struct mvpp2_port *port, MV_32 rxq_id,
+ MV_32 used_count, MV_32 free_count)
+{
+ /* Decrement the number of used descriptors and
+ * increment the number of free descriptors.
+ */
+ MV_U32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
+
+ mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
+}
+
+/* Get pointer to next RX descriptor to be processed by SW */
+static inline struct mvpp2_rx_desc *
+mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
+{
+ MV_32 rx_desc = rxq->next_desc_to_proc;
+
+ rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
+ mvpp2_prefetch(rxq->descs + rxq->next_desc_to_proc);
+ return rxq->descs + rx_desc;
+}
+
+/* Get number of sent descriptors and decrement counter.
+ * The number of sent descriptors is returned.
+ * Per-CPU access
+ */
+static inline MV_32 mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
+ struct mvpp2_tx_queue *txq)
+{
+ MV_U32 val;
+
+ /* Reading status reg resets transmitted descriptor counter */
+#ifdef MVPP2_V1
+ val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
+#else
+ val = mvpp2_read(port->priv, MVPP22_TXQ_SENT_REG(txq->id));
+#endif
+
+ return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
+ MVPP2_TRANSMITTED_COUNT_OFFSET;
+}
+
+static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
+ MV_U32 cause)
+{
+ MV_32 queue = mvpp2_fls(cause) - 1;
+
+ return &port->rxqs[queue];
+}
+
+static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
+ MV_U32 cause)
+{
+ MV_32 queue = mvpp2_fls(cause) - 1;
+
+ return &port->txqs[queue];
+}
+
+static inline MV_VOID mvpp2x2_txdesc_phys_addr_set(dma_addr_t phys_addr,
+ struct mvpp2_tx_desc *tx_desc)
+{
+ MV_U64 *buf_phys_addr_p = &tx_desc->buf_phys_addr_hw_cmd2;
+
+ *buf_phys_addr_p &= ~(MVPP22_ADDR_MASK);
+ *buf_phys_addr_p |= phys_addr & MVPP22_ADDR_MASK;
+}
+#endif /* _MVPP_LIB_H_ */
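The PPv22 descriptors pack hardware command bits and a 40-bit DMA address into single 64-bit words, and mvpp2x2_txdesc_phys_addr_set() above replaces only the low 40 bits of buf_phys_addr_hw_cmd2. The following standalone sketch is illustrative only and not part of the patch; ADDR_MASK simply mirrors MVPP22_ADDR_MASK and the values are made up. It demonstrates that the read-modify-write preserves the command bits:

#include <assert.h>
#include <stdint.h>

#define ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits, mirrors MVPP22_ADDR_MASK */

int main(void)
{
        uint64_t hw_cmd2 = 0xAB0000FFFFFFFFFFULL; /* command bits + old address */
        uint64_t dma = 0x0980000000ULL;           /* new 40-bit buffer address */

        /* Same update as mvpp2x2_txdesc_phys_addr_set(): keep the command
         * bits above bit 39, substitute the new address below them.
         */
        hw_cmd2 = (hw_cmd2 & ~ADDR_MASK) | (dma & ADDR_MASK);

        assert((hw_cmd2 & ADDR_MASK) == dma);   /* address updated */
        assert((hw_cmd2 >> 40) == 0xAB0000ULL); /* command bits preserved */
        return 0;
}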