1572890efSRavi Kumar /* SPDX-License-Identifier: BSD-3-Clause 2572890efSRavi Kumar * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved. 3572890efSRavi Kumar * Copyright(c) 2018 Synopsys, Inc. All rights reserved. 4572890efSRavi Kumar */ 5572890efSRavi Kumar 6572890efSRavi Kumar #include "axgbe_ethdev.h" 7572890efSRavi Kumar #include "axgbe_common.h" 8572890efSRavi Kumar #include "axgbe_phy.h" 97c4158a5SRavi Kumar #include "axgbe_rxtx.h" 107c4158a5SRavi Kumar 1186578516SGirish Nandibasappa static uint32_t bitrev32(uint32_t x) 1286578516SGirish Nandibasappa { 1386578516SGirish Nandibasappa x = (x >> 16) | (x << 16); 1486578516SGirish Nandibasappa x = (((x & 0xff00ff00) >> 8) | ((x & 0x00ff00ff) << 8)); 1586578516SGirish Nandibasappa x = (((x & 0xf0f0f0f0) >> 4) | ((x & 0x0f0f0f0f) << 4)); 1686578516SGirish Nandibasappa x = (((x & 0xcccccccc) >> 2) | ((x & 0x33333333) << 2)); 1786578516SGirish Nandibasappa x = (((x & 0xaaaaaaaa) >> 1) | ((x & 0x55555555) << 1)); 1886578516SGirish Nandibasappa return x; 1986578516SGirish Nandibasappa } 2086578516SGirish Nandibasappa 2186578516SGirish Nandibasappa /*MSB set bit from 32 to 1*/ 2286578516SGirish Nandibasappa static int get_lastbit_set(int x) 2386578516SGirish Nandibasappa { 2486578516SGirish Nandibasappa int r = 32; 2586578516SGirish Nandibasappa 2686578516SGirish Nandibasappa if (!x) 2786578516SGirish Nandibasappa return 0; 2886578516SGirish Nandibasappa if (!(x & 0xffff0000)) { 2986578516SGirish Nandibasappa x <<= 16; 3086578516SGirish Nandibasappa r -= 16; 3186578516SGirish Nandibasappa } 3286578516SGirish Nandibasappa if (!(x & 0xff000000)) { 3386578516SGirish Nandibasappa x <<= 8; 3486578516SGirish Nandibasappa r -= 8; 3586578516SGirish Nandibasappa } 3686578516SGirish Nandibasappa if (!(x & 0xf0000000)) { 3786578516SGirish Nandibasappa x <<= 4; 3886578516SGirish Nandibasappa r -= 4; 3986578516SGirish Nandibasappa } 4086578516SGirish Nandibasappa if (!(x & 0xc0000000)) { 4186578516SGirish 
Nandibasappa x <<= 2; 4286578516SGirish Nandibasappa r -= 2; 4386578516SGirish Nandibasappa } 4486578516SGirish Nandibasappa if (!(x & 0x80000000)) { 4586578516SGirish Nandibasappa x <<= 1; 4686578516SGirish Nandibasappa r -= 1; 4786578516SGirish Nandibasappa } 4886578516SGirish Nandibasappa return r; 4986578516SGirish Nandibasappa } 5086578516SGirish Nandibasappa 517c4158a5SRavi Kumar static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata) 527c4158a5SRavi Kumar { 5335b2d13fSOlivier Matz return pdata->eth_dev->data->mtu + RTE_ETHER_HDR_LEN + 5425cf2630SFerruh Yigit RTE_ETHER_CRC_LEN + RTE_VLAN_HLEN; 557c4158a5SRavi Kumar } 56572890efSRavi Kumar 574ac7516bSRavi Kumar /* query busy bit */ 584ac7516bSRavi Kumar static int mdio_complete(struct axgbe_port *pdata) 594ac7516bSRavi Kumar { 604ac7516bSRavi Kumar if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY)) 614ac7516bSRavi Kumar return 1; 624ac7516bSRavi Kumar 634ac7516bSRavi Kumar return 0; 644ac7516bSRavi Kumar } 654ac7516bSRavi Kumar 66d06394d2SVenkat Kumar Ande static unsigned int axgbe_create_mdio_sca(int port, int reg) 67d06394d2SVenkat Kumar Ande { 68d06394d2SVenkat Kumar Ande unsigned int mdio_sca, da; 69d06394d2SVenkat Kumar Ande 70d06394d2SVenkat Kumar Ande da = (reg & MII_ADDR_C45) ? 
reg >> 16 : 0; 71d06394d2SVenkat Kumar Ande 72d06394d2SVenkat Kumar Ande mdio_sca = 0; 73d06394d2SVenkat Kumar Ande AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg); 74d06394d2SVenkat Kumar Ande AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port); 75d06394d2SVenkat Kumar Ande AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da); 76d06394d2SVenkat Kumar Ande 77d06394d2SVenkat Kumar Ande return mdio_sca; 78d06394d2SVenkat Kumar Ande } 79d06394d2SVenkat Kumar Ande 804ac7516bSRavi Kumar static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr, 814ac7516bSRavi Kumar int reg, u16 val) 824ac7516bSRavi Kumar { 834ac7516bSRavi Kumar unsigned int mdio_sca, mdio_sccd; 844ac7516bSRavi Kumar uint64_t timeout; 854ac7516bSRavi Kumar 86d06394d2SVenkat Kumar Ande mdio_sca = axgbe_create_mdio_sca(addr, reg); 874ac7516bSRavi Kumar AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 884ac7516bSRavi Kumar 894ac7516bSRavi Kumar mdio_sccd = 0; 904ac7516bSRavi Kumar AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val); 914ac7516bSRavi Kumar AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1); 924ac7516bSRavi Kumar AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); 934ac7516bSRavi Kumar AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); 944ac7516bSRavi Kumar 954ac7516bSRavi Kumar timeout = rte_get_timer_cycles() + rte_get_timer_hz(); 964ac7516bSRavi Kumar while (time_before(rte_get_timer_cycles(), timeout)) { 974ac7516bSRavi Kumar rte_delay_us(100); 984ac7516bSRavi Kumar if (mdio_complete(pdata)) 994ac7516bSRavi Kumar return 0; 1004ac7516bSRavi Kumar } 1014ac7516bSRavi Kumar 1024ac7516bSRavi Kumar PMD_DRV_LOG(ERR, "Mdio write operation timed out\n"); 1034ac7516bSRavi Kumar return -ETIMEDOUT; 1044ac7516bSRavi Kumar } 1054ac7516bSRavi Kumar 1064ac7516bSRavi Kumar static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr, 1074ac7516bSRavi Kumar int reg) 1084ac7516bSRavi Kumar { 1094ac7516bSRavi Kumar unsigned int mdio_sca, mdio_sccd; 1104ac7516bSRavi Kumar uint64_t timeout; 
1114ac7516bSRavi Kumar 112d06394d2SVenkat Kumar Ande mdio_sca = axgbe_create_mdio_sca(addr, reg); 1134ac7516bSRavi Kumar AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca); 1144ac7516bSRavi Kumar 1154ac7516bSRavi Kumar mdio_sccd = 0; 1164ac7516bSRavi Kumar AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3); 1174ac7516bSRavi Kumar AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1); 1184ac7516bSRavi Kumar AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd); 1194ac7516bSRavi Kumar 1204ac7516bSRavi Kumar timeout = rte_get_timer_cycles() + rte_get_timer_hz(); 1214ac7516bSRavi Kumar 1224ac7516bSRavi Kumar while (time_before(rte_get_timer_cycles(), timeout)) { 1234ac7516bSRavi Kumar rte_delay_us(100); 1244ac7516bSRavi Kumar if (mdio_complete(pdata)) 1254ac7516bSRavi Kumar goto success; 1264ac7516bSRavi Kumar } 1274ac7516bSRavi Kumar 1284ac7516bSRavi Kumar PMD_DRV_LOG(ERR, "Mdio read operation timed out\n"); 1294ac7516bSRavi Kumar return -ETIMEDOUT; 1304ac7516bSRavi Kumar 1314ac7516bSRavi Kumar success: 1324ac7516bSRavi Kumar return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA); 1334ac7516bSRavi Kumar } 1344ac7516bSRavi Kumar 1354ac7516bSRavi Kumar static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port, 1364ac7516bSRavi Kumar enum axgbe_mdio_mode mode) 1374ac7516bSRavi Kumar { 1384ac7516bSRavi Kumar unsigned int reg_val = 0; 1394ac7516bSRavi Kumar 1404ac7516bSRavi Kumar switch (mode) { 1414ac7516bSRavi Kumar case AXGBE_MDIO_MODE_CL22: 1424ac7516bSRavi Kumar if (port > AXGMAC_MAX_C22_PORT) 1434ac7516bSRavi Kumar return -EINVAL; 1444ac7516bSRavi Kumar reg_val |= (1 << port); 1454ac7516bSRavi Kumar break; 1464ac7516bSRavi Kumar case AXGBE_MDIO_MODE_CL45: 1474ac7516bSRavi Kumar break; 1484ac7516bSRavi Kumar default: 1494ac7516bSRavi Kumar return -EINVAL; 1504ac7516bSRavi Kumar } 1514ac7516bSRavi Kumar AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val); 1524ac7516bSRavi Kumar 1534ac7516bSRavi Kumar return 0; 1544ac7516bSRavi Kumar } 1554ac7516bSRavi Kumar 
/* Indirect-read a PCS MMD register through the memory-mapped XPCS window
 * (version 2 access method).  The xpcs_mutex serializes the window-select
 * write and the data read.
 */
static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
				  int prtad __rte_unused, int mmd_reg)
{
	unsigned int mmd_address, index, offset;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and reading 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	pthread_mutex_lock(&pdata->xpcs_mutex);

	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	mmd_data = XPCS16_IOREAD(pdata, offset);

	pthread_mutex_unlock(&pdata->xpcs_mutex);

	return mmd_data;
}

/* Indirect-write a PCS MMD register through the memory-mapped XPCS window
 * (version 2 access method).  Mirrors axgbe_read_mmd_regs_v2().
 */
static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
				    int prtad __rte_unused,
				    int mmd_reg, int mmd_data)
{
	unsigned int mmd_address, index, offset;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and writing 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	pthread_mutex_lock(&pdata->xpcs_mutex);

	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	XPCS16_IOWRITE(pdata, offset, mmd_data);

	pthread_mutex_unlock(&pdata->xpcs_mutex);
}

/* Dispatch an MMD register read on the XPCS access version of this
 * hardware.  V1 is not supported by this PMD and returns -1.
 */
static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
			       int mmd_reg)
{
	switch (pdata->vdata->xpcs_access) {
	case AXGBE_XPCS_ACCESS_V1:
		PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
		return -1;
	case AXGBE_XPCS_ACCESS_V2:
	default:
		return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
	}
}

/* Dispatch an MMD register write on the XPCS access version of this
 * hardware.  V1 is not supported by this PMD and is a no-op.
 */
static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
				 int mmd_reg, int mmd_data)
{
	switch (pdata->vdata->xpcs_access) {
	case AXGBE_XPCS_ACCESS_V1:
		PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
		return;
	case AXGBE_XPCS_ACCESS_V2:
	default:
		return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
	}
}

/* Program the MAC Tx configuration speed-select (SS) field for the given
 * link speed.  Only writes the register when the field actually changes.
 * Returns -EINVAL for an unsupported speed.
 */
static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
{
	unsigned int ss;

	switch (speed) {
	case SPEED_10:
		ss = 0x07;
		break;
	case SPEED_1000:
		ss = 0x03;
		break;
	case SPEED_2500:
		ss = 0x02;
		break;
	case SPEED_10000:
		ss = 0x00;
		break;
	default:
		return -EINVAL;
	}

	if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
		AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

	return 0;
}

/* Number of Tx flow-control registers to program: per-priority on
 * MAC >= 30H, otherwise per-queue (capped at the hardware maximum).
 */
static unsigned int axgbe_get_fc_queue_count(struct axgbe_port *pdata)
{
	unsigned int max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;

	/* From MAC ver 30H the TFCR is per priority, instead of per queue */
	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
		return max_q_count;
	else
		return (RTE_MIN(pdata->tx_q_count, max_q_count));
}

/* Disable transmit flow control: clear per-queue MTL EHFC and the TFE
 * bit in every MAC Q0TFCR-family register.
 */
static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
{
	unsigned int reg, reg_val;
	unsigned int i, q_count;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	q_count = axgbe_get_fc_queue_count(pdata);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = AXGMAC_IOREAD(pdata, reg);
		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		AXGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

/* Enable transmit flow control.  MTL EHFC is set only for queues whose
 * RFD threshold was established (rx_rfd != 0); the MAC registers get TFE
 * plus the maximum pause time.
 */
static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
{
	unsigned int reg, reg_val;
	unsigned int i, q_count;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		unsigned int ehfc = 0;

		/* Flow control thresholds are established */
		if (pdata->rx_rfd[i])
			ehfc = 1;

		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);

		PMD_DRV_LOG(DEBUG, "flow control %s for RXq%u\n",
			    ehfc ? "enabled" : "disabled", i);
	}

	/* Set MAC flow control */
	q_count = axgbe_get_fc_queue_count(pdata);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = AXGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
		/* Set pause time */
		AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		AXGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return 0;
}

/* Disable receive flow control (clear RFE in MAC_RFCR). */
static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return 0;
}

/* Enable receive flow control (set RFE in MAC_RFCR). */
static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return 0;
}

/* Apply the configured tx_pause setting to the hardware. */
static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)
{
	if (pdata->tx_pause)
		axgbe_enable_tx_flow_control(pdata);
	else
		axgbe_disable_tx_flow_control(pdata);

	return 0;
}

/* Apply the configured rx_pause setting to the hardware. */
static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)
{
	if (pdata->rx_pause)
		axgbe_enable_rx_flow_control(pdata);
	else
		axgbe_disable_rx_flow_control(pdata);

	return 0;
}

/* Program Tx and Rx flow control and disable priority-based flow
 * control (PFCE).
 */
static void axgbe_config_flow_control(struct axgbe_port *pdata)
{
	axgbe_config_tx_flow_control(pdata);
	axgbe_config_rx_flow_control(pdata);

	AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
}

/* Compute the per-queue RFA (flow-control activate) and RFD (deactivate)
 * thresholds from the queue FIFO size and the current maximum frame size,
 * storing them in pdata->rx_rfa[]/rx_rfd[].  rx_rfd == 0 signals that
 * flow control is not used for the queue.
 */
static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,
					       unsigned int queue,
					       unsigned int q_fifo_size)
{
	unsigned int frame_fifo_size;
	unsigned int rfa, rfd;

	frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));

	/* This path deals with just maximum frame sizes which are
	 * limited to a jumbo frame of 9,000 (plus headers, etc.)
	 * so we can never exceed the maximum allowable RFA/RFD
	 * values.
	 */
	if (q_fifo_size <= 2048) {
		/* rx_rfd to zero to signal no flow control */
		pdata->rx_rfa[queue] = 0;
		pdata->rx_rfd[queue] = 0;
		return;
	}

	if (q_fifo_size <= 4096) {
		/* Between 2048 and 4096 */
		pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
		pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
		return;
	}

	if (q_fifo_size <= frame_fifo_size) {
		/* Between 4096 and max-frame */
		pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
		pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
		return;
	}

	if (q_fifo_size <= (frame_fifo_size * 3)) {
		/* Between max-frame and 3 max-frames,
		 * trigger if we get just over a frame of data and
		 * resume when we have just under half a frame left.
		 */
		rfa = q_fifo_size - frame_fifo_size;
		rfd = rfa + (frame_fifo_size / 2);
	} else {
		/* Above 3 max-frames - trigger when just over
		 * 2 frames of space available
		 */
		rfa = frame_fifo_size * 2;
		rfa += AXGMAC_FLOW_CONTROL_UNIT;
		rfd = rfa + frame_fifo_size;
	}

	pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);
	pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);
}

/* Recompute flow-control thresholds for every Rx queue. */
static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)
{
	unsigned int q_fifo_size;
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;

		axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
	}
}

/* Write the computed RFA/RFD thresholds into the per-queue MTL registers. */
static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
					pdata->rx_rfa[i]);
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
					pdata->rx_rfd[i]);
	}
}

/* Enable hardware stripping of the outer C-TAG VLAN header into the Rx
 * descriptor.
 */
static int axgbe_enable_rx_vlan_stripping(struct axgbe_port *pdata)
{
	/* Put the VLAN tag in the Rx descriptor */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

	/* Don't check the VLAN type */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

	/* Check only C-TAG (0x8100) packets */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

	/* Enable VLAN tag stripping */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
	return 0;
}

/* Disable hardware VLAN tag stripping. */
static int axgbe_disable_rx_vlan_stripping(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
	return 0;
}

/* Enable hash-table based Rx VLAN filtering on the lower 12 tag bits. */
static int axgbe_enable_rx_vlan_filtering(struct axgbe_port *pdata)
{
	/* Enable VLAN filtering */
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

	/* Enable VLAN Hash Table filtering */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

	/* Disable VLAN tag inverse matching */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

	/* Only filter on the lower 12-bits of the VLAN tag */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero.  Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
	return 0;
}

/* Disable Rx VLAN filtering. */
static int axgbe_disable_rx_vlan_filtering(struct axgbe_port *pdata)
{
	/* Disable VLAN filtering */
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
	return 0;
}

/* Bitwise little-endian CRC-32 (poly 0xedb88320) over the low
 * get_lastbit_set(VLAN_VID_MASK) bits of the little-endian VLAN ID,
 * as required for the MAC VLAN hash-table lookup.
 */
static u32 axgbe_vid_crc32_le(__le16 vid_le)
{
	u32 poly = 0xedb88320;	/* CRCPOLY_LE */
	u32 crc = ~0;
	u32 temp = 0;
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	int i, bits;

	bits = get_lastbit_set(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= poly;
	}
	return crc;
}

/* Rebuild the 16-bit VLAN hash table from pdata->active_vlans: each
 * active VID contributes the bit selected by the top 4 bits of the
 * bit-reversed, inverted CRC of its little-endian value.
 */
static int axgbe_update_vlan_hash_table(struct axgbe_port *pdata)
{
	u32 crc = 0;
	u16 vid;
	__le16 vid_le = 0;
	u16 vlan_hash_table = 0;
	unsigned int reg = 0;
	unsigned long vid_idx, vid_valid;

	/* Generate the VLAN Hash Table value */
	for (vid = 0; vid < VLAN_N_VID; vid++) {
		vid_idx = VLAN_TABLE_IDX(vid);
		vid_valid = pdata->active_vlans[vid_idx];
		vid_valid = (unsigned long)vid_valid >> (vid - (64 * vid_idx));
		if (vid_valid & 1)
			PMD_DRV_LOG(DEBUG,
				    "vid:%d pdata->active_vlans[%ld]=0x%lx\n",
				    vid, vid_idx, pdata->active_vlans[vid_idx]);
		else
			continue;

		vid_le = rte_cpu_to_le_16(vid);
		crc = bitrev32(~axgbe_vid_crc32_le(vid_le)) >> 28;
		vlan_hash_table |= (1 << crc);
		PMD_DRV_LOG(DEBUG, "crc = %d vlan_hash_table = 0x%x\n",
			    crc, vlan_hash_table);
	}
	/* Set the VLAN Hash Table filtering register */
	AXGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
	reg = AXGMAC_IOREAD(pdata, MAC_VLANHTR);
	PMD_DRV_LOG(DEBUG, "vlan_hash_table reg val = 0x%x\n", reg);
	return 0;
}

/* Issue one DMA software reset and poll (up to 2000 * 500 us) for the
 * SWR bit to self-clear.  Returns 0 on success, -EBUSY on timeout.
 */
static int __axgbe_exit(struct axgbe_port *pdata)
{
	unsigned int count = 2000;

	/* Issue a software reset */
	AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
	rte_delay_us(10);

	/* Poll Until Poll Condition */
	while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
		rte_delay_us(500);

	if (!count)
		return -EBUSY;

	return 0;
}

/* Reset the device.  The reset is issued twice to guard against
 * spurious interrupts generated by the first reset.
 */
static int axgbe_exit(struct axgbe_port *pdata)
{
	int ret;

	/* To guard against possible incorrectly generated interrupts,
	 * issue the software reset twice.
	 */
	ret = __axgbe_exit(pdata);
	if (ret)
		return ret;

	return __axgbe_exit(pdata);
}

/* Flush all MTL Tx queues and wait for each FTQ bit to self-clear.
 * No-op on MAC versions older than 21H.  Returns -EBUSY if a queue
 * fails to flush within the poll budget.
 */
static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
{
	unsigned int i, count;

	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
		return 0;

	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

	/* Poll Until Poll Condition */
	for (i = 0; i < pdata->tx_q_count; i++) {
		count = 2000;
		while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
							 MTL_Q_TQOMR, FTQ))
			rte_delay_us(500);

		if (!count)
			return -EBUSY;
	}

	return 0;
}

/* Configure the DMA system-bus interface: enhanced addressing,
 * outstanding request limits and burst behavior.
 */
static void axgbe_config_dma_bus(struct axgbe_port *pdata)
{
	/* Set enhanced addressing mode */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);

	/* Out standing read/write requests*/
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);

	/* Set the System Bus mode */
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
	AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
}

/* Program the AXI read/write cache attributes for DMA transactions. */
static void axgbe_config_dma_cache(struct axgbe_port *pdata)
{
	unsigned int arcache, awcache, arwcache;

	arcache = 0;
	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0xf);
	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, 0xf);
	AXGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, 0xf);
	AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);

	awcache = 0;
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0xf);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0xf);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0xf);
	AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0xf);
	AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);

	arwcache = 0;
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0xf);
	AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0xf);
	AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
}

/* Program the enhanced DMA Tx/Rx control registers.
 * NOTE(review): 0x5 is written as-is from the reference driver; exact
 * field meaning not visible here — confirm against hardware docs.
 */
static void axgbe_config_edma_control(struct axgbe_port *pdata)
{
	AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
	AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
}

/* Apply the configured operate-on-second-packet (OSP) mode to every
 * Tx queue's DMA channel.
 */
static int axgbe_config_osp_mode(struct axgbe_port *pdata)
{
	/* Force DMA to operate on second packet before closing descriptors
	 * of first packet
	 */
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
					pdata->tx_osp_mode);
	}

	return 0;
}

/* Apply the PBLx8 (burst-length multiplier) setting to every Tx queue's
 * DMA channel.
 */
static int axgbe_config_pblx8(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
					pdata->pblx8);
	}
	return 0;
}

/* Apply the configured Tx programmable burst length to every Tx queue. */
static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
{
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
		txq = pdata->eth_dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
					pdata->tx_pbl);
	}

	return 0;
}

/* Apply the configured Rx programmable burst length to every Rx queue. */
static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
{
	struct axgbe_rx_queue *rxq;
	unsigned int i;

	for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
		rxq
= pdata->eth_dev->data->rx_queues[i]; 7337c4158a5SRavi Kumar AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL, 7347c4158a5SRavi Kumar pdata->rx_pbl); 7357c4158a5SRavi Kumar } 7367c4158a5SRavi Kumar 7377c4158a5SRavi Kumar return 0; 7387c4158a5SRavi Kumar } 7397c4158a5SRavi Kumar 7407c4158a5SRavi Kumar static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata) 7417c4158a5SRavi Kumar { 7427c4158a5SRavi Kumar struct axgbe_rx_queue *rxq; 7437c4158a5SRavi Kumar unsigned int i; 7447c4158a5SRavi Kumar 7457c4158a5SRavi Kumar for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) { 7467c4158a5SRavi Kumar rxq = pdata->eth_dev->data->rx_queues[i]; 7477c4158a5SRavi Kumar 7487c4158a5SRavi Kumar rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) - 7497c4158a5SRavi Kumar RTE_PKTMBUF_HEADROOM; 7507c4158a5SRavi Kumar rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) & 7517c4158a5SRavi Kumar ~(AXGBE_RX_BUF_ALIGN - 1); 7527c4158a5SRavi Kumar 7537c4158a5SRavi Kumar if (rxq->buf_size > pdata->rx_buf_size) 7547c4158a5SRavi Kumar pdata->rx_buf_size = rxq->buf_size; 7557c4158a5SRavi Kumar 7567c4158a5SRavi Kumar AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ, 7577c4158a5SRavi Kumar rxq->buf_size); 7587c4158a5SRavi Kumar } 7597c4158a5SRavi Kumar } 7607c4158a5SRavi Kumar 7617c4158a5SRavi Kumar static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type, 7627c4158a5SRavi Kumar unsigned int index, unsigned int val) 7637c4158a5SRavi Kumar { 7647c4158a5SRavi Kumar unsigned int wait; 7657c4158a5SRavi Kumar 7667c4158a5SRavi Kumar if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) 7677c4158a5SRavi Kumar return -EBUSY; 7687c4158a5SRavi Kumar 7697c4158a5SRavi Kumar AXGMAC_IOWRITE(pdata, MAC_RSSDR, val); 7707c4158a5SRavi Kumar 7717c4158a5SRavi Kumar AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index); 7727c4158a5SRavi Kumar AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type); 7737c4158a5SRavi Kumar AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0); 7747c4158a5SRavi Kumar 
AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1); 7757c4158a5SRavi Kumar 7767c4158a5SRavi Kumar wait = 1000; 7777c4158a5SRavi Kumar while (wait--) { 7787c4158a5SRavi Kumar if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) 7797c4158a5SRavi Kumar return 0; 7807c4158a5SRavi Kumar 7817c4158a5SRavi Kumar rte_delay_us(1500); 7827c4158a5SRavi Kumar } 7837c4158a5SRavi Kumar 7847c4158a5SRavi Kumar return -EBUSY; 7857c4158a5SRavi Kumar } 7867c4158a5SRavi Kumar 78776d7664dSChandu Babu N int axgbe_write_rss_hash_key(struct axgbe_port *pdata) 7887c4158a5SRavi Kumar { 7897c4158a5SRavi Kumar struct rte_eth_rss_conf *rss_conf; 7907c4158a5SRavi Kumar unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32); 7917c4158a5SRavi Kumar unsigned int *key; 7927c4158a5SRavi Kumar int ret; 7937c4158a5SRavi Kumar 7947c4158a5SRavi Kumar rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf; 7957c4158a5SRavi Kumar 7967c4158a5SRavi Kumar if (!rss_conf->rss_key) 7977c4158a5SRavi Kumar key = (unsigned int *)&pdata->rss_key; 7987c4158a5SRavi Kumar else 7997c4158a5SRavi Kumar key = (unsigned int *)&rss_conf->rss_key; 8007c4158a5SRavi Kumar 8017c4158a5SRavi Kumar while (key_regs--) { 8027c4158a5SRavi Kumar ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE, 8037c4158a5SRavi Kumar key_regs, *key++); 8047c4158a5SRavi Kumar if (ret) 8057c4158a5SRavi Kumar return ret; 8067c4158a5SRavi Kumar } 8077c4158a5SRavi Kumar 8087c4158a5SRavi Kumar return 0; 8097c4158a5SRavi Kumar } 8107c4158a5SRavi Kumar 81176d7664dSChandu Babu N int axgbe_write_rss_lookup_table(struct axgbe_port *pdata) 8127c4158a5SRavi Kumar { 8137c4158a5SRavi Kumar unsigned int i; 8147c4158a5SRavi Kumar int ret; 8157c4158a5SRavi Kumar 8167c4158a5SRavi Kumar for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) { 8177c4158a5SRavi Kumar ret = axgbe_write_rss_reg(pdata, 8187c4158a5SRavi Kumar AXGBE_RSS_LOOKUP_TABLE_TYPE, i, 8197c4158a5SRavi Kumar pdata->rss_table[i]); 8207c4158a5SRavi Kumar if (ret) 8217c4158a5SRavi Kumar return ret; 
8227c4158a5SRavi Kumar } 8237c4158a5SRavi Kumar 8247c4158a5SRavi Kumar return 0; 8257c4158a5SRavi Kumar } 8267c4158a5SRavi Kumar 8277c4158a5SRavi Kumar static int axgbe_enable_rss(struct axgbe_port *pdata) 8287c4158a5SRavi Kumar { 8297c4158a5SRavi Kumar int ret; 8307c4158a5SRavi Kumar 8317c4158a5SRavi Kumar /* Program the hash key */ 8327c4158a5SRavi Kumar ret = axgbe_write_rss_hash_key(pdata); 8337c4158a5SRavi Kumar if (ret) 8347c4158a5SRavi Kumar return ret; 8357c4158a5SRavi Kumar 8367c4158a5SRavi Kumar /* Program the lookup table */ 8377c4158a5SRavi Kumar ret = axgbe_write_rss_lookup_table(pdata); 8387c4158a5SRavi Kumar if (ret) 8397c4158a5SRavi Kumar return ret; 8407c4158a5SRavi Kumar 8417c4158a5SRavi Kumar /* Set the RSS options */ 8427c4158a5SRavi Kumar AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options); 8437c4158a5SRavi Kumar 8447c4158a5SRavi Kumar /* Enable RSS */ 8457c4158a5SRavi Kumar AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1); 8467c4158a5SRavi Kumar 8477c4158a5SRavi Kumar return 0; 8487c4158a5SRavi Kumar } 8497c4158a5SRavi Kumar 8507c4158a5SRavi Kumar static void axgbe_rss_options(struct axgbe_port *pdata) 8517c4158a5SRavi Kumar { 8527c4158a5SRavi Kumar struct rte_eth_rss_conf *rss_conf; 8537c4158a5SRavi Kumar uint64_t rss_hf; 8547c4158a5SRavi Kumar 8557c4158a5SRavi Kumar rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf; 85676d7664dSChandu Babu N pdata->rss_hf = rss_conf->rss_hf; 8577c4158a5SRavi Kumar rss_hf = rss_conf->rss_hf; 8587c4158a5SRavi Kumar 859295968d1SFerruh Yigit if (rss_hf & (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_IPV6)) 8607c4158a5SRavi Kumar AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1); 861295968d1SFerruh Yigit if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV6_TCP)) 8627c4158a5SRavi Kumar AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1); 863295968d1SFerruh Yigit if (rss_hf & (RTE_ETH_RSS_NONFRAG_IPV4_UDP | RTE_ETH_RSS_NONFRAG_IPV6_UDP)) 8647c4158a5SRavi Kumar 
AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); 8657c4158a5SRavi Kumar } 8667c4158a5SRavi Kumar 8677c4158a5SRavi Kumar static int axgbe_config_rss(struct axgbe_port *pdata) 8687c4158a5SRavi Kumar { 8697c4158a5SRavi Kumar uint32_t i; 8707c4158a5SRavi Kumar 8717c4158a5SRavi Kumar if (pdata->rss_enable) { 8727c4158a5SRavi Kumar /* Initialize RSS hash key and lookup table */ 8737c4158a5SRavi Kumar uint32_t *key = (uint32_t *)pdata->rss_key; 8747c4158a5SRavi Kumar 8757c4158a5SRavi Kumar for (i = 0; i < sizeof(pdata->rss_key) / 4; i++) 8767c4158a5SRavi Kumar *key++ = (uint32_t)rte_rand(); 8777c4158a5SRavi Kumar for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++) 8787c4158a5SRavi Kumar AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, 8797c4158a5SRavi Kumar i % pdata->eth_dev->data->nb_rx_queues); 8807c4158a5SRavi Kumar axgbe_rss_options(pdata); 8817c4158a5SRavi Kumar if (axgbe_enable_rss(pdata)) { 8827c4158a5SRavi Kumar PMD_DRV_LOG(ERR, "Error in enabling RSS support\n"); 8837c4158a5SRavi Kumar return -1; 8847c4158a5SRavi Kumar } 8857c4158a5SRavi Kumar } else { 8867c4158a5SRavi Kumar AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0); 8877c4158a5SRavi Kumar } 8887c4158a5SRavi Kumar 8897c4158a5SRavi Kumar return 0; 8907c4158a5SRavi Kumar } 8917c4158a5SRavi Kumar 8927c4158a5SRavi Kumar static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata) 8937c4158a5SRavi Kumar { 8947c4158a5SRavi Kumar struct axgbe_tx_queue *txq; 8957c4158a5SRavi Kumar unsigned int dma_ch_isr, dma_ch_ier; 8967c4158a5SRavi Kumar unsigned int i; 8977c4158a5SRavi Kumar 8987c4158a5SRavi Kumar for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) { 8997c4158a5SRavi Kumar txq = pdata->eth_dev->data->tx_queues[i]; 9007c4158a5SRavi Kumar 9017c4158a5SRavi Kumar /* Clear all the interrupts which are set */ 9027c4158a5SRavi Kumar dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR); 9037c4158a5SRavi Kumar AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr); 9047c4158a5SRavi Kumar 9057c4158a5SRavi Kumar /* 
Clear all interrupt enable bits */ 9067c4158a5SRavi Kumar dma_ch_ier = 0; 9077c4158a5SRavi Kumar 9087c4158a5SRavi Kumar /* Enable following interrupts 9097c4158a5SRavi Kumar * NIE - Normal Interrupt Summary Enable 9107c4158a5SRavi Kumar * AIE - Abnormal Interrupt Summary Enable 9117c4158a5SRavi Kumar * FBEE - Fatal Bus Error Enable 9127c4158a5SRavi Kumar */ 9137c4158a5SRavi Kumar AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0); 9147c4158a5SRavi Kumar AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1); 9157c4158a5SRavi Kumar AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1); 9167c4158a5SRavi Kumar 9177c4158a5SRavi Kumar /* Enable following Rx interrupts 9187c4158a5SRavi Kumar * RBUE - Receive Buffer Unavailable Enable 9197c4158a5SRavi Kumar * RIE - Receive Interrupt Enable (unless using 9207c4158a5SRavi Kumar * per channel interrupts in edge triggered 9217c4158a5SRavi Kumar * mode) 9227c4158a5SRavi Kumar */ 9237c4158a5SRavi Kumar AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0); 9247c4158a5SRavi Kumar 9257c4158a5SRavi Kumar AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier); 9267c4158a5SRavi Kumar } 9277c4158a5SRavi Kumar } 9287c4158a5SRavi Kumar 9297c4158a5SRavi Kumar static void wrapper_tx_desc_init(struct axgbe_port *pdata) 9307c4158a5SRavi Kumar { 9317c4158a5SRavi Kumar struct axgbe_tx_queue *txq; 9327c4158a5SRavi Kumar unsigned int i; 9337c4158a5SRavi Kumar 9347c4158a5SRavi Kumar for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) { 9357c4158a5SRavi Kumar txq = pdata->eth_dev->data->tx_queues[i]; 9367c4158a5SRavi Kumar txq->cur = 0; 9377c4158a5SRavi Kumar txq->dirty = 0; 9387c4158a5SRavi Kumar /* Update the total number of Tx descriptors */ 9397c4158a5SRavi Kumar AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1); 9407c4158a5SRavi Kumar /* Update the starting address of descriptor ring */ 9417c4158a5SRavi Kumar AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI, 9427c4158a5SRavi Kumar high32_value(txq->ring_phys_addr)); 9437c4158a5SRavi Kumar AXGMAC_DMA_IOWRITE(txq, 
DMA_CH_TDLR_LO, 9447c4158a5SRavi Kumar low32_value(txq->ring_phys_addr)); 9457c4158a5SRavi Kumar } 9467c4158a5SRavi Kumar } 9477c4158a5SRavi Kumar 9487c4158a5SRavi Kumar static int wrapper_rx_desc_init(struct axgbe_port *pdata) 9497c4158a5SRavi Kumar { 9507c4158a5SRavi Kumar struct axgbe_rx_queue *rxq; 9517c4158a5SRavi Kumar struct rte_mbuf *mbuf; 9527c4158a5SRavi Kumar volatile union axgbe_rx_desc *desc; 9537c4158a5SRavi Kumar unsigned int i, j; 9547c4158a5SRavi Kumar 9557c4158a5SRavi Kumar for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) { 9567c4158a5SRavi Kumar rxq = pdata->eth_dev->data->rx_queues[i]; 9577c4158a5SRavi Kumar 9587c4158a5SRavi Kumar /* Initialize software ring entries */ 9597c4158a5SRavi Kumar rxq->mbuf_alloc = 0; 9607c4158a5SRavi Kumar rxq->cur = 0; 9617c4158a5SRavi Kumar rxq->dirty = 0; 9627c4158a5SRavi Kumar desc = AXGBE_GET_DESC_PT(rxq, 0); 9637c4158a5SRavi Kumar 9647c4158a5SRavi Kumar for (j = 0; j < rxq->nb_desc; j++) { 9657c4158a5SRavi Kumar mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); 9667c4158a5SRavi Kumar if (mbuf == NULL) { 9677c4158a5SRavi Kumar PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n", 9687c4158a5SRavi Kumar (unsigned int)rxq->queue_id, j); 9697483341aSXueming Li axgbe_dev_rx_queue_release(pdata->eth_dev, i); 9707c4158a5SRavi Kumar return -ENOMEM; 9717c4158a5SRavi Kumar } 9727c4158a5SRavi Kumar rxq->sw_ring[j] = mbuf; 9737c4158a5SRavi Kumar /* Mbuf populate */ 9747c4158a5SRavi Kumar mbuf->next = NULL; 9757c4158a5SRavi Kumar mbuf->data_off = RTE_PKTMBUF_HEADROOM; 9767c4158a5SRavi Kumar mbuf->nb_segs = 1; 9777c4158a5SRavi Kumar mbuf->port = rxq->port_id; 9787c4158a5SRavi Kumar desc->read.baddr = 9797c4158a5SRavi Kumar rte_cpu_to_le_64( 9807c4158a5SRavi Kumar rte_mbuf_data_iova_default(mbuf)); 9817c4158a5SRavi Kumar rte_wmb(); 9827c4158a5SRavi Kumar AXGMAC_SET_BITS_LE(desc->read.desc3, 9837c4158a5SRavi Kumar RX_NORMAL_DESC3, OWN, 1); 9847c4158a5SRavi Kumar rte_wmb(); 9857c4158a5SRavi Kumar rxq->mbuf_alloc++; 
9867c4158a5SRavi Kumar desc++; 9877c4158a5SRavi Kumar } 9887c4158a5SRavi Kumar /* Update the total number of Rx descriptors */ 9897c4158a5SRavi Kumar AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR, 9907c4158a5SRavi Kumar rxq->nb_desc - 1); 9917c4158a5SRavi Kumar /* Update the starting address of descriptor ring */ 9927c4158a5SRavi Kumar AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI, 9937c4158a5SRavi Kumar high32_value(rxq->ring_phys_addr)); 9947c4158a5SRavi Kumar AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO, 9957c4158a5SRavi Kumar low32_value(rxq->ring_phys_addr)); 9967c4158a5SRavi Kumar /* Update the Rx Descriptor Tail Pointer */ 9977c4158a5SRavi Kumar AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO, 9987c4158a5SRavi Kumar low32_value(rxq->ring_phys_addr + 9997c4158a5SRavi Kumar (rxq->nb_desc - 1) * 10007c4158a5SRavi Kumar sizeof(union axgbe_rx_desc))); 10017c4158a5SRavi Kumar } 10027c4158a5SRavi Kumar return 0; 10037c4158a5SRavi Kumar } 10047c4158a5SRavi Kumar 10057c4158a5SRavi Kumar static void axgbe_config_mtl_mode(struct axgbe_port *pdata) 10067c4158a5SRavi Kumar { 10077c4158a5SRavi Kumar unsigned int i; 10087c4158a5SRavi Kumar 10097c4158a5SRavi Kumar /* Set Tx to weighted round robin scheduling algorithm */ 10107c4158a5SRavi Kumar AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR); 10117c4158a5SRavi Kumar 10127c4158a5SRavi Kumar /* Set Tx traffic classes to use WRR algorithm with equal weights */ 10137c4158a5SRavi Kumar for (i = 0; i < pdata->hw_feat.tc_cnt; i++) { 10147c4158a5SRavi Kumar AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, 10157c4158a5SRavi Kumar MTL_TSA_ETS); 10167c4158a5SRavi Kumar AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1); 10177c4158a5SRavi Kumar } 10187c4158a5SRavi Kumar 10197c4158a5SRavi Kumar /* Set Rx to strict priority algorithm */ 10207c4158a5SRavi Kumar AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP); 10217c4158a5SRavi Kumar } 10227c4158a5SRavi Kumar 10237c4158a5SRavi Kumar static int axgbe_config_tsf_mode(struct axgbe_port *pdata, 
unsigned int val) 10247c4158a5SRavi Kumar { 10257c4158a5SRavi Kumar unsigned int i; 10267c4158a5SRavi Kumar 10277c4158a5SRavi Kumar for (i = 0; i < pdata->tx_q_count; i++) 10287c4158a5SRavi Kumar AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val); 10297c4158a5SRavi Kumar 10307c4158a5SRavi Kumar return 0; 10317c4158a5SRavi Kumar } 10327c4158a5SRavi Kumar 10337c4158a5SRavi Kumar static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val) 10347c4158a5SRavi Kumar { 10357c4158a5SRavi Kumar unsigned int i; 10367c4158a5SRavi Kumar 10377c4158a5SRavi Kumar for (i = 0; i < pdata->rx_q_count; i++) 10387c4158a5SRavi Kumar AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val); 10397c4158a5SRavi Kumar 10407c4158a5SRavi Kumar return 0; 10417c4158a5SRavi Kumar } 10427c4158a5SRavi Kumar 10437c4158a5SRavi Kumar static int axgbe_config_tx_threshold(struct axgbe_port *pdata, 10447c4158a5SRavi Kumar unsigned int val) 10457c4158a5SRavi Kumar { 10467c4158a5SRavi Kumar unsigned int i; 10477c4158a5SRavi Kumar 10487c4158a5SRavi Kumar for (i = 0; i < pdata->tx_q_count; i++) 10497c4158a5SRavi Kumar AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val); 10507c4158a5SRavi Kumar 10517c4158a5SRavi Kumar return 0; 10527c4158a5SRavi Kumar } 10537c4158a5SRavi Kumar 10547c4158a5SRavi Kumar static int axgbe_config_rx_threshold(struct axgbe_port *pdata, 10557c4158a5SRavi Kumar unsigned int val) 10567c4158a5SRavi Kumar { 10577c4158a5SRavi Kumar unsigned int i; 10587c4158a5SRavi Kumar 10597c4158a5SRavi Kumar for (i = 0; i < pdata->rx_q_count; i++) 10607c4158a5SRavi Kumar AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val); 10617c4158a5SRavi Kumar 10627c4158a5SRavi Kumar return 0; 10637c4158a5SRavi Kumar } 10647c4158a5SRavi Kumar 10657be78d02SJosh Soref /* Distributing FIFO size */ 10667c4158a5SRavi Kumar static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata) 10677c4158a5SRavi Kumar { 10687c4158a5SRavi Kumar unsigned int fifo_size; 10697c4158a5SRavi Kumar 
unsigned int q_fifo_size; 10707c4158a5SRavi Kumar unsigned int p_fifo, i; 10717c4158a5SRavi Kumar 10727c4158a5SRavi Kumar fifo_size = RTE_MIN(pdata->rx_max_fifo_size, 10737c4158a5SRavi Kumar pdata->hw_feat.rx_fifo_size); 10747c4158a5SRavi Kumar q_fifo_size = fifo_size / pdata->rx_q_count; 10757c4158a5SRavi Kumar 10767c4158a5SRavi Kumar /* Calculate the fifo setting by dividing the queue's fifo size 10777c4158a5SRavi Kumar * by the fifo allocation increment (with 0 representing the 10787c4158a5SRavi Kumar * base allocation increment so decrement the result 10797c4158a5SRavi Kumar * by 1). 10807c4158a5SRavi Kumar */ 10817c4158a5SRavi Kumar p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT; 10827c4158a5SRavi Kumar if (p_fifo) 10837c4158a5SRavi Kumar p_fifo--; 10847c4158a5SRavi Kumar 10857c4158a5SRavi Kumar for (i = 0; i < pdata->rx_q_count; i++) 10867c4158a5SRavi Kumar AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo); 10877c4158a5SRavi Kumar pdata->fifo = p_fifo; 10887c4158a5SRavi Kumar 10897c4158a5SRavi Kumar /*Calculate and config Flow control threshold*/ 10907c4158a5SRavi Kumar axgbe_calculate_flow_control_threshold(pdata); 10917c4158a5SRavi Kumar axgbe_config_flow_control_threshold(pdata); 10924216cdc0SChandu Babu N 10934216cdc0SChandu Babu N PMD_DRV_LOG(DEBUG, "%d Rx hardware queues, %d byte fifo per queue\n", 10944216cdc0SChandu Babu N pdata->rx_q_count, q_fifo_size); 10957c4158a5SRavi Kumar } 10967c4158a5SRavi Kumar 10977c4158a5SRavi Kumar static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata) 10987c4158a5SRavi Kumar { 10997c4158a5SRavi Kumar unsigned int fifo_size; 11007c4158a5SRavi Kumar unsigned int q_fifo_size; 11017c4158a5SRavi Kumar unsigned int p_fifo, i; 11027c4158a5SRavi Kumar 11037c4158a5SRavi Kumar fifo_size = RTE_MIN(pdata->tx_max_fifo_size, 11047c4158a5SRavi Kumar pdata->hw_feat.tx_fifo_size); 11057c4158a5SRavi Kumar q_fifo_size = fifo_size / pdata->tx_q_count; 11067c4158a5SRavi Kumar 11077c4158a5SRavi Kumar /* Calculate the fifo setting 
by dividing the queue's fifo size 11087c4158a5SRavi Kumar * by the fifo allocation increment (with 0 representing the 11097c4158a5SRavi Kumar * base allocation increment so decrement the result 11107c4158a5SRavi Kumar * by 1). 11117c4158a5SRavi Kumar */ 11127c4158a5SRavi Kumar p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT; 11137c4158a5SRavi Kumar if (p_fifo) 11147c4158a5SRavi Kumar p_fifo--; 11157c4158a5SRavi Kumar 11167c4158a5SRavi Kumar for (i = 0; i < pdata->tx_q_count; i++) 11177c4158a5SRavi Kumar AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo); 11184216cdc0SChandu Babu N 11194216cdc0SChandu Babu N PMD_DRV_LOG(DEBUG, "%d Tx hardware queues, %d byte fifo per queue\n", 11204216cdc0SChandu Babu N pdata->tx_q_count, q_fifo_size); 11217c4158a5SRavi Kumar } 11227c4158a5SRavi Kumar 11237c4158a5SRavi Kumar static void axgbe_config_queue_mapping(struct axgbe_port *pdata) 11247c4158a5SRavi Kumar { 11257c4158a5SRavi Kumar unsigned int qptc, qptc_extra, queue; 11267c4158a5SRavi Kumar unsigned int i, j, reg, reg_val; 11277c4158a5SRavi Kumar 11287c4158a5SRavi Kumar /* Map the MTL Tx Queues to Traffic Classes 11297c4158a5SRavi Kumar * Note: Tx Queues >= Traffic Classes 11307c4158a5SRavi Kumar */ 11317c4158a5SRavi Kumar qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt; 11327c4158a5SRavi Kumar qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt; 11337c4158a5SRavi Kumar 11347c4158a5SRavi Kumar for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) { 11354216cdc0SChandu Babu N for (j = 0; j < qptc; j++) { 11364216cdc0SChandu Babu N PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i); 11377c4158a5SRavi Kumar AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, 11387c4158a5SRavi Kumar Q2TCMAP, i); 11394216cdc0SChandu Babu N } 11404216cdc0SChandu Babu N if (i < qptc_extra) { 11414216cdc0SChandu Babu N PMD_DRV_LOG(DEBUG, "TXq%u mapped to TC%u\n", queue, i); 11427c4158a5SRavi Kumar AXGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR, 11437c4158a5SRavi Kumar Q2TCMAP, i); 
11447c4158a5SRavi Kumar } 11454216cdc0SChandu Babu N } 11467c4158a5SRavi Kumar 11477c4158a5SRavi Kumar if (pdata->rss_enable) { 11487c4158a5SRavi Kumar /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */ 11497c4158a5SRavi Kumar reg = MTL_RQDCM0R; 11507c4158a5SRavi Kumar reg_val = 0; 11517c4158a5SRavi Kumar for (i = 0; i < pdata->rx_q_count;) { 11527c4158a5SRavi Kumar reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3)); 11537c4158a5SRavi Kumar 11547c4158a5SRavi Kumar if ((i % MTL_RQDCM_Q_PER_REG) && 11557c4158a5SRavi Kumar (i != pdata->rx_q_count)) 11567c4158a5SRavi Kumar continue; 11577c4158a5SRavi Kumar 11587c4158a5SRavi Kumar AXGMAC_IOWRITE(pdata, reg, reg_val); 11597c4158a5SRavi Kumar 11607c4158a5SRavi Kumar reg += MTL_RQDCM_INC; 11617c4158a5SRavi Kumar reg_val = 0; 11627c4158a5SRavi Kumar } 11637c4158a5SRavi Kumar } 11647c4158a5SRavi Kumar } 11657c4158a5SRavi Kumar 11667c4158a5SRavi Kumar static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata) 11677c4158a5SRavi Kumar { 11687c4158a5SRavi Kumar unsigned int mtl_q_isr; 11697c4158a5SRavi Kumar unsigned int q_count, i; 11707c4158a5SRavi Kumar 11717c4158a5SRavi Kumar q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt); 11727c4158a5SRavi Kumar for (i = 0; i < q_count; i++) { 11737c4158a5SRavi Kumar /* Clear all the interrupts which are set */ 11747c4158a5SRavi Kumar mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR); 11757c4158a5SRavi Kumar AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr); 11767c4158a5SRavi Kumar 11777c4158a5SRavi Kumar /* No MTL interrupts to be enabled */ 11787c4158a5SRavi Kumar AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0); 11797c4158a5SRavi Kumar } 11807c4158a5SRavi Kumar } 11817c4158a5SRavi Kumar 1182e01d9b2eSChandu Babu N static uint32_t crc32_le(uint32_t crc, uint8_t *p, uint32_t len) 1183e01d9b2eSChandu Babu N { 1184e01d9b2eSChandu Babu N int i; 1185e01d9b2eSChandu Babu N while (len--) { 1186e01d9b2eSChandu Babu N crc ^= *p++; 1187e01d9b2eSChandu Babu N 
for (i = 0; i < 8; i++) 1188e01d9b2eSChandu Babu N crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0); 1189e01d9b2eSChandu Babu N } 1190e01d9b2eSChandu Babu N return crc; 1191e01d9b2eSChandu Babu N } 1192e01d9b2eSChandu Babu N 1193e01d9b2eSChandu Babu N void axgbe_set_mac_hash_table(struct axgbe_port *pdata, u8 *addr, bool add) 1194e01d9b2eSChandu Babu N { 1195e01d9b2eSChandu Babu N uint32_t crc, htable_index, htable_bitmask; 1196e01d9b2eSChandu Babu N 1197e01d9b2eSChandu Babu N crc = bitrev32(~crc32_le(~0, addr, RTE_ETHER_ADDR_LEN)); 1198e01d9b2eSChandu Babu N crc >>= pdata->hash_table_shift; 1199e01d9b2eSChandu Babu N htable_index = crc >> 5; 1200e01d9b2eSChandu Babu N htable_bitmask = 1 << (crc & 0x1f); 1201e01d9b2eSChandu Babu N 1202e01d9b2eSChandu Babu N if (add) { 1203e01d9b2eSChandu Babu N pdata->uc_hash_table[htable_index] |= htable_bitmask; 1204e01d9b2eSChandu Babu N pdata->uc_hash_mac_addr++; 1205e01d9b2eSChandu Babu N } else { 1206e01d9b2eSChandu Babu N pdata->uc_hash_table[htable_index] &= ~htable_bitmask; 1207e01d9b2eSChandu Babu N pdata->uc_hash_mac_addr--; 1208e01d9b2eSChandu Babu N } 1209e01d9b2eSChandu Babu N PMD_DRV_LOG(DEBUG, "%s MAC hash table Bit %d at Index %#x\n", 1210e01d9b2eSChandu Babu N add ? 
"set" : "clear", (crc & 0x1f), htable_index); 1211e01d9b2eSChandu Babu N 1212e01d9b2eSChandu Babu N AXGMAC_IOWRITE(pdata, MAC_HTR(htable_index), 1213e01d9b2eSChandu Babu N pdata->uc_hash_table[htable_index]); 1214e01d9b2eSChandu Babu N } 1215e01d9b2eSChandu Babu N 121649a5e622SChandu Babu N void axgbe_set_mac_addn_addr(struct axgbe_port *pdata, u8 *addr, uint32_t index) 121749a5e622SChandu Babu N { 121849a5e622SChandu Babu N unsigned int mac_addr_hi, mac_addr_lo; 121949a5e622SChandu Babu N u8 *mac_addr; 122049a5e622SChandu Babu N 122149a5e622SChandu Babu N mac_addr_lo = 0; 122249a5e622SChandu Babu N mac_addr_hi = 0; 122349a5e622SChandu Babu N 122449a5e622SChandu Babu N if (addr) { 122549a5e622SChandu Babu N mac_addr = (u8 *)&mac_addr_lo; 122649a5e622SChandu Babu N mac_addr[0] = addr[0]; 122749a5e622SChandu Babu N mac_addr[1] = addr[1]; 122849a5e622SChandu Babu N mac_addr[2] = addr[2]; 122949a5e622SChandu Babu N mac_addr[3] = addr[3]; 123049a5e622SChandu Babu N mac_addr = (u8 *)&mac_addr_hi; 123149a5e622SChandu Babu N mac_addr[0] = addr[4]; 123249a5e622SChandu Babu N mac_addr[1] = addr[5]; 123349a5e622SChandu Babu N 123449a5e622SChandu Babu N /*Address Enable: Use this Addr for Perfect Filtering */ 123549a5e622SChandu Babu N AXGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1); 123649a5e622SChandu Babu N } 123749a5e622SChandu Babu N 123849a5e622SChandu Babu N PMD_DRV_LOG(DEBUG, "%s mac address at %#x\n", 123949a5e622SChandu Babu N addr ? 
"set" : "clear", index); 124049a5e622SChandu Babu N 124149a5e622SChandu Babu N AXGMAC_IOWRITE(pdata, MAC_MACAHR(index), mac_addr_hi); 124249a5e622SChandu Babu N AXGMAC_IOWRITE(pdata, MAC_MACALR(index), mac_addr_lo); 124349a5e622SChandu Babu N } 124449a5e622SChandu Babu N 12457c4158a5SRavi Kumar static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr) 12467c4158a5SRavi Kumar { 12477c4158a5SRavi Kumar unsigned int mac_addr_hi, mac_addr_lo; 12487c4158a5SRavi Kumar 12497c4158a5SRavi Kumar mac_addr_hi = (addr[5] << 8) | (addr[4] << 0); 12507c4158a5SRavi Kumar mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) | 12517c4158a5SRavi Kumar (addr[1] << 8) | (addr[0] << 0); 12527c4158a5SRavi Kumar 12537c4158a5SRavi Kumar AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi); 12547c4158a5SRavi Kumar AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo); 12557c4158a5SRavi Kumar 12567c4158a5SRavi Kumar return 0; 12577c4158a5SRavi Kumar } 12587c4158a5SRavi Kumar 1259e01d9b2eSChandu Babu N static void axgbe_config_mac_hash_table(struct axgbe_port *pdata) 1260e01d9b2eSChandu Babu N { 1261e01d9b2eSChandu Babu N struct axgbe_hw_features *hw_feat = &pdata->hw_feat; 1262e01d9b2eSChandu Babu N 1263e01d9b2eSChandu Babu N pdata->hash_table_shift = 0; 1264e01d9b2eSChandu Babu N pdata->hash_table_count = 0; 1265e01d9b2eSChandu Babu N pdata->uc_hash_mac_addr = 0; 1266e01d9b2eSChandu Babu N memset(pdata->uc_hash_table, 0, sizeof(pdata->uc_hash_table)); 1267e01d9b2eSChandu Babu N 1268e01d9b2eSChandu Babu N if (hw_feat->hash_table_size) { 1269e01d9b2eSChandu Babu N pdata->hash_table_shift = 26 - (hw_feat->hash_table_size >> 7); 1270e01d9b2eSChandu Babu N pdata->hash_table_count = hw_feat->hash_table_size / 32; 1271e01d9b2eSChandu Babu N } 1272e01d9b2eSChandu Babu N } 1273e01d9b2eSChandu Babu N 12747c4158a5SRavi Kumar static void axgbe_config_mac_address(struct axgbe_port *pdata) 12757c4158a5SRavi Kumar { 12767c4158a5SRavi Kumar axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes); 
12777c4158a5SRavi Kumar } 12787c4158a5SRavi Kumar 12797c4158a5SRavi Kumar static void axgbe_config_jumbo_enable(struct axgbe_port *pdata) 12807c4158a5SRavi Kumar { 12817c4158a5SRavi Kumar unsigned int val; 12827c4158a5SRavi Kumar 12837c4158a5SRavi Kumar val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 1 : 0; 12847c4158a5SRavi Kumar 12857c4158a5SRavi Kumar AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val); 12867c4158a5SRavi Kumar } 12877c4158a5SRavi Kumar 12887c4158a5SRavi Kumar static void axgbe_config_mac_speed(struct axgbe_port *pdata) 12897c4158a5SRavi Kumar { 12907c4158a5SRavi Kumar axgbe_set_speed(pdata, pdata->phy_speed); 12917c4158a5SRavi Kumar } 12927c4158a5SRavi Kumar 12937c4158a5SRavi Kumar static void axgbe_config_checksum_offload(struct axgbe_port *pdata) 12947c4158a5SRavi Kumar { 12957c4158a5SRavi Kumar if (pdata->rx_csum_enable) 12967c4158a5SRavi Kumar AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1); 12977c4158a5SRavi Kumar else 12987c4158a5SRavi Kumar AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0); 12997c4158a5SRavi Kumar } 13007c4158a5SRavi Kumar 13019d1ef6b2SChandu Babu N static void axgbe_config_mmc(struct axgbe_port *pdata) 13029d1ef6b2SChandu Babu N { 13039d1ef6b2SChandu Babu N struct axgbe_mmc_stats *stats = &pdata->mmc_stats; 13049d1ef6b2SChandu Babu N 13059d1ef6b2SChandu Babu N /* Reset stats */ 13069d1ef6b2SChandu Babu N memset(stats, 0, sizeof(*stats)); 13079d1ef6b2SChandu Babu N 13089d1ef6b2SChandu Babu N /* Set counters to reset on read */ 13099d1ef6b2SChandu Babu N AXGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1); 13109d1ef6b2SChandu Babu N 13119d1ef6b2SChandu Babu N /* Reset the counters */ 13129d1ef6b2SChandu Babu N AXGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1); 13139d1ef6b2SChandu Babu N } 13149d1ef6b2SChandu Babu N 13157c4158a5SRavi Kumar static int axgbe_init(struct axgbe_port *pdata) 13167c4158a5SRavi Kumar { 13177c4158a5SRavi Kumar int ret; 13187c4158a5SRavi Kumar 13197c4158a5SRavi Kumar /* Flush Tx queues */ 13207c4158a5SRavi Kumar ret = 
axgbe_flush_tx_queues(pdata); 13217c4158a5SRavi Kumar if (ret) 13227c4158a5SRavi Kumar return ret; 13237c4158a5SRavi Kumar /* Initialize DMA related features */ 13247c4158a5SRavi Kumar axgbe_config_dma_bus(pdata); 13257c4158a5SRavi Kumar axgbe_config_dma_cache(pdata); 13267c4158a5SRavi Kumar axgbe_config_edma_control(pdata); 13277c4158a5SRavi Kumar axgbe_config_osp_mode(pdata); 13287c4158a5SRavi Kumar axgbe_config_pblx8(pdata); 13297c4158a5SRavi Kumar axgbe_config_tx_pbl_val(pdata); 13307c4158a5SRavi Kumar axgbe_config_rx_pbl_val(pdata); 13317c4158a5SRavi Kumar axgbe_config_rx_buffer_size(pdata); 13327c4158a5SRavi Kumar axgbe_config_rss(pdata); 13337c4158a5SRavi Kumar wrapper_tx_desc_init(pdata); 13347c4158a5SRavi Kumar ret = wrapper_rx_desc_init(pdata); 13357c4158a5SRavi Kumar if (ret) 13367c4158a5SRavi Kumar return ret; 13377c4158a5SRavi Kumar axgbe_enable_dma_interrupts(pdata); 13387c4158a5SRavi Kumar 13397c4158a5SRavi Kumar /* Initialize MTL related features */ 13407c4158a5SRavi Kumar axgbe_config_mtl_mode(pdata); 13417c4158a5SRavi Kumar axgbe_config_queue_mapping(pdata); 13427c4158a5SRavi Kumar axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode); 13437c4158a5SRavi Kumar axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode); 13447c4158a5SRavi Kumar axgbe_config_tx_threshold(pdata, pdata->tx_threshold); 13457c4158a5SRavi Kumar axgbe_config_rx_threshold(pdata, pdata->rx_threshold); 13467c4158a5SRavi Kumar axgbe_config_tx_fifo_size(pdata); 13477c4158a5SRavi Kumar axgbe_config_rx_fifo_size(pdata); 13487c4158a5SRavi Kumar 13497c4158a5SRavi Kumar axgbe_enable_mtl_interrupts(pdata); 13507c4158a5SRavi Kumar 13517c4158a5SRavi Kumar /* Initialize MAC related features */ 1352e01d9b2eSChandu Babu N axgbe_config_mac_hash_table(pdata); 13537c4158a5SRavi Kumar axgbe_config_mac_address(pdata); 13547c4158a5SRavi Kumar axgbe_config_jumbo_enable(pdata); 13557c4158a5SRavi Kumar axgbe_config_flow_control(pdata); 13567c4158a5SRavi Kumar axgbe_config_mac_speed(pdata); 13577c4158a5SRavi Kumar 
axgbe_config_checksum_offload(pdata); 13589d1ef6b2SChandu Babu N axgbe_config_mmc(pdata); 13597c4158a5SRavi Kumar 13607c4158a5SRavi Kumar return 0; 13617c4158a5SRavi Kumar } 13627c4158a5SRavi Kumar 1363572890efSRavi Kumar void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if) 1364572890efSRavi Kumar { 1365572890efSRavi Kumar hw_if->exit = axgbe_exit; 13667c4158a5SRavi Kumar hw_if->config_flow_control = axgbe_config_flow_control; 13674ac7516bSRavi Kumar 13687c4158a5SRavi Kumar hw_if->init = axgbe_init; 1369a5c72737SRavi Kumar 13704ac7516bSRavi Kumar hw_if->read_mmd_regs = axgbe_read_mmd_regs; 13714ac7516bSRavi Kumar hw_if->write_mmd_regs = axgbe_write_mmd_regs; 13724ac7516bSRavi Kumar 1373a5c72737SRavi Kumar hw_if->set_speed = axgbe_set_speed; 1374a5c72737SRavi Kumar 13754ac7516bSRavi Kumar hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode; 13764ac7516bSRavi Kumar hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs; 13774ac7516bSRavi Kumar hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs; 13787c4158a5SRavi Kumar /* For FLOW ctrl */ 13797c4158a5SRavi Kumar hw_if->config_tx_flow_control = axgbe_config_tx_flow_control; 13807c4158a5SRavi Kumar hw_if->config_rx_flow_control = axgbe_config_rx_flow_control; 138186578516SGirish Nandibasappa 138286578516SGirish Nandibasappa /*vlan*/ 138386578516SGirish Nandibasappa hw_if->enable_rx_vlan_stripping = axgbe_enable_rx_vlan_stripping; 138486578516SGirish Nandibasappa hw_if->disable_rx_vlan_stripping = axgbe_disable_rx_vlan_stripping; 138586578516SGirish Nandibasappa hw_if->enable_rx_vlan_filtering = axgbe_enable_rx_vlan_filtering; 138686578516SGirish Nandibasappa hw_if->disable_rx_vlan_filtering = axgbe_disable_rx_vlan_filtering; 138786578516SGirish Nandibasappa hw_if->update_vlan_hash_table = axgbe_update_vlan_hash_table; 1388572890efSRavi Kumar } 1389