/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int  axgbe_dev_configure(struct rte_eth_dev *dev);
static int  axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static void axgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID       0x1022
#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

/* Per-device-revision driver data: V2A silicon (larger TX/RX FIFOs). */
static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access                    = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit                      = 1,
	.tx_max_fifo_size               = 229376,
	.rx_max_fifo_size               = 229376,
	.tx_tstamp_workaround           = 1,
	.ecc_support                    = 1,
	.i2c_support                    = 1,
};

/* Per-device-revision driver data: V2B silicon (64 KiB TX/RX FIFOs). */
static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl    = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access                    = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit                      = 1,
	.tx_max_fifo_size               = 65536,
	.rx_max_fifo_size               = 65536,
	.tx_tstamp_workaround           = 1,
	.ecc_support                    = 1,
	.i2c_support                    = 1,
};

/* Descriptor-ring limits advertised to applications via dev_infos_get. */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure        = axgbe_dev_configure,
	.dev_start            = axgbe_dev_start,
	.dev_stop             = axgbe_dev_stop,
	.dev_close            = axgbe_dev_close,
	.link_update          = axgbe_dev_link_update,
	.dev_infos_get        = axgbe_dev_info_get,
	.rx_queue_setup       = axgbe_dev_rx_queue_setup,
	.rx_queue_release     = axgbe_dev_rx_queue_release,
	.tx_queue_setup       = axgbe_dev_tx_queue_setup,
	.tx_queue_release     = axgbe_dev_tx_queue_release,
};

/* Reset the PHY: invalidate the cached link state/speed, then delegate
 * to the PHY implementation's reset hook.
 */
static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	/* Let the PHY layer service any auto-negotiation interrupt first. */
	pdata->phy_if.an_isr(pdata);
	/*DMA related interrupts*/
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	if (dma_isr) {
		/* Bit 0 corresponds to DMA channel 0; read its status and
		 * write the value back to acknowledge it (presumably
		 * write-to-clear semantics -- confirm against the HW spec).
		 */
		if (dma_isr & 1) {
			dma_ch_isr =
			AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
					  pdata->rx_queues[0],
					  DMA_CH_SR);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Enable interrupts since disabled after generation*/
	rte_intr_enable(&pdata->pci_dev->intr_handle);
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata =  dev->data->dev_private;
	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}

/* Record the RX multi-queue mode; only RSS and "none" are supported.
 * Returns 0 on success, -1 for any other mq_mode.
 */
static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return  -1;
	return 0;
}

/* Start the device: configure RX MQ mode, reset the PHY, initialise the
 * hardware, enable interrupts, start the PHY, then enable TX/RX DMA.
 * Returns 0 on success or a negative value from a failed step.
 */
static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
	int ret;

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start*/
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
	return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata = dev->data->dev_private;

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	/* Already stopped -- nothing to do. */
	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
		return;

	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	/* Allow time for the PHY state to settle before polling it.
	 * NOTE(review): the 800 ms figure is not explained here -- presumably
	 * empirically chosen; confirm against the PHY bring-up requirements.
	 */
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

/* Report device capabilities: queue counts, offloads, RSS support and
 * default queue configuration.
 */
static void
axgbe_dev_info_get(struct rte_eth_dev *dev,
		   struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
	dev_info->speed_capa =  ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM  |
		DEV_RX_OFFLOAD_TCP_CKSUM;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM  |
		DEV_TX_OFFLOAD_UDP_CKSUM   |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	/* RSS capabilities are advertised only when the hardware feature
	 * register reported RSS support.
	 */
	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size =  AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
		.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS |
			     ETH_TXQ_FLAGS_NOOFFLOADS,
	};
}

/* Read the three MAC hardware-feature registers and decode every field
 * into pdata->hw_feat, translating encoded values (hash table size, DMA
 * width, FIFO sizes, zero-based counts) into actual numbers.
 */
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii        = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts          = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee         = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac    = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					       ADDMACADRSEL);
	hw_feat->ts_src      = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						 RXFIFOSIZE);
	hw_feat->tx_fifo_size  = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						 TXFIFOSIZE);
	hw_feat->adv_ts_hi     = AXGMAC_GET_BITS(mac_hfr1,
						 MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug     = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss           = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt	       = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt     = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt    = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num  = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

/* Install the HW, PHY and I2C function-pointer tables, then let the
 * version-specific data override the PHY implementation hooks.
 */
static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

/* Derive the TX/RX ring and queue counts from the hardware features,
 * clamped by any externally-provided maximums.
 */
static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				     pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				     pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				     pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				  pdata->rx_max_q_count);
}

/* Fill in the default DMA/MTL/flow-control/PHY configuration values. */
static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

/*
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &axgbe_recv_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = (struct axgbe_port *)eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	/* Map the register BARs; the property and I2C blocks live at fixed
	 * offsets within the MAC BAR.
	 */
	pdata->xgmac_regs =
		(uint64_t)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = pdata->xgmac_regs + AXGBE_MAC_PROP_OFFSET;
	pdata->xi2c_regs = pdata->xgmac_regs + AXGBE_I2C_CTRL_OFFSET;
	pdata->xpcs_regs = (uint64_t)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data*/
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
	pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
	pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8)  &  0xff;

	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
					       ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to store MAC addr tbl",
			     ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	/* Fall back to a random address when the hardware one is invalid. */
	if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
		eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		return ret;
	}

	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

/* Undo eth_axgbe_dev_init: release the MAC address table, clear queues
 * and unregister the interrupt callback. Returns 0.
 */
static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	/* Free the MAC address table */
	rte_free(eth_dev->data->mac_addrs);
	eth_dev->data->mac_addrs = NULL;
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

/* PCI probe: allocate per-port private data and run the device init. */
static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

/* PCI remove: run the device uninit and release the ethdev. */
static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");

/* Constructor: register the init/driver log types at NOTICE level. */
RTE_INIT(axgbe_init_log);
static void
axgbe_init_log(void)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}