/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"

/* Forward declarations of the eth_dev_ops callbacks implemented below. */
static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int axgbe_dev_configure(struct rte_eth_dev *dev);
static int axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static void axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
				struct rte_eth_stats *stats);
static void axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void axgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID	0x1022
#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459

/* Dynamic log types registered by this PMD. */
int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

/* Version-specific hardware parameters for the V2A device
 * (larger 224 KB Tx/Rx FIFOs).
 */
static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 229376,
	.rx_max_fifo_size = 229376,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.an_cdr_workaround = 1,
};

/* Version-specific hardware parameters for the V2B device
 * (64 KB Tx/Rx FIFOs).
 */
static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 65536,
	.rx_max_fifo_size = 65536,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.an_cdr_workaround = 1,
};

/* Rx descriptor ring limits advertised via dev_infos_get(). */
static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
729e890103SRavi Kumar }; 739e890103SRavi Kumar 749e890103SRavi Kumar static const struct rte_eth_desc_lim tx_desc_lim = { 759e890103SRavi Kumar .nb_max = AXGBE_MAX_RING_DESC, 769e890103SRavi Kumar .nb_min = AXGBE_MIN_RING_DESC, 779e890103SRavi Kumar .nb_align = 8, 789e890103SRavi Kumar }; 799e890103SRavi Kumar 809e890103SRavi Kumar static const struct eth_dev_ops axgbe_eth_dev_ops = { 817c4158a5SRavi Kumar .dev_configure = axgbe_dev_configure, 827c4158a5SRavi Kumar .dev_start = axgbe_dev_start, 837c4158a5SRavi Kumar .dev_stop = axgbe_dev_stop, 849e890103SRavi Kumar .dev_close = axgbe_dev_close, 85fa3e0440SRavi Kumar .promiscuous_enable = axgbe_dev_promiscuous_enable, 86fa3e0440SRavi Kumar .promiscuous_disable = axgbe_dev_promiscuous_disable, 87fa3e0440SRavi Kumar .allmulticast_enable = axgbe_dev_allmulticast_enable, 88fa3e0440SRavi Kumar .allmulticast_disable = axgbe_dev_allmulticast_disable, 8944d45ffeSRavi Kumar .link_update = axgbe_dev_link_update, 903e730511SRavi Kumar .stats_get = axgbe_dev_stats_get, 913e730511SRavi Kumar .stats_reset = axgbe_dev_stats_reset, 929e890103SRavi Kumar .dev_infos_get = axgbe_dev_info_get, 939e890103SRavi Kumar .rx_queue_setup = axgbe_dev_rx_queue_setup, 949e890103SRavi Kumar .rx_queue_release = axgbe_dev_rx_queue_release, 959e890103SRavi Kumar .tx_queue_setup = axgbe_dev_tx_queue_setup, 969e890103SRavi Kumar .tx_queue_release = axgbe_dev_tx_queue_release, 979e890103SRavi Kumar }; 989e890103SRavi Kumar 997c4158a5SRavi Kumar static int axgbe_phy_reset(struct axgbe_port *pdata) 1007c4158a5SRavi Kumar { 1017c4158a5SRavi Kumar pdata->phy_link = -1; 1027c4158a5SRavi Kumar pdata->phy_speed = SPEED_UNKNOWN; 1037c4158a5SRavi Kumar return pdata->phy_if.phy_reset(pdata); 1047c4158a5SRavi Kumar } 1057c4158a5SRavi Kumar 106456ff159SRavi Kumar /* 107456ff159SRavi Kumar * Interrupt handler triggered by NIC for handling 108456ff159SRavi Kumar * specific interrupt. 
109456ff159SRavi Kumar * 110456ff159SRavi Kumar * @param handle 111456ff159SRavi Kumar * Pointer to interrupt handle. 112456ff159SRavi Kumar * @param param 113456ff159SRavi Kumar * The address of parameter (struct rte_eth_dev *) regsitered before. 114456ff159SRavi Kumar * 115456ff159SRavi Kumar * @return 116456ff159SRavi Kumar * void 117456ff159SRavi Kumar */ 118456ff159SRavi Kumar static void 119456ff159SRavi Kumar axgbe_dev_interrupt_handler(void *param) 120456ff159SRavi Kumar { 121456ff159SRavi Kumar struct rte_eth_dev *dev = (struct rte_eth_dev *)param; 122456ff159SRavi Kumar struct axgbe_port *pdata = dev->data->dev_private; 1238590b93dSRavi Kumar unsigned int dma_isr, dma_ch_isr; 124456ff159SRavi Kumar 125456ff159SRavi Kumar pdata->phy_if.an_isr(pdata); 1268590b93dSRavi Kumar /*DMA related interrupts*/ 1278590b93dSRavi Kumar dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR); 1288590b93dSRavi Kumar if (dma_isr) { 1298590b93dSRavi Kumar if (dma_isr & 1) { 1308590b93dSRavi Kumar dma_ch_isr = 1318590b93dSRavi Kumar AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *) 1328590b93dSRavi Kumar pdata->rx_queues[0], 1338590b93dSRavi Kumar DMA_CH_SR); 1348590b93dSRavi Kumar AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *) 1358590b93dSRavi Kumar pdata->rx_queues[0], 1368590b93dSRavi Kumar DMA_CH_SR, dma_ch_isr); 1378590b93dSRavi Kumar } 1388590b93dSRavi Kumar } 139456ff159SRavi Kumar /* Enable interrupts since disabled after generation*/ 140456ff159SRavi Kumar rte_intr_enable(&pdata->pci_dev->intr_handle); 141456ff159SRavi Kumar } 142456ff159SRavi Kumar 1437c4158a5SRavi Kumar /* 1447c4158a5SRavi Kumar * Configure device link speed and setup link. 1457c4158a5SRavi Kumar * It returns 0 on success. 
1467c4158a5SRavi Kumar */ 1477c4158a5SRavi Kumar static int 1487c4158a5SRavi Kumar axgbe_dev_configure(struct rte_eth_dev *dev) 1497c4158a5SRavi Kumar { 1507c4158a5SRavi Kumar struct axgbe_port *pdata = dev->data->dev_private; 1517c4158a5SRavi Kumar /* Checksum offload to hardware */ 1527c4158a5SRavi Kumar pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads & 1537c4158a5SRavi Kumar DEV_RX_OFFLOAD_CHECKSUM; 1547c4158a5SRavi Kumar return 0; 1557c4158a5SRavi Kumar } 1567c4158a5SRavi Kumar 1577c4158a5SRavi Kumar static int 1587c4158a5SRavi Kumar axgbe_dev_rx_mq_config(struct rte_eth_dev *dev) 1597c4158a5SRavi Kumar { 1600bc212a8SStephen Hemminger struct axgbe_port *pdata = dev->data->dev_private; 1617c4158a5SRavi Kumar 1627c4158a5SRavi Kumar if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) 1637c4158a5SRavi Kumar pdata->rss_enable = 1; 1647c4158a5SRavi Kumar else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE) 1657c4158a5SRavi Kumar pdata->rss_enable = 0; 1667c4158a5SRavi Kumar else 1677c4158a5SRavi Kumar return -1; 1687c4158a5SRavi Kumar return 0; 1697c4158a5SRavi Kumar } 1707c4158a5SRavi Kumar 1717c4158a5SRavi Kumar static int 1727c4158a5SRavi Kumar axgbe_dev_start(struct rte_eth_dev *dev) 1737c4158a5SRavi Kumar { 1740bc212a8SStephen Hemminger struct axgbe_port *pdata = dev->data->dev_private; 1757c4158a5SRavi Kumar int ret; 1767c4158a5SRavi Kumar 1770bc212a8SStephen Hemminger PMD_INIT_FUNC_TRACE(); 1780bc212a8SStephen Hemminger 1797c4158a5SRavi Kumar /* Multiqueue RSS */ 1807c4158a5SRavi Kumar ret = axgbe_dev_rx_mq_config(dev); 1817c4158a5SRavi Kumar if (ret) { 1827c4158a5SRavi Kumar PMD_DRV_LOG(ERR, "Unable to config RX MQ\n"); 1837c4158a5SRavi Kumar return ret; 1847c4158a5SRavi Kumar } 1857c4158a5SRavi Kumar ret = axgbe_phy_reset(pdata); 1867c4158a5SRavi Kumar if (ret) { 1877c4158a5SRavi Kumar PMD_DRV_LOG(ERR, "phy reset failed\n"); 1887c4158a5SRavi Kumar return ret; 1897c4158a5SRavi Kumar } 1907c4158a5SRavi Kumar ret = 
pdata->hw_if.init(pdata); 1917c4158a5SRavi Kumar if (ret) { 1927c4158a5SRavi Kumar PMD_DRV_LOG(ERR, "dev_init failed\n"); 1937c4158a5SRavi Kumar return ret; 1947c4158a5SRavi Kumar } 1957c4158a5SRavi Kumar 1967c4158a5SRavi Kumar /* enable uio/vfio intr/eventfd mapping */ 1977c4158a5SRavi Kumar rte_intr_enable(&pdata->pci_dev->intr_handle); 1987c4158a5SRavi Kumar 1997c4158a5SRavi Kumar /* phy start*/ 2007c4158a5SRavi Kumar pdata->phy_if.phy_start(pdata); 2018590b93dSRavi Kumar axgbe_dev_enable_tx(dev); 2028590b93dSRavi Kumar axgbe_dev_enable_rx(dev); 2037c4158a5SRavi Kumar 2047c4158a5SRavi Kumar axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state); 2057c4158a5SRavi Kumar axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state); 2067c4158a5SRavi Kumar return 0; 2077c4158a5SRavi Kumar } 2087c4158a5SRavi Kumar 2097c4158a5SRavi Kumar /* Stop device: disable rx and tx functions to allow for reconfiguring. */ 2107c4158a5SRavi Kumar static void 2117c4158a5SRavi Kumar axgbe_dev_stop(struct rte_eth_dev *dev) 2127c4158a5SRavi Kumar { 2137c4158a5SRavi Kumar struct axgbe_port *pdata = dev->data->dev_private; 2147c4158a5SRavi Kumar 2150bc212a8SStephen Hemminger PMD_INIT_FUNC_TRACE(); 2160bc212a8SStephen Hemminger 2177c4158a5SRavi Kumar rte_intr_disable(&pdata->pci_dev->intr_handle); 2187c4158a5SRavi Kumar 2197c4158a5SRavi Kumar if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state)) 2207c4158a5SRavi Kumar return; 2217c4158a5SRavi Kumar 2227c4158a5SRavi Kumar axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state); 2238590b93dSRavi Kumar axgbe_dev_disable_tx(dev); 2248590b93dSRavi Kumar axgbe_dev_disable_rx(dev); 2257c4158a5SRavi Kumar 2267c4158a5SRavi Kumar pdata->phy_if.phy_stop(pdata); 2277c4158a5SRavi Kumar pdata->hw_if.exit(pdata); 2287c4158a5SRavi Kumar memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link)); 2297c4158a5SRavi Kumar axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state); 2307c4158a5SRavi Kumar } 2317c4158a5SRavi Kumar 2329e890103SRavi Kumar /* Clear all resources like TX/RX queues. 
*/ 2339e890103SRavi Kumar static void 2349e890103SRavi Kumar axgbe_dev_close(struct rte_eth_dev *dev) 2359e890103SRavi Kumar { 2369e890103SRavi Kumar axgbe_dev_clear_queues(dev); 2379e890103SRavi Kumar } 2389e890103SRavi Kumar 239fa3e0440SRavi Kumar static void 240fa3e0440SRavi Kumar axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev) 241fa3e0440SRavi Kumar { 242fa3e0440SRavi Kumar struct axgbe_port *pdata = dev->data->dev_private; 243fa3e0440SRavi Kumar 2440bc212a8SStephen Hemminger PMD_INIT_FUNC_TRACE(); 2450bc212a8SStephen Hemminger 246fa3e0440SRavi Kumar AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1); 247fa3e0440SRavi Kumar } 248fa3e0440SRavi Kumar 249fa3e0440SRavi Kumar static void 250fa3e0440SRavi Kumar axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev) 251fa3e0440SRavi Kumar { 252fa3e0440SRavi Kumar struct axgbe_port *pdata = dev->data->dev_private; 253fa3e0440SRavi Kumar 2540bc212a8SStephen Hemminger PMD_INIT_FUNC_TRACE(); 2550bc212a8SStephen Hemminger 256fa3e0440SRavi Kumar AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0); 257fa3e0440SRavi Kumar } 258fa3e0440SRavi Kumar 259fa3e0440SRavi Kumar static void 260fa3e0440SRavi Kumar axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev) 261fa3e0440SRavi Kumar { 262fa3e0440SRavi Kumar struct axgbe_port *pdata = dev->data->dev_private; 263fa3e0440SRavi Kumar 2640bc212a8SStephen Hemminger PMD_INIT_FUNC_TRACE(); 2650bc212a8SStephen Hemminger 266fa3e0440SRavi Kumar if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM)) 267fa3e0440SRavi Kumar return; 268fa3e0440SRavi Kumar AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1); 269fa3e0440SRavi Kumar } 270fa3e0440SRavi Kumar 271fa3e0440SRavi Kumar static void 272fa3e0440SRavi Kumar axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev) 273fa3e0440SRavi Kumar { 274fa3e0440SRavi Kumar struct axgbe_port *pdata = dev->data->dev_private; 275fa3e0440SRavi Kumar 2760bc212a8SStephen Hemminger PMD_INIT_FUNC_TRACE(); 2770bc212a8SStephen Hemminger 278fa3e0440SRavi Kumar if (!AXGMAC_IOREAD_BITS(pdata, 
MAC_PFR, PM)) 279fa3e0440SRavi Kumar return; 280fa3e0440SRavi Kumar AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0); 281fa3e0440SRavi Kumar } 282fa3e0440SRavi Kumar 28344d45ffeSRavi Kumar /* return 0 means link status changed, -1 means not changed */ 28444d45ffeSRavi Kumar static int 28544d45ffeSRavi Kumar axgbe_dev_link_update(struct rte_eth_dev *dev, 28644d45ffeSRavi Kumar int wait_to_complete __rte_unused) 28744d45ffeSRavi Kumar { 28844d45ffeSRavi Kumar struct axgbe_port *pdata = dev->data->dev_private; 28944d45ffeSRavi Kumar struct rte_eth_link link; 29044d45ffeSRavi Kumar int ret = 0; 29144d45ffeSRavi Kumar 29244d45ffeSRavi Kumar PMD_INIT_FUNC_TRACE(); 29344d45ffeSRavi Kumar rte_delay_ms(800); 29444d45ffeSRavi Kumar 29544d45ffeSRavi Kumar pdata->phy_if.phy_status(pdata); 29644d45ffeSRavi Kumar 29744d45ffeSRavi Kumar memset(&link, 0, sizeof(struct rte_eth_link)); 29844d45ffeSRavi Kumar link.link_duplex = pdata->phy.duplex; 29944d45ffeSRavi Kumar link.link_status = pdata->phy_link; 30044d45ffeSRavi Kumar link.link_speed = pdata->phy_speed; 30144d45ffeSRavi Kumar link.link_autoneg = !(dev->data->dev_conf.link_speeds & 30244d45ffeSRavi Kumar ETH_LINK_SPEED_FIXED); 30344d45ffeSRavi Kumar ret = rte_eth_linkstatus_set(dev, &link); 30444d45ffeSRavi Kumar if (ret == -1) 30544d45ffeSRavi Kumar PMD_DRV_LOG(ERR, "No change in link status\n"); 30644d45ffeSRavi Kumar 30744d45ffeSRavi Kumar return ret; 30844d45ffeSRavi Kumar } 30944d45ffeSRavi Kumar 3103e730511SRavi Kumar static int 3113e730511SRavi Kumar axgbe_dev_stats_get(struct rte_eth_dev *dev, 3123e730511SRavi Kumar struct rte_eth_stats *stats) 3133e730511SRavi Kumar { 3143e730511SRavi Kumar struct axgbe_rx_queue *rxq; 3153e730511SRavi Kumar struct axgbe_tx_queue *txq; 3163e730511SRavi Kumar unsigned int i; 3173e730511SRavi Kumar 3183e730511SRavi Kumar for (i = 0; i < dev->data->nb_rx_queues; i++) { 3193e730511SRavi Kumar rxq = dev->data->rx_queues[i]; 3203e730511SRavi Kumar stats->q_ipackets[i] = rxq->pkts; 
3213e730511SRavi Kumar stats->ipackets += rxq->pkts; 3223e730511SRavi Kumar stats->q_ibytes[i] = rxq->bytes; 3233e730511SRavi Kumar stats->ibytes += rxq->bytes; 3243e730511SRavi Kumar } 3253e730511SRavi Kumar for (i = 0; i < dev->data->nb_tx_queues; i++) { 3263e730511SRavi Kumar txq = dev->data->tx_queues[i]; 3273e730511SRavi Kumar stats->q_opackets[i] = txq->pkts; 3283e730511SRavi Kumar stats->opackets += txq->pkts; 3293e730511SRavi Kumar stats->q_obytes[i] = txq->bytes; 3303e730511SRavi Kumar stats->obytes += txq->bytes; 3313e730511SRavi Kumar } 3323e730511SRavi Kumar 3333e730511SRavi Kumar return 0; 3343e730511SRavi Kumar } 3353e730511SRavi Kumar 3363e730511SRavi Kumar static void 3373e730511SRavi Kumar axgbe_dev_stats_reset(struct rte_eth_dev *dev) 3383e730511SRavi Kumar { 3393e730511SRavi Kumar struct axgbe_rx_queue *rxq; 3403e730511SRavi Kumar struct axgbe_tx_queue *txq; 3413e730511SRavi Kumar unsigned int i; 3423e730511SRavi Kumar 3433e730511SRavi Kumar for (i = 0; i < dev->data->nb_rx_queues; i++) { 3443e730511SRavi Kumar rxq = dev->data->rx_queues[i]; 3453e730511SRavi Kumar rxq->pkts = 0; 3463e730511SRavi Kumar rxq->bytes = 0; 3473e730511SRavi Kumar rxq->errors = 0; 3483e730511SRavi Kumar } 3493e730511SRavi Kumar for (i = 0; i < dev->data->nb_tx_queues; i++) { 3503e730511SRavi Kumar txq = dev->data->tx_queues[i]; 3513e730511SRavi Kumar txq->pkts = 0; 3523e730511SRavi Kumar txq->bytes = 0; 3533e730511SRavi Kumar txq->errors = 0; 3543e730511SRavi Kumar } 3553e730511SRavi Kumar } 3563e730511SRavi Kumar 3579e890103SRavi Kumar static void 358cd8c7c7cSFerruh Yigit axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) 3599e890103SRavi Kumar { 3609e890103SRavi Kumar struct axgbe_port *pdata = dev->data->dev_private; 3619e890103SRavi Kumar 3629e890103SRavi Kumar dev_info->max_rx_queues = pdata->rx_ring_count; 3639e890103SRavi Kumar dev_info->max_tx_queues = pdata->tx_ring_count; 3649e890103SRavi Kumar dev_info->min_rx_bufsize = 
AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM  |
		DEV_RX_OFFLOAD_TCP_CKSUM  |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM  |
		DEV_TX_OFFLOAD_UDP_CKSUM   |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	/* RSS parameters are only advertised when the hardware supports it. */
	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size =  AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};
}

/* Decode the MAC_HWF0R/1R/2R hardware feature registers into
 * pdata->hw_feat; raw register fields are translated into usable
 * counts and sizes at the end of the function.
 */
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
					     MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
466572890efSRavi Kumar case 3: 467572890efSRavi Kumar hw_feat->hash_table_size = 256; 468572890efSRavi Kumar break; 469572890efSRavi Kumar } 470572890efSRavi Kumar 471572890efSRavi Kumar /* Translate the address width setting into actual number */ 472572890efSRavi Kumar switch (hw_feat->dma_width) { 473572890efSRavi Kumar case 0: 474572890efSRavi Kumar hw_feat->dma_width = 32; 475572890efSRavi Kumar break; 476572890efSRavi Kumar case 1: 477572890efSRavi Kumar hw_feat->dma_width = 40; 478572890efSRavi Kumar break; 479572890efSRavi Kumar case 2: 480572890efSRavi Kumar hw_feat->dma_width = 48; 481572890efSRavi Kumar break; 482572890efSRavi Kumar default: 483572890efSRavi Kumar hw_feat->dma_width = 32; 484572890efSRavi Kumar } 485572890efSRavi Kumar 486572890efSRavi Kumar /* The Queue, Channel and TC counts are zero based so increment them 487572890efSRavi Kumar * to get the actual number 488572890efSRavi Kumar */ 489572890efSRavi Kumar hw_feat->rx_q_cnt++; 490572890efSRavi Kumar hw_feat->tx_q_cnt++; 491572890efSRavi Kumar hw_feat->rx_ch_cnt++; 492572890efSRavi Kumar hw_feat->tx_ch_cnt++; 493572890efSRavi Kumar hw_feat->tc_cnt++; 494572890efSRavi Kumar 495572890efSRavi Kumar /* Translate the fifo sizes into actual numbers */ 496572890efSRavi Kumar hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7); 497572890efSRavi Kumar hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7); 498572890efSRavi Kumar } 499572890efSRavi Kumar 500572890efSRavi Kumar static void axgbe_init_all_fptrs(struct axgbe_port *pdata) 501572890efSRavi Kumar { 502572890efSRavi Kumar axgbe_init_function_ptrs_dev(&pdata->hw_if); 5034ac7516bSRavi Kumar axgbe_init_function_ptrs_phy(&pdata->phy_if); 5044ac7516bSRavi Kumar axgbe_init_function_ptrs_i2c(&pdata->i2c_if); 5054ac7516bSRavi Kumar pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if); 506572890efSRavi Kumar } 507572890efSRavi Kumar 508572890efSRavi Kumar static void axgbe_set_counts(struct axgbe_port *pdata) 509572890efSRavi Kumar { 
510572890efSRavi Kumar /* Set all the function pointers */ 511572890efSRavi Kumar axgbe_init_all_fptrs(pdata); 512572890efSRavi Kumar 513572890efSRavi Kumar /* Populate the hardware features */ 514572890efSRavi Kumar axgbe_get_all_hw_features(pdata); 515572890efSRavi Kumar 516572890efSRavi Kumar /* Set default max values if not provided */ 517572890efSRavi Kumar if (!pdata->tx_max_channel_count) 518572890efSRavi Kumar pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt; 519572890efSRavi Kumar if (!pdata->rx_max_channel_count) 520572890efSRavi Kumar pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt; 521572890efSRavi Kumar 522572890efSRavi Kumar if (!pdata->tx_max_q_count) 523572890efSRavi Kumar pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt; 524572890efSRavi Kumar if (!pdata->rx_max_q_count) 525572890efSRavi Kumar pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt; 526572890efSRavi Kumar 527572890efSRavi Kumar /* Calculate the number of Tx and Rx rings to be created 528572890efSRavi Kumar * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set 529572890efSRavi Kumar * the number of Tx queues to the number of Tx channels 530572890efSRavi Kumar * enabled 531572890efSRavi Kumar * -Rx (DMA) Channels do not map 1-to-1 so use the actual 532572890efSRavi Kumar * number of Rx queues or maximum allowed 533572890efSRavi Kumar */ 534572890efSRavi Kumar pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt, 535572890efSRavi Kumar pdata->tx_max_channel_count); 536572890efSRavi Kumar pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count, 537572890efSRavi Kumar pdata->tx_max_q_count); 538572890efSRavi Kumar 539572890efSRavi Kumar pdata->tx_q_count = pdata->tx_ring_count; 540572890efSRavi Kumar 541572890efSRavi Kumar pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt, 542572890efSRavi Kumar pdata->rx_max_channel_count); 543572890efSRavi Kumar 544572890efSRavi Kumar pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt, 545572890efSRavi Kumar pdata->rx_max_q_count); 
546572890efSRavi Kumar } 547572890efSRavi Kumar 548572890efSRavi Kumar static void axgbe_default_config(struct axgbe_port *pdata) 549572890efSRavi Kumar { 550572890efSRavi Kumar pdata->pblx8 = DMA_PBL_X8_ENABLE; 551572890efSRavi Kumar pdata->tx_sf_mode = MTL_TSF_ENABLE; 552572890efSRavi Kumar pdata->tx_threshold = MTL_TX_THRESHOLD_64; 553572890efSRavi Kumar pdata->tx_pbl = DMA_PBL_32; 554572890efSRavi Kumar pdata->tx_osp_mode = DMA_OSP_ENABLE; 555572890efSRavi Kumar pdata->rx_sf_mode = MTL_RSF_ENABLE; 556572890efSRavi Kumar pdata->rx_threshold = MTL_RX_THRESHOLD_64; 557572890efSRavi Kumar pdata->rx_pbl = DMA_PBL_32; 558572890efSRavi Kumar pdata->pause_autoneg = 1; 559572890efSRavi Kumar pdata->tx_pause = 0; 560572890efSRavi Kumar pdata->rx_pause = 0; 561572890efSRavi Kumar pdata->phy_speed = SPEED_UNKNOWN; 562572890efSRavi Kumar pdata->power_down = 0; 563572890efSRavi Kumar } 564572890efSRavi Kumar 5658691632fSRavi Kumar /* 5668691632fSRavi Kumar * It returns 0 on success. 5678691632fSRavi Kumar */ 5688691632fSRavi Kumar static int 5698691632fSRavi Kumar eth_axgbe_dev_init(struct rte_eth_dev *eth_dev) 5708691632fSRavi Kumar { 5718691632fSRavi Kumar PMD_INIT_FUNC_TRACE(); 5728691632fSRavi Kumar struct axgbe_port *pdata; 5738691632fSRavi Kumar struct rte_pci_device *pci_dev; 574572890efSRavi Kumar uint32_t reg, mac_lo, mac_hi; 575572890efSRavi Kumar int ret; 5768691632fSRavi Kumar 5779e890103SRavi Kumar eth_dev->dev_ops = &axgbe_eth_dev_ops; 5788590b93dSRavi Kumar eth_dev->rx_pkt_burst = &axgbe_recv_pkts; 5799e890103SRavi Kumar 5808691632fSRavi Kumar /* 5818691632fSRavi Kumar * For secondary processes, we don't initialise any further as primary 5828691632fSRavi Kumar * has already done this work. 
 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	/* Map the MAC, property, I2C and PCS register regions from the BARs. */
	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data*/
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
	pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
	pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
					       RTE_ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to store MAC addr tbl",
			     RTE_ETHER_ADDR_LEN);
		return -ENOMEM;
	}

	/* Fall back to a random address if the EEPROM one is invalid. */
	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration
data */ 685572890efSRavi Kumar axgbe_default_config(pdata); 686572890efSRavi Kumar 687572890efSRavi Kumar /* Set default max values if not provided */ 688572890efSRavi Kumar if (!pdata->tx_max_fifo_size) 689572890efSRavi Kumar pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size; 690572890efSRavi Kumar if (!pdata->rx_max_fifo_size) 691572890efSRavi Kumar pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size; 692572890efSRavi Kumar 6939e890103SRavi Kumar pdata->tx_desc_count = AXGBE_MAX_RING_DESC; 6949e890103SRavi Kumar pdata->rx_desc_count = AXGBE_MAX_RING_DESC; 695572890efSRavi Kumar pthread_mutex_init(&pdata->xpcs_mutex, NULL); 696572890efSRavi Kumar pthread_mutex_init(&pdata->i2c_mutex, NULL); 697572890efSRavi Kumar pthread_mutex_init(&pdata->an_mutex, NULL); 698572890efSRavi Kumar pthread_mutex_init(&pdata->phy_mutex, NULL); 699572890efSRavi Kumar 7004ac7516bSRavi Kumar ret = pdata->phy_if.phy_init(pdata); 7014ac7516bSRavi Kumar if (ret) { 7024ac7516bSRavi Kumar rte_free(eth_dev->data->mac_addrs); 703*e7f2fa88SDavid Marchand eth_dev->data->mac_addrs = NULL; 7044ac7516bSRavi Kumar return ret; 7054ac7516bSRavi Kumar } 7064ac7516bSRavi Kumar 707456ff159SRavi Kumar rte_intr_callback_register(&pci_dev->intr_handle, 708456ff159SRavi Kumar axgbe_dev_interrupt_handler, 709456ff159SRavi Kumar (void *)eth_dev); 7108691632fSRavi Kumar PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", 7118691632fSRavi Kumar eth_dev->data->port_id, pci_dev->id.vendor_id, 7128691632fSRavi Kumar pci_dev->id.device_id); 7138691632fSRavi Kumar 7148691632fSRavi Kumar return 0; 7158691632fSRavi Kumar } 7168691632fSRavi Kumar 7178691632fSRavi Kumar static int 718572890efSRavi Kumar eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev) 7198691632fSRavi Kumar { 720456ff159SRavi Kumar struct rte_pci_device *pci_dev; 721456ff159SRavi Kumar 7228691632fSRavi Kumar PMD_INIT_FUNC_TRACE(); 7238691632fSRavi Kumar 724572890efSRavi Kumar if (rte_eal_process_type() != RTE_PROC_PRIMARY) 725572890efSRavi 
Kumar return 0; 726572890efSRavi Kumar 727456ff159SRavi Kumar pci_dev = RTE_DEV_TO_PCI(eth_dev->device); 7289e890103SRavi Kumar eth_dev->dev_ops = NULL; 7298590b93dSRavi Kumar eth_dev->rx_pkt_burst = NULL; 7308590b93dSRavi Kumar eth_dev->tx_pkt_burst = NULL; 7319e890103SRavi Kumar axgbe_dev_clear_queues(eth_dev); 732572890efSRavi Kumar 733456ff159SRavi Kumar /* disable uio intr before callback unregister */ 734456ff159SRavi Kumar rte_intr_disable(&pci_dev->intr_handle); 735456ff159SRavi Kumar rte_intr_callback_unregister(&pci_dev->intr_handle, 736456ff159SRavi Kumar axgbe_dev_interrupt_handler, 737456ff159SRavi Kumar (void *)eth_dev); 738456ff159SRavi Kumar 7398691632fSRavi Kumar return 0; 7408691632fSRavi Kumar } 7418691632fSRavi Kumar 7428691632fSRavi Kumar static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, 7438691632fSRavi Kumar struct rte_pci_device *pci_dev) 7448691632fSRavi Kumar { 7458691632fSRavi Kumar return rte_eth_dev_pci_generic_probe(pci_dev, 7468691632fSRavi Kumar sizeof(struct axgbe_port), eth_axgbe_dev_init); 7478691632fSRavi Kumar } 7488691632fSRavi Kumar 7498691632fSRavi Kumar static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev) 7508691632fSRavi Kumar { 7518691632fSRavi Kumar return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit); 7528691632fSRavi Kumar } 7538691632fSRavi Kumar 7548691632fSRavi Kumar static struct rte_pci_driver rte_axgbe_pmd = { 7558691632fSRavi Kumar .id_table = pci_id_axgbe_map, 7568691632fSRavi Kumar .drv_flags = RTE_PCI_DRV_NEED_MAPPING, 7578691632fSRavi Kumar .probe = eth_axgbe_pci_probe, 7588691632fSRavi Kumar .remove = eth_axgbe_pci_remove, 7598691632fSRavi Kumar }; 7608691632fSRavi Kumar 7618691632fSRavi Kumar RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd); 7628691632fSRavi Kumar RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map); 7638691632fSRavi Kumar RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci"); 7648691632fSRavi Kumar 
765f8e99896SThomas Monjalon RTE_INIT(axgbe_init_log) 7668691632fSRavi Kumar { 7678691632fSRavi Kumar axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init"); 7688691632fSRavi Kumar if (axgbe_logtype_init >= 0) 7698691632fSRavi Kumar rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE); 7708691632fSRavi Kumar axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver"); 7718691632fSRavi Kumar if (axgbe_logtype_driver >= 0) 7728691632fSRavi Kumar rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE); 7738691632fSRavi Kumar } 774