/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_rxtx.h"
#include "axgbe_ethdev.h"
#include "axgbe_common.h"
#include "axgbe_phy.h"

static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
static int axgbe_dev_configure(struct rte_eth_dev *dev);
static int axgbe_dev_start(struct rte_eth_dev *dev);
static void axgbe_dev_stop(struct rte_eth_dev *dev);
static void axgbe_dev_interrupt_handler(void *param);
static void axgbe_dev_close(struct rte_eth_dev *dev);
static void axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
static void axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
static int axgbe_dev_link_update(struct rte_eth_dev *dev,
				 int wait_to_complete);
static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
			       struct rte_eth_stats *stats);
static void axgbe_dev_stats_reset(struct rte_eth_dev *dev);
static void axgbe_dev_info_get(struct rte_eth_dev *dev,
			       struct rte_eth_dev_info *dev_info);

/* The set of PCI devices this driver supports */
#define AMD_PCI_VENDOR_ID 0x1022
#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459

int axgbe_logtype_init;
int axgbe_logtype_driver;

static const struct rte_pci_id pci_id_axgbe_map[] = {
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
	{RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
	{ .vendor_id = 0, },
};

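/* Version-specific data for the two supported device IDs. The V2A and V2B
 * parts share the same V2 PHY/XPCS access method and differ mainly in the
 * maximum Tx/Rx FIFO sizes declared below.
 */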
static struct axgbe_version_data axgbe_v2a = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 229376,
	.rx_max_fifo_size = 229376,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.an_cdr_workaround = 1,
};

static struct axgbe_version_data axgbe_v2b = {
	.init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
	.xpcs_access = AXGBE_XPCS_ACCESS_V2,
	.mmc_64bit = 1,
	.tx_max_fifo_size = 65536,
	.rx_max_fifo_size = 65536,
	.tx_tstamp_workaround = 1,
	.ecc_support = 1,
	.i2c_support = 1,
	.an_cdr_workaround = 1,
};

static const struct rte_eth_desc_lim rx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct rte_eth_desc_lim tx_desc_lim = {
	.nb_max = AXGBE_MAX_RING_DESC,
	.nb_min = AXGBE_MIN_RING_DESC,
	.nb_align = 8,
};

static const struct eth_dev_ops axgbe_eth_dev_ops = {
	.dev_configure = axgbe_dev_configure,
	.dev_start = axgbe_dev_start,
	.dev_stop = axgbe_dev_stop,
	.dev_close = axgbe_dev_close,
	.promiscuous_enable = axgbe_dev_promiscuous_enable,
	.promiscuous_disable = axgbe_dev_promiscuous_disable,
	.allmulticast_enable = axgbe_dev_allmulticast_enable,
	.allmulticast_disable = axgbe_dev_allmulticast_disable,
	.link_update = axgbe_dev_link_update,
	.stats_get = axgbe_dev_stats_get,
	.stats_reset = axgbe_dev_stats_reset,
	.dev_infos_get = axgbe_dev_info_get,
	.rx_queue_setup = axgbe_dev_rx_queue_setup,
	.rx_queue_release = axgbe_dev_rx_queue_release,
	.tx_queue_setup = axgbe_dev_tx_queue_setup,
	.tx_queue_release = axgbe_dev_tx_queue_release,
};

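/* Reset the driver's link bookkeeping and invoke the PHY reset hook. */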
static int axgbe_phy_reset(struct axgbe_port *pdata)
{
	pdata->phy_link = -1;
	pdata->phy_speed = SPEED_UNKNOWN;
	return pdata->phy_if.phy_reset(pdata);
}

/*
 * Interrupt handler triggered by NIC for handling
 * specific interrupt.
 *
 * @param handle
 *  Pointer to interrupt handle.
 * @param param
 *  The address of parameter (struct rte_eth_dev *) registered before.
 *
 * @return
 *  void
 */
static void
axgbe_dev_interrupt_handler(void *param)
{
	struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int dma_isr, dma_ch_isr;

	pdata->phy_if.an_isr(pdata);
	/* DMA related interrupts */
	dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
	if (dma_isr) {
		if (dma_isr & 1) {
			dma_ch_isr =
				AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
						  pdata->rx_queues[0],
						  DMA_CH_SR);
			AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
					   pdata->rx_queues[0],
					   DMA_CH_SR, dma_ch_isr);
		}
	}
	/* Enable interrupts since disabled after generation */
	rte_intr_enable(&pdata->pci_dev->intr_handle);
}

/*
 * Configure device link speed and setup link.
 * It returns 0 on success.
 */
static int
axgbe_dev_configure(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	/* Checksum offload to hardware */
	pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
				DEV_RX_OFFLOAD_CHECKSUM;
	return 0;
}

static int
axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
{
	struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;

	if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
		pdata->rss_enable = 1;
	else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
		pdata->rss_enable = 0;
	else
		return -1;
	return 0;
}

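/* Start the device: configure RX multi-queue mode, reset the PHY, initialise
 * the MAC/DMA, enable interrupts and turn on the Tx and Rx paths.
 */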
static int
axgbe_dev_start(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
	int ret;

	/* Multiqueue RSS */
	ret = axgbe_dev_rx_mq_config(dev);
	if (ret) {
		PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
		return ret;
	}
	ret = axgbe_phy_reset(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "phy reset failed\n");
		return ret;
	}
	ret = pdata->hw_if.init(pdata);
	if (ret) {
		PMD_DRV_LOG(ERR, "dev_init failed\n");
		return ret;
	}

	/* enable uio/vfio intr/eventfd mapping */
	rte_intr_enable(&pdata->pci_dev->intr_handle);

	/* phy start */
	pdata->phy_if.phy_start(pdata);
	axgbe_dev_enable_tx(dev);
	axgbe_dev_enable_rx(dev);

	axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
	return 0;
}

/* Stop device: disable rx and tx functions to allow for reconfiguring. */
static void
axgbe_dev_stop(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata = dev->data->dev_private;

	rte_intr_disable(&pdata->pci_dev->intr_handle);

	if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
		return;

	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	axgbe_dev_disable_tx(dev);
	axgbe_dev_disable_rx(dev);

	pdata->phy_if.phy_stop(pdata);
	pdata->hw_if.exit(pdata);
	memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
}

/* Clear all resources like TX/RX queues. */
static void
axgbe_dev_close(struct rte_eth_dev *dev)
{
	axgbe_dev_clear_queues(dev);
}

static void
axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata = dev->data->dev_private;

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
}

static void
axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata = dev->data->dev_private;

	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
}

static void
axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata = dev->data->dev_private;

	if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
}

static void
axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata = dev->data->dev_private;

	if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
		return;
	AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
}

/* return 0 means link status changed, -1 means not changed */
static int
axgbe_dev_link_update(struct rte_eth_dev *dev,
		      int wait_to_complete __rte_unused)
{
	struct axgbe_port *pdata = dev->data->dev_private;
	struct rte_eth_link link;
	int ret = 0;

	PMD_INIT_FUNC_TRACE();
	rte_delay_ms(800);

	pdata->phy_if.phy_status(pdata);

	memset(&link, 0, sizeof(struct rte_eth_link));
	link.link_duplex = pdata->phy.duplex;
	link.link_status = pdata->phy_link;
	link.link_speed = pdata->phy_speed;
	link.link_autoneg = !(dev->data->dev_conf.link_speeds &
			      ETH_LINK_SPEED_FIXED);
	ret = rte_eth_linkstatus_set(dev, &link);
	if (ret == -1)
		PMD_DRV_LOG(ERR, "No change in link status\n");

	return ret;
}

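/* Basic statistics are maintained in software: the Rx/Tx paths keep
 * per-queue packet and byte counters which are summed up here.
 */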
static int
axgbe_dev_stats_get(struct rte_eth_dev *dev,
		    struct rte_eth_stats *stats)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		stats->q_ipackets[i] = rxq->pkts;
		stats->ipackets += rxq->pkts;
		stats->q_ibytes[i] = rxq->bytes;
		stats->ibytes += rxq->bytes;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		stats->q_opackets[i] = txq->pkts;
		stats->opackets += txq->pkts;
		stats->q_obytes[i] = txq->bytes;
		stats->obytes += txq->bytes;
	}

	return 0;
}

static void
axgbe_dev_stats_reset(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;
	unsigned int i;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		rxq->pkts = 0;
		rxq->bytes = 0;
		rxq->errors = 0;
	}
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		txq->pkts = 0;
		txq->bytes = 0;
		txq->errors = 0;
	}
}

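/* Report device capabilities: ring counts, buffer sizes, supported offloads,
 * RSS parameters and descriptor limits.
 */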
static void
axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
	struct axgbe_port *pdata = dev->data->dev_private;

	dev_info->max_rx_queues = pdata->rx_ring_count;
	dev_info->max_tx_queues = pdata->tx_ring_count;
	dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
	dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
	dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
	dev_info->speed_capa = ETH_LINK_SPEED_10G;

	dev_info->rx_offload_capa =
		DEV_RX_OFFLOAD_IPV4_CKSUM |
		DEV_RX_OFFLOAD_UDP_CKSUM |
		DEV_RX_OFFLOAD_TCP_CKSUM |
		DEV_RX_OFFLOAD_KEEP_CRC;

	dev_info->tx_offload_capa =
		DEV_TX_OFFLOAD_IPV4_CKSUM |
		DEV_TX_OFFLOAD_UDP_CKSUM |
		DEV_TX_OFFLOAD_TCP_CKSUM;

	if (pdata->hw_feat.rss) {
		dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
		dev_info->reta_size = pdata->hw_feat.hash_table_size;
		dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
	}

	dev_info->rx_desc_lim = rx_desc_lim;
	dev_info->tx_desc_lim = tx_desc_lim;

	dev_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = AXGBE_RX_FREE_THRESH,
	};

	dev_info->default_txconf = (struct rte_eth_txconf) {
		.tx_free_thresh = AXGBE_TX_FREE_THRESH,
	};
}

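/* Read the MAC_HWF0R/1R/2R hardware feature registers and decode the
 * capabilities advertised by the controller.
 */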
static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
{
	unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
	struct axgbe_hw_features *hw_feat = &pdata->hw_feat;

	mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
	mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
	mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);

	memset(hw_feat, 0, sizeof(*hw_feat));

	hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);

	/* Hardware feature register 0 */
	hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
	hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
	hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
	hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
	hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
	hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
	hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
	hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
	hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
	hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
	hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
	hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
					    ADDMACADRSEL);
	hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
	hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);

	/* Hardware feature register 1 */
	hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						RXFIFOSIZE);
	hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						TXFIFOSIZE);
	hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
					     MAC_HWF1R, ADVTHWORD);
	hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
	hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
	hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
	hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
	hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
	hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
	hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
	hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   HASHTBLSZ);
	hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
						   L3L4FNUM);

	/* Hardware feature register 2 */
	hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
	hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
	hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
	hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
	hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
	hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
						AUXSNAPNUM);

	/* Translate the Hash Table size into actual number */
	switch (hw_feat->hash_table_size) {
	case 0:
		break;
	case 1:
		hw_feat->hash_table_size = 64;
		break;
	case 2:
		hw_feat->hash_table_size = 128;
		break;
	case 3:
		hw_feat->hash_table_size = 256;
		break;
	}

	/* Translate the address width setting into actual number */
	switch (hw_feat->dma_width) {
	case 0:
		hw_feat->dma_width = 32;
		break;
	case 1:
		hw_feat->dma_width = 40;
		break;
	case 2:
		hw_feat->dma_width = 48;
		break;
	default:
		hw_feat->dma_width = 32;
	}

	/* The Queue, Channel and TC counts are zero based so increment them
	 * to get the actual number
	 */
	hw_feat->rx_q_cnt++;
	hw_feat->tx_q_cnt++;
	hw_feat->rx_ch_cnt++;
	hw_feat->tx_ch_cnt++;
	hw_feat->tc_cnt++;

	/* Translate the fifo sizes into actual numbers */
	hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
	hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
}

static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
{
	axgbe_init_function_ptrs_dev(&pdata->hw_if);
	axgbe_init_function_ptrs_phy(&pdata->phy_if);
	axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
	pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
}

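/* Derive the Tx/Rx ring and queue counts from the hardware limits and any
 * previously configured maximums.
 */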
static void axgbe_set_counts(struct axgbe_port *pdata)
{
	/* Set all the function pointers */
	axgbe_init_all_fptrs(pdata);

	/* Populate the hardware features */
	axgbe_get_all_hw_features(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_channel_count)
		pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
	if (!pdata->rx_max_channel_count)
		pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;

	if (!pdata->tx_max_q_count)
		pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
	if (!pdata->rx_max_q_count)
		pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues or maximum allowed
	 */
	pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
				       pdata->tx_max_channel_count);
	pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
				       pdata->tx_max_q_count);

	pdata->tx_q_count = pdata->tx_ring_count;

	pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
				       pdata->rx_max_channel_count);

	pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
				    pdata->rx_max_q_count);
}

static void axgbe_default_config(struct axgbe_port *pdata)
{
	pdata->pblx8 = DMA_PBL_X8_ENABLE;
	pdata->tx_sf_mode = MTL_TSF_ENABLE;
	pdata->tx_threshold = MTL_TX_THRESHOLD_64;
	pdata->tx_pbl = DMA_PBL_32;
	pdata->tx_osp_mode = DMA_OSP_ENABLE;
	pdata->rx_sf_mode = MTL_RSF_ENABLE;
	pdata->rx_threshold = MTL_RX_THRESHOLD_64;
	pdata->rx_pbl = DMA_PBL_32;
	pdata->pause_autoneg = 1;
	pdata->tx_pause = 0;
	pdata->rx_pause = 0;
	pdata->phy_speed = SPEED_UNKNOWN;
	pdata->power_down = 0;
}

/*
 * Initialise an axgbe port: map the register BARs, read the MAC address and
 * hardware capabilities, and apply the default configuration.
 * It returns 0 on success.
 */
static int
eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
{
	PMD_INIT_FUNC_TRACE();
	struct axgbe_port *pdata;
	struct rte_pci_device *pci_dev;
	uint32_t reg, mac_lo, mac_hi;
	int ret;

	eth_dev->dev_ops = &axgbe_eth_dev_ops;
	eth_dev->rx_pkt_burst = &axgbe_recv_pkts;

	/*
	 * For secondary processes, we don't initialise any further as primary
	 * has already done this work.
	 */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pdata = (struct axgbe_port *)eth_dev->data->dev_private;
	/* initial state */
	axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
	axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
	pdata->eth_dev = eth_dev;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	pdata->pci_dev = pci_dev;

	pdata->xgmac_regs =
		(void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
	pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
				     + AXGBE_MAC_PROP_OFFSET);
	pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
				    + AXGBE_I2C_CTRL_OFFSET);
	pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;

	/* version specific driver data */
	if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
		pdata->vdata = &axgbe_v2a;
	else
		pdata->vdata = &axgbe_v2b;

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
	pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
	pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	PMD_INIT_LOG(DEBUG,
		     "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
		     pdata->xpcs_window_size, pdata->xpcs_window_mask);
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
	pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
	pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
	pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
	pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
	pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;

	eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
					       ETHER_ADDR_LEN, 0);
	if (!eth_dev->data->mac_addrs) {
		PMD_INIT_LOG(ERR,
			     "Failed to alloc %u bytes needed to store MAC addr tbl",
			     ETHER_ADDR_LEN);
		return -ENOMEM;
	}

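	/* Fall back to a random MAC if the address read from the hardware is
	 * not a valid assigned unicast address.
	 */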
	if (!rte_is_valid_assigned_ether_addr(&pdata->mac_addr))
		rte_eth_random_addr(pdata->mac_addr.addr_bytes);

	/* Copy the permanent MAC address */
	rte_ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);

	/* Clock settings */
	pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
	pdata->arcache = AXGBE_DMA_OS_ARCACHE;
	pdata->awcache = AXGBE_DMA_OS_AWCACHE;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);

	/* Set the hardware channel and queue counts */
	axgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
					  pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
					  pdata->vdata->rx_max_fifo_size);
	/* Issue software reset to DMA */
	ret = pdata->hw_if.exit(pdata);
	if (ret)
		PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");

	/* Set default configuration data */
	axgbe_default_config(pdata);

	/* Set default max values if not provided */
	if (!pdata->tx_max_fifo_size)
		pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
	if (!pdata->rx_max_fifo_size)
		pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;

	pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
	pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
	pthread_mutex_init(&pdata->xpcs_mutex, NULL);
	pthread_mutex_init(&pdata->i2c_mutex, NULL);
	pthread_mutex_init(&pdata->an_mutex, NULL);
	pthread_mutex_init(&pdata->phy_mutex, NULL);

	ret = pdata->phy_if.phy_init(pdata);
	if (ret) {
		rte_free(eth_dev->data->mac_addrs);
		return ret;
	}

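	/* Register the interrupt handler; it services PHY auto-negotiation
	 * and DMA channel events.
	 */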
	rte_intr_callback_register(&pci_dev->intr_handle,
				   axgbe_dev_interrupt_handler,
				   (void *)eth_dev);
	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
		     eth_dev->data->port_id, pci_dev->id.vendor_id,
		     pci_dev->id.device_id);

	return 0;
}

static int
eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pci_dev;

	PMD_INIT_FUNC_TRACE();

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
	eth_dev->dev_ops = NULL;
	eth_dev->rx_pkt_burst = NULL;
	eth_dev->tx_pkt_burst = NULL;
	axgbe_dev_clear_queues(eth_dev);

	/* disable uio intr before callback unregister */
	rte_intr_disable(&pci_dev->intr_handle);
	rte_intr_callback_unregister(&pci_dev->intr_handle,
				     axgbe_dev_interrupt_handler,
				     (void *)eth_dev);

	return 0;
}

static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
			       struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev,
		sizeof(struct axgbe_port), eth_axgbe_dev_init);
}

static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
}

static struct rte_pci_driver rte_axgbe_pmd = {
	.id_table = pci_id_axgbe_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = eth_axgbe_pci_probe,
	.remove = eth_axgbe_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");

RTE_INIT(axgbe_init_log)
{
	axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
	if (axgbe_logtype_init >= 0)
		rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
	axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
	if (axgbe_logtype_driver >= 0)
		rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
}