/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_rxtx.h"
#include "axgbe_phy.h"

#include <rte_time.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_vect.h>

static void
axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (rx_queue) {
		sw_ring = rx_queue->sw_ring;
		if (sw_ring) {
			for (i = 0; i < rx_queue->nb_desc; i++) {
				rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(rx_queue);
	}
}

void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	axgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
}

int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mp)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t size;
	const struct rte_memzone *dma;
	struct axgbe_rx_queue *rxq;
	uint32_t rx_desc = nb_desc;
	struct axgbe_port *pdata = dev->data->dev_private;

	/*
	 * Validate the Rx descriptor count: it must be a power of 2 and
	 * must not exceed the maximum supported by the hardware.
	 */
	if ((!rte_is_power_of_2(rx_desc)) ||
	    rx_desc > pdata->rx_desc_count)
		return -EINVAL;
	/* First allocate the Rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue",
				 sizeof(struct axgbe_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}

	rxq->cur = 0;
	rxq->dirty = 0;
	rxq->pdata = pdata;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->nb_desc = rx_desc;
	rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
				 (DMA_CH_INC * rxq->queue_id));
	rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
						  DMA_CH_RDTR_LO);
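	/*
	 * The KEEP_CRC offload decides whether the MAC leaves the Ethernet
	 * FCS on received frames; crc_len is later subtracted from the
	 * packet length reported by the descriptor in the Rx paths.
	 */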
	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	/* CRC stripping in AXGBE is a per-port, not a per-queue, setting */
	pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
	rxq->free_thresh = rx_conf->rx_free_thresh ?
		rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
	if (rxq->free_thresh > rxq->nb_desc)
		rxq->free_thresh = rxq->nb_desc >> 3;

	/* Allocate RX ring hardware descriptors */
	size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
	dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
				       socket_id);
	if (!dma) {
		PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}
	rxq->ring_phys_addr = (uint64_t)dma->iova;
	rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
	memset((void *)rxq->desc, 0, size);
	/* Allocate software ring */
	size = rxq->nb_desc * sizeof(struct rte_mbuf *);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
					  RTE_CACHE_LINE_SIZE,
					  socket_id);
	if (!rxq->sw_ring) {
		PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}
	dev->data->rx_queues[queue_idx] = rxq;
	if (!pdata->rx_queues)
		pdata->rx_queues = dev->data->rx_queues;

	return 0;
}

static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo. Don't
	 * wait forever though...
	 */
	rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());

	while (time_before(rte_get_timer_cycles(), rx_timeout)) {
		rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), rx_timeout))
		PMD_DRV_LOG(ERR,
			    "timed out waiting for Rx queue %u to empty\n",
			    queue);
}

void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Disable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		axgbe_prepare_rx_stop(pdata, i);
	}
	/* Disable each Rx queue */
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Disable Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
	}
}

void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;
	unsigned int reg_val = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Enable Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
	}

	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	/* Frames are forwarded to the application after CRC stripping */
	if (pdata->crc_strip_enable) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	}
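	/* Finally enable the MAC receiver (RE bit in MAC_RCR) */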
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}

/* Rx function with one-to-one descriptor refresh (one mbuf per descriptor) */
uint16_t
axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint64_t old_dirty = rxq->dirty;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err, etlt;
	uint32_t error_status;
	uint16_t idx, pidx, pkt_len;
	uint64_t offloads;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	while (nb_rx < nb_pkts) {
		if (unlikely(idx == rxq->nb_desc))
			idx = 0;

		desc = &rxq->desc[idx];

		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;
		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
				    " queue_id = %u\n",
				    (unsigned int)rxq->port_id,
				    (unsigned int)rxq->queue_id);
			rte_eth_devices[
				rxq->port_id].data->rx_mbuf_alloc_failed++;
			rxq->rx_mbuf_alloc_failed++;
			break;
		}
		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		/* Check for any errors and free the mbuf */
		err = AXGMAC_GET_BITS_LE(desc->write.desc3,
					 RX_NORMAL_DESC3, ES);
		error_status = 0;
		if (unlikely(err)) {
			error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
			if ((error_status != AXGBE_L3_CSUM_ERR) &&
			    (error_status != AXGBE_L4_CSUM_ERR)) {
				rxq->errors++;
				rte_pktmbuf_free(mbuf);
				goto err_set;
			}
		}
		if (rxq->pdata->rx_csum_enable) {
			mbuf->ol_flags = 0;
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
			} else if (
				unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			}
		}
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
					  RX_NORMAL_DESC3, ETLT);
		offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
		if (!err || !etlt) {
			if (etlt == RX_CVLAN_TAG_PRESENT) {
				mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
				mbuf->vlan_tci =
					AXGMAC_GET_BITS_LE(desc->write.desc0,
							   RX_NORMAL_DESC0, OVT);
				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
					mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
				else
					mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
			} else {
				mbuf->ol_flags &=
					~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
				mbuf->vlan_tci = 0;
			}
		}
		/* Indicate if a Context Descriptor is next */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CDA))
			mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP
					| RTE_MBUF_F_RX_IEEE1588_TMST;
		pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
					     PL) - rxq->crc_len;
		/* Mbuf populate */
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;
		mbuf->pkt_len = pkt_len;
		mbuf->data_len = pkt_len;
		rxq->bytes += pkt_len;
		rx_pkts[nb_rx++] = mbuf;
err_set:
		rxq->cur++;
		rxq->sw_ring[idx++] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
		rxq->dirty++;
	}
	rxq->pkts += nb_rx;
	if (rxq->dirty != old_dirty) {
		rte_wmb();
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (idx * sizeof(union axgbe_rx_desc))));
	}

	return nb_rx;
}
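
/*
 * Scatter-gather Rx path: a frame larger than one mbuf spans several
 * descriptors. Segments are chained with rte_pktmbuf_chain() until the
 * descriptor with the Last Descriptor (LD) bit is seen, and only then
 * is the assembled packet handed to the application.
 */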

uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;

	uint64_t old_dirty = rxq->dirty;
	struct rte_mbuf *first_seg = NULL;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err = 0, etlt;
	uint32_t error_status = 0;
	uint16_t idx, pidx, data_len = 0, pkt_len = 0;
	uint64_t offloads;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	while (nb_rx < nb_pkts) {
		bool eop = 0;
next_desc:
		if (unlikely(idx == rxq->nb_desc))
			idx = 0;

		desc = &rxq->desc[idx];

		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;

		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
				    " queue_id = %u\n",
				    (unsigned int)rxq->port_id,
				    (unsigned int)rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));

		if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_NORMAL_DESC3, LD)) {
			eop = 0;
			pkt_len = rxq->buf_size;
			data_len = pkt_len;
		} else {
			eop = 1;
			pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
						     RX_NORMAL_DESC3, PL) - rxq->crc_len;
			data_len = pkt_len % rxq->buf_size;
			/* Check for any errors and free mbuf */
			err = AXGMAC_GET_BITS_LE(desc->write.desc3,
						 RX_NORMAL_DESC3, ES);
			error_status = 0;
			if (unlikely(err)) {
				error_status = desc->write.desc3 &
					AXGBE_ERR_STATUS;
				if (error_status != AXGBE_L3_CSUM_ERR &&
				    error_status != AXGBE_L4_CSUM_ERR) {
					rxq->errors++;
					rte_pktmbuf_free(mbuf);
					rte_pktmbuf_free(first_seg);
					first_seg = NULL;
					eop = 0;
					goto err_set;
				}
			}

		}
		/* Mbuf populate */
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->data_len = data_len;
		mbuf->pkt_len = data_len;

		if (first_seg != NULL) {
			if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
				rte_mempool_put(rxq->mb_pool,
						first_seg);
		} else {
			first_seg = mbuf;
		}

		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
					  RX_NORMAL_DESC3, ETLT);
		offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
		if (!err || !etlt) {
			if (etlt == RX_CVLAN_TAG_PRESENT) {
				mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
				mbuf->vlan_tci =
					AXGMAC_GET_BITS_LE(desc->write.desc0,
							   RX_NORMAL_DESC0, OVT);
				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
					mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
				else
					mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
			} else {
				mbuf->ol_flags &=
					~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
				mbuf->vlan_tci = 0;
			}
		}

err_set:
		rxq->cur++;
		rxq->sw_ring[idx++] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
		rxq->dirty++;

		if (!eop) {
			rte_pktmbuf_free(mbuf);
			goto next_desc;
		}

		first_seg->pkt_len = pkt_len;
		rxq->bytes += pkt_len;
		mbuf->next = NULL;
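
		/* Only the final (EOP) segment reaches this point; finalize
		 * the assembled packet before handing it to the application.
		 */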
		first_seg->port = rxq->port_id;
		if (rxq->pdata->rx_csum_enable) {
			mbuf->ol_flags = 0;
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
			} else if (unlikely(error_status
					    == AXGBE_L4_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			}
		}

		rx_pkts[nb_rx++] = first_seg;

		/* Set up the receive context for a new packet. */
		first_seg = NULL;
	}

	/* Save the receive context. */
	rxq->pkts += nb_rx;

	if (rxq->dirty != old_dirty) {
		rte_wmb();
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (idx * sizeof(union axgbe_rx_desc))));
	}
	return nb_rx;
}

/* Tx APIs */
static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (tx_queue) {
		sw_ring = tx_queue->sw_ring;
		if (sw_ring) {
			for (i = 0; i < tx_queue->nb_desc; i++) {
				rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(tx_queue);
	}
}

void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	axgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
}

int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t tx_desc;
	struct axgbe_port *pdata;
	struct axgbe_tx_queue *txq;
	unsigned int tsize;
	const struct rte_memzone *tz;
	uint64_t offloads;

	tx_desc = nb_desc;
	pdata = dev->data->dev_private;

	/*
	 * Validate the Tx descriptor count: it must be a power of 2,
	 * no larger than the hardware maximum and no smaller than the
	 * minimum ring size.
	 */
	if ((!rte_is_power_of_2(tx_desc)) ||
	    tx_desc > pdata->tx_desc_count ||
	    tx_desc < AXGBE_MIN_RING_DESC)
		return -EINVAL;

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (!txq)
		return -ENOMEM;
	txq->pdata = pdata;
	offloads = tx_conf->offloads |
		   txq->pdata->eth_dev->data->dev_conf.txmode.offloads;
	txq->nb_desc = tx_desc;
	txq->free_thresh = tx_conf->tx_free_thresh ?
		tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
	if (txq->free_thresh > txq->nb_desc)
		txq->free_thresh = (txq->nb_desc >> 1);
	txq->free_batch_cnt = txq->free_thresh;

	/* For the vector Tx path the ring size must be a multiple of the free threshold */
	if (txq->nb_desc % txq->free_thresh != 0)
		txq->vector_disable = 1;

	if (offloads != 0)
		txq->vector_disable = 1;

	/* Allocate TX ring hardware descriptors */
	tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
				      tsize, AXGBE_DESC_ALIGN, socket_id);
	if (!tz) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	memset(tz->addr, 0, tsize);
	txq->ring_phys_addr = (uint64_t)tz->iova;
	txq->desc = tz->addr;
	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
				 (DMA_CH_INC * txq->queue_id));
	txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
						  DMA_CH_TDTR_LO);
	txq->cur = 0;
	txq->dirty = 0;
	txq->nb_desc_free = txq->nb_desc;
	/* Allocate software ring */
	tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
				   RTE_CACHE_LINE_SIZE);
	if (!txq->sw_ring) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	dev->data->tx_queues[queue_idx] = txq;
	if (!pdata->tx_queues)
		pdata->tx_queues = dev->data->tx_queues;

	if (txq->vector_disable ||
	    rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
		dev->tx_pkt_burst = &axgbe_xmit_pkts;
	else
#ifdef RTE_ARCH_X86
		dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
#else
		dev->tx_pkt_burst = &axgbe_xmit_pkts;
#endif

	return 0;
}

int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
			     char *fw_version, size_t fw_size)
{
	struct axgbe_port *pdata;
	struct axgbe_hw_features *hw_feat;
	int ret;

	pdata = (struct axgbe_port *)eth_dev->data->dev_private;
	hw_feat = &pdata->hw_feat;

	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
				      unsigned int queue)
{
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* The Tx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Tx queue to empty the Tx fifo. Don't
	 * wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
		if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
		    (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG(ERR,
			    "timed out waiting for Tx queue %u to empty\n",
			    queue);
}

static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
		return axgbe_txq_prepare_tx_stop(pdata, queue);

	/* Calculate the status register to read and the position within */
	if (queue < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
	} else {
		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
			 DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state. Don't wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG(ERR,
			    "timed out waiting for Tx DMA channel %u to stop\n",
			    queue);
}

void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Prepare for stopping the DMA channels */
	for (i = 0; i < pdata->tx_q_count; i++) {
		txq = dev->data->tx_queues[i];
		axgbe_prepare_tx_stop(pdata, i);
	}
	/* Disable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
					0);
	/* Disable each Tx DMA channel */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
	}
}

void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Enable Tx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
	}
	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
					MTL_Q_ENABLED);
	/* Enable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

/* Free mbufs whose Tx descriptors the hardware has completed */
static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
	while (txq->cur != txq->dirty) {
		if (unlikely(idx == txq->nb_desc))
			idx = 0;
		desc = &txq->desc[idx];
		/* Check for ownership */
		if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
			return;
		memset((void *)&desc->desc2, 0, 8);
		/* Free mbuf */
		rte_pktmbuf_free(txq->sw_ring[idx]);
		txq->sw_ring[idx++] = NULL;
		txq->dirty++;
	}
}

/* Tx descriptor formation.
 * Each mbuf requires exactly one descriptor; the mbuf is assumed
 * to be linear (a single segment).
 */
static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
			 struct rte_mbuf *mbuf)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;
	uint64_t mask;

	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	desc = &txq->desc[idx];

	/* Update buffer address and length */
	desc->baddr = rte_mbuf_data_iova(mbuf);
	AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
			   mbuf->pkt_len);
	/* Total message length to transmit */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
			   mbuf->pkt_len);
	/* Timestamp enablement check */
	if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
	rte_wmb();
	/* Mark it as First and Last Descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
	/* Mark it as a NORMAL descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
	/* Configure h/w checksum offload */
	mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
	if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
	else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
	rte_wmb();

	if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
		/* Mark it as a CONTEXT descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   CTXT, 1);
		/* Set the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   VT, mbuf->vlan_tci);
		/* Indicate this descriptor contains the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   VLTV, 1);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
				   TX_NORMAL_DESC2_VLAN_INSERT);
	} else {
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
	}
	rte_wmb();

	/* Set OWN bit */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
	rte_wmb();

	/* Save mbuf */
	txq->sw_ring[idx] = mbuf;
	/* Update current index */
	txq->cur++;
	/* Update stats */
	txq->bytes += mbuf->pkt_len;

	return 0;
}

/* EAL-supported Tx wrapper */
uint16_t
axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();

	if (unlikely(nb_pkts == 0))
		return nb_pkts;

	struct axgbe_tx_queue *txq;
	uint16_t nb_desc_free;
	uint16_t nb_pkt_sent = 0;
	uint16_t idx;
	uint32_t tail_addr;
	struct rte_mbuf *mbuf;

	txq = (struct axgbe_tx_queue *)tx_queue;
	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);

	if (unlikely(nb_desc_free <= txq->free_thresh)) {
		axgbe_xmit_cleanup(txq);
		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
		if (unlikely(nb_desc_free == 0))
			return 0;
	}
	nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
	while (nb_pkts--) {
		mbuf = *tx_pkts++;
		if (axgbe_xmit_hw(txq, mbuf))
			goto out;
		nb_pkt_sent++;
	}
out:
	/* Sync read and write */
	rte_mb();
	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	tail_addr = low32_value(txq->ring_phys_addr +
				idx * sizeof(struct axgbe_tx_desc));
	/* Update the tail register with the next address to kick the Tx DMA channel */
	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
	txq->pkts += nb_pkt_sent;
	return nb_pkt_sent;
}

void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t i;
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq) {
			axgbe_rx_queue_release(rxq);
			dev->data->rx_queues[i] = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		if (txq) {
			axgbe_tx_queue_release(txq);
			dev->data->tx_queues[i] = NULL;
		}
	}
}

int
axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= rxq->nb_desc))
		return -EINVAL;

	if (offset >= rxq->nb_desc - rxq->dirty)
		return RTE_ETH_RX_DESC_UNAVAIL;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	desc = &rxq->desc[idx + offset];

	if (!AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct axgbe_tx_queue *txq = tx_queue;
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= txq->nb_desc))
		return -EINVAL;

	if (offset >= txq->nb_desc - txq->dirty)
		return RTE_ETH_TX_DESC_UNAVAIL;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt - 1);
	desc = &txq->desc[idx + offset];

	if (!AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}