/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_rxtx.h"
#include "axgbe_phy.h"

#include <rte_time.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_vect.h>

static void
axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (rx_queue) {
		sw_ring = rx_queue->sw_ring;
		if (sw_ring) {
			for (i = 0; i < rx_queue->nb_desc; i++) {
				if (sw_ring[i])
					rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(rx_queue);
	}
}

void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	axgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
}

int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mp)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t size;
	const struct rte_memzone *dma;
	struct axgbe_rx_queue *rxq;
	uint32_t rx_desc = nb_desc;
	struct axgbe_port *pdata = dev->data->dev_private;

	/*
	 * Validate the Rx descriptor count: it must be a power of 2 and
	 * no greater than the hardware maximum.
	 */
	if ((!rte_is_power_of_2(rx_desc)) ||
	    rx_desc > pdata->rx_desc_count)
		return -EINVAL;
	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue",
				 sizeof(struct axgbe_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}

	rxq->cur = 0;
	rxq->dirty = 0;
	rxq->pdata = pdata;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->nb_desc = rx_desc;
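	/*
	 * Map this queue's DMA channel register block and cache a pointer to
	 * the Rx descriptor tail pointer register, which the receive burst
	 * functions write to hand refilled descriptors back to the hardware.
	 */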
	rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
				 (DMA_CH_INC * rxq->queue_id));
	rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
						  DMA_CH_RDTR_LO);
	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	/* CRC stripping in AXGBE is supported per port, not per queue */
	pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
	rxq->free_thresh = rx_conf->rx_free_thresh ?
		rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
	if (rxq->free_thresh > rxq->nb_desc)
		rxq->free_thresh = rxq->nb_desc >> 3;

	/* Allocate RX ring hardware descriptors */
	size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
	dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
				       socket_id);
	if (!dma) {
		PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}
	rxq->ring_phys_addr = (uint64_t)dma->iova;
	rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
	memset((void *)rxq->desc, 0, size);
	/* Allocate software ring */
	size = rxq->nb_desc * sizeof(struct rte_mbuf *);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
					  RTE_CACHE_LINE_SIZE,
					  socket_id);
	if (!rxq->sw_ring) {
		PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}
	dev->data->rx_queues[queue_idx] = rxq;
	if (!pdata->rx_queues)
		pdata->rx_queues = dev->data->rx_queues;

	return 0;
}

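/*
 * Poll the MTL Rx queue status until the queue and its FIFO drain, bounded
 * by AXGBE_DMA_STOP_TIMEOUT seconds' worth of timer cycles.
 */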
static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo.  Don't
	 * wait forever though...
	 */
	rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());

	while (time_before(rte_get_timer_cycles(), rx_timeout)) {
		rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), rx_timeout))
		PMD_DRV_LOG(ERR,
			    "timed out waiting for Rx queue %u to empty\n",
			    queue);
}

void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Disable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		axgbe_prepare_rx_stop(pdata, i);
	}
	/* Disable each Rx queue */
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Disable Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
	}
}

void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;
	unsigned int reg_val = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Enable Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
	}

	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	/* Frames are forwarded to the application after CRC stripping */
	if (pdata->crc_strip_enable) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	}
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}

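/*
 * Rx ring bookkeeping shared by both receive burst functions below:
 * rxq->cur is the next descriptor to check for a completed packet and
 * rxq->dirty counts descriptors refilled with fresh mbufs.  Refilled
 * descriptors are returned to the hardware with a single tail pointer
 * write per burst.
 */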
/* Rx function: one descriptor refreshed per received packet (non-scattered) */
uint16_t
axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint64_t old_dirty = rxq->dirty;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err, etlt;
	uint32_t error_status;
	uint16_t idx, pidx, pkt_len;
	uint64_t offloads;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	while (nb_rx < nb_pkts) {
		if (unlikely(idx == rxq->nb_desc))
			idx = 0;

		desc = &rxq->desc[idx];

		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;
		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
				    " queue_id = %u\n",
				    (unsigned int)rxq->port_id,
				    (unsigned int)rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			rxq->rx_mbuf_alloc_failed++;
			break;
		}
		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		/* Check for any errors and free mbuf */
		err = AXGMAC_GET_BITS_LE(desc->write.desc3,
					 RX_NORMAL_DESC3, ES);
		error_status = 0;
		if (unlikely(err)) {
			error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
			if ((error_status != AXGBE_L3_CSUM_ERR) &&
			    (error_status != AXGBE_L4_CSUM_ERR)) {
				rxq->errors++;
				rte_pktmbuf_free(mbuf);
				goto err_set;
			}
		}
		if (rxq->pdata->rx_csum_enable) {
			mbuf->ol_flags = 0;
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
			} else if (unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			}
		}
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
					  RX_NORMAL_DESC3, ETLT);
		offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
		if (!err || !etlt) {
			if (etlt == RX_CVLAN_TAG_PRESENT) {
				mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
				mbuf->vlan_tci =
					AXGMAC_GET_BITS_LE(desc->write.desc0,
							   RX_NORMAL_DESC0, OVT);
				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
					mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
				else
					mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
			} else {
				mbuf->ol_flags &=
					~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
				mbuf->vlan_tci = 0;
			}
		}
		/* Indicate if a Context Descriptor is next */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CDA))
			mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP
					| RTE_MBUF_F_RX_IEEE1588_TMST;
		pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
					     PL) - rxq->crc_len;
		/* Mbuf populate */
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;
		mbuf->pkt_len = pkt_len;
		mbuf->data_len = pkt_len;
		rxq->bytes += pkt_len;
		rx_pkts[nb_rx++] = mbuf;
err_set:
		rxq->cur++;
		rxq->sw_ring[idx++] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
		rxq->dirty++;
	}
	rxq->pkts += nb_rx;
	if (rxq->dirty != old_dirty) {
		rte_wmb();
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (idx * sizeof(union axgbe_rx_desc))));
	}

	return nb_rx;
}
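
/*
 * Scatter-gather Rx burst: a packet larger than one mbuf spans several
 * descriptors.  Segments are chained onto first_seg and the descriptor
 * with the LD (last descriptor) bit set carries the total packet length.
 */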
uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
		struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;

	uint64_t old_dirty = rxq->dirty;
	struct rte_mbuf *first_seg = NULL;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err, etlt;
	uint32_t error_status;
	uint16_t idx, pidx, data_len = 0, pkt_len = 0;
	uint64_t offloads;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	while (nb_rx < nb_pkts) {
		bool eop = 0;
next_desc:
		if (unlikely(idx == rxq->nb_desc))
			idx = 0;

		desc = &rxq->desc[idx];

		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;

		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
				    " queue_id = %u\n",
				    (unsigned int)rxq->port_id,
				    (unsigned int)rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		/* Check for any errors and free mbuf */
		err = AXGMAC_GET_BITS_LE(desc->write.desc3,
					 RX_NORMAL_DESC3, ES);
		error_status = 0;
		if (unlikely(err)) {
			error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
			if ((error_status != AXGBE_L3_CSUM_ERR)
			    && (error_status != AXGBE_L4_CSUM_ERR)) {
				rxq->errors++;
				rte_pktmbuf_free(mbuf);
				goto err_set;
			}
		}
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));

		if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_NORMAL_DESC3, LD)) {
			eop = 0;
			pkt_len = rxq->buf_size;
			data_len = pkt_len;
		} else {
			eop = 1;
			pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
						     RX_NORMAL_DESC3, PL);
			data_len = pkt_len - rxq->crc_len;
		}

		if (first_seg != NULL) {
			if (rte_pktmbuf_chain(first_seg, mbuf) != 0)
				rte_mempool_put(rxq->mb_pool, first_seg);
		} else {
			first_seg = mbuf;
		}

		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
					  RX_NORMAL_DESC3, ETLT);
		offloads = rxq->pdata->eth_dev->data->dev_conf.rxmode.offloads;
		if (!err || !etlt) {
			if (etlt == RX_CVLAN_TAG_PRESENT) {
				mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
				mbuf->vlan_tci =
					AXGMAC_GET_BITS_LE(desc->write.desc0,
							   RX_NORMAL_DESC0, OVT);
				if (offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
					mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
				else
					mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
			} else {
				mbuf->ol_flags &=
					~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
				mbuf->vlan_tci = 0;
			}
		}
		/* Mbuf populate */
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->data_len = data_len;

err_set:
		rxq->cur++;
		rxq->sw_ring[idx++] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
		rxq->dirty++;

		/* Not the last segment: keep it chained and fetch the next
		 * descriptor (the segment must not be freed here, it is still
		 * referenced by first_seg).
		 */
		if (!eop)
			goto next_desc;

		first_seg->pkt_len = pkt_len;
		rxq->bytes += pkt_len;
		mbuf->next = NULL;

		first_seg->port = rxq->port_id;
		if (rxq->pdata->rx_csum_enable) {
			mbuf->ol_flags = 0;
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
			} else if (unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			}
		}

		rx_pkts[nb_rx++] = first_seg;

		/* Set up receive context for a new packet. */
		first_seg = NULL;
	}

	/* Save receive context. */
	rxq->pkts += nb_rx;

	if (rxq->dirty != old_dirty) {
		rte_wmb();
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (idx * sizeof(union axgbe_rx_desc))));
	}
	return nb_rx;
}

/* Tx APIs */
static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (tx_queue) {
		sw_ring = tx_queue->sw_ring;
		if (sw_ring) {
			for (i = 0; i < tx_queue->nb_desc; i++) {
				if (sw_ring[i])
					rte_pktmbuf_free(sw_ring[i]);
			}
			rte_free(sw_ring);
		}
		rte_free(tx_queue);
	}
}

void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	axgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
}

int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t tx_desc;
	struct axgbe_port *pdata;
	struct axgbe_tx_queue *txq;
	unsigned int tsize;
	const struct rte_memzone *tz;
	uint64_t offloads;

	tx_desc = nb_desc;
	pdata = dev->data->dev_private;

	/*
	 * Validate the Tx descriptor count: it must be a power of 2, no
	 * greater than the hardware maximum and at least AXGBE_MIN_RING_DESC.
	 */
	if ((!rte_is_power_of_2(tx_desc)) ||
	    tx_desc > pdata->tx_desc_count ||
	    tx_desc < AXGBE_MIN_RING_DESC)
		return -EINVAL;

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (!txq)
		return -ENOMEM;
	txq->pdata = pdata;
	offloads = tx_conf->offloads |
		txq->pdata->eth_dev->data->dev_conf.txmode.offloads;
	txq->nb_desc = tx_desc;
	txq->free_thresh = tx_conf->tx_free_thresh ?
		tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
	if (txq->free_thresh > txq->nb_desc)
		txq->free_thresh = (txq->nb_desc >> 1);
	txq->free_batch_cnt = txq->free_thresh;

	/* In the vector Tx path the ring size must be a multiple of the
	 * free threshold.
	 */
	if (txq->nb_desc % txq->free_thresh != 0)
		txq->vector_disable = 1;

	if (offloads != 0)
		txq->vector_disable = 1;

	/* Allocate TX ring hardware descriptors */
	tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
				      tsize, AXGBE_DESC_ALIGN, socket_id);
	if (!tz) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	memset(tz->addr, 0, tsize);
	txq->ring_phys_addr = (uint64_t)tz->iova;
	txq->desc = tz->addr;
	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
				 (DMA_CH_INC * txq->queue_id));
	txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
						  DMA_CH_TDTR_LO);
	txq->cur = 0;
	txq->dirty = 0;
	txq->nb_desc_free = txq->nb_desc;
	/* Allocate software ring */
	tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
				   RTE_CACHE_LINE_SIZE);
	if (!txq->sw_ring) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	dev->data->tx_queues[queue_idx] = txq;
	if (!pdata->tx_queues)
		pdata->tx_queues = dev->data->tx_queues;

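	/*
	 * Select the Tx burst function: the vector path is used only when no
	 * Tx offloads are requested, the ring size is a multiple of the free
	 * threshold and the EAL allows at least 128-bit SIMD (x86 only).
	 */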
	if (txq->vector_disable ||
			rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_128)
		dev->tx_pkt_burst = &axgbe_xmit_pkts;
	else
#ifdef RTE_ARCH_X86
		dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
#else
		dev->tx_pkt_burst = &axgbe_xmit_pkts;
#endif

	return 0;
}

int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
			     char *fw_version, size_t fw_size)
{
	struct axgbe_port *pdata;
	struct axgbe_hw_features *hw_feat;
	int ret;

	pdata = (struct axgbe_port *)eth_dev->data->dev_private;
	hw_feat = &pdata->hw_feat;

	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

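/*
 * Used by axgbe_prepare_tx_stop() for newer MAC versions (SNPSVER > 0x20):
 * polls the per-queue MTL Tx queue debug status instead of the DMA debug
 * status registers.
 */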
static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
				      unsigned int queue)
{
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* The Tx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Tx queue to empty the Tx fifo.  Don't
	 * wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
		if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
		    (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG(ERR,
			    "timed out waiting for Tx queue %u to empty\n",
			    queue);
}

static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
		return axgbe_txq_prepare_tx_stop(pdata, queue);

	/* Calculate the status register to read and the position within */
	if (queue < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
	} else {
		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
			 DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state. Don't wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG(ERR,
			    "timed out waiting for Tx DMA channel %u to stop\n",
			    queue);
}

void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Prepare for stopping DMA channel */
	for (i = 0; i < pdata->tx_q_count; i++) {
		txq = dev->data->tx_queues[i];
		axgbe_prepare_tx_stop(pdata, i);
	}
	/* Disable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
	/* Disable each Tx DMA channel */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
	}
}

void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Enable Tx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
	}
	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
					MTL_Q_ENABLED);
	/* Enable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}

/* Free mbufs for Tx descriptors that the hardware has completed */
static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
	while (txq->cur != txq->dirty) {
		if (unlikely(idx == txq->nb_desc))
			idx = 0;
		desc = &txq->desc[idx];
		/* Check for ownership */
		if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
			return;
		memset((void *)&desc->desc2, 0, 8);
		/* Free mbuf */
		rte_pktmbuf_free(txq->sw_ring[idx]);
		txq->sw_ring[idx++] = NULL;
		txq->dirty++;
	}
}

/* Tx descriptor formation: assumes a linear (single-segment) mbuf, so each
 * packet consumes exactly one descriptor.
 */
static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
			 struct rte_mbuf *mbuf)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;
	uint64_t mask;

	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	desc = &txq->desc[idx];

	/* Update buffer address and length */
	desc->baddr = rte_mbuf_data_iova(mbuf);
	AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
			   mbuf->pkt_len);
	/* Total msg length to transmit */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
			   mbuf->pkt_len);
	/* Timestamp enablement check */
	if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
	rte_wmb();
	/* Mark it as First and Last Descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
	/* Mark it as a NORMAL descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
	/* Configure h/w checksum offload */
	mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
	if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
	else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
	rte_wmb();

	if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
		/* Mark it as a CONTEXT descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3, CTXT, 1);
		/* Set the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   VT, mbuf->vlan_tci);
		/* Indicate this descriptor contains the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3, VLTV, 1);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
				   TX_NORMAL_DESC2_VLAN_INSERT);
	} else {
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
	}
	rte_wmb();

	/* Set OWN bit */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
	rte_wmb();

	/* Save mbuf */
	txq->sw_ring[idx] = mbuf;
	/* Update current index */
	txq->cur++;
	/* Update stats */
	txq->bytes += mbuf->pkt_len;

	return 0;
}

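/*
 * Tx burst flow: reclaim completed descriptors once the number of free
 * descriptors drops to the free threshold, fill one descriptor per packet
 * via axgbe_xmit_hw() and ring the Tx tail pointer doorbell once per burst.
 */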
/* EAL supported Tx wrapper */
uint16_t
axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();

	if (unlikely(nb_pkts == 0))
		return nb_pkts;

	struct axgbe_tx_queue *txq;
	uint16_t nb_desc_free;
	uint16_t nb_pkt_sent = 0;
	uint16_t idx;
	uint32_t tail_addr;
	struct rte_mbuf *mbuf;

	txq = (struct axgbe_tx_queue *)tx_queue;
	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);

	if (unlikely(nb_desc_free <= txq->free_thresh)) {
		axgbe_xmit_cleanup(txq);
		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
		if (unlikely(nb_desc_free == 0))
			return 0;
	}
	nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
	while (nb_pkts--) {
		mbuf = *tx_pkts++;
		if (axgbe_xmit_hw(txq, mbuf))
			goto out;
		nb_pkt_sent++;
	}
out:
	/* Sync read and write */
	rte_mb();
	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	tail_addr = low32_value(txq->ring_phys_addr +
				idx * sizeof(struct axgbe_tx_desc));
	/* Update tail reg with next immediate address to kick Tx DMA channel */
	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
	txq->pkts += nb_pkt_sent;
	return nb_pkt_sent;
}

void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t i;
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq) {
			axgbe_rx_queue_release(rxq);
			dev->data->rx_queues[i] = NULL;
		}
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		if (txq) {
			axgbe_tx_queue_release(txq);
			dev->data->tx_queues[i] = NULL;
		}
	}
}

int
axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= rxq->nb_desc))
		return -EINVAL;

	if (offset >= rxq->nb_desc - rxq->dirty)
		return RTE_ETH_RX_DESC_UNAVAIL;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	desc = &rxq->desc[idx + offset];

	if (!AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct axgbe_tx_queue *txq = tx_queue;
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= txq->nb_desc))
		return -EINVAL;

	if (offset >= txq->nb_desc - txq->dirty)
		return RTE_ETH_TX_DESC_UNAVAIL;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt - 1);
	desc = &txq->desc[idx + offset];

	if (!AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}