/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
 * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
 */

#include "axgbe_ethdev.h"
#include "axgbe_rxtx.h"
#include "axgbe_phy.h"

#include <rte_time.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_vect.h>

static void
axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (rx_queue) {
		sw_ring = rx_queue->sw_ring;
		if (sw_ring) {
			for (i = 0; i < rx_queue->nb_desc; i++)
				rte_pktmbuf_free(sw_ring[i]);
			rte_free(sw_ring);
		}
		rte_free(rx_queue);
	}
}

void axgbe_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	axgbe_rx_queue_release(dev->data->rx_queues[queue_idx]);
}

int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_rxconf *rx_conf,
			     struct rte_mempool *mp)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t size;
	const struct rte_memzone *dma;
	struct axgbe_rx_queue *rxq;
	uint32_t rx_desc = nb_desc;
	struct axgbe_port *pdata = dev->data->dev_private;

	/*
	 * Validate the Rx descriptor count: it must be a power of 2 and
	 * no larger than the h/w supported maximum.
	 */
	if ((!rte_is_power_of_2(rx_desc)) ||
	    rx_desc > pdata->rx_desc_count)
		return -EINVAL;
	/* First allocate the rx queue data structure */
	rxq = rte_zmalloc_socket("ethdev RX queue",
				 sizeof(struct axgbe_rx_queue),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (!rxq) {
		PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
		return -ENOMEM;
	}

	rxq->cur = 0;
	rxq->dirty = 0;
	rxq->pdata = pdata;
	rxq->mb_pool = mp;
	rxq->queue_id = queue_idx;
	rxq->port_id = dev->data->port_id;
	rxq->nb_desc = rx_desc;
	/* Per-channel DMA register window and Rx descriptor tail pointer */
	rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
				 (DMA_CH_INC * rxq->queue_id));
	rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
						  DMA_CH_RDTR_LO);
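
	/* crc_len records whether the 4-byte FCS is left in received frames
	 * (RTE_ETH_RX_OFFLOAD_KEEP_CRC); it is used below to size packets
	 * and to decide whether the MAC strips the CRC for the whole port.
	 */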
	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		rxq->crc_len = RTE_ETHER_CRC_LEN;
	else
		rxq->crc_len = 0;

	/* AXGBE supports CRC stripping per port, not per queue */
	pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
	rxq->free_thresh = rx_conf->rx_free_thresh ?
		rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
	if (rxq->free_thresh > rxq->nb_desc)
		rxq->free_thresh = rxq->nb_desc >> 3;

	rxq->offloads = rx_conf->offloads | dev->data->dev_conf.rxmode.offloads;
	/* Allocate RX ring hardware descriptors */
	size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
	dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
				       socket_id);
	if (!dma) {
		PMD_DRV_LOG_LINE(ERR, "ring_dma_zone_reserve for rx_ring failed");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}
	rxq->ring_phys_addr = (uint64_t)dma->iova;
	rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
	memset((void *)rxq->desc, 0, size);
	/* Allocate software ring */
	size = rxq->nb_desc * sizeof(struct rte_mbuf *);
	rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
					  RTE_CACHE_LINE_SIZE,
					  socket_id);
	if (!rxq->sw_ring) {
		PMD_DRV_LOG_LINE(ERR, "rte_zmalloc for sw_ring failed");
		axgbe_rx_queue_release(rxq);
		return -ENOMEM;
	}
	dev->data->rx_queues[queue_idx] = rxq;
	if (!pdata->rx_queues)
		pdata->rx_queues = dev->data->rx_queues;

	return 0;
}

static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo. Don't
	 * wait forever though...
	 */
	rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());

	while (time_before(rte_get_timer_cycles(), rx_timeout)) {
		rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), rx_timeout))
		PMD_DRV_LOG_LINE(ERR,
				 "timed out waiting for Rx queue %u to empty",
				 queue);
}

void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Disable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		axgbe_prepare_rx_stop(pdata, i);
	}
	/* Disable each Rx queue */
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Disable Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
	}
}

void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
{
	struct axgbe_rx_queue *rxq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;
	unsigned int reg_val = 0;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];
		/* Enable Rx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
	}

	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);

	/* Enable MAC Rx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	/* Frames are forwarded to the application after CRC stripping */
	if (pdata->crc_strip_enable) {
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
		AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	}
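
	/* The receiver itself (RE) is enabled last, after the DMA channels,
	 * queue mapping and CRC handling above have been programmed.
	 */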
	AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}

/* Rx function: one descriptor is refilled for each packet received */
uint16_t
axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint64_t old_dirty = rxq->dirty;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err, etlt;
	uint32_t error_status;
	uint16_t idx, pidx, pkt_len;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	while (nb_rx < nb_pkts) {
		if (unlikely(idx == rxq->nb_desc))
			idx = 0;

		desc = &rxq->desc[idx];

		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;
		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG_LINE(ERR, "RX mbuf alloc failed port_id = %u"
					 " queue_id = %u",
					 (unsigned int)rxq->port_id,
					 (unsigned int)rxq->queue_id);
			rte_eth_devices[
				rxq->port_id].data->rx_mbuf_alloc_failed++;
			rxq->rx_mbuf_alloc_failed++;
			break;
		}
		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		/* Check for any errors and free the mbuf */
		err = AXGMAC_GET_BITS_LE(desc->write.desc3,
					 RX_NORMAL_DESC3, ES);
		error_status = 0;
		if (unlikely(err)) {
			error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
			if ((error_status != AXGBE_L3_CSUM_ERR) &&
			    (error_status != AXGBE_L4_CSUM_ERR)) {
				rxq->errors++;
				rte_pktmbuf_free(mbuf);
				goto err_set;
			}
		}
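		/* Only L3/L4 checksum errors reach this point; any other
		 * error has already dropped the frame above. The checksum
		 * result is reported to the application via ol_flags.
		 */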
		if (rxq->pdata->rx_csum_enable) {
			mbuf->ol_flags = 0;
			mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
			} else if (unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
				mbuf->ol_flags &= ~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				mbuf->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
			}
		}
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
					  RX_NORMAL_DESC3, ETLT);
		if (!err || !etlt) {
			if (etlt == RX_CVLAN_TAG_PRESENT) {
				mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
				mbuf->vlan_tci =
					AXGMAC_GET_BITS_LE(desc->write.desc0,
							   RX_NORMAL_DESC0, OVT);
				if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
					mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN_STRIPPED;
				else
					mbuf->ol_flags &= ~RTE_MBUF_F_RX_VLAN_STRIPPED;
			} else {
				mbuf->ol_flags &=
					~(RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED);
				mbuf->vlan_tci = 0;
			}
		}
		/* Indicate if a Context Descriptor is next */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, CDA))
			mbuf->ol_flags |= RTE_MBUF_F_RX_IEEE1588_PTP
					| RTE_MBUF_F_RX_IEEE1588_TMST;
		pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
					     PL) - rxq->crc_len;
		/* Mbuf populate */
		mbuf->next = NULL;
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->nb_segs = 1;
		mbuf->port = rxq->port_id;
		mbuf->pkt_len = pkt_len;
		mbuf->data_len = pkt_len;
		rxq->bytes += pkt_len;
		rx_pkts[nb_rx++] = mbuf;
err_set:
		rxq->cur++;
		rxq->sw_ring[idx++] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
		rxq->dirty++;
	}
	rxq->pkts += nb_rx;
	if (rxq->dirty != old_dirty) {
		rte_wmb();
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (idx * sizeof(union axgbe_rx_desc))));
	}

	return nb_rx;
}

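/* Rx function for scattered (multi-segment) packets: a frame larger than one
 * mbuf data buffer spans several descriptors, whose buffers are chained into
 * a single mbuf list. Only the descriptor with the LD bit set carries the
 * total packet length.
 */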
uint16_t eth_axgbe_recv_scattered_pkts(void *rx_queue,
				       struct rte_mbuf **rx_pkts,
				       uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();
	uint16_t nb_rx = 0;
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;

	struct rte_mbuf *first_seg = NULL;
	struct rte_mbuf *mbuf, *tmbuf;
	unsigned int err = 0, etlt;
	uint32_t error_status = 0;
	uint16_t idx, pidx, data_len = 0, pkt_len = 0;
	bool eop = 0;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);

	while (nb_rx < nb_pkts) {
next_desc:
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);

		desc = &rxq->desc[idx];

		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
			break;

		tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (unlikely(!tmbuf)) {
			PMD_DRV_LOG_LINE(ERR, "RX mbuf alloc failed port_id = %u"
					 " queue_id = %u",
					 (unsigned int)rxq->port_id,
					 (unsigned int)rxq->queue_id);
			rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed++;
			break;
		}

		pidx = idx + 1;
		if (unlikely(pidx == rxq->nb_desc))
			pidx = 0;

		rte_prefetch0(rxq->sw_ring[pidx]);
		if ((pidx & 0x3) == 0) {
			rte_prefetch0(&rxq->desc[pidx]);
			rte_prefetch0(&rxq->sw_ring[pidx]);
		}

		mbuf = rxq->sw_ring[idx];
		rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));

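		/* The LD (last descriptor) bit marks the end of a frame.
		 * Until it is seen, each descriptor holds a full buffer of
		 * data; the last descriptor reports the total packet length,
		 * from which the final segment's length is derived.
		 */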
		if (!AXGMAC_GET_BITS_LE(desc->write.desc3,
					RX_NORMAL_DESC3, LD)) {
			eop = 0;
			pkt_len = rxq->buf_size;
			data_len = pkt_len;
		} else {
			eop = 1;
			pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3,
						     RX_NORMAL_DESC3, PL) - rxq->crc_len;
			data_len = pkt_len % rxq->buf_size;
			/* Check for any errors and free the mbuf */
			err = AXGMAC_GET_BITS_LE(desc->write.desc3,
						 RX_NORMAL_DESC3, ES);
			error_status = 0;
			if (unlikely(err)) {
				error_status = desc->write.desc3 &
					AXGBE_ERR_STATUS;
				if (error_status != AXGBE_L3_CSUM_ERR &&
				    error_status != AXGBE_L4_CSUM_ERR) {
					rxq->errors++;
					rte_pktmbuf_free(mbuf);
					rte_pktmbuf_free(first_seg);
					first_seg = NULL;
					eop = 0;
					goto err_set;
				}
			}
		}
		/* Mbuf populate */
		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->data_len = data_len;
		mbuf->pkt_len = data_len;

		if (rxq->saved_mbuf) {
			first_seg = rxq->saved_mbuf;
			rxq->saved_mbuf = NULL;
		}

		if (first_seg != NULL) {
			if (rte_pktmbuf_chain(first_seg, mbuf) != 0) {
				rte_pktmbuf_free(first_seg);
				first_seg = NULL;
				rte_pktmbuf_free(mbuf);
				rxq->saved_mbuf = NULL;
				rxq->errors++;
				eop = 0;
				break;
			}
		} else {
			first_seg = mbuf;
		}

		/* Get the RSS hash */
		if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
			first_seg->hash.rss =
				rte_le_to_cpu_32(desc->write.desc1);
		etlt = AXGMAC_GET_BITS_LE(desc->write.desc3,
					  RX_NORMAL_DESC3, ETLT);
		if (!err || !etlt) {
			if (etlt == RX_CVLAN_TAG_PRESENT) {
				first_seg->ol_flags |= RTE_MBUF_F_RX_VLAN;
				first_seg->vlan_tci =
					AXGMAC_GET_BITS_LE(desc->write.desc0,
							   RX_NORMAL_DESC0, OVT);
				if (rxq->offloads & RTE_ETH_RX_OFFLOAD_VLAN_STRIP)
					first_seg->ol_flags |=
						RTE_MBUF_F_RX_VLAN_STRIPPED;
				else
					first_seg->ol_flags &=
						~RTE_MBUF_F_RX_VLAN_STRIPPED;
			} else {
				first_seg->ol_flags &=
					~(RTE_MBUF_F_RX_VLAN |
					  RTE_MBUF_F_RX_VLAN_STRIPPED);
				first_seg->vlan_tci = 0;
			}
		}

err_set:
		rxq->cur++;
		rxq->sw_ring[idx] = tmbuf;
		desc->read.baddr =
			rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
		memset((void *)(&desc->read.desc2), 0, 8);
		AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);

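		/* Keep collecting descriptors until the whole frame (eop)
		 * has been seen; only then is the chained mbuf completed
		 * and handed to the application.
		 */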
		if (!eop)
			goto next_desc;
		eop = 0;

		rxq->bytes += pkt_len;

		first_seg->port = rxq->port_id;
		if (rxq->pdata->rx_csum_enable) {
			first_seg->ol_flags = 0;
			first_seg->ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			first_seg->ol_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
			if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
				first_seg->ol_flags &=
					~RTE_MBUF_F_RX_IP_CKSUM_GOOD;
				first_seg->ol_flags |=
					RTE_MBUF_F_RX_IP_CKSUM_BAD;
				first_seg->ol_flags &=
					~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				first_seg->ol_flags |=
					RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
			} else if (unlikely(error_status
					    == AXGBE_L4_CSUM_ERR)) {
				first_seg->ol_flags &=
					~RTE_MBUF_F_RX_L4_CKSUM_GOOD;
				first_seg->ol_flags |=
					RTE_MBUF_F_RX_L4_CKSUM_BAD;
			}
		}

		rx_pkts[nb_rx++] = first_seg;

		/* Set up the receive context for a new packet. */
		first_seg = NULL;
	}

	/* Check if we need to save state before leaving */
	if (first_seg != NULL && eop == 0)
		rxq->saved_mbuf = first_seg;

	/* Save the receive context. */
	rxq->pkts += nb_rx;

	if (rxq->dirty != rxq->cur) {
		rte_wmb();
		idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur - 1);
		AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
				   low32_value(rxq->ring_phys_addr +
				   (idx * sizeof(union axgbe_rx_desc))));
		rxq->dirty = rxq->cur;
	}
	return nb_rx;
}

/* Tx APIs */
static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
{
	uint16_t i;
	struct rte_mbuf **sw_ring;

	if (tx_queue) {
		sw_ring = tx_queue->sw_ring;
		if (sw_ring) {
			for (i = 0; i < tx_queue->nb_desc; i++)
				rte_pktmbuf_free(sw_ring[i]);
			rte_free(sw_ring);
		}
		rte_free(tx_queue);
	}
}

void axgbe_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t queue_idx)
{
	axgbe_tx_queue_release(dev->data->tx_queues[queue_idx]);
}

int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
			     uint16_t nb_desc, unsigned int socket_id,
			     const struct rte_eth_txconf *tx_conf)
{
	PMD_INIT_FUNC_TRACE();
	uint32_t tx_desc;
	struct axgbe_port *pdata;
	struct axgbe_tx_queue *txq;
	unsigned int tsize;
	const struct rte_memzone *tz;
	uint64_t offloads;
	struct rte_eth_dev_data *dev_data = dev->data;

	tx_desc = nb_desc;
	pdata = dev->data->dev_private;

	/*
	 * Validate the Tx descriptor count: it must be a power of 2 and
	 * within the h/w supported range.
	 */
	if ((!rte_is_power_of_2(tx_desc)) ||
	    tx_desc > pdata->tx_desc_count ||
	    tx_desc < AXGBE_MIN_RING_DESC)
		return -EINVAL;

	/* First allocate the tx queue data structure */
	txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
			  RTE_CACHE_LINE_SIZE);
	if (!txq)
		return -ENOMEM;
	txq->pdata = pdata;
	offloads = tx_conf->offloads |
		dev->data->dev_conf.txmode.offloads;
	txq->nb_desc = tx_desc;
	txq->free_thresh = tx_conf->tx_free_thresh ?
		tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
	if (txq->free_thresh > txq->nb_desc)
		txq->free_thresh = (txq->nb_desc >> 1);
	txq->free_batch_cnt = txq->free_thresh;

	/* The vector Tx path requires the free threshold to evenly
	 * divide the ring size.
	 */
	if (txq->nb_desc % txq->free_thresh != 0)
		txq->vector_disable = 1;

	/* Any configured Tx offload also forces the scalar path */
	if (offloads != 0)
		txq->vector_disable = 1;
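
	/* The descriptor ring lives in DMA-able memory reserved through the
	 * ethdev memzone helper; the software ring shadows it with the mbuf
	 * pointer for each slot so completed buffers can be freed later.
	 */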
	/* Allocate TX ring hardware descriptors */
	tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
	tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
				      tsize, AXGBE_DESC_ALIGN, socket_id);
	if (!tz) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	memset(tz->addr, 0, tsize);
	txq->ring_phys_addr = (uint64_t)tz->iova;
	txq->desc = tz->addr;
	txq->queue_id = queue_idx;
	txq->port_id = dev->data->port_id;
	txq->offloads = offloads;
	txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
				 (DMA_CH_INC * txq->queue_id));
	txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
						  DMA_CH_TDTR_LO);
	txq->cur = 0;
	txq->dirty = 0;
	txq->nb_desc_free = txq->nb_desc;
	/* Allocate software ring */
	tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
	txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
				   RTE_CACHE_LINE_SIZE);
	if (!txq->sw_ring) {
		axgbe_tx_queue_release(txq);
		return -ENOMEM;
	}
	dev->data->tx_queues[queue_idx] = txq;
	if (!pdata->tx_queues)
		pdata->tx_queues = dev->data->tx_queues;

	if ((dev_data->dev_conf.txmode.offloads &
	     RTE_ETH_TX_OFFLOAD_MULTI_SEGS))
		pdata->multi_segs_tx = true;

	if ((dev_data->dev_conf.txmode.offloads &
	     RTE_ETH_TX_OFFLOAD_TCP_TSO))
		pdata->tso_tx = true;

	return 0;
}

int axgbe_dev_fw_version_get(struct rte_eth_dev *eth_dev,
			     char *fw_version, size_t fw_size)
{
	struct axgbe_port *pdata;
	struct axgbe_hw_features *hw_feat;
	int ret;

	pdata = (struct axgbe_port *)eth_dev->data->dev_private;
	hw_feat = &pdata->hw_feat;

	ret = snprintf(fw_version, fw_size, "%d.%d.%d",
		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, USERVER),
		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, DEVID),
		       AXGMAC_GET_BITS(hw_feat->version, MAC_VR, SNPSVER));
	if (ret < 0)
		return -EINVAL;

	ret += 1; /* add the size of '\0' */
	if (fw_size < (size_t)ret)
		return ret;
	else
		return 0;
}

static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
				      unsigned int queue)
{
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* The Tx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Tx queue to empty the Tx fifo. Don't
	 * wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
		if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
		    (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG_LINE(ERR,
				 "timed out waiting for Tx queue %u to empty",
				 queue);
}

static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
				  unsigned int queue)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
		return axgbe_txq_prepare_tx_stop(pdata, queue);

	/* Calculate the status register to read and the position within */
	if (queue < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
	} else {
		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
			DMA_DSRX_TPS_START;
	}

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state. Don't wait forever though...
	 */
	tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
					       rte_get_timer_hz());
	while (time_before(rte_get_timer_cycles(), tx_timeout)) {
		tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		rte_delay_us(900);
	}

	if (!time_before(rte_get_timer_cycles(), tx_timeout))
		PMD_DRV_LOG_LINE(ERR,
				 "timed out waiting for Tx DMA channel %u to stop",
				 queue);
}

void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	/* Prepare for stopping DMA channel */
	for (i = 0; i < pdata->tx_q_count; i++) {
		txq = dev->data->tx_queues[i];
		axgbe_prepare_tx_stop(pdata, i);
	}
	/* Disable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
					0);
	/* Disable each Tx DMA channel */
	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
	}
}

void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
{
	struct axgbe_tx_queue *txq;
	struct axgbe_port *pdata = dev->data->dev_private;
	unsigned int i;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];
		/* Enable Tx DMA channel */
		AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
	}
	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
					MTL_Q_ENABLED);
	/* Enable MAC Tx */
	AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
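
/* Tx completion accounting: txq->cur counts descriptors handed to hardware
 * and txq->dirty those already reclaimed, so (cur - dirty) descriptors are
 * outstanding. Cleanup walks from dirty towards cur and stops at the first
 * descriptor that hardware still owns.
 */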
/* Free mbuf segments for Tx descriptors that hardware has completed */
static void
axgbe_xmit_cleanup_seg(struct axgbe_tx_queue *txq)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
	while (txq->cur != txq->dirty) {
		if (unlikely(idx == txq->nb_desc))
			idx = 0;
		desc = &txq->desc[idx];
		/* Check for ownership */
		if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
			return;
		memset((void *)&desc->desc2, 0, 8);
		/* Free mbuf */
		rte_pktmbuf_free_seg(txq->sw_ring[idx]);
		txq->sw_ring[idx++] = NULL;
		txq->dirty++;
	}
}

/* Free mbufs for Tx descriptors that hardware has completed */
static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
	while (txq->cur != txq->dirty) {
		if (unlikely(idx == txq->nb_desc))
			idx = 0;
		desc = &txq->desc[idx];
		/* Check for ownership */
		if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
			return;
		memset((void *)&desc->desc2, 0, 8);
		/* Free mbuf */
		rte_pktmbuf_free(txq->sw_ring[idx]);
		txq->sw_ring[idx++] = NULL;
		txq->dirty++;
	}
}

/* Tx descriptor formation
 * Assumes the mbuf is linear: a non-TSO packet takes a single
 * descriptor, a TSO packet takes two (header + payload).
 */
static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
			 struct rte_mbuf *mbuf)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;
	uint64_t mask;
	int start_index;
	uint64_t l2_len = 0;
	uint64_t l3_len = 0;
	uint64_t l4_len = 0;
	uint64_t tso_segz = 0;
	uint64_t total_hdr_len;
	int tso = 0;

	/* Parameters required for TSO */
	l2_len = mbuf->l2_len;
	l3_len = mbuf->l3_len;
	l4_len = mbuf->l4_len;
	total_hdr_len = l2_len + l3_len + l4_len;
	tso_segz = mbuf->tso_segsz;

	if (txq->pdata->tso_tx)
		tso = 1;
	else
		tso = 0;

	/* Program the maximum segment size for segmentation offload */
	AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, MSS, tso_segz);

	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	desc = &txq->desc[idx];

	/* Save the start index so the OWN bit can be set last */
	start_index = idx;
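	/* With TSO the frame is split across two descriptors: the first
	 * carries only the L2/L3/L4 headers (HL_B1L = total_hdr_len), the
	 * second the payload, with TPL holding the payload length and THL
	 * the TCP header length in 32-bit words.
	 */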
	if (tso) {
		/* Update buffer address and length */
		desc->baddr = rte_mbuf_data_iova(mbuf);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
				   total_hdr_len);
	} else {
		/* Update buffer address and length */
		desc->baddr = rte_mbuf_data_iova(mbuf);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
				   mbuf->pkt_len);
		/* Total msg length to transmit */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
				   mbuf->pkt_len);
	}
	/* Timestamp enablement check */
	if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, TTSE, 1);
	rte_wmb();
	/* Mark it as the First Descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
	/* Mark it as a NORMAL descriptor */
	AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
	if (tso) {
		/* Register settings for TSO */
		/* Enable TSO */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TSE, 1);
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, TPL,
				   ((mbuf->pkt_len) - total_hdr_len));
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, THL,
				   (l4_len / 4));
		rte_wmb();
		/* The second descriptor carries the payload */
		txq->cur++;
		idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
		desc = &txq->desc[idx];
		desc->baddr = rte_mbuf_data_iova(mbuf);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
				   (mbuf->pkt_len) - total_hdr_len);
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
		/* Mark it as a NORMAL descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
	} else {
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
	}
	/* Configure h/w checksum offload */
	mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
	if (mask == RTE_MBUF_F_TX_TCP_CKSUM || mask == RTE_MBUF_F_TX_UDP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
	else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
	rte_wmb();

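	/* For VLAN insertion the descriptor is flagged as a CONTEXT
	 * descriptor carrying the tag (VT/VLTV), and VTIR requests that
	 * the tag be inserted on transmit.
	 */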
	if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
		/* Mark it as a CONTEXT descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   CTXT, 1);
		/* Set the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   VT, mbuf->vlan_tci);
		/* Indicate this descriptor contains the VLAN tag */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
				   VLTV, 1);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
				   TX_NORMAL_DESC2_VLAN_INSERT);
	} else {
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR, 0x0);
	}

	if (!tso) {
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
		rte_wmb();
	} else {
		/* Set the OWN bit on the first descriptor last */
		desc = &txq->desc[start_index];
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
		rte_wmb();
	}

	/* Save mbuf */
	txq->sw_ring[idx] = mbuf;
	/* Update current index */
	txq->cur++;
	/* Update stats */
	txq->bytes += mbuf->pkt_len;

	return 0;
}

/* Tx descriptor formation for a segmented mbuf:
 * each segment requires its own descriptor
 */
static int
axgbe_xmit_hw_seg(struct axgbe_tx_queue *txq,
		  struct rte_mbuf *mbuf)
{
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;
	uint64_t mask;
	int start_index;
	uint32_t pkt_len = 0;
	int nb_desc_free;
	struct rte_mbuf *tx_pkt;
	uint32_t tso = 0;

	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);

	if (mbuf->nb_segs > nb_desc_free) {
		axgbe_xmit_cleanup_seg(txq);
		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
		if (unlikely(mbuf->nb_segs > nb_desc_free))
			return RTE_ETH_TX_DESC_UNAVAIL;
	}

	if (txq->pdata->tso_tx)
		tso = 1;
	else
		tso = 0;

	if (tso) {
		axgbe_xmit_hw(txq, mbuf);
	} else {
		idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
		desc = &txq->desc[idx];
		/* Save the start index so the OWN bit can be set last */
		start_index = idx;
		tx_pkt = mbuf;
		/* Max_pkt len = 9018; need to update it according to Jumbo pkt size */
		pkt_len = tx_pkt->pkt_len;

		/* Update buffer address and length */
		desc->baddr = rte_mbuf_data_iova(tx_pkt);
		AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
				   tx_pkt->data_len);
		/* Total msg length to transmit */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
				   tx_pkt->pkt_len);
		/* Timestamp enablement check */
		if (mbuf->ol_flags & RTE_MBUF_F_TX_IEEE1588_TMST)
			AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2,
					   TTSE, 1);
		rte_wmb();
		/* Mark it as First Descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
		/* Mark it as a NORMAL descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
		/* Configure h/w checksum offload */
		mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
		if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
		    mask == RTE_MBUF_F_TX_UDP_CKSUM)
			AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3,
					   CIC, 0x3);
		else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
			AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3,
					   CIC, 0x1);
		rte_wmb();
		if (mbuf->ol_flags & (RTE_MBUF_F_TX_VLAN | RTE_MBUF_F_TX_QINQ)) {
			/* Mark it as a CONTEXT descriptor */
			AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
					   CTXT, 1);
			/* Set the VLAN tag */
			AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
					   VT, mbuf->vlan_tci);
			/* Indicate this descriptor contains the VLAN tag */
			AXGMAC_SET_BITS_LE(desc->desc3, TX_CONTEXT_DESC3,
					   VLTV, 1);
			AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, VTIR,
					   TX_NORMAL_DESC2_VLAN_INSERT);
		} else {
			AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2,
					   VTIR, 0x0);
		}
		rte_wmb();
		/* Save mbuf */
		txq->sw_ring[idx] = tx_pkt;
		/* Update current index */
		txq->cur++;
		tx_pkt = tx_pkt->next;
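		/* Chain the remaining segments: each gets its own descriptor
		 * holding just a buffer address and length and is handed to
		 * hardware (OWN) immediately; FD/FL stay on the first
		 * descriptor and LD is set on the last one below.
		 */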
		while (tx_pkt != NULL) {
			idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
			desc = &txq->desc[idx];
			/* Update buffer address and length */
			desc->baddr = rte_mbuf_data_iova(tx_pkt);
			AXGMAC_SET_BITS_LE(desc->desc2,
					   TX_NORMAL_DESC2, HL_B1L,
					   tx_pkt->data_len);
			rte_wmb();
			/* Mark it as a NORMAL descriptor */
			AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3,
					   CTXT, 0);
			/* Configure h/w checksum offload */
			mask = mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK;
			if (mask == RTE_MBUF_F_TX_TCP_CKSUM ||
			    mask == RTE_MBUF_F_TX_UDP_CKSUM)
				AXGMAC_SET_BITS_LE(desc->desc3,
						   TX_NORMAL_DESC3, CIC, 0x3);
			else if (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM)
				AXGMAC_SET_BITS_LE(desc->desc3,
						   TX_NORMAL_DESC3, CIC, 0x1);
			rte_wmb();
			/* Set OWN bit */
			AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3,
					   OWN, 1);
			rte_wmb();
			/* Save mbuf */
			txq->sw_ring[idx] = tx_pkt;
			/* Update current index */
			txq->cur++;
			tx_pkt = tx_pkt->next;
		}

		/* Set LD bit for the last descriptor */
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
		rte_wmb();

		/* Update stats */
		txq->bytes += pkt_len;

		/* Set OWN bit for the first descriptor */
		desc = &txq->desc[start_index];
		AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
		rte_wmb();
	}
	return 0;
}

/* EAL supported Tx burst wrapper - segmented */
uint16_t
axgbe_xmit_pkts_seg(void *tx_queue, struct rte_mbuf **tx_pkts,
		    uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();

	struct axgbe_tx_queue *txq;
	uint16_t nb_desc_free;
	uint16_t nb_pkt_sent = 0;
	uint16_t idx;
	uint32_t tail_addr;
	struct rte_mbuf *mbuf = NULL;

	if (unlikely(nb_pkts == 0))
		return nb_pkts;

	txq = (struct axgbe_tx_queue *)tx_queue;

	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
	if (unlikely(nb_desc_free <= txq->free_thresh)) {
		axgbe_xmit_cleanup_seg(txq);
		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
		if (unlikely(nb_desc_free == 0))
			return 0;
	}

	while (nb_pkts--) {
		mbuf = *tx_pkts++;

		if (axgbe_xmit_hw_seg(txq, mbuf))
			goto out;
		nb_pkt_sent++;
	}
out:
	/* Sync read and write */
	rte_mb();
	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	tail_addr = low32_value(txq->ring_phys_addr +
				idx * sizeof(struct axgbe_tx_desc));
	/* Update tail reg with next immediate address to kick Tx DMA channel */
	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
	txq->pkts += nb_pkt_sent;
	return nb_pkt_sent;
}

/* EAL supported Tx burst wrapper */
uint16_t
axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	PMD_INIT_FUNC_TRACE();

	if (unlikely(nb_pkts == 0))
		return nb_pkts;

	struct axgbe_tx_queue *txq;
	uint16_t nb_desc_free;
	uint16_t nb_pkt_sent = 0;
	uint16_t idx;
	uint32_t tail_addr;
	struct rte_mbuf *mbuf;

	txq = (struct axgbe_tx_queue *)tx_queue;
	nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);

	if (unlikely(nb_desc_free <= txq->free_thresh)) {
		axgbe_xmit_cleanup(txq);
		nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
		if (unlikely(nb_desc_free == 0))
			return 0;
	}
	nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
	while (nb_pkts--) {
		mbuf = *tx_pkts++;
		if (axgbe_xmit_hw(txq, mbuf))
			goto out;
		nb_pkt_sent++;
	}
out:
	/* Sync read and write */
	rte_mb();
	idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
	tail_addr = low32_value(txq->ring_phys_addr +
				idx * sizeof(struct axgbe_tx_desc));
	/* Update tail reg with next immediate address to kick Tx DMA channel */
	AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
	txq->pkts += nb_pkt_sent;
	return nb_pkt_sent;
}
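
/* Release every Rx and Tx queue and mark each one stopped. */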
void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
{
	PMD_INIT_FUNC_TRACE();
	uint8_t i;
	struct axgbe_rx_queue *rxq;
	struct axgbe_tx_queue *txq;

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		rxq = dev->data->rx_queues[i];

		if (rxq) {
			axgbe_rx_queue_release(rxq);
			dev->data->rx_queues[i] = NULL;
		}
		dev->data->rx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		txq = dev->data->tx_queues[i];

		if (txq) {
			axgbe_tx_queue_release(txq);
			dev->data->tx_queues[i] = NULL;
		}
		dev->data->tx_queue_state[i] = RTE_ETH_QUEUE_STATE_STOPPED;
	}
}

int
axgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct axgbe_rx_queue *rxq = rx_queue;
	volatile union axgbe_rx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= rxq->nb_desc))
		return -EINVAL;

	if (offset >= rxq->nb_desc - rxq->dirty)
		return RTE_ETH_RX_DESC_UNAVAIL;

	idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
	desc = &rxq->desc[idx + offset];

	if (!AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
axgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct axgbe_tx_queue *txq = tx_queue;
	volatile struct axgbe_tx_desc *desc;
	uint16_t idx;

	if (unlikely(offset >= txq->nb_desc))
		return -EINVAL;

	if (offset >= txq->nb_desc - txq->dirty)
		return RTE_ETH_TX_DESC_UNAVAIL;

	idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt - 1);
	desc = &txq->desc[idx + offset];

	if (!AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}