/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Aquantia Corporation
 */

#include <rte_malloc.h>
#include <ethdev_driver.h>
#include <rte_net.h>

#include "atl_ethdev.h"
#include "atl_hw_regs.h"

#include "atl_logs.h"
#include "hw_atl/hw_atl_llh.h"
#include "hw_atl/hw_atl_b0.h"
#include "hw_atl/hw_atl_b0_internal.h"

#define ATL_TX_CKSUM_OFFLOAD_MASK (		\
	RTE_MBUF_F_TX_IP_CKSUM |		\
	RTE_MBUF_F_TX_L4_MASK |			\
	RTE_MBUF_F_TX_TCP_SEG)

#define ATL_TX_OFFLOAD_MASK (			\
	RTE_MBUF_F_TX_VLAN |			\
	RTE_MBUF_F_TX_IPV6 |			\
	RTE_MBUF_F_TX_IPV4 |			\
	RTE_MBUF_F_TX_IP_CKSUM |		\
	RTE_MBUF_F_TX_L4_MASK |			\
	RTE_MBUF_F_TX_TCP_SEG)

#define ATL_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ ATL_TX_OFFLOAD_MASK)

/**
 * Structure associated with each descriptor of the RX ring of a RX queue.
 */
struct atl_rx_entry {
	struct rte_mbuf *mbuf;
};

/**
 * Structure associated with each descriptor of the TX ring of a TX queue.
 */
struct atl_tx_entry {
	struct rte_mbuf *mbuf;
	uint16_t next_id;
	uint16_t last_id;
};

/**
 * Structure associated with each RX queue.
 */
struct atl_rx_queue {
	struct rte_mempool *mb_pool;
	struct hw_atl_rxd_s *hw_ring;
	uint64_t hw_ring_phys_addr;
	struct atl_rx_entry *sw_ring;
	uint16_t nb_rx_desc;
	uint16_t rx_tail;
	uint16_t nb_rx_hold;
	uint16_t rx_free_thresh;
	uint16_t queue_id;
	uint16_t port_id;
	uint16_t buff_size;
	bool l3_csum_enabled;
	bool l4_csum_enabled;
};

/**
 * Structure associated with each TX queue.
 */
struct atl_tx_queue {
	struct hw_atl_txd_s *hw_ring;
	uint64_t hw_ring_phys_addr;
	struct atl_tx_entry *sw_ring;
	uint16_t nb_tx_desc;
	uint16_t tx_tail;
	uint16_t tx_head;
	uint16_t queue_id;
	uint16_t port_id;
	uint16_t tx_free_thresh;
	uint16_t tx_free;
};

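/*
 * Reset an Rx ring to its initial software state: clear the buffer and
 * header address of every descriptor and rewind the software tail.
 */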
static inline void
atl_reset_rx_queue(struct atl_rx_queue *rxq)
{
	struct hw_atl_rxd_s *rxd = NULL;
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < rxq->nb_rx_desc; i++) {
		rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i];
		rxd->buf_addr = 0;
		rxd->hdr_addr = 0;
	}

	rxq->rx_tail = 0;
}

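/*
 * Allocate and initialize an Rx queue: validate the requested descriptor
 * count, allocate the software ring and the DMA memzone for the hardware
 * ring, and record the checksum offload configuration.
 */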
int
atl_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
		   uint16_t nb_rx_desc, unsigned int socket_id,
		   const struct rte_eth_rxconf *rx_conf,
		   struct rte_mempool *mb_pool)
{
	struct atl_rx_queue *rxq;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure a valid number of descriptors have been requested */
	if (nb_rx_desc < AQ_HW_MIN_RX_RING_SIZE ||
	    nb_rx_desc > AQ_HW_MAX_RX_RING_SIZE) {
		PMD_INIT_LOG(ERR, "Number of Rx descriptors must be "
			"less than or equal to %d, "
			"greater than or equal to %d", AQ_HW_MAX_RX_RING_SIZE,
			AQ_HW_MIN_RX_RING_SIZE);
		return -EINVAL;
	}

	/*
	 * If this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * a different socket than was previously used.
	 */
	if (dev->data->rx_queues[rx_queue_id] != NULL) {
		atl_rx_queue_release(dev, rx_queue_id);
		dev->data->rx_queues[rx_queue_id] = NULL;
	}

	/* allocate memory for the queue structure */
	rxq = rte_zmalloc_socket("atlantic Rx queue", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
		return -ENOMEM;
	}

	/* setup queue */
	rxq->mb_pool = mb_pool;
	rxq->nb_rx_desc = nb_rx_desc;
	rxq->port_id = dev->data->port_id;
	rxq->queue_id = rx_queue_id;
	rxq->rx_free_thresh = rx_conf->rx_free_thresh;

	rxq->l3_csum_enabled = dev->data->dev_conf.rxmode.offloads &
		RTE_ETH_RX_OFFLOAD_IPV4_CKSUM;
	rxq->l4_csum_enabled = dev->data->dev_conf.rxmode.offloads &
		(RTE_ETH_RX_OFFLOAD_UDP_CKSUM | RTE_ETH_RX_OFFLOAD_TCP_CKSUM);
	if (dev->data->dev_conf.rxmode.offloads & RTE_ETH_RX_OFFLOAD_KEEP_CRC)
		PMD_DRV_LOG(ERR, "PMD does not support KEEP_CRC offload");

	/* allocate memory for the software ring */
	rxq->sw_ring = rte_zmalloc_socket("atlantic sw rx ring",
				nb_rx_desc * sizeof(struct atl_rx_entry),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL) {
		PMD_INIT_LOG(ERR,
			"Port %d: Cannot allocate software ring for queue %d",
			rxq->port_id, rxq->queue_id);
		rte_free(rxq);
		return -ENOMEM;
	}

	/*
	 * Allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = rte_eth_dma_zone_reserve(dev, "rx hw_ring", rx_queue_id,
				      HW_ATL_B0_MAX_RXD *
					sizeof(struct hw_atl_rxd_s),
				      128, socket_id);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR,
			"Port %d: Cannot allocate hardware ring for queue %d",
			rxq->port_id, rxq->queue_id);
		rte_free(rxq->sw_ring);
		rte_free(rxq);
		return -ENOMEM;
	}
	rxq->hw_ring = mz->addr;
	rxq->hw_ring_phys_addr = mz->iova;

	atl_reset_rx_queue(rxq);

	dev->data->rx_queues[rx_queue_id] = rxq;
	return 0;
}

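/*
 * Reset a Tx ring to its initial software state: mark every descriptor
 * as completed (dd = 1), drop any mbuf references and rewind the
 * head/tail/free counters.
 */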
static inline void
atl_reset_tx_queue(struct atl_tx_queue *txq)
{
	struct atl_tx_entry *tx_entry;
	union hw_atl_txc_s *txc;
	uint16_t i;

	PMD_INIT_FUNC_TRACE();

	if (!txq) {
		PMD_DRV_LOG(ERR, "Pointer to txq is NULL");
		return;
	}

	tx_entry = txq->sw_ring;

	for (i = 0; i < txq->nb_tx_desc; i++) {
		txc = (union hw_atl_txc_s *)&txq->hw_ring[i];
		txc->flags1 = 0;
		txc->flags2 = 2;
	}

	for (i = 0; i < txq->nb_tx_desc; i++) {
		txq->hw_ring[i].dd = 1;
		tx_entry[i].mbuf = NULL;
	}

	txq->tx_tail = 0;
	txq->tx_head = 0;
	txq->tx_free = txq->nb_tx_desc - 1;
}

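/*
 * Allocate and initialize a Tx queue: validate the requested descriptor
 * count and allocate the software ring and the DMA memzone for the
 * hardware ring.
 */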
int
atl_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
		   uint16_t nb_tx_desc, unsigned int socket_id,
		   const struct rte_eth_txconf *tx_conf)
{
	struct atl_tx_queue *txq;
	const struct rte_memzone *mz;

	PMD_INIT_FUNC_TRACE();

	/* make sure a valid number of descriptors have been requested */
	if (nb_tx_desc < AQ_HW_MIN_TX_RING_SIZE ||
	    nb_tx_desc > AQ_HW_MAX_TX_RING_SIZE) {
		PMD_INIT_LOG(ERR, "Number of Tx descriptors must be "
			"less than or equal to %d, "
			"greater than or equal to %d", AQ_HW_MAX_TX_RING_SIZE,
			AQ_HW_MIN_TX_RING_SIZE);
		return -EINVAL;
	}

	/*
	 * If this queue existed already, free the associated memory. The
	 * queue cannot be reused in case we need to allocate memory on
	 * a different socket than was previously used.
	 */
	if (dev->data->tx_queues[tx_queue_id] != NULL) {
		atl_tx_queue_release(dev, tx_queue_id);
		dev->data->tx_queues[tx_queue_id] = NULL;
	}

	/* allocate memory for the queue structure */
	txq = rte_zmalloc_socket("atlantic Tx queue", sizeof(*txq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (txq == NULL) {
		PMD_INIT_LOG(ERR, "Cannot allocate queue structure");
		return -ENOMEM;
	}

	/* setup queue */
	txq->nb_tx_desc = nb_tx_desc;
	txq->port_id = dev->data->port_id;
	txq->queue_id = tx_queue_id;
	txq->tx_free_thresh = tx_conf->tx_free_thresh;

	/* allocate memory for the software ring */
	txq->sw_ring = rte_zmalloc_socket("atlantic sw tx ring",
				nb_tx_desc * sizeof(struct atl_tx_entry),
				RTE_CACHE_LINE_SIZE, socket_id);
	if (txq->sw_ring == NULL) {
		PMD_INIT_LOG(ERR,
			"Port %d: Cannot allocate software ring for queue %d",
			txq->port_id, txq->queue_id);
		rte_free(txq);
		return -ENOMEM;
	}

	/*
	 * Allocate memory for the hardware descriptor ring. A memzone large
	 * enough to hold the maximum ring size is requested to allow for
	 * resizing in later calls to the queue setup function.
	 */
	mz = rte_eth_dma_zone_reserve(dev, "tx hw_ring", tx_queue_id,
				HW_ATL_B0_MAX_TXD * sizeof(struct hw_atl_txd_s),
				128, socket_id);
	if (mz == NULL) {
		PMD_INIT_LOG(ERR,
			"Port %d: Cannot allocate hardware ring for queue %d",
			txq->port_id, txq->queue_id);
		rte_free(txq->sw_ring);
		rte_free(txq);
		return -ENOMEM;
	}
	txq->hw_ring = mz->addr;
	txq->hw_ring_phys_addr = mz->iova;

	atl_reset_tx_queue(txq);

	dev->data->tx_queues[tx_queue_id] = txq;
	return 0;
}

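/*
 * Program the base address and size of every configured Tx ring
 * into the hardware.
 */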
int
atl_tx_init(struct rte_eth_dev *eth_dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct atl_tx_queue *txq;
	uint64_t base_addr = 0;
	int i = 0;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_tx_queues; i++) {
		txq = eth_dev->data->tx_queues[i];
		base_addr = txq->hw_ring_phys_addr;

		err = hw_atl_b0_hw_ring_tx_init(hw, base_addr,
						txq->queue_id,
						txq->nb_tx_desc, 0,
						txq->port_id);

		if (err) {
			PMD_INIT_LOG(ERR,
				"Port %d: Cannot init TX queue %d",
				txq->port_id, txq->queue_id);
			break;
		}
	}

	return err;
}

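/*
 * Program every configured Rx ring into the hardware, derive the
 * per-queue receive buffer size from the mbuf pool, and set up a
 * default RSS indirection table spread across the Rx queues.
 */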
int
atl_rx_init(struct rte_eth_dev *eth_dev)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private);
	struct aq_rss_parameters *rss_params = &hw->aq_nic_cfg->aq_rss;
	struct atl_rx_queue *rxq;
	uint64_t base_addr = 0;
	int i = 0;
	int err = 0;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		rxq = eth_dev->data->rx_queues[i];
		base_addr = rxq->hw_ring_phys_addr;

		/* Take requested pool mbuf size and adapt
		 * descriptor buffer to best fit
		 */
		int buff_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
				RTE_PKTMBUF_HEADROOM;

		buff_size = RTE_ALIGN_FLOOR(buff_size, 1024);
		if (buff_size > HW_ATL_B0_RXD_BUF_SIZE_MAX) {
			PMD_INIT_LOG(WARNING,
				"Port %d queue %d: mem pool buff size is too big",
				rxq->port_id, rxq->queue_id);
			buff_size = HW_ATL_B0_RXD_BUF_SIZE_MAX;
		}
		if (buff_size < 1024) {
			PMD_INIT_LOG(ERR,
				"Port %d queue %d: mem pool buff size is too small",
				rxq->port_id, rxq->queue_id);
			return -EINVAL;
		}
		rxq->buff_size = buff_size;

		err = hw_atl_b0_hw_ring_rx_init(hw, base_addr, rxq->queue_id,
						rxq->nb_rx_desc, buff_size, 0,
						rxq->port_id);

		if (err) {
			PMD_INIT_LOG(ERR, "Port %d: Cannot init RX queue %d",
				     rxq->port_id, rxq->queue_id);
			break;
		}
	}

	for (i = rss_params->indirection_table_size; i--;)
		rss_params->indirection_table[i] = i &
			(eth_dev->data->nb_rx_queues - 1);
	hw_atl_b0_hw_rss_set(hw, rss_params);
	return err;
}

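/*
 * Populate an Rx ring: allocate one mbuf per descriptor and point each
 * descriptor at its buffer.
 */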
static int
atl_alloc_rx_queue_mbufs(struct atl_rx_queue *rxq)
{
	struct atl_rx_entry *rx_entry = rxq->sw_ring;
	struct hw_atl_rxd_s *rxd;
	uint64_t dma_addr = 0;
	uint32_t i = 0;

	PMD_INIT_FUNC_TRACE();

	/* fill Rx ring */
	for (i = 0; i < rxq->nb_rx_desc; i++) {
		struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);

		if (mbuf == NULL) {
			PMD_INIT_LOG(ERR,
				"Port %d: mbuf alloc failed for rx queue %d",
				rxq->port_id, rxq->queue_id);
			return -ENOMEM;
		}

		mbuf->data_off = RTE_PKTMBUF_HEADROOM;
		mbuf->port = rxq->port_id;

		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(mbuf));
		rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[i];
		rxd->buf_addr = dma_addr;
		rxd->hdr_addr = 0;
		rx_entry[i].mbuf = mbuf;
	}

	return 0;
}

static void
atl_rx_queue_release_mbufs(struct atl_rx_queue *rxq)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	if (rxq->sw_ring != NULL) {
		for (i = 0; i < rxq->nb_rx_desc; i++) {
			if (rxq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf);
				rxq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

int
atl_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_rx_queue *rxq = NULL;

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];

		if (atl_alloc_rx_queue_mbufs(rxq) != 0) {
			PMD_INIT_LOG(ERR,
				"Port %d: Allocate mbufs for queue %d failed",
				rxq->port_id, rxq->queue_id);
			return -1;
		}

		hw_atl_b0_hw_ring_rx_start(hw, rx_queue_id);

		rte_wmb();
		hw_atl_reg_rx_dma_desc_tail_ptr_set(hw, rxq->nb_rx_desc - 1,
						    rx_queue_id);
		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		return -1;
	}

	return 0;
}

int
atl_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_rx_queue *rxq = NULL;

	PMD_INIT_FUNC_TRACE();

	if (rx_queue_id < dev->data->nb_rx_queues) {
		rxq = dev->data->rx_queues[rx_queue_id];

		hw_atl_b0_hw_ring_rx_stop(hw, rx_queue_id);

		atl_rx_queue_release_mbufs(rxq);
		atl_reset_rx_queue(rxq);

		dev->data->rx_queue_state[rx_queue_id] =
			RTE_ETH_QUEUE_STATE_STOPPED;
	} else {
		return -1;
	}

	return 0;
}

void
atl_rx_queue_release(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	struct atl_rx_queue *rxq = dev->data->rx_queues[rx_queue_id];

	PMD_INIT_FUNC_TRACE();

	if (rxq != NULL) {
		atl_rx_queue_release_mbufs(rxq);
		rte_free(rxq->sw_ring);
		rte_free(rxq);
	}
}

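/* Free all mbufs still referenced by the Tx software ring. */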
static void
atl_tx_queue_release_mbufs(struct atl_tx_queue *txq)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	if (txq->sw_ring != NULL) {
		for (i = 0; i < txq->nb_tx_desc; i++) {
			if (txq->sw_ring[i].mbuf != NULL) {
				rte_pktmbuf_free_seg(txq->sw_ring[i].mbuf);
				txq->sw_ring[i].mbuf = NULL;
			}
		}
	}
}

int
atl_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_INIT_FUNC_TRACE();

	if (tx_queue_id < dev->data->nb_tx_queues) {
		hw_atl_b0_hw_ring_tx_start(hw, tx_queue_id);

		rte_wmb();
		hw_atl_b0_hw_tx_ring_tail_update(hw, 0, tx_queue_id);
		dev->data->tx_queue_state[tx_queue_id] =
			RTE_ETH_QUEUE_STATE_STARTED;
	} else {
		return -1;
	}

	return 0;
}

int
atl_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_tx_queue *txq;

	PMD_INIT_FUNC_TRACE();

	txq = dev->data->tx_queues[tx_queue_id];

	hw_atl_b0_hw_ring_tx_stop(hw, tx_queue_id);

	atl_tx_queue_release_mbufs(txq);
	atl_reset_tx_queue(txq);
	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	return 0;
}

void
atl_tx_queue_release(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	struct atl_tx_queue *txq = dev->data->tx_queues[tx_queue_id];

	PMD_INIT_FUNC_TRACE();

	if (txq != NULL) {
		atl_tx_queue_release_mbufs(txq);
		rte_free(txq->sw_ring);
		rte_free(txq);
	}
}

void
atl_free_queues(struct rte_eth_dev *dev)
{
	unsigned int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		atl_rx_queue_release(dev, i);
		dev->data->rx_queues[i] = 0;
	}
	dev->data->nb_rx_queues = 0;

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		atl_tx_queue_release(dev, i);
		dev->data->tx_queues[i] = 0;
	}
	dev->data->nb_tx_queues = 0;
}

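/* Start all Tx queues, then all Rx queues; fail on the first error. */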
int
atl_start_queues(struct rte_eth_dev *dev)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (atl_tx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR,
				"Port %d: Start Tx queue %d failed",
				dev->data->port_id, i);
			return -1;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (atl_rx_queue_start(dev, i) != 0) {
			PMD_DRV_LOG(ERR,
				"Port %d: Start Rx queue %d failed",
				dev->data->port_id, i);
			return -1;
		}
	}

	return 0;
}

int
atl_stop_queues(struct rte_eth_dev *dev)
{
	int i;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < dev->data->nb_tx_queues; i++) {
		if (atl_tx_queue_stop(dev, i) != 0) {
			PMD_DRV_LOG(ERR,
				"Port %d: Stop Tx queue %d failed",
				dev->data->port_id, i);
			return -1;
		}
	}

	for (i = 0; i < dev->data->nb_rx_queues; i++) {
		if (atl_rx_queue_stop(dev, i) != 0) {
			PMD_DRV_LOG(ERR,
				"Port %d: Stop Rx queue %d failed",
				dev->data->port_id, i);
			return -1;
		}
	}

	return 0;
}

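/*
 * Queue info callbacks backing rte_eth_rx_queue_info_get() and
 * rte_eth_tx_queue_info_get().
 */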
void
atl_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		 struct rte_eth_rxq_info *qinfo)
{
	struct atl_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

	rxq = dev->data->rx_queues[queue_id];

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = rxq->nb_rx_desc;
}

void
atl_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		 struct rte_eth_txq_info *qinfo)
{
	struct atl_tx_queue *txq;

	PMD_INIT_FUNC_TRACE();

	txq = dev->data->tx_queues[queue_id];

	qinfo->nb_desc = txq->nb_tx_desc;
}

/* Return Rx queue avail count */
uint32_t
atl_rx_queue_count(void *rx_queue)
{
	struct atl_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

	rxq = rx_queue;

	if (rxq == NULL)
		return 0;

	return rxq->nb_rx_desc - rxq->nb_rx_hold;
}

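/*
 * Report the state (available / done / unavailable) of the Rx descriptor
 * located "offset" entries past the current software tail.
 */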
int
atl_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct atl_rx_queue *rxq = rx_queue;
	struct hw_atl_rxd_wb_s *rxd;
	uint32_t idx;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(offset >= rxq->nb_rx_desc))
		return -EINVAL;

	if (offset >= rxq->nb_rx_desc - rxq->nb_rx_hold)
		return RTE_ETH_RX_DESC_UNAVAIL;

	idx = rxq->rx_tail + offset;

	if (idx >= rxq->nb_rx_desc)
		idx -= rxq->nb_rx_desc;

	rxd = (struct hw_atl_rxd_wb_s *)&rxq->hw_ring[idx];

	if (rxd->dd)
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
atl_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct atl_tx_queue *txq = tx_queue;
	struct hw_atl_txd_s *txd;
	uint32_t idx;

	PMD_INIT_FUNC_TRACE();

	if (unlikely(offset >= txq->nb_tx_desc))
		return -EINVAL;

	idx = txq->tx_tail + offset;

	if (idx >= txq->nb_tx_desc)
		idx -= txq->nb_tx_desc;

	txd = &txq->hw_ring[idx];

	if (txd->dd)
		return RTE_ETH_TX_DESC_DONE;

	return RTE_ETH_TX_DESC_FULL;
}

static int
atl_rx_enable_intr(struct rte_eth_dev *dev, uint16_t queue_id, bool enable)
{
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);
	struct atl_rx_queue *rxq;

	PMD_INIT_FUNC_TRACE();

	if (queue_id >= dev->data->nb_rx_queues) {
		PMD_DRV_LOG(ERR, "Invalid RX queue id=%d", queue_id);
		return -EINVAL;
	}

	rxq = dev->data->rx_queues[queue_id];

	if (rxq == NULL)
		return 0;

	/* Mapping interrupt vector */
	hw_atl_itr_irq_map_en_rx_set(hw, enable, queue_id);

	return 0;
}

int
atl_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	return atl_rx_enable_intr(eth_dev, queue_id, true);
}

int
atl_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev, uint16_t queue_id)
{
	return atl_rx_enable_intr(eth_dev, queue_id, false);
}

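/*
 * Tx prepare callback: reject packets with too many segments or
 * unsupported offload flags, and run the generic checksum fixups
 * required before the packets are handed to atl_xmit_pkts().
 */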
uint16_t
atl_prep_pkts(__rte_unused void *tx_queue, struct rte_mbuf **tx_pkts,
	      uint16_t nb_pkts)
{
	int i, ret;
	uint64_t ol_flags;
	struct rte_mbuf *m;

	PMD_INIT_FUNC_TRACE();

	for (i = 0; i < nb_pkts; i++) {
		m = tx_pkts[i];
		ol_flags = m->ol_flags;

		if (m->nb_segs > AQ_HW_MAX_SEGS_SIZE) {
			rte_errno = EINVAL;
			return i;
		}

		if (ol_flags & ATL_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			return i;
		}

#ifdef RTE_LIBRTE_ETHDEV_DEBUG
		ret = rte_validate_tx_offload(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
#endif
		ret = rte_net_intel_cksum_prepare(m);
		if (ret != 0) {
			rte_errno = -ret;
			return i;
		}
	}

	return i;
}

static uint64_t
atl_desc_to_offload_flags(struct atl_rx_queue *rxq,
			  struct hw_atl_rxd_wb_s *rxd_wb)
{
	uint64_t mbuf_flags = 0;

	PMD_INIT_FUNC_TRACE();

	/* IPv4 ? */
	if (rxq->l3_csum_enabled && ((rxd_wb->pkt_type & 0x3) == 0)) {
		/* IPv4 csum error ? */
		if (rxd_wb->rx_stat & BIT(1))
			mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		else
			mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
	} else {
		mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN;
	}

	/* CSUM calculated ? */
	if (rxq->l4_csum_enabled && (rxd_wb->rx_stat & BIT(3))) {
		if (rxd_wb->rx_stat & BIT(2))
			mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
		else
			mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
	} else {
		mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN;
	}

	return mbuf_flags;
}

static uint32_t
atl_desc_to_pkt_type(struct hw_atl_rxd_wb_s *rxd_wb)
{
	uint32_t type = RTE_PTYPE_UNKNOWN;
	uint16_t l2_l3_type = rxd_wb->pkt_type & 0x3;
	uint16_t l4_type = (rxd_wb->pkt_type & 0x1C) >> 2;

	switch (l2_l3_type) {
	case 0:
		type = RTE_PTYPE_L3_IPV4;
		break;
	case 1:
		type = RTE_PTYPE_L3_IPV6;
		break;
	case 2:
		type = RTE_PTYPE_L2_ETHER;
		break;
	case 3:
		type = RTE_PTYPE_L2_ETHER_ARP;
		break;
	}

	switch (l4_type) {
	case 0:
		type |= RTE_PTYPE_L4_TCP;
		break;
	case 1:
		type |= RTE_PTYPE_L4_UDP;
		break;
	case 2:
		type |= RTE_PTYPE_L4_SCTP;
		break;
	case 3:
		type |= RTE_PTYPE_L4_ICMP;
		break;
	}

	if (rxd_wb->pkt_type & BIT(5))
		type |= RTE_PTYPE_L2_ETHER_VLAN;

	return type;
}

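/*
 * Receive burst: walk the Rx ring from the software tail, reassemble
 * multi-descriptor packets, refill every consumed slot with a fresh mbuf
 * and lazily push the new tail to hardware once rx_free_thresh
 * descriptors have accumulated.
 */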
uint16_t
atl_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct atl_rx_queue *rxq = (struct atl_rx_queue *)rx_queue;
	struct rte_eth_dev *dev = &rte_eth_devices[rxq->port_id];
	struct atl_adapter *adapter =
		ATL_DEV_TO_ADAPTER(&rte_eth_devices[rxq->port_id]);
	struct aq_hw_s *hw = ATL_DEV_PRIVATE_TO_HW(adapter);
	struct aq_hw_cfg_s *cfg =
		ATL_DEV_PRIVATE_TO_CFG(dev->data->dev_private);
	struct atl_rx_entry *sw_ring = rxq->sw_ring;

	struct rte_mbuf *new_mbuf;
	struct rte_mbuf *rx_mbuf, *rx_mbuf_prev, *rx_mbuf_first;
	struct atl_rx_entry *rx_entry;
	uint16_t nb_rx = 0;
	uint16_t nb_hold = 0;
	struct hw_atl_rxd_wb_s rxd_wb;
	struct hw_atl_rxd_s *rxd = NULL;
	uint16_t tail = rxq->rx_tail;
	uint64_t dma_addr;
	uint16_t pkt_len = 0;

	while (nb_rx < nb_pkts) {
		uint16_t eop_tail = tail;

		rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail];
		rxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;

		if (!rxd_wb.dd) { /* RxD is not done */
			break;
		}

		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u tail=%u "
			   "eop=0x%x pkt_len=%u hash=0x%x hash_type=0x%x",
			   (unsigned int)rxq->port_id,
			   (unsigned int)rxq->queue_id,
			   (unsigned int)tail, (unsigned int)rxd_wb.eop,
			   (unsigned int)rte_le_to_cpu_16(rxd_wb.pkt_len),
			   rxd_wb.rss_hash, rxd_wb.rss_type);

		/* No EOP yet: scan ahead until the whole packet has landed */
		if (!rxd_wb.eop) {
			while (true) {
				struct hw_atl_rxd_wb_s *eop_rxwbd;

				eop_tail = (eop_tail + 1) % rxq->nb_rx_desc;
				eop_rxwbd = (struct hw_atl_rxd_wb_s *)
					&rxq->hw_ring[eop_tail];
				if (!eop_rxwbd->dd) {
					/* no EOP received yet */
					eop_tail = tail;
					break;
				}
				if (eop_rxwbd->dd && eop_rxwbd->eop)
					break;
			}
			/* No EOP in ring */
			if (eop_tail == tail)
				break;
		}
		rx_mbuf_prev = NULL;
		rx_mbuf_first = NULL;

		/* Run through packet segments */
		while (true) {
			new_mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (new_mbuf == NULL) {
				PMD_RX_LOG(DEBUG,
					   "RX mbuf alloc failed port_id=%u "
					   "queue_id=%u",
					   (unsigned int)rxq->port_id,
					   (unsigned int)rxq->queue_id);
				dev->data->rx_mbuf_alloc_failed++;
				adapter->sw_stats.rx_nombuf++;
				goto err_stop;
			}

			nb_hold++;
			rx_entry = &sw_ring[tail];

			rx_mbuf = rx_entry->mbuf;
			rx_entry->mbuf = new_mbuf;
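			/*
			 * Recycle the ring slot: the completed mbuf is handed
			 * to the application and the descriptor is re-armed
			 * with the freshly allocated replacement buffer.
			 */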
			dma_addr = rte_cpu_to_le_64(
				rte_mbuf_data_iova_default(new_mbuf));

			/* setup RX descriptor */
			rxd->hdr_addr = 0;
			rxd->buf_addr = dma_addr;

			/*
			 * Initialize the returned mbuf.
			 * 1) setup generic mbuf fields:
			 *    - number of segments,
			 *    - next segment,
			 *    - packet length,
			 *    - RX port identifier.
			 * 2) integrate hardware offload data, if any:
			 *    - RSS flag & hash,
			 *    - IP checksum flag,
			 *    - VLAN TCI, if any,
			 *    - error flags.
			 */
			pkt_len = (uint16_t)rte_le_to_cpu_16(rxd_wb.pkt_len);
			rx_mbuf->data_off = RTE_PKTMBUF_HEADROOM;
			rte_prefetch1((char *)rx_mbuf->buf_addr +
				rx_mbuf->data_off);
			rx_mbuf->nb_segs = 0;
			rx_mbuf->next = NULL;
			rx_mbuf->pkt_len = pkt_len;
			rx_mbuf->data_len = pkt_len;
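			/*
			 * data_len: intermediate segments always fill a whole
			 * receive buffer; the EOP segment carries the remainder
			 * of pkt_len (a full buffer if it divides evenly).
			 */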
			if (rxd_wb.eop) {
				u16 remainder_len = pkt_len % rxq->buff_size;
				if (!remainder_len)
					remainder_len = rxq->buff_size;
				rx_mbuf->data_len = remainder_len;
			} else {
				rx_mbuf->data_len = pkt_len > rxq->buff_size ?
					rxq->buff_size : pkt_len;
			}
			rx_mbuf->port = rxq->port_id;

			rx_mbuf->hash.rss = rxd_wb.rss_hash;

			rx_mbuf->vlan_tci = rxd_wb.vlan;

			rx_mbuf->ol_flags =
				atl_desc_to_offload_flags(rxq, &rxd_wb);

			rx_mbuf->packet_type = atl_desc_to_pkt_type(&rxd_wb);

			if (rx_mbuf->packet_type & RTE_PTYPE_L2_ETHER_VLAN) {
				rx_mbuf->ol_flags |= RTE_MBUF_F_RX_VLAN;
				rx_mbuf->vlan_tci = rxd_wb.vlan;

				if (cfg->vlan_strip)
					rx_mbuf->ol_flags |=
						RTE_MBUF_F_RX_VLAN_STRIPPED;
			}

			if (!rx_mbuf_first)
				rx_mbuf_first = rx_mbuf;
			rx_mbuf_first->nb_segs++;

			if (rx_mbuf_prev)
				rx_mbuf_prev->next = rx_mbuf;
			rx_mbuf_prev = rx_mbuf;

			tail = (tail + 1) % rxq->nb_rx_desc;
			/* Prefetch next mbufs */
			rte_prefetch0(sw_ring[tail].mbuf);
			if ((tail & 0x3) == 0) {
				rte_prefetch0(&sw_ring[tail]);
				rte_prefetch0(&sw_ring[tail]);
			}

			/* filled mbuf_first */
			if (rxd_wb.eop)
				break;
			rxd = (struct hw_atl_rxd_s *)&rxq->hw_ring[tail];
			rxd_wb = *(struct hw_atl_rxd_wb_s *)rxd;
		}

		/*
		 * Store the mbuf address into the next entry of the array
		 * of returned packets.
		 */
		rx_pkts[nb_rx++] = rx_mbuf_first;
		adapter->sw_stats.q_ipackets[rxq->queue_id]++;
		adapter->sw_stats.q_ibytes[rxq->queue_id] +=
			rx_mbuf_first->pkt_len;

		PMD_RX_LOG(DEBUG, "add mbuf segs=%d pkt_len=%d",
			   rx_mbuf_first->nb_segs,
			   rx_mbuf_first->pkt_len);
	}

err_stop:

	rxq->rx_tail = tail;

	/*
	 * If the number of free RX descriptors is greater than the RX free
	 * threshold of the queue, advance the Receive Descriptor Tail (RDT)
	 * register.
	 * Update the RDT with the value of the last processed RX descriptor
	 * minus 1, to guarantee that the RDT register is never equal to the
	 * RDH register, which creates a "full" ring situation from the
	 * hardware point of view...
	 */
	nb_hold = (uint16_t)(nb_hold + rxq->nb_rx_hold);
	if (nb_hold > rxq->rx_free_thresh) {
		PMD_RX_LOG(DEBUG, "port_id=%u queue_id=%u rx_tail=%u "
			"nb_hold=%u nb_rx=%u",
			(unsigned int)rxq->port_id, (unsigned int)rxq->queue_id,
			(unsigned int)tail, (unsigned int)nb_hold,
			(unsigned int)nb_rx);
		tail = (uint16_t)((tail == 0) ?
			(rxq->nb_rx_desc - 1) : (tail - 1));

		hw_atl_reg_rx_dma_desc_tail_ptr_set(hw, tail, rxq->queue_id);

		nb_hold = 0;
	}

	rxq->nb_rx_hold = nb_hold;

	return nb_rx;
}

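/*
 * Reclaim Tx descriptors already completed by hardware (dd set):
 * free their mbufs, clear the descriptors and advance the head.
 */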
static void
atl_xmit_cleanup(struct atl_tx_queue *txq)
{
	struct atl_tx_entry *sw_ring;
	struct hw_atl_txd_s *txd;
	int to_clean = 0;

	if (txq != NULL) {
		sw_ring = txq->sw_ring;
		int head = txq->tx_head;
		int cnt = head;

		while (true) {
			txd = &txq->hw_ring[cnt];

			if (txd->dd)
				to_clean++;

			cnt = (cnt + 1) % txq->nb_tx_desc;
			if (cnt == txq->tx_tail)
				break;
		}

		if (to_clean == 0)
			return;

		while (to_clean) {
			txd = &txq->hw_ring[head];

			struct atl_tx_entry *tx_entry = &sw_ring[head];

			if (tx_entry->mbuf) {
				rte_pktmbuf_free_seg(tx_entry->mbuf);
				tx_entry->mbuf = NULL;
			}

			if (txd->dd)
				to_clean--;

			txd->buf_addr = 0;
			txd->flags = 0;

			head = (head + 1) % txq->nb_tx_desc;
			txq->tx_free++;
		}

		txq->tx_head = head;
	}
}

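/*
 * Fill the Tx context descriptor for TSO and/or VLAN insertion.
 * Returns the command bits to be propagated into the data descriptors,
 * or 0 if no context descriptor is needed.
 */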
static int
atl_tso_setup(struct rte_mbuf *tx_pkt, union hw_atl_txc_s *txc)
{
	uint32_t tx_cmd = 0;
	uint64_t ol_flags = tx_pkt->ol_flags;

	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG) {
		tx_cmd |= tx_desc_cmd_lso | tx_desc_cmd_l4cs;

		txc->cmd = 0x4;

		if (ol_flags & RTE_MBUF_F_TX_IPV6)
			txc->cmd |= 0x2;

		txc->l2_len = tx_pkt->l2_len;
		txc->l3_len = tx_pkt->l3_len;
		txc->l4_len = tx_pkt->l4_len;

		txc->mss_len = tx_pkt->tso_segsz;
	}

	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
		tx_cmd |= tx_desc_cmd_vlan;
		txc->vlan_tag = tx_pkt->vlan_tci;
	}

	if (tx_cmd) {
		txc->type = tx_desc_type_ctx;
		txc->idx = 0;
	}

	return tx_cmd;
}

static inline void
atl_setup_csum_offload(struct rte_mbuf *mbuf, struct hw_atl_txd_s *txd,
		       uint32_t tx_cmd)
{
	txd->cmd |= tx_desc_cmd_fcs;
	txd->cmd |= (mbuf->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) ?
			tx_desc_cmd_ipv4 : 0;
	/* L4 csum requested */
	txd->cmd |= (mbuf->ol_flags & RTE_MBUF_F_TX_L4_MASK) ?
			tx_desc_cmd_l4cs : 0;
	txd->cmd |= tx_cmd;
}

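/*
 * Enqueue a single packet: write the optional context descriptor,
 * then one data descriptor per mbuf segment, and kick the hardware
 * tail pointer.
 */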
static inline void
atl_xmit_pkt(struct aq_hw_s *hw, struct atl_tx_queue *txq,
	     struct rte_mbuf *tx_pkt)
{
	struct atl_adapter *adapter =
		ATL_DEV_TO_ADAPTER(&rte_eth_devices[txq->port_id]);
	uint32_t pay_len = 0;
	int tail = 0;
	struct atl_tx_entry *tx_entry;
	uint64_t buf_dma_addr;
	struct rte_mbuf *m_seg;
	union hw_atl_txc_s *txc = NULL;
	struct hw_atl_txd_s *txd = NULL;
	u32 tx_cmd = 0U;
	int desc_count = 0;

	tail = txq->tx_tail;

	txc = (union hw_atl_txc_s *)&txq->hw_ring[tail];

	txc->flags1 = 0U;
	txc->flags2 = 0U;

	tx_cmd = atl_tso_setup(tx_pkt, txc);

	if (tx_cmd) {
		/* We've consumed the first desc, adjust counters */
		tail = (tail + 1) % txq->nb_tx_desc;
		txq->tx_tail = tail;
		txq->tx_free -= 1;

		txd = &txq->hw_ring[tail];
		txd->flags = 0U;
	} else {
		txd = (struct hw_atl_txd_s *)txc;
	}

	txd->ct_en = !!tx_cmd;

	txd->type = tx_desc_type_desc;

	atl_setup_csum_offload(tx_pkt, txd, tx_cmd);

	if (tx_cmd)
		txd->ct_idx = 0;

	pay_len = tx_pkt->pkt_len;

	txd->pay_len = pay_len;

	for (m_seg = tx_pkt; m_seg; m_seg = m_seg->next) {
		if (desc_count > 0) {
			txd = &txq->hw_ring[tail];
			txd->flags = 0U;
		}

		buf_dma_addr = rte_mbuf_data_iova(m_seg);
		txd->buf_addr = rte_cpu_to_le_64(buf_dma_addr);

		txd->type = tx_desc_type_desc;
		txd->len = m_seg->data_len;
		txd->pay_len = pay_len;

		/* Store mbuf for freeing later */
		tx_entry = &txq->sw_ring[tail];

		if (tx_entry->mbuf)
			rte_pktmbuf_free_seg(tx_entry->mbuf);
		tx_entry->mbuf = m_seg;

		tail = (tail + 1) % txq->nb_tx_desc;

		desc_count++;
	}

	/* Last descriptor requires EOP and WB */
	txd->eop = 1U;
	txd->cmd |= tx_desc_cmd_wb;

	hw_atl_b0_hw_tx_ring_tail_update(hw, tail, txq->queue_id);

	txq->tx_tail = tail;

	txq->tx_free -= desc_count;

	adapter->sw_stats.q_opackets[txq->queue_id]++;
	adapter->sw_stats.q_obytes[txq->queue_id] += pay_len;
}

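/*
 * Transmit burst: reclaim completed descriptors when the free count
 * drops below tx_free_thresh, then enqueue packets until the ring
 * runs out of space or an invalid mbuf is encountered.
 */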
uint16_t
atl_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct rte_eth_dev *dev = NULL;
	struct aq_hw_s *hw = NULL;
	struct atl_tx_queue *txq = tx_queue;
	struct rte_mbuf *tx_pkt;
	uint16_t nb_tx;

	dev = &rte_eth_devices[txq->port_id];
	hw = ATL_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	PMD_TX_LOG(DEBUG,
		"port %d txq %d pkts: %d tx_free=%d tx_tail=%d tx_head=%d",
		txq->port_id, txq->queue_id, nb_pkts, txq->tx_free,
		txq->tx_tail, txq->tx_head);

	for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) {
		tx_pkt = *tx_pkts++;

		/* Clean Tx queue if needed */
		if (txq->tx_free < txq->tx_free_thresh)
			atl_xmit_cleanup(txq);

		/* Check if we have enough free descriptors */
		if (txq->tx_free < tx_pkt->nb_segs)
			break;

		/* check mbuf is valid */
		if ((tx_pkt->nb_segs == 0) ||
		    ((tx_pkt->nb_segs > 1) && (tx_pkt->next == NULL)))
			break;

		/* Send the packet */
		atl_xmit_pkt(hw, txq, tx_pkt);
	}

	PMD_TX_LOG(DEBUG, "atl_xmit_pkts %d transmitted", nb_tx);

	return nb_tx;
}