/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>

#include "ionic.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_ethdev.h"
#include "ionic_rxtx.h"
#include "ionic_logs.h"

static void
ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)
{
	uint32_t i;

	for (i = idx; i < cnt; i++)
		if (array[i])
			rte_pktmbuf_free_seg(array[i]);

	memset(array, 0, sizeof(void *) * cnt);
}

static void __rte_cold
ionic_tx_empty(struct ionic_tx_qcq *txq)
{
	struct ionic_queue *q = &txq->qcq.q;

	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);
}

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;

	/*
	 * Walk the full info array so that the cleanup includes any
	 * fragments that were left dangling for later reuse
	 */
	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);

	ionic_empty_array((void **)rxq->mbs,
			IONIC_MBUF_BULK_ALLOC, rxq->mb_idx);
	rxq->mb_idx = 0;
}

/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	if (txq->flags & IONIC_QCQ_F_FAST_FREE)
		qinfo->conf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}

void __rte_cold
ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];

	IONIC_PRINT_CALL();

	ionic_qcq_free(&txq->qcq);
}

int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
{
	ionic_dev_tx_queue_stop_firsthalf(dev, tx_queue_id);
	ionic_dev_tx_queue_stop_secondhalf(dev, tx_queue_id);

	return 0;
}

void __rte_cold
ionic_dev_tx_queue_stop_firsthalf(struct rte_eth_dev *dev,
		uint16_t tx_queue_id)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_lif_txq_deinit_nowait(txq);
}

void __rte_cold
ionic_dev_tx_queue_stop_secondhalf(struct rte_eth_dev *dev,
		uint16_t tx_queue_id)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(dev);
	struct ionic_tx_qcq *txq = dev->data->tx_queues[tx_queue_id];

	ionic_adminq_wait(lif, &txq->admin_ctx);

	/* Free all buffers from descriptor ring */
	ionic_tx_empty(txq);

	ionic_lif_txq_stats(txq);
}
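
/*
 * Note: the stop path above is split into two halves, presumably so that
 * a caller stopping many queues can post all of the (slow) deinit adminq
 * commands first and only then wait for them, e.g. (illustrative sketch,
 * not an existing driver API):
 *
 *	for (i = 0; i < nb_txq; i++)
 *		ionic_dev_tx_queue_stop_firsthalf(dev, i);
 *	for (i = 0; i < nb_txq; i++)
 *		ionic_dev_tx_queue_stop_secondhalf(dev, i);
 *
 * ionic_dev_tx_queue_stop() simply runs the two halves back to back.
 */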

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	if (tx_conf->tx_free_thresh > nb_desc) {
		IONIC_PRINT(ERR,
			"tx_free_thresh must be less than nb_desc (%u)",
			nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
	if (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		txq->flags |= IONIC_QCQ_F_FAST_FREE;

	txq->free_thresh =
		tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
		nb_desc - IONIC_DEF_TXRX_BURST;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_tx_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->qcq.q.num_descs);

	err = ionic_lif_txq_init(txq);
	if (err)
		return err;

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}
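
/*
 * Note: both helpers above rely on the caller having populated the mbuf
 * header-length fields that the ethdev TX offload contract requires for
 * RTE_MBUF_F_TX_TCP_SEG: l2_len and l3_len (plus outer_l2_len and
 * outer_l3_len in the encapsulated case). If those are wrong, the TCP
 * header pointer computed here lands in the wrong place.
 */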

static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	struct rte_mbuf *txm_seg;
	void **info;
	uint64_t cmd;
	uint8_t flags = 0;
	int i;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(len);
	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
	desc->mss = rte_cpu_to_le_16(mss);

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);

		/* Walk the mbuf chain to stash pointers in the array */
		txm_seg = txm;
		for (i = 0; i < txm->nb_segs; i++) {
			info[i] = txm_seg;
			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}
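
/*
 * Note on the helpers above: a TSO descriptor chain is bracketed by the
 * SOT (start-of-TSO) flag on its first descriptor and EOT on its last.
 * The mbuf pointers are stashed in the info array only when the EOT
 * descriptor is posted, so the chain is freed exactly once, when that
 * final descriptor completes.
 */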

int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge = 0;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
	bool use_sgl = !!(txq->flags & IONIC_QCQ_F_SG);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	desc = ionic_tx_tso_next(txq, &elem);
	txm_seg = txm;
	start = true;
	seglen = hdrlen + mss;

	/* Walk the chain of mbufs */
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		/* Split the mbuf data up into multiple descriptors */
		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0 && use_sgl) {
				/* Fill previous descriptor's SGE */
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = rte_cpu_to_le_16(len);
				elem++;
				desc_nsge++;
			} else {
				/* Fill new descriptor's data field */
				len = RTE_MIN(seglen, left);
				frag_left = seglen - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;

			/* Pack the next mbuf's data into the descriptor */
			if (txm_seg->next != NULL && frag_left > 0 && use_sgl)
				break;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
			seglen = mss;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}
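
/*
 * Worked example (illustrative numbers): for a TCP/IPv4 packet with
 * hdrlen = 14 + 20 + 20 = 54 bytes and tso_segsz (mss) = 1400, the loop
 * above posts a first descriptor covering seglen = hdrlen + mss = 1454
 * bytes and subsequent descriptors covering mss = 1400 bytes each, with
 * SG elements filling in whenever an mbuf segment boundary splits a TSO
 * frame. The hardware then replicates the 54-byte header for every
 * resulting wire segment.
 */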

/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 |	\
	RTE_MBUF_F_TX_IPV6 |	\
	RTE_MBUF_F_TX_VLAN |	\
	RTE_MBUF_F_TX_IP_CKSUM |	\
	RTE_MBUF_F_TX_TCP_SEG |	\
	RTE_MBUF_F_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)

uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > txq->num_segs_fw) {
			rte_errno = -EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = -ENOTSUP;
			break;
		}
	}

	return i;
}
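
/*
 * Typical usage (the standard ethdev pattern, not specific to this
 * driver): an application calls rte_eth_tx_prepare() ahead of
 * rte_eth_tx_burst(), so mbufs with unsupported offload flags or too
 * many segments are rejected here, with rte_errno set, before they
 * reach the hardware:
 *
 *	nb = rte_eth_tx_prepare(port_id, queue_id, pkts, nb);
 *	nb = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
 */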

/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

void __rte_cold
ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];

	if (!rxq)
		return;

	IONIC_PRINT_CALL();

	ionic_qcq_free(&rxq->qcq);
}

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
	    nb_desc < IONIC_MIN_RING_DESC ||
	    nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, mp,
			&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;
	rxq->wdog_ms = IONIC_Q_WDOG_MS;

	/*
	 * Note: the interface does not currently support
	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC. Once the adapter is able to keep
	 * the CRC, also consider ETHER_CRC_LEN and subtract it from the
	 * length of all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
	 *     rxq->crc_len = ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}

#define IONIC_CSUM_FLAG_MASK (IONIC_RXQ_COMP_CSUM_F_VLAN - 1)
const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
		__rte_cache_aligned = {
	/* IP_BAD set */
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD] = RTE_MBUF_F_RX_IP_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* IP_OK set */
	[IONIC_RXQ_COMP_CSUM_F_IP_OK] = RTE_MBUF_F_RX_IP_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* No IP flag set */
	[IONIC_RXQ_COMP_CSUM_F_TCP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_TCP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
};
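
/*
 * The table above is indexed by the completion's csum_flags bits with
 * the VLAN bit masked off; the receive path presumably applies it as
 * something like (illustrative only):
 *
 *	pkt_flags |= ionic_csum_flags[csum_flags & IONIC_CSUM_FLAG_MASK];
 *
 * Flag combinations without an initializer resolve to 0, i.e. no
 * checksum mbuf flags are set for them.
 */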

/* RTE_PTYPE_UNKNOWN is 0x0 */
const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]
		__rte_cache_aligned = {
	[IONIC_PKT_TYPE_NON_IP] = RTE_PTYPE_UNKNOWN,
	[IONIC_PKT_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
	[IONIC_PKT_TYPE_IPV4_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV4_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[IONIC_PKT_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
	[IONIC_PKT_TYPE_IPV6_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV6_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
};

const uint32_t *
ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused,
		size_t *no_of_elements)
{
	/* See ionic_ptype_table[] */
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
	};

	*no_of_elements = RTE_DIM(ptypes);
	return ptypes;
}

/*
 * Perform one-time initialization of descriptor fields
 * which will not change for the life of the queue.
 */
static void __rte_cold
ionic_rx_init_descriptors(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	uint32_t i, j;
	uint8_t opcode;

	opcode = (q->num_segs > 1) ?
		IONIC_RXQ_DESC_OPCODE_SG : IONIC_RXQ_DESC_OPCODE_SIMPLE;

	/*
	 * NB: Only the first segment needs to leave headroom (hdr_seg_size).
	 * Later segments (seg_size) do not.
	 */
	for (i = 0; i < q->num_descs; i++) {
		desc = &desc_base[i];
		desc->len = rte_cpu_to_le_16(rxq->hdr_seg_size);
		desc->opcode = opcode;

		sg_desc = &sg_desc_base[i];
		for (j = 0; j < q->num_segs - 1u; j++)
			sg_desc->elems[j].len =
				rte_cpu_to_le_16(rxq->seg_size);
	}
}
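
/*
 * Because the len and opcode fields are written once here, the hot-path
 * fill functions presumably only need to write fresh buffer addresses
 * into each descriptor, keeping the per-packet descriptor writes to a
 * minimum.
 */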

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq;
	struct ionic_queue *q;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	q = &rxq->qcq.q;

	rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;

	/* Recalculate segment count based on MTU */
	q->num_segs = 1 +
		(rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size;

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u segs %u",
		rx_queue_id, q->num_descs, rxq->frame_size, q->num_segs);

	ionic_rx_init_descriptors(rxq);

	err = ionic_lif_rxq_init(rxq);
	if (err)
		return err;

	/* Allocate buffers for descriptor ring */
	if (rxq->flags & IONIC_QCQ_F_SG)
		err = ionic_rx_fill_sg(rxq);
	else
		err = ionic_rx_fill(rxq);
	if (err != 0) {
		IONIC_PRINT(ERR, "Could not fill queue %d", rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
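
/*
 * Example of the segment-count math above (assuming the default
 * RTE_PKTMBUF_HEADROOM of 128 and a seg_size of 2048): a 9000-byte
 * frame size yields 1 + (9000 + 128 - 1) / 2048 = 1 + 4 = 5 segments,
 * while a standard 1518-byte frame needs 1 + (1518 + 128 - 1) / 2048 =
 * 1 segment.
 */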

/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
{
	ionic_dev_rx_queue_stop_firsthalf(dev, rx_queue_id);
	ionic_dev_rx_queue_stop_secondhalf(dev, rx_queue_id);

	return 0;
}

void __rte_cold
ionic_dev_rx_queue_stop_firsthalf(struct rte_eth_dev *dev,
		uint16_t rx_queue_id)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[rx_queue_id];

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_lif_rxq_deinit_nowait(rxq);
}

void __rte_cold
ionic_dev_rx_queue_stop_secondhalf(struct rte_eth_dev *dev,
		uint16_t rx_queue_id)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(dev);
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[rx_queue_id];

	ionic_adminq_wait(lif, &rxq->admin_ctx);

	/* Free all buffers from descriptor ring */
	ionic_rx_empty(rxq);

	ionic_lif_rxq_stats(rxq);
}

int
ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_qcq *qcq = &rxq->qcq;
	volatile struct ionic_rxq_comp *cq_desc;
	uint16_t mask, head, tail, pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_RX_DESC_UNAVAIL;

	/* interested in this absolute position in the rxq */
	pos = (tail + offset) & mask;

	/* rx cq position == rx q position */
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[pos];

	/* expected done color at this position */
	done_color = qcq->cq.done_color != (pos < tail);

	/* has the hw indicated the done color at this position? */
	if (color_match(cq_desc->pkt_type_color, done_color))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}
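
/*
 * Color scheme recap: the device writes a color bit into each completion
 * and inverts the bit it writes each time it wraps the ring, so software
 * can detect new completions without a produced index. Above, the
 * expected color is flipped whenever (tail + offset) wraps past the ring
 * end. The TX variant below must walk the completion ring instead,
 * because TX completions are coalesced: one completion entry covers
 * every descriptor up to its comp_index.
 */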

int
ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_qcq *qcq = &txq->qcq;
	volatile struct ionic_txq_comp *cq_desc;
	uint16_t mask, head, tail, pos, cq_pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_TX_DESC_DONE;

	/* interested in this absolute position in the txq */
	pos = (tail + offset) & mask;

	/* tx cq position != tx q position, need to walk cq */
	cq_pos = qcq->cq.tail_idx;
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[cq_pos];

	/* how far behind is pos from head? */
	offset = (head - pos) & mask;

	/* walk cq descriptors that match the expected done color */
	done_color = qcq->cq.done_color;
	while (color_match(cq_desc->color, done_color)) {
		/* is comp index no further behind than pos? */
		tail = rte_le_to_cpu_16(cq_desc->comp_index);
		if (((head - tail) & mask) <= offset)
			return RTE_ETH_TX_DESC_DONE;

		cq_pos = (cq_pos + 1) & mask;
		cq_desc = qcq->cq.base;
		cq_desc = &cq_desc[cq_pos];

		done_color = done_color != (cq_pos == 0);
	}

	return RTE_ETH_TX_DESC_FULL;
}