/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>

#include "ionic.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_ethdev.h"
#include "ionic_rxtx.h"
#include "ionic_logs.h"

static void
ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)
{
	uint32_t i;

	for (i = idx; i < cnt; i++)
		if (array[i])
			rte_pktmbuf_free_seg(array[i]);

	memset(array, 0, sizeof(void *) * cnt);
}

static void __rte_cold
ionic_tx_empty(struct ionic_tx_qcq *txq)
{
	struct ionic_queue *q = &txq->qcq.q;

	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);
}

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;

	/*
	 * Walk the full info array so that the cleanup includes any
	 * fragments that were left dangling for later reuse
	 */
	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);

	ionic_empty_array((void **)rxq->mbs,
			IONIC_MBUF_BULK_ALLOC, rxq->mb_idx);
	rxq->mb_idx = 0;
}

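/*
 * The q->info array is assumed (per the IONIC_INFO_PTR() usage below) to
 * be laid out as num_descs blocks of num_segs pointer slots, one slot per
 * posted mbuf fragment.  That is why the cleanup helpers above walk
 * num_descs * num_segs entries rather than num_descs.
 */
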
/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	if (txq->flags & IONIC_QCQ_F_FAST_FREE)
		qinfo->conf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}

void __rte_cold
ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];

	IONIC_PRINT_CALL();

	ionic_qcq_free(&txq->qcq);
}

int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_tx_stats *stats;
	struct ionic_tx_qcq *txq;

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	txq = eth_dev->data->tx_queues[tx_queue_id];

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	/*
	 * Note: it would be better to post a NOP Tx descriptor and wait
	 * for its completion before disabling the Tx queue
	 */

	ionic_lif_txq_deinit(txq);

	/* Free all buffers from descriptor ring */
	ionic_tx_empty(txq);

	stats = &txq->stats;
	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
		txq->qcq.q.index, stats->packets, stats->tso);

	return 0;
}

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	if (tx_conf->tx_free_thresh > nb_desc) {
		IONIC_PRINT(ERR,
			"tx_free_thresh cannot be greater than nb_desc (%u)",
			nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
	if (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		txq->flags |= IONIC_QCQ_F_FAST_FREE;

	txq->free_thresh =
		tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
		nb_desc - IONIC_DEF_TXRX_BURST;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

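/*
 * Usage sketch (application side, not part of the driver): the setup
 * callback above is reached through the ethdev API, e.g.:
 *
 *	struct rte_eth_txconf txconf = { .tx_free_thresh = 128 };
 *	err = rte_eth_tx_queue_setup(port_id, 0, 1024, rte_socket_id(),
 *			&txconf);
 *
 * where nb_desc (1024 here) must be a power of 2 and at least
 * IONIC_MIN_RING_DESC, per the validation above.
 */
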
/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_tx_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->qcq.q.num_descs);

	err = ionic_lif_txq_init(txq);
	if (err)
		return err;

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	struct rte_mbuf *txm_seg;
	void **info;
	uint64_t cmd;
	uint8_t flags = 0;
	int i;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(len);
	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
	desc->mss = rte_cpu_to_le_16(mss);

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);

		/* Walk the mbuf chain to stash pointers in the array */
		txm_seg = txm;
		for (i = 0; i < txm->nb_segs; i++) {
			info[i] = txm_seg;
			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}

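/*
 * TSO worked example (illustrative): for a plain TCP/IPv4 frame with
 * l2_len = 14, l3_len = 20, l4_len = 20 and tso_segsz (mss) = 1448,
 * hdrlen = 54.  The first descriptor then covers hdrlen + mss = 1502
 * bytes of the mbuf chain and each subsequent descriptor covers
 * mss = 1448 bytes, matching the seglen updates in ionic_tx_tso().
 */
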
int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge = 0;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
	bool use_sgl = !!(txq->flags & IONIC_QCQ_F_SG);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	desc = ionic_tx_tso_next(txq, &elem);
	txm_seg = txm;
	start = true;
	seglen = hdrlen + mss;

	/* Walk the chain of mbufs */
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		/* Split the mbuf data up into multiple descriptors */
		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0 && use_sgl) {
				/* Fill previous descriptor's SGE */
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = rte_cpu_to_le_16(len);
				elem++;
				desc_nsge++;
			} else {
				/* Fill new descriptor's data field */
				len = RTE_MIN(seglen, left);
				frag_left = seglen - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;

			/* Pack the next mbuf's data into the descriptor */
			if (txm_seg->next != NULL && frag_left > 0 && use_sgl)
				break;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
			seglen = mss;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}

/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 |	\
	RTE_MBUF_F_TX_IPV6 |		\
	RTE_MBUF_F_TX_VLAN |		\
	RTE_MBUF_F_TX_IP_CKSUM |	\
	RTE_MBUF_F_TX_TCP_SEG |		\
	RTE_MBUF_F_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)

uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > txq->num_segs_fw) {
			rte_errno = EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			break;
		}
	}

	return i;
}

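/*
 * Usage sketch (application side, not part of the driver): this handler
 * is invoked via rte_eth_tx_prepare() ahead of the burst call:
 *
 *	nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	if (nb_prep < nb_pkts)
 *		... inspect rte_errno ...
 *	nb_sent = rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
 *
 * The scan stops at the first mbuf with too many segments or an
 * unsupported offload, leaving rte_errno set for the caller.
 */
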
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

void __rte_cold
ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];

	if (!rxq)
		return;

	IONIC_PRINT_CALL();

	ionic_qcq_free(&rxq->qcq);
}

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
	    nb_desc < IONIC_MIN_RING_DESC ||
	    nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u, max: %u)",
			nb_desc, rx_queue_id,
			IONIC_MIN_RING_DESC, IONIC_MAX_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, mp,
			&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;

	/*
	 * Note: the interface does not currently support
	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC. Once the adapter is able to keep
	 * the CRC, also consider RTE_ETHER_CRC_LEN and subtract it from
	 * the length of all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
	 *     rxq->crc_len = RTE_ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}

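/*
 * Rx checksum completion translation table, kept non-static so that the
 * Rx burst implementations outside this file can use it.  Sketch of the
 * expected lookup (assumed; the call sites live elsewhere):
 *
 *	ol_flags |= ionic_csum_flags[cq_desc->csum_flags &
 *			IONIC_CSUM_FLAG_MASK];
 */
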
#define IONIC_CSUM_FLAG_MASK	(IONIC_RXQ_COMP_CSUM_F_VLAN - 1)
const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
		__rte_cache_aligned = {
	/* IP_BAD set */
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD] = RTE_MBUF_F_RX_IP_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* IP_OK set */
	[IONIC_RXQ_COMP_CSUM_F_IP_OK] = RTE_MBUF_F_RX_IP_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* No IP flag set */
	[IONIC_RXQ_COMP_CSUM_F_TCP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_TCP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
};

/* RTE_PTYPE_UNKNOWN is 0x0 */
const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]
		__rte_cache_aligned = {
	[IONIC_PKT_TYPE_NON_IP] = RTE_PTYPE_UNKNOWN,
	[IONIC_PKT_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
	[IONIC_PKT_TYPE_IPV4_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV4_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[IONIC_PKT_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
	[IONIC_PKT_TYPE_IPV6_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV6_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
};

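/*
 * Sketch of the expected packet type lookup (assumed; the call sites
 * live outside this file).  Hardware values not listed above decay to
 * RTE_PTYPE_UNKNOWN (0x0):
 *
 *	rxm->packet_type = ionic_ptype_table[cq_desc->pkt_type_color &
 *			IONIC_RXQ_COMP_PKT_TYPE_MASK];
 */
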
const uint32_t *
ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	/* See ionic_ptype_table[] */
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

/*
 * Perform one-time initialization of descriptor fields
 * which will not change for the life of the queue.
 */
static void __rte_cold
ionic_rx_init_descriptors(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	uint32_t i, j;
	uint8_t opcode;

	opcode = (q->num_segs > 1) ?
		IONIC_RXQ_DESC_OPCODE_SG : IONIC_RXQ_DESC_OPCODE_SIMPLE;

	/*
	 * NB: Only the first segment needs to leave headroom (hdr_seg_size).
	 *     Later segments (seg_size) do not.
	 */
	for (i = 0; i < q->num_descs; i++) {
		desc = &desc_base[i];
		desc->len = rte_cpu_to_le_16(rxq->hdr_seg_size);
		desc->opcode = opcode;

		sg_desc = &sg_desc_base[i];
		for (j = 0; j < q->num_segs - 1u; j++)
			sg_desc->elems[j].len =
				rte_cpu_to_le_16(rxq->seg_size);
	}
}

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq;
	struct ionic_queue *q;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	q = &rxq->qcq.q;

	rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;

	/* Recalculate segment count based on MTU */
	q->num_segs = 1 +
		(rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size;

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u segs %u",
		rx_queue_id, q->num_descs, rxq->frame_size, q->num_segs);

	ionic_rx_init_descriptors(rxq);

	err = ionic_lif_rxq_init(rxq);
	if (err)
		return err;

	/* Allocate buffers for descriptor ring */
	if (rxq->flags & IONIC_QCQ_F_SG)
		err = ionic_rx_fill_sg(rxq);
	else
		err = ionic_rx_fill(rxq);
	if (err != 0) {
		IONIC_PRINT(ERR, "Could not fill queue %d", rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

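/*
 * Worked example for the segment count above (assuming the default
 * RTE_PKTMBUF_HEADROOM of 128): with frame_size = 9000 and
 * seg_size = 2048, num_segs = 1 + (9000 + 128 - 1) / 2048 = 1 + 4 = 5,
 * i.e. one header segment plus four data segments per received frame.
 */
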
/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_stats *stats;
	struct ionic_rx_qcq *rxq;

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_lif_rxq_deinit(rxq);

	/* Free all buffers from descriptor ring */
	ionic_rx_empty(rxq);

	stats = &rxq->stats;
	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
		rxq->qcq.q.index, stats->packets, stats->mtods);

	return 0;
}

int
ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_qcq *qcq = &rxq->qcq;
	struct ionic_rxq_comp *cq_desc;
	uint16_t mask, head, tail, pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_RX_DESC_UNAVAIL;

	/* interested in this absolute position in the rxq */
	pos = (tail + offset) & mask;

	/* rx cq position == rx q position */
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[pos];

	/* expected done color at this position */
	done_color = qcq->cq.done_color != (pos < tail);

	/* has the hw indicated the done color at this position? */
	if (color_match(cq_desc->pkt_type_color, done_color))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

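/*
 * Color scheme note: each completion carries a color bit that alternates
 * on every pass around the ring.  done_color is the value that marks a
 * completion as freshly written for the current pass; when the position
 * of interest wraps past the tail, the expected color inverts, which is
 * what the (pos < tail) term above and the (cq_pos == 0) toggle below
 * account for.
 */
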
int
ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_qcq *qcq = &txq->qcq;
	struct ionic_txq_comp *cq_desc;
	uint16_t mask, head, tail, pos, cq_pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_TX_DESC_DONE;

	/* interested in this absolute position in the txq */
	pos = (tail + offset) & mask;

	/* tx cq position != tx q position, need to walk cq */
	cq_pos = qcq->cq.tail_idx;
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[cq_pos];

	/* how far behind is pos from head? */
	offset = (head - pos) & mask;

	/* walk cq descriptors that match the expected done color */
	done_color = qcq->cq.done_color;
	while (color_match(cq_desc->color, done_color)) {
		/* is comp index no further behind than pos? */
		tail = rte_le_to_cpu_16(cq_desc->comp_index);
		if (((head - tail) & mask) <= offset)
			return RTE_ETH_TX_DESC_DONE;

		cq_pos = (cq_pos + 1) & mask;
		cq_desc = qcq->cq.base;
		cq_desc = &cq_desc[cq_pos];

		done_color = done_color != (cq_pos == 0);
	}

	return RTE_ETH_TX_DESC_FULL;
}