/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <sys/queue.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_net.h>

#include "ionic_logs.h"
#include "ionic_mac_api.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

static void
ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)
{
	uint32_t i;

	for (i = idx; i < cnt; i++)
		if (array[i])
			rte_pktmbuf_free_seg(array[i]);

	memset(array, 0, sizeof(void *) * cnt);
}

static void __rte_cold
ionic_tx_empty(struct ionic_tx_qcq *txq)
{
	struct ionic_queue *q = &txq->qcq.q;

	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);
}

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;

	/*
	 * Walk the full info array so that the cleanup includes any
	 * fragments that were left dangling for later reuse.
	 */
	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);

	ionic_empty_array((void **)rxq->mbs,
			IONIC_MBUF_BULK_ALLOC, rxq->mb_idx);
	rxq->mb_idx = 0;
}

/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	if (txq->flags & IONIC_QCQ_F_FAST_FREE)
		qinfo->conf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}

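/*
 * Clean up completed transmit descriptors.
 * A single completion may cover a batch of posted descriptors:
 * q->tail_idx is advanced up to the comp_index reported by the device.
 * Completions are validated by a color bit that flips each time the
 * completion ring wraps, matching cq->done_color.
 */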
static __rte_always_inline void
ionic_tx_flush(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct rte_mbuf *txm;
	struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;
	void **info;
	uint32_t i;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);

		while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
			/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
			rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2)));

			/* Prefetch next mbuf */
			void **next_info =
				IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1));
			if (next_info[0])
				rte_mbuf_prefetch_part2(next_info[0]);
			if (next_info[1])
				rte_mbuf_prefetch_part2(next_info[1]);

			info = IONIC_INFO_PTR(q, q->tail_idx);
			for (i = 0; i < q->num_segs; i++) {
				txm = info[i];
				if (!txm)
					break;

				if (txq->flags & IONIC_QCQ_F_FAST_FREE)
					rte_mempool_put(txm->pool, txm);
				else
					rte_pktmbuf_free_seg(txm);

				info[i] = NULL;
			}

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
		}

		cq_desc = &cq_desc_base[cq->tail_idx];
	}
}

void __rte_cold
ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];

	IONIC_PRINT_CALL();

	ionic_qcq_free(&txq->qcq);
}

int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_tx_stats *stats;
	struct ionic_tx_qcq *txq;

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	txq = eth_dev->data->tx_queues[tx_queue_id];

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	/*
	 * Note: it would be better to post a NOP Tx descriptor and wait
	 * for its completion before disabling the Tx queue.
	 */

	ionic_lif_txq_deinit(txq);

	/* Free all buffers from descriptor ring */
	ionic_tx_empty(txq);

	stats = &txq->stats;
	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
		txq->qcq.q.index, stats->packets, stats->tso);

	return 0;
}

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
	if (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		txq->flags |= IONIC_QCQ_F_FAST_FREE;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

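/*
 * If tx_deferred_start is set, the queue is not started by
 * rte_eth_dev_start(). Illustrative application-side sequence
 * (not part of this driver):
 *
 *	rte_eth_dev_start(port_id);
 *	rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
 */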
/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_tx_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->qcq.q.num_descs);

	err = ionic_lif_txq_init(txq);
	if (err)
		return err;

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

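/*
 * Same as ionic_tx_tcp_pseudo_csum(), but for the inner TCP header of
 * an encapsulated packet: the inner headers sit past the outer L2/L3
 * headers.
 */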
static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	struct rte_mbuf *txm_seg;
	void **info;
	uint64_t cmd;
	uint8_t flags = 0;
	int i;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(len);
	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
	desc->mss = rte_cpu_to_le_16(mss);

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);

		/* Walk the mbuf chain to stash pointers in the array */
		txm_seg = txm;
		for (i = 0; i < txm->nb_segs; i++) {
			info[i] = txm_seg;
			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}

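/*
 * Post a TSO packet. The payload is chopped into MSS-sized chunks with
 * one Tx descriptor per chunk; the first chunk additionally carries the
 * protocol headers (hdrlen). Mbuf fragments that end mid-chunk continue
 * as SG elements of the current descriptor.
 */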
static int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	seglen = hdrlen + mss;
	left = txm->data_len;
	data_iova = rte_mbuf_data_iova(txm);

	desc = ionic_tx_tso_next(txq, &elem);
	start = true;

	/* Chop data up into desc segments */

	while (left > 0) {
		len = RTE_MIN(seglen, left);
		frag_left = seglen - len;
		desc_addr = rte_cpu_to_le_64(data_iova + offset);
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (txm->nb_segs > 1 && frag_left > 0)
			continue;
		done = (txm->nb_segs == 1 && left == 0);
		ionic_tx_tso_post(q, desc, txm,
			desc_addr, desc_nsge, desc_len,
			hdrlen, mss,
			encap,
			vlan_tci, has_vlan,
			start, done);
		desc = ionic_tx_tso_next(txq, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop frags into desc segments */

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0) {
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = rte_cpu_to_le_16(len);
				elem++;
				desc_nsge++;
			} else {
				len = RTE_MIN(mss, left);
				frag_left = mss - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;
			if (txm_seg->next != NULL && frag_left > 0)
				continue;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}

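/*
 * Post a single non-TSO packet: one descriptor per packet, with any
 * additional mbuf segments attached as SG elements of that descriptor.
 */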
static __rte_always_inline int
ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_sg_elem *elem;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm_seg;
	void **info;
	rte_iova_t data_iova;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr, cmd;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	desc = &desc_base[q->head_idx];
	info = IONIC_INFO_PTR(q, q->head_idx);

	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
	    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
		flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
	}

	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
		flags |= IONIC_TXQ_DESC_FLAG_VLAN;
		desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
	}

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(txm->data_len);

	info[0] = txm;

	if (txm->nb_segs > 1) {
		txm_seg = txm->next;

		elem = sg_desc_base[q->head_idx].elems;

		while (txm_seg != NULL) {
			/* Stash the mbuf ptr in the array */
			info++;
			*info = txm_seg;

			/* Configure the SGE */
			data_iova = rte_mbuf_data_iova(txm_seg);
			elem->len = rte_cpu_to_le_16(txm_seg->data_len);
			elem->addr = rte_cpu_to_le_64(data_iova);
			elem++;

			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}

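/*
 * Transmit a burst of packets: service prior completions, clamp the
 * burst to the descriptor space available, post each packet, and ring
 * the doorbell once for the entire burst.
 */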
uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *mbuf;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	int err;

	struct ionic_txq_desc *desc_base = q->base;
	if (!(txq->flags & IONIC_QCQ_F_CMB))
		rte_prefetch0(&desc_base[q->head_idx]);
	rte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));

	if (tx_pkts) {
		rte_mbuf_prefetch_part1(tx_pkts[0]);
		rte_mbuf_prefetch_part2(tx_pkts[0]);
	}

	/* Cleaning old buffers */
	ionic_tx_flush(txq);

	nb_avail = ionic_q_space_avail(q);
	if (unlikely(nb_avail < nb_pkts)) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
		if (!(txq->flags & IONIC_QCQ_F_CMB))
			rte_prefetch0(&desc_base[next_idx]);
		rte_prefetch0(IONIC_INFO_PTR(q, next_idx));

		if (nb_tx + 1 < nb_pkts) {
			rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
			rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
		}

		mbuf = tx_pkts[nb_tx];

		if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			err = ionic_tx_tso(txq, mbuf);
		else
			err = ionic_tx(txq, mbuf);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += mbuf->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		rte_wmb();
		ionic_q_flush(q);

		stats->packets += nb_tx;
		stats->bytes += bytes_tx;
	}

	return nb_tx;
}

/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 |	\
	RTE_MBUF_F_TX_IPV6 |		\
	RTE_MBUF_F_TX_VLAN |		\
	RTE_MBUF_F_TX_IP_CKSUM |	\
	RTE_MBUF_F_TX_TCP_SEG |		\
	RTE_MBUF_F_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)

uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > txq->num_segs_fw) {
			rte_errno = EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			break;
		}
	}

	return i;
}

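/*
 * ionic_prep_pkts() backs rte_eth_tx_prepare(). Illustrative caller
 * sketch (not part of this driver), where handle_unsup_pkt() is a
 * hypothetical application handler:
 *
 *	uint16_t nb = rte_eth_tx_prepare(port_id, queue_id, pkts, n);
 *	if (nb < n)
 *		handle_unsup_pkt(pkts[nb], rte_errno);
 *	nb = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
 */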
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

void __rte_cold
ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];

	if (!rxq)
		return;

	IONIC_PRINT_CALL();

	ionic_qcq_free(&rxq->qcq);
}

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
			nb_desc < IONIC_MIN_RING_DESC ||
			nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, mp,
			&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;

	/*
	 * Note: the interface does not currently support
	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC. Once the adapter is able to keep
	 * the CRC, also consider RTE_ETHER_CRC_LEN and subtract it from
	 * the length of all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
	 *     rxq->crc_len = RTE_ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}

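/*
 * Lookup table mapping the completion's checksum flags (masked below
 * the VLAN bit) directly to mbuf ol_flags, which avoids a chain of
 * conditionals in the receive hot path.
 */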
#define IONIC_CSUM_FLAG_MASK	(IONIC_RXQ_COMP_CSUM_F_VLAN - 1)
static const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
		__rte_cache_aligned = {
	/* IP_BAD set */
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD] = RTE_MBUF_F_RX_IP_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* IP_OK set */
	[IONIC_RXQ_COMP_CSUM_F_IP_OK] = RTE_MBUF_F_RX_IP_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* No IP flag set */
	[IONIC_RXQ_COMP_CSUM_F_TCP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_TCP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
};

/* RTE_PTYPE_UNKNOWN is 0x0 */
static const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]
		__rte_cache_aligned = {
	[IONIC_PKT_TYPE_NON_IP] = RTE_PTYPE_UNKNOWN,
	[IONIC_PKT_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
	[IONIC_PKT_TYPE_IPV4_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV4_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[IONIC_PKT_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
	[IONIC_PKT_TYPE_IPV6_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV6_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
};

const uint32_t *
ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	/* See ionic_ptype_table[] */
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

/*
 * Cleans one descriptor. Connects the filled mbufs into a chain.
 * Does not advance the tail index.
 */
static __rte_always_inline void
ionic_rx_clean_one(struct ionic_rx_qcq *rxq,
		struct ionic_rxq_comp *cq_desc,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm, *rxm_seg, *prev_rxm;
	struct ionic_rx_stats *stats = &rxq->stats;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	uint32_t left, i;
	uint16_t cq_desc_len;
	uint8_t ptype, cflags;
	void **info;

	cq_desc_len = rte_le_to_cpu_16(cq_desc->len);

	info = IONIC_INFO_PTR(q, q->tail_idx);

	rxm = info[0];

	if (cq_desc->status) {
		stats->bad_cq_status++;
		return;
	}

	if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
		stats->bad_len++;
		return;
	}

	info[0] = NULL;

	/* Set the mbuf metadata based on the cq entry */
	rxm->rearm_data[0] = rxq->rearm_data;
	rxm->pkt_len = cq_desc_len;
	rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);
	left = cq_desc_len - rxm->data_len;
	rxm->nb_segs = cq_desc->num_sg_elems + 1;
	prev_rxm = rxm;

	for (i = 1; i < rxm->nb_segs && left; i++) {
		rxm_seg = info[i];
		info[i] = NULL;

		/* Set the chained mbuf metadata */
		rxm_seg->rearm_data[0] = rxq->rearm_seg_data;
		rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
		left -= rxm_seg->data_len;

		/* Link the mbuf */
		prev_rxm->next = rxm_seg;
		prev_rxm = rxm_seg;
	}

	/* Terminate the mbuf chain */
	prev_rxm->next = NULL;

	/* RSS */
	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		cflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;
		pkt_flags |= ionic_csum_flags[cflags];
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	ptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
	pkt_type = ionic_ptype_table[ptype];
	if (pkt_type == RTE_PTYPE_UNKNOWN) {
		struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
		uint16_t ether_type = eth_h->ether_type;
		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
			pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))
			pkt_type = RTE_PTYPE_L2_ETHER_LLDP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))
			pkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;
		stats->mtods++;
	}

	rxm->packet_type = pkt_type;

	rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
	rx_svc->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}

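/*
 * Replacement mbufs are drawn from a small per-queue cache (rxq->mbs),
 * which is replenished IONIC_MBUF_BULK_ALLOC entries at a time via
 * rte_mempool_get_bulk() to amortize the cost of mempool access.
 */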
/*
 * Fills one descriptor with mbufs. Does not advance the head index.
 */
static __rte_always_inline int
ionic_rx_fill_one(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm, *rxm_seg;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	rte_iova_t data_iova;
	uint32_t i;
	void **info;
	int ret;

	info = IONIC_INFO_PTR(q, q->head_idx);
	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];

	/* mbuf is unused => whole chain is unused */
	if (unlikely(info[0]))
		return 0;

	if (rxq->mb_idx == 0) {
		ret = rte_mempool_get_bulk(rxq->mb_pool,
					(void **)rxq->mbs,
					IONIC_MBUF_BULK_ALLOC);
		if (ret) {
			assert(0);
			return -ENOMEM;
		}

		rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
	}

	rxm = rxq->mbs[--rxq->mb_idx];
	info[0] = rxm;

	data_iova = rte_mbuf_data_iova_default(rxm);
	desc->addr = rte_cpu_to_le_64(data_iova);

	for (i = 1; i < q->num_segs; i++) {
		/* mbuf is unused => rest of the chain is unused */
		if (info[i])
			return 0;

		if (rxq->mb_idx == 0) {
			ret = rte_mempool_get_bulk(rxq->mb_pool,
						(void **)rxq->mbs,
						IONIC_MBUF_BULK_ALLOC);
			if (ret) {
				assert(0);
				return -ENOMEM;
			}

			rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
		}

		rxm_seg = rxq->mbs[--rxq->mb_idx];
		info[i] = rxm_seg;

		/* The data_off does not get set to 0 until later */
		data_iova = rxm_seg->buf_iova;
		sg_desc->elems[i - 1].addr = rte_cpu_to_le_64(data_iova);
	}

	return 0;
}

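/*
 * Note: ionic_rx_fill() below starts at index 1, so only num_descs - 1
 * buffers are posted; by the usual ring convention this is presumably
 * what keeps a completely full ring distinguishable from an empty one.
 */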
/*
 * Fills all descriptors with mbufs.
 */
static int __rte_cold
ionic_rx_fill(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	uint32_t i;
	int err;

	for (i = 1; i < q->num_descs; i++) {
		err = ionic_rx_fill_one(rxq);
		if (err)
			return err;

		q->head_idx = Q_NEXT_TO_POST(q, 1);
	}

	ionic_q_flush(q);

	return 0;
}

/*
 * Perform one-time initialization of descriptor fields
 * which will not change for the life of the queue.
 */
static void __rte_cold
ionic_rx_init_descriptors(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	uint32_t i, j;
	uint8_t opcode;

	opcode = (q->num_segs > 1) ?
		IONIC_RXQ_DESC_OPCODE_SG : IONIC_RXQ_DESC_OPCODE_SIMPLE;

	/*
	 * NB: Only the first segment needs to leave headroom (hdr_seg_size).
	 *     Later segments (seg_size) do not.
	 */
	for (i = 0; i < q->num_descs; i++) {
		desc = &desc_base[i];
		desc->len = rte_cpu_to_le_16(rxq->hdr_seg_size);
		desc->opcode = opcode;

		sg_desc = &sg_desc_base[i];
		for (j = 0; j < q->num_segs - 1u; j++)
			sg_desc->elems[j].len =
				rte_cpu_to_le_16(rxq->seg_size);
	}
}

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq;
	struct ionic_queue *q;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	q = &rxq->qcq.q;

	rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;

	/* Recalculate segment count based on MTU */
	q->num_segs = 1 +
		(rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size;

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u segs %u",
		rx_queue_id, q->num_descs, rxq->frame_size, q->num_segs);

	ionic_rx_init_descriptors(rxq);

	err = ionic_lif_rxq_init(rxq);
	if (err)
		return err;

	/* Allocate buffers for descriptor rings */
	if (ionic_rx_fill(rxq) != 0) {
		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
			rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

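/*
 * Example of the segment-count math above (illustrative values only):
 * with frame_size = 9000, RTE_PKTMBUF_HEADROOM = 128, and seg_size =
 * 2048, num_segs = 1 + (9000 + 128 - 1) / 2048 = 1 + 4 = 5. The first
 * segment gives up its headroom, so one shortened head segment plus
 * four full 2048-byte segments cover the 9000-byte frame.
 */
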
/*
 * Walk the CQ to find completed receive descriptors.
 * Any completed descriptor found is refilled.
 */
static __rte_always_inline void
ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *q_desc_base = q->base;
	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
	uint32_t work_done = 0;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 8 x 8B bufinfo */
		rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));
		/* Prefetch 4 x 16B comp */
		rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
		/* Prefetch 4 x 16B descriptors */
		if (!(rxq->flags & IONIC_QCQ_F_CMB))
			rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);

		ionic_rx_clean_one(rxq, cq_desc, rx_svc);

		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

		(void)ionic_rx_fill_one(rxq);

		q->head_idx = Q_NEXT_TO_POST(q, 1);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	/* Update the queue indices and ring the doorbell */
	if (work_done)
		ionic_q_flush(q);
}

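/*
 * Note on the completion ring protocol: the hardware does not publish a
 * producer index. Instead it writes each completion with a "color" bit
 * that flips on every pass around the ring, and the driver flips its
 * expected done_color each time tail_idx wraps to 0. Conceptually (a
 * sketch of the idea, not the actual color_match() definition):
 *
 *	valid = !!(cq_desc->pkt_type_color & COLOR_BIT) == cq->done_color;
 *
 * where COLOR_BIT stands in for the device's color flag within the
 * field. Entries the hardware has not yet written still carry the stale
 * color, so the loop above stops at them automatically.
 */
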
/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_stats *stats;
	struct ionic_rx_qcq *rxq;

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_lif_rxq_deinit(rxq);

	/* Free all buffers from descriptor ring */
	ionic_rx_empty(rxq);

	stats = &rxq->stats;
	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
		rxq->qcq.q.index, stats->packets, stats->mtods);

	return 0;
}

uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_rx_service rx_svc;

	rx_svc.rx_pkts = rx_pkts;
	rx_svc.nb_rx = 0;

	ionic_rxq_service(rxq, nb_pkts, &rx_svc);

	return rx_svc.nb_rx;
}

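/*
 * ionic_recv_pkts() is installed as the device's rx_pkt_burst handler,
 * so applications reach it through the generic ethdev API rather than
 * calling it directly. A minimal illustrative polling loop, assuming
 * port_id and queue 0 were configured and started elsewhere:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t i, nb;
 *
 *	nb = rte_eth_rx_burst(port_id, 0, pkts, RTE_DIM(pkts));
 *	for (i = 0; i < nb; i++) {
 *		handle_pkt(pkts[i]);	// application-defined
 *		rte_pktmbuf_free(pkts[i]);
 *	}
 */
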
int
ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_qcq *qcq = &rxq->qcq;
	struct ionic_rxq_comp *cq_desc;
	uint16_t mask, head, tail, pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_RX_DESC_UNAVAIL;

	/* interested in this absolute position in the rxq */
	pos = (tail + offset) & mask;

	/* rx cq position == rx q position */
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[pos];

	/* expected done color at this position */
	done_color = qcq->cq.done_color != (pos < tail);

	/* has the hw indicated the done color at this position? */
	if (color_match(cq_desc->pkt_type_color, done_color))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_qcq *qcq = &txq->qcq;
	struct ionic_txq_comp *cq_desc;
	uint16_t mask, head, tail, pos, cq_pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_TX_DESC_DONE;

	/* interested in this absolute position in the txq */
	pos = (tail + offset) & mask;

	/* tx cq position != tx q position, need to walk cq */
	cq_pos = qcq->cq.tail_idx;
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[cq_pos];

	/* how far behind is pos from head? */
	offset = (head - pos) & mask;

	/* walk cq descriptors that match the expected done color */
	done_color = qcq->cq.done_color;
	while (color_match(cq_desc->color, done_color)) {
		/* is comp index no further behind than pos? */
		tail = rte_le_to_cpu_16(cq_desc->comp_index);
		if (((head - tail) & mask) <= offset)
			return RTE_ETH_TX_DESC_DONE;

		cq_pos = (cq_pos + 1) & mask;
		cq_desc = qcq->cq.base;
		cq_desc = &cq_desc[cq_pos];

		done_color = done_color != (cq_pos == 0);
	}

	return RTE_ETH_TX_DESC_FULL;
}

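/*
 * Both descriptor status handlers are exposed through the generic
 * ethdev calls. An application might, for example, estimate RX ring
 * occupancy by probing an offset (illustrative only; port_id and
 * queue_id are assumed valid):
 *
 *	int st = rte_eth_rx_descriptor_status(port_id, queue_id, 16);
 *	if (st == RTE_ETH_RX_DESC_DONE)
 *		;	// descriptor 16 past the tail has completed
 *	else if (st == RTE_ETH_RX_DESC_AVAIL)
 *		;	// posted to hardware but not yet filled
 */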