176668754SAndrew Boyer /* SPDX-License-Identifier: BSD-3-Clause 2a5205992SAndrew Boyer * Copyright 2018-2022 Advanced Micro Devices, Inc. 3a27d9013SAlfredo Cardigliano */ 4a27d9013SAlfredo Cardigliano 5a27d9013SAlfredo Cardigliano #include <sys/queue.h> 6a27d9013SAlfredo Cardigliano #include <stdio.h> 7a27d9013SAlfredo Cardigliano #include <stdlib.h> 8a27d9013SAlfredo Cardigliano #include <string.h> 9a27d9013SAlfredo Cardigliano #include <errno.h> 10a27d9013SAlfredo Cardigliano #include <stdint.h> 11a27d9013SAlfredo Cardigliano #include <stdarg.h> 12a27d9013SAlfredo Cardigliano #include <unistd.h> 13a27d9013SAlfredo Cardigliano #include <inttypes.h> 14a27d9013SAlfredo Cardigliano 15a27d9013SAlfredo Cardigliano #include <rte_byteorder.h> 16a27d9013SAlfredo Cardigliano #include <rte_common.h> 17a27d9013SAlfredo Cardigliano #include <rte_cycles.h> 18a27d9013SAlfredo Cardigliano #include <rte_log.h> 19a27d9013SAlfredo Cardigliano #include <rte_debug.h> 20a27d9013SAlfredo Cardigliano #include <rte_interrupts.h> 21a27d9013SAlfredo Cardigliano #include <rte_pci.h> 22a27d9013SAlfredo Cardigliano #include <rte_memory.h> 23a27d9013SAlfredo Cardigliano #include <rte_memzone.h> 24a27d9013SAlfredo Cardigliano #include <rte_launch.h> 25a27d9013SAlfredo Cardigliano #include <rte_eal.h> 26a27d9013SAlfredo Cardigliano #include <rte_per_lcore.h> 27a27d9013SAlfredo Cardigliano #include <rte_lcore.h> 28a27d9013SAlfredo Cardigliano #include <rte_atomic.h> 29a27d9013SAlfredo Cardigliano #include <rte_branch_prediction.h> 30a27d9013SAlfredo Cardigliano #include <rte_mempool.h> 31a27d9013SAlfredo Cardigliano #include <rte_malloc.h> 32a27d9013SAlfredo Cardigliano #include <rte_mbuf.h> 33a27d9013SAlfredo Cardigliano #include <rte_ether.h> 34df96fd0dSBruce Richardson #include <ethdev_driver.h> 35a27d9013SAlfredo Cardigliano #include <rte_prefetch.h> 36a27d9013SAlfredo Cardigliano #include <rte_udp.h> 37a27d9013SAlfredo Cardigliano #include <rte_tcp.h> 38a27d9013SAlfredo Cardigliano #include 
<rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_net.h>

#include "ionic_logs.h"
#include "ionic_mac_api.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

/*
 * Free any mbuf segments stashed in array[idx..cnt-1], then zero the
 * entire array (including the [0..idx) slots that were skipped).
 * NULL entries are slots that were never filled or already released.
 */
static void
ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)
{
	uint32_t i;

	for (i = idx; i < cnt; i++)
		if (array[i])
			rte_pktmbuf_free_seg(array[i]);

	memset(array, 0, sizeof(void *) * cnt);
}

/* Release every mbuf still tracked by the TX queue's info array. */
static void __rte_cold
ionic_tx_empty(struct ionic_tx_qcq *txq)
{
	struct ionic_queue *q = &txq->qcq.q;

	/* info has num_segs slots per descriptor; walk them all */
	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);
}

/* Release every mbuf still tracked by the RX queue. */
static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;

	/*
	 * Walk the full info array so that the clean up includes any
	 * fragments that were left dangling for later reuse
	 */
	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);

	/* Also drop the bulk-allocated mbufs not yet posted to the ring */
	ionic_empty_array((void **)rxq->mbs,
			IONIC_MBUF_BULK_ALLOC, rxq->mb_idx);
	rxq->mb_idx = 0;
}

/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

/* Report TX queue configuration for rte_eth_tx_queue_info_get(). */
void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	/* Fast-free is tracked as a per-queue flag, not in dev_conf */
	if (txq->flags & IONIC_QCQ_F_FAST_FREE)
		qinfo->conf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}

/*
 * Service the TX completion ring: for each completion the device has
 * marked done (color bit matches cq->done_color), advance the queue
 * tail up to the reported comp_index, releasing the mbufs stashed in
 * the per-descriptor info slots along the way.
 */
static __rte_always_inline void
ionic_tx_flush(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct rte_mbuf *txm;
	struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;
	void **info;
	uint32_t i;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		/* The expected color flips each time the ring wraps */
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);

		/* One completion may cover a batch of descriptors */
		while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
			/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
			rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2)));

			/* Prefetch next mbuf */
			void **next_info =
				IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1));
			if (next_info[0])
				rte_mbuf_prefetch_part2(next_info[0]);
			if (next_info[1])
				rte_mbuf_prefetch_part2(next_info[1]);

			info = IONIC_INFO_PTR(q, q->tail_idx);
			for (i = 0; i < q->num_segs; i++) {
				txm = info[i];
				/* First NULL slot ends this descriptor's chain */
				if (!txm)
					break;

				if (txq->flags & IONIC_QCQ_F_FAST_FREE)
					rte_mempool_put(txm->pool, txm);
				else
					rte_pktmbuf_free_seg(txm);

				info[i] = NULL;
			}

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
		}

		cq_desc = &cq_desc_base[cq->tail_idx];
157a27d9013SAlfredo Cardigliano } 158a27d9013SAlfredo Cardigliano } 159a27d9013SAlfredo Cardigliano 160ce6427ddSThomas Monjalon void __rte_cold 1617483341aSXueming Li ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 162a27d9013SAlfredo Cardigliano { 1637483341aSXueming Li struct ionic_tx_qcq *txq = dev->data->tx_queues[qid]; 164a27d9013SAlfredo Cardigliano 165a27d9013SAlfredo Cardigliano IONIC_PRINT_CALL(); 166a27d9013SAlfredo Cardigliano 167be39f75cSAndrew Boyer ionic_qcq_free(&txq->qcq); 168a27d9013SAlfredo Cardigliano } 169a27d9013SAlfredo Cardigliano 170ce6427ddSThomas Monjalon int __rte_cold 171a27d9013SAlfredo Cardigliano ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) 172a27d9013SAlfredo Cardigliano { 173e7222f94SAndrew Boyer struct ionic_tx_stats *stats; 174be39f75cSAndrew Boyer struct ionic_tx_qcq *txq; 175a27d9013SAlfredo Cardigliano 1764ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id); 177a27d9013SAlfredo Cardigliano 178a27d9013SAlfredo Cardigliano txq = eth_dev->data->tx_queues[tx_queue_id]; 179a27d9013SAlfredo Cardigliano 1809fdf11c4SAndrew Boyer eth_dev->data->tx_queue_state[tx_queue_id] = 1819fdf11c4SAndrew Boyer RTE_ETH_QUEUE_STATE_STOPPED; 1829fdf11c4SAndrew Boyer 183a27d9013SAlfredo Cardigliano /* 184a27d9013SAlfredo Cardigliano * Note: we should better post NOP Tx desc and wait for its completion 185a27d9013SAlfredo Cardigliano * before disabling Tx queue 186a27d9013SAlfredo Cardigliano */ 187a27d9013SAlfredo Cardigliano 188e7222f94SAndrew Boyer ionic_lif_txq_deinit(txq); 189a27d9013SAlfredo Cardigliano 190e7222f94SAndrew Boyer /* Free all buffers from descriptor ring */ 191e7222f94SAndrew Boyer ionic_tx_empty(txq); 192e7222f94SAndrew Boyer 193e7222f94SAndrew Boyer stats = &txq->stats; 194e7222f94SAndrew Boyer IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju", 195e7222f94SAndrew Boyer txq->qcq.q.index, stats->packets, stats->tso); 196a27d9013SAlfredo Cardigliano 
197a27d9013SAlfredo Cardigliano return 0; 198a27d9013SAlfredo Cardigliano } 199a27d9013SAlfredo Cardigliano 200ce6427ddSThomas Monjalon int __rte_cold 201a27d9013SAlfredo Cardigliano ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id, 2024ae96cb8SAndrew Boyer uint16_t nb_desc, uint32_t socket_id, 203a27d9013SAlfredo Cardigliano const struct rte_eth_txconf *tx_conf) 204a27d9013SAlfredo Cardigliano { 205a27d9013SAlfredo Cardigliano struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); 206be39f75cSAndrew Boyer struct ionic_tx_qcq *txq; 207a27d9013SAlfredo Cardigliano uint64_t offloads; 208a27d9013SAlfredo Cardigliano int err; 209a27d9013SAlfredo Cardigliano 210a27d9013SAlfredo Cardigliano if (tx_queue_id >= lif->ntxqcqs) { 211a27d9013SAlfredo Cardigliano IONIC_PRINT(DEBUG, "Queue index %u not available " 212a27d9013SAlfredo Cardigliano "(max %u queues)", 213a27d9013SAlfredo Cardigliano tx_queue_id, lif->ntxqcqs); 214a27d9013SAlfredo Cardigliano return -EINVAL; 215a27d9013SAlfredo Cardigliano } 216a27d9013SAlfredo Cardigliano 217a27d9013SAlfredo Cardigliano offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads; 2184ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, 2194ae96cb8SAndrew Boyer "Configuring skt %u TX queue %u with %u buffers, offloads %jx", 2204ae96cb8SAndrew Boyer socket_id, tx_queue_id, nb_desc, offloads); 221a27d9013SAlfredo Cardigliano 222a27d9013SAlfredo Cardigliano /* Validate number of receive descriptors */ 223a27d9013SAlfredo Cardigliano if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC) 224a27d9013SAlfredo Cardigliano return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */ 225a27d9013SAlfredo Cardigliano 226*07512941SAndrew Boyer if (tx_conf->tx_free_thresh > nb_desc) { 227*07512941SAndrew Boyer IONIC_PRINT(ERR, 228*07512941SAndrew Boyer "tx_free_thresh must be less than nb_desc (%u)", 229*07512941SAndrew Boyer nb_desc); 230*07512941SAndrew Boyer return -EINVAL; 231*07512941SAndrew Boyer } 
232*07512941SAndrew Boyer 233a27d9013SAlfredo Cardigliano /* Free memory prior to re-allocation if needed... */ 234a27d9013SAlfredo Cardigliano if (eth_dev->data->tx_queues[tx_queue_id] != NULL) { 2357483341aSXueming Li ionic_dev_tx_queue_release(eth_dev, tx_queue_id); 236a27d9013SAlfredo Cardigliano eth_dev->data->tx_queues[tx_queue_id] = NULL; 237a27d9013SAlfredo Cardigliano } 238a27d9013SAlfredo Cardigliano 2399fdf11c4SAndrew Boyer eth_dev->data->tx_queue_state[tx_queue_id] = 2409fdf11c4SAndrew Boyer RTE_ETH_QUEUE_STATE_STOPPED; 2419fdf11c4SAndrew Boyer 2428ec5ad7fSAndrew Boyer err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq); 243a27d9013SAlfredo Cardigliano if (err) { 244a27d9013SAlfredo Cardigliano IONIC_PRINT(DEBUG, "Queue allocation failure"); 245a27d9013SAlfredo Cardigliano return -EINVAL; 246a27d9013SAlfredo Cardigliano } 247a27d9013SAlfredo Cardigliano 248a27d9013SAlfredo Cardigliano /* Do not start queue with rte_eth_dev_start() */ 24902eabf57SAndrew Boyer if (tx_conf->tx_deferred_start) 25002eabf57SAndrew Boyer txq->flags |= IONIC_QCQ_F_DEFERRED; 251a27d9013SAlfredo Cardigliano 25268591087SAndrew Boyer /* Convert the offload flags into queue flags */ 253295968d1SFerruh Yigit if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) 25468591087SAndrew Boyer txq->flags |= IONIC_QCQ_F_CSUM_L3; 255295968d1SFerruh Yigit if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) 25668591087SAndrew Boyer txq->flags |= IONIC_QCQ_F_CSUM_TCP; 257295968d1SFerruh Yigit if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) 25868591087SAndrew Boyer txq->flags |= IONIC_QCQ_F_CSUM_UDP; 2599ac234eeSAndrew Boyer if (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) 2609ac234eeSAndrew Boyer txq->flags |= IONIC_QCQ_F_FAST_FREE; 261a27d9013SAlfredo Cardigliano 262*07512941SAndrew Boyer txq->free_thresh = 263*07512941SAndrew Boyer tx_conf->tx_free_thresh ? 
tx_conf->tx_free_thresh : 264*07512941SAndrew Boyer nb_desc - IONIC_DEF_TXRX_BURST; 265*07512941SAndrew Boyer 266a27d9013SAlfredo Cardigliano eth_dev->data->tx_queues[tx_queue_id] = txq; 267a27d9013SAlfredo Cardigliano 268a27d9013SAlfredo Cardigliano return 0; 269a27d9013SAlfredo Cardigliano } 270a27d9013SAlfredo Cardigliano 271a27d9013SAlfredo Cardigliano /* 272a27d9013SAlfredo Cardigliano * Start Transmit Units for specified queue. 273a27d9013SAlfredo Cardigliano */ 274ce6427ddSThomas Monjalon int __rte_cold 275a27d9013SAlfredo Cardigliano ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) 276a27d9013SAlfredo Cardigliano { 2779fdf11c4SAndrew Boyer uint8_t *tx_queue_state = eth_dev->data->tx_queue_state; 278be39f75cSAndrew Boyer struct ionic_tx_qcq *txq; 279a27d9013SAlfredo Cardigliano int err; 280a27d9013SAlfredo Cardigliano 2819fdf11c4SAndrew Boyer if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) { 2829fdf11c4SAndrew Boyer IONIC_PRINT(DEBUG, "TX queue %u already started", 2839fdf11c4SAndrew Boyer tx_queue_id); 2849fdf11c4SAndrew Boyer return 0; 2859fdf11c4SAndrew Boyer } 2869fdf11c4SAndrew Boyer 287a27d9013SAlfredo Cardigliano txq = eth_dev->data->tx_queues[tx_queue_id]; 288a27d9013SAlfredo Cardigliano 2894ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs", 290be39f75cSAndrew Boyer tx_queue_id, txq->qcq.q.num_descs); 2914ae96cb8SAndrew Boyer 292a27d9013SAlfredo Cardigliano err = ionic_lif_txq_init(txq); 293a27d9013SAlfredo Cardigliano if (err) 294a27d9013SAlfredo Cardigliano return err; 295a27d9013SAlfredo Cardigliano 2969fdf11c4SAndrew Boyer tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 297a27d9013SAlfredo Cardigliano 298a27d9013SAlfredo Cardigliano return 0; 299a27d9013SAlfredo Cardigliano } 300a27d9013SAlfredo Cardigliano 301a27d9013SAlfredo Cardigliano static void 30264b08152SAlfredo Cardigliano ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm) 30364b08152SAlfredo Cardigliano { 
30464b08152SAlfredo Cardigliano struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *); 30564b08152SAlfredo Cardigliano char *l3_hdr = ((char *)eth_hdr) + txm->l2_len; 30664b08152SAlfredo Cardigliano struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *) 30764b08152SAlfredo Cardigliano (l3_hdr + txm->l3_len); 30864b08152SAlfredo Cardigliano 309daa02b5cSOlivier Matz if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { 31064b08152SAlfredo Cardigliano struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 31164b08152SAlfredo Cardigliano ipv4_hdr->hdr_checksum = 0; 31264b08152SAlfredo Cardigliano tcp_hdr->cksum = 0; 31364b08152SAlfredo Cardigliano tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr); 31464b08152SAlfredo Cardigliano } else { 31564b08152SAlfredo Cardigliano struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; 31664b08152SAlfredo Cardigliano tcp_hdr->cksum = 0; 31764b08152SAlfredo Cardigliano tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr); 31864b08152SAlfredo Cardigliano } 31964b08152SAlfredo Cardigliano } 32064b08152SAlfredo Cardigliano 32164b08152SAlfredo Cardigliano static void 32264b08152SAlfredo Cardigliano ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm) 32364b08152SAlfredo Cardigliano { 32464b08152SAlfredo Cardigliano struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *); 32564b08152SAlfredo Cardigliano char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len + 32664b08152SAlfredo Cardigliano txm->outer_l3_len + txm->l2_len; 32764b08152SAlfredo Cardigliano struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *) 32864b08152SAlfredo Cardigliano (l3_hdr + txm->l3_len); 32964b08152SAlfredo Cardigliano 330daa02b5cSOlivier Matz if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) { 33164b08152SAlfredo Cardigliano struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 33264b08152SAlfredo Cardigliano ipv4_hdr->hdr_checksum = 0; 33364b08152SAlfredo Cardigliano tcp_hdr->cksum = 0; 33464b08152SAlfredo 
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

/*
 * Fill one TSO descriptor and advance the queue head.  On the final
 * descriptor of the packet (done == true), stash the whole mbuf chain
 * in that descriptor's info slots so completion can free it.
 */
static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	struct rte_mbuf *txm_seg;
	void **info;
	uint64_t cmd;
	uint8_t flags = 0;
	int i;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	/* SOT/EOT mark the first/last descriptor of the TSO burst */
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(len);
	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
	desc->mss = rte_cpu_to_le_16(mss);

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);

		/* Walk the mbuf chain to stash pointers in the array */
		txm_seg = txm;
		for (i = 0; i < txm->nb_segs; i++) {
			info[i] = txm_seg;
			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

/*
 * Return the descriptor at the current queue head and point *elem at
 * its scatter-gather element array.  Does not advance the head; that
 * happens in ionic_tx_tso_post().
 */
static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}

/*
 * Post a TSO packet: seed the inner TCP pseudo checksum, then chop the
 * mbuf chain into MSS-sized descriptor segments (the first segment of
 * each descriptor carries up to hdrlen + mss bytes; spillover from a
 * previous mbuf continues as SG elements via frag_left).
 *
 * Always returns 0.
 */
static int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	/* First descriptor also carries the headers */
	seglen = hdrlen + mss;
	left = txm->data_len;
	data_iova = rte_mbuf_data_iova(txm);

	desc = ionic_tx_tso_next(txq, &elem);
	start = true;

	/* Chop data up into desc segments */

	while (left > 0) {
		len = RTE_MIN(seglen, left);
		frag_left = seglen - len;
		desc_addr = rte_cpu_to_le_64(data_iova + offset);
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		/* Partial segment continues into the next mbuf via SG */
		if (txm->nb_segs > 1 && frag_left > 0)
			continue;
		done = (txm->nb_segs == 1 && left == 0);
		ionic_tx_tso_post(q, desc, txm,
			desc_addr, desc_nsge, desc_len,
			hdrlen, mss,
			encap,
			vlan_tci, has_vlan,
			start, done);
		desc = ionic_tx_tso_next(txq, &elem);
		start = false;
		/* Subsequent descriptors carry payload only */
		seglen = mss;
	}

	/* Chop frags into desc segments */

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0) {
				/* Finish the open segment as an SG element */
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = rte_cpu_to_le_16(len);
				elem++;
				desc_nsge++;
			} else {
				/* Open a fresh MSS-sized segment */
				len = RTE_MIN(mss, left);
				frag_left = mss - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;
			if (txm_seg->next != NULL && frag_left > 0)
				continue;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}

/*
 * Post one non-TSO packet: encode checksum/VLAN/encap flags into a
 * single descriptor (plus SG elements for chained mbufs) and stash the
 * mbuf chain in the info array for release at completion time.
 *
 * Always returns 0.
 */
static __rte_always_inline int
ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_sg_elem *elem;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm_seg;
	void **info;
	rte_iova_t data_iova;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr, cmd;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	desc = &desc_base[q->head_idx];
	info = IONIC_INFO_PTR(q, q->head_idx);

	/* Request HW L3 csum only if the queue was configured for it */
	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	/* Likewise for TCP/UDP L4 csum */
	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
	    (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
	    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
	    (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
		flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
	}

	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
		flags |= IONIC_TXQ_DESC_FLAG_VLAN;
		desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
	}

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	/* nsge counts the SG elements beyond the first data segment */
	cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(txm->data_len);

	info[0] = txm;

	if (txm->nb_segs > 1) {
		txm_seg = txm->next;

		elem = sg_desc_base[q->head_idx].elems;

		while (txm_seg != NULL) {
			/* Stash the mbuf ptr in the array */
			info++;
			*info = txm_seg;

			/* Configure the SGE */
			data_iova = rte_mbuf_data_iova(txm_seg);
			elem->len = rte_cpu_to_le_16(txm_seg->data_len);
			elem->addr = rte_cpu_to_le_64(data_iova);
			elem++;

			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}

/*
 * Burst TX entry point: reclaim completed descriptors when free space
 * drops below the queue's free threshold, post up to nb_pkts packets
 * (TSO or plain), then ring the doorbell once for the whole burst.
 *
 * Returns the number of packets actually posted.
 */
uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *mbuf;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	int err;

	struct ionic_txq_desc *desc_base = q->base;
	/* Descriptors in controller memory (CMB) are not prefetched */
	if (!(txq->flags & IONIC_QCQ_F_CMB))
		rte_prefetch0(&desc_base[q->head_idx]);
	rte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));

	if (tx_pkts) {
		rte_mbuf_prefetch_part1(tx_pkts[0]);
		rte_mbuf_prefetch_part2(tx_pkts[0]);
	}

	if (unlikely(ionic_q_space_avail(q) < txq->free_thresh)) {
		/* Cleaning old buffers */
		ionic_tx_flush(txq);
	}

	/* Trim the burst to the space actually available */
	nb_avail = ionic_q_space_avail(q);
	if (unlikely(nb_avail < nb_pkts)) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
		if (!(txq->flags & IONIC_QCQ_F_CMB))
			rte_prefetch0(&desc_base[next_idx]);
		rte_prefetch0(IONIC_INFO_PTR(q, next_idx));

		if (nb_tx + 1 < nb_pkts) {
			rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
			rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
		}

		mbuf = tx_pkts[nb_tx];

		if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			err = ionic_tx_tso(txq, mbuf);
		else
			err = ionic_tx(txq, mbuf);
		if (err) {
			/* Count the whole remainder of the burst as dropped */
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += mbuf->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		/* Ensure descriptor writes are visible before the doorbell */
		rte_wmb();
		ionic_q_flush(q);

		stats->packets += nb_tx;
		stats->bytes += bytes_tx;
	}
668a27d9013SAlfredo Cardigliano return nb_tx; 669a27d9013SAlfredo Cardigliano } 670a27d9013SAlfredo Cardigliano 671a27d9013SAlfredo Cardigliano /********************************************************************* 672a27d9013SAlfredo Cardigliano * 673a27d9013SAlfredo Cardigliano * TX prep functions 674a27d9013SAlfredo Cardigliano * 675a27d9013SAlfredo Cardigliano **********************************************************************/ 676a27d9013SAlfredo Cardigliano 677daa02b5cSOlivier Matz #define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 | \ 678daa02b5cSOlivier Matz RTE_MBUF_F_TX_IPV6 | \ 679daa02b5cSOlivier Matz RTE_MBUF_F_TX_VLAN | \ 680daa02b5cSOlivier Matz RTE_MBUF_F_TX_IP_CKSUM | \ 681daa02b5cSOlivier Matz RTE_MBUF_F_TX_TCP_SEG | \ 682daa02b5cSOlivier Matz RTE_MBUF_F_TX_L4_MASK) 683a27d9013SAlfredo Cardigliano 684a27d9013SAlfredo Cardigliano #define IONIC_TX_OFFLOAD_NOTSUP_MASK \ 685daa02b5cSOlivier Matz (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK) 686a27d9013SAlfredo Cardigliano 687a27d9013SAlfredo Cardigliano uint16_t 688e19eea1eSAndrew Boyer ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 689a27d9013SAlfredo Cardigliano { 690e19eea1eSAndrew Boyer struct ionic_tx_qcq *txq = tx_queue; 691a27d9013SAlfredo Cardigliano struct rte_mbuf *txm; 692a27d9013SAlfredo Cardigliano uint64_t offloads; 693a27d9013SAlfredo Cardigliano int i = 0; 694a27d9013SAlfredo Cardigliano 695a27d9013SAlfredo Cardigliano for (i = 0; i < nb_pkts; i++) { 696a27d9013SAlfredo Cardigliano txm = tx_pkts[i]; 697a27d9013SAlfredo Cardigliano 698e19eea1eSAndrew Boyer if (txm->nb_segs > txq->num_segs_fw) { 699a27d9013SAlfredo Cardigliano rte_errno = -EINVAL; 700a27d9013SAlfredo Cardigliano break; 701a27d9013SAlfredo Cardigliano } 702a27d9013SAlfredo Cardigliano 703a27d9013SAlfredo Cardigliano offloads = txm->ol_flags; 704a27d9013SAlfredo Cardigliano 705a27d9013SAlfredo Cardigliano if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) { 706a27d9013SAlfredo Cardigliano 
rte_errno = -ENOTSUP; 707a27d9013SAlfredo Cardigliano break; 708a27d9013SAlfredo Cardigliano } 709a27d9013SAlfredo Cardigliano } 710a27d9013SAlfredo Cardigliano 711a27d9013SAlfredo Cardigliano return i; 712a27d9013SAlfredo Cardigliano } 713a27d9013SAlfredo Cardigliano 714a27d9013SAlfredo Cardigliano /********************************************************************* 715a27d9013SAlfredo Cardigliano * 716a27d9013SAlfredo Cardigliano * RX functions 717a27d9013SAlfredo Cardigliano * 718a27d9013SAlfredo Cardigliano **********************************************************************/ 719a27d9013SAlfredo Cardigliano 720a27d9013SAlfredo Cardigliano void 721a27d9013SAlfredo Cardigliano ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 722a27d9013SAlfredo Cardigliano struct rte_eth_rxq_info *qinfo) 723a27d9013SAlfredo Cardigliano { 724be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id]; 725be39f75cSAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 726a27d9013SAlfredo Cardigliano 727a27d9013SAlfredo Cardigliano qinfo->mp = rxq->mb_pool; 728a27d9013SAlfredo Cardigliano qinfo->scattered_rx = dev->data->scattered_rx; 729a27d9013SAlfredo Cardigliano qinfo->nb_desc = q->num_descs; 73002eabf57SAndrew Boyer qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED; 73168591087SAndrew Boyer qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 732a27d9013SAlfredo Cardigliano } 733a27d9013SAlfredo Cardigliano 734ce6427ddSThomas Monjalon void __rte_cold 7357483341aSXueming Li ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 736a27d9013SAlfredo Cardigliano { 7377483341aSXueming Li struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid]; 738be39f75cSAndrew Boyer 739be39f75cSAndrew Boyer if (!rxq) 740be39f75cSAndrew Boyer return; 741a27d9013SAlfredo Cardigliano 742a27d9013SAlfredo Cardigliano IONIC_PRINT_CALL(); 743a27d9013SAlfredo Cardigliano 744be39f75cSAndrew Boyer ionic_qcq_free(&rxq->qcq); 745a27d9013SAlfredo 
/*
 * Allocate and configure an RX queue (rte_eth_rx_queue_setup hook).
 *
 * Validates the queue index and descriptor count, frees any queue that
 * already occupies the slot, then allocates a fresh qcq.  The queue is
 * left in the STOPPED state; it is started later by
 * ionic_dev_rx_queue_start() (or by dev_start unless deferred).
 *
 * Returns 0 on success, negative errno on failure.
 */
int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	/* Per-queue offloads are merged with the device-level ones */
	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	/* The hardware always drops on ring overflow */
	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors: must be a power of two
	 * within [IONIC_MIN_RING_DESC, IONIC_MAX_RING_DESC]
	 */
	if (!rte_is_power_of_2(nb_desc) ||
			nb_desc < IONIC_MIN_RING_DESC ||
			nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, mp,
		&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;

	/*
	 * Note: the interface does not currently support
	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
	 * when the adapter will be able to keep the CRC and subtract
	 * it to the length for all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *		RTE_ETH_RX_OFFLOAD_KEEP_CRC)
	 *	rxq->crc_len = ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}
/*
 * Lookup table translating the completion's csum_flags bits (below the
 * VLAN bit) directly into mbuf ol_flags.  Unlisted index values map to 0.
 */
#define IONIC_CSUM_FLAG_MASK (IONIC_RXQ_COMP_CSUM_F_VLAN - 1)
static const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
		__rte_cache_aligned = {
	/* IP_BAD set */
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD] = RTE_MBUF_F_RX_IP_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
			RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
			RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
			RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
			RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* IP_OK set */
	[IONIC_RXQ_COMP_CSUM_F_IP_OK] = RTE_MBUF_F_RX_IP_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* No IP flag set */
	[IONIC_RXQ_COMP_CSUM_F_TCP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_TCP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
};

/*
 * Lookup table translating the completion's packet-type field into mbuf
 * packet_type values.  RTE_PTYPE_UNKNOWN is 0x0, so unlisted indices fall
 * through to "unknown" and get the slow-path classification in
 * ionic_rx_clean_one().
 */
static const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]
		__rte_cache_aligned = {
	[IONIC_PKT_TYPE_NON_IP] = RTE_PTYPE_UNKNOWN,
	[IONIC_PKT_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
	[IONIC_PKT_TYPE_IPV4_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV4_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[IONIC_PKT_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
	[IONIC_PKT_TYPE_IPV6_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV6_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
};

/*
 * Report the packet types this driver can recognize
 * (rte_eth_dev_supported_ptypes_get hook).
 */
const uint32_t *
ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	/* See ionic_ptype_table[] */
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}
/*
 * Cleans one descriptor. Connects the filled mbufs into a chain.
 * Does not advance the tail index.
 *
 * On a bad completion (error status or bogus length) the mbufs are left
 * in place in the info array so that ionic_rx_fill_one() reuses them.
 * On success the consumed info slots are NULLed and the assembled chain
 * is appended to rx_svc->rx_pkts.
 */
static __rte_always_inline void
ionic_rx_clean_one(struct ionic_rx_qcq *rxq,
		struct ionic_rxq_comp *cq_desc,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm, *rxm_seg, *prev_rxm;
	struct ionic_rx_stats *stats = &rxq->stats;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	uint32_t left, i;
	uint16_t cq_desc_len;
	uint8_t ptype, cflags;
	void **info;

	cq_desc_len = rte_le_to_cpu_16(cq_desc->len);

	info = IONIC_INFO_PTR(q, q->tail_idx);

	/* First mbuf of the chain posted for this descriptor */
	rxm = info[0];

	if (cq_desc->status) {
		stats->bad_cq_status++;
		return;
	}

	if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
		stats->bad_len++;
		return;
	}

	/* Mark the head slot consumed so the refill replaces it */
	info[0] = NULL;

	/* Set the mbuf metadata based on the cq entry.
	 * Writing the prebuilt rearm word initializes refcnt/nb_segs/
	 * data_off in one store.
	 */
	rxm->rearm_data[0] = rxq->rearm_data;
	rxm->pkt_len = cq_desc_len;
	rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);
	left = cq_desc_len - rxm->data_len;
	rxm->nb_segs = cq_desc->num_sg_elems + 1;
	prev_rxm = rxm;

	for (i = 1; i < rxm->nb_segs && left; i++) {
		rxm_seg = info[i];
		info[i] = NULL;

		/* Set the chained mbuf metadata */
		rxm_seg->rearm_data[0] = rxq->rearm_seg_data;
		rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
		left -= rxm_seg->data_len;

		/* Link the mbuf */
		prev_rxm->next = rxm_seg;
		prev_rxm = rxm_seg;
	}

	/* Terminate the mbuf chain */
	prev_rxm->next = NULL;

	/* RSS hash is always provided by the completion */
	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
	}

	/* Checksum: table lookup converts HW bits to ol_flags */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		cflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;
		pkt_flags |= ionic_csum_flags[cflags];
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type: fast path via table, slow path reads the ethertype
	 * from the frame to classify ARP/LLDP/1588
	 */
	ptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
	pkt_type = ionic_ptype_table[ptype];
	if (pkt_type == RTE_PTYPE_UNKNOWN) {
		struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
		uint16_t ether_type = eth_h->ether_type;
		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
			pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))
			pkt_type = RTE_PTYPE_L2_ETHER_LLDP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))
			pkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;
		/* count mbuf-data accesses (mtod) on the slow path */
		stats->mtods++;
	}

	rxm->packet_type = pkt_type;

	rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
	rx_svc->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}
rxm->pkt_len; 990a27d9013SAlfredo Cardigliano } 991a27d9013SAlfredo Cardigliano 9927b20fc2fSAndrew Boyer /* 9937b20fc2fSAndrew Boyer * Fills one descriptor with mbufs. Does not advance the head index. 9947b20fc2fSAndrew Boyer */ 9950de3e209SAndrew Boyer static __rte_always_inline int 9967b20fc2fSAndrew Boyer ionic_rx_fill_one(struct ionic_rx_qcq *rxq) 997a27d9013SAlfredo Cardigliano { 998be39f75cSAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 9997b20fc2fSAndrew Boyer struct rte_mbuf *rxm, *rxm_seg; 1000be39f75cSAndrew Boyer struct ionic_rxq_desc *desc, *desc_base = q->base; 1001be39f75cSAndrew Boyer struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base; 10027b20fc2fSAndrew Boyer rte_iova_t data_iova; 10037b20fc2fSAndrew Boyer uint32_t i; 1004dd10c5b4SAndrew Boyer void **info; 1005218afd82SAndrew Boyer int ret; 1006a27d9013SAlfredo Cardigliano 1007dd10c5b4SAndrew Boyer info = IONIC_INFO_PTR(q, q->head_idx); 1008a27d9013SAlfredo Cardigliano desc = &desc_base[q->head_idx]; 1009a27d9013SAlfredo Cardigliano sg_desc = &sg_desc_base[q->head_idx]; 1010a27d9013SAlfredo Cardigliano 10117b20fc2fSAndrew Boyer /* mbuf is unused => whole chain is unused */ 10127b20fc2fSAndrew Boyer if (unlikely(info[0])) 10137b20fc2fSAndrew Boyer return 0; 10147b20fc2fSAndrew Boyer 1015218afd82SAndrew Boyer if (rxq->mb_idx == 0) { 1016218afd82SAndrew Boyer ret = rte_mempool_get_bulk(rxq->mb_pool, 1017218afd82SAndrew Boyer (void **)rxq->mbs, 1018218afd82SAndrew Boyer IONIC_MBUF_BULK_ALLOC); 1019218afd82SAndrew Boyer if (ret) { 10207b20fc2fSAndrew Boyer assert(0); 1021a27d9013SAlfredo Cardigliano return -ENOMEM; 1022a27d9013SAlfredo Cardigliano } 1023a27d9013SAlfredo Cardigliano 1024218afd82SAndrew Boyer rxq->mb_idx = IONIC_MBUF_BULK_ALLOC; 1025218afd82SAndrew Boyer } 1026218afd82SAndrew Boyer 1027218afd82SAndrew Boyer rxm = rxq->mbs[--rxq->mb_idx]; 1028dd10c5b4SAndrew Boyer info[0] = rxm; 1029dd10c5b4SAndrew Boyer 10307b20fc2fSAndrew Boyer data_iova = rte_mbuf_data_iova_default(rxm); 
10317b20fc2fSAndrew Boyer desc->addr = rte_cpu_to_le_64(data_iova); 10327b20fc2fSAndrew Boyer 10337b20fc2fSAndrew Boyer for (i = 1; i < q->num_segs; i++) { 10347b20fc2fSAndrew Boyer /* mbuf is unused => rest of the chain is unused */ 10357b20fc2fSAndrew Boyer if (info[i]) 10367b20fc2fSAndrew Boyer return 0; 10377b20fc2fSAndrew Boyer 1038218afd82SAndrew Boyer if (rxq->mb_idx == 0) { 1039218afd82SAndrew Boyer ret = rte_mempool_get_bulk(rxq->mb_pool, 1040218afd82SAndrew Boyer (void **)rxq->mbs, 1041218afd82SAndrew Boyer IONIC_MBUF_BULK_ALLOC); 1042218afd82SAndrew Boyer if (ret) { 10437b20fc2fSAndrew Boyer assert(0); 10447b20fc2fSAndrew Boyer return -ENOMEM; 10457b20fc2fSAndrew Boyer } 10467b20fc2fSAndrew Boyer 1047218afd82SAndrew Boyer rxq->mb_idx = IONIC_MBUF_BULK_ALLOC; 1048218afd82SAndrew Boyer } 1049218afd82SAndrew Boyer 1050218afd82SAndrew Boyer rxm_seg = rxq->mbs[--rxq->mb_idx]; 10517b20fc2fSAndrew Boyer info[i] = rxm_seg; 10527b20fc2fSAndrew Boyer 10537b20fc2fSAndrew Boyer /* The data_off does not get set to 0 until later */ 10547b20fc2fSAndrew Boyer data_iova = rxm_seg->buf_iova; 10557b20fc2fSAndrew Boyer sg_desc->elems[i - 1].addr = rte_cpu_to_le_64(data_iova); 10567b20fc2fSAndrew Boyer } 10577b20fc2fSAndrew Boyer 10587b20fc2fSAndrew Boyer return 0; 10597b20fc2fSAndrew Boyer } 10607b20fc2fSAndrew Boyer 10617b20fc2fSAndrew Boyer /* 10627b20fc2fSAndrew Boyer * Fills all descriptors with mbufs. 
10637b20fc2fSAndrew Boyer */ 10647b20fc2fSAndrew Boyer static int __rte_cold 10657b20fc2fSAndrew Boyer ionic_rx_fill(struct ionic_rx_qcq *rxq) 10667b20fc2fSAndrew Boyer { 10677b20fc2fSAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 10687b20fc2fSAndrew Boyer uint32_t i; 10697b20fc2fSAndrew Boyer int err; 10707b20fc2fSAndrew Boyer 10717b20fc2fSAndrew Boyer for (i = 1; i < q->num_descs; i++) { 10727b20fc2fSAndrew Boyer err = ionic_rx_fill_one(rxq); 10737b20fc2fSAndrew Boyer if (err) 10747b20fc2fSAndrew Boyer return err; 10757b20fc2fSAndrew Boyer 1076dd10c5b4SAndrew Boyer q->head_idx = Q_NEXT_TO_POST(q, 1); 1077a27d9013SAlfredo Cardigliano } 1078a27d9013SAlfredo Cardigliano 107977c60793SAndrew Boyer ionic_q_flush(q); 108077c60793SAndrew Boyer 1081a27d9013SAlfredo Cardigliano return 0; 1082a27d9013SAlfredo Cardigliano } 1083a27d9013SAlfredo Cardigliano 1084a27d9013SAlfredo Cardigliano /* 10857b2eb674SAndrew Boyer * Perform one-time initialization of descriptor fields 10867b2eb674SAndrew Boyer * which will not change for the life of the queue. 10877b2eb674SAndrew Boyer */ 10887b2eb674SAndrew Boyer static void __rte_cold 10897b2eb674SAndrew Boyer ionic_rx_init_descriptors(struct ionic_rx_qcq *rxq) 10907b2eb674SAndrew Boyer { 10917b2eb674SAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 10927b2eb674SAndrew Boyer struct ionic_rxq_desc *desc, *desc_base = q->base; 10937b2eb674SAndrew Boyer struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base; 10947b2eb674SAndrew Boyer uint32_t i, j; 10957b2eb674SAndrew Boyer uint8_t opcode; 10967b2eb674SAndrew Boyer 10977b2eb674SAndrew Boyer opcode = (q->num_segs > 1) ? 10987b2eb674SAndrew Boyer IONIC_RXQ_DESC_OPCODE_SG : IONIC_RXQ_DESC_OPCODE_SIMPLE; 10997b2eb674SAndrew Boyer 11007b2eb674SAndrew Boyer /* 11017b2eb674SAndrew Boyer * NB: Only the first segment needs to leave headroom (hdr_seg_size). 11027b2eb674SAndrew Boyer * Later segments (seg_size) do not. 
/*
 * Perform one-time initialization of descriptor fields
 * which will not change for the life of the queue.
 */
static void __rte_cold
ionic_rx_init_descriptors(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	uint32_t i, j;
	uint8_t opcode;

	/* Scatter-gather opcode only when more than one segment per pkt */
	opcode = (q->num_segs > 1) ?
		IONIC_RXQ_DESC_OPCODE_SG : IONIC_RXQ_DESC_OPCODE_SIMPLE;

	/*
	 * NB: Only the first segment needs to leave headroom (hdr_seg_size).
	 * Later segments (seg_size) do not.
	 */
	for (i = 0; i < q->num_descs; i++) {
		desc = &desc_base[i];
		desc->len = rte_cpu_to_le_16(rxq->hdr_seg_size);
		desc->opcode = opcode;

		sg_desc = &sg_desc_base[i];
		for (j = 0; j < q->num_segs - 1u; j++)
			sg_desc->elems[j].len =
				rte_cpu_to_le_16(rxq->seg_size);
	}
}

/*
 * Start Receive Units for specified queue.
 *
 * Recomputes the per-packet segment count from the current frame size,
 * writes the static descriptor fields, initializes the queue on the
 * device, and posts buffers to the whole ring.  Idempotent: returns 0
 * immediately if the queue is already started.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq;
	struct ionic_queue *q;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	q = &rxq->qcq.q;

	rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;

	/* Recalculate segment count based on MTU */
	q->num_segs = 1 +
		(rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size;

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u segs %u",
		rx_queue_id, q->num_descs, rxq->frame_size, q->num_segs);

	ionic_rx_init_descriptors(rxq);

	err = ionic_lif_rxq_init(rxq);
	if (err)
		return err;

	/* Allocate buffers for descriptor rings */
	if (ionic_rx_fill(rxq) != 0) {
		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
			rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
/*
 * Walk the CQ to find completed receive descriptors.
 * Any completed descriptor found is refilled.
 *
 * The completion's color bit flips on every wraparound of the CQ; an
 * entry is valid when its color matches cq->done_color.  For each valid
 * completion: clean the descriptor into rx_svc, refill the same slot,
 * and advance both queue indices in lockstep.  Rings the doorbell once
 * if any work was done.
 */
static __rte_always_inline void
ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *q_desc_base = q->base;
	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
	uint32_t work_done = 0;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		/* Color expectation flips each time the CQ wraps */
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 8 x 8B bufinfo */
		rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));
		/* Prefetch 4 x 16B comp */
		rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
		/* Prefetch 4 x 16B descriptors */
		if (!(rxq->flags & IONIC_QCQ_F_CMB))
			rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);

		/* Deliver the packet chain at the q tail */
		ionic_rx_clean_one(rxq, cq_desc, rx_svc);

		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

		/* Refill the slot just freed (ignores transient ENOMEM) */
		(void)ionic_rx_fill_one(rxq);

		q->head_idx = Q_NEXT_TO_POST(q, 1);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	/* Update the queue indices and ring the doorbell */
	if (work_done)
		ionic_q_flush(q);
}
Cardigliano if (++work_done == work_to_do) 1202a27d9013SAlfredo Cardigliano break; 1203a27d9013SAlfredo Cardigliano 1204a27d9013SAlfredo Cardigliano cq_desc = &cq_desc_base[cq->tail_idx]; 1205a27d9013SAlfredo Cardigliano } 12067b20fc2fSAndrew Boyer 12077b20fc2fSAndrew Boyer /* Update the queue indices and ring the doorbell */ 12087b20fc2fSAndrew Boyer if (work_done) 12097b20fc2fSAndrew Boyer ionic_q_flush(q); 1210a27d9013SAlfredo Cardigliano } 1211a27d9013SAlfredo Cardigliano 1212a27d9013SAlfredo Cardigliano /* 1213a27d9013SAlfredo Cardigliano * Stop Receive Units for specified queue. 1214a27d9013SAlfredo Cardigliano */ 1215ce6427ddSThomas Monjalon int __rte_cold 1216a27d9013SAlfredo Cardigliano ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) 1217a27d9013SAlfredo Cardigliano { 1218e7222f94SAndrew Boyer uint8_t *rx_queue_state = eth_dev->data->rx_queue_state; 1219e7222f94SAndrew Boyer struct ionic_rx_stats *stats; 1220be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq; 1221a27d9013SAlfredo Cardigliano 12224ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id); 1223a27d9013SAlfredo Cardigliano 1224a27d9013SAlfredo Cardigliano rxq = eth_dev->data->rx_queues[rx_queue_id]; 1225a27d9013SAlfredo Cardigliano 1226e7222f94SAndrew Boyer rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 12279fdf11c4SAndrew Boyer 1228e7222f94SAndrew Boyer ionic_lif_rxq_deinit(rxq); 1229a27d9013SAlfredo Cardigliano 1230e7222f94SAndrew Boyer /* Free all buffers from descriptor ring */ 1231e7222f94SAndrew Boyer ionic_rx_empty(rxq); 1232e7222f94SAndrew Boyer 1233e7222f94SAndrew Boyer stats = &rxq->stats; 1234e7222f94SAndrew Boyer IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju", 1235e7222f94SAndrew Boyer rxq->qcq.q.index, stats->packets, stats->mtods); 1236a27d9013SAlfredo Cardigliano 1237a27d9013SAlfredo Cardigliano return 0; 1238a27d9013SAlfredo Cardigliano } 1239a27d9013SAlfredo Cardigliano 1240a27d9013SAlfredo Cardigliano uint16_t 
1241a27d9013SAlfredo Cardigliano ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 1242a27d9013SAlfredo Cardigliano uint16_t nb_pkts) 1243a27d9013SAlfredo Cardigliano { 1244be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq = rx_queue; 124514f534beSAndrew Boyer struct ionic_rx_service rx_svc; 1246a27d9013SAlfredo Cardigliano 124714f534beSAndrew Boyer rx_svc.rx_pkts = rx_pkts; 124814f534beSAndrew Boyer rx_svc.nb_rx = 0; 1249a27d9013SAlfredo Cardigliano 125014f534beSAndrew Boyer ionic_rxq_service(rxq, nb_pkts, &rx_svc); 1251a27d9013SAlfredo Cardigliano 125214f534beSAndrew Boyer return rx_svc.nb_rx; 1253a27d9013SAlfredo Cardigliano } 12540983a74aSAndrew Boyer 12550983a74aSAndrew Boyer int 12560983a74aSAndrew Boyer ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset) 12570983a74aSAndrew Boyer { 12580983a74aSAndrew Boyer struct ionic_rx_qcq *rxq = rx_queue; 12590983a74aSAndrew Boyer struct ionic_qcq *qcq = &rxq->qcq; 12600983a74aSAndrew Boyer struct ionic_rxq_comp *cq_desc; 12610983a74aSAndrew Boyer uint16_t mask, head, tail, pos; 12620983a74aSAndrew Boyer bool done_color; 12630983a74aSAndrew Boyer 12640983a74aSAndrew Boyer mask = qcq->q.size_mask; 12650983a74aSAndrew Boyer 12660983a74aSAndrew Boyer /* offset must be within the size of the ring */ 12670983a74aSAndrew Boyer if (offset > mask) 12680983a74aSAndrew Boyer return -EINVAL; 12690983a74aSAndrew Boyer 12700983a74aSAndrew Boyer head = qcq->q.head_idx; 12710983a74aSAndrew Boyer tail = qcq->q.tail_idx; 12720983a74aSAndrew Boyer 12730983a74aSAndrew Boyer /* offset is beyond what is posted */ 12740983a74aSAndrew Boyer if (offset >= ((head - tail) & mask)) 12750983a74aSAndrew Boyer return RTE_ETH_RX_DESC_UNAVAIL; 12760983a74aSAndrew Boyer 12770983a74aSAndrew Boyer /* interested in this absolute position in the rxq */ 12780983a74aSAndrew Boyer pos = (tail + offset) & mask; 12790983a74aSAndrew Boyer 12800983a74aSAndrew Boyer /* rx cq position == rx q position */ 12810983a74aSAndrew Boyer cq_desc = 
qcq->cq.base; 12820983a74aSAndrew Boyer cq_desc = &cq_desc[pos]; 12830983a74aSAndrew Boyer 12840983a74aSAndrew Boyer /* expected done color at this position */ 12850983a74aSAndrew Boyer done_color = qcq->cq.done_color != (pos < tail); 12860983a74aSAndrew Boyer 12870983a74aSAndrew Boyer /* has the hw indicated the done color at this position? */ 12880983a74aSAndrew Boyer if (color_match(cq_desc->pkt_type_color, done_color)) 12890983a74aSAndrew Boyer return RTE_ETH_RX_DESC_DONE; 12900983a74aSAndrew Boyer 12910983a74aSAndrew Boyer return RTE_ETH_RX_DESC_AVAIL; 12920983a74aSAndrew Boyer } 129360625147SAndrew Boyer 129460625147SAndrew Boyer int 129560625147SAndrew Boyer ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset) 129660625147SAndrew Boyer { 129760625147SAndrew Boyer struct ionic_tx_qcq *txq = tx_queue; 129860625147SAndrew Boyer struct ionic_qcq *qcq = &txq->qcq; 129960625147SAndrew Boyer struct ionic_txq_comp *cq_desc; 130060625147SAndrew Boyer uint16_t mask, head, tail, pos, cq_pos; 130160625147SAndrew Boyer bool done_color; 130260625147SAndrew Boyer 130360625147SAndrew Boyer mask = qcq->q.size_mask; 130460625147SAndrew Boyer 130560625147SAndrew Boyer /* offset must be within the size of the ring */ 130660625147SAndrew Boyer if (offset > mask) 130760625147SAndrew Boyer return -EINVAL; 130860625147SAndrew Boyer 130960625147SAndrew Boyer head = qcq->q.head_idx; 131060625147SAndrew Boyer tail = qcq->q.tail_idx; 131160625147SAndrew Boyer 131260625147SAndrew Boyer /* offset is beyond what is posted */ 131360625147SAndrew Boyer if (offset >= ((head - tail) & mask)) 131460625147SAndrew Boyer return RTE_ETH_TX_DESC_DONE; 131560625147SAndrew Boyer 131660625147SAndrew Boyer /* interested in this absolute position in the txq */ 131760625147SAndrew Boyer pos = (tail + offset) & mask; 131860625147SAndrew Boyer 131960625147SAndrew Boyer /* tx cq position != tx q position, need to walk cq */ 132060625147SAndrew Boyer cq_pos = qcq->cq.tail_idx; 132160625147SAndrew 
Boyer cq_desc = qcq->cq.base; 132260625147SAndrew Boyer cq_desc = &cq_desc[cq_pos]; 132360625147SAndrew Boyer 132460625147SAndrew Boyer /* how far behind is pos from head? */ 132560625147SAndrew Boyer offset = (head - pos) & mask; 132660625147SAndrew Boyer 132760625147SAndrew Boyer /* walk cq descriptors that match the expected done color */ 132860625147SAndrew Boyer done_color = qcq->cq.done_color; 132960625147SAndrew Boyer while (color_match(cq_desc->color, done_color)) { 133060625147SAndrew Boyer /* is comp index no further behind than pos? */ 133160625147SAndrew Boyer tail = rte_cpu_to_le_16(cq_desc->comp_index); 133260625147SAndrew Boyer if (((head - tail) & mask) <= offset) 133360625147SAndrew Boyer return RTE_ETH_TX_DESC_DONE; 133460625147SAndrew Boyer 133560625147SAndrew Boyer cq_pos = (cq_pos + 1) & mask; 133660625147SAndrew Boyer cq_desc = qcq->cq.base; 133760625147SAndrew Boyer cq_desc = &cq_desc[cq_pos]; 133860625147SAndrew Boyer 133960625147SAndrew Boyer done_color = done_color != (cq_pos == 0); 134060625147SAndrew Boyer } 134160625147SAndrew Boyer 134260625147SAndrew Boyer return RTE_ETH_TX_DESC_FULL; 134360625147SAndrew Boyer } 1344