176668754SAndrew Boyer /* SPDX-License-Identifier: BSD-3-Clause 2a5205992SAndrew Boyer * Copyright 2018-2022 Advanced Micro Devices, Inc. 3a27d9013SAlfredo Cardigliano */ 4a27d9013SAlfredo Cardigliano 5a27d9013SAlfredo Cardigliano #include <sys/queue.h> 6a27d9013SAlfredo Cardigliano #include <stdio.h> 7a27d9013SAlfredo Cardigliano #include <stdlib.h> 8a27d9013SAlfredo Cardigliano #include <string.h> 9a27d9013SAlfredo Cardigliano #include <errno.h> 10a27d9013SAlfredo Cardigliano #include <stdint.h> 11a27d9013SAlfredo Cardigliano #include <stdarg.h> 12a27d9013SAlfredo Cardigliano #include <unistd.h> 13a27d9013SAlfredo Cardigliano #include <inttypes.h> 14a27d9013SAlfredo Cardigliano 15a27d9013SAlfredo Cardigliano #include <rte_byteorder.h> 16a27d9013SAlfredo Cardigliano #include <rte_common.h> 17a27d9013SAlfredo Cardigliano #include <rte_cycles.h> 18a27d9013SAlfredo Cardigliano #include <rte_log.h> 19a27d9013SAlfredo Cardigliano #include <rte_debug.h> 20a27d9013SAlfredo Cardigliano #include <rte_interrupts.h> 21a27d9013SAlfredo Cardigliano #include <rte_pci.h> 22a27d9013SAlfredo Cardigliano #include <rte_memory.h> 23a27d9013SAlfredo Cardigliano #include <rte_memzone.h> 24a27d9013SAlfredo Cardigliano #include <rte_launch.h> 25a27d9013SAlfredo Cardigliano #include <rte_eal.h> 26a27d9013SAlfredo Cardigliano #include <rte_per_lcore.h> 27a27d9013SAlfredo Cardigliano #include <rte_lcore.h> 28a27d9013SAlfredo Cardigliano #include <rte_atomic.h> 29a27d9013SAlfredo Cardigliano #include <rte_branch_prediction.h> 30a27d9013SAlfredo Cardigliano #include <rte_mempool.h> 31a27d9013SAlfredo Cardigliano #include <rte_malloc.h> 32a27d9013SAlfredo Cardigliano #include <rte_mbuf.h> 33a27d9013SAlfredo Cardigliano #include <rte_ether.h> 34df96fd0dSBruce Richardson #include <ethdev_driver.h> 35a27d9013SAlfredo Cardigliano #include <rte_prefetch.h> 36a27d9013SAlfredo Cardigliano #include <rte_udp.h> 37a27d9013SAlfredo Cardigliano #include <rte_tcp.h> 38a27d9013SAlfredo Cardigliano #include 
<rte_sctp.h> 39a27d9013SAlfredo Cardigliano #include <rte_string_fns.h> 40a27d9013SAlfredo Cardigliano #include <rte_errno.h> 41a27d9013SAlfredo Cardigliano #include <rte_ip.h> 42a27d9013SAlfredo Cardigliano #include <rte_net.h> 43a27d9013SAlfredo Cardigliano 44a27d9013SAlfredo Cardigliano #include "ionic_logs.h" 45a27d9013SAlfredo Cardigliano #include "ionic_mac_api.h" 46a27d9013SAlfredo Cardigliano #include "ionic_ethdev.h" 47a27d9013SAlfredo Cardigliano #include "ionic_lif.h" 48a27d9013SAlfredo Cardigliano #include "ionic_rxtx.h" 49a27d9013SAlfredo Cardigliano 50e7222f94SAndrew Boyer static void 51e7222f94SAndrew Boyer ionic_empty_array(void **array, uint32_t cnt, uint16_t idx) 52e7222f94SAndrew Boyer { 53e7222f94SAndrew Boyer uint32_t i; 54e7222f94SAndrew Boyer 55e7222f94SAndrew Boyer for (i = idx; i < cnt; i++) 56e7222f94SAndrew Boyer if (array[i]) 57e7222f94SAndrew Boyer rte_pktmbuf_free_seg(array[i]); 58e7222f94SAndrew Boyer 59e7222f94SAndrew Boyer memset(array, 0, sizeof(void *) * cnt); 60e7222f94SAndrew Boyer } 61e7222f94SAndrew Boyer 62e7222f94SAndrew Boyer static void __rte_cold 63e7222f94SAndrew Boyer ionic_tx_empty(struct ionic_tx_qcq *txq) 64e7222f94SAndrew Boyer { 65e7222f94SAndrew Boyer struct ionic_queue *q = &txq->qcq.q; 66e7222f94SAndrew Boyer 67b4beb84aSAndrew Boyer ionic_empty_array(q->info, q->num_descs * q->num_segs, 0); 68e7222f94SAndrew Boyer } 69e7222f94SAndrew Boyer 70e7222f94SAndrew Boyer static void __rte_cold 71e7222f94SAndrew Boyer ionic_rx_empty(struct ionic_rx_qcq *rxq) 72e7222f94SAndrew Boyer { 73e7222f94SAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 74e7222f94SAndrew Boyer 757b20fc2fSAndrew Boyer /* 767b20fc2fSAndrew Boyer * Walk the full info array so that the clean up includes any 777b20fc2fSAndrew Boyer * fragments that were left dangling for later reuse 787b20fc2fSAndrew Boyer */ 797b20fc2fSAndrew Boyer ionic_empty_array(q->info, q->num_descs * q->num_segs, 0); 80218afd82SAndrew Boyer 81218afd82SAndrew Boyer 
ionic_empty_array((void **)rxq->mbs, 82218afd82SAndrew Boyer IONIC_MBUF_BULK_ALLOC, rxq->mb_idx); 83218afd82SAndrew Boyer rxq->mb_idx = 0; 84e7222f94SAndrew Boyer } 85e7222f94SAndrew Boyer 86a27d9013SAlfredo Cardigliano /********************************************************************* 87a27d9013SAlfredo Cardigliano * 88a27d9013SAlfredo Cardigliano * TX functions 89a27d9013SAlfredo Cardigliano * 90a27d9013SAlfredo Cardigliano **********************************************************************/ 91a27d9013SAlfredo Cardigliano 92a27d9013SAlfredo Cardigliano void 93a27d9013SAlfredo Cardigliano ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 94a27d9013SAlfredo Cardigliano struct rte_eth_txq_info *qinfo) 95a27d9013SAlfredo Cardigliano { 96be39f75cSAndrew Boyer struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id]; 97be39f75cSAndrew Boyer struct ionic_queue *q = &txq->qcq.q; 98a27d9013SAlfredo Cardigliano 99a27d9013SAlfredo Cardigliano qinfo->nb_desc = q->num_descs; 10068591087SAndrew Boyer qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads; 1019ac234eeSAndrew Boyer if (txq->flags & IONIC_QCQ_F_FAST_FREE) 1029ac234eeSAndrew Boyer qinfo->conf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE; 10302eabf57SAndrew Boyer qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED; 104a27d9013SAlfredo Cardigliano } 105a27d9013SAlfredo Cardigliano 1060de3e209SAndrew Boyer static __rte_always_inline void 107be39f75cSAndrew Boyer ionic_tx_flush(struct ionic_tx_qcq *txq) 108a27d9013SAlfredo Cardigliano { 109be39f75cSAndrew Boyer struct ionic_cq *cq = &txq->qcq.cq; 110be39f75cSAndrew Boyer struct ionic_queue *q = &txq->qcq.q; 111b4beb84aSAndrew Boyer struct rte_mbuf *txm; 112b4beb84aSAndrew Boyer struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base; 113700f974dSAndrew Boyer void **info; 114b4beb84aSAndrew Boyer uint32_t i; 115a27d9013SAlfredo Cardigliano 116a27d9013SAlfredo Cardigliano cq_desc = &cq_desc_base[cq->tail_idx]; 117b4beb84aSAndrew 
Boyer 118a27d9013SAlfredo Cardigliano while (color_match(cq_desc->color, cq->done_color)) { 1192aed9865SAndrew Boyer cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1); 120a27d9013SAlfredo Cardigliano if (cq->tail_idx == 0) 121a27d9013SAlfredo Cardigliano cq->done_color = !cq->done_color; 122a27d9013SAlfredo Cardigliano 123b4beb84aSAndrew Boyer /* Prefetch 4 x 16B comp at cq->tail_idx + 4 */ 124b4beb84aSAndrew Boyer if ((cq->tail_idx & 0x3) == 0) 125b4beb84aSAndrew Boyer rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]); 126a27d9013SAlfredo Cardigliano 127b4beb84aSAndrew Boyer while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) { 128b4beb84aSAndrew Boyer /* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */ 129b4beb84aSAndrew Boyer rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2))); 130a27d9013SAlfredo Cardigliano 131b4beb84aSAndrew Boyer /* Prefetch next mbuf */ 132b4beb84aSAndrew Boyer void **next_info = 133b4beb84aSAndrew Boyer IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1)); 134b4beb84aSAndrew Boyer if (next_info[0]) 135b4beb84aSAndrew Boyer rte_mbuf_prefetch_part2(next_info[0]); 136b4beb84aSAndrew Boyer if (next_info[1]) 137b4beb84aSAndrew Boyer rte_mbuf_prefetch_part2(next_info[1]); 138b4beb84aSAndrew Boyer 139700f974dSAndrew Boyer info = IONIC_INFO_PTR(q, q->tail_idx); 140b4beb84aSAndrew Boyer for (i = 0; i < q->num_segs; i++) { 141b4beb84aSAndrew Boyer txm = info[i]; 142b4beb84aSAndrew Boyer if (!txm) 143b4beb84aSAndrew Boyer break; 144b4beb84aSAndrew Boyer 1459ac234eeSAndrew Boyer if (txq->flags & IONIC_QCQ_F_FAST_FREE) 1469ac234eeSAndrew Boyer rte_mempool_put(txm->pool, txm); 1479ac234eeSAndrew Boyer else 148b4beb84aSAndrew Boyer rte_pktmbuf_free_seg(txm); 149b4beb84aSAndrew Boyer 150b4beb84aSAndrew Boyer info[i] = NULL; 151b4beb84aSAndrew Boyer } 152a27d9013SAlfredo Cardigliano 1534ad56b7aSAndrew Boyer q->tail_idx = Q_NEXT_TO_SRVC(q, 1); 154a27d9013SAlfredo Cardigliano } 155b4beb84aSAndrew Boyer 156b4beb84aSAndrew Boyer cq_desc = &cq_desc_base[cq->tail_idx]; 
157a27d9013SAlfredo Cardigliano } 158a27d9013SAlfredo Cardigliano } 159a27d9013SAlfredo Cardigliano 160ce6427ddSThomas Monjalon void __rte_cold 1617483341aSXueming Li ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 162a27d9013SAlfredo Cardigliano { 1637483341aSXueming Li struct ionic_tx_qcq *txq = dev->data->tx_queues[qid]; 164a27d9013SAlfredo Cardigliano 165a27d9013SAlfredo Cardigliano IONIC_PRINT_CALL(); 166a27d9013SAlfredo Cardigliano 167be39f75cSAndrew Boyer ionic_qcq_free(&txq->qcq); 168a27d9013SAlfredo Cardigliano } 169a27d9013SAlfredo Cardigliano 170ce6427ddSThomas Monjalon int __rte_cold 171a27d9013SAlfredo Cardigliano ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) 172a27d9013SAlfredo Cardigliano { 173e7222f94SAndrew Boyer struct ionic_tx_stats *stats; 174be39f75cSAndrew Boyer struct ionic_tx_qcq *txq; 175a27d9013SAlfredo Cardigliano 1764ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id); 177a27d9013SAlfredo Cardigliano 178a27d9013SAlfredo Cardigliano txq = eth_dev->data->tx_queues[tx_queue_id]; 179a27d9013SAlfredo Cardigliano 1809fdf11c4SAndrew Boyer eth_dev->data->tx_queue_state[tx_queue_id] = 1819fdf11c4SAndrew Boyer RTE_ETH_QUEUE_STATE_STOPPED; 1829fdf11c4SAndrew Boyer 183a27d9013SAlfredo Cardigliano /* 184a27d9013SAlfredo Cardigliano * Note: we should better post NOP Tx desc and wait for its completion 185a27d9013SAlfredo Cardigliano * before disabling Tx queue 186a27d9013SAlfredo Cardigliano */ 187a27d9013SAlfredo Cardigliano 188e7222f94SAndrew Boyer ionic_lif_txq_deinit(txq); 189a27d9013SAlfredo Cardigliano 190e7222f94SAndrew Boyer /* Free all buffers from descriptor ring */ 191e7222f94SAndrew Boyer ionic_tx_empty(txq); 192e7222f94SAndrew Boyer 193e7222f94SAndrew Boyer stats = &txq->stats; 194e7222f94SAndrew Boyer IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju", 195e7222f94SAndrew Boyer txq->qcq.q.index, stats->packets, stats->tso); 196a27d9013SAlfredo Cardigliano 
197a27d9013SAlfredo Cardigliano return 0; 198a27d9013SAlfredo Cardigliano } 199a27d9013SAlfredo Cardigliano 200ce6427ddSThomas Monjalon int __rte_cold 201a27d9013SAlfredo Cardigliano ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id, 2024ae96cb8SAndrew Boyer uint16_t nb_desc, uint32_t socket_id, 203a27d9013SAlfredo Cardigliano const struct rte_eth_txconf *tx_conf) 204a27d9013SAlfredo Cardigliano { 205a27d9013SAlfredo Cardigliano struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); 206be39f75cSAndrew Boyer struct ionic_tx_qcq *txq; 207a27d9013SAlfredo Cardigliano uint64_t offloads; 208a27d9013SAlfredo Cardigliano int err; 209a27d9013SAlfredo Cardigliano 210a27d9013SAlfredo Cardigliano if (tx_queue_id >= lif->ntxqcqs) { 211a27d9013SAlfredo Cardigliano IONIC_PRINT(DEBUG, "Queue index %u not available " 212a27d9013SAlfredo Cardigliano "(max %u queues)", 213a27d9013SAlfredo Cardigliano tx_queue_id, lif->ntxqcqs); 214a27d9013SAlfredo Cardigliano return -EINVAL; 215a27d9013SAlfredo Cardigliano } 216a27d9013SAlfredo Cardigliano 217a27d9013SAlfredo Cardigliano offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads; 2184ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, 2194ae96cb8SAndrew Boyer "Configuring skt %u TX queue %u with %u buffers, offloads %jx", 2204ae96cb8SAndrew Boyer socket_id, tx_queue_id, nb_desc, offloads); 221a27d9013SAlfredo Cardigliano 222a27d9013SAlfredo Cardigliano /* Validate number of receive descriptors */ 223a27d9013SAlfredo Cardigliano if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC) 224a27d9013SAlfredo Cardigliano return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */ 225a27d9013SAlfredo Cardigliano 226a27d9013SAlfredo Cardigliano /* Free memory prior to re-allocation if needed... 
*/ 227a27d9013SAlfredo Cardigliano if (eth_dev->data->tx_queues[tx_queue_id] != NULL) { 2287483341aSXueming Li ionic_dev_tx_queue_release(eth_dev, tx_queue_id); 229a27d9013SAlfredo Cardigliano eth_dev->data->tx_queues[tx_queue_id] = NULL; 230a27d9013SAlfredo Cardigliano } 231a27d9013SAlfredo Cardigliano 2329fdf11c4SAndrew Boyer eth_dev->data->tx_queue_state[tx_queue_id] = 2339fdf11c4SAndrew Boyer RTE_ETH_QUEUE_STATE_STOPPED; 2349fdf11c4SAndrew Boyer 2358ec5ad7fSAndrew Boyer err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq); 236a27d9013SAlfredo Cardigliano if (err) { 237a27d9013SAlfredo Cardigliano IONIC_PRINT(DEBUG, "Queue allocation failure"); 238a27d9013SAlfredo Cardigliano return -EINVAL; 239a27d9013SAlfredo Cardigliano } 240a27d9013SAlfredo Cardigliano 241a27d9013SAlfredo Cardigliano /* Do not start queue with rte_eth_dev_start() */ 24202eabf57SAndrew Boyer if (tx_conf->tx_deferred_start) 24302eabf57SAndrew Boyer txq->flags |= IONIC_QCQ_F_DEFERRED; 244a27d9013SAlfredo Cardigliano 24568591087SAndrew Boyer /* Convert the offload flags into queue flags */ 246295968d1SFerruh Yigit if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) 24768591087SAndrew Boyer txq->flags |= IONIC_QCQ_F_CSUM_L3; 248295968d1SFerruh Yigit if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) 24968591087SAndrew Boyer txq->flags |= IONIC_QCQ_F_CSUM_TCP; 250295968d1SFerruh Yigit if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) 25168591087SAndrew Boyer txq->flags |= IONIC_QCQ_F_CSUM_UDP; 2529ac234eeSAndrew Boyer if (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE) 2539ac234eeSAndrew Boyer txq->flags |= IONIC_QCQ_F_FAST_FREE; 254a27d9013SAlfredo Cardigliano 255a27d9013SAlfredo Cardigliano eth_dev->data->tx_queues[tx_queue_id] = txq; 256a27d9013SAlfredo Cardigliano 257a27d9013SAlfredo Cardigliano return 0; 258a27d9013SAlfredo Cardigliano } 259a27d9013SAlfredo Cardigliano 260a27d9013SAlfredo Cardigliano /* 261a27d9013SAlfredo Cardigliano * Start Transmit Units for specified queue. 
262a27d9013SAlfredo Cardigliano */ 263ce6427ddSThomas Monjalon int __rte_cold 264a27d9013SAlfredo Cardigliano ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) 265a27d9013SAlfredo Cardigliano { 2669fdf11c4SAndrew Boyer uint8_t *tx_queue_state = eth_dev->data->tx_queue_state; 267be39f75cSAndrew Boyer struct ionic_tx_qcq *txq; 268a27d9013SAlfredo Cardigliano int err; 269a27d9013SAlfredo Cardigliano 2709fdf11c4SAndrew Boyer if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) { 2719fdf11c4SAndrew Boyer IONIC_PRINT(DEBUG, "TX queue %u already started", 2729fdf11c4SAndrew Boyer tx_queue_id); 2739fdf11c4SAndrew Boyer return 0; 2749fdf11c4SAndrew Boyer } 2759fdf11c4SAndrew Boyer 276a27d9013SAlfredo Cardigliano txq = eth_dev->data->tx_queues[tx_queue_id]; 277a27d9013SAlfredo Cardigliano 2784ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs", 279be39f75cSAndrew Boyer tx_queue_id, txq->qcq.q.num_descs); 2804ae96cb8SAndrew Boyer 281a27d9013SAlfredo Cardigliano err = ionic_lif_txq_init(txq); 282a27d9013SAlfredo Cardigliano if (err) 283a27d9013SAlfredo Cardigliano return err; 284a27d9013SAlfredo Cardigliano 2859fdf11c4SAndrew Boyer tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 286a27d9013SAlfredo Cardigliano 287a27d9013SAlfredo Cardigliano return 0; 288a27d9013SAlfredo Cardigliano } 289a27d9013SAlfredo Cardigliano 290a27d9013SAlfredo Cardigliano static void 29164b08152SAlfredo Cardigliano ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm) 29264b08152SAlfredo Cardigliano { 29364b08152SAlfredo Cardigliano struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *); 29464b08152SAlfredo Cardigliano char *l3_hdr = ((char *)eth_hdr) + txm->l2_len; 29564b08152SAlfredo Cardigliano struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *) 29664b08152SAlfredo Cardigliano (l3_hdr + txm->l3_len); 29764b08152SAlfredo Cardigliano 298daa02b5cSOlivier Matz if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { 29964b08152SAlfredo 
Cardigliano struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 30064b08152SAlfredo Cardigliano ipv4_hdr->hdr_checksum = 0; 30164b08152SAlfredo Cardigliano tcp_hdr->cksum = 0; 30264b08152SAlfredo Cardigliano tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr); 30364b08152SAlfredo Cardigliano } else { 30464b08152SAlfredo Cardigliano struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; 30564b08152SAlfredo Cardigliano tcp_hdr->cksum = 0; 30664b08152SAlfredo Cardigliano tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr); 30764b08152SAlfredo Cardigliano } 30864b08152SAlfredo Cardigliano } 30964b08152SAlfredo Cardigliano 31064b08152SAlfredo Cardigliano static void 31164b08152SAlfredo Cardigliano ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm) 31264b08152SAlfredo Cardigliano { 31364b08152SAlfredo Cardigliano struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *); 31464b08152SAlfredo Cardigliano char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len + 31564b08152SAlfredo Cardigliano txm->outer_l3_len + txm->l2_len; 31664b08152SAlfredo Cardigliano struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *) 31764b08152SAlfredo Cardigliano (l3_hdr + txm->l3_len); 31864b08152SAlfredo Cardigliano 319daa02b5cSOlivier Matz if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) { 32064b08152SAlfredo Cardigliano struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 32164b08152SAlfredo Cardigliano ipv4_hdr->hdr_checksum = 0; 32264b08152SAlfredo Cardigliano tcp_hdr->cksum = 0; 32364b08152SAlfredo Cardigliano tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr); 32464b08152SAlfredo Cardigliano } else { 32564b08152SAlfredo Cardigliano struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; 32664b08152SAlfredo Cardigliano tcp_hdr->cksum = 0; 32764b08152SAlfredo Cardigliano tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr); 32864b08152SAlfredo Cardigliano } 32964b08152SAlfredo Cardigliano } 33064b08152SAlfredo Cardigliano 
33164b08152SAlfredo Cardigliano static void 332a27d9013SAlfredo Cardigliano ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc, 333a27d9013SAlfredo Cardigliano struct rte_mbuf *txm, 334a27d9013SAlfredo Cardigliano rte_iova_t addr, uint8_t nsge, uint16_t len, 335a27d9013SAlfredo Cardigliano uint32_t hdrlen, uint32_t mss, 33664b08152SAlfredo Cardigliano bool encap, 337a27d9013SAlfredo Cardigliano uint16_t vlan_tci, bool has_vlan, 338a27d9013SAlfredo Cardigliano bool start, bool done) 339a27d9013SAlfredo Cardigliano { 340b4beb84aSAndrew Boyer struct rte_mbuf *txm_seg; 341dd10c5b4SAndrew Boyer void **info; 3424a735599SAndrew Boyer uint64_t cmd; 343a27d9013SAlfredo Cardigliano uint8_t flags = 0; 344b4beb84aSAndrew Boyer int i; 345b4beb84aSAndrew Boyer 346a27d9013SAlfredo Cardigliano flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0; 34764b08152SAlfredo Cardigliano flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0; 348a27d9013SAlfredo Cardigliano flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0; 349a27d9013SAlfredo Cardigliano flags |= done ? 
IONIC_TXQ_DESC_FLAG_TSO_EOT : 0; 350a27d9013SAlfredo Cardigliano 3514a735599SAndrew Boyer cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO, 352a27d9013SAlfredo Cardigliano flags, nsge, addr); 3534a735599SAndrew Boyer desc->cmd = rte_cpu_to_le_64(cmd); 3544a735599SAndrew Boyer desc->len = rte_cpu_to_le_16(len); 3554a735599SAndrew Boyer desc->vlan_tci = rte_cpu_to_le_16(vlan_tci); 3564a735599SAndrew Boyer desc->hdr_len = rte_cpu_to_le_16(hdrlen); 3574a735599SAndrew Boyer desc->mss = rte_cpu_to_le_16(mss); 358a27d9013SAlfredo Cardigliano 359dd10c5b4SAndrew Boyer if (done) { 360dd10c5b4SAndrew Boyer info = IONIC_INFO_PTR(q, q->head_idx); 361b4beb84aSAndrew Boyer 362b4beb84aSAndrew Boyer /* Walk the mbuf chain to stash pointers in the array */ 363b4beb84aSAndrew Boyer txm_seg = txm; 364b4beb84aSAndrew Boyer for (i = 0; i < txm->nb_segs; i++) { 365b4beb84aSAndrew Boyer info[i] = txm_seg; 366b4beb84aSAndrew Boyer txm_seg = txm_seg->next; 367b4beb84aSAndrew Boyer } 368dd10c5b4SAndrew Boyer } 369dd10c5b4SAndrew Boyer 370dd10c5b4SAndrew Boyer q->head_idx = Q_NEXT_TO_POST(q, 1); 371a27d9013SAlfredo Cardigliano } 372a27d9013SAlfredo Cardigliano 373a27d9013SAlfredo Cardigliano static struct ionic_txq_desc * 374be39f75cSAndrew Boyer ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem) 375a27d9013SAlfredo Cardigliano { 376be39f75cSAndrew Boyer struct ionic_queue *q = &txq->qcq.q; 377a27d9013SAlfredo Cardigliano struct ionic_txq_desc *desc_base = q->base; 37856117636SAndrew Boyer struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base; 379a27d9013SAlfredo Cardigliano struct ionic_txq_desc *desc = &desc_base[q->head_idx]; 38056117636SAndrew Boyer struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx]; 381a27d9013SAlfredo Cardigliano 382a27d9013SAlfredo Cardigliano *elem = sg_desc->elems; 383a27d9013SAlfredo Cardigliano return desc; 384a27d9013SAlfredo Cardigliano } 385a27d9013SAlfredo Cardigliano 386a27d9013SAlfredo Cardigliano static int 
38777c60793SAndrew Boyer ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm) 388a27d9013SAlfredo Cardigliano { 389be39f75cSAndrew Boyer struct ionic_queue *q = &txq->qcq.q; 390be39f75cSAndrew Boyer struct ionic_tx_stats *stats = &txq->stats; 391a27d9013SAlfredo Cardigliano struct ionic_txq_desc *desc; 392a27d9013SAlfredo Cardigliano struct ionic_txq_sg_elem *elem; 393a27d9013SAlfredo Cardigliano struct rte_mbuf *txm_seg; 3947c3a867bSAndrew Boyer rte_iova_t data_iova; 3957c3a867bSAndrew Boyer uint64_t desc_addr = 0, next_addr; 396a27d9013SAlfredo Cardigliano uint16_t desc_len = 0; 397a27d9013SAlfredo Cardigliano uint8_t desc_nsge; 398a27d9013SAlfredo Cardigliano uint32_t hdrlen; 399a27d9013SAlfredo Cardigliano uint32_t mss = txm->tso_segsz; 400a27d9013SAlfredo Cardigliano uint32_t frag_left = 0; 401a27d9013SAlfredo Cardigliano uint32_t left; 402a27d9013SAlfredo Cardigliano uint32_t seglen; 403a27d9013SAlfredo Cardigliano uint32_t len; 404a27d9013SAlfredo Cardigliano uint32_t offset = 0; 405a27d9013SAlfredo Cardigliano bool start, done; 40664b08152SAlfredo Cardigliano bool encap; 407daa02b5cSOlivier Matz bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN); 408a27d9013SAlfredo Cardigliano uint16_t vlan_tci = txm->vlan_tci; 40964b08152SAlfredo Cardigliano uint64_t ol_flags = txm->ol_flags; 410a27d9013SAlfredo Cardigliano 411daa02b5cSOlivier Matz encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) || 412daa02b5cSOlivier Matz (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) && 413daa02b5cSOlivier Matz ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) || 414daa02b5cSOlivier Matz (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6)); 41564b08152SAlfredo Cardigliano 41664b08152SAlfredo Cardigliano /* Preload inner-most TCP csum field with IP pseudo hdr 41764b08152SAlfredo Cardigliano * calculated with IP length set to zero. HW will later 41864b08152SAlfredo Cardigliano * add in length to each TCP segment resulting from the TSO. 
41964b08152SAlfredo Cardigliano */ 42064b08152SAlfredo Cardigliano 42164b08152SAlfredo Cardigliano if (encap) { 42264b08152SAlfredo Cardigliano ionic_tx_tcp_inner_pseudo_csum(txm); 42364b08152SAlfredo Cardigliano hdrlen = txm->outer_l2_len + txm->outer_l3_len + 42464b08152SAlfredo Cardigliano txm->l2_len + txm->l3_len + txm->l4_len; 42564b08152SAlfredo Cardigliano } else { 42664b08152SAlfredo Cardigliano ionic_tx_tcp_pseudo_csum(txm); 42764b08152SAlfredo Cardigliano hdrlen = txm->l2_len + txm->l3_len + txm->l4_len; 42864b08152SAlfredo Cardigliano } 429a27d9013SAlfredo Cardigliano 430a27d9013SAlfredo Cardigliano seglen = hdrlen + mss; 431a27d9013SAlfredo Cardigliano left = txm->data_len; 4327c3a867bSAndrew Boyer data_iova = rte_mbuf_data_iova(txm); 433a27d9013SAlfredo Cardigliano 434be39f75cSAndrew Boyer desc = ionic_tx_tso_next(txq, &elem); 435a27d9013SAlfredo Cardigliano start = true; 436a27d9013SAlfredo Cardigliano 437a27d9013SAlfredo Cardigliano /* Chop data up into desc segments */ 438a27d9013SAlfredo Cardigliano 439a27d9013SAlfredo Cardigliano while (left > 0) { 440a27d9013SAlfredo Cardigliano len = RTE_MIN(seglen, left); 441a27d9013SAlfredo Cardigliano frag_left = seglen - len; 4427c3a867bSAndrew Boyer desc_addr = rte_cpu_to_le_64(data_iova + offset); 443a27d9013SAlfredo Cardigliano desc_len = len; 444a27d9013SAlfredo Cardigliano desc_nsge = 0; 445a27d9013SAlfredo Cardigliano left -= len; 446a27d9013SAlfredo Cardigliano offset += len; 447a27d9013SAlfredo Cardigliano if (txm->nb_segs > 1 && frag_left > 0) 448a27d9013SAlfredo Cardigliano continue; 449a27d9013SAlfredo Cardigliano done = (txm->nb_segs == 1 && left == 0); 450a27d9013SAlfredo Cardigliano ionic_tx_tso_post(q, desc, txm, 451a27d9013SAlfredo Cardigliano desc_addr, desc_nsge, desc_len, 452a27d9013SAlfredo Cardigliano hdrlen, mss, 45364b08152SAlfredo Cardigliano encap, 454a27d9013SAlfredo Cardigliano vlan_tci, has_vlan, 45577c60793SAndrew Boyer start, done); 456be39f75cSAndrew Boyer desc = 
ionic_tx_tso_next(txq, &elem); 457a27d9013SAlfredo Cardigliano start = false; 458a27d9013SAlfredo Cardigliano seglen = mss; 459a27d9013SAlfredo Cardigliano } 460a27d9013SAlfredo Cardigliano 461a27d9013SAlfredo Cardigliano /* Chop frags into desc segments */ 462a27d9013SAlfredo Cardigliano 463a27d9013SAlfredo Cardigliano txm_seg = txm->next; 464a27d9013SAlfredo Cardigliano while (txm_seg != NULL) { 465a27d9013SAlfredo Cardigliano offset = 0; 4667c3a867bSAndrew Boyer data_iova = rte_mbuf_data_iova(txm_seg); 467a27d9013SAlfredo Cardigliano left = txm_seg->data_len; 468a27d9013SAlfredo Cardigliano 469a27d9013SAlfredo Cardigliano while (left > 0) { 4707c3a867bSAndrew Boyer next_addr = rte_cpu_to_le_64(data_iova + offset); 471a27d9013SAlfredo Cardigliano if (frag_left > 0) { 472a27d9013SAlfredo Cardigliano len = RTE_MIN(frag_left, left); 473a27d9013SAlfredo Cardigliano frag_left -= len; 4747c3a867bSAndrew Boyer elem->addr = next_addr; 4754a735599SAndrew Boyer elem->len = rte_cpu_to_le_16(len); 476a27d9013SAlfredo Cardigliano elem++; 477a27d9013SAlfredo Cardigliano desc_nsge++; 478a27d9013SAlfredo Cardigliano } else { 479a27d9013SAlfredo Cardigliano len = RTE_MIN(mss, left); 480a27d9013SAlfredo Cardigliano frag_left = mss - len; 4817c3a867bSAndrew Boyer desc_addr = next_addr; 482a27d9013SAlfredo Cardigliano desc_len = len; 483a27d9013SAlfredo Cardigliano desc_nsge = 0; 484a27d9013SAlfredo Cardigliano } 485a27d9013SAlfredo Cardigliano left -= len; 486a27d9013SAlfredo Cardigliano offset += len; 487a27d9013SAlfredo Cardigliano if (txm_seg->next != NULL && frag_left > 0) 488a27d9013SAlfredo Cardigliano continue; 4897c3a867bSAndrew Boyer 490a27d9013SAlfredo Cardigliano done = (txm_seg->next == NULL && left == 0); 491a27d9013SAlfredo Cardigliano ionic_tx_tso_post(q, desc, txm_seg, 492a27d9013SAlfredo Cardigliano desc_addr, desc_nsge, desc_len, 493a27d9013SAlfredo Cardigliano hdrlen, mss, 49464b08152SAlfredo Cardigliano encap, 495a27d9013SAlfredo Cardigliano vlan_tci, has_vlan, 
49677c60793SAndrew Boyer start, done); 497be39f75cSAndrew Boyer desc = ionic_tx_tso_next(txq, &elem); 498a27d9013SAlfredo Cardigliano start = false; 499a27d9013SAlfredo Cardigliano } 500a27d9013SAlfredo Cardigliano 501a27d9013SAlfredo Cardigliano txm_seg = txm_seg->next; 502a27d9013SAlfredo Cardigliano } 503a27d9013SAlfredo Cardigliano 504a27d9013SAlfredo Cardigliano stats->tso++; 505a27d9013SAlfredo Cardigliano 506a27d9013SAlfredo Cardigliano return 0; 507a27d9013SAlfredo Cardigliano } 508a27d9013SAlfredo Cardigliano 5090de3e209SAndrew Boyer static __rte_always_inline int 51077c60793SAndrew Boyer ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm) 511a27d9013SAlfredo Cardigliano { 512be39f75cSAndrew Boyer struct ionic_queue *q = &txq->qcq.q; 513be39f75cSAndrew Boyer struct ionic_txq_desc *desc, *desc_base = q->base; 51456117636SAndrew Boyer struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base; 515be39f75cSAndrew Boyer struct ionic_txq_sg_elem *elem; 516be39f75cSAndrew Boyer struct ionic_tx_stats *stats = &txq->stats; 517a27d9013SAlfredo Cardigliano struct rte_mbuf *txm_seg; 518dd10c5b4SAndrew Boyer void **info; 519b4beb84aSAndrew Boyer rte_iova_t data_iova; 520a27d9013SAlfredo Cardigliano uint64_t ol_flags = txm->ol_flags; 5214a735599SAndrew Boyer uint64_t addr, cmd; 522a27d9013SAlfredo Cardigliano uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE; 523a27d9013SAlfredo Cardigliano uint8_t flags = 0; 524a27d9013SAlfredo Cardigliano 525be39f75cSAndrew Boyer desc = &desc_base[q->head_idx]; 526dd10c5b4SAndrew Boyer info = IONIC_INFO_PTR(q, q->head_idx); 527be39f75cSAndrew Boyer 528daa02b5cSOlivier Matz if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) && 52968591087SAndrew Boyer (txq->flags & IONIC_QCQ_F_CSUM_L3)) { 53064b08152SAlfredo Cardigliano opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW; 53164b08152SAlfredo Cardigliano flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3; 532f603eebcSAndrew Boyer } 533f603eebcSAndrew Boyer 534daa02b5cSOlivier Matz if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) 
&& 53568591087SAndrew Boyer (txq->flags & IONIC_QCQ_F_CSUM_TCP)) || 536daa02b5cSOlivier Matz ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) && 53768591087SAndrew Boyer (txq->flags & IONIC_QCQ_F_CSUM_UDP))) { 538f603eebcSAndrew Boyer opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW; 53964b08152SAlfredo Cardigliano flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4; 54064b08152SAlfredo Cardigliano } 54164b08152SAlfredo Cardigliano 542f603eebcSAndrew Boyer if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE) 543f603eebcSAndrew Boyer stats->no_csum++; 544f603eebcSAndrew Boyer 545b4beb84aSAndrew Boyer if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) || 546daa02b5cSOlivier Matz (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) && 547daa02b5cSOlivier Matz ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) || 548b4beb84aSAndrew Boyer (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) { 549b4beb84aSAndrew Boyer flags |= IONIC_TXQ_DESC_FLAG_ENCAP; 550b4beb84aSAndrew Boyer } 551a27d9013SAlfredo Cardigliano 552b4beb84aSAndrew Boyer if (ol_flags & RTE_MBUF_F_TX_VLAN) { 553b4beb84aSAndrew Boyer flags |= IONIC_TXQ_DESC_FLAG_VLAN; 554b4beb84aSAndrew Boyer desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci); 555b4beb84aSAndrew Boyer } 556a27d9013SAlfredo Cardigliano 5577c3a867bSAndrew Boyer addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm)); 5587c3a867bSAndrew Boyer 5594a735599SAndrew Boyer cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr); 5604a735599SAndrew Boyer desc->cmd = rte_cpu_to_le_64(cmd); 5614a735599SAndrew Boyer desc->len = rte_cpu_to_le_16(txm->data_len); 562a27d9013SAlfredo Cardigliano 563dd10c5b4SAndrew Boyer info[0] = txm; 564dd10c5b4SAndrew Boyer 565b4beb84aSAndrew Boyer if (txm->nb_segs > 1) { 566b4beb84aSAndrew Boyer txm_seg = txm->next; 567b4beb84aSAndrew Boyer 568be39f75cSAndrew Boyer elem = sg_desc_base[q->head_idx].elems; 569dd10c5b4SAndrew Boyer 570a27d9013SAlfredo Cardigliano while (txm_seg != NULL) { 571b4beb84aSAndrew Boyer /* Stash the mbuf ptr in the array */ 572b4beb84aSAndrew Boyer info++; 573b4beb84aSAndrew 
/**
 * Burst transmit entry point.
 *
 * @param tx_queue  Opaque pointer to a struct ionic_tx_qcq.
 * @param tx_pkts   Array of packets to send.
 * @param nb_pkts   Number of packets in tx_pkts.
 * @return Number of packets actually posted (may be < nb_pkts when the
 *         ring is short on descriptors or a packet fails to post).
 */
uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *mbuf;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	int err;

	/* Warm up the descriptor and info entries for the first post */
	struct ionic_txq_desc *desc_base = q->base;
	rte_prefetch0(&desc_base[q->head_idx]);
	rte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));

	if (tx_pkts) {
		rte_mbuf_prefetch_part1(tx_pkts[0]);
		rte_mbuf_prefetch_part2(tx_pkts[0]);
	}

	/* Cleaning old buffers */
	ionic_tx_flush(txq);

	/* Clamp the burst to the free descriptor count; count the overflow */
	nb_avail = ionic_q_space_avail(q);
	if (unlikely(nb_avail < nb_pkts)) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		/* Prefetch the next descriptor/info slot one post ahead */
		uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
		rte_prefetch0(&desc_base[next_idx]);
		rte_prefetch0(IONIC_INFO_PTR(q, next_idx));

		if (nb_tx + 1 < nb_pkts) {
			rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
			rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
		}

		mbuf = tx_pkts[nb_tx];

		/* TSO packets use a dedicated descriptor encoding */
		if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			err = ionic_tx_tso(txq, mbuf);
		else
			err = ionic_tx(txq, mbuf);
		if (err) {
			/* Remaining packets in the burst are dropped */
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += mbuf->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		/*
		 * Ensure all descriptor writes are globally visible
		 * before ringing the doorbell.
		 */
		rte_wmb();
		ionic_q_flush(q);

		stats->packets += nb_tx;
		stats->bytes += bytes_tx;
	}

	return nb_tx;
}
658a27d9013SAlfredo Cardigliano * TX prep functions 659a27d9013SAlfredo Cardigliano * 660a27d9013SAlfredo Cardigliano **********************************************************************/ 661a27d9013SAlfredo Cardigliano 662daa02b5cSOlivier Matz #define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 | \ 663daa02b5cSOlivier Matz RTE_MBUF_F_TX_IPV6 | \ 664daa02b5cSOlivier Matz RTE_MBUF_F_TX_VLAN | \ 665daa02b5cSOlivier Matz RTE_MBUF_F_TX_IP_CKSUM | \ 666daa02b5cSOlivier Matz RTE_MBUF_F_TX_TCP_SEG | \ 667daa02b5cSOlivier Matz RTE_MBUF_F_TX_L4_MASK) 668a27d9013SAlfredo Cardigliano 669a27d9013SAlfredo Cardigliano #define IONIC_TX_OFFLOAD_NOTSUP_MASK \ 670daa02b5cSOlivier Matz (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK) 671a27d9013SAlfredo Cardigliano 672a27d9013SAlfredo Cardigliano uint16_t 673e19eea1eSAndrew Boyer ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 674a27d9013SAlfredo Cardigliano { 675e19eea1eSAndrew Boyer struct ionic_tx_qcq *txq = tx_queue; 676a27d9013SAlfredo Cardigliano struct rte_mbuf *txm; 677a27d9013SAlfredo Cardigliano uint64_t offloads; 678a27d9013SAlfredo Cardigliano int i = 0; 679a27d9013SAlfredo Cardigliano 680a27d9013SAlfredo Cardigliano for (i = 0; i < nb_pkts; i++) { 681a27d9013SAlfredo Cardigliano txm = tx_pkts[i]; 682a27d9013SAlfredo Cardigliano 683e19eea1eSAndrew Boyer if (txm->nb_segs > txq->num_segs_fw) { 684a27d9013SAlfredo Cardigliano rte_errno = -EINVAL; 685a27d9013SAlfredo Cardigliano break; 686a27d9013SAlfredo Cardigliano } 687a27d9013SAlfredo Cardigliano 688a27d9013SAlfredo Cardigliano offloads = txm->ol_flags; 689a27d9013SAlfredo Cardigliano 690a27d9013SAlfredo Cardigliano if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) { 691a27d9013SAlfredo Cardigliano rte_errno = -ENOTSUP; 692a27d9013SAlfredo Cardigliano break; 693a27d9013SAlfredo Cardigliano } 694a27d9013SAlfredo Cardigliano } 695a27d9013SAlfredo Cardigliano 696a27d9013SAlfredo Cardigliano return i; 697a27d9013SAlfredo Cardigliano } 
698a27d9013SAlfredo Cardigliano 699a27d9013SAlfredo Cardigliano /********************************************************************* 700a27d9013SAlfredo Cardigliano * 701a27d9013SAlfredo Cardigliano * RX functions 702a27d9013SAlfredo Cardigliano * 703a27d9013SAlfredo Cardigliano **********************************************************************/ 704a27d9013SAlfredo Cardigliano 705a27d9013SAlfredo Cardigliano void 706a27d9013SAlfredo Cardigliano ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 707a27d9013SAlfredo Cardigliano struct rte_eth_rxq_info *qinfo) 708a27d9013SAlfredo Cardigliano { 709be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id]; 710be39f75cSAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 711a27d9013SAlfredo Cardigliano 712a27d9013SAlfredo Cardigliano qinfo->mp = rxq->mb_pool; 713a27d9013SAlfredo Cardigliano qinfo->scattered_rx = dev->data->scattered_rx; 714a27d9013SAlfredo Cardigliano qinfo->nb_desc = q->num_descs; 71502eabf57SAndrew Boyer qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED; 71668591087SAndrew Boyer qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 717a27d9013SAlfredo Cardigliano } 718a27d9013SAlfredo Cardigliano 719ce6427ddSThomas Monjalon void __rte_cold 7207483341aSXueming Li ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 721a27d9013SAlfredo Cardigliano { 7227483341aSXueming Li struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid]; 723be39f75cSAndrew Boyer 724be39f75cSAndrew Boyer if (!rxq) 725be39f75cSAndrew Boyer return; 726a27d9013SAlfredo Cardigliano 727a27d9013SAlfredo Cardigliano IONIC_PRINT_CALL(); 728a27d9013SAlfredo Cardigliano 729be39f75cSAndrew Boyer ionic_qcq_free(&rxq->qcq); 730a27d9013SAlfredo Cardigliano } 731a27d9013SAlfredo Cardigliano 732ce6427ddSThomas Monjalon int __rte_cold 733a27d9013SAlfredo Cardigliano ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, 734a27d9013SAlfredo Cardigliano uint16_t rx_queue_id, 
/**
 * Allocate and configure an Rx queue (rte_eth_rx_queue_setup).
 *
 * Validates the queue index and descriptor count, frees any queue
 * previously installed in this slot, then allocates a fresh qcq.
 * The queue is left in the STOPPED state; it is started separately
 * (see rx_deferred_start handling below).
 *
 * @return 0 on success, -EINVAL on any validation or allocation failure.
 */
int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	/* Per-queue offloads are merged with the port-level offloads */
	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors: power of 2 within range */
	if (!rte_is_power_of_2(nb_desc) ||
	    nb_desc < IONIC_MIN_RING_DESC ||
	    nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, mp,
		&rxq);
	if (err) {
		/* NOTE(review): err is discarded; -EINVAL is returned even
		 * when the underlying failure was e.g. -ENOMEM.
		 */
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;

	/*
	 * Note: the interface does not currently support
	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
	 * when the adapter will be able to keep the CRC and subtract
	 * it to the length for all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *		RTE_ETH_RX_OFFLOAD_KEEP_CRC)
	 *   rxq->crc_len = ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}
/*
 * Lookup table mapping the hardware completion's checksum flag bits
 * (below the VLAN bit) directly to mbuf ol_flags.  Indexed by the
 * csum_flags field masked with IONIC_CSUM_FLAG_MASK.
 */
#define IONIC_CSUM_FLAG_MASK	(IONIC_RXQ_COMP_CSUM_F_VLAN - 1)
static const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
		__rte_cache_aligned = {
	/* IP_BAD set */
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD] = RTE_MBUF_F_RX_IP_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* IP_OK set */
	[IONIC_RXQ_COMP_CSUM_F_IP_OK] = RTE_MBUF_F_RX_IP_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
		RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* No IP flag set */
	[IONIC_RXQ_COMP_CSUM_F_TCP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_TCP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
};

/*
 * Maps the completion's packet-type code to an mbuf packet_type.
 * Entries not listed default to RTE_PTYPE_UNKNOWN (0x0), which the
 * Rx clean path further refines by sniffing the Ethernet header.
 */
/* RTE_PTYPE_UNKNOWN is 0x0 */
static const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]
		__rte_cache_aligned = {
	[IONIC_PKT_TYPE_NON_IP] = RTE_PTYPE_UNKNOWN,
	[IONIC_PKT_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
	[IONIC_PKT_TYPE_IPV4_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV4_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[IONIC_PKT_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
	[IONIC_PKT_TYPE_IPV6_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV6_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
};

/**
 * Report the packet types this driver can produce
 * (rte_eth_dev_supported_ptypes_get).
 */
const uint32_t *
ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused)
{
	/* See ionic_ptype_table[] */
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN	/* terminator */
	};

	return ptypes;
}
/*
 * Cleans one descriptor. Connects the filled mbufs into a chain.
 * Does not advance the tail index.
 *
 * On a bad completion (status set, zero or oversized length) the mbufs
 * are left in the info array, so the descriptor can simply be re-posted.
 */
static __rte_always_inline void
ionic_rx_clean_one(struct ionic_rx_qcq *rxq,
		struct ionic_rxq_comp *cq_desc,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm, *rxm_seg, *prev_rxm;
	struct ionic_rx_stats *stats = &rxq->stats;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	uint32_t left, i;
	uint16_t cq_desc_len;
	uint8_t ptype, cflags;
	void **info;

	cq_desc_len = rte_le_to_cpu_16(cq_desc->len);

	info = IONIC_INFO_PTR(q, q->tail_idx);

	/* info[0] is the head mbuf posted for this descriptor */
	rxm = info[0];

	if (cq_desc->status) {
		stats->bad_cq_status++;
		return;
	}

	if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
		stats->bad_len++;
		return;
	}

	/* Take ownership of the head mbuf; the fill path will replace it */
	info[0] = NULL;

	/* Set the mbuf metadata based on the cq entry */
	rxm->rearm_data[0] = rxq->rearm_data;
	rxm->pkt_len = cq_desc_len;
	rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);
	left = cq_desc_len - rxm->data_len;
	rxm->nb_segs = cq_desc->num_sg_elems + 1;
	prev_rxm = rxm;

	for (i = 1; i < rxm->nb_segs && left; i++) {
		rxm_seg = info[i];
		info[i] = NULL;

		/* Set the chained mbuf metadata */
		rxm_seg->rearm_data[0] = rxq->rearm_seg_data;
		rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
		left -= rxm_seg->data_len;

		/* Link the mbuf */
		prev_rxm->next = rxm_seg;
		prev_rxm = rxm_seg;
	}

	/* Terminate the mbuf chain */
	prev_rxm->next = NULL;

	/* RSS */
	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
	}

	/* Checksum: table lookup on the hardware's checksum flag bits */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		cflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;
		pkt_flags |= ionic_csum_flags[cflags];
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	ptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
	pkt_type = ionic_ptype_table[ptype];
	if (pkt_type == RTE_PTYPE_UNKNOWN) {
		/*
		 * Hardware didn't classify it; peek at the Ethernet header
		 * to recognize ARP/LLDP/1588.  This mtod access touches
		 * packet data and is counted separately.
		 */
		struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
			struct rte_ether_hdr *);
		uint16_t ether_type = eth_h->ether_type;
		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
			pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))
			pkt_type = RTE_PTYPE_L2_ETHER_LLDP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))
			pkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;
		stats->mtods++;
	}

	rxm->packet_type = pkt_type;

	/* Hand the completed packet to the caller's burst array */
	rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
	rx_svc->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}
/*
 * Fills one descriptor with mbufs. Does not advance the head index.
 *
 * mbufs are drawn from a per-queue bulk-allocated cache (rxq->mbs),
 * refilled IONIC_MBUF_BULK_ALLOC at a time from the mempool.
 * Slots whose info[] entry is still populated are left alone, so this
 * is safe to call on a descriptor whose buffers were never consumed.
 */
static __rte_always_inline int
ionic_rx_fill_one(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm, *rxm_seg;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	rte_iova_t data_iova;
	uint32_t i;
	void **info;
	int ret;

	info = IONIC_INFO_PTR(q, q->head_idx);
	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];

	/* mbuf is unused => whole chain is unused */
	if (unlikely(info[0]))
		return 0;

	if (rxq->mb_idx == 0) {
		ret = rte_mempool_get_bulk(rxq->mb_pool,
					(void **)rxq->mbs,
					IONIC_MBUF_BULK_ALLOC);
		if (ret) {
			/* NOTE(review): aborts debug builds on mempool
			 * exhaustion; release builds return -ENOMEM.
			 */
			assert(0);
			return -ENOMEM;
		}

		rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
	}

	rxm = rxq->mbs[--rxq->mb_idx];
	info[0] = rxm;

	data_iova = rte_mbuf_data_iova_default(rxm);
	desc->addr = rte_cpu_to_le_64(data_iova);

	for (i = 1; i < q->num_segs; i++) {
		/* mbuf is unused => rest of the chain is unused */
		if (info[i])
			return 0;

		if (rxq->mb_idx == 0) {
			ret = rte_mempool_get_bulk(rxq->mb_pool,
					(void **)rxq->mbs,
					IONIC_MBUF_BULK_ALLOC);
			if (ret) {
				assert(0);
				return -ENOMEM;
			}

			rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
		}

		rxm_seg = rxq->mbs[--rxq->mb_idx];
		info[i] = rxm_seg;

		/* The data_off does not get set to 0 until later */
		data_iova = rxm_seg->buf_iova;
		sg_desc->elems[i - 1].addr = rte_cpu_to_le_64(data_iova);
	}

	return 0;
}
10487b20fc2fSAndrew Boyer */ 10497b20fc2fSAndrew Boyer static int __rte_cold 10507b20fc2fSAndrew Boyer ionic_rx_fill(struct ionic_rx_qcq *rxq) 10517b20fc2fSAndrew Boyer { 10527b20fc2fSAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 10537b20fc2fSAndrew Boyer uint32_t i; 10547b20fc2fSAndrew Boyer int err; 10557b20fc2fSAndrew Boyer 10567b20fc2fSAndrew Boyer for (i = 1; i < q->num_descs; i++) { 10577b20fc2fSAndrew Boyer err = ionic_rx_fill_one(rxq); 10587b20fc2fSAndrew Boyer if (err) 10597b20fc2fSAndrew Boyer return err; 10607b20fc2fSAndrew Boyer 1061dd10c5b4SAndrew Boyer q->head_idx = Q_NEXT_TO_POST(q, 1); 1062a27d9013SAlfredo Cardigliano } 1063a27d9013SAlfredo Cardigliano 106477c60793SAndrew Boyer ionic_q_flush(q); 106577c60793SAndrew Boyer 1066a27d9013SAlfredo Cardigliano return 0; 1067a27d9013SAlfredo Cardigliano } 1068a27d9013SAlfredo Cardigliano 1069a27d9013SAlfredo Cardigliano /* 10707b2eb674SAndrew Boyer * Perform one-time initialization of descriptor fields 10717b2eb674SAndrew Boyer * which will not change for the life of the queue. 10727b2eb674SAndrew Boyer */ 10737b2eb674SAndrew Boyer static void __rte_cold 10747b2eb674SAndrew Boyer ionic_rx_init_descriptors(struct ionic_rx_qcq *rxq) 10757b2eb674SAndrew Boyer { 10767b2eb674SAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 10777b2eb674SAndrew Boyer struct ionic_rxq_desc *desc, *desc_base = q->base; 10787b2eb674SAndrew Boyer struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base; 10797b2eb674SAndrew Boyer uint32_t i, j; 10807b2eb674SAndrew Boyer uint8_t opcode; 10817b2eb674SAndrew Boyer 10827b2eb674SAndrew Boyer opcode = (q->num_segs > 1) ? 10837b2eb674SAndrew Boyer IONIC_RXQ_DESC_OPCODE_SG : IONIC_RXQ_DESC_OPCODE_SIMPLE; 10847b2eb674SAndrew Boyer 10857b2eb674SAndrew Boyer /* 10867b2eb674SAndrew Boyer * NB: Only the first segment needs to leave headroom (hdr_seg_size). 10877b2eb674SAndrew Boyer * Later segments (seg_size) do not. 
10887b2eb674SAndrew Boyer */ 10897b2eb674SAndrew Boyer for (i = 0; i < q->num_descs; i++) { 10907b2eb674SAndrew Boyer desc = &desc_base[i]; 10917b2eb674SAndrew Boyer desc->len = rte_cpu_to_le_16(rxq->hdr_seg_size); 10927b2eb674SAndrew Boyer desc->opcode = opcode; 10937b2eb674SAndrew Boyer 10947b2eb674SAndrew Boyer sg_desc = &sg_desc_base[i]; 10957b2eb674SAndrew Boyer for (j = 0; j < q->num_segs - 1u; j++) 10967b2eb674SAndrew Boyer sg_desc->elems[j].len = 10977b2eb674SAndrew Boyer rte_cpu_to_le_16(rxq->seg_size); 10987b2eb674SAndrew Boyer } 10997b2eb674SAndrew Boyer } 11007b2eb674SAndrew Boyer 11017b2eb674SAndrew Boyer /* 1102a27d9013SAlfredo Cardigliano * Start Receive Units for specified queue. 1103a27d9013SAlfredo Cardigliano */ 1104ce6427ddSThomas Monjalon int __rte_cold 1105a27d9013SAlfredo Cardigliano ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) 1106a27d9013SAlfredo Cardigliano { 11079fdf11c4SAndrew Boyer uint8_t *rx_queue_state = eth_dev->data->rx_queue_state; 1108be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq; 1109d5850081SAndrew Boyer struct ionic_queue *q; 1110a27d9013SAlfredo Cardigliano int err; 1111a27d9013SAlfredo Cardigliano 11129fdf11c4SAndrew Boyer if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) { 11139fdf11c4SAndrew Boyer IONIC_PRINT(DEBUG, "RX queue %u already started", 11149fdf11c4SAndrew Boyer rx_queue_id); 11159fdf11c4SAndrew Boyer return 0; 11169fdf11c4SAndrew Boyer } 11179fdf11c4SAndrew Boyer 1118a27d9013SAlfredo Cardigliano rxq = eth_dev->data->rx_queues[rx_queue_id]; 1119d5850081SAndrew Boyer q = &rxq->qcq.q; 1120a27d9013SAlfredo Cardigliano 1121b671e69aSAndrew Boyer rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN; 1122b671e69aSAndrew Boyer 1123d5850081SAndrew Boyer /* Recalculate segment count based on MTU */ 1124d5850081SAndrew Boyer q->num_segs = 1 + 1125d5850081SAndrew Boyer (rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size; 1126d5850081SAndrew Boyer 
/*
 * Start Receive Units for specified queue.
 *
 * Idempotent: returns 0 immediately if the queue is already started.
 * Order matters: descriptors are initialized, the LIF queue is
 * initialized in firmware, and only then are buffers posted.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq;
	struct ionic_queue *q;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	q = &rxq->qcq.q;

	/* Max frame excludes the CRC, which the adapter strips */
	rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;

	/* Recalculate segment count based on MTU */
	q->num_segs = 1 +
		(rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size;

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u segs %u",
		rx_queue_id, q->num_descs, rxq->frame_size, q->num_segs);

	ionic_rx_init_descriptors(rxq);

	err = ionic_lif_rxq_init(rxq);
	if (err)
		return err;

	/* Allocate buffers for descriptor rings */
	if (ionic_rx_fill(rxq) != 0) {
		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
			rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
/*
 * Walk the CQ to find completed receive descriptors.
 * Any completed descriptor found is refilled.
 *
 * Completion ownership is tracked by the color protocol: an entry is
 * valid when its color bit matches cq->done_color, which toggles each
 * time the CQ tail wraps to index 0.
 */
static __rte_always_inline void
ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *q_desc_base = q->base;
	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
	uint32_t work_done = 0;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		/* Wrapped: flip the expected color */
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 8 x 8B bufinfo */
		rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));
		/* Prefetch 4 x 16B comp */
		rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
		/* Prefetch 4 x 16B descriptors */
		rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);

		/* Deliver (or drop) the completed packet */
		ionic_rx_clean_one(rxq, cq_desc, rx_svc);

		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

		/* Re-arm the just-serviced descriptor; ignore ENOMEM here */
		(void)ionic_rx_fill_one(rxq);

		q->head_idx = Q_NEXT_TO_POST(q, 1);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	/* Update the queue indices and ring the doorbell */
	if (work_done)
		ionic_q_flush(q);
}
Cardigliano break; 1187a27d9013SAlfredo Cardigliano 1188a27d9013SAlfredo Cardigliano cq_desc = &cq_desc_base[cq->tail_idx]; 1189a27d9013SAlfredo Cardigliano } 11907b20fc2fSAndrew Boyer 11917b20fc2fSAndrew Boyer /* Update the queue indices and ring the doorbell */ 11927b20fc2fSAndrew Boyer if (work_done) 11937b20fc2fSAndrew Boyer ionic_q_flush(q); 1194a27d9013SAlfredo Cardigliano } 1195a27d9013SAlfredo Cardigliano 1196a27d9013SAlfredo Cardigliano /* 1197a27d9013SAlfredo Cardigliano * Stop Receive Units for specified queue. 1198a27d9013SAlfredo Cardigliano */ 1199ce6427ddSThomas Monjalon int __rte_cold 1200a27d9013SAlfredo Cardigliano ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) 1201a27d9013SAlfredo Cardigliano { 1202e7222f94SAndrew Boyer uint8_t *rx_queue_state = eth_dev->data->rx_queue_state; 1203e7222f94SAndrew Boyer struct ionic_rx_stats *stats; 1204be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq; 1205a27d9013SAlfredo Cardigliano 12064ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id); 1207a27d9013SAlfredo Cardigliano 1208a27d9013SAlfredo Cardigliano rxq = eth_dev->data->rx_queues[rx_queue_id]; 1209a27d9013SAlfredo Cardigliano 1210e7222f94SAndrew Boyer rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 12119fdf11c4SAndrew Boyer 1212e7222f94SAndrew Boyer ionic_lif_rxq_deinit(rxq); 1213a27d9013SAlfredo Cardigliano 1214e7222f94SAndrew Boyer /* Free all buffers from descriptor ring */ 1215e7222f94SAndrew Boyer ionic_rx_empty(rxq); 1216e7222f94SAndrew Boyer 1217e7222f94SAndrew Boyer stats = &rxq->stats; 1218e7222f94SAndrew Boyer IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju", 1219e7222f94SAndrew Boyer rxq->qcq.q.index, stats->packets, stats->mtods); 1220a27d9013SAlfredo Cardigliano 1221a27d9013SAlfredo Cardigliano return 0; 1222a27d9013SAlfredo Cardigliano } 1223a27d9013SAlfredo Cardigliano 1224a27d9013SAlfredo Cardigliano uint16_t 1225a27d9013SAlfredo Cardigliano ionic_recv_pkts(void *rx_queue, struct 
rte_mbuf **rx_pkts, 1226a27d9013SAlfredo Cardigliano uint16_t nb_pkts) 1227a27d9013SAlfredo Cardigliano { 1228be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq = rx_queue; 122914f534beSAndrew Boyer struct ionic_rx_service rx_svc; 1230a27d9013SAlfredo Cardigliano 123114f534beSAndrew Boyer rx_svc.rx_pkts = rx_pkts; 123214f534beSAndrew Boyer rx_svc.nb_rx = 0; 1233a27d9013SAlfredo Cardigliano 123414f534beSAndrew Boyer ionic_rxq_service(rxq, nb_pkts, &rx_svc); 1235a27d9013SAlfredo Cardigliano 123614f534beSAndrew Boyer return rx_svc.nb_rx; 1237a27d9013SAlfredo Cardigliano } 1238