/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>
#include <assert.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_net.h>

#include "ionic_logs.h"
#include "ionic_mac_api.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

static void
ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)
{
	uint32_t i;

	for (i = idx; i < cnt; i++)
		if (array[i])
			rte_pktmbuf_free_seg(array[i]);

	memset(array, 0, sizeof(void *) * cnt);
}

static void __rte_cold
ionic_tx_empty(struct ionic_tx_qcq *txq)
{
	struct ionic_queue *q = &txq->qcq.q;

	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);
}

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;

	/*
	 * Walk the full info array so that the clean up includes any
	 * fragments that were left dangling for later reuse
	 */
	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);
}
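
/*
 * Note on buffer bookkeeping (illustrative, derived from the helpers above):
 * q->info holds one mbuf pointer slot per potential fragment, i.e.
 * num_descs * num_segs entries, assuming IONIC_INFO_PTR(q, N) resolves to
 * &q->info[N * num_segs] as its use in ionic_tx_flush() suggests. For
 * example, a 1024-descriptor queue configured for 4 segments per packet
 * carries 4096 slots, and descriptor N's fragments live at
 * info[N * num_segs] .. info[N * num_segs + 3].
 */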
/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}
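
/*
 * Completion ring color scheme (a sketch of the convention used below):
 * each completion entry carries a color bit that the hardware flips every
 * time it wraps the ring. The driver tracks the color it expects to see
 * next in cq->done_color and toggles it whenever its tail index wraps to
 * zero, so color_match() is true only for entries the hardware has
 * written since the driver last serviced that slot.
 */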
static __rte_always_inline void
ionic_tx_flush(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct rte_mbuf *txm;
	struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;
	void **info;
	uint32_t i;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);

		while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
			/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
			rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2)));

			/* Prefetch next mbuf */
			void **next_info =
				IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1));
			if (next_info[0])
				rte_mbuf_prefetch_part2(next_info[0]);
			if (next_info[1])
				rte_mbuf_prefetch_part2(next_info[1]);

			info = IONIC_INFO_PTR(q, q->tail_idx);
			for (i = 0; i < q->num_segs; i++) {
				txm = info[i];
				if (!txm)
					break;

				rte_pktmbuf_free_seg(txm);

				info[i] = NULL;
			}

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
		}

		cq_desc = &cq_desc_base[cq->tail_idx];
	}
}

void __rte_cold
ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];

	IONIC_PRINT_CALL();

	ionic_qcq_free(&txq->qcq);
}

int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_tx_stats *stats;
	struct ionic_tx_qcq *txq;

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	txq = eth_dev->data->tx_queues[tx_queue_id];

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	/*
	 * Note: we should ideally post a NOP Tx descriptor and wait for
	 * its completion before disabling the Tx queue
	 */

	ionic_lif_txq_deinit(txq);

	/* Free all buffers from descriptor ring */
	ionic_tx_empty(txq);

	stats = &txq->stats;
	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
		txq->qcq.q.index, stats->packets, stats->tso);

	return 0;
}

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}
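
/*
 * For reference, an application reaches the setup hook above through the
 * generic ethdev API; a minimal sketch (port and size choices are
 * illustrative):
 *
 *	struct rte_eth_txconf txconf = {
 *		.tx_deferred_start = 0,
 *	};
 *
 *	ret = rte_eth_tx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), &txconf);
 *	if (ret < 0)
 *		rte_exit(EXIT_FAILURE, "TX queue setup failed\n");
 */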
/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_tx_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->qcq.q.num_descs);

	err = ionic_lif_txq_init(txq);
	if (err)
		return err;

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm,
		struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm,
		struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}
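
/*
 * Header-length example for the encapsulated case above (illustrative):
 * for a TSO packet carried in VXLAN, the mbuf would typically have
 * outer_l2_len = 14 (Ethernet), outer_l3_len = 20 (IPv4),
 * l2_len = 8 + 8 + 14 (UDP + VXLAN + inner Ethernet), l3_len = 20 and
 * l4_len = 20, so the inner TCP header targeted here sits
 * outer_l2_len + outer_l3_len + l2_len + l3_len bytes into the frame.
 */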
static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	struct rte_mbuf *txm_seg;
	void **info;
	uint64_t cmd;
	uint8_t flags = 0;
	int i;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(len);
	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
	desc->mss = rte_cpu_to_le_16(mss);

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);

		/* Walk the mbuf chain to stash pointers in the array */
		txm_seg = txm;
		for (i = 0; i < txm->nb_segs; i++) {
			info[i] = txm_seg;
			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}
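
/*
 * Worked example of the descriptor chopping done by ionic_tx_tso() below
 * (numbers are illustrative): with hdrlen = 54 (Ethernet + IPv4 + TCP),
 * mss = 1400 and a single-segment mbuf of data_len = 2908, the first
 * descriptor covers hdrlen + mss = 1454 bytes (headers plus the first
 * TSO frame), the second covers the next 1400 bytes, and the third the
 * remaining 54 bytes. The mbuf pointers are stashed only with the final
 * descriptor (done == true), so ionic_tx_flush() frees the chain exactly
 * once, when that descriptor completes.
 */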
static int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	seglen = hdrlen + mss;
	left = txm->data_len;
	data_iova = rte_mbuf_data_iova(txm);

	desc = ionic_tx_tso_next(txq, &elem);
	start = true;

	/* Chop data up into desc segments */

	while (left > 0) {
		len = RTE_MIN(seglen, left);
		frag_left = seglen - len;
		desc_addr = rte_cpu_to_le_64(data_iova + offset);
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (txm->nb_segs > 1 && frag_left > 0)
			continue;
		done = (txm->nb_segs == 1 && left == 0);
		ionic_tx_tso_post(q, desc, txm,
			desc_addr, desc_nsge, desc_len,
			hdrlen, mss,
			encap,
			vlan_tci, has_vlan,
			start, done);
		desc = ionic_tx_tso_next(txq, &elem);
		start = false;
		seglen = mss;
	}
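
	/*
	 * At this point frag_left holds the unfilled remainder of the
	 * current TSO frame. A nonzero value means the next mbuf fragment
	 * continues the open descriptor as an SG element rather than
	 * starting a new descriptor.
	 */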
	/* Chop frags into desc segments */

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0) {
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = rte_cpu_to_le_16(len);
				elem++;
				desc_nsge++;
			} else {
				len = RTE_MIN(mss, left);
				frag_left = mss - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;
			if (txm_seg->next != NULL && frag_left > 0)
				continue;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}
static __rte_always_inline int
ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_sg_elem *elem;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm_seg;
	void **info;
	rte_iova_t data_iova;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr, cmd;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	desc = &desc_base[q->head_idx];
	info = IONIC_INFO_PTR(q, q->head_idx);

	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
	    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
		flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
	}

	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
		flags |= IONIC_TXQ_DESC_FLAG_VLAN;
		desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
	}

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(txm->data_len);

	info[0] = txm;

	if (txm->nb_segs > 1) {
		txm_seg = txm->next;

		elem = sg_desc_base[q->head_idx].elems;

		while (txm_seg != NULL) {
			/* Stash the mbuf ptr in the array */
			info++;
			*info = txm_seg;

			/* Configure the SGE */
			data_iova = rte_mbuf_data_iova(txm_seg);
			elem->len = rte_cpu_to_le_16(txm_seg->data_len);
			elem->addr = rte_cpu_to_le_64(data_iova);
			elem++;

			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}
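
/*
 * Burst transmit. The flow below is: reclaim completed descriptors,
 * clamp the burst to the free descriptor count, post one descriptor per
 * packet (TSO packets may consume several), then ring the doorbell once.
 * Note that nsge is nb_segs - 1 in ionic_tx() above because the first
 * data segment rides in the base descriptor itself.
 */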
uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *mbuf;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	int err;

	struct ionic_txq_desc *desc_base = q->base;
	rte_prefetch0(&desc_base[q->head_idx]);
	rte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));

	if (tx_pkts) {
		rte_mbuf_prefetch_part1(tx_pkts[0]);
		rte_mbuf_prefetch_part2(tx_pkts[0]);
	}

	/* Cleaning old buffers */
	ionic_tx_flush(txq);

	nb_avail = ionic_q_space_avail(q);
	if (unlikely(nb_avail < nb_pkts)) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
		rte_prefetch0(&desc_base[next_idx]);
		rte_prefetch0(IONIC_INFO_PTR(q, next_idx));

		if (nb_tx + 1 < nb_pkts) {
			rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
			rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
		}

		mbuf = tx_pkts[nb_tx];

		if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			err = ionic_tx_tso(txq, mbuf);
		else
			err = ionic_tx(txq, mbuf);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += mbuf->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		rte_wmb();
		ionic_q_flush(q);

		stats->packets += nb_tx;
		stats->bytes += bytes_tx;
	}

	return nb_tx;
}
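
/*
 * For reference, an application drives this path through the generic
 * burst API; a minimal sketch (port/queue numbers are illustrative):
 *
 *	uint16_t sent = 0;
 *
 *	while (sent < n)
 *		sent += rte_eth_tx_burst(port_id, 0,
 *				&pkts[sent], n - sent);
 */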
/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 |	\
	RTE_MBUF_F_TX_IPV6 |		\
	RTE_MBUF_F_TX_VLAN |		\
	RTE_MBUF_F_TX_IP_CKSUM |	\
	RTE_MBUF_F_TX_TCP_SEG |		\
	RTE_MBUF_F_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)

uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > txq->num_segs_fw) {
			/* rte_errno takes positive errno values */
			rte_errno = EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			break;
		}
	}

	return i;
}
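
/*
 * For reference, the prep hook above is reached via rte_eth_tx_prepare(),
 * which applications typically call before transmitting TSO bursts; a
 * minimal sketch, handle_invalid_pkt() being a hypothetical application
 * helper:
 *
 *	uint16_t nb_ok = rte_eth_tx_prepare(port_id, 0, pkts, n);
 *
 *	if (nb_ok < n)
 *		handle_invalid_pkt(pkts[nb_ok], rte_errno);
 *	rte_eth_tx_burst(port_id, 0, pkts, nb_ok);
 */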
/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

void __rte_cold
ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];

	if (!rxq)
		return;

	IONIC_PRINT_CALL();

	ionic_qcq_free(&rxq->qcq);
}
int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
	    nb_desc < IONIC_MIN_RING_DESC ||
	    nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, mp,
		&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;

	/*
	 * Note: the interface does not currently support
	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC; once the adapter is able to keep
	 * the CRC, also consider ETHER_CRC_LEN and subtract it from the
	 * length of all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *		RTE_ETH_RX_OFFLOAD_KEEP_CRC)
	 *	rxq->crc_len = ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}
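
/*
 * For reference, an application reaches the setup hook above through the
 * generic ethdev API; a minimal sketch (sizes are illustrative):
 *
 *	ret = rte_eth_rx_queue_setup(port_id, 0, 1024,
 *			rte_eth_dev_socket_id(port_id), NULL, mbuf_pool);
 *	if (ret < 0)
 *		rte_exit(EXIT_FAILURE, "RX queue setup failed\n");
 */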
/*
 * Cleans one descriptor. Connects the filled mbufs into a chain.
 * Does not advance the tail index.
 */
static __rte_always_inline void
ionic_rx_clean_one(struct ionic_rx_qcq *rxq,
		struct ionic_rxq_comp *cq_desc,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm, *rxm_seg, *prev_rxm;
	struct ionic_rx_stats *stats = &rxq->stats;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	uint32_t left, i;
	uint16_t cq_desc_len;
	void **info;

	cq_desc_len = rte_le_to_cpu_16(cq_desc->len);

	info = IONIC_INFO_PTR(q, q->tail_idx);

	rxm = info[0];

	if (cq_desc->status) {
		stats->bad_cq_status++;
		return;
	}

	if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
		stats->bad_len++;
		return;
	}

	info[0] = NULL;

	/* Set the mbuf metadata based on the cq entry */
	rxm->rearm_data[0] = rxq->rearm_data;
	rxm->pkt_len = cq_desc_len;
	rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);
	left = cq_desc_len - rxm->data_len;
	rxm->nb_segs = cq_desc->num_sg_elems + 1;
	prev_rxm = rxm;

	for (i = 1; i < rxm->nb_segs && left; i++) {
		rxm_seg = info[i];
		info[i] = NULL;

		/* Set the chained mbuf metadata */
		rxm_seg->rearm_data[0] = rxq->rearm_seg_data;
		rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
		left -= rxm_seg->data_len;

		/* Link the mbuf */
		prev_rxm->next = rxm_seg;
		prev_rxm = rxm_seg;
	}

	/* Terminate the mbuf chain */
	prev_rxm->next = NULL;

	/* RSS */
	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
			pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
		else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
			pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;

		if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
		    (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
			pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
		else if ((cq_desc->csum_flags &
			  IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
			 (cq_desc->csum_flags &
			  IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
			pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
	case IONIC_PKT_TYPE_IPV4:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		break;
	case IONIC_PKT_TYPE_IPV6:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		break;
	case IONIC_PKT_TYPE_IPV4_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV6_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV4_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
		break;
	case IONIC_PKT_TYPE_IPV6_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
		break;
	default:
	{
		struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
			struct rte_ether_hdr *);
		uint16_t ether_type = eth_h->ether_type;
		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
			pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		else
			pkt_type = RTE_PTYPE_UNKNOWN;
		stats->mtods++;
		break;
	}
	}

	rxm->packet_type = pkt_type;

	rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
	rx_svc->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}
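
/*
 * A note on the rearm_data stores above (a sketch of the technique, not
 * spelled out in this file): rxq->rearm_data is assumed to be a
 * precomputed image of the mbuf fields that share one 64-bit word with
 * rearm_data (data_off, refcnt, nb_segs, port), so a single store
 * reinitializes all of them without touching each field individually.
 *
 * The refill path below is lazy: slots whose info pointer is still set
 * kept their mbufs from a previous fill (e.g. unused SG fragments), so
 * only NULL slots are backed by fresh allocations.
 */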
/*
 * Fills one descriptor with mbufs. Does not advance the head index.
 */
static __rte_always_inline int
ionic_rx_fill_one(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm, *rxm_seg;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	rte_iova_t data_iova;
	uint32_t i;
	void **info;

	info = IONIC_INFO_PTR(q, q->head_idx);
	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];

	/* mbuf is unused => whole chain is unused */
	if (unlikely(info[0]))
		return 0;

	rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
	if (unlikely(rxm == NULL)) {
		assert(0);
		return -ENOMEM;
	}

	info[0] = rxm;

	data_iova = rte_mbuf_data_iova_default(rxm);
	desc->addr = rte_cpu_to_le_64(data_iova);

	for (i = 1; i < q->num_segs; i++) {
		/* mbuf is unused => rest of the chain is unused */
		if (info[i])
			return 0;

		rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
		if (rxm_seg == NULL) {
			assert(0);
			return -ENOMEM;
		}

		info[i] = rxm_seg;

		/* The data_off does not get set to 0 until later */
		data_iova = rxm_seg->buf_iova;
		sg_desc->elems[i - 1].addr = rte_cpu_to_le_64(data_iova);
	}

	return 0;
}
/*
 * Fills all descriptors with mbufs. Note that the loop below posts only
 * num_descs - 1 buffers, leaving one slot unposted (the usual ring
 * convention that keeps a full ring distinguishable from an empty one).
 */
static int __rte_cold
ionic_rx_fill(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	uint32_t i;
	int err;

	for (i = 1; i < q->num_descs; i++) {
		err = ionic_rx_fill_one(rxq);
		if (err)
			return err;

		q->head_idx = Q_NEXT_TO_POST(q, 1);
	}

	ionic_q_flush(q);

	return 0;
}
/*
 * Perform one-time initialization of descriptor fields
 * which will not change for the life of the queue.
 */
static void __rte_cold
ionic_rx_init_descriptors(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	uint32_t i, j;
	uint8_t opcode;

	opcode = (q->num_segs > 1) ?
		IONIC_RXQ_DESC_OPCODE_SG : IONIC_RXQ_DESC_OPCODE_SIMPLE;

	/*
	 * NB: Only the first segment needs to leave headroom (hdr_seg_size).
	 * Later segments (seg_size) do not.
	 */
	for (i = 0; i < q->num_descs; i++) {
		desc = &desc_base[i];
		desc->len = rte_cpu_to_le_16(rxq->hdr_seg_size);
		desc->opcode = opcode;

		sg_desc = &sg_desc_base[i];
		for (j = 0; j < q->num_segs - 1u; j++)
			sg_desc->elems[j].len =
				rte_cpu_to_le_16(rxq->seg_size);
	}
}
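
/*
 * Segment count example for the recalculation in
 * ionic_dev_rx_queue_start() below (numbers are illustrative): with
 * seg_size = 2048, RTE_PKTMBUF_HEADROOM = 128 and a 9000-byte frame,
 * num_segs = 1 + (9000 + 128 - 1) / 2048 = 1 + 4 = 5, i.e. one header
 * segment plus four data segments.
 */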
/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq;
	struct ionic_queue *q;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	q = &rxq->qcq.q;

	rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;

	/* Recalculate segment count based on MTU */
	q->num_segs = 1 +
		(rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size;

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u segs %u",
		rx_queue_id, q->num_descs, rxq->frame_size, q->num_segs);

	ionic_rx_init_descriptors(rxq);

	err = ionic_lif_rxq_init(rxq);
	if (err)
		return err;

	/* Allocate buffers for descriptor rings */
	if (ionic_rx_fill(rxq) != 0) {
		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
			rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
/*
 * Walk the CQ to find completed receive descriptors.
 * Any completed descriptor found is refilled.
 */
static __rte_always_inline void
ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *q_desc_base = q->base;
	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
	uint32_t work_done = 0;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 8 x 8B bufinfo */
		rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));
		/* Prefetch 4 x 16B comp */
		rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
		/* Prefetch 4 x 16B descriptors */
		rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);

		ionic_rx_clean_one(rxq, cq_desc, rx_svc);

		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

		(void)ionic_rx_fill_one(rxq);

		q->head_idx = Q_NEXT_TO_POST(q, 1);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	/* Update the queue indices and ring the doorbell */
	if (work_done)
		ionic_q_flush(q);
}
/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_stats *stats;
	struct ionic_rx_qcq *rxq;

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_lif_rxq_deinit(rxq);

	/* Free all buffers from descriptor ring */
	ionic_rx_empty(rxq);

	stats = &rxq->stats;
	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
		rxq->qcq.q.index, stats->packets, stats->mtods);

	return 0;
}
uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_rx_service rx_svc;

	rx_svc.rx_pkts = rx_pkts;
	rx_svc.nb_rx = 0;

	ionic_rxq_service(rxq, nb_pkts, &rx_svc);

	return rx_svc.nb_rx;
}
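
/*
 * For reference, an application drives this path through the generic
 * burst API; a minimal sketch, process_pkt() being a hypothetical
 * application handler:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t i, nb = rte_eth_rx_burst(port_id, 0, pkts, 32);
 *
 *	for (i = 0; i < nb; i++)
 *		process_pkt(pkts[i]);
 */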