176668754SAndrew Boyer /* SPDX-License-Identifier: BSD-3-Clause 2a5205992SAndrew Boyer * Copyright 2018-2022 Advanced Micro Devices, Inc. 3a27d9013SAlfredo Cardigliano */ 4a27d9013SAlfredo Cardigliano 5a27d9013SAlfredo Cardigliano #include <sys/queue.h> 6a27d9013SAlfredo Cardigliano #include <stdio.h> 7a27d9013SAlfredo Cardigliano #include <stdlib.h> 8a27d9013SAlfredo Cardigliano #include <string.h> 9a27d9013SAlfredo Cardigliano #include <errno.h> 10a27d9013SAlfredo Cardigliano #include <stdint.h> 11a27d9013SAlfredo Cardigliano #include <stdarg.h> 12a27d9013SAlfredo Cardigliano #include <unistd.h> 13a27d9013SAlfredo Cardigliano #include <inttypes.h> 14a27d9013SAlfredo Cardigliano 15a27d9013SAlfredo Cardigliano #include <rte_byteorder.h> 16a27d9013SAlfredo Cardigliano #include <rte_common.h> 17a27d9013SAlfredo Cardigliano #include <rte_cycles.h> 18a27d9013SAlfredo Cardigliano #include <rte_log.h> 19a27d9013SAlfredo Cardigliano #include <rte_debug.h> 20a27d9013SAlfredo Cardigliano #include <rte_interrupts.h> 21a27d9013SAlfredo Cardigliano #include <rte_pci.h> 22a27d9013SAlfredo Cardigliano #include <rte_memory.h> 23a27d9013SAlfredo Cardigliano #include <rte_memzone.h> 24a27d9013SAlfredo Cardigliano #include <rte_launch.h> 25a27d9013SAlfredo Cardigliano #include <rte_eal.h> 26a27d9013SAlfredo Cardigliano #include <rte_per_lcore.h> 27a27d9013SAlfredo Cardigliano #include <rte_lcore.h> 28a27d9013SAlfredo Cardigliano #include <rte_atomic.h> 29a27d9013SAlfredo Cardigliano #include <rte_branch_prediction.h> 30a27d9013SAlfredo Cardigliano #include <rte_mempool.h> 31a27d9013SAlfredo Cardigliano #include <rte_malloc.h> 32a27d9013SAlfredo Cardigliano #include <rte_mbuf.h> 33a27d9013SAlfredo Cardigliano #include <rte_ether.h> 34df96fd0dSBruce Richardson #include <ethdev_driver.h> 35a27d9013SAlfredo Cardigliano #include <rte_prefetch.h> 36a27d9013SAlfredo Cardigliano #include <rte_udp.h> 37a27d9013SAlfredo Cardigliano #include <rte_tcp.h> 38a27d9013SAlfredo Cardigliano #include 
<rte_sctp.h> 39a27d9013SAlfredo Cardigliano #include <rte_string_fns.h> 40a27d9013SAlfredo Cardigliano #include <rte_errno.h> 41a27d9013SAlfredo Cardigliano #include <rte_ip.h> 42a27d9013SAlfredo Cardigliano #include <rte_net.h> 43a27d9013SAlfredo Cardigliano 44a27d9013SAlfredo Cardigliano #include "ionic_logs.h" 45a27d9013SAlfredo Cardigliano #include "ionic_mac_api.h" 46a27d9013SAlfredo Cardigliano #include "ionic_ethdev.h" 47a27d9013SAlfredo Cardigliano #include "ionic_lif.h" 48a27d9013SAlfredo Cardigliano #include "ionic_rxtx.h" 49a27d9013SAlfredo Cardigliano 50*e7222f94SAndrew Boyer static void 51*e7222f94SAndrew Boyer ionic_empty_array(void **array, uint32_t cnt, uint16_t idx) 52*e7222f94SAndrew Boyer { 53*e7222f94SAndrew Boyer uint32_t i; 54*e7222f94SAndrew Boyer 55*e7222f94SAndrew Boyer for (i = idx; i < cnt; i++) 56*e7222f94SAndrew Boyer if (array[i]) 57*e7222f94SAndrew Boyer rte_pktmbuf_free_seg(array[i]); 58*e7222f94SAndrew Boyer 59*e7222f94SAndrew Boyer memset(array, 0, sizeof(void *) * cnt); 60*e7222f94SAndrew Boyer } 61*e7222f94SAndrew Boyer 62*e7222f94SAndrew Boyer static void __rte_cold 63*e7222f94SAndrew Boyer ionic_tx_empty(struct ionic_tx_qcq *txq) 64*e7222f94SAndrew Boyer { 65*e7222f94SAndrew Boyer struct ionic_queue *q = &txq->qcq.q; 66*e7222f94SAndrew Boyer 67*e7222f94SAndrew Boyer ionic_empty_array(q->info, q->num_descs, 0); 68*e7222f94SAndrew Boyer } 69*e7222f94SAndrew Boyer 70*e7222f94SAndrew Boyer static void __rte_cold 71*e7222f94SAndrew Boyer ionic_rx_empty(struct ionic_rx_qcq *rxq) 72*e7222f94SAndrew Boyer { 73*e7222f94SAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 74*e7222f94SAndrew Boyer 75*e7222f94SAndrew Boyer ionic_empty_array(q->info, q->num_descs, 0); 76*e7222f94SAndrew Boyer } 77*e7222f94SAndrew Boyer 78a27d9013SAlfredo Cardigliano /********************************************************************* 79a27d9013SAlfredo Cardigliano * 80a27d9013SAlfredo Cardigliano * TX functions 81a27d9013SAlfredo Cardigliano * 
 **********************************************************************/

/*
 * Report TX queue configuration for rte_eth_tx_queue_info_get():
 * descriptor count, configured offloads, and deferred-start flag.
 */
void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}

/*
 * Service the TX completion queue: walk completed color-matched CQ
 * entries, then free every transmitted mbuf chain up to the last
 * completion index reported by the device.
 */
static __rte_always_inline void
ionic_tx_flush(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct rte_mbuf *txm, *next;
	struct ionic_txq_comp *cq_desc_base = cq->base;
	struct ionic_txq_comp *cq_desc;
	void **info;
	/* (u_int32_t)-1 is a sentinel meaning "no completion seen" */
	u_int32_t comp_index = (u_int32_t)-1;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		/* Prefetch the next 4 descriptors (not really useful here) */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		/* Ring wrapped: completions now carry the inverted color */
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Remember the most recent completion index */
		comp_index = cq_desc->comp_index;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	if (comp_index != (u_int32_t)-1) {
		/* Free all mbufs posted before (and at) comp_index */
		while (q->tail_idx != comp_index) {
			info = IONIC_INFO_PTR(q, q->tail_idx);

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				/* q desc info */
				rte_prefetch0(&q->info[q->tail_idx]);

			/*
			 * Note: you can just use rte_pktmbuf_free,
			 * but this loop is faster
			 */
			txm = info[0];
			while (txm != NULL) {
				next = txm->next;
				rte_pktmbuf_free_seg(txm);
				txm = next;
			}
		}
	}
}

/* Release the TX queue's qcq resources (queue must already be stopped). */
void __rte_cold
ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];

	IONIC_PRINT_CALL();

	ionic_qcq_free(&txq->qcq);
}

/*
 * Stop a TX queue: mark it stopped, deinit it in the LIF, and free
 * any mbufs still sitting in the descriptor ring.  Always returns 0.
 */
int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_tx_stats *stats;
	struct ionic_tx_qcq *txq;

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	txq = eth_dev->data->tx_queues[tx_queue_id];

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	/*
	 * Note: we should better post NOP Tx desc and wait for its completion
	 * before disabling Tx queue
	 */

	ionic_lif_txq_deinit(txq);

	/* Free all buffers from descriptor ring */
	ionic_tx_empty(txq);

	stats = &txq->stats;
	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
		txq->qcq.q.index, stats->packets, stats->tso);

	return 0;
}

/*
 * Set up a TX queue: validate the index and descriptor count, free any
 * previous queue at this slot, allocate the qcq, and record the
 * checksum/deferred-start offload configuration as queue flags.
 * Returns 0 on success or -EINVAL on any validation/allocation failure.
 */
int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	/* Per-queue offloads are combined with the port-level offloads */
	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
248a27d9013SAlfredo Cardigliano */ 249ce6427ddSThomas Monjalon int __rte_cold 250a27d9013SAlfredo Cardigliano ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) 251a27d9013SAlfredo Cardigliano { 2529fdf11c4SAndrew Boyer uint8_t *tx_queue_state = eth_dev->data->tx_queue_state; 253be39f75cSAndrew Boyer struct ionic_tx_qcq *txq; 254a27d9013SAlfredo Cardigliano int err; 255a27d9013SAlfredo Cardigliano 2569fdf11c4SAndrew Boyer if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) { 2579fdf11c4SAndrew Boyer IONIC_PRINT(DEBUG, "TX queue %u already started", 2589fdf11c4SAndrew Boyer tx_queue_id); 2599fdf11c4SAndrew Boyer return 0; 2609fdf11c4SAndrew Boyer } 2619fdf11c4SAndrew Boyer 262a27d9013SAlfredo Cardigliano txq = eth_dev->data->tx_queues[tx_queue_id]; 263a27d9013SAlfredo Cardigliano 2644ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs", 265be39f75cSAndrew Boyer tx_queue_id, txq->qcq.q.num_descs); 2664ae96cb8SAndrew Boyer 267a27d9013SAlfredo Cardigliano err = ionic_lif_txq_init(txq); 268a27d9013SAlfredo Cardigliano if (err) 269a27d9013SAlfredo Cardigliano return err; 270a27d9013SAlfredo Cardigliano 2719fdf11c4SAndrew Boyer tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 272a27d9013SAlfredo Cardigliano 273a27d9013SAlfredo Cardigliano return 0; 274a27d9013SAlfredo Cardigliano } 275a27d9013SAlfredo Cardigliano 276a27d9013SAlfredo Cardigliano static void 27764b08152SAlfredo Cardigliano ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm) 27864b08152SAlfredo Cardigliano { 27964b08152SAlfredo Cardigliano struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *); 28064b08152SAlfredo Cardigliano char *l3_hdr = ((char *)eth_hdr) + txm->l2_len; 28164b08152SAlfredo Cardigliano struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *) 28264b08152SAlfredo Cardigliano (l3_hdr + txm->l3_len); 28364b08152SAlfredo Cardigliano 284daa02b5cSOlivier Matz if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { 28564b08152SAlfredo 
Cardigliano struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 28664b08152SAlfredo Cardigliano ipv4_hdr->hdr_checksum = 0; 28764b08152SAlfredo Cardigliano tcp_hdr->cksum = 0; 28864b08152SAlfredo Cardigliano tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr); 28964b08152SAlfredo Cardigliano } else { 29064b08152SAlfredo Cardigliano struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; 29164b08152SAlfredo Cardigliano tcp_hdr->cksum = 0; 29264b08152SAlfredo Cardigliano tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr); 29364b08152SAlfredo Cardigliano } 29464b08152SAlfredo Cardigliano } 29564b08152SAlfredo Cardigliano 29664b08152SAlfredo Cardigliano static void 29764b08152SAlfredo Cardigliano ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm) 29864b08152SAlfredo Cardigliano { 29964b08152SAlfredo Cardigliano struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *); 30064b08152SAlfredo Cardigliano char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len + 30164b08152SAlfredo Cardigliano txm->outer_l3_len + txm->l2_len; 30264b08152SAlfredo Cardigliano struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *) 30364b08152SAlfredo Cardigliano (l3_hdr + txm->l3_len); 30464b08152SAlfredo Cardigliano 305daa02b5cSOlivier Matz if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) { 30664b08152SAlfredo Cardigliano struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 30764b08152SAlfredo Cardigliano ipv4_hdr->hdr_checksum = 0; 30864b08152SAlfredo Cardigliano tcp_hdr->cksum = 0; 30964b08152SAlfredo Cardigliano tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr); 31064b08152SAlfredo Cardigliano } else { 31164b08152SAlfredo Cardigliano struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; 31264b08152SAlfredo Cardigliano tcp_hdr->cksum = 0; 31364b08152SAlfredo Cardigliano tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr); 31464b08152SAlfredo Cardigliano } 31564b08152SAlfredo Cardigliano } 31664b08152SAlfredo Cardigliano 
/*
 * Fill one TSO descriptor and post it: encode opcode/flags/addr into
 * the command word, record hdrlen/mss, and advance the queue head.
 * The mbuf pointer is stored in the info array only on the final
 * descriptor (done) so completion frees the chain exactly once.
 */
static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	void **info;
	uint64_t cmd;
	uint8_t flags = 0;
	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(len);
	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
	desc->mss = rte_cpu_to_le_16(mss);

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);
		info[0] = txm;
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

/*
 * Return the descriptor at the current head and point *elem at the
 * first SG element of the matching scatter-gather descriptor.
 */
static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}

/*
 * Post a TSO packet: preload the TCP pseudo-header checksum, then chop
 * the mbuf chain into MSS-sized descriptor segments.  The first loop
 * carves the head mbuf's data; the second walks the remaining segments,
 * appending SG elements to partially-filled descriptors (frag_left > 0)
 * and starting new descriptors on MSS boundaries.  Always returns 0.
 */
static int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;	/* room left in the current descriptor */
	uint32_t left;		/* bytes left in the current mbuf segment */
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	/* Tunneled only when an outer csum offload and outer IP type are set */
	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	/* First segment also carries the headers */
	seglen = hdrlen + mss;
	left = txm->data_len;
	data_iova = rte_mbuf_data_iova(txm);

	desc = ionic_tx_tso_next(txq, &elem);
	start = true;

	/* Chop data up into desc segments */

	while (left > 0) {
		len = RTE_MIN(seglen, left);
		frag_left = seglen - len;
		desc_addr = rte_cpu_to_le_64(data_iova + offset);
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		/* Multi-seg mbuf with room left: let the frag loop fill it */
		if (txm->nb_segs > 1 && frag_left > 0)
			continue;
		done = (txm->nb_segs == 1 && left == 0);
		ionic_tx_tso_post(q, desc, txm,
			desc_addr, desc_nsge, desc_len,
			hdrlen, mss,
			encap,
			vlan_tci, has_vlan,
			start, done);
		desc = ionic_tx_tso_next(txq, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop frags into desc segments */

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0) {
				/* Continue filling the open descriptor */
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = rte_cpu_to_le_16(len);
				elem++;
				desc_nsge++;
			} else {
				/* Start a fresh descriptor on an MSS boundary */
				len = RTE_MIN(mss, left);
				frag_left = mss - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;
			if (txm_seg->next != NULL && frag_left > 0)
				continue;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}

/*
 * Post a single non-TSO packet: pick checksum opcode/flags from the
 * mbuf offload flags and queue capabilities, fill the descriptor, and
 * append one SG element per additional mbuf segment.  Always returns 0.
 */
static __rte_always_inline int
ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_sg_elem *elem;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm_seg;
	void **info;
	bool encap;
	bool has_vlan;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr, cmd;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	desc = &desc_base[q->head_idx];
	info = IONIC_INFO_PTR(q, q->head_idx);

	/* HW L3 csum only when requested and enabled on the queue */
	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	/* Likewise for TCP/UDP L4 csum */
	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	has_vlan = (ol_flags & RTE_MBUF_F_TX_VLAN);
	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(txm->data_len);
	desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);

	/* Save the mbuf so completion can free the whole chain */
	info[0] = txm;

	elem = sg_desc_base[q->head_idx].elems;

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		elem->len = rte_cpu_to_le_16(txm_seg->data_len);
		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
		elem++;
		txm_seg = txm_seg->next;
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}

/*
 * Burst transmit entry point: reclaim completed buffers, clamp the
 * burst to available ring space, post each packet (TSO or plain), and
 * ring the doorbell once at the end.  Returns the number posted.
 */
uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	uint32_t next_q_head_idx;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	int err;

	/* Cleaning old buffers */
	ionic_tx_flush(txq);

	nb_avail = ionic_q_space_avail(q);
	if (unlikely(nb_avail < nb_pkts)) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		next_q_head_idx = Q_NEXT_TO_POST(q, 1);
		/* Prefetch one cache line of descriptors/info every 4 slots */
		if ((next_q_head_idx & 0x3) == 0) {
			struct ionic_txq_desc *desc_base = q->base;
			rte_prefetch0(&desc_base[next_q_head_idx]);
			rte_prefetch0(&q->info[next_q_head_idx]);
		}

		if (tx_pkts[nb_tx]->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			err = ionic_tx_tso(txq, tx_pkts[nb_tx]);
		else
			err = ionic_tx(txq, tx_pkts[nb_tx]);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += tx_pkts[nb_tx]->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		/* Ensure descriptors are visible before the doorbell */
		rte_wmb();
		ionic_q_flush(q);
	}

	stats->packets += nb_tx;
	stats->bytes += bytes_tx;

	return nb_tx;
}

/********************************************************************* 610a27d9013SAlfredo Cardigliano * 611a27d9013SAlfredo Cardigliano * TX prep functions 612a27d9013SAlfredo Cardigliano * 613a27d9013SAlfredo Cardigliano **********************************************************************/ 614a27d9013SAlfredo Cardigliano 615daa02b5cSOlivier Matz #define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 | \ 616daa02b5cSOlivier Matz RTE_MBUF_F_TX_IPV6 | \ 617daa02b5cSOlivier Matz RTE_MBUF_F_TX_VLAN | \ 618daa02b5cSOlivier Matz RTE_MBUF_F_TX_IP_CKSUM | \ 619daa02b5cSOlivier Matz RTE_MBUF_F_TX_TCP_SEG | \ 620daa02b5cSOlivier Matz RTE_MBUF_F_TX_L4_MASK) 621a27d9013SAlfredo Cardigliano 622a27d9013SAlfredo Cardigliano #define IONIC_TX_OFFLOAD_NOTSUP_MASK \ 623daa02b5cSOlivier Matz (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK) 624a27d9013SAlfredo Cardigliano 625a27d9013SAlfredo Cardigliano uint16_t 626e19eea1eSAndrew Boyer ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 627a27d9013SAlfredo Cardigliano { 628e19eea1eSAndrew Boyer struct ionic_tx_qcq *txq = tx_queue; 629a27d9013SAlfredo Cardigliano struct rte_mbuf *txm; 630a27d9013SAlfredo Cardigliano uint64_t offloads; 631a27d9013SAlfredo Cardigliano int i = 0; 632a27d9013SAlfredo Cardigliano 633a27d9013SAlfredo Cardigliano for (i = 0; i < nb_pkts; i++) { 634a27d9013SAlfredo Cardigliano txm = tx_pkts[i]; 635a27d9013SAlfredo Cardigliano 636e19eea1eSAndrew Boyer if (txm->nb_segs > txq->num_segs_fw) { 637a27d9013SAlfredo Cardigliano rte_errno = -EINVAL; 638a27d9013SAlfredo Cardigliano break; 639a27d9013SAlfredo Cardigliano } 640a27d9013SAlfredo Cardigliano 641a27d9013SAlfredo Cardigliano offloads = txm->ol_flags; 642a27d9013SAlfredo Cardigliano 643a27d9013SAlfredo Cardigliano if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) { 644a27d9013SAlfredo Cardigliano rte_errno = -ENOTSUP; 645a27d9013SAlfredo Cardigliano break; 646a27d9013SAlfredo Cardigliano } 647a27d9013SAlfredo Cardigliano } 
648a27d9013SAlfredo Cardigliano 649a27d9013SAlfredo Cardigliano return i; 650a27d9013SAlfredo Cardigliano } 651a27d9013SAlfredo Cardigliano 652a27d9013SAlfredo Cardigliano /********************************************************************* 653a27d9013SAlfredo Cardigliano * 654a27d9013SAlfredo Cardigliano * RX functions 655a27d9013SAlfredo Cardigliano * 656a27d9013SAlfredo Cardigliano **********************************************************************/ 657a27d9013SAlfredo Cardigliano 658a27d9013SAlfredo Cardigliano static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index, 659a27d9013SAlfredo Cardigliano struct rte_mbuf *mbuf); 660a27d9013SAlfredo Cardigliano 661a27d9013SAlfredo Cardigliano void 662a27d9013SAlfredo Cardigliano ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 663a27d9013SAlfredo Cardigliano struct rte_eth_rxq_info *qinfo) 664a27d9013SAlfredo Cardigliano { 665be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id]; 666be39f75cSAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 667a27d9013SAlfredo Cardigliano 668a27d9013SAlfredo Cardigliano qinfo->mp = rxq->mb_pool; 669a27d9013SAlfredo Cardigliano qinfo->scattered_rx = dev->data->scattered_rx; 670a27d9013SAlfredo Cardigliano qinfo->nb_desc = q->num_descs; 67102eabf57SAndrew Boyer qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED; 67268591087SAndrew Boyer qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 673a27d9013SAlfredo Cardigliano } 674a27d9013SAlfredo Cardigliano 675ce6427ddSThomas Monjalon void __rte_cold 6767483341aSXueming Li ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 677a27d9013SAlfredo Cardigliano { 6787483341aSXueming Li struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid]; 679be39f75cSAndrew Boyer 680be39f75cSAndrew Boyer if (!rxq) 681be39f75cSAndrew Boyer return; 682a27d9013SAlfredo Cardigliano 683a27d9013SAlfredo Cardigliano IONIC_PRINT_CALL(); 684a27d9013SAlfredo Cardigliano 
685be39f75cSAndrew Boyer ionic_qcq_free(&rxq->qcq); 686a27d9013SAlfredo Cardigliano } 687a27d9013SAlfredo Cardigliano 688ce6427ddSThomas Monjalon int __rte_cold 689a27d9013SAlfredo Cardigliano ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, 690a27d9013SAlfredo Cardigliano uint16_t rx_queue_id, 691a27d9013SAlfredo Cardigliano uint16_t nb_desc, 6924ae96cb8SAndrew Boyer uint32_t socket_id, 693a27d9013SAlfredo Cardigliano const struct rte_eth_rxconf *rx_conf, 694a27d9013SAlfredo Cardigliano struct rte_mempool *mp) 695a27d9013SAlfredo Cardigliano { 696a27d9013SAlfredo Cardigliano struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); 697be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq; 698a27d9013SAlfredo Cardigliano uint64_t offloads; 699a27d9013SAlfredo Cardigliano int err; 700a27d9013SAlfredo Cardigliano 701a27d9013SAlfredo Cardigliano if (rx_queue_id >= lif->nrxqcqs) { 702a27d9013SAlfredo Cardigliano IONIC_PRINT(ERR, 703a27d9013SAlfredo Cardigliano "Queue index %u not available (max %u queues)", 704a27d9013SAlfredo Cardigliano rx_queue_id, lif->nrxqcqs); 705a27d9013SAlfredo Cardigliano return -EINVAL; 706a27d9013SAlfredo Cardigliano } 707a27d9013SAlfredo Cardigliano 708a27d9013SAlfredo Cardigliano offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads; 7094ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, 7104ae96cb8SAndrew Boyer "Configuring skt %u RX queue %u with %u buffers, offloads %jx", 7114ae96cb8SAndrew Boyer socket_id, rx_queue_id, nb_desc, offloads); 712a27d9013SAlfredo Cardigliano 71318a44465SAndrew Boyer if (!rx_conf->rx_drop_en) 71418a44465SAndrew Boyer IONIC_PRINT(WARNING, "No-drop mode is not supported"); 71518a44465SAndrew Boyer 716a27d9013SAlfredo Cardigliano /* Validate number of receive descriptors */ 717a27d9013SAlfredo Cardigliano if (!rte_is_power_of_2(nb_desc) || 718a27d9013SAlfredo Cardigliano nb_desc < IONIC_MIN_RING_DESC || 719a27d9013SAlfredo Cardigliano nb_desc > IONIC_MAX_RING_DESC) { 720a27d9013SAlfredo Cardigliano 
IONIC_PRINT(ERR, 7214ae96cb8SAndrew Boyer "Bad descriptor count (%u) for queue %u (min: %u)", 722a27d9013SAlfredo Cardigliano nb_desc, rx_queue_id, IONIC_MIN_RING_DESC); 723a27d9013SAlfredo Cardigliano return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */ 724a27d9013SAlfredo Cardigliano } 725a27d9013SAlfredo Cardigliano 726a27d9013SAlfredo Cardigliano /* Free memory prior to re-allocation if needed... */ 727a27d9013SAlfredo Cardigliano if (eth_dev->data->rx_queues[rx_queue_id] != NULL) { 7287483341aSXueming Li ionic_dev_rx_queue_release(eth_dev, rx_queue_id); 729a27d9013SAlfredo Cardigliano eth_dev->data->rx_queues[rx_queue_id] = NULL; 730a27d9013SAlfredo Cardigliano } 731a27d9013SAlfredo Cardigliano 7329fdf11c4SAndrew Boyer eth_dev->data->rx_queue_state[rx_queue_id] = 7339fdf11c4SAndrew Boyer RTE_ETH_QUEUE_STATE_STOPPED; 7349fdf11c4SAndrew Boyer 7358ec5ad7fSAndrew Boyer err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, 736be39f75cSAndrew Boyer &rxq); 737a27d9013SAlfredo Cardigliano if (err) { 7384ae96cb8SAndrew Boyer IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id); 739a27d9013SAlfredo Cardigliano return -EINVAL; 740a27d9013SAlfredo Cardigliano } 741a27d9013SAlfredo Cardigliano 742a27d9013SAlfredo Cardigliano rxq->mb_pool = mp; 743a27d9013SAlfredo Cardigliano 744a27d9013SAlfredo Cardigliano /* 745a27d9013SAlfredo Cardigliano * Note: the interface does not currently support 746295968d1SFerruh Yigit * RTE_ETH_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN 747a27d9013SAlfredo Cardigliano * when the adapter will be able to keep the CRC and subtract 748a27d9013SAlfredo Cardigliano * it to the length for all received packets: 749a27d9013SAlfredo Cardigliano * if (eth_dev->data->dev_conf.rxmode.offloads & 750295968d1SFerruh Yigit * RTE_ETH_RX_OFFLOAD_KEEP_CRC) 751a27d9013SAlfredo Cardigliano * rxq->crc_len = ETHER_CRC_LEN; 752a27d9013SAlfredo Cardigliano */ 753a27d9013SAlfredo Cardigliano 754a27d9013SAlfredo Cardigliano /* Do not start 
queue with rte_eth_dev_start() */ 75502eabf57SAndrew Boyer if (rx_conf->rx_deferred_start) 75602eabf57SAndrew Boyer rxq->flags |= IONIC_QCQ_F_DEFERRED; 757a27d9013SAlfredo Cardigliano 758a27d9013SAlfredo Cardigliano eth_dev->data->rx_queues[rx_queue_id] = rxq; 759a27d9013SAlfredo Cardigliano 760a27d9013SAlfredo Cardigliano return 0; 761a27d9013SAlfredo Cardigliano } 762a27d9013SAlfredo Cardigliano 7630de3e209SAndrew Boyer static __rte_always_inline void 764be39f75cSAndrew Boyer ionic_rx_clean(struct ionic_rx_qcq *rxq, 765a27d9013SAlfredo Cardigliano uint32_t q_desc_index, uint32_t cq_desc_index, 76614f534beSAndrew Boyer struct ionic_rx_service *rx_svc) 767a27d9013SAlfredo Cardigliano { 768be39f75cSAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 769be39f75cSAndrew Boyer struct ionic_cq *cq = &rxq->qcq.cq; 770c6a9a6fbSAndrew Boyer struct ionic_rxq_comp *cq_desc_base = cq->base; 771a27d9013SAlfredo Cardigliano struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index]; 772700f974dSAndrew Boyer struct rte_mbuf *rxm, *rxm_seg; 773a27d9013SAlfredo Cardigliano uint64_t pkt_flags = 0; 774a27d9013SAlfredo Cardigliano uint32_t pkt_type; 775be39f75cSAndrew Boyer struct ionic_rx_stats *stats = &rxq->stats; 776a27d9013SAlfredo Cardigliano uint32_t buf_size = (uint16_t) 777a27d9013SAlfredo Cardigliano (rte_pktmbuf_data_room_size(rxq->mb_pool) - 778a27d9013SAlfredo Cardigliano RTE_PKTMBUF_HEADROOM); 779a27d9013SAlfredo Cardigliano uint32_t left; 780700f974dSAndrew Boyer void **info; 781700f974dSAndrew Boyer 782700f974dSAndrew Boyer assert(q_desc_index == cq_desc->comp_index); 783700f974dSAndrew Boyer 784700f974dSAndrew Boyer info = IONIC_INFO_PTR(q, cq_desc->comp_index); 785700f974dSAndrew Boyer 786700f974dSAndrew Boyer rxm = info[0]; 787a27d9013SAlfredo Cardigliano 788a27d9013SAlfredo Cardigliano if (cq_desc->status) { 789a27d9013SAlfredo Cardigliano stats->bad_cq_status++; 790a27d9013SAlfredo Cardigliano ionic_rx_recycle(q, q_desc_index, rxm); 791a27d9013SAlfredo 
Cardigliano return; 792a27d9013SAlfredo Cardigliano } 793a27d9013SAlfredo Cardigliano 79414f534beSAndrew Boyer if (rx_svc->nb_rx >= rx_svc->nb_pkts) { 795a27d9013SAlfredo Cardigliano stats->no_room++; 796a27d9013SAlfredo Cardigliano ionic_rx_recycle(q, q_desc_index, rxm); 797a27d9013SAlfredo Cardigliano return; 798a27d9013SAlfredo Cardigliano } 799a27d9013SAlfredo Cardigliano 800b671e69aSAndrew Boyer if (cq_desc->len > rxq->frame_size || cq_desc->len == 0) { 801a27d9013SAlfredo Cardigliano stats->bad_len++; 802a27d9013SAlfredo Cardigliano ionic_rx_recycle(q, q_desc_index, rxm); 803a27d9013SAlfredo Cardigliano return; 804a27d9013SAlfredo Cardigliano } 805a27d9013SAlfredo Cardigliano 806a27d9013SAlfredo Cardigliano rxm->data_off = RTE_PKTMBUF_HEADROOM; 807a27d9013SAlfredo Cardigliano rte_prefetch1((char *)rxm->buf_addr + rxm->data_off); 808a27d9013SAlfredo Cardigliano rxm->nb_segs = 1; /* cq_desc->num_sg_elems */ 809a27d9013SAlfredo Cardigliano rxm->pkt_len = cq_desc->len; 810be39f75cSAndrew Boyer rxm->port = rxq->qcq.lif->port_id; 811a27d9013SAlfredo Cardigliano 812a27d9013SAlfredo Cardigliano left = cq_desc->len; 813a27d9013SAlfredo Cardigliano 814a27d9013SAlfredo Cardigliano rxm->data_len = RTE_MIN(buf_size, left); 815a27d9013SAlfredo Cardigliano left -= rxm->data_len; 816a27d9013SAlfredo Cardigliano 817a27d9013SAlfredo Cardigliano rxm_seg = rxm->next; 818a27d9013SAlfredo Cardigliano while (rxm_seg && left) { 819a27d9013SAlfredo Cardigliano rxm_seg->data_len = RTE_MIN(buf_size, left); 820a27d9013SAlfredo Cardigliano left -= rxm_seg->data_len; 821a27d9013SAlfredo Cardigliano 822a27d9013SAlfredo Cardigliano rxm_seg = rxm_seg->next; 823a27d9013SAlfredo Cardigliano rxm->nb_segs++; 824a27d9013SAlfredo Cardigliano } 825a27d9013SAlfredo Cardigliano 82622e7171bSAlfredo Cardigliano /* RSS */ 827daa02b5cSOlivier Matz pkt_flags |= RTE_MBUF_F_RX_RSS_HASH; 8287506961aSAndrew Boyer rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash); 82922e7171bSAlfredo Cardigliano 
830a27d9013SAlfredo Cardigliano /* Vlan Strip */ 831a27d9013SAlfredo Cardigliano if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) { 832daa02b5cSOlivier Matz pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; 8334a735599SAndrew Boyer rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci); 834a27d9013SAlfredo Cardigliano } 835a27d9013SAlfredo Cardigliano 836a27d9013SAlfredo Cardigliano /* Checksum */ 837a27d9013SAlfredo Cardigliano if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) { 838a27d9013SAlfredo Cardigliano if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK) 839daa02b5cSOlivier Matz pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; 840a27d9013SAlfredo Cardigliano else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD) 841daa02b5cSOlivier Matz pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; 842a27d9013SAlfredo Cardigliano 843a27d9013SAlfredo Cardigliano if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) || 844a27d9013SAlfredo Cardigliano (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK)) 845daa02b5cSOlivier Matz pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; 846a27d9013SAlfredo Cardigliano else if ((cq_desc->csum_flags & 847a27d9013SAlfredo Cardigliano IONIC_RXQ_COMP_CSUM_F_TCP_BAD) || 848a27d9013SAlfredo Cardigliano (cq_desc->csum_flags & 849a27d9013SAlfredo Cardigliano IONIC_RXQ_COMP_CSUM_F_UDP_BAD)) 850daa02b5cSOlivier Matz pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; 851a27d9013SAlfredo Cardigliano } 852a27d9013SAlfredo Cardigliano 853a27d9013SAlfredo Cardigliano rxm->ol_flags = pkt_flags; 854a27d9013SAlfredo Cardigliano 855a27d9013SAlfredo Cardigliano /* Packet Type */ 856a27d9013SAlfredo Cardigliano switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) { 857a27d9013SAlfredo Cardigliano case IONIC_PKT_TYPE_IPV4: 858a27d9013SAlfredo Cardigliano pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4; 859a27d9013SAlfredo Cardigliano break; 860a27d9013SAlfredo Cardigliano case IONIC_PKT_TYPE_IPV6: 861a27d9013SAlfredo Cardigliano pkt_type = 
RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6; 862a27d9013SAlfredo Cardigliano break; 863a27d9013SAlfredo Cardigliano case IONIC_PKT_TYPE_IPV4_TCP: 864a27d9013SAlfredo Cardigliano pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | 865a27d9013SAlfredo Cardigliano RTE_PTYPE_L4_TCP; 866a27d9013SAlfredo Cardigliano break; 867a27d9013SAlfredo Cardigliano case IONIC_PKT_TYPE_IPV6_TCP: 868a27d9013SAlfredo Cardigliano pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | 869a27d9013SAlfredo Cardigliano RTE_PTYPE_L4_TCP; 870a27d9013SAlfredo Cardigliano break; 871a27d9013SAlfredo Cardigliano case IONIC_PKT_TYPE_IPV4_UDP: 872a27d9013SAlfredo Cardigliano pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | 873a27d9013SAlfredo Cardigliano RTE_PTYPE_L4_UDP; 874a27d9013SAlfredo Cardigliano break; 875a27d9013SAlfredo Cardigliano case IONIC_PKT_TYPE_IPV6_UDP: 876a27d9013SAlfredo Cardigliano pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | 877a27d9013SAlfredo Cardigliano RTE_PTYPE_L4_UDP; 878a27d9013SAlfredo Cardigliano break; 879a27d9013SAlfredo Cardigliano default: 880a27d9013SAlfredo Cardigliano { 881a27d9013SAlfredo Cardigliano struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm, 882a27d9013SAlfredo Cardigliano struct rte_ether_hdr *); 883a27d9013SAlfredo Cardigliano uint16_t ether_type = eth_h->ether_type; 884a27d9013SAlfredo Cardigliano if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) 885a27d9013SAlfredo Cardigliano pkt_type = RTE_PTYPE_L2_ETHER_ARP; 886a27d9013SAlfredo Cardigliano else 887a27d9013SAlfredo Cardigliano pkt_type = RTE_PTYPE_UNKNOWN; 888ed522a3fSAndrew Boyer stats->mtods++; 889a27d9013SAlfredo Cardigliano break; 890a27d9013SAlfredo Cardigliano } 891a27d9013SAlfredo Cardigliano } 892a27d9013SAlfredo Cardigliano 893a27d9013SAlfredo Cardigliano rxm->packet_type = pkt_type; 894a27d9013SAlfredo Cardigliano 89514f534beSAndrew Boyer rx_svc->rx_pkts[rx_svc->nb_rx] = rxm; 89614f534beSAndrew Boyer rx_svc->nb_rx++; 897a27d9013SAlfredo Cardigliano 898a27d9013SAlfredo 
Cardigliano stats->packets++; 899a27d9013SAlfredo Cardigliano stats->bytes += rxm->pkt_len; 900a27d9013SAlfredo Cardigliano } 901a27d9013SAlfredo Cardigliano 902a27d9013SAlfredo Cardigliano static void 903a27d9013SAlfredo Cardigliano ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index, 904a27d9013SAlfredo Cardigliano struct rte_mbuf *mbuf) 905a27d9013SAlfredo Cardigliano { 906a27d9013SAlfredo Cardigliano struct ionic_rxq_desc *desc_base = q->base; 907a27d9013SAlfredo Cardigliano struct ionic_rxq_desc *old = &desc_base[q_desc_index]; 908a27d9013SAlfredo Cardigliano struct ionic_rxq_desc *new = &desc_base[q->head_idx]; 909a27d9013SAlfredo Cardigliano 910a27d9013SAlfredo Cardigliano new->addr = old->addr; 911a27d9013SAlfredo Cardigliano new->len = old->len; 912a27d9013SAlfredo Cardigliano 913dd10c5b4SAndrew Boyer q->info[q->head_idx] = mbuf; 914dd10c5b4SAndrew Boyer 915dd10c5b4SAndrew Boyer q->head_idx = Q_NEXT_TO_POST(q, 1); 916dd10c5b4SAndrew Boyer 917dd10c5b4SAndrew Boyer ionic_q_flush(q); 918a27d9013SAlfredo Cardigliano } 919a27d9013SAlfredo Cardigliano 9200de3e209SAndrew Boyer static __rte_always_inline int 921b671e69aSAndrew Boyer ionic_rx_fill(struct ionic_rx_qcq *rxq) 922a27d9013SAlfredo Cardigliano { 923be39f75cSAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 924be39f75cSAndrew Boyer struct ionic_rxq_desc *desc, *desc_base = q->base; 925be39f75cSAndrew Boyer struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base; 926a27d9013SAlfredo Cardigliano struct ionic_rxq_sg_elem *elem; 927dd10c5b4SAndrew Boyer void **info; 928a27d9013SAlfredo Cardigliano rte_iova_t dma_addr; 929a27d9013SAlfredo Cardigliano uint32_t i, j, nsegs, buf_size, size; 930a27d9013SAlfredo Cardigliano 931a27d9013SAlfredo Cardigliano buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) - 932a27d9013SAlfredo Cardigliano RTE_PKTMBUF_HEADROOM); 933a27d9013SAlfredo Cardigliano 934a27d9013SAlfredo Cardigliano /* Initialize software ring entries */ 935a27d9013SAlfredo 
Cardigliano for (i = ionic_q_space_avail(q); i; i--) { 936a27d9013SAlfredo Cardigliano struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool); 937a27d9013SAlfredo Cardigliano struct rte_mbuf *prev_rxm_seg; 938a27d9013SAlfredo Cardigliano 939a27d9013SAlfredo Cardigliano if (rxm == NULL) { 940a27d9013SAlfredo Cardigliano IONIC_PRINT(ERR, "RX mbuf alloc failed"); 941a27d9013SAlfredo Cardigliano return -ENOMEM; 942a27d9013SAlfredo Cardigliano } 943a27d9013SAlfredo Cardigliano 944dd10c5b4SAndrew Boyer info = IONIC_INFO_PTR(q, q->head_idx); 945dd10c5b4SAndrew Boyer 946b671e69aSAndrew Boyer nsegs = (rxq->frame_size + buf_size - 1) / buf_size; 947a27d9013SAlfredo Cardigliano 948a27d9013SAlfredo Cardigliano desc = &desc_base[q->head_idx]; 949a27d9013SAlfredo Cardigliano dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm)); 950a27d9013SAlfredo Cardigliano desc->addr = dma_addr; 951a27d9013SAlfredo Cardigliano desc->len = buf_size; 952a27d9013SAlfredo Cardigliano size = buf_size; 953a27d9013SAlfredo Cardigliano desc->opcode = (nsegs > 1) ? 
IONIC_RXQ_DESC_OPCODE_SG : 954a27d9013SAlfredo Cardigliano IONIC_RXQ_DESC_OPCODE_SIMPLE; 955a27d9013SAlfredo Cardigliano rxm->next = NULL; 956a27d9013SAlfredo Cardigliano 957a27d9013SAlfredo Cardigliano prev_rxm_seg = rxm; 958a27d9013SAlfredo Cardigliano sg_desc = &sg_desc_base[q->head_idx]; 959a27d9013SAlfredo Cardigliano elem = sg_desc->elems; 960a27d9013SAlfredo Cardigliano for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) { 961a27d9013SAlfredo Cardigliano struct rte_mbuf *rxm_seg; 962a27d9013SAlfredo Cardigliano rte_iova_t data_iova; 963a27d9013SAlfredo Cardigliano 964a27d9013SAlfredo Cardigliano rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool); 965a27d9013SAlfredo Cardigliano if (rxm_seg == NULL) { 966a27d9013SAlfredo Cardigliano IONIC_PRINT(ERR, "RX mbuf alloc failed"); 967a27d9013SAlfredo Cardigliano return -ENOMEM; 968a27d9013SAlfredo Cardigliano } 969a27d9013SAlfredo Cardigliano 970a27d9013SAlfredo Cardigliano data_iova = rte_mbuf_data_iova(rxm_seg); 971a27d9013SAlfredo Cardigliano dma_addr = rte_cpu_to_le_64(data_iova); 972a27d9013SAlfredo Cardigliano elem->addr = dma_addr; 973a27d9013SAlfredo Cardigliano elem->len = buf_size; 974a27d9013SAlfredo Cardigliano size += buf_size; 975a27d9013SAlfredo Cardigliano elem++; 976a27d9013SAlfredo Cardigliano rxm_seg->next = NULL; 977a27d9013SAlfredo Cardigliano prev_rxm_seg->next = rxm_seg; 978a27d9013SAlfredo Cardigliano prev_rxm_seg = rxm_seg; 979a27d9013SAlfredo Cardigliano } 980a27d9013SAlfredo Cardigliano 981b671e69aSAndrew Boyer if (size < rxq->frame_size) 982a27d9013SAlfredo Cardigliano IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)", 983b671e69aSAndrew Boyer size, rxq->frame_size); 984a27d9013SAlfredo Cardigliano 985dd10c5b4SAndrew Boyer info[0] = rxm; 986dd10c5b4SAndrew Boyer 987dd10c5b4SAndrew Boyer q->head_idx = Q_NEXT_TO_POST(q, 1); 988a27d9013SAlfredo Cardigliano } 989a27d9013SAlfredo Cardigliano 99077c60793SAndrew Boyer ionic_q_flush(q); 99177c60793SAndrew Boyer 992a27d9013SAlfredo 
Cardigliano return 0; 993a27d9013SAlfredo Cardigliano } 994a27d9013SAlfredo Cardigliano 995a27d9013SAlfredo Cardigliano /* 996a27d9013SAlfredo Cardigliano * Start Receive Units for specified queue. 997a27d9013SAlfredo Cardigliano */ 998ce6427ddSThomas Monjalon int __rte_cold 999a27d9013SAlfredo Cardigliano ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) 1000a27d9013SAlfredo Cardigliano { 10019fdf11c4SAndrew Boyer uint8_t *rx_queue_state = eth_dev->data->rx_queue_state; 1002be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq; 1003a27d9013SAlfredo Cardigliano int err; 1004a27d9013SAlfredo Cardigliano 10059fdf11c4SAndrew Boyer if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) { 10069fdf11c4SAndrew Boyer IONIC_PRINT(DEBUG, "RX queue %u already started", 10079fdf11c4SAndrew Boyer rx_queue_id); 10089fdf11c4SAndrew Boyer return 0; 10099fdf11c4SAndrew Boyer } 10109fdf11c4SAndrew Boyer 1011a27d9013SAlfredo Cardigliano rxq = eth_dev->data->rx_queues[rx_queue_id]; 1012a27d9013SAlfredo Cardigliano 1013b671e69aSAndrew Boyer rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN; 1014b671e69aSAndrew Boyer 1015b671e69aSAndrew Boyer IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u", 1016b671e69aSAndrew Boyer rx_queue_id, rxq->qcq.q.num_descs, rxq->frame_size); 10174ae96cb8SAndrew Boyer 1018a27d9013SAlfredo Cardigliano err = ionic_lif_rxq_init(rxq); 1019a27d9013SAlfredo Cardigliano if (err) 1020a27d9013SAlfredo Cardigliano return err; 1021a27d9013SAlfredo Cardigliano 1022a27d9013SAlfredo Cardigliano /* Allocate buffers for descriptor rings */ 1023b671e69aSAndrew Boyer if (ionic_rx_fill(rxq) != 0) { 1024a27d9013SAlfredo Cardigliano IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d", 1025a27d9013SAlfredo Cardigliano rx_queue_id); 1026a27d9013SAlfredo Cardigliano return -1; 1027a27d9013SAlfredo Cardigliano } 1028a27d9013SAlfredo Cardigliano 10299fdf11c4SAndrew Boyer rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 
1030a27d9013SAlfredo Cardigliano 1031a27d9013SAlfredo Cardigliano return 0; 1032a27d9013SAlfredo Cardigliano } 1033a27d9013SAlfredo Cardigliano 10340de3e209SAndrew Boyer static __rte_always_inline void 1035be39f75cSAndrew Boyer ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do, 103614f534beSAndrew Boyer struct ionic_rx_service *rx_svc) 1037a27d9013SAlfredo Cardigliano { 1038be39f75cSAndrew Boyer struct ionic_cq *cq = &rxq->qcq.cq; 1039be39f75cSAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 1040be39f75cSAndrew Boyer struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base; 1041a27d9013SAlfredo Cardigliano bool more; 1042a27d9013SAlfredo Cardigliano uint32_t curr_q_tail_idx, curr_cq_tail_idx; 1043a27d9013SAlfredo Cardigliano uint32_t work_done = 0; 1044a27d9013SAlfredo Cardigliano 1045a27d9013SAlfredo Cardigliano if (work_to_do == 0) 1046a27d9013SAlfredo Cardigliano return; 1047a27d9013SAlfredo Cardigliano 1048a27d9013SAlfredo Cardigliano cq_desc = &cq_desc_base[cq->tail_idx]; 1049a27d9013SAlfredo Cardigliano while (color_match(cq_desc->pkt_type_color, cq->done_color)) { 1050a27d9013SAlfredo Cardigliano curr_cq_tail_idx = cq->tail_idx; 10512aed9865SAndrew Boyer cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1); 1052a27d9013SAlfredo Cardigliano 1053a27d9013SAlfredo Cardigliano if (cq->tail_idx == 0) 1054a27d9013SAlfredo Cardigliano cq->done_color = !cq->done_color; 1055a27d9013SAlfredo Cardigliano 1056a27d9013SAlfredo Cardigliano /* Prefetch the next 4 descriptors */ 1057a27d9013SAlfredo Cardigliano if ((cq->tail_idx & 0x3) == 0) 1058a27d9013SAlfredo Cardigliano rte_prefetch0(&cq_desc_base[cq->tail_idx]); 1059a27d9013SAlfredo Cardigliano 1060a27d9013SAlfredo Cardigliano do { 1061a27d9013SAlfredo Cardigliano more = (q->tail_idx != cq_desc->comp_index); 1062a27d9013SAlfredo Cardigliano 1063a27d9013SAlfredo Cardigliano curr_q_tail_idx = q->tail_idx; 10644ad56b7aSAndrew Boyer q->tail_idx = Q_NEXT_TO_SRVC(q, 1); 1065a27d9013SAlfredo Cardigliano 1066a27d9013SAlfredo 
Cardigliano /* Prefetch the next 4 descriptors */ 1067a27d9013SAlfredo Cardigliano if ((q->tail_idx & 0x3) == 0) 1068a27d9013SAlfredo Cardigliano /* q desc info */ 1069a27d9013SAlfredo Cardigliano rte_prefetch0(&q->info[q->tail_idx]); 1070a27d9013SAlfredo Cardigliano 1071c6a9a6fbSAndrew Boyer ionic_rx_clean(rxq, curr_q_tail_idx, curr_cq_tail_idx, 107214f534beSAndrew Boyer rx_svc); 1073a27d9013SAlfredo Cardigliano 1074a27d9013SAlfredo Cardigliano } while (more); 1075a27d9013SAlfredo Cardigliano 1076a27d9013SAlfredo Cardigliano if (++work_done == work_to_do) 1077a27d9013SAlfredo Cardigliano break; 1078a27d9013SAlfredo Cardigliano 1079a27d9013SAlfredo Cardigliano cq_desc = &cq_desc_base[cq->tail_idx]; 1080a27d9013SAlfredo Cardigliano } 1081a27d9013SAlfredo Cardigliano } 1082a27d9013SAlfredo Cardigliano 1083a27d9013SAlfredo Cardigliano /* 1084a27d9013SAlfredo Cardigliano * Stop Receive Units for specified queue. 1085a27d9013SAlfredo Cardigliano */ 1086ce6427ddSThomas Monjalon int __rte_cold 1087a27d9013SAlfredo Cardigliano ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) 1088a27d9013SAlfredo Cardigliano { 1089*e7222f94SAndrew Boyer uint8_t *rx_queue_state = eth_dev->data->rx_queue_state; 1090*e7222f94SAndrew Boyer struct ionic_rx_stats *stats; 1091be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq; 1092a27d9013SAlfredo Cardigliano 10934ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id); 1094a27d9013SAlfredo Cardigliano 1095a27d9013SAlfredo Cardigliano rxq = eth_dev->data->rx_queues[rx_queue_id]; 1096a27d9013SAlfredo Cardigliano 1097*e7222f94SAndrew Boyer rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 10989fdf11c4SAndrew Boyer 1099*e7222f94SAndrew Boyer ionic_lif_rxq_deinit(rxq); 1100a27d9013SAlfredo Cardigliano 1101*e7222f94SAndrew Boyer /* Free all buffers from descriptor ring */ 1102*e7222f94SAndrew Boyer ionic_rx_empty(rxq); 1103*e7222f94SAndrew Boyer 1104*e7222f94SAndrew Boyer stats = &rxq->stats; 
1105*e7222f94SAndrew Boyer IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju", 1106*e7222f94SAndrew Boyer rxq->qcq.q.index, stats->packets, stats->mtods); 1107a27d9013SAlfredo Cardigliano 1108a27d9013SAlfredo Cardigliano return 0; 1109a27d9013SAlfredo Cardigliano } 1110a27d9013SAlfredo Cardigliano 1111a27d9013SAlfredo Cardigliano uint16_t 1112a27d9013SAlfredo Cardigliano ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 1113a27d9013SAlfredo Cardigliano uint16_t nb_pkts) 1114a27d9013SAlfredo Cardigliano { 1115be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq = rx_queue; 111614f534beSAndrew Boyer struct ionic_rx_service rx_svc; 1117a27d9013SAlfredo Cardigliano 111814f534beSAndrew Boyer rx_svc.rx_pkts = rx_pkts; 111914f534beSAndrew Boyer rx_svc.nb_pkts = nb_pkts; 112014f534beSAndrew Boyer rx_svc.nb_rx = 0; 1121a27d9013SAlfredo Cardigliano 112214f534beSAndrew Boyer ionic_rxq_service(rxq, nb_pkts, &rx_svc); 1123a27d9013SAlfredo Cardigliano 1124b671e69aSAndrew Boyer ionic_rx_fill(rxq); 1125a27d9013SAlfredo Cardigliano 112614f534beSAndrew Boyer return rx_svc.nb_rx; 1127a27d9013SAlfredo Cardigliano } 1128