/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_net.h>

#include "ionic_logs.h"
#include "ionic_mac_api.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

#define IONIC_RX_RING_DOORBELL_STRIDE		(32 - 1)

/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}
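
/*
 * Walk the Tx completion ring and free the mbuf chain of every descriptor
 * the hardware has finished with, up to the most recent completion index.
 */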

static __rte_always_inline void
ionic_tx_flush(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct rte_mbuf *txm, *next;
	struct ionic_txq_comp *cq_desc_base = cq->base;
	struct ionic_txq_comp *cq_desc;
	void **info;
	u_int32_t comp_index = (u_int32_t)-1;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		/* Prefetch the next 4 descriptors (not really useful here) */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		comp_index = cq_desc->comp_index;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	if (comp_index != (u_int32_t)-1) {
		while (q->tail_idx != comp_index) {
			info = IONIC_INFO_PTR(q, q->tail_idx);

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				/* q desc info */
				rte_prefetch0(&q->info[q->tail_idx]);

			/*
			 * Note: you can just use rte_pktmbuf_free,
			 * but this loop is faster
			 */
			txm = info[0];
			while (txm != NULL) {
				next = txm->next;
				rte_pktmbuf_free_seg(txm);
				txm = next;
			}
		}
	}
}
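
/*
 * Tx queue release handler: log the final queue statistics, tear the
 * queue down in the device and free the queue/completion pair.
 */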

void __rte_cold
ionic_dev_tx_queue_release(void *tx_queue)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_tx_stats *stats = &txq->stats;

	IONIC_PRINT_CALL();

	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
		txq->qcq.q.index, stats->packets, stats->tso);

	ionic_lif_txq_deinit(txq);

	ionic_qcq_free(&txq->qcq);
}

int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_tx_qcq *txq;

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	txq = eth_dev->data->tx_queues[tx_queue_id];

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	/*
	 * Note: it would be better to post a NOP Tx desc and wait for its
	 * completion before disabling the Tx queue
	 */

	ionic_qcq_disable(&txq->qcq);

	ionic_tx_flush(txq);

	return 0;
}
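
/*
 * Configure one Tx queue: validate the ring size, allocate the qcq and
 * translate the requested checksum offloads into per-queue
 * IONIC_QCQ_F_CSUM_* flags consumed on the transmit path.
 */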

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		void *tx_queue = eth_dev->data->tx_queues[tx_queue_id];
		ionic_dev_tx_queue_release(tx_queue);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}
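
/*
 * A queue flagged IONIC_QCQ_F_DEFERRED is not started by
 * rte_eth_dev_start(); the application is expected to start it explicitly
 * with rte_eth_dev_tx_queue_start(), which lands in the handler below.
 */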

/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_tx_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->qcq.q.num_descs);

	if (!(txq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_txq_init(txq);
		if (err)
			return err;
	} else {
		ionic_qcq_enable(&txq->qcq);
	}

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
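
/*
 * TSO helpers: seed the TCP checksum field with a pseudo-header based
 * value before the packet is handed to the hardware. The _inner_ variant
 * is used for encapsulated (tunneled) packets.
 */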

static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & PKT_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & PKT_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}
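
/*
 * ionic_tx_tso_post() fills in and posts one TSO descriptor; the SOT/EOT
 * flags mark the first and last descriptor of the original packet.
 * ionic_tx_tso_next() returns the next free descriptor together with its
 * scatter-gather element list.
 */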

static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	uint8_t flags = 0;
	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->len = len;
	desc->vlan_tci = vlan_tci;
	desc->hdr_len = hdrlen;
	desc->mss = mss;

	ionic_q_post(q, done, done ? txm : NULL);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}
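
/*
 * Post one TSO packet: the payload is chopped into MSS-sized descriptors,
 * with additional mbuf segments spread across SG elements. The first
 * descriptor also carries the headers; for example (illustrative numbers
 * only), with hdrlen = 54 and mss = 1448 the first descriptor covers
 * 54 + 1448 bytes of the first segment and later ones 1448 bytes each.
 */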

static int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
		bool not_xmit_more)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & PKT_TX_OUTER_IPV4) ||
		(ol_flags & PKT_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	seglen = hdrlen + mss;
	left = txm->data_len;
	data_iova = rte_mbuf_data_iova(txm);

	desc = ionic_tx_tso_next(txq, &elem);
	start = true;

	/* Chop data up into desc segments */

	while (left > 0) {
		len = RTE_MIN(seglen, left);
		frag_left = seglen - len;
		desc_addr = rte_cpu_to_le_64(data_iova + offset);
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (txm->nb_segs > 1 && frag_left > 0)
			continue;
		done = (txm->nb_segs == 1 && left == 0);
		ionic_tx_tso_post(q, desc, txm,
			desc_addr, desc_nsge, desc_len,
			hdrlen, mss,
			encap,
			vlan_tci, has_vlan,
			start, done && not_xmit_more);
		desc = ionic_tx_tso_next(txq, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop frags into desc segments */

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0) {
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = len;
				elem++;
				desc_nsge++;
			} else {
				len = RTE_MIN(mss, left);
				frag_left = mss - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;
			if (txm_seg->next != NULL && frag_left > 0)
				continue;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done && not_xmit_more);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}
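
/*
 * Post one non-TSO packet: select the checksum opcode from the mbuf
 * ol_flags and the per-queue IONIC_QCQ_F_CSUM_* flags, fill the descriptor
 * for the first segment plus one SG element per additional segment, then
 * post it to the queue.
 */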

static __rte_always_inline int
ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm,
		bool not_xmit_more)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_sg_elem *elem;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm_seg;
	bool encap;
	bool has_vlan;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	desc = &desc_base[q->head_idx];

	if ((ol_flags & PKT_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & PKT_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & PKT_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & PKT_TX_OUTER_IPV4) ||
		(ol_flags & PKT_TX_OUTER_IPV6));

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->len = txm->data_len;
	desc->vlan_tci = txm->vlan_tci;

	elem = sg_desc_base[q->head_idx].elems;
	txm_seg = txm->next;
	while (txm_seg != NULL) {
		elem->len = txm_seg->data_len;
		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
		elem++;
		txm_seg = txm_seg->next;
	}

	ionic_q_post(q, not_xmit_more, txm);

	return 0;
}
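
/*
 * Burst transmit entry point, typically registered as the device's
 * tx_pkt_burst handler. Completions are serviced first, then up to
 * nb_pkts packets are posted; the doorbell is rung only for the final
 * descriptor of the burst (or when flushing after an error).
 *
 * Typical application usage (illustrative only, placeholder names):
 *
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, n_pkts);
 *
 * which reaches this function through the ethdev burst API.
 */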

uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	uint32_t next_q_head_idx;
	uint32_t bytes_tx = 0;
	uint16_t nb_tx = 0;
	int err;
	bool last;

	/* Cleaning old buffers */
	ionic_tx_flush(txq);

	if (unlikely(ionic_q_space_avail(q) < nb_pkts)) {
		stats->stop += nb_pkts;
		return 0;
	}

	while (nb_tx < nb_pkts) {
		last = (nb_tx == (nb_pkts - 1));

		next_q_head_idx = Q_NEXT_TO_POST(q, 1);
		if ((next_q_head_idx & 0x3) == 0) {
			struct ionic_txq_desc *desc_base = q->base;
			rte_prefetch0(&desc_base[next_q_head_idx]);
			rte_prefetch0(&q->info[next_q_head_idx]);
		}

		if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
			err = ionic_tx_tso(txq, tx_pkts[nb_tx], last);
		else
			err = ionic_tx(txq, tx_pkts[nb_tx], last);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			if (nb_tx > 0)
				ionic_q_flush(q);
			break;
		}

		bytes_tx += tx_pkts[nb_tx]->pkt_len;
		nb_tx++;
	}

	stats->packets += nb_tx;
	stats->bytes += bytes_tx;

	return nb_tx;
}

/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (	\
	PKT_TX_IPV4 |		\
	PKT_TX_IPV6 |		\
	PKT_TX_VLAN |		\
	PKT_TX_IP_CKSUM |	\
	PKT_TX_TCP_SEG |	\
	PKT_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
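
/*
 * Tx prepare handler, reached via rte_eth_tx_prepare(): reject packets
 * that have more segments than the ring supports or that request offloads
 * outside IONIC_TX_OFFLOAD_MASK.
 */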

uint16_t
ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS_V1 + 1) {
			rte_errno = -EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = -ENOTSUP;
			break;
		}
	}

	return i;
}

/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf);

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}
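
/*
 * Return every posted but unconsumed receive buffer to the mempool;
 * used when the queue is released.
 */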

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *mbuf;
	void **info;

	while (q->tail_idx != q->head_idx) {
		info = IONIC_INFO_PTR(q, q->tail_idx);
		mbuf = info[0];
		rte_mempool_put(rxq->mb_pool, mbuf);

		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
	}
}

void __rte_cold
ionic_dev_rx_queue_release(void *rx_queue)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_rx_stats *stats;

	if (!rxq)
		return;

	IONIC_PRINT_CALL();

	stats = &rxq->stats;

	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
		rxq->qcq.q.index, stats->packets, stats->mtods);

	ionic_rx_empty(rxq);

	ionic_lif_rxq_deinit(rxq);

	ionic_qcq_free(&rxq->qcq);
}
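
/*
 * Configure one Rx queue: validate the ring size, allocate the qcq and
 * record the mbuf pool that will be used to replenish the ring.
 */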

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
			nb_desc < IONIC_MIN_RING_DESC ||
			nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
		ionic_dev_rx_queue_release(rx_queue);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc,
		&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;

	/*
	 * Note: the interface does not currently support
	 * DEV_RX_OFFLOAD_KEEP_CRC; please also consider ETHER_CRC_LEN
	 * when the adapter is able to keep the CRC and subtract
	 * it from the length of all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *		DEV_RX_OFFLOAD_KEEP_CRC)
	 *	rxq->crc_len = ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}
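
/*
 * Service one Rx completion: validate it, attach the length, RSS hash,
 * VLAN, checksum and packet-type metadata to the mbuf and hand it to the
 * caller's receive array; on any error the buffer is recycled back into
 * the ring instead.
 */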

static __rte_always_inline void
ionic_rx_clean(struct ionic_rx_qcq *rxq,
		uint32_t q_desc_index, uint32_t cq_desc_index,
		void *service_cb_arg)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_rxq_comp *cq_desc_base = cq->base;
	struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct rte_mbuf *rxm, *rxm_seg;
	uint32_t max_frame_size =
		rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	struct ionic_rx_stats *stats = &rxq->stats;
	struct ionic_rx_service *recv_args = (struct ionic_rx_service *)
		service_cb_arg;
	uint32_t buf_size = (uint16_t)
		(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);
	uint32_t left;
	void **info;

	assert(q_desc_index == cq_desc->comp_index);

	info = IONIC_INFO_PTR(q, cq_desc->comp_index);

	rxm = info[0];

	if (!recv_args) {
		stats->no_cb_arg++;
		/* Flush */
		rte_pktmbuf_free(rxm);
		/*
		 * Note: rte_mempool_put is faster with no segs
		 * rte_mempool_put(rxq->mb_pool, rxm);
		 */
		return;
	}

	if (cq_desc->status) {
		stats->bad_cq_status++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (recv_args->nb_rx >= recv_args->nb_pkts) {
		stats->no_room++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (cq_desc->len > max_frame_size ||
			cq_desc->len == 0) {
		stats->bad_len++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	rxm->data_off = RTE_PKTMBUF_HEADROOM;
	rte_prefetch1((char *)rxm->buf_addr + rxm->data_off);
	rxm->nb_segs = 1; /* cq_desc->num_sg_elems */
	rxm->pkt_len = cq_desc->len;
	rxm->port = rxq->qcq.lif->port_id;

	left = cq_desc->len;

	rxm->data_len = RTE_MIN(buf_size, left);
	left -= rxm->data_len;

	rxm_seg = rxm->next;
	while (rxm_seg && left) {
		rxm_seg->data_len = RTE_MIN(buf_size, left);
		left -= rxm_seg->data_len;

		rxm_seg = rxm_seg->next;
		rxm->nb_segs++;
	}

	/* RSS */
	pkt_flags |= PKT_RX_RSS_HASH;
	rxm->hash.rss = cq_desc->rss_hash;

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxm->vlan_tci = cq_desc->vlan_tci;
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
			pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
		else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
			pkt_flags |= PKT_RX_IP_CKSUM_BAD;

		if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
			(cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
			pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
		else if ((cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
				(cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
			pkt_flags |= PKT_RX_L4_CKSUM_BAD;
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
	case IONIC_PKT_TYPE_IPV4:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		break;
	case IONIC_PKT_TYPE_IPV6:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		break;
	case IONIC_PKT_TYPE_IPV4_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV6_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV4_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
		break;
	case IONIC_PKT_TYPE_IPV6_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
		break;
	default:
		{
			struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
			uint16_t ether_type = eth_h->ether_type;
			if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
				pkt_type = RTE_PTYPE_L2_ETHER_ARP;
			else
				pkt_type = RTE_PTYPE_UNKNOWN;
			stats->mtods++;
			break;
		}
	}

	rxm->packet_type = pkt_type;

	recv_args->rx_pkts[recv_args->nb_rx] = rxm;
	recv_args->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}
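
/*
 * Re-post an already-mapped buffer at the current head of the ring by
 * copying the old descriptor's address and length.
 */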

static void
ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf)
{
	struct ionic_rxq_desc *desc_base = q->base;
	struct ionic_rxq_desc *old = &desc_base[q_desc_index];
	struct ionic_rxq_desc *new = &desc_base[q->head_idx];

	new->addr = old->addr;
	new->len = old->len;

	ionic_q_post(q, true, mbuf);
}
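
/*
 * Post receive buffers for every free descriptor. Each descriptor gets
 * ceil(len / buf_size) buffers: a SIMPLE descriptor when one mempool
 * buffer is large enough, otherwise an SG descriptor with extra elements
 * (illustrative numbers only: len = 9000 with buf_size = 2048 needs 5
 * buffers). The doorbell is rung once every 32 descriptors, as set by
 * IONIC_RX_RING_DOORBELL_STRIDE.
 */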

static __rte_always_inline int
ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	struct ionic_rxq_sg_elem *elem;
	rte_iova_t dma_addr;
	uint32_t i, j, nsegs, buf_size, size;
	bool ring_doorbell;

	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);

	/* Initialize software ring entries */
	for (i = ionic_q_space_avail(q); i; i--) {
		struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
		struct rte_mbuf *prev_rxm_seg;

		if (rxm == NULL) {
			IONIC_PRINT(ERR, "RX mbuf alloc failed");
			return -ENOMEM;
		}

		nsegs = (len + buf_size - 1) / buf_size;

		desc = &desc_base[q->head_idx];
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
		desc->addr = dma_addr;
		desc->len = buf_size;
		size = buf_size;
		desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
			IONIC_RXQ_DESC_OPCODE_SIMPLE;
		rxm->next = NULL;

		prev_rxm_seg = rxm;
		sg_desc = &sg_desc_base[q->head_idx];
		elem = sg_desc->elems;
		for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
			struct rte_mbuf *rxm_seg;
			rte_iova_t data_iova;

			rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (rxm_seg == NULL) {
				IONIC_PRINT(ERR, "RX mbuf alloc failed");
				return -ENOMEM;
			}

			data_iova = rte_mbuf_data_iova(rxm_seg);
			dma_addr = rte_cpu_to_le_64(data_iova);
			elem->addr = dma_addr;
			elem->len = buf_size;
			size += buf_size;
			elem++;
			rxm_seg->next = NULL;
			prev_rxm_seg->next = rxm_seg;
			prev_rxm_seg = rxm_seg;
		}

		if (size < len)
			IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)",
				size, len);

		ring_doorbell = ((q->head_idx + 1) &
			IONIC_RX_RING_DOORBELL_STRIDE) == 0;

		ionic_q_post(q, ring_doorbell, rxm);
	}

	return 0;
}
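
/*
 * On the first start the queue is initialized (IONIC_QCQ_F_INITED not yet
 * set); afterwards it is simply re-enabled. The ring is then filled with
 * buffers sized for the configured max_rx_pkt_len.
 */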

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs (size: %u)",
		rx_queue_id, rxq->qcq.q.num_descs, frame_size);

	if (!(rxq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_rxq_init(rxq);
		if (err)
			return err;
	} else {
		ionic_qcq_enable(&rxq->qcq);
	}

	/* Allocate buffers for descriptor rings */
	if (ionic_rx_fill(rxq, frame_size) != 0) {
		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
			rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
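
/*
 * Service up to work_to_do Rx completions, calling ionic_rx_clean() for
 * every ring entry covered by each completion. Passing (uint32_t)-1, as
 * the stop path does, effectively drains everything that is available.
 */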

static __rte_always_inline void
ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		void *service_cb_arg)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
	bool more;
	uint32_t curr_q_tail_idx, curr_cq_tail_idx;
	uint32_t work_done = 0;

	if (work_to_do == 0)
		return;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		curr_cq_tail_idx = cq->tail_idx;
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch the next 4 descriptors */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		do {
			more = (q->tail_idx != cq_desc->comp_index);

			curr_q_tail_idx = q->tail_idx;
			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				/* q desc info */
				rte_prefetch0(&q->info[q->tail_idx]);

			ionic_rx_clean(rxq, curr_q_tail_idx, curr_cq_tail_idx,
				service_cb_arg);

		} while (more);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}
}

/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct ionic_rx_qcq *rxq;

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_qcq_disable(&rxq->qcq);

	/* Flush */
	ionic_rxq_service(rxq, -1, NULL);

	return 0;
}

uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	uint32_t frame_size =
		rxq->qcq.lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct ionic_rx_service service_cb_arg;

	service_cb_arg.rx_pkts = rx_pkts;
	service_cb_arg.nb_pkts = nb_pkts;
	service_cb_arg.nb_rx = 0;

	ionic_rxq_service(rxq, nb_pkts, &service_cb_arg);

	ionic_rx_fill(rxq, frame_size);

	return service_cb_arg.nb_rx;
}
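
/*
 * Typical receive usage (illustrative only, placeholder names):
 *
 *	uint16_t nb = rte_eth_rx_burst(port_id, queue_id, pkts, BURST_SIZE);
 *
 * which reaches ionic_recv_pkts() through the ethdev burst API.
 */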