/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_net.h>

#include "ionic_logs.h"
#include "ionic_mac_api.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}
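
/*
 * Walk the Tx completion ring and free the mbuf chains for every
 * descriptor the device has finished with. A completion entry is valid
 * when its color bit matches cq->done_color; the expected color flips
 * each time the completion index wraps back to zero. Only the most
 * recent comp_index needs to be processed, since it also covers all
 * earlier descriptors.
 */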
static __rte_always_inline void
ionic_tx_flush(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct rte_mbuf *txm, *next;
	struct ionic_txq_comp *cq_desc_base = cq->base;
	struct ionic_txq_comp *cq_desc;
	void **info;
	uint32_t comp_index = (uint32_t)-1;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		/* Prefetch the next 4 descriptors (not really useful here) */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		comp_index = cq_desc->comp_index;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	if (comp_index != (uint32_t)-1) {
		while (q->tail_idx != comp_index) {
			info = IONIC_INFO_PTR(q, q->tail_idx);

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				/* q desc info */
				rte_prefetch0(&q->info[q->tail_idx]);

			/*
			 * Note: rte_pktmbuf_free() would also work here,
			 * but this explicit loop is faster
			 */
			txm = info[0];
			while (txm != NULL) {
				next = txm->next;
				rte_pktmbuf_free_seg(txm);
				txm = next;
			}
		}
	}
}

void __rte_cold
ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];
	struct ionic_tx_stats *stats = &txq->stats;

	IONIC_PRINT_CALL();

	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
		txq->qcq.q.index, stats->packets, stats->tso);

	ionic_lif_txq_deinit(txq);

	ionic_qcq_free(&txq->qcq);
}
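
/*
 * Queue stop: mark the queue stopped in the ethdev state, disable the
 * queue/completion pair, then reap any completions still outstanding so
 * that no mbufs are leaked.
 */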

int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_tx_qcq *txq;

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	txq = eth_dev->data->tx_queues[tx_queue_id];

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	/*
	 * Note: it would be better to post a NOP Tx descriptor and wait
	 * for its completion before disabling the Tx queue
	 */

	ionic_qcq_disable(&txq->qcq);

	ionic_tx_flush(txq);

	return 0;
}
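
/*
 * Tx queue setup: nb_desc must be a power of two and at least
 * IONIC_MIN_RING_DESC. The checksum-related offload bits are folded
 * into per-queue IONIC_QCQ_F_* flags here so the hot path only has to
 * test txq->flags. A queue created with tx_deferred_start is left for
 * the application to start explicitly.
 */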

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_tx_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->qcq.q.num_descs);

	if (!(txq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_txq_init(txq);
		if (err)
			return err;
	} else {
		ionic_qcq_enable(&txq->qcq);
	}

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
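
/*
 * TSO helpers: before handing a TSO packet to the device, the inner-most
 * TCP checksum field is seeded with the IP pseudo-header checksum
 * computed over the unsegmented headers; the hardware then adjusts it
 * for each segment it generates. The two variants below differ only in
 * where the inner headers start: directly after the Ethernet header, or
 * after the outer L2/L3 headers of an encapsulated (tunneled) packet.
 */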

static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	void **info;
	uint64_t cmd;
	uint8_t flags = 0;
	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(len);
	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
	desc->mss = rte_cpu_to_le_16(mss);

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);
		info[0] = txm;
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}
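
/*
 * TSO transmit path. The payload is chopped into MSS-sized descriptors;
 * the first descriptor also carries the headers, so it covers
 * hdrlen + mss bytes. As a rough, illustrative example: with 54 bytes of
 * l2+l3+l4 headers and tso_segsz = 1448, the first descriptor spans 1502
 * bytes of the mbuf and each following one 1448, with SG elements used
 * when an mbuf segment boundary falls inside a TSO segment. The SOT/EOT
 * flags mark the first and last descriptor posted for the packet.
 */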

static int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	seglen = hdrlen + mss;
	left = txm->data_len;
	data_iova = rte_mbuf_data_iova(txm);

	desc = ionic_tx_tso_next(txq, &elem);
	start = true;

	/* Chop data up into desc segments */

	while (left > 0) {
		len = RTE_MIN(seglen, left);
		frag_left = seglen - len;
		desc_addr = rte_cpu_to_le_64(data_iova + offset);
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (txm->nb_segs > 1 && frag_left > 0)
			continue;
		done = (txm->nb_segs == 1 && left == 0);
		ionic_tx_tso_post(q, desc, txm,
			desc_addr, desc_nsge, desc_len,
			hdrlen, mss,
			encap,
			vlan_tci, has_vlan,
			start, done);
		desc = ionic_tx_tso_next(txq, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop frags into desc segments */

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0) {
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = rte_cpu_to_le_16(len);
				elem++;
				desc_nsge++;
			} else {
				len = RTE_MIN(mss, left);
				frag_left = mss - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;
			if (txm_seg->next != NULL && frag_left > 0)
				continue;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}
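
/*
 * Non-TSO transmit path: one descriptor per packet, with any additional
 * mbuf segments described by the scatter-gather list. The checksum
 * opcode is only requested when both the mbuf asks for it (ol_flags)
 * and the queue was configured with the matching offload flag.
 */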

static __rte_always_inline int
ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_sg_elem *elem;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm_seg;
	void **info;
	bool encap;
	bool has_vlan;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr, cmd;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	desc = &desc_base[q->head_idx];
	info = IONIC_INFO_PTR(q, q->head_idx);

	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	has_vlan = (ol_flags & RTE_MBUF_F_TX_VLAN);
	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(txm->data_len);
	desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);

	info[0] = txm;

	elem = sg_desc_base[q->head_idx].elems;

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		elem->len = rte_cpu_to_le_16(txm_seg->data_len);
		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
		elem++;
		txm_seg = txm_seg->next;
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}
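
/*
 * Burst transmit. Completions are reaped first to reclaim ring space,
 * the burst is clamped to the number of descriptors actually available,
 * and the doorbell (ionic_q_flush) is rung once at the end for the
 * whole burst.
 */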

uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	uint32_t next_q_head_idx;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	int err;

	/* Cleaning old buffers */
	ionic_tx_flush(txq);

	nb_avail = ionic_q_space_avail(q);
	if (unlikely(nb_avail < nb_pkts)) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		next_q_head_idx = Q_NEXT_TO_POST(q, 1);
		if ((next_q_head_idx & 0x3) == 0) {
			struct ionic_txq_desc *desc_base = q->base;
			rte_prefetch0(&desc_base[next_q_head_idx]);
			rte_prefetch0(&q->info[next_q_head_idx]);
		}

		if (tx_pkts[nb_tx]->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			err = ionic_tx_tso(txq, tx_pkts[nb_tx]);
		else
			err = ionic_tx(txq, tx_pkts[nb_tx]);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += tx_pkts[nb_tx]->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		rte_wmb();
		ionic_q_flush(q);
	}

	stats->packets += nb_tx;
	stats->bytes += bytes_tx;

	return nb_tx;
}

/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (	\
	RTE_MBUF_F_TX_IPV4 |	\
	RTE_MBUF_F_TX_IPV6 |	\
	RTE_MBUF_F_TX_VLAN |	\
	RTE_MBUF_F_TX_IP_CKSUM |	\
	RTE_MBUF_F_TX_TCP_SEG |	\
	RTE_MBUF_F_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
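
/*
 * tx_pkt_prepare callback: rejects packets with more segments than the
 * firmware advertises or with offload flags outside IONIC_TX_OFFLOAD_MASK.
 * Applications would typically reach this through rte_eth_tx_prepare()
 * before rte_eth_tx_burst() and check rte_errno for the first rejected
 * packet.
 */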

uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > txq->num_segs_fw) {
			rte_errno = -EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = -ENOTSUP;
			break;
		}
	}

	return i;
}

/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf);
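
/*
 * Receive path overview: ionic_rx_fill() posts mbufs (with SG elements
 * when the maximum frame needs more than one buffer), ionic_rxq_service()
 * walks the completion ring, and ionic_rx_clean() turns each completion
 * into a finished mbuf or recycles the buffer on error.
 */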

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *mbuf;
	void **info;

	while (q->tail_idx != q->head_idx) {
		info = IONIC_INFO_PTR(q, q->tail_idx);
		mbuf = info[0];
		rte_mempool_put(rxq->mb_pool, mbuf);

		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
	}
}

void __rte_cold
ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];
	struct ionic_rx_stats *stats;

	if (!rxq)
		return;

	IONIC_PRINT_CALL();

	stats = &rxq->stats;

	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
		rxq->qcq.q.index, stats->packets, stats->mtods);

	ionic_rx_empty(rxq);

	ionic_lif_rxq_deinit(rxq);

	ionic_qcq_free(&rxq->qcq);
}
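
/*
 * Rx queue setup: nb_desc must be a power of two within
 * [IONIC_MIN_RING_DESC, IONIC_MAX_RING_DESC]. The mempool supplied here
 * determines the receive buffer size used when the ring is filled.
 */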

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
	    nb_desc < IONIC_MIN_RING_DESC ||
	    nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc,
			&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;

	/*
	 * Note: the interface does not currently support
	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC. Once the adapter is able to keep
	 * the CRC, also consider ETHER_CRC_LEN and subtract it from the
	 * length of all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *		RTE_ETH_RX_OFFLOAD_KEEP_CRC)
	 *	rxq->crc_len = ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}
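
/*
 * Per-completion receive processing: validate the completion, fix up the
 * mbuf chain lengths against the reported frame length, then translate
 * the completion flags into mbuf ol_flags (RSS hash, stripped VLAN,
 * IP/L4 checksum status) and a packet type.
 */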

static __rte_always_inline void
ionic_rx_clean(struct ionic_rx_qcq *rxq,
		uint32_t q_desc_index, uint32_t cq_desc_index,
		void *service_cb_arg)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_rxq_comp *cq_desc_base = cq->base;
	struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct rte_mbuf *rxm, *rxm_seg;
	uint32_t max_frame_size =
		rxq->qcq.lif->eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	struct ionic_rx_stats *stats = &rxq->stats;
	struct ionic_rx_service *recv_args = (struct ionic_rx_service *)
		service_cb_arg;
	uint32_t buf_size = (uint16_t)
		(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);
	uint32_t left;
	void **info;

	assert(q_desc_index == cq_desc->comp_index);

	info = IONIC_INFO_PTR(q, cq_desc->comp_index);

	rxm = info[0];

	if (!recv_args) {
		stats->no_cb_arg++;
		/* Flush */
		rte_pktmbuf_free(rxm);
		/*
		 * Note: rte_mempool_put is faster with no segs
		 * rte_mempool_put(rxq->mb_pool, rxm);
		 */
		return;
	}

	if (cq_desc->status) {
		stats->bad_cq_status++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (recv_args->nb_rx >= recv_args->nb_pkts) {
		stats->no_room++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (cq_desc->len > max_frame_size ||
	    cq_desc->len == 0) {
		stats->bad_len++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	rxm->data_off = RTE_PKTMBUF_HEADROOM;
	rte_prefetch1((char *)rxm->buf_addr + rxm->data_off);
	rxm->nb_segs = 1; /* cq_desc->num_sg_elems */
	rxm->pkt_len = cq_desc->len;
	rxm->port = rxq->qcq.lif->port_id;

	left = cq_desc->len;

	rxm->data_len = RTE_MIN(buf_size, left);
	left -= rxm->data_len;

	rxm_seg = rxm->next;
	while (rxm_seg && left) {
		rxm_seg->data_len = RTE_MIN(buf_size, left);
		left -= rxm_seg->data_len;

		rxm_seg = rxm_seg->next;
		rxm->nb_segs++;
	}

	/* RSS */
	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
			pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
		else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
			pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;

		if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
		    (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
			pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
		else if ((cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
			 (cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
			pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
	case IONIC_PKT_TYPE_IPV4:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		break;
	case IONIC_PKT_TYPE_IPV6:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		break;
	case IONIC_PKT_TYPE_IPV4_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV6_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV4_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
		break;
	case IONIC_PKT_TYPE_IPV6_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
		break;
	default:
	{
		struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
			struct rte_ether_hdr *);
		uint16_t ether_type = eth_h->ether_type;
		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
			pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		else
			pkt_type = RTE_PTYPE_UNKNOWN;
		stats->mtods++;
		break;
	}
	}

	rxm->packet_type = pkt_type;

	recv_args->rx_pkts[recv_args->nb_rx] = rxm;
	recv_args->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}
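
/*
 * Re-post a receive buffer whose completion was not handed to the
 * application (bad status, bad length, or no room in the burst): the
 * original descriptor's address/length are copied to the current head
 * and the doorbell is rung so the device can reuse the buffer.
 */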

static void
ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf)
{
	struct ionic_rxq_desc *desc_base = q->base;
	struct ionic_rxq_desc *old = &desc_base[q_desc_index];
	struct ionic_rxq_desc *new = &desc_base[q->head_idx];

	new->addr = old->addr;
	new->len = old->len;

	q->info[q->head_idx] = mbuf;

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	ionic_q_flush(q);
}
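
/*
 * Fill the receive ring. Each posted buffer set must cover 'len' bytes
 * (the maximum frame size), so nsegs = ceil(len / buf_size) mbufs are
 * chained per descriptor, using the SG opcode when more than one is
 * needed. For example (illustrative, assuming a 2048-byte data room and
 * the default headroom), a 1518-byte frame fits in a single SIMPLE
 * descriptor, while a 9000-byte jumbo frame requires an SG descriptor
 * with several chained buffers.
 */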

static __rte_always_inline int
ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	struct ionic_rxq_sg_elem *elem;
	void **info;
	rte_iova_t dma_addr;
	uint32_t i, j, nsegs, buf_size, size;

	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);

	/* Initialize software ring entries */
	for (i = ionic_q_space_avail(q); i; i--) {
		struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
		struct rte_mbuf *prev_rxm_seg;

		if (rxm == NULL) {
			IONIC_PRINT(ERR, "RX mbuf alloc failed");
			return -ENOMEM;
		}

		info = IONIC_INFO_PTR(q, q->head_idx);

		nsegs = (len + buf_size - 1) / buf_size;

		desc = &desc_base[q->head_idx];
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
		desc->addr = dma_addr;
		desc->len = buf_size;
		size = buf_size;
		desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
			IONIC_RXQ_DESC_OPCODE_SIMPLE;
		rxm->next = NULL;

		prev_rxm_seg = rxm;
		sg_desc = &sg_desc_base[q->head_idx];
		elem = sg_desc->elems;
		for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
			struct rte_mbuf *rxm_seg;
			rte_iova_t data_iova;

			rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (rxm_seg == NULL) {
				IONIC_PRINT(ERR, "RX mbuf alloc failed");
				return -ENOMEM;
			}

			data_iova = rte_mbuf_data_iova(rxm_seg);
			dma_addr = rte_cpu_to_le_64(data_iova);
			elem->addr = dma_addr;
			elem->len = buf_size;
			size += buf_size;
			elem++;
			rxm_seg->next = NULL;
			prev_rxm_seg->next = rxm_seg;
			prev_rxm_seg = rxm_seg;
		}

		if (size < len)
			IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)",
				size, len);

		info[0] = rxm;

		q->head_idx = Q_NEXT_TO_POST(q, 1);
	}

	ionic_q_flush(q);

	return 0;
}

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint32_t frame_size = eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs (size: %u)",
		rx_queue_id, rxq->qcq.q.num_descs, frame_size);

	if (!(rxq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_rxq_init(rxq);
		if (err)
			return err;
	} else {
		ionic_qcq_enable(&rxq->qcq);
	}

	/* Allocate buffers for descriptor rings */
	if (ionic_rx_fill(rxq, frame_size) != 0) {
		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
			rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}
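
/*
 * Service the Rx completion ring: for every completion whose color bit
 * matches, advance the descriptor ring up to the reported comp_index
 * (the inner do/while handles multiple descriptors covered by one
 * completion) and hand each one to ionic_rx_clean(). Passing
 * work_to_do = -1, as the queue-stop path does, effectively drains the
 * whole ring.
 */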

static __rte_always_inline void
ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		void *service_cb_arg)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
	bool more;
	uint32_t curr_q_tail_idx, curr_cq_tail_idx;
	uint32_t work_done = 0;

	if (work_to_do == 0)
		return;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		curr_cq_tail_idx = cq->tail_idx;
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch the next 4 descriptors */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		do {
			more = (q->tail_idx != cq_desc->comp_index);

			curr_q_tail_idx = q->tail_idx;
			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				/* q desc info */
				rte_prefetch0(&q->info[q->tail_idx]);

			ionic_rx_clean(rxq, curr_q_tail_idx, curr_cq_tail_idx,
				service_cb_arg);

		} while (more);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}
}

/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct ionic_rx_qcq *rxq;

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_qcq_disable(&rxq->qcq);

	/* Flush */
	ionic_rxq_service(rxq, -1, NULL);

	return 0;
}

uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	uint32_t frame_size =
		rxq->qcq.lif->eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
	struct ionic_rx_service service_cb_arg;

	service_cb_arg.rx_pkts = rx_pkts;
	service_cb_arg.nb_pkts = nb_pkts;
	service_cb_arg.nb_rx = 0;

	ionic_rxq_service(rxq, nb_pkts, &service_cb_arg);

	ionic_rx_fill(rxq, frame_size);

	return service_cb_arg.nb_rx;
}