/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_net.h>

#include "ionic_logs.h"
#include "ionic_mac_api.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

#define IONIC_RX_RING_DOORBELL_STRIDE		(32 - 1)
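/*
 * Note: IONIC_RX_RING_DOORBELL_STRIDE is used as a mask in ionic_rx_fill()
 * below: ((q->head_idx + 1) & (32 - 1)) == 0 is true once every 32 posted
 * descriptors, so the RX doorbell is rung once per 32 buffers rather than
 * once per buffer.
 */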

/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = txq->offloads;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}

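/*
 * Completion descriptors carry a color bit that is expected to change on
 * every pass the device makes over the completion ring. cq->done_color
 * tracks the color of fresh completions and is toggled each time the tail
 * index wraps to zero, so color_match() below is true only for entries
 * written since the last flush. ionic_rxq_service() uses the same scheme
 * on the RX side.
 */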
static inline void __rte_cold
ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *q_desc_info;
	struct rte_mbuf *txm, *next;
	struct ionic_txq_comp *cq_desc_base = cq->base;
	struct ionic_txq_comp *cq_desc;
	u_int32_t comp_index = (u_int32_t)-1;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);

		/* Prefetch the next 4 descriptors (not really useful here) */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		comp_index = cq_desc->comp_index;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	if (comp_index != (u_int32_t)-1) {
		while (q->tail_idx != comp_index) {
			q_desc_info = &q->info[q->tail_idx];

			q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				/* q desc info */
				rte_prefetch0(&q->info[q->tail_idx]);

			/*
			 * Note: rte_pktmbuf_free() would also work here,
			 * but this explicit loop is faster.
			 */
			txm = q_desc_info->cb_arg;
			while (txm != NULL) {
				next = txm->next;
				rte_pktmbuf_free_seg(txm);
				txm = next;
			}
		}
	}
}

void __rte_cold
ionic_dev_tx_queue_release(void *tx_queue)
{
	struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;

	IONIC_PRINT_CALL();

	ionic_lif_txq_deinit(txq);

	ionic_qcq_free(txq);
}

int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_qcq *txq;

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	txq = eth_dev->data->tx_queues[tx_queue_id];

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	/*
	 * Note: we should post a NOP Tx descriptor and wait for its
	 * completion before disabling the Tx queue.
	 */

	ionic_qcq_disable(txq);

	ionic_tx_flush(&txq->cq);

	return 0;
}

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		void *tx_queue = eth_dev->data->tx_queues[tx_queue_id];
		ionic_dev_tx_queue_release(tx_queue);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	txq->offloads = offloads;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->q.num_descs);

	if (!(txq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_txq_init(txq);
		if (err)
			return err;
	} else {
		ionic_qcq_enable(txq);
	}

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

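/*
 * The two helpers below zero and re-seed the innermost TCP checksum before
 * a TSO send; the device then fixes up the checksum of each segment it
 * produces (see the note in ionic_tx_tso()). This path reads l2_len, l3_len
 * and l4_len (plus outer_l2_len/outer_l3_len in the encapsulated case),
 * tso_segsz and PKT_TX_TCP_SEG from the mbuf, which the application is
 * expected to fill in per the usual DPDK TSO convention.
 */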
static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & PKT_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & PKT_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	uint8_t flags = 0;
	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->len = len;
	desc->vlan_tci = vlan_tci;
	desc->hdr_len = hdrlen;
	desc->mss = mss;

	ionic_q_post(q, done, NULL, done ? txm : NULL);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_queue *q, struct ionic_txq_sg_elem **elem)
{
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}

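/*
 * TSO path: the first mbuf's data is chopped into chunks of hdrlen + mss
 * (then mss) bytes, each starting a new TSO descriptor; the remaining mbuf
 * segments are spread across each descriptor's SG elements until every mss
 * worth of payload has been posted. The first descriptor is flagged SOT
 * (start of TSO) and the last one EOT, and only the descriptor that
 * completes the frame carries an mbuf pointer for completion-time cleanup.
 */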
static int
ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,
		uint64_t offloads __rte_unused, bool not_xmit_more)
{
	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	uint64_t desc_addr = 0;
	uint16_t desc_len = 0;
	uint8_t desc_nsge;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & PKT_TX_OUTER_IPV4) ||
		(ol_flags & PKT_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	seglen = hdrlen + mss;
	left = txm->data_len;

	desc = ionic_tx_tso_next(q, &elem);
	start = true;

	/* Chop data up into desc segments */

	while (left > 0) {
		len = RTE_MIN(seglen, left);
		frag_left = seglen - len;
		desc_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (txm->nb_segs > 1 && frag_left > 0)
			continue;
		done = (txm->nb_segs == 1 && left == 0);
		ionic_tx_tso_post(q, desc, txm,
			desc_addr, desc_nsge, desc_len,
			hdrlen, mss,
			encap,
			vlan_tci, has_vlan,
			start, done && not_xmit_more);
		desc = ionic_tx_tso_next(q, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop frags into desc segments */

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		offset = 0;
		left = txm_seg->data_len;
		stats->frags++;

		while (left > 0) {
			rte_iova_t data_iova;
			data_iova = rte_mbuf_data_iova(txm_seg);
			elem->addr = rte_cpu_to_le_64(data_iova) + offset;
			if (frag_left > 0) {
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->len = len;
				elem++;
				desc_nsge++;
			} else {
				len = RTE_MIN(mss, left);
				frag_left = mss - len;
				data_iova = rte_mbuf_data_iova(txm_seg);
				desc_addr = rte_cpu_to_le_64(data_iova);
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;
			if (txm_seg->next != NULL && frag_left > 0)
				continue;
			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done && not_xmit_more);
			desc = ionic_tx_tso_next(q, &elem);
			start = false;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}

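/*
 * Non-TSO transmit: one descriptor per packet, with any chained mbuf
 * segments attached as SG elements. PKT_TX_IP_CKSUM and PKT_TX_TCP/UDP_CKSUM
 * requests are honored only when the matching DEV_TX_OFFLOAD_* bit was
 * enabled on the queue; packets that end up with no hardware checksum are
 * counted in no_csum.
 */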
static int
ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm,
		uint64_t offloads, bool not_xmit_more)
{
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
	struct rte_mbuf *txm_seg;
	bool encap;
	bool has_vlan;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	if ((ol_flags & PKT_TX_IP_CKSUM) &&
	    (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & PKT_TX_TCP_CKSUM) &&
	     (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
	    ((ol_flags & PKT_TX_UDP_CKSUM) &&
	     (offloads & DEV_TX_OFFLOAD_UDP_CKSUM))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & PKT_TX_OUTER_IPV4) ||
		(ol_flags & PKT_TX_OUTER_IPV6));

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->len = txm->data_len;
	desc->vlan_tci = txm->vlan_tci;

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		elem->len = txm_seg->data_len;
		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
		stats->frags++;
		elem++;
		txm_seg = txm_seg->next;
	}

	ionic_q_post(q, not_xmit_more, NULL, txm);

	return 0;
}

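/*
 * Burst transmit: completed descriptors are reclaimed first, then the burst
 * is rejected outright if the ring does not have room for all nb_pkts
 * requests (counted in stats->stop). The doorbell is deferred to the last
 * packet of the burst via the not_xmit_more/last flag, or flushed explicitly
 * if a packet fails mid-burst.
 */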
uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
	struct ionic_queue *q = &txq->q;
	struct ionic_cq *cq = &txq->cq;
	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
	uint32_t next_q_head_idx;
	uint32_t bytes_tx = 0;
	uint16_t nb_tx = 0;
	int err;
	bool last;

	/* Cleaning old buffers */
	ionic_tx_flush(cq);

	if (unlikely(ionic_q_space_avail(q) < nb_pkts)) {
		stats->stop += nb_pkts;
		return 0;
	}

	while (nb_tx < nb_pkts) {
		last = (nb_tx == (nb_pkts - 1));

		next_q_head_idx = (q->head_idx + 1) & (q->num_descs - 1);
		if ((next_q_head_idx & 0x3) == 0) {
			struct ionic_txq_desc *desc_base = q->base;
			rte_prefetch0(&desc_base[next_q_head_idx]);
			rte_prefetch0(&q->info[next_q_head_idx]);
		}

		if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
			err = ionic_tx_tso(q, tx_pkts[nb_tx], txq->offloads,
				last);
		else
			err = ionic_tx(q, tx_pkts[nb_tx], txq->offloads, last);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			if (nb_tx > 0)
				ionic_q_flush(q);
			break;
		}

		bytes_tx += tx_pkts[nb_tx]->pkt_len;
		nb_tx++;
	}

	stats->packets += nb_tx;
	stats->bytes += bytes_tx;

	return nb_tx;
}

/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (	\
	PKT_TX_IPV4 |		\
	PKT_TX_IPV6 |		\
	PKT_TX_VLAN |		\
	PKT_TX_IP_CKSUM |	\
	PKT_TX_TCP_SEG |	\
	PKT_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)

uint16_t
ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS) {
			rte_errno = -EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = -ENOTSUP;
			break;
		}
	}

	return i;
}

/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf);

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = rxq->offloads;
}

static void __rte_cold
ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
	struct ionic_desc_info *cur;
	struct rte_mbuf *mbuf;

	while (q->tail_idx != q->head_idx) {
		cur = &q->info[q->tail_idx];
		mbuf = cur->cb_arg;
		rte_mempool_put(rxq->mb_pool, mbuf);

		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
	}
}

void __rte_cold
ionic_dev_rx_queue_release(void *rx_queue)
{
	struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;

	IONIC_PRINT_CALL();

	ionic_rx_empty(&rxq->q);

	ionic_lif_rxq_deinit(rxq);

	ionic_qcq_free(rxq);
}

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
			nb_desc < IONIC_MIN_RING_DESC ||
			nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
		ionic_dev_rx_queue_release(rx_queue);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, rx_queue_id, nb_desc, &rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;

	/*
	 * Note: the interface does not currently support
	 * DEV_RX_OFFLOAD_KEEP_CRC; once the adapter is able to keep the CRC,
	 * also consider ETHER_CRC_LEN and subtract it from the length of all
	 * received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *   DEV_RX_OFFLOAD_KEEP_CRC)
	 *   rxq->crc_len = ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	rxq->offloads = offloads;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}

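/*
 * Per-completion RX handler, called from ionic_rxq_service(). It validates
 * the completion (status, length against max_rx_pkt_len), fixes up the mbuf
 * data/packet lengths across the segment chain, and fills in the RSS hash,
 * VLAN tag, checksum flags and packet type from the completion descriptor.
 * Bad or unusable buffers are recycled back onto the ring instead of being
 * delivered.
 */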
static void
ionic_rx_clean(struct ionic_queue *q,
		uint32_t q_desc_index, uint32_t cq_desc_index,
		void *cb_arg, void *service_cb_arg)
{
	struct ionic_rxq_comp *cq_desc_base = q->bound_cq->base;
	struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct rte_mbuf *rxm = cb_arg;
	struct rte_mbuf *rxm_seg;
	struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
	uint32_t max_frame_size =
		rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	struct ionic_rx_stats *stats = IONIC_Q_TO_RX_STATS(q);
	struct ionic_rx_service *recv_args = (struct ionic_rx_service *)
		service_cb_arg;
	uint32_t buf_size = (uint16_t)
		(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);
	uint32_t left;

	if (!recv_args) {
		stats->no_cb_arg++;
		/* Flush */
		rte_pktmbuf_free(rxm);
		/*
		 * Note: rte_mempool_put is faster with no segs
		 * rte_mempool_put(rxq->mb_pool, rxm);
		 */
		return;
	}

	if (cq_desc->status) {
		stats->bad_cq_status++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (recv_args->nb_rx >= recv_args->nb_pkts) {
		stats->no_room++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (cq_desc->len > max_frame_size ||
			cq_desc->len == 0) {
		stats->bad_len++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	rxm->data_off = RTE_PKTMBUF_HEADROOM;
	rte_prefetch1((char *)rxm->buf_addr + rxm->data_off);
	rxm->nb_segs = 1; /* cq_desc->num_sg_elems */
	rxm->pkt_len = cq_desc->len;
	rxm->port = rxq->lif->port_id;

	left = cq_desc->len;

	rxm->data_len = RTE_MIN(buf_size, left);
	left -= rxm->data_len;

	rxm_seg = rxm->next;
	while (rxm_seg && left) {
		rxm_seg->data_len = RTE_MIN(buf_size, left);
		left -= rxm_seg->data_len;

		rxm_seg = rxm_seg->next;
		rxm->nb_segs++;
	}

	/* RSS */
	pkt_flags |= PKT_RX_RSS_HASH;
	rxm->hash.rss = cq_desc->rss_hash;

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxm->vlan_tci = cq_desc->vlan_tci;
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
			pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
		else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
			pkt_flags |= PKT_RX_IP_CKSUM_BAD;

		if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
		    (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
			pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
		else if ((cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
				(cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
			pkt_flags |= PKT_RX_L4_CKSUM_BAD;
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
	case IONIC_PKT_TYPE_IPV4:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		break;
	case IONIC_PKT_TYPE_IPV6:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		break;
	case IONIC_PKT_TYPE_IPV4_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV6_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV4_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
		break;
	case IONIC_PKT_TYPE_IPV6_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
		break;
	default:
		{
			struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
			uint16_t ether_type = eth_h->ether_type;
			if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
				pkt_type = RTE_PTYPE_L2_ETHER_ARP;
			else
				pkt_type = RTE_PTYPE_UNKNOWN;
			break;
		}
	}

	rxm->packet_type = pkt_type;

	recv_args->rx_pkts[recv_args->nb_rx] = rxm;
	recv_args->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}

static void
ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf)
{
	struct ionic_rxq_desc *desc_base = q->base;
	struct ionic_rxq_desc *old = &desc_base[q_desc_index];
	struct ionic_rxq_desc *new = &desc_base[q->head_idx];

	new->addr = old->addr;
	new->len = old->len;

	ionic_q_post(q, true, ionic_rx_clean, mbuf);
}

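/*
 * Fill the RX ring: one descriptor is posted per receive buffer chain, with
 * enough SG elements chained behind it to cover 'len' (the max RX frame
 * size) in mempool-sized buffers. The doorbell is rung once every 32 posted
 * descriptors (IONIC_RX_RING_DOORBELL_STRIDE) rather than on every post.
 */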
static int __rte_cold
ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
{
	struct ionic_queue *q = &rxq->q;
	struct ionic_rxq_desc *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc_base = q->sg_base;
	struct ionic_rxq_desc *desc;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *elem;
	rte_iova_t dma_addr;
	uint32_t i, j, nsegs, buf_size, size;
	bool ring_doorbell;

	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);

	/* Initialize software ring entries */
	for (i = ionic_q_space_avail(q); i; i--) {
		struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
		struct rte_mbuf *prev_rxm_seg;

		if (rxm == NULL) {
			IONIC_PRINT(ERR, "RX mbuf alloc failed");
			return -ENOMEM;
		}

		nsegs = (len + buf_size - 1) / buf_size;

		desc = &desc_base[q->head_idx];
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
		desc->addr = dma_addr;
		desc->len = buf_size;
		size = buf_size;
		desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
			IONIC_RXQ_DESC_OPCODE_SIMPLE;
		rxm->next = NULL;

		prev_rxm_seg = rxm;
		sg_desc = &sg_desc_base[q->head_idx];
		elem = sg_desc->elems;
		for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
			struct rte_mbuf *rxm_seg;
			rte_iova_t data_iova;

			rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (rxm_seg == NULL) {
				IONIC_PRINT(ERR, "RX mbuf alloc failed");
				return -ENOMEM;
			}

			data_iova = rte_mbuf_data_iova(rxm_seg);
			dma_addr = rte_cpu_to_le_64(data_iova);
			elem->addr = dma_addr;
			elem->len = buf_size;
			size += buf_size;
			elem++;
			rxm_seg->next = NULL;
			prev_rxm_seg->next = rxm_seg;
			prev_rxm_seg = rxm_seg;
		}

		if (size < len)
			IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)",
				size, len);

		ring_doorbell = ((q->head_idx + 1) &
			IONIC_RX_RING_DOORBELL_STRIDE) == 0;

		ionic_q_post(q, ring_doorbell, ionic_rx_clean, rxm);
	}

	return 0;
}

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_qcq *rxq;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs (size: %u)",
		rx_queue_id, rxq->q.num_descs, frame_size);

	if (!(rxq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_rxq_init(rxq);
		if (err)
			return err;
	} else {
		ionic_qcq_enable(rxq);
	}

	/* Allocate buffers for descriptor rings */
	if (ionic_rx_fill(rxq, frame_size) != 0) {
		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
			rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

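/*
 * Walk the RX completion ring using the color scheme described above and
 * clean every queue descriptor up to each completion's comp_index. With
 * work_to_do == (uint32_t)-1 (as used from ionic_dev_rx_queue_stop()) this
 * effectively drains the whole ring; during normal receive it is bounded by
 * the burst size.
 */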
static inline void __rte_cold
ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
		void *service_cb_arg)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *q_desc_info;
	struct ionic_rxq_comp *cq_desc_base = cq->base;
	struct ionic_rxq_comp *cq_desc;
	bool more;
	uint32_t curr_q_tail_idx, curr_cq_tail_idx;
	uint32_t work_done = 0;

	if (work_to_do == 0)
		return;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		curr_cq_tail_idx = cq->tail_idx;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch the next 4 descriptors */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		do {
			more = (q->tail_idx != cq_desc->comp_index);

			q_desc_info = &q->info[q->tail_idx];

			curr_q_tail_idx = q->tail_idx;
			q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				/* q desc info */
				rte_prefetch0(&q->info[q->tail_idx]);

			ionic_rx_clean(q, curr_q_tail_idx, curr_cq_tail_idx,
				q_desc_info->cb_arg, service_cb_arg);

		} while (more);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}
}

/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct ionic_qcq *rxq;

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_qcq_disable(rxq);

	/* Flush */
	ionic_rxq_service(&rxq->cq, -1, NULL);

	return 0;
}

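/*
 * Burst receive: service at most nb_pkts completions into rx_pkts via the
 * ionic_rx_service context, then immediately top the ring back up with
 * fresh buffers sized for the current max frame length.
 */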
uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
	uint32_t frame_size =
		rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct ionic_cq *cq = &rxq->cq;
	struct ionic_rx_service service_cb_arg;

	service_cb_arg.rx_pkts = rx_pkts;
	service_cb_arg.nb_pkts = nb_pkts;
	service_cb_arg.nb_rx = 0;

	ionic_rxq_service(cq, nb_pkts, &service_cb_arg);

	ionic_rx_fill(rxq, frame_size);

	return service_cb_arg.nb_rx;
}