/* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
 * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
 */

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
#include <rte_pci.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ethdev_driver.h>
#include <rte_prefetch.h>
#include <rte_udp.h>
#include <rte_tcp.h>
#include <rte_sctp.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_net.h>

#include "ionic_logs.h"
#include "ionic_mac_api.h"
#include "ionic_ethdev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

#define IONIC_RX_RING_DOORBELL_STRIDE	(32 - 1)

/*********************************************************************
 *
 * TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = txq->offloads;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}

static inline void __rte_cold
ionic_tx_flush(struct ionic_cq *cq)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *q_desc_info;
	struct rte_mbuf *txm, *next;
	struct ionic_txq_comp *cq_desc_base = cq->base;
	struct ionic_txq_comp *cq_desc;
	u_int32_t comp_index = (u_int32_t)-1;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);

		/* Prefetch the next 4 descriptors (not really useful here) */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		comp_index = cq_desc->comp_index;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	if (comp_index != (u_int32_t)-1) {
		while (q->tail_idx != comp_index) {
			q_desc_info = &q->info[q->tail_idx];

			q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				/* q desc info */
				rte_prefetch0(&q->info[q->tail_idx]);

			/*
			 * Note: you can just use rte_pktmbuf_free,
			 * but this loop is faster
			 */
			txm = q_desc_info->cb_arg;
			while (txm != NULL) {
				next = txm->next;
				rte_pktmbuf_free_seg(txm);
				txm = next;
			}
		}
	}
}

void __rte_cold
ionic_dev_tx_queue_release(void *tx_queue)
{
	struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;

	IONIC_PRINT_CALL();

	ionic_lif_txq_deinit(txq);

	ionic_qcq_free(txq);
}

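/*
 * Stop a TX queue. Summary of the function below: the queue is marked
 * stopped in the ethdev state, disabled in hardware, and then its
 * completion queue is flushed so that any mbufs still owned by the
 * driver are freed. See the note inside about posting a NOP descriptor
 * before disabling the queue.
 */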
int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_qcq *txq;

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	txq = eth_dev->data->tx_queues[tx_queue_id];

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	/*
	 * Note: we should better post NOP Tx desc and wait for its completion
	 * before disabling Tx queue
	 */

	ionic_qcq_disable(txq);

	ionic_tx_flush(&txq->cq);

	return 0;
}

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		void *tx_queue = eth_dev->data->tx_queues[tx_queue_id];
		ionic_dev_tx_queue_release(tx_queue);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	txq->offloads = offloads;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->q.num_descs);

	if (!(txq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_txq_init(txq);
		if (err)
			return err;
	}

	ionic_qcq_enable(txq);

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & PKT_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & PKT_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	uint8_t flags = 0;
	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->len = len;
	desc->vlan_tci = vlan_tci;
	desc->hdr_len = hdrlen;
	desc->mss = mss;

	ionic_q_post(q, done, NULL, done ? txm : NULL);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_queue *q, struct ionic_txq_sg_elem **elem)
{
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}

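/*
 * TSO path: the payload of the mbuf chain is carved into MSS-sized
 * pieces, each posted as a TSO descriptor (plus SG elements when a piece
 * spans mbuf segments). The first descriptor carries the SOT flag and the
 * last carries EOT; hdr_len and mss tell the hardware how to replicate
 * the headers for every resulting segment. Rough sizing sketch
 * (illustrative numbers, not taken from the code): with 54 bytes of
 * l2+l3+l4 headers and an MSS of 1448, a 14480-byte payload would be
 * emitted as 10 segments on the wire.
 */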
static int
ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,
		uint64_t offloads __rte_unused, bool not_xmit_more)
{
	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	uint64_t desc_addr = 0;
	uint16_t desc_len = 0;
	uint8_t desc_nsge;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & PKT_TX_OUTER_IPV4) ||
		(ol_flags & PKT_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero.  HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	seglen = hdrlen + mss;
	left = txm->data_len;

	desc = ionic_tx_tso_next(q, &elem);
	start = true;

	/* Chop data up into desc segments */

	while (left > 0) {
		len = RTE_MIN(seglen, left);
		frag_left = seglen - len;
		desc_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		if (txm->nb_segs > 1 && frag_left > 0)
			continue;
		done = (txm->nb_segs == 1 && left == 0);
		ionic_tx_tso_post(q, desc, txm,
			desc_addr, desc_nsge, desc_len,
			hdrlen, mss,
			encap,
			vlan_tci, has_vlan,
			start, done && not_xmit_more);
		desc = ionic_tx_tso_next(q, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop frags into desc segments */

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		offset = 0;
		left = txm_seg->data_len;
		stats->frags++;

		while (left > 0) {
			rte_iova_t data_iova;
			data_iova = rte_mbuf_data_iova(txm_seg);
			elem->addr = rte_cpu_to_le_64(data_iova) + offset;
			if (frag_left > 0) {
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->len = len;
				elem++;
				desc_nsge++;
			} else {
				len = RTE_MIN(mss, left);
				frag_left = mss - len;
				data_iova = rte_mbuf_data_iova(txm_seg);
				desc_addr = rte_cpu_to_le_64(data_iova);
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;
			if (txm_seg->next != NULL && frag_left > 0)
				continue;
			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done && not_xmit_more);
			desc = ionic_tx_tso_next(q, &elem);
			start = false;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}

static int
ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm,
		uint64_t offloads, bool not_xmit_more)
{
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];
	struct ionic_txq_sg_elem *elem = sg_desc->elems;
	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
	struct rte_mbuf *txm_seg;
	bool encap;
	bool has_vlan;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	if ((ol_flags & PKT_TX_IP_CKSUM) &&
	    (offloads & DEV_TX_OFFLOAD_IPV4_CKSUM)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
		if (((ol_flags & PKT_TX_TCP_CKSUM) &&
		    (offloads & DEV_TX_OFFLOAD_TCP_CKSUM)) ||
		    ((ol_flags & PKT_TX_UDP_CKSUM) &&
		    (offloads & DEV_TX_OFFLOAD_UDP_CKSUM)))
			flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	} else {
		stats->no_csum++;
	}

	has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
	encap = ((ol_flags & PKT_TX_OUTER_IP_CKSUM) ||
		(ol_flags & PKT_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & PKT_TX_OUTER_IPV4) ||
		(ol_flags & PKT_TX_OUTER_IPV6));

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->len = txm->data_len;
	desc->vlan_tci = txm->vlan_tci;

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		elem->len = txm_seg->data_len;
		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
		stats->frags++;
		elem++;
		txm_seg = txm_seg->next;
	}

	ionic_q_post(q, not_xmit_more, NULL, txm);

	return 0;
}

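/*
 * Burst transmit entry point, installed as the ethdev tx_pkt_burst
 * handler. Illustrative (hypothetical) application-side usage, assuming a
 * port and queue already configured and started:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t sent = rte_eth_tx_burst(port_id, queue_id, pkts, 32);
 *
 * rte_eth_tx_burst() dispatches here; the function first reclaims
 * completed descriptors, rejects the whole burst if the ring cannot hold
 * every packet, then posts one descriptor (chain) per mbuf and rings the
 * doorbell only for the last packet of the burst.
 */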
uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
	struct ionic_queue *q = &txq->q;
	struct ionic_cq *cq = &txq->cq;
	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
	uint32_t next_q_head_idx;
	uint32_t bytes_tx = 0;
	uint16_t nb_tx = 0;
	int err;
	bool last;

	/* Cleaning old buffers */
	ionic_tx_flush(cq);

	if (unlikely(ionic_q_space_avail(q) < nb_pkts)) {
		stats->stop += nb_pkts;
		return 0;
	}

	while (nb_tx < nb_pkts) {
		last = (nb_tx == (nb_pkts - 1));

		next_q_head_idx = (q->head_idx + 1) & (q->num_descs - 1);
		if ((next_q_head_idx & 0x3) == 0) {
			struct ionic_txq_desc *desc_base = q->base;
			rte_prefetch0(&desc_base[next_q_head_idx]);
			rte_prefetch0(&q->info[next_q_head_idx]);
		}

		if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
			err = ionic_tx_tso(q, tx_pkts[nb_tx], txq->offloads,
				last);
		else
			err = ionic_tx(q, tx_pkts[nb_tx], txq->offloads, last);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			if (nb_tx > 0)
				ionic_q_flush(q);
			break;
		}

		bytes_tx += tx_pkts[nb_tx]->pkt_len;
		nb_tx++;
	}

	stats->packets += nb_tx;
	stats->bytes += bytes_tx;

	return nb_tx;
}

/*********************************************************************
 *
 * TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (	\
	PKT_TX_IPV4 |		\
	PKT_TX_IPV6 |		\
	PKT_TX_VLAN |		\
	PKT_TX_IP_CKSUM |	\
	PKT_TX_TCP_SEG |	\
	PKT_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)

uint16_t
ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS) {
			rte_errno = -EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = -ENOTSUP;
			break;
		}
	}

	return i;
}

/*********************************************************************
 *
 * RX functions
 *
 **********************************************************************/

static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf);

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = rxq->offloads;
}

static void __rte_cold
ionic_rx_empty(struct ionic_queue *q)
{
	struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
	struct ionic_desc_info *cur;
	struct rte_mbuf *mbuf;

	while (q->tail_idx != q->head_idx) {
		cur = &q->info[q->tail_idx];
		mbuf = cur->cb_arg;
		rte_mempool_put(rxq->mb_pool, mbuf);

		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
	}
}

void __rte_cold
ionic_dev_rx_queue_release(void *rx_queue)
{
	struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;

	IONIC_PRINT_CALL();

	ionic_rx_empty(&rxq->q);

	ionic_lif_rxq_deinit(rxq);

	ionic_qcq_free(rxq);
}

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
	    nb_desc < IONIC_MIN_RING_DESC ||
	    nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
		eth_dev->data->scattered_rx = 1;

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
		ionic_dev_rx_queue_release(rx_queue);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, rx_queue_id, nb_desc, &rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;

	/*
	 * Note: the interface does not currently support
	 * DEV_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
	 * when the adapter will be able to keep the CRC and subtract
	 * it from the length for all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *   DEV_RX_OFFLOAD_KEEP_CRC)
	 *   rxq->crc_len = ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	rxq->offloads = offloads;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}

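/*
 * Per-completion receive processing. Called once for every completed
 * descriptor: error completions recycle the buffer back onto the ring,
 * otherwise the mbuf chain lengths are rebuilt from the completion and
 * the completion flags are translated into mbuf ol_flags (RSS hash,
 * VLAN strip, L3/L4 checksum status) and a packet type. Accepted packets
 * are appended to the caller-provided ionic_rx_service array.
 */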
static void
ionic_rx_clean(struct ionic_queue *q,
		uint32_t q_desc_index, uint32_t cq_desc_index,
		void *cb_arg, void *service_cb_arg)
{
	struct ionic_rxq_comp *cq_desc_base = q->bound_cq->base;
	struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct rte_mbuf *rxm = cb_arg;
	struct rte_mbuf *rxm_seg;
	struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
	uint32_t max_frame_size =
		rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	struct ionic_rx_stats *stats = IONIC_Q_TO_RX_STATS(q);
	struct ionic_rx_service *recv_args = (struct ionic_rx_service *)
		service_cb_arg;
	uint32_t buf_size = (uint16_t)
		(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);
	uint32_t left;

	if (!recv_args) {
		stats->no_cb_arg++;
		/* Flush */
		rte_pktmbuf_free(rxm);
		/*
		 * Note: rte_mempool_put is faster with no segs
		 * rte_mempool_put(rxq->mb_pool, rxm);
		 */
		return;
	}

	if (cq_desc->status) {
		stats->bad_cq_status++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (recv_args->nb_rx >= recv_args->nb_pkts) {
		stats->no_room++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	if (cq_desc->len > max_frame_size ||
	    cq_desc->len == 0) {
		stats->bad_len++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	rxm->data_off = RTE_PKTMBUF_HEADROOM;
	rte_prefetch1((char *)rxm->buf_addr + rxm->data_off);
	rxm->nb_segs = 1; /* cq_desc->num_sg_elems */
	rxm->pkt_len = cq_desc->len;
	rxm->port = rxq->lif->port_id;

	left = cq_desc->len;

	rxm->data_len = RTE_MIN(buf_size, left);
	left -= rxm->data_len;

	rxm_seg = rxm->next;
	while (rxm_seg && left) {
		rxm_seg->data_len = RTE_MIN(buf_size, left);
		left -= rxm_seg->data_len;

		rxm_seg = rxm_seg->next;
		rxm->nb_segs++;
	}

	/* RSS */
	pkt_flags |= PKT_RX_RSS_HASH;
	rxm->hash.rss = cq_desc->rss_hash;

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxm->vlan_tci = cq_desc->vlan_tci;
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
			pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
		else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
			pkt_flags |= PKT_RX_IP_CKSUM_BAD;

		if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
		    (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
			pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
		else if ((cq_desc->csum_flags &
			  IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
			 (cq_desc->csum_flags &
			  IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
			pkt_flags |= PKT_RX_L4_CKSUM_BAD;
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
	case IONIC_PKT_TYPE_IPV4:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		break;
	case IONIC_PKT_TYPE_IPV6:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		break;
	case IONIC_PKT_TYPE_IPV4_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV6_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV4_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
		break;
	case IONIC_PKT_TYPE_IPV6_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
		break;
	default:
		{
			struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
			uint16_t ether_type = eth_h->ether_type;
			if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
				pkt_type = RTE_PTYPE_L2_ETHER_ARP;
			else
				pkt_type = RTE_PTYPE_UNKNOWN;
			break;
		}
	}

	rxm->packet_type = pkt_type;

	recv_args->rx_pkts[recv_args->nb_rx] = rxm;
	recv_args->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}

static void
ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
		struct rte_mbuf *mbuf)
{
	struct ionic_rxq_desc *desc_base = q->base;
	struct ionic_rxq_desc *old = &desc_base[q_desc_index];
	struct ionic_rxq_desc *new = &desc_base[q->head_idx];

	new->addr = old->addr;
	new->len = old->len;

	ionic_q_post(q, true, ionic_rx_clean, mbuf);
}

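/*
 * Refill the receive ring. Each posted descriptor gets one mbuf of
 * buf_size bytes; when the maximum frame length exceeds one buffer, extra
 * mbufs are chained through the SG descriptor. Illustrative arithmetic
 * (example numbers, not taken from the code): with 2048-byte buffers and
 * a 9000-byte max frame, nsegs = (9000 + 2048 - 1) / 2048 = 5, i.e. one
 * base descriptor plus four SG elements. The doorbell is rung once every
 * 32 posted descriptors (IONIC_RX_RING_DOORBELL_STRIDE).
 */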
static int __rte_cold
ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
{
	struct ionic_queue *q = &rxq->q;
	struct ionic_rxq_desc *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc_base = q->sg_base;
	struct ionic_rxq_desc *desc;
	struct ionic_rxq_sg_desc *sg_desc;
	struct ionic_rxq_sg_elem *elem;
	rte_iova_t dma_addr;
	uint32_t i, j, nsegs, buf_size, size;
	bool ring_doorbell;

	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);

	/* Initialize software ring entries */
	for (i = ionic_q_space_avail(q); i; i--) {
		struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
		struct rte_mbuf *prev_rxm_seg;

		if (rxm == NULL) {
			IONIC_PRINT(ERR, "RX mbuf alloc failed");
			return -ENOMEM;
		}

		nsegs = (len + buf_size - 1) / buf_size;

		desc = &desc_base[q->head_idx];
		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
		desc->addr = dma_addr;
		desc->len = buf_size;
		size = buf_size;
		desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
			IONIC_RXQ_DESC_OPCODE_SIMPLE;
		rxm->next = NULL;

		prev_rxm_seg = rxm;
		sg_desc = &sg_desc_base[q->head_idx];
		elem = sg_desc->elems;
		for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
			struct rte_mbuf *rxm_seg;
			rte_iova_t data_iova;

			rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
			if (rxm_seg == NULL) {
				IONIC_PRINT(ERR, "RX mbuf alloc failed");
				return -ENOMEM;
			}

			data_iova = rte_mbuf_data_iova(rxm_seg);
			dma_addr = rte_cpu_to_le_64(data_iova);
			elem->addr = dma_addr;
			elem->len = buf_size;
			size += buf_size;
			elem++;
			rxm_seg->next = NULL;
			prev_rxm_seg->next = rxm_seg;
			prev_rxm_seg = rxm_seg;
		}

		if (size < len)
			IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)",
				size, len);

		ring_doorbell = ((q->head_idx + 1) &
			IONIC_RX_RING_DOORBELL_STRIDE) == 0;

		ionic_q_post(q, ring_doorbell, ionic_rx_clean, rxm);
	}

	return 0;
}

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_qcq *rxq;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs (size: %u)",
		rx_queue_id, rxq->q.num_descs, frame_size);

	if (!(rxq->flags & IONIC_QCQ_F_INITED)) {
		err = ionic_lif_rxq_init(rxq);
		if (err)
			return err;
	}

	/* Allocate buffers for descriptor rings */
	if (ionic_rx_fill(rxq, frame_size) != 0) {
		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
			rx_queue_id);
		return -1;
	}

	ionic_qcq_enable(rxq);

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

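/*
 * Walk the RX completion queue. Ownership of a completion entry is
 * detected via the color bit, which the hardware flips on each pass over
 * the ring; done_color is toggled whenever the tail index wraps. One
 * completion may cover several queue descriptors, so the inner loop
 * cleans every descriptor up to comp_index.
 */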
static inline void __rte_cold
ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
		void *service_cb_arg)
{
	struct ionic_queue *q = cq->bound_q;
	struct ionic_desc_info *q_desc_info;
	struct ionic_rxq_comp *cq_desc_base = cq->base;
	struct ionic_rxq_comp *cq_desc;
	bool more;
	uint32_t curr_q_tail_idx, curr_cq_tail_idx;
	uint32_t work_done = 0;

	if (work_to_do == 0)
		return;

	cq_desc = &cq_desc_base[cq->tail_idx];
	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		curr_cq_tail_idx = cq->tail_idx;
		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);

		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch the next 4 descriptors */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[cq->tail_idx]);

		do {
			more = (q->tail_idx != cq_desc->comp_index);

			q_desc_info = &q->info[q->tail_idx];

			curr_q_tail_idx = q->tail_idx;
			q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);

			/* Prefetch the next 4 descriptors */
			if ((q->tail_idx & 0x3) == 0)
				/* q desc info */
				rte_prefetch0(&q->info[q->tail_idx]);

			ionic_rx_clean(q, curr_q_tail_idx, curr_cq_tail_idx,
				q_desc_info->cb_arg, service_cb_arg);

		} while (more);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}
}

/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	struct ionic_qcq *rxq;

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_qcq_disable(rxq);

	/* Flush */
	ionic_rxq_service(&rxq->cq, -1, NULL);

	return 0;
}

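/*
 * Burst receive entry point, installed as the ethdev rx_pkt_burst
 * handler. Illustrative (hypothetical) application-side usage:
 *
 *	struct rte_mbuf *pkts[32];
 *	uint16_t n = rte_eth_rx_burst(port_id, queue_id, pkts, 32);
 *
 * The burst is serviced from the completion queue into rx_pkts, and the
 * descriptor ring is then topped back up with fresh mbufs before
 * returning the number of packets received.
 */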
uint16_t
ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
	uint32_t frame_size =
		rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	struct ionic_cq *cq = &rxq->cq;
	struct ionic_rx_service service_cb_arg;

	service_cb_arg.rx_pkts = rx_pkts;
	service_cb_arg.nb_pkts = nb_pkts;
	service_cb_arg.nb_rx = 0;

	ionic_rxq_service(cq, nb_pkts, &service_cb_arg);

	ionic_rx_fill(rxq, frame_size);

	return service_cb_arg.nb_rx;
}