176668754SAndrew Boyer /* SPDX-License-Identifier: BSD-3-Clause 2a5205992SAndrew Boyer * Copyright 2018-2022 Advanced Micro Devices, Inc. 3a27d9013SAlfredo Cardigliano */ 4a27d9013SAlfredo Cardigliano 5a27d9013SAlfredo Cardigliano #include <sys/queue.h> 6a27d9013SAlfredo Cardigliano #include <stdio.h> 7a27d9013SAlfredo Cardigliano #include <stdlib.h> 8a27d9013SAlfredo Cardigliano #include <string.h> 9a27d9013SAlfredo Cardigliano #include <errno.h> 10a27d9013SAlfredo Cardigliano #include <stdint.h> 11a27d9013SAlfredo Cardigliano #include <stdarg.h> 12a27d9013SAlfredo Cardigliano #include <unistd.h> 13a27d9013SAlfredo Cardigliano #include <inttypes.h> 14a27d9013SAlfredo Cardigliano 15a27d9013SAlfredo Cardigliano #include <rte_byteorder.h> 16a27d9013SAlfredo Cardigliano #include <rte_common.h> 17a27d9013SAlfredo Cardigliano #include <rte_cycles.h> 18a27d9013SAlfredo Cardigliano #include <rte_log.h> 19a27d9013SAlfredo Cardigliano #include <rte_debug.h> 20a27d9013SAlfredo Cardigliano #include <rte_interrupts.h> 21a27d9013SAlfredo Cardigliano #include <rte_pci.h> 22a27d9013SAlfredo Cardigliano #include <rte_memory.h> 23a27d9013SAlfredo Cardigliano #include <rte_memzone.h> 24a27d9013SAlfredo Cardigliano #include <rte_launch.h> 25a27d9013SAlfredo Cardigliano #include <rte_eal.h> 26a27d9013SAlfredo Cardigliano #include <rte_per_lcore.h> 27a27d9013SAlfredo Cardigliano #include <rte_lcore.h> 28a27d9013SAlfredo Cardigliano #include <rte_atomic.h> 29a27d9013SAlfredo Cardigliano #include <rte_branch_prediction.h> 30a27d9013SAlfredo Cardigliano #include <rte_mempool.h> 31a27d9013SAlfredo Cardigliano #include <rte_malloc.h> 32a27d9013SAlfredo Cardigliano #include <rte_mbuf.h> 33a27d9013SAlfredo Cardigliano #include <rte_ether.h> 34df96fd0dSBruce Richardson #include <ethdev_driver.h> 35a27d9013SAlfredo Cardigliano #include <rte_prefetch.h> 36a27d9013SAlfredo Cardigliano #include <rte_udp.h> 37a27d9013SAlfredo Cardigliano #include <rte_tcp.h> 38a27d9013SAlfredo Cardigliano #include 
<rte_sctp.h> 39a27d9013SAlfredo Cardigliano #include <rte_string_fns.h> 40a27d9013SAlfredo Cardigliano #include <rte_errno.h> 41a27d9013SAlfredo Cardigliano #include <rte_ip.h> 42a27d9013SAlfredo Cardigliano #include <rte_net.h> 43a27d9013SAlfredo Cardigliano 44a27d9013SAlfredo Cardigliano #include "ionic_logs.h" 45a27d9013SAlfredo Cardigliano #include "ionic_mac_api.h" 46a27d9013SAlfredo Cardigliano #include "ionic_ethdev.h" 47a27d9013SAlfredo Cardigliano #include "ionic_lif.h" 48a27d9013SAlfredo Cardigliano #include "ionic_rxtx.h" 49a27d9013SAlfredo Cardigliano 50e7222f94SAndrew Boyer static void 51e7222f94SAndrew Boyer ionic_empty_array(void **array, uint32_t cnt, uint16_t idx) 52e7222f94SAndrew Boyer { 53e7222f94SAndrew Boyer uint32_t i; 54e7222f94SAndrew Boyer 55e7222f94SAndrew Boyer for (i = idx; i < cnt; i++) 56e7222f94SAndrew Boyer if (array[i]) 57e7222f94SAndrew Boyer rte_pktmbuf_free_seg(array[i]); 58e7222f94SAndrew Boyer 59e7222f94SAndrew Boyer memset(array, 0, sizeof(void *) * cnt); 60e7222f94SAndrew Boyer } 61e7222f94SAndrew Boyer 62e7222f94SAndrew Boyer static void __rte_cold 63e7222f94SAndrew Boyer ionic_tx_empty(struct ionic_tx_qcq *txq) 64e7222f94SAndrew Boyer { 65e7222f94SAndrew Boyer struct ionic_queue *q = &txq->qcq.q; 66e7222f94SAndrew Boyer 67e7222f94SAndrew Boyer ionic_empty_array(q->info, q->num_descs, 0); 68e7222f94SAndrew Boyer } 69e7222f94SAndrew Boyer 70e7222f94SAndrew Boyer static void __rte_cold 71e7222f94SAndrew Boyer ionic_rx_empty(struct ionic_rx_qcq *rxq) 72e7222f94SAndrew Boyer { 73e7222f94SAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 74e7222f94SAndrew Boyer 75e7222f94SAndrew Boyer ionic_empty_array(q->info, q->num_descs, 0); 76e7222f94SAndrew Boyer } 77e7222f94SAndrew Boyer 78a27d9013SAlfredo Cardigliano /********************************************************************* 79a27d9013SAlfredo Cardigliano * 80a27d9013SAlfredo Cardigliano * TX functions 81a27d9013SAlfredo Cardigliano * 82a27d9013SAlfredo Cardigliano 
**********************************************************************/ 83a27d9013SAlfredo Cardigliano 84a27d9013SAlfredo Cardigliano void 85a27d9013SAlfredo Cardigliano ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 86a27d9013SAlfredo Cardigliano struct rte_eth_txq_info *qinfo) 87a27d9013SAlfredo Cardigliano { 88be39f75cSAndrew Boyer struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id]; 89be39f75cSAndrew Boyer struct ionic_queue *q = &txq->qcq.q; 90a27d9013SAlfredo Cardigliano 91a27d9013SAlfredo Cardigliano qinfo->nb_desc = q->num_descs; 9268591087SAndrew Boyer qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads; 9302eabf57SAndrew Boyer qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED; 94a27d9013SAlfredo Cardigliano } 95a27d9013SAlfredo Cardigliano 960de3e209SAndrew Boyer static __rte_always_inline void 97be39f75cSAndrew Boyer ionic_tx_flush(struct ionic_tx_qcq *txq) 98a27d9013SAlfredo Cardigliano { 99be39f75cSAndrew Boyer struct ionic_cq *cq = &txq->qcq.cq; 100be39f75cSAndrew Boyer struct ionic_queue *q = &txq->qcq.q; 101a27d9013SAlfredo Cardigliano struct rte_mbuf *txm, *next; 102a27d9013SAlfredo Cardigliano struct ionic_txq_comp *cq_desc_base = cq->base; 103a27d9013SAlfredo Cardigliano struct ionic_txq_comp *cq_desc; 104700f974dSAndrew Boyer void **info; 105a27d9013SAlfredo Cardigliano u_int32_t comp_index = (u_int32_t)-1; 106a27d9013SAlfredo Cardigliano 107a27d9013SAlfredo Cardigliano cq_desc = &cq_desc_base[cq->tail_idx]; 108a27d9013SAlfredo Cardigliano while (color_match(cq_desc->color, cq->done_color)) { 1092aed9865SAndrew Boyer cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1); 110a27d9013SAlfredo Cardigliano 111a27d9013SAlfredo Cardigliano /* Prefetch the next 4 descriptors (not really useful here) */ 112a27d9013SAlfredo Cardigliano if ((cq->tail_idx & 0x3) == 0) 113a27d9013SAlfredo Cardigliano rte_prefetch0(&cq_desc_base[cq->tail_idx]); 114a27d9013SAlfredo Cardigliano 115a27d9013SAlfredo Cardigliano if (cq->tail_idx == 0) 
116a27d9013SAlfredo Cardigliano cq->done_color = !cq->done_color; 117a27d9013SAlfredo Cardigliano 118a27d9013SAlfredo Cardigliano comp_index = cq_desc->comp_index; 119a27d9013SAlfredo Cardigliano 120a27d9013SAlfredo Cardigliano cq_desc = &cq_desc_base[cq->tail_idx]; 121a27d9013SAlfredo Cardigliano } 122a27d9013SAlfredo Cardigliano 123a27d9013SAlfredo Cardigliano if (comp_index != (u_int32_t)-1) { 124a27d9013SAlfredo Cardigliano while (q->tail_idx != comp_index) { 125700f974dSAndrew Boyer info = IONIC_INFO_PTR(q, q->tail_idx); 126a27d9013SAlfredo Cardigliano 1274ad56b7aSAndrew Boyer q->tail_idx = Q_NEXT_TO_SRVC(q, 1); 128a27d9013SAlfredo Cardigliano 129a27d9013SAlfredo Cardigliano /* Prefetch the next 4 descriptors */ 130a27d9013SAlfredo Cardigliano if ((q->tail_idx & 0x3) == 0) 131a27d9013SAlfredo Cardigliano /* q desc info */ 132a27d9013SAlfredo Cardigliano rte_prefetch0(&q->info[q->tail_idx]); 133a27d9013SAlfredo Cardigliano 134a27d9013SAlfredo Cardigliano /* 135a27d9013SAlfredo Cardigliano * Note: you can just use rte_pktmbuf_free, 136a27d9013SAlfredo Cardigliano * but this loop is faster 137a27d9013SAlfredo Cardigliano */ 138700f974dSAndrew Boyer txm = info[0]; 139a27d9013SAlfredo Cardigliano while (txm != NULL) { 140a27d9013SAlfredo Cardigliano next = txm->next; 141a27d9013SAlfredo Cardigliano rte_pktmbuf_free_seg(txm); 142a27d9013SAlfredo Cardigliano txm = next; 143a27d9013SAlfredo Cardigliano } 144a27d9013SAlfredo Cardigliano } 145a27d9013SAlfredo Cardigliano } 146a27d9013SAlfredo Cardigliano } 147a27d9013SAlfredo Cardigliano 148ce6427ddSThomas Monjalon void __rte_cold 1497483341aSXueming Li ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 150a27d9013SAlfredo Cardigliano { 1517483341aSXueming Li struct ionic_tx_qcq *txq = dev->data->tx_queues[qid]; 152a27d9013SAlfredo Cardigliano 153a27d9013SAlfredo Cardigliano IONIC_PRINT_CALL(); 154a27d9013SAlfredo Cardigliano 155be39f75cSAndrew Boyer ionic_qcq_free(&txq->qcq); 156a27d9013SAlfredo 
Cardigliano } 157a27d9013SAlfredo Cardigliano 158ce6427ddSThomas Monjalon int __rte_cold 159a27d9013SAlfredo Cardigliano ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) 160a27d9013SAlfredo Cardigliano { 161e7222f94SAndrew Boyer struct ionic_tx_stats *stats; 162be39f75cSAndrew Boyer struct ionic_tx_qcq *txq; 163a27d9013SAlfredo Cardigliano 1644ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id); 165a27d9013SAlfredo Cardigliano 166a27d9013SAlfredo Cardigliano txq = eth_dev->data->tx_queues[tx_queue_id]; 167a27d9013SAlfredo Cardigliano 1689fdf11c4SAndrew Boyer eth_dev->data->tx_queue_state[tx_queue_id] = 1699fdf11c4SAndrew Boyer RTE_ETH_QUEUE_STATE_STOPPED; 1709fdf11c4SAndrew Boyer 171a27d9013SAlfredo Cardigliano /* 172a27d9013SAlfredo Cardigliano * Note: we should better post NOP Tx desc and wait for its completion 173a27d9013SAlfredo Cardigliano * before disabling Tx queue 174a27d9013SAlfredo Cardigliano */ 175a27d9013SAlfredo Cardigliano 176e7222f94SAndrew Boyer ionic_lif_txq_deinit(txq); 177a27d9013SAlfredo Cardigliano 178e7222f94SAndrew Boyer /* Free all buffers from descriptor ring */ 179e7222f94SAndrew Boyer ionic_tx_empty(txq); 180e7222f94SAndrew Boyer 181e7222f94SAndrew Boyer stats = &txq->stats; 182e7222f94SAndrew Boyer IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju", 183e7222f94SAndrew Boyer txq->qcq.q.index, stats->packets, stats->tso); 184a27d9013SAlfredo Cardigliano 185a27d9013SAlfredo Cardigliano return 0; 186a27d9013SAlfredo Cardigliano } 187a27d9013SAlfredo Cardigliano 188ce6427ddSThomas Monjalon int __rte_cold 189a27d9013SAlfredo Cardigliano ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id, 1904ae96cb8SAndrew Boyer uint16_t nb_desc, uint32_t socket_id, 191a27d9013SAlfredo Cardigliano const struct rte_eth_txconf *tx_conf) 192a27d9013SAlfredo Cardigliano { 193a27d9013SAlfredo Cardigliano struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); 194be39f75cSAndrew Boyer struct 
ionic_tx_qcq *txq; 195a27d9013SAlfredo Cardigliano uint64_t offloads; 196a27d9013SAlfredo Cardigliano int err; 197a27d9013SAlfredo Cardigliano 198a27d9013SAlfredo Cardigliano if (tx_queue_id >= lif->ntxqcqs) { 199a27d9013SAlfredo Cardigliano IONIC_PRINT(DEBUG, "Queue index %u not available " 200a27d9013SAlfredo Cardigliano "(max %u queues)", 201a27d9013SAlfredo Cardigliano tx_queue_id, lif->ntxqcqs); 202a27d9013SAlfredo Cardigliano return -EINVAL; 203a27d9013SAlfredo Cardigliano } 204a27d9013SAlfredo Cardigliano 205a27d9013SAlfredo Cardigliano offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads; 2064ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, 2074ae96cb8SAndrew Boyer "Configuring skt %u TX queue %u with %u buffers, offloads %jx", 2084ae96cb8SAndrew Boyer socket_id, tx_queue_id, nb_desc, offloads); 209a27d9013SAlfredo Cardigliano 210a27d9013SAlfredo Cardigliano /* Validate number of receive descriptors */ 211a27d9013SAlfredo Cardigliano if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC) 212a27d9013SAlfredo Cardigliano return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */ 213a27d9013SAlfredo Cardigliano 214a27d9013SAlfredo Cardigliano /* Free memory prior to re-allocation if needed... 
*/ 215a27d9013SAlfredo Cardigliano if (eth_dev->data->tx_queues[tx_queue_id] != NULL) { 2167483341aSXueming Li ionic_dev_tx_queue_release(eth_dev, tx_queue_id); 217a27d9013SAlfredo Cardigliano eth_dev->data->tx_queues[tx_queue_id] = NULL; 218a27d9013SAlfredo Cardigliano } 219a27d9013SAlfredo Cardigliano 2209fdf11c4SAndrew Boyer eth_dev->data->tx_queue_state[tx_queue_id] = 2219fdf11c4SAndrew Boyer RTE_ETH_QUEUE_STATE_STOPPED; 2229fdf11c4SAndrew Boyer 2238ec5ad7fSAndrew Boyer err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq); 224a27d9013SAlfredo Cardigliano if (err) { 225a27d9013SAlfredo Cardigliano IONIC_PRINT(DEBUG, "Queue allocation failure"); 226a27d9013SAlfredo Cardigliano return -EINVAL; 227a27d9013SAlfredo Cardigliano } 228a27d9013SAlfredo Cardigliano 229a27d9013SAlfredo Cardigliano /* Do not start queue with rte_eth_dev_start() */ 23002eabf57SAndrew Boyer if (tx_conf->tx_deferred_start) 23102eabf57SAndrew Boyer txq->flags |= IONIC_QCQ_F_DEFERRED; 232a27d9013SAlfredo Cardigliano 23368591087SAndrew Boyer /* Convert the offload flags into queue flags */ 234295968d1SFerruh Yigit if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM) 23568591087SAndrew Boyer txq->flags |= IONIC_QCQ_F_CSUM_L3; 236295968d1SFerruh Yigit if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM) 23768591087SAndrew Boyer txq->flags |= IONIC_QCQ_F_CSUM_TCP; 238295968d1SFerruh Yigit if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM) 23968591087SAndrew Boyer txq->flags |= IONIC_QCQ_F_CSUM_UDP; 240a27d9013SAlfredo Cardigliano 241a27d9013SAlfredo Cardigliano eth_dev->data->tx_queues[tx_queue_id] = txq; 242a27d9013SAlfredo Cardigliano 243a27d9013SAlfredo Cardigliano return 0; 244a27d9013SAlfredo Cardigliano } 245a27d9013SAlfredo Cardigliano 246a27d9013SAlfredo Cardigliano /* 247a27d9013SAlfredo Cardigliano * Start Transmit Units for specified queue. 
248a27d9013SAlfredo Cardigliano */ 249ce6427ddSThomas Monjalon int __rte_cold 250a27d9013SAlfredo Cardigliano ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id) 251a27d9013SAlfredo Cardigliano { 2529fdf11c4SAndrew Boyer uint8_t *tx_queue_state = eth_dev->data->tx_queue_state; 253be39f75cSAndrew Boyer struct ionic_tx_qcq *txq; 254a27d9013SAlfredo Cardigliano int err; 255a27d9013SAlfredo Cardigliano 2569fdf11c4SAndrew Boyer if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) { 2579fdf11c4SAndrew Boyer IONIC_PRINT(DEBUG, "TX queue %u already started", 2589fdf11c4SAndrew Boyer tx_queue_id); 2599fdf11c4SAndrew Boyer return 0; 2609fdf11c4SAndrew Boyer } 2619fdf11c4SAndrew Boyer 262a27d9013SAlfredo Cardigliano txq = eth_dev->data->tx_queues[tx_queue_id]; 263a27d9013SAlfredo Cardigliano 2644ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs", 265be39f75cSAndrew Boyer tx_queue_id, txq->qcq.q.num_descs); 2664ae96cb8SAndrew Boyer 267a27d9013SAlfredo Cardigliano err = ionic_lif_txq_init(txq); 268a27d9013SAlfredo Cardigliano if (err) 269a27d9013SAlfredo Cardigliano return err; 270a27d9013SAlfredo Cardigliano 2719fdf11c4SAndrew Boyer tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 272a27d9013SAlfredo Cardigliano 273a27d9013SAlfredo Cardigliano return 0; 274a27d9013SAlfredo Cardigliano } 275a27d9013SAlfredo Cardigliano 276a27d9013SAlfredo Cardigliano static void 27764b08152SAlfredo Cardigliano ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm) 27864b08152SAlfredo Cardigliano { 27964b08152SAlfredo Cardigliano struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *); 28064b08152SAlfredo Cardigliano char *l3_hdr = ((char *)eth_hdr) + txm->l2_len; 28164b08152SAlfredo Cardigliano struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *) 28264b08152SAlfredo Cardigliano (l3_hdr + txm->l3_len); 28364b08152SAlfredo Cardigliano 284daa02b5cSOlivier Matz if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) { 28564b08152SAlfredo 
Cardigliano struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 28664b08152SAlfredo Cardigliano ipv4_hdr->hdr_checksum = 0; 28764b08152SAlfredo Cardigliano tcp_hdr->cksum = 0; 28864b08152SAlfredo Cardigliano tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr); 28964b08152SAlfredo Cardigliano } else { 29064b08152SAlfredo Cardigliano struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; 29164b08152SAlfredo Cardigliano tcp_hdr->cksum = 0; 29264b08152SAlfredo Cardigliano tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr); 29364b08152SAlfredo Cardigliano } 29464b08152SAlfredo Cardigliano } 29564b08152SAlfredo Cardigliano 29664b08152SAlfredo Cardigliano static void 29764b08152SAlfredo Cardigliano ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm) 29864b08152SAlfredo Cardigliano { 29964b08152SAlfredo Cardigliano struct ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct ether_hdr *); 30064b08152SAlfredo Cardigliano char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len + 30164b08152SAlfredo Cardigliano txm->outer_l3_len + txm->l2_len; 30264b08152SAlfredo Cardigliano struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *) 30364b08152SAlfredo Cardigliano (l3_hdr + txm->l3_len); 30464b08152SAlfredo Cardigliano 305daa02b5cSOlivier Matz if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) { 30664b08152SAlfredo Cardigliano struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr; 30764b08152SAlfredo Cardigliano ipv4_hdr->hdr_checksum = 0; 30864b08152SAlfredo Cardigliano tcp_hdr->cksum = 0; 30964b08152SAlfredo Cardigliano tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr); 31064b08152SAlfredo Cardigliano } else { 31164b08152SAlfredo Cardigliano struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr; 31264b08152SAlfredo Cardigliano tcp_hdr->cksum = 0; 31364b08152SAlfredo Cardigliano tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr); 31464b08152SAlfredo Cardigliano } 31564b08152SAlfredo Cardigliano } 31664b08152SAlfredo Cardigliano 
/*
 * Fill in one TSO descriptor and advance the queue head.
 * The mbuf pointer is recorded in the queue info array only on the
 * final (EOT) descriptor, so the whole chain is freed exactly once
 * when that completion arrives.
 */
static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	void **info;
	uint64_t cmd;
	uint8_t flags = 0;
	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	/* SOT/EOT mark the first/last descriptor of the TSO burst */
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	/* Device descriptors are little-endian */
	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(len);
	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
	desc->mss = rte_cpu_to_le_16(mss);

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);
		info[0] = txm;
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

/*
 * Return the next free Tx descriptor and expose its scatter-gather
 * element list through *elem.
 */
static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}

/*
 * Post a TSO packet: slice the mbuf chain into MSS-sized pieces, one
 * descriptor (plus SG elements) per hardware segment.
 * Always returns 0; descriptor-space availability is checked by the
 * caller (ionic_xmit_pkts) before dispatching here.
 */
static int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	/* Tunneled only if both an outer csum request and an outer IP type */
	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero. HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	/* First segment also carries the headers */
	seglen = hdrlen + mss;
	left = txm->data_len;
	data_iova = rte_mbuf_data_iova(txm);

	desc = ionic_tx_tso_next(txq, &elem);
	start = true;

	/* Chop data up into desc segments */

	while (left > 0) {
		len = RTE_MIN(seglen, left);
		frag_left = seglen - len;
		desc_addr = rte_cpu_to_le_64(data_iova + offset);
		desc_len = len;
		desc_nsge = 0;
		left -= len;
		offset += len;
		/* A partially-filled segment continues into the next frag */
		if (txm->nb_segs > 1 && frag_left > 0)
			continue;
		done = (txm->nb_segs == 1 && left == 0);
		ionic_tx_tso_post(q, desc, txm,
			desc_addr, desc_nsge, desc_len,
			hdrlen, mss,
			encap,
			vlan_tci, has_vlan,
			start, done);
		desc = ionic_tx_tso_next(txq, &elem);
		start = false;
		seglen = mss;
	}

	/* Chop frags into desc segments */

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0) {
				/* Finish the open segment via an SG element */
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = rte_cpu_to_le_16(len);
				elem++;
				desc_nsge++;
			} else {
				/* Open a fresh MSS-sized segment */
				len = RTE_MIN(mss, left);
				frag_left = mss - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;
			if (txm_seg->next != NULL && frag_left > 0)
				continue;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}

/*
 * Post one non-TSO packet: a single descriptor, with any additional
 * mbuf segments attached as scatter-gather elements.
 * Always returns 0; space is verified by the caller.
 */
static __rte_always_inline int
ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_sg_elem *elem;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm_seg;
	void **info;
	bool encap;
	bool has_vlan;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr, cmd;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	desc = &desc_base[q->head_idx];
	info = IONIC_INFO_PTR(q, q->head_idx);

	/* Request HW checksum only for offloads enabled on this queue */
	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	has_vlan = (ol_flags & RTE_MBUF_F_TX_VLAN);
	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		(ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(txm->data_len);
	desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);

	/* Remember the chain head so completion can free it */
	info[0] = txm;

	elem = sg_desc_base[q->head_idx].elems;

	txm_seg = txm->next;
	while (txm_seg != NULL) {
		elem->len = rte_cpu_to_le_16(txm_seg->data_len);
		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
		elem++;
		txm_seg = txm_seg->next;
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}

/*
 * Burst transmit entry point: reclaim completed buffers, clamp the
 * burst to available descriptor space, post each packet (TSO or
 * plain), then ring the doorbell once for the whole burst.
 * Returns the number of packets actually posted.
 */
uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *mbuf;
	uint32_t next_q_head_idx;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	int err;

	/* Cleaning old buffers */
	ionic_tx_flush(txq);

	nb_avail = ionic_q_space_avail(q);
	if (unlikely(nb_avail < nb_pkts)) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		next_q_head_idx = Q_NEXT_TO_POST(q, 1);
		if ((next_q_head_idx & 0x3) == 0) {
			struct ionic_txq_desc *desc_base = q->base;
			rte_prefetch0(&desc_base[next_q_head_idx]);
			rte_prefetch0(&q->info[next_q_head_idx]);
		}

		mbuf = tx_pkts[nb_tx];

		if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			err = ionic_tx_tso(txq, mbuf);
		else
			err = ionic_tx(txq, mbuf);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += mbuf->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		/* Ensure descriptor writes are visible before the doorbell */
		rte_wmb();
		ionic_q_flush(q);
	}

	stats->packets += nb_tx;
	stats->bytes += bytes_tx;

	return nb_tx;
}

612a27d9013SAlfredo Cardigliano /********************************************************************* 613a27d9013SAlfredo Cardigliano * 614a27d9013SAlfredo Cardigliano * TX prep functions 615a27d9013SAlfredo Cardigliano * 616a27d9013SAlfredo Cardigliano **********************************************************************/ 617a27d9013SAlfredo Cardigliano 618daa02b5cSOlivier Matz #define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 | \ 619daa02b5cSOlivier Matz RTE_MBUF_F_TX_IPV6 | \ 620daa02b5cSOlivier Matz RTE_MBUF_F_TX_VLAN | \ 621daa02b5cSOlivier Matz RTE_MBUF_F_TX_IP_CKSUM | \ 622daa02b5cSOlivier Matz RTE_MBUF_F_TX_TCP_SEG | \ 623daa02b5cSOlivier Matz RTE_MBUF_F_TX_L4_MASK) 624a27d9013SAlfredo Cardigliano 625a27d9013SAlfredo Cardigliano #define IONIC_TX_OFFLOAD_NOTSUP_MASK \ 626daa02b5cSOlivier Matz (RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK) 627a27d9013SAlfredo Cardigliano 628a27d9013SAlfredo Cardigliano uint16_t 629e19eea1eSAndrew Boyer ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) 630a27d9013SAlfredo Cardigliano { 631e19eea1eSAndrew Boyer struct ionic_tx_qcq *txq = tx_queue; 632a27d9013SAlfredo Cardigliano struct rte_mbuf *txm; 633a27d9013SAlfredo Cardigliano uint64_t offloads; 634a27d9013SAlfredo Cardigliano int i = 0; 635a27d9013SAlfredo Cardigliano 636a27d9013SAlfredo Cardigliano for (i = 0; i < nb_pkts; i++) { 637a27d9013SAlfredo Cardigliano txm = tx_pkts[i]; 638a27d9013SAlfredo Cardigliano 639e19eea1eSAndrew Boyer if (txm->nb_segs > txq->num_segs_fw) { 640a27d9013SAlfredo Cardigliano rte_errno = -EINVAL; 641a27d9013SAlfredo Cardigliano break; 642a27d9013SAlfredo Cardigliano } 643a27d9013SAlfredo Cardigliano 644a27d9013SAlfredo Cardigliano offloads = txm->ol_flags; 645a27d9013SAlfredo Cardigliano 646a27d9013SAlfredo Cardigliano if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) { 647a27d9013SAlfredo Cardigliano rte_errno = -ENOTSUP; 648a27d9013SAlfredo Cardigliano break; 649a27d9013SAlfredo Cardigliano } 
650a27d9013SAlfredo Cardigliano } 651a27d9013SAlfredo Cardigliano 652a27d9013SAlfredo Cardigliano return i; 653a27d9013SAlfredo Cardigliano } 654a27d9013SAlfredo Cardigliano 655a27d9013SAlfredo Cardigliano /********************************************************************* 656a27d9013SAlfredo Cardigliano * 657a27d9013SAlfredo Cardigliano * RX functions 658a27d9013SAlfredo Cardigliano * 659a27d9013SAlfredo Cardigliano **********************************************************************/ 660a27d9013SAlfredo Cardigliano 661a27d9013SAlfredo Cardigliano static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index, 662a27d9013SAlfredo Cardigliano struct rte_mbuf *mbuf); 663a27d9013SAlfredo Cardigliano 664a27d9013SAlfredo Cardigliano void 665a27d9013SAlfredo Cardigliano ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id, 666a27d9013SAlfredo Cardigliano struct rte_eth_rxq_info *qinfo) 667a27d9013SAlfredo Cardigliano { 668be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id]; 669be39f75cSAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 670a27d9013SAlfredo Cardigliano 671a27d9013SAlfredo Cardigliano qinfo->mp = rxq->mb_pool; 672a27d9013SAlfredo Cardigliano qinfo->scattered_rx = dev->data->scattered_rx; 673a27d9013SAlfredo Cardigliano qinfo->nb_desc = q->num_descs; 67402eabf57SAndrew Boyer qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED; 67568591087SAndrew Boyer qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads; 676a27d9013SAlfredo Cardigliano } 677a27d9013SAlfredo Cardigliano 678ce6427ddSThomas Monjalon void __rte_cold 6797483341aSXueming Li ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid) 680a27d9013SAlfredo Cardigliano { 6817483341aSXueming Li struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid]; 682be39f75cSAndrew Boyer 683be39f75cSAndrew Boyer if (!rxq) 684be39f75cSAndrew Boyer return; 685a27d9013SAlfredo Cardigliano 686a27d9013SAlfredo Cardigliano IONIC_PRINT_CALL(); 
687a27d9013SAlfredo Cardigliano 688be39f75cSAndrew Boyer ionic_qcq_free(&rxq->qcq); 689a27d9013SAlfredo Cardigliano } 690a27d9013SAlfredo Cardigliano 691ce6427ddSThomas Monjalon int __rte_cold 692a27d9013SAlfredo Cardigliano ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, 693a27d9013SAlfredo Cardigliano uint16_t rx_queue_id, 694a27d9013SAlfredo Cardigliano uint16_t nb_desc, 6954ae96cb8SAndrew Boyer uint32_t socket_id, 696a27d9013SAlfredo Cardigliano const struct rte_eth_rxconf *rx_conf, 697a27d9013SAlfredo Cardigliano struct rte_mempool *mp) 698a27d9013SAlfredo Cardigliano { 699a27d9013SAlfredo Cardigliano struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev); 700be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq; 701a27d9013SAlfredo Cardigliano uint64_t offloads; 702a27d9013SAlfredo Cardigliano int err; 703a27d9013SAlfredo Cardigliano 704a27d9013SAlfredo Cardigliano if (rx_queue_id >= lif->nrxqcqs) { 705a27d9013SAlfredo Cardigliano IONIC_PRINT(ERR, 706a27d9013SAlfredo Cardigliano "Queue index %u not available (max %u queues)", 707a27d9013SAlfredo Cardigliano rx_queue_id, lif->nrxqcqs); 708a27d9013SAlfredo Cardigliano return -EINVAL; 709a27d9013SAlfredo Cardigliano } 710a27d9013SAlfredo Cardigliano 711a27d9013SAlfredo Cardigliano offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads; 7124ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, 7134ae96cb8SAndrew Boyer "Configuring skt %u RX queue %u with %u buffers, offloads %jx", 7144ae96cb8SAndrew Boyer socket_id, rx_queue_id, nb_desc, offloads); 715a27d9013SAlfredo Cardigliano 71618a44465SAndrew Boyer if (!rx_conf->rx_drop_en) 71718a44465SAndrew Boyer IONIC_PRINT(WARNING, "No-drop mode is not supported"); 71818a44465SAndrew Boyer 719a27d9013SAlfredo Cardigliano /* Validate number of receive descriptors */ 720a27d9013SAlfredo Cardigliano if (!rte_is_power_of_2(nb_desc) || 721a27d9013SAlfredo Cardigliano nb_desc < IONIC_MIN_RING_DESC || 722a27d9013SAlfredo Cardigliano nb_desc > IONIC_MAX_RING_DESC) { 
723a27d9013SAlfredo Cardigliano IONIC_PRINT(ERR, 7244ae96cb8SAndrew Boyer "Bad descriptor count (%u) for queue %u (min: %u)", 725a27d9013SAlfredo Cardigliano nb_desc, rx_queue_id, IONIC_MIN_RING_DESC); 726a27d9013SAlfredo Cardigliano return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */ 727a27d9013SAlfredo Cardigliano } 728a27d9013SAlfredo Cardigliano 729a27d9013SAlfredo Cardigliano /* Free memory prior to re-allocation if needed... */ 730a27d9013SAlfredo Cardigliano if (eth_dev->data->rx_queues[rx_queue_id] != NULL) { 7317483341aSXueming Li ionic_dev_rx_queue_release(eth_dev, rx_queue_id); 732a27d9013SAlfredo Cardigliano eth_dev->data->rx_queues[rx_queue_id] = NULL; 733a27d9013SAlfredo Cardigliano } 734a27d9013SAlfredo Cardigliano 7359fdf11c4SAndrew Boyer eth_dev->data->rx_queue_state[rx_queue_id] = 7369fdf11c4SAndrew Boyer RTE_ETH_QUEUE_STATE_STOPPED; 7379fdf11c4SAndrew Boyer 738d5850081SAndrew Boyer err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, mp, 739be39f75cSAndrew Boyer &rxq); 740a27d9013SAlfredo Cardigliano if (err) { 7414ae96cb8SAndrew Boyer IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id); 742a27d9013SAlfredo Cardigliano return -EINVAL; 743a27d9013SAlfredo Cardigliano } 744a27d9013SAlfredo Cardigliano 745a27d9013SAlfredo Cardigliano rxq->mb_pool = mp; 746a27d9013SAlfredo Cardigliano 747a27d9013SAlfredo Cardigliano /* 748a27d9013SAlfredo Cardigliano * Note: the interface does not currently support 749295968d1SFerruh Yigit * RTE_ETH_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN 750a27d9013SAlfredo Cardigliano * when the adapter will be able to keep the CRC and subtract 751a27d9013SAlfredo Cardigliano * it to the length for all received packets: 752a27d9013SAlfredo Cardigliano * if (eth_dev->data->dev_conf.rxmode.offloads & 753295968d1SFerruh Yigit * RTE_ETH_RX_OFFLOAD_KEEP_CRC) 754a27d9013SAlfredo Cardigliano * rxq->crc_len = ETHER_CRC_LEN; 755a27d9013SAlfredo Cardigliano */ 756a27d9013SAlfredo Cardigliano 
757a27d9013SAlfredo Cardigliano /* Do not start queue with rte_eth_dev_start() */ 75802eabf57SAndrew Boyer if (rx_conf->rx_deferred_start) 75902eabf57SAndrew Boyer rxq->flags |= IONIC_QCQ_F_DEFERRED; 760a27d9013SAlfredo Cardigliano 761a27d9013SAlfredo Cardigliano eth_dev->data->rx_queues[rx_queue_id] = rxq; 762a27d9013SAlfredo Cardigliano 763a27d9013SAlfredo Cardigliano return 0; 764a27d9013SAlfredo Cardigliano } 765a27d9013SAlfredo Cardigliano 7660de3e209SAndrew Boyer static __rte_always_inline void 767be39f75cSAndrew Boyer ionic_rx_clean(struct ionic_rx_qcq *rxq, 768a27d9013SAlfredo Cardigliano uint32_t q_desc_index, uint32_t cq_desc_index, 76914f534beSAndrew Boyer struct ionic_rx_service *rx_svc) 770a27d9013SAlfredo Cardigliano { 771be39f75cSAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 772be39f75cSAndrew Boyer struct ionic_cq *cq = &rxq->qcq.cq; 773c6a9a6fbSAndrew Boyer struct ionic_rxq_comp *cq_desc_base = cq->base; 774a27d9013SAlfredo Cardigliano struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index]; 775700f974dSAndrew Boyer struct rte_mbuf *rxm, *rxm_seg; 776a27d9013SAlfredo Cardigliano uint64_t pkt_flags = 0; 777a27d9013SAlfredo Cardigliano uint32_t pkt_type; 778be39f75cSAndrew Boyer struct ionic_rx_stats *stats = &rxq->stats; 779a27d9013SAlfredo Cardigliano uint32_t left; 780700f974dSAndrew Boyer void **info; 781700f974dSAndrew Boyer 782700f974dSAndrew Boyer assert(q_desc_index == cq_desc->comp_index); 783700f974dSAndrew Boyer 784700f974dSAndrew Boyer info = IONIC_INFO_PTR(q, cq_desc->comp_index); 785700f974dSAndrew Boyer 786700f974dSAndrew Boyer rxm = info[0]; 787a27d9013SAlfredo Cardigliano 788a27d9013SAlfredo Cardigliano if (cq_desc->status) { 789a27d9013SAlfredo Cardigliano stats->bad_cq_status++; 790a27d9013SAlfredo Cardigliano ionic_rx_recycle(q, q_desc_index, rxm); 791a27d9013SAlfredo Cardigliano return; 792a27d9013SAlfredo Cardigliano } 793a27d9013SAlfredo Cardigliano 79414f534beSAndrew Boyer if (rx_svc->nb_rx >= rx_svc->nb_pkts) { 
795a27d9013SAlfredo Cardigliano stats->no_room++; 796a27d9013SAlfredo Cardigliano ionic_rx_recycle(q, q_desc_index, rxm); 797a27d9013SAlfredo Cardigliano return; 798a27d9013SAlfredo Cardigliano } 799a27d9013SAlfredo Cardigliano 800b671e69aSAndrew Boyer if (cq_desc->len > rxq->frame_size || cq_desc->len == 0) { 801a27d9013SAlfredo Cardigliano stats->bad_len++; 802a27d9013SAlfredo Cardigliano ionic_rx_recycle(q, q_desc_index, rxm); 803a27d9013SAlfredo Cardigliano return; 804a27d9013SAlfredo Cardigliano } 805a27d9013SAlfredo Cardigliano 806a27d9013SAlfredo Cardigliano rxm->data_off = RTE_PKTMBUF_HEADROOM; 807a27d9013SAlfredo Cardigliano rte_prefetch1((char *)rxm->buf_addr + rxm->data_off); 808a27d9013SAlfredo Cardigliano rxm->nb_segs = 1; /* cq_desc->num_sg_elems */ 809a27d9013SAlfredo Cardigliano rxm->pkt_len = cq_desc->len; 810be39f75cSAndrew Boyer rxm->port = rxq->qcq.lif->port_id; 811a27d9013SAlfredo Cardigliano 812d5850081SAndrew Boyer rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc->len); 813d5850081SAndrew Boyer left = cq_desc->len - rxm->data_len; 814a27d9013SAlfredo Cardigliano 815a27d9013SAlfredo Cardigliano rxm_seg = rxm->next; 816a27d9013SAlfredo Cardigliano while (rxm_seg && left) { 817d5850081SAndrew Boyer rxm_seg->data_len = RTE_MIN(rxq->seg_size, left); 818a27d9013SAlfredo Cardigliano left -= rxm_seg->data_len; 819a27d9013SAlfredo Cardigliano 820a27d9013SAlfredo Cardigliano rxm_seg = rxm_seg->next; 821a27d9013SAlfredo Cardigliano rxm->nb_segs++; 822a27d9013SAlfredo Cardigliano } 823a27d9013SAlfredo Cardigliano 82422e7171bSAlfredo Cardigliano /* RSS */ 825daa02b5cSOlivier Matz pkt_flags |= RTE_MBUF_F_RX_RSS_HASH; 8267506961aSAndrew Boyer rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash); 82722e7171bSAlfredo Cardigliano 828a27d9013SAlfredo Cardigliano /* Vlan Strip */ 829a27d9013SAlfredo Cardigliano if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) { 830daa02b5cSOlivier Matz pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED; 
8314a735599SAndrew Boyer rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci); 832a27d9013SAlfredo Cardigliano } 833a27d9013SAlfredo Cardigliano 834a27d9013SAlfredo Cardigliano /* Checksum */ 835a27d9013SAlfredo Cardigliano if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) { 836a27d9013SAlfredo Cardigliano if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK) 837daa02b5cSOlivier Matz pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD; 838a27d9013SAlfredo Cardigliano else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD) 839daa02b5cSOlivier Matz pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD; 840a27d9013SAlfredo Cardigliano 841a27d9013SAlfredo Cardigliano if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) || 842a27d9013SAlfredo Cardigliano (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK)) 843daa02b5cSOlivier Matz pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD; 844a27d9013SAlfredo Cardigliano else if ((cq_desc->csum_flags & 845a27d9013SAlfredo Cardigliano IONIC_RXQ_COMP_CSUM_F_TCP_BAD) || 846a27d9013SAlfredo Cardigliano (cq_desc->csum_flags & 847a27d9013SAlfredo Cardigliano IONIC_RXQ_COMP_CSUM_F_UDP_BAD)) 848daa02b5cSOlivier Matz pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD; 849a27d9013SAlfredo Cardigliano } 850a27d9013SAlfredo Cardigliano 851a27d9013SAlfredo Cardigliano rxm->ol_flags = pkt_flags; 852a27d9013SAlfredo Cardigliano 853a27d9013SAlfredo Cardigliano /* Packet Type */ 854a27d9013SAlfredo Cardigliano switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) { 855a27d9013SAlfredo Cardigliano case IONIC_PKT_TYPE_IPV4: 856a27d9013SAlfredo Cardigliano pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4; 857a27d9013SAlfredo Cardigliano break; 858a27d9013SAlfredo Cardigliano case IONIC_PKT_TYPE_IPV6: 859a27d9013SAlfredo Cardigliano pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6; 860a27d9013SAlfredo Cardigliano break; 861a27d9013SAlfredo Cardigliano case IONIC_PKT_TYPE_IPV4_TCP: 862a27d9013SAlfredo Cardigliano pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | 
863a27d9013SAlfredo Cardigliano RTE_PTYPE_L4_TCP; 864a27d9013SAlfredo Cardigliano break; 865a27d9013SAlfredo Cardigliano case IONIC_PKT_TYPE_IPV6_TCP: 866a27d9013SAlfredo Cardigliano pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | 867a27d9013SAlfredo Cardigliano RTE_PTYPE_L4_TCP; 868a27d9013SAlfredo Cardigliano break; 869a27d9013SAlfredo Cardigliano case IONIC_PKT_TYPE_IPV4_UDP: 870a27d9013SAlfredo Cardigliano pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | 871a27d9013SAlfredo Cardigliano RTE_PTYPE_L4_UDP; 872a27d9013SAlfredo Cardigliano break; 873a27d9013SAlfredo Cardigliano case IONIC_PKT_TYPE_IPV6_UDP: 874a27d9013SAlfredo Cardigliano pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | 875a27d9013SAlfredo Cardigliano RTE_PTYPE_L4_UDP; 876a27d9013SAlfredo Cardigliano break; 877a27d9013SAlfredo Cardigliano default: 878a27d9013SAlfredo Cardigliano { 879a27d9013SAlfredo Cardigliano struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm, 880a27d9013SAlfredo Cardigliano struct rte_ether_hdr *); 881a27d9013SAlfredo Cardigliano uint16_t ether_type = eth_h->ether_type; 882a27d9013SAlfredo Cardigliano if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP)) 883a27d9013SAlfredo Cardigliano pkt_type = RTE_PTYPE_L2_ETHER_ARP; 884a27d9013SAlfredo Cardigliano else 885a27d9013SAlfredo Cardigliano pkt_type = RTE_PTYPE_UNKNOWN; 886ed522a3fSAndrew Boyer stats->mtods++; 887a27d9013SAlfredo Cardigliano break; 888a27d9013SAlfredo Cardigliano } 889a27d9013SAlfredo Cardigliano } 890a27d9013SAlfredo Cardigliano 891a27d9013SAlfredo Cardigliano rxm->packet_type = pkt_type; 892a27d9013SAlfredo Cardigliano 89314f534beSAndrew Boyer rx_svc->rx_pkts[rx_svc->nb_rx] = rxm; 89414f534beSAndrew Boyer rx_svc->nb_rx++; 895a27d9013SAlfredo Cardigliano 896a27d9013SAlfredo Cardigliano stats->packets++; 897a27d9013SAlfredo Cardigliano stats->bytes += rxm->pkt_len; 898a27d9013SAlfredo Cardigliano } 899a27d9013SAlfredo Cardigliano 900a27d9013SAlfredo Cardigliano static void 901a27d9013SAlfredo 
Cardigliano ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index, 902a27d9013SAlfredo Cardigliano struct rte_mbuf *mbuf) 903a27d9013SAlfredo Cardigliano { 904a27d9013SAlfredo Cardigliano struct ionic_rxq_desc *desc_base = q->base; 905a27d9013SAlfredo Cardigliano struct ionic_rxq_desc *old = &desc_base[q_desc_index]; 906a27d9013SAlfredo Cardigliano struct ionic_rxq_desc *new = &desc_base[q->head_idx]; 907a27d9013SAlfredo Cardigliano 908a27d9013SAlfredo Cardigliano new->addr = old->addr; 909a27d9013SAlfredo Cardigliano new->len = old->len; 910a27d9013SAlfredo Cardigliano 911dd10c5b4SAndrew Boyer q->info[q->head_idx] = mbuf; 912dd10c5b4SAndrew Boyer 913dd10c5b4SAndrew Boyer q->head_idx = Q_NEXT_TO_POST(q, 1); 914dd10c5b4SAndrew Boyer 915dd10c5b4SAndrew Boyer ionic_q_flush(q); 916a27d9013SAlfredo Cardigliano } 917a27d9013SAlfredo Cardigliano 9180de3e209SAndrew Boyer static __rte_always_inline int 919b671e69aSAndrew Boyer ionic_rx_fill(struct ionic_rx_qcq *rxq) 920a27d9013SAlfredo Cardigliano { 921be39f75cSAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 922be39f75cSAndrew Boyer struct ionic_rxq_desc *desc, *desc_base = q->base; 923be39f75cSAndrew Boyer struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base; 924a27d9013SAlfredo Cardigliano struct ionic_rxq_sg_elem *elem; 925dd10c5b4SAndrew Boyer void **info; 926a27d9013SAlfredo Cardigliano rte_iova_t dma_addr; 927d5850081SAndrew Boyer uint32_t i, j; 928a27d9013SAlfredo Cardigliano 929a27d9013SAlfredo Cardigliano /* Initialize software ring entries */ 930a27d9013SAlfredo Cardigliano for (i = ionic_q_space_avail(q); i; i--) { 931a27d9013SAlfredo Cardigliano struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool); 932a27d9013SAlfredo Cardigliano struct rte_mbuf *prev_rxm_seg; 933a27d9013SAlfredo Cardigliano 934a27d9013SAlfredo Cardigliano if (rxm == NULL) { 935a27d9013SAlfredo Cardigliano IONIC_PRINT(ERR, "RX mbuf alloc failed"); 936a27d9013SAlfredo Cardigliano return -ENOMEM; 937a27d9013SAlfredo 
Cardigliano } 938a27d9013SAlfredo Cardigliano 939dd10c5b4SAndrew Boyer info = IONIC_INFO_PTR(q, q->head_idx); 940dd10c5b4SAndrew Boyer 941a27d9013SAlfredo Cardigliano desc = &desc_base[q->head_idx]; 942a27d9013SAlfredo Cardigliano dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm)); 943a27d9013SAlfredo Cardigliano desc->addr = dma_addr; 944d5850081SAndrew Boyer desc->len = rxq->hdr_seg_size; 945d5850081SAndrew Boyer desc->opcode = (q->num_segs > 1) ? IONIC_RXQ_DESC_OPCODE_SG : 946a27d9013SAlfredo Cardigliano IONIC_RXQ_DESC_OPCODE_SIMPLE; 947a27d9013SAlfredo Cardigliano rxm->next = NULL; 948a27d9013SAlfredo Cardigliano 949a27d9013SAlfredo Cardigliano prev_rxm_seg = rxm; 950a27d9013SAlfredo Cardigliano sg_desc = &sg_desc_base[q->head_idx]; 951a27d9013SAlfredo Cardigliano elem = sg_desc->elems; 952d5850081SAndrew Boyer for (j = 0; j < q->num_segs - 1u; j++) { 953a27d9013SAlfredo Cardigliano struct rte_mbuf *rxm_seg; 954a27d9013SAlfredo Cardigliano rte_iova_t data_iova; 955a27d9013SAlfredo Cardigliano 956a27d9013SAlfredo Cardigliano rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool); 957a27d9013SAlfredo Cardigliano if (rxm_seg == NULL) { 958a27d9013SAlfredo Cardigliano IONIC_PRINT(ERR, "RX mbuf alloc failed"); 959a27d9013SAlfredo Cardigliano return -ENOMEM; 960a27d9013SAlfredo Cardigliano } 961a27d9013SAlfredo Cardigliano 962d5850081SAndrew Boyer rxm_seg->data_off = 0; 963a27d9013SAlfredo Cardigliano data_iova = rte_mbuf_data_iova(rxm_seg); 964a27d9013SAlfredo Cardigliano dma_addr = rte_cpu_to_le_64(data_iova); 965a27d9013SAlfredo Cardigliano elem->addr = dma_addr; 966d5850081SAndrew Boyer elem->len = rxq->seg_size; 967a27d9013SAlfredo Cardigliano elem++; 968d5850081SAndrew Boyer 969a27d9013SAlfredo Cardigliano rxm_seg->next = NULL; 970a27d9013SAlfredo Cardigliano prev_rxm_seg->next = rxm_seg; 971a27d9013SAlfredo Cardigliano prev_rxm_seg = rxm_seg; 972a27d9013SAlfredo Cardigliano } 973a27d9013SAlfredo Cardigliano 974dd10c5b4SAndrew Boyer info[0] = rxm; 
975dd10c5b4SAndrew Boyer 976dd10c5b4SAndrew Boyer q->head_idx = Q_NEXT_TO_POST(q, 1); 977a27d9013SAlfredo Cardigliano } 978a27d9013SAlfredo Cardigliano 97977c60793SAndrew Boyer ionic_q_flush(q); 98077c60793SAndrew Boyer 981a27d9013SAlfredo Cardigliano return 0; 982a27d9013SAlfredo Cardigliano } 983a27d9013SAlfredo Cardigliano 984a27d9013SAlfredo Cardigliano /* 985a27d9013SAlfredo Cardigliano * Start Receive Units for specified queue. 986a27d9013SAlfredo Cardigliano */ 987ce6427ddSThomas Monjalon int __rte_cold 988a27d9013SAlfredo Cardigliano ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) 989a27d9013SAlfredo Cardigliano { 9909fdf11c4SAndrew Boyer uint8_t *rx_queue_state = eth_dev->data->rx_queue_state; 991be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq; 992d5850081SAndrew Boyer struct ionic_queue *q; 993a27d9013SAlfredo Cardigliano int err; 994a27d9013SAlfredo Cardigliano 9959fdf11c4SAndrew Boyer if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) { 9969fdf11c4SAndrew Boyer IONIC_PRINT(DEBUG, "RX queue %u already started", 9979fdf11c4SAndrew Boyer rx_queue_id); 9989fdf11c4SAndrew Boyer return 0; 9999fdf11c4SAndrew Boyer } 10009fdf11c4SAndrew Boyer 1001a27d9013SAlfredo Cardigliano rxq = eth_dev->data->rx_queues[rx_queue_id]; 1002d5850081SAndrew Boyer q = &rxq->qcq.q; 1003a27d9013SAlfredo Cardigliano 1004b671e69aSAndrew Boyer rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN; 1005b671e69aSAndrew Boyer 1006d5850081SAndrew Boyer /* Recalculate segment count based on MTU */ 1007d5850081SAndrew Boyer q->num_segs = 1 + 1008d5850081SAndrew Boyer (rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size; 1009d5850081SAndrew Boyer 1010d5850081SAndrew Boyer IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u segs %u", 1011d5850081SAndrew Boyer rx_queue_id, q->num_descs, rxq->frame_size, q->num_segs); 10124ae96cb8SAndrew Boyer 1013a27d9013SAlfredo Cardigliano err = ionic_lif_rxq_init(rxq); 1014a27d9013SAlfredo 
Cardigliano if (err) 1015a27d9013SAlfredo Cardigliano return err; 1016a27d9013SAlfredo Cardigliano 1017a27d9013SAlfredo Cardigliano /* Allocate buffers for descriptor rings */ 1018b671e69aSAndrew Boyer if (ionic_rx_fill(rxq) != 0) { 1019a27d9013SAlfredo Cardigliano IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d", 1020a27d9013SAlfredo Cardigliano rx_queue_id); 1021a27d9013SAlfredo Cardigliano return -1; 1022a27d9013SAlfredo Cardigliano } 1023a27d9013SAlfredo Cardigliano 10249fdf11c4SAndrew Boyer rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED; 1025a27d9013SAlfredo Cardigliano 1026a27d9013SAlfredo Cardigliano return 0; 1027a27d9013SAlfredo Cardigliano } 1028a27d9013SAlfredo Cardigliano 10290de3e209SAndrew Boyer static __rte_always_inline void 1030be39f75cSAndrew Boyer ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do, 103114f534beSAndrew Boyer struct ionic_rx_service *rx_svc) 1032a27d9013SAlfredo Cardigliano { 1033be39f75cSAndrew Boyer struct ionic_cq *cq = &rxq->qcq.cq; 1034be39f75cSAndrew Boyer struct ionic_queue *q = &rxq->qcq.q; 1035be39f75cSAndrew Boyer struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base; 1036a27d9013SAlfredo Cardigliano bool more; 1037a27d9013SAlfredo Cardigliano uint32_t curr_q_tail_idx, curr_cq_tail_idx; 1038a27d9013SAlfredo Cardigliano uint32_t work_done = 0; 1039a27d9013SAlfredo Cardigliano 1040a27d9013SAlfredo Cardigliano if (work_to_do == 0) 1041a27d9013SAlfredo Cardigliano return; 1042a27d9013SAlfredo Cardigliano 1043a27d9013SAlfredo Cardigliano cq_desc = &cq_desc_base[cq->tail_idx]; 1044a27d9013SAlfredo Cardigliano while (color_match(cq_desc->pkt_type_color, cq->done_color)) { 1045a27d9013SAlfredo Cardigliano curr_cq_tail_idx = cq->tail_idx; 10462aed9865SAndrew Boyer cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1); 1047a27d9013SAlfredo Cardigliano 1048a27d9013SAlfredo Cardigliano if (cq->tail_idx == 0) 1049a27d9013SAlfredo Cardigliano cq->done_color = !cq->done_color; 1050a27d9013SAlfredo Cardigliano 
1051a27d9013SAlfredo Cardigliano /* Prefetch the next 4 descriptors */ 1052a27d9013SAlfredo Cardigliano if ((cq->tail_idx & 0x3) == 0) 1053a27d9013SAlfredo Cardigliano rte_prefetch0(&cq_desc_base[cq->tail_idx]); 1054a27d9013SAlfredo Cardigliano 1055a27d9013SAlfredo Cardigliano do { 1056a27d9013SAlfredo Cardigliano more = (q->tail_idx != cq_desc->comp_index); 1057a27d9013SAlfredo Cardigliano 1058a27d9013SAlfredo Cardigliano curr_q_tail_idx = q->tail_idx; 10594ad56b7aSAndrew Boyer q->tail_idx = Q_NEXT_TO_SRVC(q, 1); 1060a27d9013SAlfredo Cardigliano 1061a27d9013SAlfredo Cardigliano /* Prefetch the next 4 descriptors */ 1062a27d9013SAlfredo Cardigliano if ((q->tail_idx & 0x3) == 0) 1063a27d9013SAlfredo Cardigliano /* q desc info */ 1064a27d9013SAlfredo Cardigliano rte_prefetch0(&q->info[q->tail_idx]); 1065a27d9013SAlfredo Cardigliano 1066c6a9a6fbSAndrew Boyer ionic_rx_clean(rxq, curr_q_tail_idx, curr_cq_tail_idx, 106714f534beSAndrew Boyer rx_svc); 1068a27d9013SAlfredo Cardigliano 1069a27d9013SAlfredo Cardigliano } while (more); 1070a27d9013SAlfredo Cardigliano 1071a27d9013SAlfredo Cardigliano if (++work_done == work_to_do) 1072a27d9013SAlfredo Cardigliano break; 1073a27d9013SAlfredo Cardigliano 1074a27d9013SAlfredo Cardigliano cq_desc = &cq_desc_base[cq->tail_idx]; 1075a27d9013SAlfredo Cardigliano } 1076a27d9013SAlfredo Cardigliano } 1077a27d9013SAlfredo Cardigliano 1078a27d9013SAlfredo Cardigliano /* 1079a27d9013SAlfredo Cardigliano * Stop Receive Units for specified queue. 
1080a27d9013SAlfredo Cardigliano */ 1081ce6427ddSThomas Monjalon int __rte_cold 1082a27d9013SAlfredo Cardigliano ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id) 1083a27d9013SAlfredo Cardigliano { 1084e7222f94SAndrew Boyer uint8_t *rx_queue_state = eth_dev->data->rx_queue_state; 1085e7222f94SAndrew Boyer struct ionic_rx_stats *stats; 1086be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq; 1087a27d9013SAlfredo Cardigliano 10884ae96cb8SAndrew Boyer IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id); 1089a27d9013SAlfredo Cardigliano 1090a27d9013SAlfredo Cardigliano rxq = eth_dev->data->rx_queues[rx_queue_id]; 1091a27d9013SAlfredo Cardigliano 1092e7222f94SAndrew Boyer rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; 10939fdf11c4SAndrew Boyer 1094e7222f94SAndrew Boyer ionic_lif_rxq_deinit(rxq); 1095a27d9013SAlfredo Cardigliano 1096e7222f94SAndrew Boyer /* Free all buffers from descriptor ring */ 1097e7222f94SAndrew Boyer ionic_rx_empty(rxq); 1098e7222f94SAndrew Boyer 1099e7222f94SAndrew Boyer stats = &rxq->stats; 1100e7222f94SAndrew Boyer IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju", 1101e7222f94SAndrew Boyer rxq->qcq.q.index, stats->packets, stats->mtods); 1102a27d9013SAlfredo Cardigliano 1103a27d9013SAlfredo Cardigliano return 0; 1104a27d9013SAlfredo Cardigliano } 1105a27d9013SAlfredo Cardigliano 1106a27d9013SAlfredo Cardigliano uint16_t 1107a27d9013SAlfredo Cardigliano ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, 1108a27d9013SAlfredo Cardigliano uint16_t nb_pkts) 1109a27d9013SAlfredo Cardigliano { 1110be39f75cSAndrew Boyer struct ionic_rx_qcq *rxq = rx_queue; 111114f534beSAndrew Boyer struct ionic_rx_service rx_svc; 1112a27d9013SAlfredo Cardigliano 111314f534beSAndrew Boyer rx_svc.rx_pkts = rx_pkts; 111414f534beSAndrew Boyer rx_svc.nb_pkts = nb_pkts; 111514f534beSAndrew Boyer rx_svc.nb_rx = 0; 1116a27d9013SAlfredo Cardigliano 111714f534beSAndrew Boyer ionic_rxq_service(rxq, nb_pkts, &rx_svc); 
1118a27d9013SAlfredo Cardigliano 1119b671e69aSAndrew Boyer ionic_rx_fill(rxq); 1120a27d9013SAlfredo Cardigliano 112114f534beSAndrew Boyer return rx_svc.nb_rx; 1122a27d9013SAlfredo Cardigliano } 1123