xref: /dpdk/drivers/net/ionic/ionic_rxtx.c (revision e7222f947e0dca5d3029c6bbc5213546581a075c)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2022 Advanced Micro Devices, Inc.
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 #include <stdint.h>
11 #include <stdarg.h>
12 #include <unistd.h>
13 #include <inttypes.h>
14 
15 #include <rte_byteorder.h>
16 #include <rte_common.h>
17 #include <rte_cycles.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_pci.h>
22 #include <rte_memory.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_mempool.h>
31 #include <rte_malloc.h>
32 #include <rte_mbuf.h>
33 #include <rte_ether.h>
34 #include <ethdev_driver.h>
35 #include <rte_prefetch.h>
36 #include <rte_udp.h>
37 #include <rte_tcp.h>
38 #include <rte_sctp.h>
39 #include <rte_string_fns.h>
40 #include <rte_errno.h>
41 #include <rte_ip.h>
42 #include <rte_net.h>
43 
44 #include "ionic_logs.h"
45 #include "ionic_mac_api.h"
46 #include "ionic_ethdev.h"
47 #include "ionic_lif.h"
48 #include "ionic_rxtx.h"
49 
50 static void
51 ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)
52 {
53 	uint32_t i;
54 
55 	for (i = idx; i < cnt; i++)
56 		if (array[i])
57 			rte_pktmbuf_free_seg(array[i]);
58 
59 	memset(array, 0, sizeof(void *) * cnt);
60 }
61 
62 static void __rte_cold
63 ionic_tx_empty(struct ionic_tx_qcq *txq)
64 {
65 	struct ionic_queue *q = &txq->qcq.q;
66 
67 	ionic_empty_array(q->info, q->num_descs, 0);
68 }
69 
70 static void __rte_cold
71 ionic_rx_empty(struct ionic_rx_qcq *rxq)
72 {
73 	struct ionic_queue *q = &rxq->qcq.q;
74 
75 	ionic_empty_array(q->info, q->num_descs, 0);
76 }
77 
78 /*********************************************************************
79  *
80  *  TX functions
81  *
82  **********************************************************************/
83 
84 void
85 ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
86 		struct rte_eth_txq_info *qinfo)
87 {
88 	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
89 	struct ionic_queue *q = &txq->qcq.q;
90 
91 	qinfo->nb_desc = q->num_descs;
92 	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
93 	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
94 }
95 
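/*
 * Service the Tx completion ring: scan all completions that carry the
 * current color, remember the most recent comp_index, then retire the
 * descriptors up to that index and free the mbufs recorded in the
 * queue info array.
 */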
96 static __rte_always_inline void
97 ionic_tx_flush(struct ionic_tx_qcq *txq)
98 {
99 	struct ionic_cq *cq = &txq->qcq.cq;
100 	struct ionic_queue *q = &txq->qcq.q;
101 	struct rte_mbuf *txm, *next;
102 	struct ionic_txq_comp *cq_desc_base = cq->base;
103 	struct ionic_txq_comp *cq_desc;
104 	void **info;
105 	uint32_t comp_index = (uint32_t)-1;
106 
107 	cq_desc = &cq_desc_base[cq->tail_idx];
108 	while (color_match(cq_desc->color, cq->done_color)) {
109 		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
110 
111 		/* Prefetch the next 4 descriptors (not really useful here) */
112 		if ((cq->tail_idx & 0x3) == 0)
113 			rte_prefetch0(&cq_desc_base[cq->tail_idx]);
114 
115 		if (cq->tail_idx == 0)
116 			cq->done_color = !cq->done_color;
117 
118 		comp_index = cq_desc->comp_index;
119 
120 		cq_desc = &cq_desc_base[cq->tail_idx];
121 	}
122 
123 	if (comp_index != (uint32_t)-1) {
124 		while (q->tail_idx != comp_index) {
125 			info = IONIC_INFO_PTR(q, q->tail_idx);
126 
127 			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
128 
129 			/* Prefetch the next 4 descriptors */
130 			if ((q->tail_idx & 0x3) == 0)
131 				/* q desc info */
132 				rte_prefetch0(&q->info[q->tail_idx]);
133 
134 			/*
135 			 * Note: you can just use rte_pktmbuf_free,
136 			 * but this loop is faster
137 			 */
138 			txm = info[0];
139 			while (txm != NULL) {
140 				next = txm->next;
141 				rte_pktmbuf_free_seg(txm);
142 				txm = next;
143 			}
144 		}
145 	}
146 }
147 
148 void __rte_cold
149 ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
150 {
151 	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];
152 
153 	IONIC_PRINT_CALL();
154 
155 	ionic_qcq_free(&txq->qcq);
156 }
157 
158 int __rte_cold
159 ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
160 {
161 	struct ionic_tx_stats *stats;
162 	struct ionic_tx_qcq *txq;
163 
164 	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);
165 
166 	txq = eth_dev->data->tx_queues[tx_queue_id];
167 
168 	eth_dev->data->tx_queue_state[tx_queue_id] =
169 		RTE_ETH_QUEUE_STATE_STOPPED;
170 
171 	/*
172 	 * Note: it would be better to post a NOP Tx descriptor and wait for
173 	 * its completion before disabling the Tx queue
174 	 */
175 
176 	ionic_lif_txq_deinit(txq);
177 
178 	/* Free all buffers from descriptor ring */
179 	ionic_tx_empty(txq);
180 
181 	stats = &txq->stats;
182 	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
183 		txq->qcq.q.index, stats->packets, stats->tso);
184 
185 	return 0;
186 }
187 
188 int __rte_cold
189 ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
190 		uint16_t nb_desc, uint32_t socket_id,
191 		const struct rte_eth_txconf *tx_conf)
192 {
193 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
194 	struct ionic_tx_qcq *txq;
195 	uint64_t offloads;
196 	int err;
197 
198 	if (tx_queue_id >= lif->ntxqcqs) {
199 		IONIC_PRINT(DEBUG, "Queue index %u not available "
200 			"(max %u queues)",
201 			tx_queue_id, lif->ntxqcqs);
202 		return -EINVAL;
203 	}
204 
205 	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
206 	IONIC_PRINT(DEBUG,
207 		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
208 		socket_id, tx_queue_id, nb_desc, offloads);
209 
210 	/* Validate number of transmit descriptors */
211 	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
212 		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
213 
214 	/* Free memory prior to re-allocation if needed... */
215 	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
216 		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
217 		eth_dev->data->tx_queues[tx_queue_id] = NULL;
218 	}
219 
220 	eth_dev->data->tx_queue_state[tx_queue_id] =
221 		RTE_ETH_QUEUE_STATE_STOPPED;
222 
223 	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
224 	if (err) {
225 		IONIC_PRINT(DEBUG, "Queue allocation failure");
226 		return -EINVAL;
227 	}
228 
229 	/* Do not start queue with rte_eth_dev_start() */
230 	if (tx_conf->tx_deferred_start)
231 		txq->flags |= IONIC_QCQ_F_DEFERRED;
232 
233 	/* Convert the offload flags into queue flags */
234 	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
235 		txq->flags |= IONIC_QCQ_F_CSUM_L3;
236 	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
237 		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
238 	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
239 		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
240 
241 	eth_dev->data->tx_queues[tx_queue_id] = txq;
242 
243 	return 0;
244 }
245 
246 /*
247  * Start Transmit Units for specified queue.
248  */
249 int __rte_cold
250 ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
251 {
252 	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
253 	struct ionic_tx_qcq *txq;
254 	int err;
255 
256 	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
257 		IONIC_PRINT(DEBUG, "TX queue %u already started",
258 			tx_queue_id);
259 		return 0;
260 	}
261 
262 	txq = eth_dev->data->tx_queues[tx_queue_id];
263 
264 	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
265 		tx_queue_id, txq->qcq.q.num_descs);
266 
267 	err = ionic_lif_txq_init(txq);
268 	if (err)
269 		return err;
270 
271 	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
272 
273 	return 0;
274 }
275 
276 static void
277 ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
278 {
279 	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
280 	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
281 	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
282 		(l3_hdr + txm->l3_len);
283 
284 	if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
285 		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
286 		ipv4_hdr->hdr_checksum = 0;
287 		tcp_hdr->cksum = 0;
288 		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
289 	} else {
290 		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
291 		tcp_hdr->cksum = 0;
292 		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
293 	}
294 }
295 
296 static void
297 ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
298 {
299 	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
300 	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
301 		txm->outer_l3_len + txm->l2_len;
302 	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
303 		(l3_hdr + txm->l3_len);
304 
305 	if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
306 		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
307 		ipv4_hdr->hdr_checksum = 0;
308 		tcp_hdr->cksum = 0;
309 		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
310 	} else {
311 		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
312 		tcp_hdr->cksum = 0;
313 		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
314 	}
315 }
316 
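/*
 * Fill in one TSO descriptor. The mbuf pointer is recorded in the info
 * array only for the final (EOT) descriptor of the packet, giving the
 * completion path a single reference to release.
 */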
317 static void
318 ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
319 		struct rte_mbuf *txm,
320 		rte_iova_t addr, uint8_t nsge, uint16_t len,
321 		uint32_t hdrlen, uint32_t mss,
322 		bool encap,
323 		uint16_t vlan_tci, bool has_vlan,
324 		bool start, bool done)
325 {
326 	void **info;
327 	uint64_t cmd;
328 	uint8_t flags = 0;
329 	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
330 	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
331 	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
332 	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;
333 
334 	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
335 		flags, nsge, addr);
336 	desc->cmd = rte_cpu_to_le_64(cmd);
337 	desc->len = rte_cpu_to_le_16(len);
338 	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
339 	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
340 	desc->mss = rte_cpu_to_le_16(mss);
341 
342 	if (done) {
343 		info = IONIC_INFO_PTR(q, q->head_idx);
344 		info[0] = txm;
345 	}
346 
347 	q->head_idx = Q_NEXT_TO_POST(q, 1);
348 }
349 
350 static struct ionic_txq_desc *
351 ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
352 {
353 	struct ionic_queue *q = &txq->qcq.q;
354 	struct ionic_txq_desc *desc_base = q->base;
355 	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
356 	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
357 	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];
358 
359 	*elem = sg_desc->elems;
360 	return desc;
361 }
362 
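/*
 * Transmit a TSO packet. The payload is carved into chunks of at most
 * (hdrlen + mss) bytes for the first descriptor and mss bytes thereafter.
 * Data from the first mbuf fills the main descriptors; data from later
 * mbuf segments that falls inside a partially-filled chunk is attached as
 * SG elements. The SOT/EOT flags mark the first and last descriptor of
 * the packet.
 */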
363 static int
364 ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
365 {
366 	struct ionic_queue *q = &txq->qcq.q;
367 	struct ionic_tx_stats *stats = &txq->stats;
368 	struct ionic_txq_desc *desc;
369 	struct ionic_txq_sg_elem *elem;
370 	struct rte_mbuf *txm_seg;
371 	rte_iova_t data_iova;
372 	uint64_t desc_addr = 0, next_addr;
373 	uint16_t desc_len = 0;
374 	uint8_t desc_nsge;
375 	uint32_t hdrlen;
376 	uint32_t mss = txm->tso_segsz;
377 	uint32_t frag_left = 0;
378 	uint32_t left;
379 	uint32_t seglen;
380 	uint32_t len;
381 	uint32_t offset = 0;
382 	bool start, done;
383 	bool encap;
384 	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
385 	uint16_t vlan_tci = txm->vlan_tci;
386 	uint64_t ol_flags = txm->ol_flags;
387 
388 	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
389 		 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
390 		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
391 		 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));
392 
393 	/* Preload inner-most TCP csum field with IP pseudo hdr
394 	 * calculated with IP length set to zero.  HW will later
395 	 * add in length to each TCP segment resulting from the TSO.
396 	 */
397 
398 	if (encap) {
399 		ionic_tx_tcp_inner_pseudo_csum(txm);
400 		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
401 			txm->l2_len + txm->l3_len + txm->l4_len;
402 	} else {
403 		ionic_tx_tcp_pseudo_csum(txm);
404 		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
405 	}
406 
407 	seglen = hdrlen + mss;
408 	left = txm->data_len;
409 	data_iova = rte_mbuf_data_iova(txm);
410 
411 	desc = ionic_tx_tso_next(txq, &elem);
412 	start = true;
413 
414 	/* Chop data up into desc segments */
415 
416 	while (left > 0) {
417 		len = RTE_MIN(seglen, left);
418 		frag_left = seglen - len;
419 		desc_addr = rte_cpu_to_le_64(data_iova + offset);
420 		desc_len = len;
421 		desc_nsge = 0;
422 		left -= len;
423 		offset += len;
424 		if (txm->nb_segs > 1 && frag_left > 0)
425 			continue;
426 		done = (txm->nb_segs == 1 && left == 0);
427 		ionic_tx_tso_post(q, desc, txm,
428 			desc_addr, desc_nsge, desc_len,
429 			hdrlen, mss,
430 			encap,
431 			vlan_tci, has_vlan,
432 			start, done);
433 		desc = ionic_tx_tso_next(txq, &elem);
434 		start = false;
435 		seglen = mss;
436 	}
437 
438 	/* Chop frags into desc segments */
439 
440 	txm_seg = txm->next;
441 	while (txm_seg != NULL) {
442 		offset = 0;
443 		data_iova = rte_mbuf_data_iova(txm_seg);
444 		left = txm_seg->data_len;
445 
446 		while (left > 0) {
447 			next_addr = rte_cpu_to_le_64(data_iova + offset);
448 			if (frag_left > 0) {
449 				len = RTE_MIN(frag_left, left);
450 				frag_left -= len;
451 				elem->addr = next_addr;
452 				elem->len = rte_cpu_to_le_16(len);
453 				elem++;
454 				desc_nsge++;
455 			} else {
456 				len = RTE_MIN(mss, left);
457 				frag_left = mss - len;
458 				desc_addr = next_addr;
459 				desc_len = len;
460 				desc_nsge = 0;
461 			}
462 			left -= len;
463 			offset += len;
464 			if (txm_seg->next != NULL && frag_left > 0)
465 				continue;
466 
467 			done = (txm_seg->next == NULL && left == 0);
468 			ionic_tx_tso_post(q, desc, txm_seg,
469 				desc_addr, desc_nsge, desc_len,
470 				hdrlen, mss,
471 				encap,
472 				vlan_tci, has_vlan,
473 				start, done);
474 			desc = ionic_tx_tso_next(txq, &elem);
475 			start = false;
476 		}
477 
478 		txm_seg = txm_seg->next;
479 	}
480 
481 	stats->tso++;
482 
483 	return 0;
484 }
485 
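/*
 * Post a single descriptor for a non-TSO packet. Checksum offload flags
 * are applied only when the corresponding offload was enabled on the
 * queue; any additional mbuf segments are attached as SG elements. The
 * mbuf pointer is saved in the info array so that ionic_tx_flush() can
 * free the chain once the descriptor completes.
 */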
486 static __rte_always_inline int
487 ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
488 {
489 	struct ionic_queue *q = &txq->qcq.q;
490 	struct ionic_txq_desc *desc, *desc_base = q->base;
491 	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
492 	struct ionic_txq_sg_elem *elem;
493 	struct ionic_tx_stats *stats = &txq->stats;
494 	struct rte_mbuf *txm_seg;
495 	void **info;
496 	bool encap;
497 	bool has_vlan;
498 	uint64_t ol_flags = txm->ol_flags;
499 	uint64_t addr, cmd;
500 	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
501 	uint8_t flags = 0;
502 
503 	desc = &desc_base[q->head_idx];
504 	info = IONIC_INFO_PTR(q, q->head_idx);
505 
506 	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
507 	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
508 		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
509 		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
510 	}
511 
512 	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
513 	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
514 	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
515 	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
516 		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
517 		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
518 	}
519 
520 	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
521 		stats->no_csum++;
522 
523 	has_vlan = (ol_flags & RTE_MBUF_F_TX_VLAN);
524 	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
525 			(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
526 			((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
527 			 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));
528 
529 	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
530 	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
531 
532 	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));
533 
534 	cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
535 	desc->cmd = rte_cpu_to_le_64(cmd);
536 	desc->len = rte_cpu_to_le_16(txm->data_len);
537 	desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
538 
539 	info[0] = txm;
540 
541 	elem = sg_desc_base[q->head_idx].elems;
542 
543 	txm_seg = txm->next;
544 	while (txm_seg != NULL) {
545 		elem->len = rte_cpu_to_le_16(txm_seg->data_len);
546 		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
547 		elem++;
548 		txm_seg = txm_seg->next;
549 	}
550 
551 	q->head_idx = Q_NEXT_TO_POST(q, 1);
552 
553 	return 0;
554 }
555 
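/*
 * Burst transmit: reclaim completed descriptors, clamp the burst to the
 * available ring space, encode each packet (TSO or single descriptor),
 * and notify the device once via ionic_q_flush() after the last
 * descriptor has been posted.
 */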
556 uint16_t
557 ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
558 		uint16_t nb_pkts)
559 {
560 	struct ionic_tx_qcq *txq = tx_queue;
561 	struct ionic_queue *q = &txq->qcq.q;
562 	struct ionic_tx_stats *stats = &txq->stats;
563 	uint32_t next_q_head_idx;
564 	uint32_t bytes_tx = 0;
565 	uint16_t nb_avail, nb_tx = 0;
566 	int err;
567 
568 	/* Clean up any already-completed Tx buffers */
569 	ionic_tx_flush(txq);
570 
571 	nb_avail = ionic_q_space_avail(q);
572 	if (unlikely(nb_avail < nb_pkts)) {
573 		stats->stop += nb_pkts - nb_avail;
574 		nb_pkts = nb_avail;
575 	}
576 
577 	while (nb_tx < nb_pkts) {
578 		next_q_head_idx = Q_NEXT_TO_POST(q, 1);
579 		if ((next_q_head_idx & 0x3) == 0) {
580 			struct ionic_txq_desc *desc_base = q->base;
581 			rte_prefetch0(&desc_base[next_q_head_idx]);
582 			rte_prefetch0(&q->info[next_q_head_idx]);
583 		}
584 
585 		if (tx_pkts[nb_tx]->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
586 			err = ionic_tx_tso(txq, tx_pkts[nb_tx]);
587 		else
588 			err = ionic_tx(txq, tx_pkts[nb_tx]);
589 		if (err) {
590 			stats->drop += nb_pkts - nb_tx;
591 			break;
592 		}
593 
594 		bytes_tx += tx_pkts[nb_tx]->pkt_len;
595 		nb_tx++;
596 	}
597 
598 	if (nb_tx > 0) {
599 		rte_wmb();
600 		ionic_q_flush(q);
601 	}
602 
603 	stats->packets += nb_tx;
604 	stats->bytes += bytes_tx;
605 
606 	return nb_tx;
607 }
608 
609 /*********************************************************************
610  *
611  *  TX prep functions
612  *
613  **********************************************************************/
614 
615 #define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 |		\
616 	RTE_MBUF_F_TX_IPV6 |		\
617 	RTE_MBUF_F_TX_VLAN |		\
618 	RTE_MBUF_F_TX_IP_CKSUM |	\
619 	RTE_MBUF_F_TX_TCP_SEG |	\
620 	RTE_MBUF_F_TX_L4_MASK)
621 
622 #define IONIC_TX_OFFLOAD_NOTSUP_MASK \
623 	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
624 
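/*
 * Tx prepare callback: verify that each packet fits within the firmware
 * segment limit and uses only offload flags in IONIC_TX_OFFLOAD_MASK.
 * The count of packets that passed the checks is returned; rte_errno
 * indicates why the first rejected packet failed.
 *
 * For reference, a minimal sketch of how an application might pair the
 * prepare and burst callbacks (illustrative only; port, queue and the
 * pkts array are placeholders):
 *
 *	uint16_t nb_ok = rte_eth_tx_prepare(port, queue, pkts, nb);
 *	if (nb_ok != nb)
 *		printf("pkt %u rejected: rte_errno %d\n", nb_ok, rte_errno);
 *	uint16_t nb_sent = rte_eth_tx_burst(port, queue, pkts, nb_ok);
 */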
625 uint16_t
626 ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
627 {
628 	struct ionic_tx_qcq *txq = tx_queue;
629 	struct rte_mbuf *txm;
630 	uint64_t offloads;
631 	int i = 0;
632 
633 	for (i = 0; i < nb_pkts; i++) {
634 		txm = tx_pkts[i];
635 
636 		if (txm->nb_segs > txq->num_segs_fw) {
637 			rte_errno = -EINVAL;
638 			break;
639 		}
640 
641 		offloads = txm->ol_flags;
642 
643 		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
644 			rte_errno = -ENOTSUP;
645 			break;
646 		}
647 	}
648 
649 	return i;
650 }
651 
652 /*********************************************************************
653  *
654  *  RX functions
655  *
656  **********************************************************************/
657 
658 static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
659 		struct rte_mbuf *mbuf);
660 
661 void
662 ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
663 		struct rte_eth_rxq_info *qinfo)
664 {
665 	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
666 	struct ionic_queue *q = &rxq->qcq.q;
667 
668 	qinfo->mp = rxq->mb_pool;
669 	qinfo->scattered_rx = dev->data->scattered_rx;
670 	qinfo->nb_desc = q->num_descs;
671 	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
672 	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
673 }
674 
675 void __rte_cold
676 ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
677 {
678 	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];
679 
680 	if (!rxq)
681 		return;
682 
683 	IONIC_PRINT_CALL();
684 
685 	ionic_qcq_free(&rxq->qcq);
686 }
687 
688 int __rte_cold
689 ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
690 		uint16_t rx_queue_id,
691 		uint16_t nb_desc,
692 		uint32_t socket_id,
693 		const struct rte_eth_rxconf *rx_conf,
694 		struct rte_mempool *mp)
695 {
696 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
697 	struct ionic_rx_qcq *rxq;
698 	uint64_t offloads;
699 	int err;
700 
701 	if (rx_queue_id >= lif->nrxqcqs) {
702 		IONIC_PRINT(ERR,
703 			"Queue index %u not available (max %u queues)",
704 			rx_queue_id, lif->nrxqcqs);
705 		return -EINVAL;
706 	}
707 
708 	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
709 	IONIC_PRINT(DEBUG,
710 		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
711 		socket_id, rx_queue_id, nb_desc, offloads);
712 
713 	if (!rx_conf->rx_drop_en)
714 		IONIC_PRINT(WARNING, "No-drop mode is not supported");
715 
716 	/* Validate number of receive descriptors */
717 	if (!rte_is_power_of_2(nb_desc) ||
718 			nb_desc < IONIC_MIN_RING_DESC ||
719 			nb_desc > IONIC_MAX_RING_DESC) {
720 		IONIC_PRINT(ERR,
721 			"Bad descriptor count (%u) for queue %u (min: %u)",
722 			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
723 		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
724 	}
725 
726 	/* Free memory prior to re-allocation if needed... */
727 	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
728 		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
729 		eth_dev->data->rx_queues[rx_queue_id] = NULL;
730 	}
731 
732 	eth_dev->data->rx_queue_state[rx_queue_id] =
733 		RTE_ETH_QUEUE_STATE_STOPPED;
734 
735 	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc,
736 			&rxq);
737 	if (err) {
738 		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
739 		return -EINVAL;
740 	}
741 
742 	rxq->mb_pool = mp;
743 
744 	/*
745 	 * Note: the interface does not currently support
746 	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC. Once the adapter is able to keep
747 	 * the CRC, ETHER_CRC_LEN will also need to be subtracted from the
748 	 * length of all received packets:
749 	 * if (eth_dev->data->dev_conf.rxmode.offloads &
750 	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
751 	 *   rxq->crc_len = ETHER_CRC_LEN;
752 	 */
753 
754 	/* Do not start queue with rte_eth_dev_start() */
755 	if (rx_conf->rx_deferred_start)
756 		rxq->flags |= IONIC_QCQ_F_DEFERRED;
757 
758 	eth_dev->data->rx_queues[rx_queue_id] = rxq;
759 
760 	return 0;
761 }
762 
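/*
 * Convert one Rx completion into the mbuf that was posted at that
 * descriptor index. On a bad completion status, a full receive array, or
 * an out-of-range length, the buffer is recycled back onto the ring.
 * Otherwise the segment lengths, RSS hash, VLAN tag, checksum flags and
 * packet type are filled in and the mbuf is appended to the caller's
 * receive array.
 */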
763 static __rte_always_inline void
764 ionic_rx_clean(struct ionic_rx_qcq *rxq,
765 		uint32_t q_desc_index, uint32_t cq_desc_index,
766 		struct ionic_rx_service *rx_svc)
767 {
768 	struct ionic_queue *q = &rxq->qcq.q;
769 	struct ionic_cq *cq = &rxq->qcq.cq;
770 	struct ionic_rxq_comp *cq_desc_base = cq->base;
771 	struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
772 	struct rte_mbuf *rxm, *rxm_seg;
773 	uint64_t pkt_flags = 0;
774 	uint32_t pkt_type;
775 	struct ionic_rx_stats *stats = &rxq->stats;
776 	uint32_t buf_size = (uint16_t)
777 		(rte_pktmbuf_data_room_size(rxq->mb_pool) -
778 		RTE_PKTMBUF_HEADROOM);
779 	uint32_t left;
780 	void **info;
781 
782 	assert(q_desc_index == cq_desc->comp_index);
783 
784 	info = IONIC_INFO_PTR(q, cq_desc->comp_index);
785 
786 	rxm = info[0];
787 
788 	if (cq_desc->status) {
789 		stats->bad_cq_status++;
790 		ionic_rx_recycle(q, q_desc_index, rxm);
791 		return;
792 	}
793 
794 	if (rx_svc->nb_rx >= rx_svc->nb_pkts) {
795 		stats->no_room++;
796 		ionic_rx_recycle(q, q_desc_index, rxm);
797 		return;
798 	}
799 
800 	if (cq_desc->len > rxq->frame_size || cq_desc->len == 0) {
801 		stats->bad_len++;
802 		ionic_rx_recycle(q, q_desc_index, rxm);
803 		return;
804 	}
805 
806 	rxm->data_off = RTE_PKTMBUF_HEADROOM;
807 	rte_prefetch1((char *)rxm->buf_addr + rxm->data_off);
808 	rxm->nb_segs = 1; /* cq_desc->num_sg_elems */
809 	rxm->pkt_len = cq_desc->len;
810 	rxm->port = rxq->qcq.lif->port_id;
811 
812 	left = cq_desc->len;
813 
814 	rxm->data_len = RTE_MIN(buf_size, left);
815 	left -= rxm->data_len;
816 
817 	rxm_seg = rxm->next;
818 	while (rxm_seg && left) {
819 		rxm_seg->data_len = RTE_MIN(buf_size, left);
820 		left -= rxm_seg->data_len;
821 
822 		rxm_seg = rxm_seg->next;
823 		rxm->nb_segs++;
824 	}
825 
826 	/* RSS */
827 	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
828 	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);
829 
830 	/* Vlan Strip */
831 	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
832 		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
833 		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
834 	}
835 
836 	/* Checksum */
837 	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
838 		if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
839 			pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
840 		else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
841 			pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
842 
843 		if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
844 			(cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
845 			pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
846 		else if ((cq_desc->csum_flags &
847 				IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
848 				(cq_desc->csum_flags &
849 				IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
850 			pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
851 	}
852 
853 	rxm->ol_flags = pkt_flags;
854 
855 	/* Packet Type */
856 	switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
857 	case IONIC_PKT_TYPE_IPV4:
858 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
859 		break;
860 	case IONIC_PKT_TYPE_IPV6:
861 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
862 		break;
863 	case IONIC_PKT_TYPE_IPV4_TCP:
864 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
865 			RTE_PTYPE_L4_TCP;
866 		break;
867 	case IONIC_PKT_TYPE_IPV6_TCP:
868 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
869 			RTE_PTYPE_L4_TCP;
870 		break;
871 	case IONIC_PKT_TYPE_IPV4_UDP:
872 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
873 			RTE_PTYPE_L4_UDP;
874 		break;
875 	case IONIC_PKT_TYPE_IPV6_UDP:
876 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
877 			RTE_PTYPE_L4_UDP;
878 		break;
879 	default:
880 		{
881 			struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
882 				struct rte_ether_hdr *);
883 			uint16_t ether_type = eth_h->ether_type;
884 			if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
885 				pkt_type = RTE_PTYPE_L2_ETHER_ARP;
886 			else
887 				pkt_type = RTE_PTYPE_UNKNOWN;
888 			stats->mtods++;
889 			break;
890 		}
891 	}
892 
893 	rxm->packet_type = pkt_type;
894 
895 	rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
896 	rx_svc->nb_rx++;
897 
898 	stats->packets++;
899 	stats->bytes += rxm->pkt_len;
900 }
901 
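/*
 * Re-post a dropped buffer: copy the address and length from the old
 * descriptor into the current head descriptor so the same mbuf is reused
 * instead of being freed and reallocated.
 */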
902 static void
903 ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
904 		 struct rte_mbuf *mbuf)
905 {
906 	struct ionic_rxq_desc *desc_base = q->base;
907 	struct ionic_rxq_desc *old = &desc_base[q_desc_index];
908 	struct ionic_rxq_desc *new = &desc_base[q->head_idx];
909 
910 	new->addr = old->addr;
911 	new->len = old->len;
912 
913 	q->info[q->head_idx] = mbuf;
914 
915 	q->head_idx = Q_NEXT_TO_POST(q, 1);
916 
917 	ionic_q_flush(q);
918 }
919 
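/*
 * Post mbufs to every free descriptor. When the configured frame size
 * does not fit into a single mempool buffer, additional mbufs are chained
 * through the SG descriptor elements. The head mbuf is recorded in the
 * info array and the device is notified once at the end.
 */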
920 static __rte_always_inline int
921 ionic_rx_fill(struct ionic_rx_qcq *rxq)
922 {
923 	struct ionic_queue *q = &rxq->qcq.q;
924 	struct ionic_rxq_desc *desc, *desc_base = q->base;
925 	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
926 	struct ionic_rxq_sg_elem *elem;
927 	void **info;
928 	rte_iova_t dma_addr;
929 	uint32_t i, j, nsegs, buf_size, size;
930 
931 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
932 		RTE_PKTMBUF_HEADROOM);
933 
934 	/* Initialize software ring entries */
935 	for (i = ionic_q_space_avail(q); i; i--) {
936 		struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
937 		struct rte_mbuf *prev_rxm_seg;
938 
939 		if (rxm == NULL) {
940 			IONIC_PRINT(ERR, "RX mbuf alloc failed");
941 			return -ENOMEM;
942 		}
943 
944 		info = IONIC_INFO_PTR(q, q->head_idx);
945 
946 		nsegs = (rxq->frame_size + buf_size - 1) / buf_size;
947 
948 		desc = &desc_base[q->head_idx];
949 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
950 		desc->addr = dma_addr;
951 		desc->len = buf_size;
952 		size = buf_size;
953 		desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
954 			IONIC_RXQ_DESC_OPCODE_SIMPLE;
955 		rxm->next = NULL;
956 
957 		prev_rxm_seg = rxm;
958 		sg_desc = &sg_desc_base[q->head_idx];
959 		elem = sg_desc->elems;
960 		for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
961 			struct rte_mbuf *rxm_seg;
962 			rte_iova_t data_iova;
963 
964 			rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
965 			if (rxm_seg == NULL) {
966 				IONIC_PRINT(ERR, "RX mbuf alloc failed");
967 				return -ENOMEM;
968 			}
969 
970 			data_iova = rte_mbuf_data_iova(rxm_seg);
971 			dma_addr = rte_cpu_to_le_64(data_iova);
972 			elem->addr = dma_addr;
973 			elem->len = buf_size;
974 			size += buf_size;
975 			elem++;
976 			rxm_seg->next = NULL;
977 			prev_rxm_seg->next = rxm_seg;
978 			prev_rxm_seg = rxm_seg;
979 		}
980 
981 		if (size < rxq->frame_size)
982 			IONIC_PRINT(ERR, "Rx SG size is not sufficient (%u < %u)",
983 				size, rxq->frame_size);
984 
985 		info[0] = rxm;
986 
987 		q->head_idx = Q_NEXT_TO_POST(q, 1);
988 	}
989 
990 	ionic_q_flush(q);
991 
992 	return 0;
993 }
994 
995 /*
996  * Start Receive Units for specified queue.
997  */
998 int __rte_cold
999 ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
1000 {
1001 	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
1002 	struct ionic_rx_qcq *rxq;
1003 	int err;
1004 
1005 	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
1006 		IONIC_PRINT(DEBUG, "RX queue %u already started",
1007 			rx_queue_id);
1008 		return 0;
1009 	}
1010 
1011 	rxq = eth_dev->data->rx_queues[rx_queue_id];
1012 
1013 	rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;
1014 
1015 	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u",
1016 		rx_queue_id, rxq->qcq.q.num_descs, rxq->frame_size);
1017 
1018 	err = ionic_lif_rxq_init(rxq);
1019 	if (err)
1020 		return err;
1021 
1022 	/* Allocate buffers for descriptor rings */
1023 	if (ionic_rx_fill(rxq) != 0) {
1024 		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
1025 			rx_queue_id);
1026 		return -1;
1027 	}
1028 
1029 	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1030 
1031 	return 0;
1032 }
1033 
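/*
 * Walk the completion ring while the color bit matches, cleaning queue
 * descriptors up to each completion's comp_index, until work_to_do
 * completions have been processed.
 */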
1034 static __rte_always_inline void
1035 ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
1036 		struct ionic_rx_service *rx_svc)
1037 {
1038 	struct ionic_cq *cq = &rxq->qcq.cq;
1039 	struct ionic_queue *q = &rxq->qcq.q;
1040 	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
1041 	bool more;
1042 	uint32_t curr_q_tail_idx, curr_cq_tail_idx;
1043 	uint32_t work_done = 0;
1044 
1045 	if (work_to_do == 0)
1046 		return;
1047 
1048 	cq_desc = &cq_desc_base[cq->tail_idx];
1049 	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
1050 		curr_cq_tail_idx = cq->tail_idx;
1051 		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
1052 
1053 		if (cq->tail_idx == 0)
1054 			cq->done_color = !cq->done_color;
1055 
1056 		/* Prefetch the next 4 descriptors */
1057 		if ((cq->tail_idx & 0x3) == 0)
1058 			rte_prefetch0(&cq_desc_base[cq->tail_idx]);
1059 
1060 		do {
1061 			more = (q->tail_idx != cq_desc->comp_index);
1062 
1063 			curr_q_tail_idx = q->tail_idx;
1064 			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
1065 
1066 			/* Prefetch the next 4 descriptors */
1067 			if ((q->tail_idx & 0x3) == 0)
1068 				/* q desc info */
1069 				rte_prefetch0(&q->info[q->tail_idx]);
1070 
1071 			ionic_rx_clean(rxq, curr_q_tail_idx, curr_cq_tail_idx,
1072 				rx_svc);
1073 
1074 		} while (more);
1075 
1076 		if (++work_done == work_to_do)
1077 			break;
1078 
1079 		cq_desc = &cq_desc_base[cq->tail_idx];
1080 	}
1081 }
1082 
1083 /*
1084  * Stop Receive Units for specified queue.
1085  */
1086 int __rte_cold
1087 ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
1088 {
1089 	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
1090 	struct ionic_rx_stats *stats;
1091 	struct ionic_rx_qcq *rxq;
1092 
1093 	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);
1094 
1095 	rxq = eth_dev->data->rx_queues[rx_queue_id];
1096 
1097 	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1098 
1099 	ionic_lif_rxq_deinit(rxq);
1100 
1101 	/* Free all buffers from descriptor ring */
1102 	ionic_rx_empty(rxq);
1103 
1104 	stats = &rxq->stats;
1105 	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
1106 		rxq->qcq.q.index, stats->packets, stats->mtods);
1107 
1108 	return 0;
1109 }
1110 
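/*
 * Burst receive: service up to nb_pkts completions into rx_pkts, then
 * replenish the receive ring.
 */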
1111 uint16_t
1112 ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1113 		uint16_t nb_pkts)
1114 {
1115 	struct ionic_rx_qcq *rxq = rx_queue;
1116 	struct ionic_rx_service rx_svc;
1117 
1118 	rx_svc.rx_pkts = rx_pkts;
1119 	rx_svc.nb_pkts = nb_pkts;
1120 	rx_svc.nb_rx = 0;
1121 
1122 	ionic_rxq_service(rxq, nb_pkts, &rx_svc);
1123 
1124 	ionic_rx_fill(rxq);
1125 
1126 	return rx_svc.nb_rx;
1127 }
1128