xref: /dpdk/drivers/net/ionic/ionic_rxtx.c (revision 9ac234ee8b876310a8b82550631ce2c713a175e4)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018-2022 Advanced Micro Devices, Inc.
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 #include <stdint.h>
11 #include <stdarg.h>
12 #include <unistd.h>
13 #include <inttypes.h>
14 
15 #include <rte_byteorder.h>
16 #include <rte_common.h>
17 #include <rte_cycles.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_pci.h>
22 #include <rte_memory.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_mempool.h>
31 #include <rte_malloc.h>
32 #include <rte_mbuf.h>
33 #include <rte_ether.h>
34 #include <ethdev_driver.h>
35 #include <rte_prefetch.h>
36 #include <rte_udp.h>
37 #include <rte_tcp.h>
38 #include <rte_sctp.h>
39 #include <rte_string_fns.h>
40 #include <rte_errno.h>
41 #include <rte_ip.h>
42 #include <rte_net.h>
43 
44 #include "ionic_logs.h"
45 #include "ionic_mac_api.h"
46 #include "ionic_ethdev.h"
47 #include "ionic_lif.h"
48 #include "ionic_rxtx.h"
49 
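/*
 * Free any mbufs still referenced by the per-descriptor info array,
 * starting at idx, then zero the entire array. Used when a queue is
 * emptied on stop.
 */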
50 static void
51 ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)
52 {
53 	uint32_t i;
54 
55 	for (i = idx; i < cnt; i++)
56 		if (array[i])
57 			rte_pktmbuf_free_seg(array[i]);
58 
59 	memset(array, 0, sizeof(void *) * cnt);
60 }
61 
62 static void __rte_cold
63 ionic_tx_empty(struct ionic_tx_qcq *txq)
64 {
65 	struct ionic_queue *q = &txq->qcq.q;
66 
67 	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);
68 }
69 
70 static void __rte_cold
71 ionic_rx_empty(struct ionic_rx_qcq *rxq)
72 {
73 	struct ionic_queue *q = &rxq->qcq.q;
74 
75 	/*
76 	 * Walk the full info array so that the cleanup includes any
77 	 * fragments that were left dangling for later reuse
78 	 */
79 	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);
80 }
81 
82 /*********************************************************************
83  *
84  *  TX functions
85  *
86  **********************************************************************/
87 
88 void
89 ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
90 		struct rte_eth_txq_info *qinfo)
91 {
92 	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
93 	struct ionic_queue *q = &txq->qcq.q;
94 
95 	qinfo->nb_desc = q->num_descs;
96 	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
97 	if (txq->flags & IONIC_QCQ_F_FAST_FREE)
98 		qinfo->conf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
99 	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
100 }
101 
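/*
 * Reclaim Tx descriptors that the device has completed. Completions are
 * coalesced, so a single completion entry releases every descriptor
 * serviced up to its comp_index. The color bit in each completion
 * toggles every time the completion ring wraps, which distinguishes new
 * entries from stale ones. Reclaimed mbufs go straight back to their
 * mempool when IONIC_QCQ_F_FAST_FREE is set, otherwise they are freed
 * one segment at a time.
 */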
102 static __rte_always_inline void
103 ionic_tx_flush(struct ionic_tx_qcq *txq)
104 {
105 	struct ionic_cq *cq = &txq->qcq.cq;
106 	struct ionic_queue *q = &txq->qcq.q;
107 	struct rte_mbuf *txm;
108 	struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;
109 	void **info;
110 	uint32_t i;
111 
112 	cq_desc = &cq_desc_base[cq->tail_idx];
113 
114 	while (color_match(cq_desc->color, cq->done_color)) {
115 		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
116 		if (cq->tail_idx == 0)
117 			cq->done_color = !cq->done_color;
118 
119 		/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
120 		if ((cq->tail_idx & 0x3) == 0)
121 			rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
122 
123 		while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
124 			/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
125 			rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2)));
126 
127 			/* Prefetch next mbuf */
128 			void **next_info =
129 				IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1));
130 			if (next_info[0])
131 				rte_mbuf_prefetch_part2(next_info[0]);
132 			if (next_info[1])
133 				rte_mbuf_prefetch_part2(next_info[1]);
134 
135 			info = IONIC_INFO_PTR(q, q->tail_idx);
136 			for (i = 0; i < q->num_segs; i++) {
137 				txm = info[i];
138 				if (!txm)
139 					break;
140 
141 				if (txq->flags & IONIC_QCQ_F_FAST_FREE)
142 					rte_mempool_put(txm->pool, txm);
143 				else
144 					rte_pktmbuf_free_seg(txm);
145 
146 				info[i] = NULL;
147 			}
148 
149 			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
150 		}
151 
152 		cq_desc = &cq_desc_base[cq->tail_idx];
153 	}
154 }
155 
156 void __rte_cold
157 ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
158 {
159 	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];
160 
161 	IONIC_PRINT_CALL();
162 
163 	ionic_qcq_free(&txq->qcq);
164 }
165 
166 int __rte_cold
167 ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
168 {
169 	struct ionic_tx_stats *stats;
170 	struct ionic_tx_qcq *txq;
171 
172 	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);
173 
174 	txq = eth_dev->data->tx_queues[tx_queue_id];
175 
176 	eth_dev->data->tx_queue_state[tx_queue_id] =
177 		RTE_ETH_QUEUE_STATE_STOPPED;
178 
179 	/*
180 	 * Note: ideally we would post a NOP Tx descriptor and wait for its
181 	 * completion before disabling the Tx queue
182 	 */
183 
184 	ionic_lif_txq_deinit(txq);
185 
186 	/* Free all buffers from descriptor ring */
187 	ionic_tx_empty(txq);
188 
189 	stats = &txq->stats;
190 	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
191 		txq->qcq.q.index, stats->packets, stats->tso);
192 
193 	return 0;
194 }
195 
196 int __rte_cold
197 ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
198 		uint16_t nb_desc, uint32_t socket_id,
199 		const struct rte_eth_txconf *tx_conf)
200 {
201 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
202 	struct ionic_tx_qcq *txq;
203 	uint64_t offloads;
204 	int err;
205 
206 	if (tx_queue_id >= lif->ntxqcqs) {
207 		IONIC_PRINT(ERR, "Queue index %u not available "
208 			"(max %u queues)",
209 			tx_queue_id, lif->ntxqcqs);
210 		return -EINVAL;
211 	}
212 
213 	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
214 	IONIC_PRINT(DEBUG,
215 		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
216 		socket_id, tx_queue_id, nb_desc, offloads);
217 
218 	/* Validate number of transmit descriptors */
219 	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
220 		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
221 
222 	/* Free memory prior to re-allocation if needed... */
223 	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
224 		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
225 		eth_dev->data->tx_queues[tx_queue_id] = NULL;
226 	}
227 
228 	eth_dev->data->tx_queue_state[tx_queue_id] =
229 		RTE_ETH_QUEUE_STATE_STOPPED;
230 
231 	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
232 	if (err) {
233 		IONIC_PRINT(ERR, "Queue allocation failure");
234 		return -EINVAL;
235 	}
236 
237 	/* Do not start queue with rte_eth_dev_start() */
238 	if (tx_conf->tx_deferred_start)
239 		txq->flags |= IONIC_QCQ_F_DEFERRED;
240 
241 	/* Convert the offload flags into queue flags */
242 	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
243 		txq->flags |= IONIC_QCQ_F_CSUM_L3;
244 	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
245 		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
246 	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
247 		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
248 	if (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
249 		txq->flags |= IONIC_QCQ_F_FAST_FREE;
250 
251 	eth_dev->data->tx_queues[tx_queue_id] = txq;
252 
253 	return 0;
254 }
255 
256 /*
257  * Start Transmit Units for specified queue.
258  */
259 int __rte_cold
260 ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
261 {
262 	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
263 	struct ionic_tx_qcq *txq;
264 	int err;
265 
266 	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
267 		IONIC_PRINT(DEBUG, "TX queue %u already started",
268 			tx_queue_id);
269 		return 0;
270 	}
271 
272 	txq = eth_dev->data->tx_queues[tx_queue_id];
273 
274 	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
275 		tx_queue_id, txq->qcq.q.num_descs);
276 
277 	err = ionic_lif_txq_init(txq);
278 	if (err)
279 		return err;
280 
281 	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
282 
283 	return 0;
284 }
285 
286 static void
287 ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
288 {
289 	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
290 	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
291 	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
292 		(l3_hdr + txm->l3_len);
293 
294 	if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
295 		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
296 		ipv4_hdr->hdr_checksum = 0;
297 		tcp_hdr->cksum = 0;
298 		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
299 	} else {
300 		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
301 		tcp_hdr->cksum = 0;
302 		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
303 	}
304 }
305 
306 static void
307 ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
308 {
309 	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
310 	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
311 		txm->outer_l3_len + txm->l2_len;
312 	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
313 		(l3_hdr + txm->l3_len);
314 
315 	if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
316 		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
317 		ipv4_hdr->hdr_checksum = 0;
318 		tcp_hdr->cksum = 0;
319 		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
320 	} else {
321 		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
322 		tcp_hdr->cksum = 0;
323 		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
324 	}
325 }
326 
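/*
 * Post one descriptor of a TSO train. The SOT/EOT flags mark the first
 * and last descriptors of the train. The mbuf segment pointers are
 * stashed in the info array of the final descriptor only (done), so the
 * whole chain is released when that descriptor completes.
 */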
327 static void
328 ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
329 		struct rte_mbuf *txm,
330 		rte_iova_t addr, uint8_t nsge, uint16_t len,
331 		uint32_t hdrlen, uint32_t mss,
332 		bool encap,
333 		uint16_t vlan_tci, bool has_vlan,
334 		bool start, bool done)
335 {
336 	struct rte_mbuf *txm_seg;
337 	void **info;
338 	uint64_t cmd;
339 	uint8_t flags = 0;
340 	int i;
341 
342 	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
343 	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
344 	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
345 	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;
346 
347 	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
348 		flags, nsge, addr);
349 	desc->cmd = rte_cpu_to_le_64(cmd);
350 	desc->len = rte_cpu_to_le_16(len);
351 	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
352 	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
353 	desc->mss = rte_cpu_to_le_16(mss);
354 
355 	if (done) {
356 		info = IONIC_INFO_PTR(q, q->head_idx);
357 
358 		/* Walk the mbuf chain to stash pointers in the array */
359 		txm_seg = txm;
360 		for (i = 0; i < txm->nb_segs; i++) {
361 			info[i] = txm_seg;
362 			txm_seg = txm_seg->next;
363 		}
364 	}
365 
366 	q->head_idx = Q_NEXT_TO_POST(q, 1);
367 }
368 
369 static struct ionic_txq_desc *
370 ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
371 {
372 	struct ionic_queue *q = &txq->qcq.q;
373 	struct ionic_txq_desc *desc_base = q->base;
374 	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
375 	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
376 	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];
377 
378 	*elem = sg_desc->elems;
379 	return desc;
380 }
381 
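/*
 * Transmit one TSO packet. The payload is carved into MSS-sized device
 * segments; the first descriptor also covers the L2/L3/L4 headers
 * (hdrlen). frag_left tracks the room remaining in the current device
 * segment when an mbuf data buffer ends mid-segment, in which case the
 * next buffer continues that segment through SG elements.
 */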
382 static int
383 ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
384 {
385 	struct ionic_queue *q = &txq->qcq.q;
386 	struct ionic_tx_stats *stats = &txq->stats;
387 	struct ionic_txq_desc *desc;
388 	struct ionic_txq_sg_elem *elem;
389 	struct rte_mbuf *txm_seg;
390 	rte_iova_t data_iova;
391 	uint64_t desc_addr = 0, next_addr;
392 	uint16_t desc_len = 0;
393 	uint8_t desc_nsge;
394 	uint32_t hdrlen;
395 	uint32_t mss = txm->tso_segsz;
396 	uint32_t frag_left = 0;
397 	uint32_t left;
398 	uint32_t seglen;
399 	uint32_t len;
400 	uint32_t offset = 0;
401 	bool start, done;
402 	bool encap;
403 	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
404 	uint16_t vlan_tci = txm->vlan_tci;
405 	uint64_t ol_flags = txm->ol_flags;
406 
407 	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
408 		 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
409 		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
410 		 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));
411 
412 	/* Preload inner-most TCP csum field with IP pseudo hdr
413 	 * calculated with the IP length set to zero.  HW will later
414 	 * add in the length of each TCP segment resulting from the TSO.
415 	 */
416 
417 	if (encap) {
418 		ionic_tx_tcp_inner_pseudo_csum(txm);
419 		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
420 			txm->l2_len + txm->l3_len + txm->l4_len;
421 	} else {
422 		ionic_tx_tcp_pseudo_csum(txm);
423 		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
424 	}
425 
426 	seglen = hdrlen + mss;
427 	left = txm->data_len;
428 	data_iova = rte_mbuf_data_iova(txm);
429 
430 	desc = ionic_tx_tso_next(txq, &elem);
431 	start = true;
432 
433 	/* Chop data up into desc segments */
434 
435 	while (left > 0) {
436 		len = RTE_MIN(seglen, left);
437 		frag_left = seglen - len;
438 		desc_addr = rte_cpu_to_le_64(data_iova + offset);
439 		desc_len = len;
440 		desc_nsge = 0;
441 		left -= len;
442 		offset += len;
443 		if (txm->nb_segs > 1 && frag_left > 0)
444 			continue;
445 		done = (txm->nb_segs == 1 && left == 0);
446 		ionic_tx_tso_post(q, desc, txm,
447 			desc_addr, desc_nsge, desc_len,
448 			hdrlen, mss,
449 			encap,
450 			vlan_tci, has_vlan,
451 			start, done);
452 		desc = ionic_tx_tso_next(txq, &elem);
453 		start = false;
454 		seglen = mss;
455 	}
456 
457 	/* Chop frags into desc segments */
458 
459 	txm_seg = txm->next;
460 	while (txm_seg != NULL) {
461 		offset = 0;
462 		data_iova = rte_mbuf_data_iova(txm_seg);
463 		left = txm_seg->data_len;
464 
465 		while (left > 0) {
466 			next_addr = rte_cpu_to_le_64(data_iova + offset);
467 			if (frag_left > 0) {
468 				len = RTE_MIN(frag_left, left);
469 				frag_left -= len;
470 				elem->addr = next_addr;
471 				elem->len = rte_cpu_to_le_16(len);
472 				elem++;
473 				desc_nsge++;
474 			} else {
475 				len = RTE_MIN(mss, left);
476 				frag_left = mss - len;
477 				desc_addr = next_addr;
478 				desc_len = len;
479 				desc_nsge = 0;
480 			}
481 			left -= len;
482 			offset += len;
483 			if (txm_seg->next != NULL && frag_left > 0)
484 				continue;
485 
486 			done = (txm_seg->next == NULL && left == 0);
487 			ionic_tx_tso_post(q, desc, txm_seg,
488 				desc_addr, desc_nsge, desc_len,
489 				hdrlen, mss,
490 				encap,
491 				vlan_tci, has_vlan,
492 				start, done);
493 			desc = ionic_tx_tso_next(txq, &elem);
494 			start = false;
495 		}
496 
497 		txm_seg = txm_seg->next;
498 	}
499 
500 	stats->tso++;
501 
502 	return 0;
503 }
504 
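/*
 * Post one non-TSO packet. A single descriptor carries the first mbuf
 * segment; any additional segments are attached as SG elements. The
 * checksum offload opcode is selected from the mbuf offload flags, but
 * only for offloads that were enabled on the queue at setup time.
 */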
505 static __rte_always_inline int
506 ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
507 {
508 	struct ionic_queue *q = &txq->qcq.q;
509 	struct ionic_txq_desc *desc, *desc_base = q->base;
510 	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
511 	struct ionic_txq_sg_elem *elem;
512 	struct ionic_tx_stats *stats = &txq->stats;
513 	struct rte_mbuf *txm_seg;
514 	void **info;
515 	rte_iova_t data_iova;
516 	uint64_t ol_flags = txm->ol_flags;
517 	uint64_t addr, cmd;
518 	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
519 	uint8_t flags = 0;
520 
521 	desc = &desc_base[q->head_idx];
522 	info = IONIC_INFO_PTR(q, q->head_idx);
523 
524 	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
525 	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
526 		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
527 		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
528 	}
529 
530 	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
531 	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
532 	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
533 	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
534 		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
535 		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
536 	}
537 
538 	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
539 		stats->no_csum++;
540 
541 	if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
542 	     (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
543 	    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
544 	     (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
545 		flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
546 	}
547 
548 	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
549 		flags |= IONIC_TXQ_DESC_FLAG_VLAN;
550 		desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
551 	}
552 
553 	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));
554 
555 	cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
556 	desc->cmd = rte_cpu_to_le_64(cmd);
557 	desc->len = rte_cpu_to_le_16(txm->data_len);
558 
559 	info[0] = txm;
560 
561 	if (txm->nb_segs > 1) {
562 		txm_seg = txm->next;
563 
564 		elem = sg_desc_base[q->head_idx].elems;
565 
566 		while (txm_seg != NULL) {
567 			/* Stash the mbuf ptr in the array */
568 			info++;
569 			*info = txm_seg;
570 
571 			/* Configure the SGE */
572 			data_iova = rte_mbuf_data_iova(txm_seg);
573 			elem->len = rte_cpu_to_le_16(txm_seg->data_len);
574 			elem->addr = rte_cpu_to_le_64(data_iova);
575 			elem++;
576 
577 			txm_seg = txm_seg->next;
578 		}
579 	}
580 
581 	q->head_idx = Q_NEXT_TO_POST(q, 1);
582 
583 	return 0;
584 }
585 
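/*
 * Burst transmit: reclaim completed descriptors first, clamp the burst
 * to the available descriptor count (counting the shortfall in
 * stats->stop), post each packet, then ring the doorbell once for the
 * whole burst.
 */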
586 uint16_t
587 ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
588 		uint16_t nb_pkts)
589 {
590 	struct ionic_tx_qcq *txq = tx_queue;
591 	struct ionic_queue *q = &txq->qcq.q;
592 	struct ionic_tx_stats *stats = &txq->stats;
593 	struct rte_mbuf *mbuf;
594 	uint32_t bytes_tx = 0;
595 	uint16_t nb_avail, nb_tx = 0;
596 	int err;
597 
598 	struct ionic_txq_desc *desc_base = q->base;
599 	rte_prefetch0(&desc_base[q->head_idx]);
600 	rte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));
601 
602 	if (tx_pkts) {
603 		rte_mbuf_prefetch_part1(tx_pkts[0]);
604 		rte_mbuf_prefetch_part2(tx_pkts[0]);
605 	}
606 
607 	/* Cleaning old buffers */
608 	ionic_tx_flush(txq);
609 
610 	nb_avail = ionic_q_space_avail(q);
611 	if (unlikely(nb_avail < nb_pkts)) {
612 		stats->stop += nb_pkts - nb_avail;
613 		nb_pkts = nb_avail;
614 	}
615 
616 	while (nb_tx < nb_pkts) {
617 		uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
618 		rte_prefetch0(&desc_base[next_idx]);
619 		rte_prefetch0(IONIC_INFO_PTR(q, next_idx));
620 
621 		if (nb_tx + 1 < nb_pkts) {
622 			rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
623 			rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
624 		}
625 
626 		mbuf = tx_pkts[nb_tx];
627 
628 		if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
629 			err = ionic_tx_tso(txq, mbuf);
630 		else
631 			err = ionic_tx(txq, mbuf);
632 		if (err) {
633 			stats->drop += nb_pkts - nb_tx;
634 			break;
635 		}
636 
637 		bytes_tx += mbuf->pkt_len;
638 		nb_tx++;
639 	}
640 
641 	if (nb_tx > 0) {
642 		rte_wmb();
643 		ionic_q_flush(q);
644 
645 		stats->packets += nb_tx;
646 		stats->bytes += bytes_tx;
647 	}
648 
649 	return nb_tx;
650 }
651 
652 /*********************************************************************
653  *
654  *  TX prep functions
655  *
656  **********************************************************************/
657 
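/*
 * Tx offload flags handled by this driver; any other flag causes
 * ionic_prep_pkts() to reject the packet and set rte_errno.
 */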
658 #define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 |		\
659 	RTE_MBUF_F_TX_IPV6 |		\
660 	RTE_MBUF_F_TX_VLAN |		\
661 	RTE_MBUF_F_TX_IP_CKSUM |	\
662 	RTE_MBUF_F_TX_TCP_SEG |	\
663 	RTE_MBUF_F_TX_L4_MASK)
664 
665 #define IONIC_TX_OFFLOAD_NOTSUP_MASK \
666 	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
667 
668 uint16_t
669 ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
670 {
671 	struct ionic_tx_qcq *txq = tx_queue;
672 	struct rte_mbuf *txm;
673 	uint64_t offloads;
674 	int i = 0;
675 
676 	for (i = 0; i < nb_pkts; i++) {
677 		txm = tx_pkts[i];
678 
679 		if (txm->nb_segs > txq->num_segs_fw) {
680 			rte_errno = EINVAL;
681 			break;
682 		}
683 
684 		offloads = txm->ol_flags;
685 
686 		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
687 			rte_errno = ENOTSUP;
688 			break;
689 		}
690 	}
691 
692 	return i;
693 }
694 
695 /*********************************************************************
696  *
697  *  RX functions
698  *
699  **********************************************************************/
700 
701 void
702 ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
703 		struct rte_eth_rxq_info *qinfo)
704 {
705 	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
706 	struct ionic_queue *q = &rxq->qcq.q;
707 
708 	qinfo->mp = rxq->mb_pool;
709 	qinfo->scattered_rx = dev->data->scattered_rx;
710 	qinfo->nb_desc = q->num_descs;
711 	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
712 	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
713 }
714 
715 void __rte_cold
716 ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
717 {
718 	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];
719 
720 	if (!rxq)
721 		return;
722 
723 	IONIC_PRINT_CALL();
724 
725 	ionic_qcq_free(&rxq->qcq);
726 }
727 
728 int __rte_cold
729 ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
730 		uint16_t rx_queue_id,
731 		uint16_t nb_desc,
732 		uint32_t socket_id,
733 		const struct rte_eth_rxconf *rx_conf,
734 		struct rte_mempool *mp)
735 {
736 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
737 	struct ionic_rx_qcq *rxq;
738 	uint64_t offloads;
739 	int err;
740 
741 	if (rx_queue_id >= lif->nrxqcqs) {
742 		IONIC_PRINT(ERR,
743 			"Queue index %u not available (max %u queues)",
744 			rx_queue_id, lif->nrxqcqs);
745 		return -EINVAL;
746 	}
747 
748 	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
749 	IONIC_PRINT(DEBUG,
750 		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
751 		socket_id, rx_queue_id, nb_desc, offloads);
752 
753 	if (!rx_conf->rx_drop_en)
754 		IONIC_PRINT(WARNING, "No-drop mode is not supported");
755 
756 	/* Validate number of receive descriptors */
757 	if (!rte_is_power_of_2(nb_desc) ||
758 			nb_desc < IONIC_MIN_RING_DESC ||
759 			nb_desc > IONIC_MAX_RING_DESC) {
760 		IONIC_PRINT(ERR,
761 			"Bad descriptor count (%u) for queue %u (min: %u)",
762 			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
763 		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
764 	}
765 
766 	/* Free memory prior to re-allocation if needed... */
767 	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
768 		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
769 		eth_dev->data->rx_queues[rx_queue_id] = NULL;
770 	}
771 
772 	eth_dev->data->rx_queue_state[rx_queue_id] =
773 		RTE_ETH_QUEUE_STATE_STOPPED;
774 
775 	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, mp,
776 			&rxq);
777 	if (err) {
778 		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
779 		return -EINVAL;
780 	}
781 
782 	rxq->mb_pool = mp;
783 
784 	/*
785 	 * Note: the interface does not currently support
786 	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC. Also account for RTE_ETHER_CRC_LEN
787 	 * once the adapter is able to keep the CRC, and subtract it from
788 	 * the length of all received packets:
789 	 * if (eth_dev->data->dev_conf.rxmode.offloads &
790 	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
791 	 *   rxq->crc_len = RTE_ETHER_CRC_LEN;
792 	 */
793 
794 	/* Do not start queue with rte_eth_dev_start() */
795 	if (rx_conf->rx_deferred_start)
796 		rxq->flags |= IONIC_QCQ_F_DEFERRED;
797 
798 	eth_dev->data->rx_queues[rx_queue_id] = rxq;
799 
800 	return 0;
801 }
802 
803 /*
804  * Cleans one descriptor. Connects the filled mbufs into a chain.
805  * Does not advance the tail index.
806  */
807 static __rte_always_inline void
808 ionic_rx_clean_one(struct ionic_rx_qcq *rxq,
809 		struct ionic_rxq_comp *cq_desc,
810 		struct ionic_rx_service *rx_svc)
811 {
812 	struct ionic_queue *q = &rxq->qcq.q;
813 	struct rte_mbuf *rxm, *rxm_seg, *prev_rxm;
814 	struct ionic_rx_stats *stats = &rxq->stats;
815 	uint64_t pkt_flags = 0;
816 	uint32_t pkt_type;
817 	uint32_t left, i;
818 	uint16_t cq_desc_len;
819 	void **info;
820 
821 	cq_desc_len = rte_le_to_cpu_16(cq_desc->len);
822 
823 	info = IONIC_INFO_PTR(q, q->tail_idx);
824 
825 	rxm = info[0];
826 
827 	if (cq_desc->status) {
828 		stats->bad_cq_status++;
829 		return;
830 	}
831 
832 	if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
833 		stats->bad_len++;
834 		return;
835 	}
836 
837 	info[0] = NULL;
838 
839 	/* Set the mbuf metadata based on the cq entry */
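	/*
	 * The precomputed 64-bit rearm word (rxq->rearm_data) reinitializes
	 * data_off, refcnt, nb_segs and port with a single store.
	 */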
840 	rxm->rearm_data[0] = rxq->rearm_data;
841 	rxm->pkt_len = cq_desc_len;
842 	rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);
843 	left = cq_desc_len - rxm->data_len;
844 	rxm->nb_segs = cq_desc->num_sg_elems + 1;
845 	prev_rxm = rxm;
846 
847 	for (i = 1; i < rxm->nb_segs && left; i++) {
848 		rxm_seg = info[i];
849 		info[i] = NULL;
850 
851 		/* Set the chained mbuf metadata */
852 		rxm_seg->rearm_data[0] = rxq->rearm_seg_data;
853 		rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
854 		left -= rxm_seg->data_len;
855 
856 		/* Link the mbuf */
857 		prev_rxm->next = rxm_seg;
858 		prev_rxm = rxm_seg;
859 	}
860 
861 	/* Terminate the mbuf chain */
862 	prev_rxm->next = NULL;
863 
864 	/* RSS */
865 	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
866 	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);
867 
868 	/* Vlan Strip */
869 	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
870 		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
871 		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
872 	}
873 
874 	/* Checksum */
875 	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
876 		if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
877 			pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
878 		else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
879 			pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
880 
881 		if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
882 			(cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
883 			pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
884 		else if ((cq_desc->csum_flags &
885 				IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
886 				(cq_desc->csum_flags &
887 				IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
888 			pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
889 	}
890 
891 	rxm->ol_flags = pkt_flags;
892 
893 	/* Packet Type */
894 	switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
895 	case IONIC_PKT_TYPE_IPV4:
896 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
897 		break;
898 	case IONIC_PKT_TYPE_IPV6:
899 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
900 		break;
901 	case IONIC_PKT_TYPE_IPV4_TCP:
902 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
903 			RTE_PTYPE_L4_TCP;
904 		break;
905 	case IONIC_PKT_TYPE_IPV6_TCP:
906 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
907 			RTE_PTYPE_L4_TCP;
908 		break;
909 	case IONIC_PKT_TYPE_IPV4_UDP:
910 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
911 			RTE_PTYPE_L4_UDP;
912 		break;
913 	case IONIC_PKT_TYPE_IPV6_UDP:
914 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
915 			RTE_PTYPE_L4_UDP;
916 		break;
917 	default:
918 		{
919 			struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
920 				struct rte_ether_hdr *);
921 			uint16_t ether_type = eth_h->ether_type;
922 			if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
923 				pkt_type = RTE_PTYPE_L2_ETHER_ARP;
924 			else
925 				pkt_type = RTE_PTYPE_UNKNOWN;
926 			stats->mtods++;
927 			break;
928 		}
929 	}
930 
931 	rxm->packet_type = pkt_type;
932 
933 	rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
934 	rx_svc->nb_rx++;
935 
936 	stats->packets++;
937 	stats->bytes += rxm->pkt_len;
938 }
939 
940 /*
941  * Fills one descriptor with mbufs. Does not advance the head index.
942  */
943 static __rte_always_inline int
944 ionic_rx_fill_one(struct ionic_rx_qcq *rxq)
945 {
946 	struct ionic_queue *q = &rxq->qcq.q;
947 	struct rte_mbuf *rxm, *rxm_seg;
948 	struct ionic_rxq_desc *desc, *desc_base = q->base;
949 	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
950 	rte_iova_t data_iova;
951 	uint32_t i;
952 	void **info;
953 
954 	info = IONIC_INFO_PTR(q, q->head_idx);
955 	desc = &desc_base[q->head_idx];
956 	sg_desc = &sg_desc_base[q->head_idx];
957 
958 	/* mbuf is unused => whole chain is unused */
959 	if (unlikely(info[0]))
960 		return 0;
961 
962 	rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
963 	if (unlikely(rxm == NULL)) {
964 		assert(0);
965 		return -ENOMEM;
966 	}
967 
968 	info[0] = rxm;
969 
970 	data_iova = rte_mbuf_data_iova_default(rxm);
971 	desc->addr = rte_cpu_to_le_64(data_iova);
972 
973 	for (i = 1; i < q->num_segs; i++) {
974 		/* mbuf is unused => rest of the chain is unused */
975 		if (info[i])
976 			return 0;
977 
978 		rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
979 		if (rxm_seg == NULL) {
980 			assert(0);
981 			return -ENOMEM;
982 		}
983 
984 		info[i] = rxm_seg;
985 
986 		/* The data_off does not get set to 0 until later */
987 		data_iova = rxm_seg->buf_iova;
988 		sg_desc->elems[i - 1].addr = rte_cpu_to_le_64(data_iova);
989 	}
990 
991 	return 0;
992 }
993 
994 /*
995  * Fills all descriptors with mbufs.
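 * Note: only num_descs - 1 buffers are posted; one descriptor slot is
 * left open, as is usual for distinguishing a full ring from an empty
 * one.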
996  */
997 static int __rte_cold
998 ionic_rx_fill(struct ionic_rx_qcq *rxq)
999 {
1000 	struct ionic_queue *q = &rxq->qcq.q;
1001 	uint32_t i;
1002 	int err;
1003 
1004 	for (i = 1; i < q->num_descs; i++) {
1005 		err = ionic_rx_fill_one(rxq);
1006 		if (err)
1007 			return err;
1008 
1009 		q->head_idx = Q_NEXT_TO_POST(q, 1);
1010 	}
1011 
1012 	ionic_q_flush(q);
1013 
1014 	return 0;
1015 }
1016 
1017 /*
1018  * Perform one-time initialization of descriptor fields
1019  * which will not change for the life of the queue.
1020  */
1021 static void __rte_cold
1022 ionic_rx_init_descriptors(struct ionic_rx_qcq *rxq)
1023 {
1024 	struct ionic_queue *q = &rxq->qcq.q;
1025 	struct ionic_rxq_desc *desc, *desc_base = q->base;
1026 	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
1027 	uint32_t i, j;
1028 	uint8_t opcode;
1029 
1030 	opcode = (q->num_segs > 1) ?
1031 		IONIC_RXQ_DESC_OPCODE_SG : IONIC_RXQ_DESC_OPCODE_SIMPLE;
1032 
1033 	/*
1034 	 * NB: Only the first segment needs to leave headroom (hdr_seg_size).
1035 	 *     Later segments (seg_size) do not.
1036 	 */
1037 	for (i = 0; i < q->num_descs; i++) {
1038 		desc = &desc_base[i];
1039 		desc->len = rte_cpu_to_le_16(rxq->hdr_seg_size);
1040 		desc->opcode = opcode;
1041 
1042 		sg_desc = &sg_desc_base[i];
1043 		for (j = 0; j < q->num_segs - 1u; j++)
1044 			sg_desc->elems[j].len =
1045 				rte_cpu_to_le_16(rxq->seg_size);
1046 	}
1047 }
1048 
1049 /*
1050  * Start Receive Units for specified queue.
1051  */
1052 int __rte_cold
1053 ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
1054 {
1055 	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
1056 	struct ionic_rx_qcq *rxq;
1057 	struct ionic_queue *q;
1058 	int err;
1059 
1060 	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
1061 		IONIC_PRINT(DEBUG, "RX queue %u already started",
1062 			rx_queue_id);
1063 		return 0;
1064 	}
1065 
1066 	rxq = eth_dev->data->rx_queues[rx_queue_id];
1067 	q = &rxq->qcq.q;
1068 
1069 	rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;
1070 
1071 	/* Recalculate segment count based on MTU */
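	/*
	 * This is the ceiling of (frame_size + RTE_PKTMBUF_HEADROOM) /
	 * seg_size: the first buffer reserves headroom and so holds less
	 * packet data (hdr_seg_size) than the later buffers, and adding
	 * the headroom before rounding up keeps the total capacity at or
	 * above frame_size.
	 */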
1072 	q->num_segs = 1 +
1073 		(rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size;
1074 
1075 	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u segs %u",
1076 		rx_queue_id, q->num_descs, rxq->frame_size, q->num_segs);
1077 
1078 	ionic_rx_init_descriptors(rxq);
1079 
1080 	err = ionic_lif_rxq_init(rxq);
1081 	if (err)
1082 		return err;
1083 
1084 	/* Allocate buffers for descriptor rings */
1085 	if (ionic_rx_fill(rxq) != 0) {
1086 		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
1087 			rx_queue_id);
1088 		return -1;
1089 	}
1090 
1091 	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1092 
1093 	return 0;
1094 }
1095 
1096 /*
1097  * Walk the CQ to find completed receive descriptors.
1098  * Any completed descriptor found is refilled.
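 * Descriptors are refilled in place as they are cleaned, and the
 * doorbell is rung once at the end of the burst.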
1099  */
1100 static __rte_always_inline void
1101 ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
1102 		struct ionic_rx_service *rx_svc)
1103 {
1104 	struct ionic_cq *cq = &rxq->qcq.cq;
1105 	struct ionic_queue *q = &rxq->qcq.q;
1106 	struct ionic_rxq_desc *q_desc_base = q->base;
1107 	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
1108 	uint32_t work_done = 0;
1109 
1110 	cq_desc = &cq_desc_base[cq->tail_idx];
1111 
1112 	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
1113 		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
1114 
1115 		if (cq->tail_idx == 0)
1116 			cq->done_color = !cq->done_color;
1117 
1118 		/* Prefetch 8 x 8B bufinfo */
1119 		rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));
1120 		/* Prefetch 4 x 16B comp */
1121 		rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
1122 		/* Prefetch 4 x 16B descriptors */
1123 		rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);
1124 
1125 		ionic_rx_clean_one(rxq, cq_desc, rx_svc);
1126 
1127 		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
1128 
1129 		(void)ionic_rx_fill_one(rxq);
1130 
1131 		q->head_idx = Q_NEXT_TO_POST(q, 1);
1132 
1133 		if (++work_done == work_to_do)
1134 			break;
1135 
1136 		cq_desc = &cq_desc_base[cq->tail_idx];
1137 	}
1138 
1139 	/* Update the queue indices and ring the doorbell */
1140 	if (work_done)
1141 		ionic_q_flush(q);
1142 }
1143 
1144 /*
1145  * Stop Receive Units for specified queue.
1146  */
1147 int __rte_cold
1148 ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
1149 {
1150 	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
1151 	struct ionic_rx_stats *stats;
1152 	struct ionic_rx_qcq *rxq;
1153 
1154 	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);
1155 
1156 	rxq = eth_dev->data->rx_queues[rx_queue_id];
1157 
1158 	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
1159 
1160 	ionic_lif_rxq_deinit(rxq);
1161 
1162 	/* Free all buffers from descriptor ring */
1163 	ionic_rx_empty(rxq);
1164 
1165 	stats = &rxq->stats;
1166 	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
1167 		rxq->qcq.q.index, stats->packets, stats->mtods);
1168 
1169 	return 0;
1170 }
1171 
1172 uint16_t
1173 ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1174 		uint16_t nb_pkts)
1175 {
1176 	struct ionic_rx_qcq *rxq = rx_queue;
1177 	struct ionic_rx_service rx_svc;
1178 
1179 	rx_svc.rx_pkts = rx_pkts;
1180 	rx_svc.nb_rx = 0;
1181 
1182 	ionic_rxq_service(rxq, nb_pkts, &rx_svc);
1183 
1184 	return rx_svc.nb_rx;
1185 }
1186