/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>

#include "ionic.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_ethdev.h"
#include "ionic_rxtx.h"
#include "ionic_logs.h"

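/*
 * Free any mbuf segments found in array[idx..cnt) and zero the entire array.
 */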
static void
ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)
{
	uint32_t i;

	for (i = idx; i < cnt; i++)
		if (array[i])
			rte_pktmbuf_free_seg(array[i]);

	memset(array, 0, sizeof(void *) * cnt);
}

static void __rte_cold
ionic_tx_empty(struct ionic_tx_qcq *txq)
{
	struct ionic_queue *q = &txq->qcq.q;

	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);
}

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;

	/*
	 * Walk the full info array so that the cleanup includes any
	 * fragments that were left dangling for later reuse
	 */
	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);

	ionic_empty_array((void **)rxq->mbs,
			IONIC_MBUF_BULK_ALLOC, rxq->mb_idx);
	rxq->mb_idx = 0;
}

/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	if (txq->flags & IONIC_QCQ_F_FAST_FREE)
		qinfo->conf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}

void __rte_cold
ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];

	IONIC_PRINT_CALL();

	ionic_qcq_free(&txq->qcq);
}

int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_tx_stats *stats;
	struct ionic_tx_qcq *txq;

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	txq = eth_dev->data->tx_queues[tx_queue_id];

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	/*
	 * Note: ideally we would post a NOP Tx descriptor and wait for its
	 * completion before disabling the Tx queue
	 */

	ionic_lif_txq_deinit(txq);

	/* Free all buffers from descriptor ring */
	ionic_tx_empty(txq);

	stats = &txq->stats;
	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
		txq->qcq.q.index, stats->packets, stats->tso);
	IONIC_PRINT(DEBUG, "TX queue %u comps %ju (%ju per)",
		txq->qcq.q.index, stats->comps,
		stats->comps ? stats->packets / stats->comps : 0);

	return 0;
}

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	if (tx_conf->tx_free_thresh > nb_desc) {
		IONIC_PRINT(ERR,
			"tx_free_thresh cannot be greater than nb_desc (%u)",
			nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
	if (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		txq->flags |= IONIC_QCQ_F_FAST_FREE;

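	/* Use the caller's threshold, else leave one default burst of slack */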
	txq->free_thresh =
		tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
		nb_desc - IONIC_DEF_TXRX_BURST;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_tx_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->qcq.q.num_descs);

	err = ionic_lif_txq_init(txq);
	if (err)
		return err;

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	struct rte_mbuf *txm_seg;
	void **info;
	uint64_t cmd;
	uint8_t flags = 0;
	int i;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(len);
	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
	desc->mss = rte_cpu_to_le_16(mss);

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);

		/* Walk the mbuf chain to stash pointers in the array */
		txm_seg = txm;
		for (i = 0; i < txm->nb_segs; i++) {
			info[i] = txm_seg;
			txm_seg = txm_seg->next;
		}
	}

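	/* Advance the producer index; the caller rings the doorbell later */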
	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}

int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge = 0;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
	bool use_sgl = !!(txq->flags & IONIC_QCQ_F_SG);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	/* Preload the inner-most TCP csum field with the IP pseudo hdr
	 * checksum, calculated with the IP length set to zero.  HW will
	 * later add in the length of each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	desc = ionic_tx_tso_next(txq, &elem);
	txm_seg = txm;
	start = true;
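	/*
	 * The first HW segment carries the headers plus one MSS of payload;
	 * each subsequent segment carries one MSS alone.
	 */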
	seglen = hdrlen + mss;

	/* Walk the chain of mbufs */
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		/* Split the mbuf data up into multiple descriptors */
		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0 && use_sgl) {
				/* Fill previous descriptor's SGE */
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = rte_cpu_to_le_16(len);
				elem++;
				desc_nsge++;
			} else {
				/* Fill new descriptor's data field */
				len = RTE_MIN(seglen, left);
				frag_left = seglen - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;

			/* Pack the next mbuf's data into the descriptor */
			if (txm_seg->next != NULL && frag_left > 0 && use_sgl)
				break;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
			seglen = mss;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}

/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/

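/* Tx offload flags handled by this driver; ionic_prep_pkts() rejects the rest */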
#define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 |	\
	RTE_MBUF_F_TX_IPV6 |		\
	RTE_MBUF_F_TX_VLAN |		\
	RTE_MBUF_F_TX_IP_CKSUM |	\
	RTE_MBUF_F_TX_TCP_SEG |		\
	RTE_MBUF_F_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)

uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > txq->num_segs_fw) {
			rte_errno = EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			break;
		}
	}

	return i;
}

/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

void __rte_cold
ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];

	if (!rxq)
		return;

	IONIC_PRINT_CALL();

	ionic_qcq_free(&rxq->qcq);
}

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
			nb_desc < IONIC_MIN_RING_DESC ||
			nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (must be a power of 2, min %u, max %u)",
			nb_desc, rx_queue_id,
			IONIC_MIN_RING_DESC, IONIC_MAX_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, mp,
			&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;
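	/* Default timeout for the receive queue watchdog */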
	rxq->wdog_ms = IONIC_Q_WDOG_MS;

	/*
	 * Note: the interface does not currently support
	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC.  Once the adapter is able to keep
	 * the CRC, also account for ETHER_CRC_LEN by subtracting it from
	 * the length of all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
	 *   rxq->crc_len = ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}

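/*
 * Map the completion csum flags (all bits below IONIC_RXQ_COMP_CSUM_F_VLAN)
 * directly to the corresponding mbuf checksum ol_flags.
 */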
#define IONIC_CSUM_FLAG_MASK (IONIC_RXQ_COMP_CSUM_F_VLAN - 1)
const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
		__rte_cache_aligned = {
	/* IP_BAD set */
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD] = RTE_MBUF_F_RX_IP_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
			RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
			RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
			RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
			RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* IP_OK set */
	[IONIC_RXQ_COMP_CSUM_F_IP_OK] = RTE_MBUF_F_RX_IP_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* No IP flag set */
	[IONIC_RXQ_COMP_CSUM_F_TCP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_TCP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
};

/* RTE_PTYPE_UNKNOWN is 0x0 */
const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]
		__rte_cache_aligned = {
	[IONIC_PKT_TYPE_NON_IP]   = RTE_PTYPE_UNKNOWN,
	[IONIC_PKT_TYPE_IPV4]     = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
	[IONIC_PKT_TYPE_IPV4_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV4_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[IONIC_PKT_TYPE_IPV6]     = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
	[IONIC_PKT_TYPE_IPV6_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV6_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
};

const uint32_t *
ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused,
			       size_t *no_of_elements)
{
	/* See ionic_ptype_table[] */
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
	};

	*no_of_elements = RTE_DIM(ptypes);
	return ptypes;
}

/*
 * Perform one-time initialization of descriptor fields
 * which will not change for the life of the queue.
 */
static void __rte_cold
ionic_rx_init_descriptors(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	uint32_t i, j;
	uint8_t opcode;

	opcode = (q->num_segs > 1) ?
		IONIC_RXQ_DESC_OPCODE_SG : IONIC_RXQ_DESC_OPCODE_SIMPLE;

	/*
	 * NB: Only the first segment needs to leave headroom (hdr_seg_size).
	 *     Later segments (seg_size) do not.
	 */
	for (i = 0; i < q->num_descs; i++) {
		desc = &desc_base[i];
		desc->len = rte_cpu_to_le_16(rxq->hdr_seg_size);
		desc->opcode = opcode;

		sg_desc = &sg_desc_base[i];
		for (j = 0; j < q->num_segs - 1u; j++)
			sg_desc->elems[j].len =
				rte_cpu_to_le_16(rxq->seg_size);
	}
}

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq;
	struct ionic_queue *q;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	q = &rxq->qcq.q;

	rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;

	/* Recalculate segment count based on MTU */
	q->num_segs = 1 +
		(rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size;
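	/*
	 * e.g. assuming 2048-byte data segments, the default 128-byte
	 * headroom, and a 9000-byte frame:
	 *   1 + (9000 + 128 - 1) / 2048 = 1 + 4 = 5 segments
	 */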

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u segs %u",
		rx_queue_id, q->num_descs, rxq->frame_size, q->num_segs);

	ionic_rx_init_descriptors(rxq);

	err = ionic_lif_rxq_init(rxq);
	if (err)
		return err;

	/* Allocate buffers for descriptor ring */
	if (rxq->flags & IONIC_QCQ_F_SG)
		err = ionic_rx_fill_sg(rxq);
	else
		err = ionic_rx_fill(rxq);
	if (err != 0) {
		IONIC_PRINT(ERR, "Could not fill queue %d", rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_stats *stats;
	struct ionic_rx_qcq *rxq;

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_lif_rxq_deinit(rxq);

	/* Free all buffers from descriptor ring */
	ionic_rx_empty(rxq);

	stats = &rxq->stats;
	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
		rxq->qcq.q.index, stats->packets, stats->mtods);

	return 0;
}

int
ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_qcq *qcq = &rxq->qcq;
	volatile struct ionic_rxq_comp *cq_desc;
	uint16_t mask, head, tail, pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_RX_DESC_UNAVAIL;

	/* interested in this absolute position in the rxq */
	pos = (tail + offset) & mask;

	/* rx cq position == rx q position */
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[pos];

	/* expected done color at this position */
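	/* (the color inverts when pos wraps past index 0, i.e. pos < tail) */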
	done_color = qcq->cq.done_color != (pos < tail);

	/* has the hw indicated the done color at this position? */
	if (color_match(cq_desc->pkt_type_color, done_color))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

int
ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_qcq *qcq = &txq->qcq;
	volatile struct ionic_txq_comp *cq_desc;
	uint16_t mask, head, tail, pos, cq_pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_TX_DESC_DONE;

	/* interested in this absolute position in the txq */
	pos = (tail + offset) & mask;

	/* tx cq position != tx q position, need to walk cq */
	cq_pos = qcq->cq.tail_idx;
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[cq_pos];

	/* how far behind is pos from head? */
	offset = (head - pos) & mask;

	/* walk cq descriptors that match the expected done color */
	done_color = qcq->cq.done_color;
	while (color_match(cq_desc->color, done_color)) {
		/* is comp index no further behind than pos? */
		tail = rte_le_to_cpu_16(cq_desc->comp_index);
		if (((head - tail) & mask) <= offset)
			return RTE_ETH_TX_DESC_DONE;

		cq_pos = (cq_pos + 1) & mask;
		cq_desc = qcq->cq.base;
		cq_desc = &cq_desc[cq_pos];

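		/* The expected done color flips each time the cq index wraps to 0 */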
		done_color = done_color != (cq_pos == 0);
	}

	return RTE_ETH_TX_DESC_FULL;
}
843