xref: /dpdk/drivers/net/ionic/ionic_rxtx.c (revision ba6a168a06581b5b3d523f984722a3e5f65bbb82)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_tcp.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>

#include "ionic.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_ethdev.h"
#include "ionic_rxtx.h"
#include "ionic_logs.h"

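/*
 * Free any mbuf segments stored in array[idx..cnt-1], then zero the
 * entire array so stale pointers are never reused.
 */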
static void
ionic_empty_array(void **array, uint32_t cnt, uint16_t idx)
{
	uint32_t i;

	for (i = idx; i < cnt; i++)
		if (array[i])
			rte_pktmbuf_free_seg(array[i]);

	memset(array, 0, sizeof(void *) * cnt);
}

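/* Free all Tx buffers tracked in the queue's info array. */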
static void __rte_cold
ionic_tx_empty(struct ionic_tx_qcq *txq)
{
	struct ionic_queue *q = &txq->qcq.q;

	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);
}

static void __rte_cold
ionic_rx_empty(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;

	/*
	 * Walk the full info array so that the clean up includes any
	 * fragments that were left dangling for later reuse
	 */
	ionic_empty_array(q->info, q->num_descs * q->num_segs, 0);

	ionic_empty_array((void **)rxq->mbs,
			IONIC_MBUF_BULK_ALLOC, rxq->mb_idx);
	rxq->mb_idx = 0;
}

/*********************************************************************
 *
 *  TX functions
 *
 **********************************************************************/

void
ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_txq_info *qinfo)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
	struct ionic_queue *q = &txq->qcq.q;

	qinfo->nb_desc = q->num_descs;
	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
	if (txq->flags & IONIC_QCQ_F_FAST_FREE)
		qinfo->conf.offloads |= RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;
	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
}

void __rte_cold
ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];

	IONIC_PRINT_CALL();

	ionic_qcq_free(&txq->qcq);
}

int __rte_cold
ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	struct ionic_tx_stats *stats;
	struct ionic_tx_qcq *txq;

	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);

	txq = eth_dev->data->tx_queues[tx_queue_id];

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	/*
	 * Note: it would be better to post a NOP Tx descriptor and wait
	 * for its completion before disabling the Tx queue
	 */

	ionic_lif_txq_deinit(txq);

	/* Free all buffers from descriptor ring */
	ionic_tx_empty(txq);

	stats = &txq->stats;
	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
		txq->qcq.q.index, stats->packets, stats->tso);

	return 0;
}

int __rte_cold
ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
		uint16_t nb_desc, uint32_t socket_id,
		const struct rte_eth_txconf *tx_conf)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_tx_qcq *txq;
	uint64_t offloads;
	int err;

	if (tx_queue_id >= lif->ntxqcqs) {
		IONIC_PRINT(DEBUG, "Queue index %u not available "
			"(max %u queues)",
			tx_queue_id, lif->ntxqcqs);
		return -EINVAL;
	}

	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
		socket_id, tx_queue_id, nb_desc, offloads);

	/* Validate number of transmit descriptors */
	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */

	if (tx_conf->tx_free_thresh > nb_desc) {
		IONIC_PRINT(ERR,
			"tx_free_thresh must not exceed nb_desc (%u)",
			nb_desc);
		return -EINVAL;
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
		eth_dev->data->tx_queues[tx_queue_id] = NULL;
	}

	eth_dev->data->tx_queue_state[tx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
	if (err) {
		IONIC_PRINT(DEBUG, "Queue allocation failure");
		return -EINVAL;
	}

	/* Do not start queue with rte_eth_dev_start() */
	if (tx_conf->tx_deferred_start)
		txq->flags |= IONIC_QCQ_F_DEFERRED;

	/* Convert the offload flags into queue flags */
	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_L3;
	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
	if (offloads & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
		txq->flags |= IONIC_QCQ_F_FAST_FREE;

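	/* Default free threshold: ring size less one default burst, unless configured */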
	txq->free_thresh =
		tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh :
		nb_desc - IONIC_DEF_TXRX_BURST;

	eth_dev->data->tx_queues[tx_queue_id] = txq;

	return 0;
}

/*
 * Start Transmit Units for specified queue.
 */
int __rte_cold
ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
{
	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
	struct ionic_tx_qcq *txq;
	int err;

	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "TX queue %u already started",
			tx_queue_id);
		return 0;
	}

	txq = eth_dev->data->tx_queues[tx_queue_id];

	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
		tx_queue_id, txq->qcq.q.num_descs);

	err = ionic_lif_txq_init(txq);
	if (err)
		return err;

	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

static void
ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

static void
ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
{
	struct rte_ether_hdr *eth_hdr =
		rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
		txm->outer_l3_len + txm->l2_len;
	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
		(l3_hdr + txm->l3_len);

	if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
		ipv4_hdr->hdr_checksum = 0;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
	} else {
		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
		tcp_hdr->cksum = 0;
		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
	}
}

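/*
 * Post one TSO descriptor. The start/done flags mark the first and last
 * descriptors of the TSO (SOT/EOT); on the final descriptor the mbuf
 * chain is stashed in the queue's info array so the buffers can be
 * released once the transmit completes.
 */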
static void
ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
		struct rte_mbuf *txm,
		rte_iova_t addr, uint8_t nsge, uint16_t len,
		uint32_t hdrlen, uint32_t mss,
		bool encap,
		uint16_t vlan_tci, bool has_vlan,
		bool start, bool done)
{
	struct rte_mbuf *txm_seg;
	void **info;
	uint64_t cmd;
	uint8_t flags = 0;
	int i;

	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;

	cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
		flags, nsge, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(len);
	desc->vlan_tci = rte_cpu_to_le_16(vlan_tci);
	desc->hdr_len = rte_cpu_to_le_16(hdrlen);
	desc->mss = rte_cpu_to_le_16(mss);

	if (done) {
		info = IONIC_INFO_PTR(q, q->head_idx);

		/* Walk the mbuf chain to stash pointers in the array */
		txm_seg = txm;
		for (i = 0; i < txm->nb_segs; i++) {
			info[i] = txm_seg;
			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);
}

static struct ionic_txq_desc *
ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];

	*elem = sg_desc->elems;
	return desc;
}

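/*
 * Transmit a TSO packet. The payload is carved into roughly MSS-sized
 * chunks (the first also carries the headers): each chunk begins a new
 * TSO descriptor, and when the queue supports scatter-gather the rest
 * of a chunk that spans mbuf boundaries is filled in via SG elements.
 */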
int
ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct ionic_txq_desc *desc;
	struct ionic_txq_sg_elem *elem;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	uint64_t desc_addr = 0, next_addr;
	uint16_t desc_len = 0;
	uint8_t desc_nsge = 0;
	uint32_t hdrlen;
	uint32_t mss = txm->tso_segsz;
	uint32_t frag_left = 0;
	uint32_t left;
	uint32_t seglen;
	uint32_t len;
	uint32_t offset = 0;
	bool start, done;
	bool encap;
	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
	bool use_sgl = !!(txq->flags & IONIC_QCQ_F_SG);
	uint16_t vlan_tci = txm->vlan_tci;
	uint64_t ol_flags = txm->ol_flags;

	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
		 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));

	/* Preload inner-most TCP csum field with IP pseudo hdr
	 * calculated with IP length set to zero.  HW will later
	 * add in length to each TCP segment resulting from the TSO.
	 */

	if (encap) {
		ionic_tx_tcp_inner_pseudo_csum(txm);
		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
			txm->l2_len + txm->l3_len + txm->l4_len;
	} else {
		ionic_tx_tcp_pseudo_csum(txm);
		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
	}

	desc = ionic_tx_tso_next(txq, &elem);
	txm_seg = txm;
	start = true;
	seglen = hdrlen + mss;

	/* Walk the chain of mbufs */
	while (txm_seg != NULL) {
		offset = 0;
		data_iova = rte_mbuf_data_iova(txm_seg);
		left = txm_seg->data_len;

		/* Split the mbuf data up into multiple descriptors */
		while (left > 0) {
			next_addr = rte_cpu_to_le_64(data_iova + offset);
			if (frag_left > 0 && use_sgl) {
				/* Fill previous descriptor's SGE */
				len = RTE_MIN(frag_left, left);
				frag_left -= len;
				elem->addr = next_addr;
				elem->len = rte_cpu_to_le_16(len);
				elem++;
				desc_nsge++;
			} else {
				/* Fill new descriptor's data field */
				len = RTE_MIN(seglen, left);
				frag_left = seglen - len;
				desc_addr = next_addr;
				desc_len = len;
				desc_nsge = 0;
			}
			left -= len;
			offset += len;

			/*
			 * If another mbuf follows and SG space remains,
			 * continue packing it into this same descriptor
			 */
			if (txm_seg->next != NULL && frag_left > 0 && use_sgl)
				break;

			done = (txm_seg->next == NULL && left == 0);
			ionic_tx_tso_post(q, desc, txm_seg,
				desc_addr, desc_nsge, desc_len,
				hdrlen, mss,
				encap,
				vlan_tci, has_vlan,
				start, done);
			desc = ionic_tx_tso_next(txq, &elem);
			start = false;
			seglen = mss;
		}

		txm_seg = txm_seg->next;
	}

	stats->tso++;

	return 0;
}

/*********************************************************************
 *
 *  TX prep functions
 *
 **********************************************************************/

#define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 |		\
	RTE_MBUF_F_TX_IPV6 |		\
	RTE_MBUF_F_TX_VLAN |		\
	RTE_MBUF_F_TX_IP_CKSUM |	\
	RTE_MBUF_F_TX_TCP_SEG |	\
	RTE_MBUF_F_TX_L4_MASK)

#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)

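/*
 * Validate a burst of packets before transmit, rejecting unsupported
 * offload flags and over-long mbuf chains.
 *
 * Typical application usage (illustrative sketch, not driver code):
 *	nb = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);
 *	nb = rte_eth_tx_burst(port_id, queue_id, pkts, nb);
 */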
uint16_t
ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct rte_mbuf *txm;
	uint64_t offloads;
	int i = 0;

	for (i = 0; i < nb_pkts; i++) {
		txm = tx_pkts[i];

		if (txm->nb_segs > txq->num_segs_fw) {
			rte_errno = EINVAL;
			break;
		}

		offloads = txm->ol_flags;

		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
			rte_errno = ENOTSUP;
			break;
		}
	}

	return i;
}

/*********************************************************************
 *
 *  RX functions
 *
 **********************************************************************/

void
ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
		struct rte_eth_rxq_info *qinfo)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
	struct ionic_queue *q = &rxq->qcq.q;

	qinfo->mp = rxq->mb_pool;
	qinfo->scattered_rx = dev->data->scattered_rx;
	qinfo->nb_desc = q->num_descs;
	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
}

void __rte_cold
ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
{
	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];

	if (!rxq)
		return;

	IONIC_PRINT_CALL();

	ionic_qcq_free(&rxq->qcq);
}

int __rte_cold
ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
		uint16_t rx_queue_id,
		uint16_t nb_desc,
		uint32_t socket_id,
		const struct rte_eth_rxconf *rx_conf,
		struct rte_mempool *mp)
{
	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
	struct ionic_rx_qcq *rxq;
	uint64_t offloads;
	int err;

	if (rx_queue_id >= lif->nrxqcqs) {
		IONIC_PRINT(ERR,
			"Queue index %u not available (max %u queues)",
			rx_queue_id, lif->nrxqcqs);
		return -EINVAL;
	}

	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
	IONIC_PRINT(DEBUG,
		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
		socket_id, rx_queue_id, nb_desc, offloads);

	if (!rx_conf->rx_drop_en)
		IONIC_PRINT(WARNING, "No-drop mode is not supported");

	/* Validate number of receive descriptors */
	if (!rte_is_power_of_2(nb_desc) ||
			nb_desc < IONIC_MIN_RING_DESC ||
			nb_desc > IONIC_MAX_RING_DESC) {
		IONIC_PRINT(ERR,
			"Bad descriptor count (%u) for queue %u (min: %u)",
			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
	}

	/* Free memory prior to re-allocation if needed... */
	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
		eth_dev->data->rx_queues[rx_queue_id] = NULL;
	}

	eth_dev->data->rx_queue_state[rx_queue_id] =
		RTE_ETH_QUEUE_STATE_STOPPED;

	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc, mp,
			&rxq);
	if (err) {
		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
		return -EINVAL;
	}

	rxq->mb_pool = mp;
	rxq->wdog_ms = IONIC_Q_WDOG_MS;

	/*
	 * Note: the interface does not currently support
	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC. Once the adapter can keep the CRC,
	 * account for RTE_ETHER_CRC_LEN and subtract it from the length of
	 * all received packets:
	 * if (eth_dev->data->dev_conf.rxmode.offloads &
	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
	 *   rxq->crc_len = RTE_ETHER_CRC_LEN;
	 */

	/* Do not start queue with rte_eth_dev_start() */
	if (rx_conf->rx_deferred_start)
		rxq->flags |= IONIC_QCQ_F_DEFERRED;

	eth_dev->data->rx_queues[rx_queue_id] = rxq;

	return 0;
}

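/*
 * Map the device's completion checksum flags (all bits below the VLAN
 * flag) directly to the corresponding mbuf ol_flags values.
 */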
#define IONIC_CSUM_FLAG_MASK (IONIC_RXQ_COMP_CSUM_F_VLAN - 1)
const uint64_t ionic_csum_flags[IONIC_CSUM_FLAG_MASK]
		__rte_cache_aligned = {
	/* IP_BAD set */
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD] = RTE_MBUF_F_RX_IP_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
			RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
			RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
			RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_BAD | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
			RTE_MBUF_F_RX_IP_CKSUM_BAD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* IP_OK set */
	[IONIC_RXQ_COMP_CSUM_F_IP_OK] = RTE_MBUF_F_RX_IP_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_OK] =
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_TCP_BAD] =
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_OK] =
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_IP_OK | IONIC_RXQ_COMP_CSUM_F_UDP_BAD] =
			RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_BAD,
	/* No IP flag set */
	[IONIC_RXQ_COMP_CSUM_F_TCP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_TCP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_OK] = RTE_MBUF_F_RX_L4_CKSUM_GOOD,
	[IONIC_RXQ_COMP_CSUM_F_UDP_BAD] = RTE_MBUF_F_RX_L4_CKSUM_BAD,
};

/* RTE_PTYPE_UNKNOWN is 0x0 */
const uint32_t ionic_ptype_table[IONIC_RXQ_COMP_PKT_TYPE_MASK]
		__rte_cache_aligned = {
	[IONIC_PKT_TYPE_NON_IP]   = RTE_PTYPE_UNKNOWN,
	[IONIC_PKT_TYPE_IPV4]     = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4,
	[IONIC_PKT_TYPE_IPV4_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV4_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
	[IONIC_PKT_TYPE_IPV6]     = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6,
	[IONIC_PKT_TYPE_IPV6_TCP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
	[IONIC_PKT_TYPE_IPV6_UDP] =
		RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
};

const uint32_t *
ionic_dev_supported_ptypes_get(struct rte_eth_dev *dev __rte_unused,
			       size_t *no_of_elements)
{
	/* See ionic_ptype_table[] */
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_TIMESYNC,
		RTE_PTYPE_L2_ETHER_LLDP,
		RTE_PTYPE_L2_ETHER_ARP,
		RTE_PTYPE_L3_IPV4,
		RTE_PTYPE_L3_IPV6,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
	};

	*no_of_elements = RTE_DIM(ptypes);
	return ptypes;
}

/*
 * Perform one-time initialization of descriptor fields
 * which will not change for the life of the queue.
 */
static void __rte_cold
ionic_rx_init_descriptors(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	uint32_t i, j;
	uint8_t opcode;

	opcode = (q->num_segs > 1) ?
		IONIC_RXQ_DESC_OPCODE_SG : IONIC_RXQ_DESC_OPCODE_SIMPLE;

	/*
	 * NB: Only the first segment needs to leave headroom (hdr_seg_size).
	 *     Later segments (seg_size) do not.
	 */
	for (i = 0; i < q->num_descs; i++) {
		desc = &desc_base[i];
		desc->len = rte_cpu_to_le_16(rxq->hdr_seg_size);
		desc->opcode = opcode;

		sg_desc = &sg_desc_base[i];
		for (j = 0; j < q->num_segs - 1u; j++)
			sg_desc->elems[j].len =
				rte_cpu_to_le_16(rxq->seg_size);
	}
}

/*
 * Start Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_qcq *rxq;
	struct ionic_queue *q;
	int err;

	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
		IONIC_PRINT(DEBUG, "RX queue %u already started",
			rx_queue_id);
		return 0;
	}

	rxq = eth_dev->data->rx_queues[rx_queue_id];
	q = &rxq->qcq.q;

	rxq->frame_size = rxq->qcq.lif->frame_size - RTE_ETHER_CRC_LEN;

	/* Recalculate segment count based on MTU */
	q->num_segs = 1 +
		(rxq->frame_size + RTE_PKTMBUF_HEADROOM - 1) / rxq->seg_size;

	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs, size %u segs %u",
		rx_queue_id, q->num_descs, rxq->frame_size, q->num_segs);

	ionic_rx_init_descriptors(rxq);

	err = ionic_lif_rxq_init(rxq);
	if (err)
		return err;

	/* Allocate buffers for descriptor ring */
	if (rxq->flags & IONIC_QCQ_F_SG)
		err = ionic_rx_fill_sg(rxq);
	else
		err = ionic_rx_fill(rxq);
	if (err != 0) {
		IONIC_PRINT(ERR, "Could not fill queue %d", rx_queue_id);
		return -1;
	}

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;

	return 0;
}

/*
 * Stop Receive Units for specified queue.
 */
int __rte_cold
ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
{
	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
	struct ionic_rx_stats *stats;
	struct ionic_rx_qcq *rxq;

	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);

	rxq = eth_dev->data->rx_queues[rx_queue_id];

	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;

	ionic_lif_rxq_deinit(rxq);

	/* Free all buffers from descriptor ring */
	ionic_rx_empty(rxq);

	stats = &rxq->stats;
	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
		rxq->qcq.q.index, stats->packets, stats->mtods);

	return 0;
}

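/*
 * Report the status of the Rx descriptor at 'offset' past the tail by
 * checking whether the hardware has written the expected color bit into
 * the corresponding completion entry.
 */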
int
ionic_dev_rx_descriptor_status(void *rx_queue, uint16_t offset)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_qcq *qcq = &rxq->qcq;
	struct ionic_rxq_comp *cq_desc;
	uint16_t mask, head, tail, pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_RX_DESC_UNAVAIL;

	/* interested in this absolute position in the rxq */
	pos = (tail + offset) & mask;

	/* rx cq position == rx q position */
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[pos];

	/* expected done color at this position */
	done_color = qcq->cq.done_color != (pos < tail);

	/* has the hw indicated the done color at this position? */
	if (color_match(cq_desc->pkt_type_color, done_color))
		return RTE_ETH_RX_DESC_DONE;

	return RTE_ETH_RX_DESC_AVAIL;
}

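/*
 * Report the status of the Tx descriptor at 'offset' past the tail.
 * Tx completion entries can each cover multiple descriptors, so walk
 * the completion ring: a completion carrying the current color retires
 * every descriptor up to its comp_index.
 */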
int
ionic_dev_tx_descriptor_status(void *tx_queue, uint16_t offset)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_qcq *qcq = &txq->qcq;
	struct ionic_txq_comp *cq_desc;
	uint16_t mask, head, tail, pos, cq_pos;
	bool done_color;

	mask = qcq->q.size_mask;

	/* offset must be within the size of the ring */
	if (offset > mask)
		return -EINVAL;

	head = qcq->q.head_idx;
	tail = qcq->q.tail_idx;

	/* offset is beyond what is posted */
	if (offset >= ((head - tail) & mask))
		return RTE_ETH_TX_DESC_DONE;

	/* interested in this absolute position in the txq */
	pos = (tail + offset) & mask;

	/* tx cq position != tx q position, need to walk cq */
	cq_pos = qcq->cq.tail_idx;
	cq_desc = qcq->cq.base;
	cq_desc = &cq_desc[cq_pos];

	/* how far behind is pos from head? */
	offset = (head - pos) & mask;

	/* walk cq descriptors that match the expected done color */
	done_color = qcq->cq.done_color;
	while (color_match(cq_desc->color, done_color)) {
		/* is comp index no further behind than pos? */
		tail = rte_le_to_cpu_16(cq_desc->comp_index);
		if (((head - tail) & mask) <= offset)
			return RTE_ETH_TX_DESC_DONE;

		cq_pos = (cq_pos + 1) & mask;
		cq_desc = qcq->cq.base;
		cq_desc = &cq_desc[cq_pos];

		done_color = done_color != (cq_pos == 0);
	}

	return RTE_ETH_TX_DESC_FULL;
}