xref: /dpdk/drivers/net/ionic/ionic_rxtx.c (revision 4ed890495594a71e618d62fd975c5868915eb4a1)
1 /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2  * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
3  */
4 
5 #include <sys/queue.h>
6 #include <stdio.h>
7 #include <stdlib.h>
8 #include <string.h>
9 #include <errno.h>
10 #include <stdint.h>
11 #include <stdarg.h>
12 #include <unistd.h>
13 #include <inttypes.h>
14 
15 #include <rte_byteorder.h>
16 #include <rte_common.h>
17 #include <rte_cycles.h>
18 #include <rte_log.h>
19 #include <rte_debug.h>
20 #include <rte_interrupts.h>
21 #include <rte_pci.h>
22 #include <rte_memory.h>
23 #include <rte_memzone.h>
24 #include <rte_launch.h>
25 #include <rte_eal.h>
26 #include <rte_per_lcore.h>
27 #include <rte_lcore.h>
28 #include <rte_atomic.h>
29 #include <rte_branch_prediction.h>
30 #include <rte_mempool.h>
31 #include <rte_malloc.h>
32 #include <rte_mbuf.h>
33 #include <rte_ether.h>
34 #include <ethdev_driver.h>
35 #include <rte_prefetch.h>
36 #include <rte_udp.h>
37 #include <rte_tcp.h>
38 #include <rte_sctp.h>
39 #include <rte_string_fns.h>
40 #include <rte_errno.h>
41 #include <rte_ip.h>
42 #include <rte_net.h>
43 
44 #include "ionic_logs.h"
45 #include "ionic_mac_api.h"
46 #include "ionic_ethdev.h"
47 #include "ionic_lif.h"
48 #include "ionic_rxtx.h"
49 
50 /*********************************************************************
51  *
52  *  TX functions
53  *
54  **********************************************************************/
55 
56 void
57 ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
58 		struct rte_eth_txq_info *qinfo)
59 {
60 	struct ionic_tx_qcq *txq = dev->data->tx_queues[queue_id];
61 	struct ionic_queue *q = &txq->qcq.q;
62 
63 	qinfo->nb_desc = q->num_descs;
64 	qinfo->conf.offloads = dev->data->dev_conf.txmode.offloads;
65 	qinfo->conf.tx_deferred_start = txq->flags & IONIC_QCQ_F_DEFERRED;
66 }
67 
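/*
 * Reclaim completed Tx descriptors.  Walks the completion ring while the
 * color bit matches, remembers the last completion index reported by the
 * hardware, and then frees the mbuf chains recorded for the descriptors
 * serviced up to that point.
 */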
68 static __rte_always_inline void
69 ionic_tx_flush(struct ionic_tx_qcq *txq)
70 {
71 	struct ionic_cq *cq = &txq->qcq.cq;
72 	struct ionic_queue *q = &txq->qcq.q;
73 	struct rte_mbuf *txm, *next;
74 	struct ionic_txq_comp *cq_desc_base = cq->base;
75 	struct ionic_txq_comp *cq_desc;
76 	void **info;
77 	uint32_t comp_index = (uint32_t)-1;
78 
79 	cq_desc = &cq_desc_base[cq->tail_idx];
80 	while (color_match(cq_desc->color, cq->done_color)) {
81 		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
82 
83 		/* Prefetch the next 4 descriptors (not really useful here) */
84 		if ((cq->tail_idx & 0x3) == 0)
85 			rte_prefetch0(&cq_desc_base[cq->tail_idx]);
86 
87 		if (cq->tail_idx == 0)
88 			cq->done_color = !cq->done_color;
89 
90 		comp_index = cq_desc->comp_index;
91 
92 		cq_desc = &cq_desc_base[cq->tail_idx];
93 	}
94 
95 	if (comp_index != (uint32_t)-1) {
96 		while (q->tail_idx != comp_index) {
97 			info = IONIC_INFO_PTR(q, q->tail_idx);
98 
99 			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
100 
101 			/* Prefetch the next 4 descriptors */
102 			if ((q->tail_idx & 0x3) == 0)
103 				/* q desc info */
104 				rte_prefetch0(&q->info[q->tail_idx]);
105 
106 			/*
107 			 * Note: you can just use rte_pktmbuf_free,
108 			 * but this loop is faster
109 			 */
110 			txm = info[0];
111 			while (txm != NULL) {
112 				next = txm->next;
113 				rte_pktmbuf_free_seg(txm);
114 				txm = next;
115 			}
116 		}
117 	}
118 }
119 
120 void __rte_cold
121 ionic_dev_tx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
122 {
123 	struct ionic_tx_qcq *txq = dev->data->tx_queues[qid];
124 	struct ionic_tx_stats *stats = &txq->stats;
125 
126 	IONIC_PRINT_CALL();
127 
128 	IONIC_PRINT(DEBUG, "TX queue %u pkts %ju tso %ju",
129 		txq->qcq.q.index, stats->packets, stats->tso);
130 
131 	ionic_lif_txq_deinit(txq);
132 
133 	ionic_qcq_free(&txq->qcq);
134 }
135 
136 int __rte_cold
137 ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
138 {
139 	struct ionic_tx_qcq *txq;
140 
141 	IONIC_PRINT(DEBUG, "Stopping TX queue %u", tx_queue_id);
142 
143 	txq = eth_dev->data->tx_queues[tx_queue_id];
144 
145 	eth_dev->data->tx_queue_state[tx_queue_id] =
146 		RTE_ETH_QUEUE_STATE_STOPPED;
147 
148 	/*
149 	 * Note: it would be better to post a NOP Tx descriptor and wait for its
150 	 * completion before disabling the Tx queue
151 	 */
152 
153 	ionic_qcq_disable(&txq->qcq);
154 
155 	ionic_tx_flush(txq);
156 
157 	return 0;
158 }
159 
160 int __rte_cold
161 ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
162 		uint16_t nb_desc, uint32_t socket_id,
163 		const struct rte_eth_txconf *tx_conf)
164 {
165 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
166 	struct ionic_tx_qcq *txq;
167 	uint64_t offloads;
168 	int err;
169 
170 	if (tx_queue_id >= lif->ntxqcqs) {
171 		IONIC_PRINT(DEBUG, "Queue index %u not available "
172 			"(max %u queues)",
173 			tx_queue_id, lif->ntxqcqs);
174 		return -EINVAL;
175 	}
176 
177 	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
178 	IONIC_PRINT(DEBUG,
179 		"Configuring skt %u TX queue %u with %u buffers, offloads %jx",
180 		socket_id, tx_queue_id, nb_desc, offloads);
181 
182 	/* Validate number of transmit descriptors */
183 	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
184 		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
185 
186 	/* Free memory prior to re-allocation if needed... */
187 	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
188 		ionic_dev_tx_queue_release(eth_dev, tx_queue_id);
189 		eth_dev->data->tx_queues[tx_queue_id] = NULL;
190 	}
191 
192 	eth_dev->data->tx_queue_state[tx_queue_id] =
193 		RTE_ETH_QUEUE_STATE_STOPPED;
194 
195 	err = ionic_tx_qcq_alloc(lif, socket_id, tx_queue_id, nb_desc, &txq);
196 	if (err) {
197 		IONIC_PRINT(DEBUG, "Queue allocation failure");
198 		return -EINVAL;
199 	}
200 
201 	/* Do not start queue with rte_eth_dev_start() */
202 	if (tx_conf->tx_deferred_start)
203 		txq->flags |= IONIC_QCQ_F_DEFERRED;
204 
205 	/* Convert the offload flags into queue flags */
206 	if (offloads & RTE_ETH_TX_OFFLOAD_IPV4_CKSUM)
207 		txq->flags |= IONIC_QCQ_F_CSUM_L3;
208 	if (offloads & RTE_ETH_TX_OFFLOAD_TCP_CKSUM)
209 		txq->flags |= IONIC_QCQ_F_CSUM_TCP;
210 	if (offloads & RTE_ETH_TX_OFFLOAD_UDP_CKSUM)
211 		txq->flags |= IONIC_QCQ_F_CSUM_UDP;
212 
213 	eth_dev->data->tx_queues[tx_queue_id] = txq;
214 
215 	return 0;
216 }
217 
218 /*
219  * Start Transmit Units for specified queue.
220  */
221 int __rte_cold
222 ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
223 {
224 	uint8_t *tx_queue_state = eth_dev->data->tx_queue_state;
225 	struct ionic_tx_qcq *txq;
226 	int err;
227 
228 	if (tx_queue_state[tx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
229 		IONIC_PRINT(DEBUG, "TX queue %u already started",
230 			tx_queue_id);
231 		return 0;
232 	}
233 
234 	txq = eth_dev->data->tx_queues[tx_queue_id];
235 
236 	IONIC_PRINT(DEBUG, "Starting TX queue %u, %u descs",
237 		tx_queue_id, txq->qcq.q.num_descs);
238 
239 	if (!(txq->flags & IONIC_QCQ_F_INITED)) {
240 		err = ionic_lif_txq_init(txq);
241 		if (err)
242 			return err;
243 	} else {
244 		ionic_qcq_enable(&txq->qcq);
245 	}
246 
247 	tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
248 
249 	return 0;
250 }
251 
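/*
 * Prepare the TCP checksum field of a packet that will be segmented by the
 * hardware (TSO).  The IPv4 header checksum and TCP checksum are cleared
 * and a fresh checksum is written with rte_ipv4_udptcp_cksum() or
 * rte_ipv6_udptcp_cksum(), giving the hardware a starting value it can
 * adjust for each generated segment.
 */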
252 static void
253 ionic_tx_tcp_pseudo_csum(struct rte_mbuf *txm)
254 {
255 	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
256 	char *l3_hdr = ((char *)eth_hdr) + txm->l2_len;
257 	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
258 		(l3_hdr + txm->l3_len);
259 
260 	if (txm->ol_flags & RTE_MBUF_F_TX_IP_CKSUM) {
261 		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
262 		ipv4_hdr->hdr_checksum = 0;
263 		tcp_hdr->cksum = 0;
264 		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
265 	} else {
266 		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
267 		tcp_hdr->cksum = 0;
268 		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
269 	}
270 }
271 
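/*
 * Same as ionic_tx_tcp_pseudo_csum(), but for the inner headers of an
 * encapsulated (tunneled) packet; the outer L2/L3 lengths are used to
 * locate the inner L3 header.
 */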
272 static void
273 ionic_tx_tcp_inner_pseudo_csum(struct rte_mbuf *txm)
274 {
275 	struct rte_ether_hdr *eth_hdr = rte_pktmbuf_mtod(txm, struct rte_ether_hdr *);
276 	char *l3_hdr = ((char *)eth_hdr) + txm->outer_l2_len +
277 		txm->outer_l3_len + txm->l2_len;
278 	struct rte_tcp_hdr *tcp_hdr = (struct rte_tcp_hdr *)
279 		(l3_hdr + txm->l3_len);
280 
281 	if (txm->ol_flags & RTE_MBUF_F_TX_IPV4) {
282 		struct rte_ipv4_hdr *ipv4_hdr = (struct rte_ipv4_hdr *)l3_hdr;
283 		ipv4_hdr->hdr_checksum = 0;
284 		tcp_hdr->cksum = 0;
285 		tcp_hdr->cksum = rte_ipv4_udptcp_cksum(ipv4_hdr, tcp_hdr);
286 	} else {
287 		struct rte_ipv6_hdr *ipv6_hdr = (struct rte_ipv6_hdr *)l3_hdr;
288 		tcp_hdr->cksum = 0;
289 		tcp_hdr->cksum = rte_ipv6_udptcp_cksum(ipv6_hdr, tcp_hdr);
290 	}
291 }
292 
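/*
 * Write one TSO descriptor: opcode, flags (VLAN, encap, start/end of TSO),
 * buffer address, SG element count, length, VLAN TCI, header length and
 * MSS.  The mbuf is recorded in the info array on the final descriptor for
 * release at completion time, and the queue head is advanced.
 */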
293 static void
294 ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
295 		struct rte_mbuf *txm,
296 		rte_iova_t addr, uint8_t nsge, uint16_t len,
297 		uint32_t hdrlen, uint32_t mss,
298 		bool encap,
299 		uint16_t vlan_tci, bool has_vlan,
300 		bool start, bool done)
301 {
302 	void **info;
303 	uint8_t flags = 0;
304 	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
305 	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
306 	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
307 	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;
308 
309 	desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
310 		flags, nsge, addr);
311 	desc->len = len;
312 	desc->vlan_tci = vlan_tci;
313 	desc->hdr_len = hdrlen;
314 	desc->mss = mss;
315 
316 	if (done) {
317 		info = IONIC_INFO_PTR(q, q->head_idx);
318 		info[0] = txm;
319 	}
320 
321 	q->head_idx = Q_NEXT_TO_POST(q, 1);
322 }
323 
324 static struct ionic_txq_desc *
325 ionic_tx_tso_next(struct ionic_tx_qcq *txq, struct ionic_txq_sg_elem **elem)
326 {
327 	struct ionic_queue *q = &txq->qcq.q;
328 	struct ionic_txq_desc *desc_base = q->base;
329 	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
330 	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
331 	struct ionic_txq_sg_desc_v1 *sg_desc = &sg_desc_base[q->head_idx];
332 
333 	*elem = sg_desc->elems;
334 	return desc;
335 }
336 
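/*
 * Post a TSO packet.  The L2/L3/L4 (and outer, if tunneled) header length
 * is computed and the TCP checksum is seeded, then the payload is sliced
 * into chunks of at most MSS bytes (headers plus MSS for the first chunk).
 * Each chunk becomes one descriptor, with SG elements used when a chunk
 * spans multiple mbuf segments.  The first and last descriptors carry the
 * TSO start/end flags, and the mbuf is saved on the final descriptor so it
 * can be released when the transmission completes.
 */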
337 static int
338 ionic_tx_tso(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
339 {
340 	struct ionic_queue *q = &txq->qcq.q;
341 	struct ionic_tx_stats *stats = &txq->stats;
342 	struct ionic_txq_desc *desc;
343 	struct ionic_txq_sg_elem *elem;
344 	struct rte_mbuf *txm_seg;
345 	rte_iova_t data_iova;
346 	uint64_t desc_addr = 0, next_addr;
347 	uint16_t desc_len = 0;
348 	uint8_t desc_nsge;
349 	uint32_t hdrlen;
350 	uint32_t mss = txm->tso_segsz;
351 	uint32_t frag_left = 0;
352 	uint32_t left;
353 	uint32_t seglen;
354 	uint32_t len;
355 	uint32_t offset = 0;
356 	bool start, done;
357 	bool encap;
358 	bool has_vlan = !!(txm->ol_flags & RTE_MBUF_F_TX_VLAN);
359 	uint16_t vlan_tci = txm->vlan_tci;
360 	uint64_t ol_flags = txm->ol_flags;
361 
362 	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
363 		 (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
364 		((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
365 		 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));
366 
367 	/* Preload inner-most TCP csum field with IP pseudo hdr
368 	 * calculated with IP length set to zero.  HW will later
369 	 * add in length to each TCP segment resulting from the TSO.
370 	 */
371 
372 	if (encap) {
373 		ionic_tx_tcp_inner_pseudo_csum(txm);
374 		hdrlen = txm->outer_l2_len + txm->outer_l3_len +
375 			txm->l2_len + txm->l3_len + txm->l4_len;
376 	} else {
377 		ionic_tx_tcp_pseudo_csum(txm);
378 		hdrlen = txm->l2_len + txm->l3_len + txm->l4_len;
379 	}
380 
381 	seglen = hdrlen + mss;
382 	left = txm->data_len;
383 	data_iova = rte_mbuf_data_iova(txm);
384 
385 	desc = ionic_tx_tso_next(txq, &elem);
386 	start = true;
387 
388 	/* Chop data up into desc segments */
389 
390 	while (left > 0) {
391 		len = RTE_MIN(seglen, left);
392 		frag_left = seglen - len;
393 		desc_addr = rte_cpu_to_le_64(data_iova + offset);
394 		desc_len = len;
395 		desc_nsge = 0;
396 		left -= len;
397 		offset += len;
398 		if (txm->nb_segs > 1 && frag_left > 0)
399 			continue;
400 		done = (txm->nb_segs == 1 && left == 0);
401 		ionic_tx_tso_post(q, desc, txm,
402 			desc_addr, desc_nsge, desc_len,
403 			hdrlen, mss,
404 			encap,
405 			vlan_tci, has_vlan,
406 			start, done);
407 		desc = ionic_tx_tso_next(txq, &elem);
408 		start = false;
409 		seglen = mss;
410 	}
411 
412 	/* Chop frags into desc segments */
413 
414 	txm_seg = txm->next;
415 	while (txm_seg != NULL) {
416 		offset = 0;
417 		data_iova = rte_mbuf_data_iova(txm_seg);
418 		left = txm_seg->data_len;
419 
420 		while (left > 0) {
421 			next_addr = rte_cpu_to_le_64(data_iova + offset);
422 			if (frag_left > 0) {
423 				len = RTE_MIN(frag_left, left);
424 				frag_left -= len;
425 				elem->addr = next_addr;
426 				elem->len = len;
427 				elem++;
428 				desc_nsge++;
429 			} else {
430 				len = RTE_MIN(mss, left);
431 				frag_left = mss - len;
432 				desc_addr = next_addr;
433 				desc_len = len;
434 				desc_nsge = 0;
435 			}
436 			left -= len;
437 			offset += len;
438 			if (txm_seg->next != NULL && frag_left > 0)
439 				continue;
440 
441 			done = (txm_seg->next == NULL && left == 0);
442 			ionic_tx_tso_post(q, desc, txm_seg,
443 				desc_addr, desc_nsge, desc_len,
444 				hdrlen, mss,
445 				encap,
446 				vlan_tci, has_vlan,
447 				start, done);
448 			desc = ionic_tx_tso_next(txq, &elem);
449 			start = false;
450 		}
451 
452 		txm_seg = txm_seg->next;
453 	}
454 
455 	stats->tso++;
456 
457 	return 0;
458 }
459 
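/*
 * Post a single non-TSO packet.  Chooses the checksum-offload opcode and
 * flags from the mbuf ol_flags and the queue capabilities, fills in the
 * first-segment address and length, places any additional mbuf segments in
 * the SG list, records the mbuf for later release, and advances the head.
 */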
460 static __rte_always_inline int
461 ionic_tx(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
462 {
463 	struct ionic_queue *q = &txq->qcq.q;
464 	struct ionic_txq_desc *desc, *desc_base = q->base;
465 	struct ionic_txq_sg_desc_v1 *sg_desc_base = q->sg_base;
466 	struct ionic_txq_sg_elem *elem;
467 	struct ionic_tx_stats *stats = &txq->stats;
468 	struct rte_mbuf *txm_seg;
469 	void **info;
470 	bool encap;
471 	bool has_vlan;
472 	uint64_t ol_flags = txm->ol_flags;
473 	uint64_t addr;
474 	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
475 	uint8_t flags = 0;
476 
477 	desc = &desc_base[q->head_idx];
478 	info = IONIC_INFO_PTR(q, q->head_idx);
479 
480 	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
481 	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
482 		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
483 		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
484 	}
485 
486 	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
487 	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
488 	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
489 	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
490 		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
491 		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
492 	}
493 
494 	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
495 		stats->no_csum++;
496 
497 	has_vlan = (ol_flags & RTE_MBUF_F_TX_VLAN);
498 	encap = ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
499 			(ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
500 			((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
501 			 (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6));
502 
503 	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
504 	flags |= encap ? IONIC_TXQ_DESC_FLAG_ENCAP : 0;
505 
506 	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));
507 
508 	desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
509 	desc->len = txm->data_len;
510 	desc->vlan_tci = txm->vlan_tci;
511 
512 	info[0] = txm;
513 
514 	elem = sg_desc_base[q->head_idx].elems;
515 
516 	txm_seg = txm->next;
517 	while (txm_seg != NULL) {
518 		elem->len = txm_seg->data_len;
519 		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
520 		elem++;
521 		txm_seg = txm_seg->next;
522 	}
523 
524 	q->head_idx = Q_NEXT_TO_POST(q, 1);
525 
526 	return 0;
527 }
528 
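/*
 * Burst transmit.  Completed descriptors are reclaimed first, the burst is
 * clamped to the ring space available, and each packet is posted via the
 * TSO or simple path.  A single doorbell ring (ionic_q_flush) after a write
 * barrier makes the new descriptors visible to the hardware.
 *
 * This is the Tx burst handler installed for the device; an application
 * normally reaches it through the ethdev API, e.g. (illustrative):
 *   nb_sent = rte_eth_tx_burst(port_id, queue_id, tx_pkts, nb_pkts);
 */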
529 uint16_t
530 ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
531 		uint16_t nb_pkts)
532 {
533 	struct ionic_tx_qcq *txq = tx_queue;
534 	struct ionic_queue *q = &txq->qcq.q;
535 	struct ionic_tx_stats *stats = &txq->stats;
536 	uint32_t next_q_head_idx;
537 	uint32_t bytes_tx = 0;
538 	uint16_t nb_avail, nb_tx = 0;
539 	int err;
540 
541 	/* Cleaning old buffers */
542 	ionic_tx_flush(txq);
543 
544 	nb_avail = ionic_q_space_avail(q);
545 	if (unlikely(nb_avail < nb_pkts)) {
546 		stats->stop += nb_pkts - nb_avail;
547 		nb_pkts = nb_avail;
548 	}
549 
550 	while (nb_tx < nb_pkts) {
551 		next_q_head_idx = Q_NEXT_TO_POST(q, 1);
552 		if ((next_q_head_idx & 0x3) == 0) {
553 			struct ionic_txq_desc *desc_base = q->base;
554 			rte_prefetch0(&desc_base[next_q_head_idx]);
555 			rte_prefetch0(&q->info[next_q_head_idx]);
556 		}
557 
558 		if (tx_pkts[nb_tx]->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
559 			err = ionic_tx_tso(txq, tx_pkts[nb_tx]);
560 		else
561 			err = ionic_tx(txq, tx_pkts[nb_tx]);
562 		if (err) {
563 			stats->drop += nb_pkts - nb_tx;
564 			break;
565 		}
566 
567 		bytes_tx += tx_pkts[nb_tx]->pkt_len;
568 		nb_tx++;
569 	}
570 
571 	if (nb_tx > 0) {
572 		rte_wmb();
573 		ionic_q_flush(q);
574 	}
575 
576 	stats->packets += nb_tx;
577 	stats->bytes += bytes_tx;
578 
579 	return nb_tx;
580 }
581 
582 /*********************************************************************
583  *
584  *  TX prep functions
585  *
586  **********************************************************************/
587 
588 #define IONIC_TX_OFFLOAD_MASK (RTE_MBUF_F_TX_IPV4 |		\
589 	RTE_MBUF_F_TX_IPV6 |		\
590 	RTE_MBUF_F_TX_VLAN |		\
591 	RTE_MBUF_F_TX_IP_CKSUM |	\
592 	RTE_MBUF_F_TX_TCP_SEG |	\
593 	RTE_MBUF_F_TX_L4_MASK)
594 
595 #define IONIC_TX_OFFLOAD_NOTSUP_MASK \
596 	(RTE_MBUF_F_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
597 
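/*
 * Tx packet prepare handler.  Checks that each mbuf chain fits within the
 * firmware segment limit and uses only supported offload flags.  Returns
 * the number of packets that may be transmitted; on the first failure
 * rte_errno is set and the remaining packets are not examined.
 */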
598 uint16_t
599 ionic_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
600 {
601 	struct ionic_tx_qcq *txq = tx_queue;
602 	struct rte_mbuf *txm;
603 	uint64_t offloads;
604 	int i = 0;
605 
606 	for (i = 0; i < nb_pkts; i++) {
607 		txm = tx_pkts[i];
608 
609 		if (txm->nb_segs > txq->num_segs_fw) {
610 			rte_errno = EINVAL;
611 			break;
612 		}
613 
614 		offloads = txm->ol_flags;
615 
616 		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
617 			rte_errno = ENOTSUP;
618 			break;
619 		}
620 	}
621 
622 	return i;
623 }
624 
625 /*********************************************************************
626  *
627  *  RX functions
628  *
629  **********************************************************************/
630 
631 static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
632 		struct rte_mbuf *mbuf);
633 
634 void
635 ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
636 		struct rte_eth_rxq_info *qinfo)
637 {
638 	struct ionic_rx_qcq *rxq = dev->data->rx_queues[queue_id];
639 	struct ionic_queue *q = &rxq->qcq.q;
640 
641 	qinfo->mp = rxq->mb_pool;
642 	qinfo->scattered_rx = dev->data->scattered_rx;
643 	qinfo->nb_desc = q->num_descs;
644 	qinfo->conf.rx_deferred_start = rxq->flags & IONIC_QCQ_F_DEFERRED;
645 	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
646 }
647 
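/*
 * Return every receive buffer still posted to the queue (between tail and
 * head) to the mempool.  Called when the queue is released.
 */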
648 static void __rte_cold
649 ionic_rx_empty(struct ionic_rx_qcq *rxq)
650 {
651 	struct ionic_queue *q = &rxq->qcq.q;
652 	struct rte_mbuf *mbuf;
653 	void **info;
654 
655 	while (q->tail_idx != q->head_idx) {
656 		info = IONIC_INFO_PTR(q, q->tail_idx);
657 		mbuf = info[0];
658 		rte_mempool_put(rxq->mb_pool, mbuf);
659 
660 		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
661 	}
662 }
663 
664 void __rte_cold
665 ionic_dev_rx_queue_release(struct rte_eth_dev *dev, uint16_t qid)
666 {
667 	struct ionic_rx_qcq *rxq = dev->data->rx_queues[qid];
668 	struct ionic_rx_stats *stats;
669 
670 	if (!rxq)
671 		return;
672 
673 	IONIC_PRINT_CALL();
674 
675 	stats = &rxq->stats;
676 
677 	IONIC_PRINT(DEBUG, "RX queue %u pkts %ju mtod %ju",
678 		rxq->qcq.q.index, stats->packets, stats->mtods);
679 
680 	ionic_rx_empty(rxq);
681 
682 	ionic_lif_rxq_deinit(rxq);
683 
684 	ionic_qcq_free(&rxq->qcq);
685 }
686 
687 int __rte_cold
688 ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
689 		uint16_t rx_queue_id,
690 		uint16_t nb_desc,
691 		uint32_t socket_id,
692 		const struct rte_eth_rxconf *rx_conf,
693 		struct rte_mempool *mp)
694 {
695 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
696 	struct ionic_rx_qcq *rxq;
697 	uint64_t offloads;
698 	int err;
699 
700 	if (rx_queue_id >= lif->nrxqcqs) {
701 		IONIC_PRINT(ERR,
702 			"Queue index %u not available (max %u queues)",
703 			rx_queue_id, lif->nrxqcqs);
704 		return -EINVAL;
705 	}
706 
707 	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
708 	IONIC_PRINT(DEBUG,
709 		"Configuring skt %u RX queue %u with %u buffers, offloads %jx",
710 		socket_id, rx_queue_id, nb_desc, offloads);
711 
712 	if (!rx_conf->rx_drop_en)
713 		IONIC_PRINT(WARNING, "No-drop mode is not supported");
714 
715 	/* Validate number of receive descriptors */
716 	if (!rte_is_power_of_2(nb_desc) ||
717 			nb_desc < IONIC_MIN_RING_DESC ||
718 			nb_desc > IONIC_MAX_RING_DESC) {
719 		IONIC_PRINT(ERR,
720 			"Bad descriptor count (%u) for queue %u (min: %u)",
721 			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
722 		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
723 	}
724 
725 	/* Free memory prior to re-allocation if needed... */
726 	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
727 		ionic_dev_rx_queue_release(eth_dev, rx_queue_id);
728 		eth_dev->data->rx_queues[rx_queue_id] = NULL;
729 	}
730 
731 	eth_dev->data->rx_queue_state[rx_queue_id] =
732 		RTE_ETH_QUEUE_STATE_STOPPED;
733 
734 	err = ionic_rx_qcq_alloc(lif, socket_id, rx_queue_id, nb_desc,
735 			&rxq);
736 	if (err) {
737 		IONIC_PRINT(ERR, "Queue %d allocation failure", rx_queue_id);
738 		return -EINVAL;
739 	}
740 
741 	rxq->mb_pool = mp;
742 
743 	/*
744 	 * Note: the interface does not currently support
745 	 * RTE_ETH_RX_OFFLOAD_KEEP_CRC.  Once the adapter is able to keep the
746 	 * CRC, also account for RTE_ETHER_CRC_LEN and subtract it from the
747 	 * length of all received packets:
748 	 * if (eth_dev->data->dev_conf.rxmode.offloads &
749 	 *     RTE_ETH_RX_OFFLOAD_KEEP_CRC)
750 	 *   rxq->crc_len = RTE_ETHER_CRC_LEN;
751 	 */
752 
753 	/* Do not start queue with rte_eth_dev_start() */
754 	if (rx_conf->rx_deferred_start)
755 		rxq->flags |= IONIC_QCQ_F_DEFERRED;
756 
757 	eth_dev->data->rx_queues[rx_queue_id] = rxq;
758 
759 	return 0;
760 }
761 
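/*
 * Service one received packet.  The completion is validated (status and
 * length against the maximum frame size), the mbuf chain lengths are set
 * from the completion length and the mempool buffer size, and the RSS
 * hash, VLAN, checksum flags and packet type are filled in before the mbuf
 * is handed to the caller via the rx service argument.  Bad or unwanted
 * packets are recycled back into the ring; with no service argument the
 * buffer is simply freed (flush).
 */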
762 static __rte_always_inline void
763 ionic_rx_clean(struct ionic_rx_qcq *rxq,
764 		uint32_t q_desc_index, uint32_t cq_desc_index,
765 		void *service_cb_arg)
766 {
767 	struct ionic_queue *q = &rxq->qcq.q;
768 	struct ionic_cq *cq = &rxq->qcq.cq;
769 	struct ionic_rxq_comp *cq_desc_base = cq->base;
770 	struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
771 	struct rte_mbuf *rxm, *rxm_seg;
772 	uint32_t max_frame_size =
773 		rxq->qcq.lif->eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
774 	uint64_t pkt_flags = 0;
775 	uint32_t pkt_type;
776 	struct ionic_rx_stats *stats = &rxq->stats;
777 	struct ionic_rx_service *recv_args = (struct ionic_rx_service *)
778 		service_cb_arg;
779 	uint32_t buf_size = (uint16_t)
780 		(rte_pktmbuf_data_room_size(rxq->mb_pool) -
781 		RTE_PKTMBUF_HEADROOM);
782 	uint32_t left;
783 	void **info;
784 
785 	assert(q_desc_index == cq_desc->comp_index);
786 
787 	info = IONIC_INFO_PTR(q, cq_desc->comp_index);
788 
789 	rxm = info[0];
790 
791 	if (!recv_args) {
792 		stats->no_cb_arg++;
793 		/* Flush */
794 		rte_pktmbuf_free(rxm);
795 		/*
796 		 * Note: rte_mempool_put is faster with no segs
797 		 * rte_mempool_put(rxq->mb_pool, rxm);
798 		 */
799 		return;
800 	}
801 
802 	if (cq_desc->status) {
803 		stats->bad_cq_status++;
804 		ionic_rx_recycle(q, q_desc_index, rxm);
805 		return;
806 	}
807 
808 	if (recv_args->nb_rx >= recv_args->nb_pkts) {
809 		stats->no_room++;
810 		ionic_rx_recycle(q, q_desc_index, rxm);
811 		return;
812 	}
813 
814 	if (cq_desc->len > max_frame_size ||
815 			cq_desc->len == 0) {
816 		stats->bad_len++;
817 		ionic_rx_recycle(q, q_desc_index, rxm);
818 		return;
819 	}
820 
821 	rxm->data_off = RTE_PKTMBUF_HEADROOM;
822 	rte_prefetch1((char *)rxm->buf_addr + rxm->data_off);
823 	rxm->nb_segs = 1; /* cq_desc->num_sg_elems */
824 	rxm->pkt_len = cq_desc->len;
825 	rxm->port = rxq->qcq.lif->port_id;
826 
827 	left = cq_desc->len;
828 
829 	rxm->data_len = RTE_MIN(buf_size, left);
830 	left -= rxm->data_len;
831 
832 	rxm_seg = rxm->next;
833 	while (rxm_seg && left) {
834 		rxm_seg->data_len = RTE_MIN(buf_size, left);
835 		left -= rxm_seg->data_len;
836 
837 		rxm_seg = rxm_seg->next;
838 		rxm->nb_segs++;
839 	}
840 
841 	/* RSS */
842 	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
843 	rxm->hash.rss = cq_desc->rss_hash;
844 
845 	/* Vlan Strip */
846 	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
847 		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
848 		rxm->vlan_tci = cq_desc->vlan_tci;
849 	}
850 
851 	/* Checksum */
852 	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
853 		if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
854 			pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
855 		else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
856 			pkt_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
857 
858 		if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
859 			(cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
860 			pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
861 		else if ((cq_desc->csum_flags &
862 				IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
863 				(cq_desc->csum_flags &
864 				IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
865 			pkt_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
866 	}
867 
868 	rxm->ol_flags = pkt_flags;
869 
870 	/* Packet Type */
871 	switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
872 	case IONIC_PKT_TYPE_IPV4:
873 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
874 		break;
875 	case IONIC_PKT_TYPE_IPV6:
876 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
877 		break;
878 	case IONIC_PKT_TYPE_IPV4_TCP:
879 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
880 			RTE_PTYPE_L4_TCP;
881 		break;
882 	case IONIC_PKT_TYPE_IPV6_TCP:
883 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
884 			RTE_PTYPE_L4_TCP;
885 		break;
886 	case IONIC_PKT_TYPE_IPV4_UDP:
887 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
888 			RTE_PTYPE_L4_UDP;
889 		break;
890 	case IONIC_PKT_TYPE_IPV6_UDP:
891 		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
892 			RTE_PTYPE_L4_UDP;
893 		break;
894 	default:
895 		{
896 			struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
897 				struct rte_ether_hdr *);
898 			uint16_t ether_type = eth_h->ether_type;
899 			if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
900 				pkt_type = RTE_PTYPE_L2_ETHER_ARP;
901 			else
902 				pkt_type = RTE_PTYPE_UNKNOWN;
903 			stats->mtods++;
904 			break;
905 		}
906 	}
907 
908 	rxm->packet_type = pkt_type;
909 
910 	recv_args->rx_pkts[recv_args->nb_rx] = rxm;
911 	recv_args->nb_rx++;
912 
913 	stats->packets++;
914 	stats->bytes += rxm->pkt_len;
915 }
916 
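/*
 * Re-post the buffer of a dropped packet: copy the original descriptor's
 * address and length into the descriptor at the head of the ring, record
 * the mbuf, advance the head and ring the doorbell.
 */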
917 static void
918 ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
919 		 struct rte_mbuf *mbuf)
920 {
921 	struct ionic_rxq_desc *desc_base = q->base;
922 	struct ionic_rxq_desc *old = &desc_base[q_desc_index];
923 	struct ionic_rxq_desc *new = &desc_base[q->head_idx];
924 
925 	new->addr = old->addr;
926 	new->len = old->len;
927 
928 	q->info[q->head_idx] = mbuf;
929 
930 	q->head_idx = Q_NEXT_TO_POST(q, 1);
931 
932 	ionic_q_flush(q);
933 }
934 
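/*
 * Fill the receive ring with fresh mbufs.  One mbuf is allocated for every
 * free descriptor; if the target frame length does not fit in a single
 * mempool buffer, additional mbufs are chained through the SG descriptor
 * elements.  The doorbell is rung once after all descriptors are posted.
 */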
935 static __rte_always_inline int
936 ionic_rx_fill(struct ionic_rx_qcq *rxq, uint32_t len)
937 {
938 	struct ionic_queue *q = &rxq->qcq.q;
939 	struct ionic_rxq_desc *desc, *desc_base = q->base;
940 	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
941 	struct ionic_rxq_sg_elem *elem;
942 	void **info;
943 	rte_iova_t dma_addr;
944 	uint32_t i, j, nsegs, buf_size, size;
945 
946 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
947 		RTE_PKTMBUF_HEADROOM);
948 
949 	/* Initialize software ring entries */
950 	for (i = ionic_q_space_avail(q); i; i--) {
951 		struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
952 		struct rte_mbuf *prev_rxm_seg;
953 
954 		if (rxm == NULL) {
955 			IONIC_PRINT(ERR, "RX mbuf alloc failed");
956 			return -ENOMEM;
957 		}
958 
959 		info = IONIC_INFO_PTR(q, q->head_idx);
960 
961 		nsegs = (len + buf_size - 1) / buf_size;
962 
963 		desc = &desc_base[q->head_idx];
964 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
965 		desc->addr = dma_addr;
966 		desc->len = buf_size;
967 		size = buf_size;
968 		desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
969 			IONIC_RXQ_DESC_OPCODE_SIMPLE;
970 		rxm->next = NULL;
971 
972 		prev_rxm_seg = rxm;
973 		sg_desc = &sg_desc_base[q->head_idx];
974 		elem = sg_desc->elems;
975 		for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
976 			struct rte_mbuf *rxm_seg;
977 			rte_iova_t data_iova;
978 
979 			rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
980 			if (rxm_seg == NULL) {
981 				IONIC_PRINT(ERR, "RX mbuf alloc failed");
982 				return -ENOMEM;
983 			}
984 
985 			data_iova = rte_mbuf_data_iova(rxm_seg);
986 			dma_addr = rte_cpu_to_le_64(data_iova);
987 			elem->addr = dma_addr;
988 			elem->len = buf_size;
989 			size += buf_size;
990 			elem++;
991 			rxm_seg->next = NULL;
992 			prev_rxm_seg->next = rxm_seg;
993 			prev_rxm_seg = rxm_seg;
994 		}
995 
996 		if (size < len)
997 			IONIC_PRINT(ERR, "Rx SG size is not sufficient (%u < %u)",
998 				size, len);
999 
1000 		info[0] = rxm;
1001 
1002 		q->head_idx = Q_NEXT_TO_POST(q, 1);
1003 	}
1004 
1005 	ionic_q_flush(q);
1006 
1007 	return 0;
1008 }
1009 
1010 /*
1011  * Start Receive Units for specified queue.
1012  */
1013 int __rte_cold
1014 ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
1015 {
1016 	uint32_t frame_size = eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
1017 	uint8_t *rx_queue_state = eth_dev->data->rx_queue_state;
1018 	struct ionic_rx_qcq *rxq;
1019 	int err;
1020 
1021 	if (rx_queue_state[rx_queue_id] == RTE_ETH_QUEUE_STATE_STARTED) {
1022 		IONIC_PRINT(DEBUG, "RX queue %u already started",
1023 			rx_queue_id);
1024 		return 0;
1025 	}
1026 
1027 	rxq = eth_dev->data->rx_queues[rx_queue_id];
1028 
1029 	IONIC_PRINT(DEBUG, "Starting RX queue %u, %u descs (size: %u)",
1030 		rx_queue_id, rxq->qcq.q.num_descs, frame_size);
1031 
1032 	if (!(rxq->flags & IONIC_QCQ_F_INITED)) {
1033 		err = ionic_lif_rxq_init(rxq);
1034 		if (err)
1035 			return err;
1036 	} else {
1037 		ionic_qcq_enable(&rxq->qcq);
1038 	}
1039 
1040 	/* Allocate buffers for descriptor rings */
1041 	if (ionic_rx_fill(rxq, frame_size) != 0) {
1042 		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
1043 			rx_queue_id);
1044 		return -1;
1045 	}
1046 
1047 	rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
1048 
1049 	return 0;
1050 }
1051 
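/*
 * Drain up to work_to_do completions from the Rx completion queue.  Each
 * completion may cover several ring descriptors, so descriptors are
 * serviced up to and including the reported completion index, with
 * ionic_rx_clean() invoked for every one.  Passing a NULL service argument
 * discards the buffers (used to flush the queue on stop).
 */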
1052 static __rte_always_inline void
1053 ionic_rxq_service(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
1054 		void *service_cb_arg)
1055 {
1056 	struct ionic_cq *cq = &rxq->qcq.cq;
1057 	struct ionic_queue *q = &rxq->qcq.q;
1058 	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
1059 	bool more;
1060 	uint32_t curr_q_tail_idx, curr_cq_tail_idx;
1061 	uint32_t work_done = 0;
1062 
1063 	if (work_to_do == 0)
1064 		return;
1065 
1066 	cq_desc = &cq_desc_base[cq->tail_idx];
1067 	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
1068 		curr_cq_tail_idx = cq->tail_idx;
1069 		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
1070 
1071 		if (cq->tail_idx == 0)
1072 			cq->done_color = !cq->done_color;
1073 
1074 		/* Prefetch the next 4 descriptors */
1075 		if ((cq->tail_idx & 0x3) == 0)
1076 			rte_prefetch0(&cq_desc_base[cq->tail_idx]);
1077 
1078 		do {
1079 			more = (q->tail_idx != cq_desc->comp_index);
1080 
1081 			curr_q_tail_idx = q->tail_idx;
1082 			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
1083 
1084 			/* Prefetch the next 4 descriptors */
1085 			if ((q->tail_idx & 0x3) == 0)
1086 				/* q desc info */
1087 				rte_prefetch0(&q->info[q->tail_idx]);
1088 
1089 			ionic_rx_clean(rxq, curr_q_tail_idx, curr_cq_tail_idx,
1090 				service_cb_arg);
1091 
1092 		} while (more);
1093 
1094 		if (++work_done == work_to_do)
1095 			break;
1096 
1097 		cq_desc = &cq_desc_base[cq->tail_idx];
1098 	}
1099 }
1100 
1101 /*
1102  * Stop Receive Units for specified queue.
1103  */
1104 int __rte_cold
1105 ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
1106 {
1107 	struct ionic_rx_qcq *rxq;
1108 
1109 	IONIC_PRINT(DEBUG, "Stopping RX queue %u", rx_queue_id);
1110 
1111 	rxq = eth_dev->data->rx_queues[rx_queue_id];
1112 
1113 	eth_dev->data->rx_queue_state[rx_queue_id] =
1114 		RTE_ETH_QUEUE_STATE_STOPPED;
1115 
1116 	ionic_qcq_disable(&rxq->qcq);
1117 
1118 	/* Flush */
1119 	ionic_rxq_service(rxq, -1, NULL);
1120 
1121 	return 0;
1122 }
1123 
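/*
 * Burst receive.  Up to nb_pkts completions are serviced into rx_pkts and
 * the descriptor ring is then refilled with fresh buffers.
 *
 * This is the Rx burst handler installed for the device; an application
 * normally reaches it through the ethdev API, e.g. (illustrative):
 *   nb_rx = rte_eth_rx_burst(port_id, queue_id, rx_pkts, nb_pkts);
 */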
1124 uint16_t
1125 ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
1126 		uint16_t nb_pkts)
1127 {
1128 	struct ionic_rx_qcq *rxq = rx_queue;
1129 	uint32_t frame_size =
1130 		rxq->qcq.lif->eth_dev->data->mtu + RTE_ETHER_HDR_LEN;
1131 	struct ionic_rx_service service_cb_arg;
1132 
1133 	service_cb_arg.rx_pkts = rx_pkts;
1134 	service_cb_arg.nb_pkts = nb_pkts;
1135 	service_cb_arg.nb_rx = 0;
1136 
1137 	ionic_rxq_service(rxq, nb_pkts, &service_cb_arg);
1138 
1139 	ionic_rx_fill(rxq, frame_size);
1140 
1141 	return service_cb_arg.nb_rx;
1142 }
1143