xref: /dpdk/drivers/net/ionic/ionic_rxtx.c (revision a27d901331da7a0d6959cb2b3a90a017f2463103)
1*a27d9013SAlfredo Cardigliano /* SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0)
2*a27d9013SAlfredo Cardigliano  * Copyright(c) 2018-2019 Pensando Systems, Inc. All rights reserved.
3*a27d9013SAlfredo Cardigliano  */
4*a27d9013SAlfredo Cardigliano 
5*a27d9013SAlfredo Cardigliano #include <sys/queue.h>
6*a27d9013SAlfredo Cardigliano #include <stdio.h>
7*a27d9013SAlfredo Cardigliano #include <stdlib.h>
8*a27d9013SAlfredo Cardigliano #include <string.h>
9*a27d9013SAlfredo Cardigliano #include <errno.h>
10*a27d9013SAlfredo Cardigliano #include <stdint.h>
11*a27d9013SAlfredo Cardigliano #include <stdarg.h>
12*a27d9013SAlfredo Cardigliano #include <unistd.h>
13*a27d9013SAlfredo Cardigliano #include <inttypes.h>
14*a27d9013SAlfredo Cardigliano 
15*a27d9013SAlfredo Cardigliano #include <rte_byteorder.h>
16*a27d9013SAlfredo Cardigliano #include <rte_common.h>
17*a27d9013SAlfredo Cardigliano #include <rte_cycles.h>
18*a27d9013SAlfredo Cardigliano #include <rte_log.h>
19*a27d9013SAlfredo Cardigliano #include <rte_debug.h>
20*a27d9013SAlfredo Cardigliano #include <rte_interrupts.h>
21*a27d9013SAlfredo Cardigliano #include <rte_pci.h>
22*a27d9013SAlfredo Cardigliano #include <rte_memory.h>
23*a27d9013SAlfredo Cardigliano #include <rte_memzone.h>
24*a27d9013SAlfredo Cardigliano #include <rte_launch.h>
25*a27d9013SAlfredo Cardigliano #include <rte_eal.h>
26*a27d9013SAlfredo Cardigliano #include <rte_per_lcore.h>
27*a27d9013SAlfredo Cardigliano #include <rte_lcore.h>
28*a27d9013SAlfredo Cardigliano #include <rte_atomic.h>
29*a27d9013SAlfredo Cardigliano #include <rte_branch_prediction.h>
30*a27d9013SAlfredo Cardigliano #include <rte_mempool.h>
31*a27d9013SAlfredo Cardigliano #include <rte_malloc.h>
32*a27d9013SAlfredo Cardigliano #include <rte_mbuf.h>
33*a27d9013SAlfredo Cardigliano #include <rte_ether.h>
34*a27d9013SAlfredo Cardigliano #include <rte_ethdev_driver.h>
35*a27d9013SAlfredo Cardigliano #include <rte_prefetch.h>
36*a27d9013SAlfredo Cardigliano #include <rte_udp.h>
37*a27d9013SAlfredo Cardigliano #include <rte_tcp.h>
38*a27d9013SAlfredo Cardigliano #include <rte_sctp.h>
39*a27d9013SAlfredo Cardigliano #include <rte_string_fns.h>
40*a27d9013SAlfredo Cardigliano #include <rte_errno.h>
41*a27d9013SAlfredo Cardigliano #include <rte_ip.h>
42*a27d9013SAlfredo Cardigliano #include <rte_net.h>
43*a27d9013SAlfredo Cardigliano 
44*a27d9013SAlfredo Cardigliano #include "ionic_logs.h"
45*a27d9013SAlfredo Cardigliano #include "ionic_mac_api.h"
46*a27d9013SAlfredo Cardigliano #include "ionic_ethdev.h"
47*a27d9013SAlfredo Cardigliano #include "ionic_lif.h"
48*a27d9013SAlfredo Cardigliano #include "ionic_rxtx.h"
49*a27d9013SAlfredo Cardigliano 
50*a27d9013SAlfredo Cardigliano #define IONIC_RX_RING_DOORBELL_STRIDE		(32 - 1)
51*a27d9013SAlfredo Cardigliano 
52*a27d9013SAlfredo Cardigliano /*********************************************************************
53*a27d9013SAlfredo Cardigliano  *
54*a27d9013SAlfredo Cardigliano  *  TX functions
55*a27d9013SAlfredo Cardigliano  *
56*a27d9013SAlfredo Cardigliano  **********************************************************************/
57*a27d9013SAlfredo Cardigliano 
58*a27d9013SAlfredo Cardigliano void
59*a27d9013SAlfredo Cardigliano ionic_txq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
60*a27d9013SAlfredo Cardigliano 		struct rte_eth_txq_info *qinfo)
61*a27d9013SAlfredo Cardigliano {
62*a27d9013SAlfredo Cardigliano 	struct ionic_qcq *txq = dev->data->tx_queues[queue_id];
63*a27d9013SAlfredo Cardigliano 	struct ionic_queue *q = &txq->q;
64*a27d9013SAlfredo Cardigliano 
65*a27d9013SAlfredo Cardigliano 	qinfo->nb_desc = q->num_descs;
66*a27d9013SAlfredo Cardigliano 	qinfo->conf.offloads = txq->offloads;
67*a27d9013SAlfredo Cardigliano 	qinfo->conf.tx_deferred_start = txq->deferred_start;
68*a27d9013SAlfredo Cardigliano }
69*a27d9013SAlfredo Cardigliano 
70*a27d9013SAlfredo Cardigliano static inline void __attribute__((cold))
71*a27d9013SAlfredo Cardigliano ionic_tx_flush(struct ionic_cq *cq)
72*a27d9013SAlfredo Cardigliano {
73*a27d9013SAlfredo Cardigliano 	struct ionic_queue *q = cq->bound_q;
74*a27d9013SAlfredo Cardigliano 	struct ionic_desc_info *q_desc_info;
75*a27d9013SAlfredo Cardigliano 	struct rte_mbuf *txm, *next;
76*a27d9013SAlfredo Cardigliano 	struct ionic_txq_comp *cq_desc_base = cq->base;
77*a27d9013SAlfredo Cardigliano 	struct ionic_txq_comp *cq_desc;
78*a27d9013SAlfredo Cardigliano 	u_int32_t comp_index = (u_int32_t)-1;
79*a27d9013SAlfredo Cardigliano 
80*a27d9013SAlfredo Cardigliano 	cq_desc = &cq_desc_base[cq->tail_idx];
81*a27d9013SAlfredo Cardigliano 	while (color_match(cq_desc->color, cq->done_color)) {
82*a27d9013SAlfredo Cardigliano 		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
83*a27d9013SAlfredo Cardigliano 
84*a27d9013SAlfredo Cardigliano 		/* Prefetch the next 4 descriptors (not really useful here) */
85*a27d9013SAlfredo Cardigliano 		if ((cq->tail_idx & 0x3) == 0)
86*a27d9013SAlfredo Cardigliano 			rte_prefetch0(&cq_desc_base[cq->tail_idx]);
87*a27d9013SAlfredo Cardigliano 
88*a27d9013SAlfredo Cardigliano 		if (cq->tail_idx == 0)
89*a27d9013SAlfredo Cardigliano 			cq->done_color = !cq->done_color;
90*a27d9013SAlfredo Cardigliano 
91*a27d9013SAlfredo Cardigliano 		comp_index = cq_desc->comp_index;
92*a27d9013SAlfredo Cardigliano 
93*a27d9013SAlfredo Cardigliano 		cq_desc = &cq_desc_base[cq->tail_idx];
94*a27d9013SAlfredo Cardigliano 	}
95*a27d9013SAlfredo Cardigliano 
96*a27d9013SAlfredo Cardigliano 	if (comp_index != (u_int32_t)-1) {
97*a27d9013SAlfredo Cardigliano 		while (q->tail_idx != comp_index) {
98*a27d9013SAlfredo Cardigliano 			q_desc_info = &q->info[q->tail_idx];
99*a27d9013SAlfredo Cardigliano 
100*a27d9013SAlfredo Cardigliano 			q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
101*a27d9013SAlfredo Cardigliano 
102*a27d9013SAlfredo Cardigliano 			/* Prefetch the next 4 descriptors */
103*a27d9013SAlfredo Cardigliano 			if ((q->tail_idx & 0x3) == 0)
104*a27d9013SAlfredo Cardigliano 				/* q desc info */
105*a27d9013SAlfredo Cardigliano 				rte_prefetch0(&q->info[q->tail_idx]);
106*a27d9013SAlfredo Cardigliano 
107*a27d9013SAlfredo Cardigliano 			/*
108*a27d9013SAlfredo Cardigliano 			 * Note: you can just use rte_pktmbuf_free,
109*a27d9013SAlfredo Cardigliano 			 * but this loop is faster
110*a27d9013SAlfredo Cardigliano 			 */
111*a27d9013SAlfredo Cardigliano 			txm = q_desc_info->cb_arg;
112*a27d9013SAlfredo Cardigliano 			while (txm != NULL) {
113*a27d9013SAlfredo Cardigliano 				next = txm->next;
114*a27d9013SAlfredo Cardigliano 				rte_pktmbuf_free_seg(txm);
115*a27d9013SAlfredo Cardigliano 				txm = next;
116*a27d9013SAlfredo Cardigliano 			}
117*a27d9013SAlfredo Cardigliano 		}
118*a27d9013SAlfredo Cardigliano 	}
119*a27d9013SAlfredo Cardigliano }
120*a27d9013SAlfredo Cardigliano 
void __attribute__((cold))
ionic_dev_tx_queue_release(void *tx_queue)
{
	IONIC_PRINT_CALL();

	/* Free the queue/completion-queue pair allocated at setup time. */
	ionic_qcq_free((struct ionic_qcq *)tx_queue);
}
130*a27d9013SAlfredo Cardigliano 
131*a27d9013SAlfredo Cardigliano int __attribute__((cold))
132*a27d9013SAlfredo Cardigliano ionic_dev_tx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
133*a27d9013SAlfredo Cardigliano {
134*a27d9013SAlfredo Cardigliano 	struct ionic_qcq *txq;
135*a27d9013SAlfredo Cardigliano 
136*a27d9013SAlfredo Cardigliano 	IONIC_PRINT_CALL();
137*a27d9013SAlfredo Cardigliano 
138*a27d9013SAlfredo Cardigliano 	txq = eth_dev->data->tx_queues[tx_queue_id];
139*a27d9013SAlfredo Cardigliano 
140*a27d9013SAlfredo Cardigliano 	/*
141*a27d9013SAlfredo Cardigliano 	 * Note: we should better post NOP Tx desc and wait for its completion
142*a27d9013SAlfredo Cardigliano 	 * before disabling Tx queue
143*a27d9013SAlfredo Cardigliano 	 */
144*a27d9013SAlfredo Cardigliano 
145*a27d9013SAlfredo Cardigliano 	ionic_qcq_disable(txq);
146*a27d9013SAlfredo Cardigliano 
147*a27d9013SAlfredo Cardigliano 	ionic_tx_flush(&txq->cq);
148*a27d9013SAlfredo Cardigliano 
149*a27d9013SAlfredo Cardigliano 	ionic_lif_txq_deinit(txq);
150*a27d9013SAlfredo Cardigliano 
151*a27d9013SAlfredo Cardigliano 	eth_dev->data->tx_queue_state[tx_queue_id] =
152*a27d9013SAlfredo Cardigliano 		RTE_ETH_QUEUE_STATE_STOPPED;
153*a27d9013SAlfredo Cardigliano 
154*a27d9013SAlfredo Cardigliano 	return 0;
155*a27d9013SAlfredo Cardigliano }
156*a27d9013SAlfredo Cardigliano 
157*a27d9013SAlfredo Cardigliano int __attribute__((cold))
158*a27d9013SAlfredo Cardigliano ionic_dev_tx_queue_setup(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id,
159*a27d9013SAlfredo Cardigliano 		uint16_t nb_desc, uint32_t socket_id __rte_unused,
160*a27d9013SAlfredo Cardigliano 		const struct rte_eth_txconf *tx_conf)
161*a27d9013SAlfredo Cardigliano {
162*a27d9013SAlfredo Cardigliano 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
163*a27d9013SAlfredo Cardigliano 	struct ionic_qcq *txq;
164*a27d9013SAlfredo Cardigliano 	uint64_t offloads;
165*a27d9013SAlfredo Cardigliano 	int err;
166*a27d9013SAlfredo Cardigliano 
167*a27d9013SAlfredo Cardigliano 	IONIC_PRINT_CALL();
168*a27d9013SAlfredo Cardigliano 
169*a27d9013SAlfredo Cardigliano 	IONIC_PRINT(DEBUG, "Configuring TX queue %u with %u buffers",
170*a27d9013SAlfredo Cardigliano 		tx_queue_id, nb_desc);
171*a27d9013SAlfredo Cardigliano 
172*a27d9013SAlfredo Cardigliano 	if (tx_queue_id >= lif->ntxqcqs) {
173*a27d9013SAlfredo Cardigliano 		IONIC_PRINT(DEBUG, "Queue index %u not available "
174*a27d9013SAlfredo Cardigliano 			"(max %u queues)",
175*a27d9013SAlfredo Cardigliano 			tx_queue_id, lif->ntxqcqs);
176*a27d9013SAlfredo Cardigliano 		return -EINVAL;
177*a27d9013SAlfredo Cardigliano 	}
178*a27d9013SAlfredo Cardigliano 
179*a27d9013SAlfredo Cardigliano 	offloads = tx_conf->offloads | eth_dev->data->dev_conf.txmode.offloads;
180*a27d9013SAlfredo Cardigliano 
181*a27d9013SAlfredo Cardigliano 	/* Validate number of receive descriptors */
182*a27d9013SAlfredo Cardigliano 	if (!rte_is_power_of_2(nb_desc) || nb_desc < IONIC_MIN_RING_DESC)
183*a27d9013SAlfredo Cardigliano 		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
184*a27d9013SAlfredo Cardigliano 
185*a27d9013SAlfredo Cardigliano 	/* Free memory prior to re-allocation if needed... */
186*a27d9013SAlfredo Cardigliano 	if (eth_dev->data->tx_queues[tx_queue_id] != NULL) {
187*a27d9013SAlfredo Cardigliano 		void *tx_queue = eth_dev->data->tx_queues[tx_queue_id];
188*a27d9013SAlfredo Cardigliano 		ionic_dev_tx_queue_release(tx_queue);
189*a27d9013SAlfredo Cardigliano 		eth_dev->data->tx_queues[tx_queue_id] = NULL;
190*a27d9013SAlfredo Cardigliano 	}
191*a27d9013SAlfredo Cardigliano 
192*a27d9013SAlfredo Cardigliano 	err = ionic_tx_qcq_alloc(lif, tx_queue_id, nb_desc, &txq);
193*a27d9013SAlfredo Cardigliano 	if (err) {
194*a27d9013SAlfredo Cardigliano 		IONIC_PRINT(DEBUG, "Queue allocation failure");
195*a27d9013SAlfredo Cardigliano 		return -EINVAL;
196*a27d9013SAlfredo Cardigliano 	}
197*a27d9013SAlfredo Cardigliano 
198*a27d9013SAlfredo Cardigliano 	/* Do not start queue with rte_eth_dev_start() */
199*a27d9013SAlfredo Cardigliano 	txq->deferred_start = tx_conf->tx_deferred_start;
200*a27d9013SAlfredo Cardigliano 
201*a27d9013SAlfredo Cardigliano 	txq->offloads = offloads;
202*a27d9013SAlfredo Cardigliano 
203*a27d9013SAlfredo Cardigliano 	eth_dev->data->tx_queues[tx_queue_id] = txq;
204*a27d9013SAlfredo Cardigliano 
205*a27d9013SAlfredo Cardigliano 	return 0;
206*a27d9013SAlfredo Cardigliano }
207*a27d9013SAlfredo Cardigliano 
208*a27d9013SAlfredo Cardigliano /*
209*a27d9013SAlfredo Cardigliano  * Start Transmit Units for specified queue.
210*a27d9013SAlfredo Cardigliano  */
211*a27d9013SAlfredo Cardigliano int __attribute__((cold))
212*a27d9013SAlfredo Cardigliano ionic_dev_tx_queue_start(struct rte_eth_dev *eth_dev, uint16_t tx_queue_id)
213*a27d9013SAlfredo Cardigliano {
214*a27d9013SAlfredo Cardigliano 	struct ionic_qcq *txq;
215*a27d9013SAlfredo Cardigliano 	int err;
216*a27d9013SAlfredo Cardigliano 
217*a27d9013SAlfredo Cardigliano 	IONIC_PRINT_CALL();
218*a27d9013SAlfredo Cardigliano 
219*a27d9013SAlfredo Cardigliano 	txq = eth_dev->data->tx_queues[tx_queue_id];
220*a27d9013SAlfredo Cardigliano 
221*a27d9013SAlfredo Cardigliano 	err = ionic_lif_txq_init(txq);
222*a27d9013SAlfredo Cardigliano 	if (err)
223*a27d9013SAlfredo Cardigliano 		return err;
224*a27d9013SAlfredo Cardigliano 
225*a27d9013SAlfredo Cardigliano 	ionic_qcq_enable(txq);
226*a27d9013SAlfredo Cardigliano 
227*a27d9013SAlfredo Cardigliano 	eth_dev->data->tx_queue_state[tx_queue_id] =
228*a27d9013SAlfredo Cardigliano 		RTE_ETH_QUEUE_STATE_STARTED;
229*a27d9013SAlfredo Cardigliano 
230*a27d9013SAlfredo Cardigliano 	return 0;
231*a27d9013SAlfredo Cardigliano }
232*a27d9013SAlfredo Cardigliano 
233*a27d9013SAlfredo Cardigliano static void
234*a27d9013SAlfredo Cardigliano ionic_tx_tso_post(struct ionic_queue *q, struct ionic_txq_desc *desc,
235*a27d9013SAlfredo Cardigliano 		struct rte_mbuf *txm,
236*a27d9013SAlfredo Cardigliano 		rte_iova_t addr, uint8_t nsge, uint16_t len,
237*a27d9013SAlfredo Cardigliano 		uint32_t hdrlen, uint32_t mss,
238*a27d9013SAlfredo Cardigliano 		uint16_t vlan_tci, bool has_vlan,
239*a27d9013SAlfredo Cardigliano 		bool start, bool done)
240*a27d9013SAlfredo Cardigliano {
241*a27d9013SAlfredo Cardigliano 	uint8_t flags = 0;
242*a27d9013SAlfredo Cardigliano 	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
243*a27d9013SAlfredo Cardigliano 	flags |= start ? IONIC_TXQ_DESC_FLAG_TSO_SOT : 0;
244*a27d9013SAlfredo Cardigliano 	flags |= done ? IONIC_TXQ_DESC_FLAG_TSO_EOT : 0;
245*a27d9013SAlfredo Cardigliano 
246*a27d9013SAlfredo Cardigliano 	desc->cmd = encode_txq_desc_cmd(IONIC_TXQ_DESC_OPCODE_TSO,
247*a27d9013SAlfredo Cardigliano 		flags, nsge, addr);
248*a27d9013SAlfredo Cardigliano 	desc->len = len;
249*a27d9013SAlfredo Cardigliano 	desc->vlan_tci = vlan_tci;
250*a27d9013SAlfredo Cardigliano 	desc->hdr_len = hdrlen;
251*a27d9013SAlfredo Cardigliano 	desc->mss = mss;
252*a27d9013SAlfredo Cardigliano 
253*a27d9013SAlfredo Cardigliano 	ionic_q_post(q, done, NULL, done ? txm : NULL);
254*a27d9013SAlfredo Cardigliano }
255*a27d9013SAlfredo Cardigliano 
256*a27d9013SAlfredo Cardigliano static struct ionic_txq_desc *
257*a27d9013SAlfredo Cardigliano ionic_tx_tso_next(struct ionic_queue *q, struct ionic_txq_sg_elem **elem)
258*a27d9013SAlfredo Cardigliano {
259*a27d9013SAlfredo Cardigliano 	struct ionic_txq_desc *desc_base = q->base;
260*a27d9013SAlfredo Cardigliano 	struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
261*a27d9013SAlfredo Cardigliano 	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
262*a27d9013SAlfredo Cardigliano 	struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];
263*a27d9013SAlfredo Cardigliano 
264*a27d9013SAlfredo Cardigliano 	*elem = sg_desc->elems;
265*a27d9013SAlfredo Cardigliano 	return desc;
266*a27d9013SAlfredo Cardigliano }
267*a27d9013SAlfredo Cardigliano 
268*a27d9013SAlfredo Cardigliano static int
269*a27d9013SAlfredo Cardigliano ionic_tx_tso(struct ionic_queue *q, struct rte_mbuf *txm,
270*a27d9013SAlfredo Cardigliano 		uint64_t offloads __rte_unused, bool not_xmit_more)
271*a27d9013SAlfredo Cardigliano {
272*a27d9013SAlfredo Cardigliano 	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
273*a27d9013SAlfredo Cardigliano 	struct ionic_txq_desc *desc;
274*a27d9013SAlfredo Cardigliano 	struct ionic_txq_sg_elem *elem;
275*a27d9013SAlfredo Cardigliano 	struct rte_mbuf *txm_seg;
276*a27d9013SAlfredo Cardigliano 	uint64_t desc_addr = 0;
277*a27d9013SAlfredo Cardigliano 	uint16_t desc_len = 0;
278*a27d9013SAlfredo Cardigliano 	uint8_t desc_nsge;
279*a27d9013SAlfredo Cardigliano 	uint32_t hdrlen;
280*a27d9013SAlfredo Cardigliano 	uint32_t mss = txm->tso_segsz;
281*a27d9013SAlfredo Cardigliano 	uint32_t frag_left = 0;
282*a27d9013SAlfredo Cardigliano 	uint32_t left;
283*a27d9013SAlfredo Cardigliano 	uint32_t seglen;
284*a27d9013SAlfredo Cardigliano 	uint32_t len;
285*a27d9013SAlfredo Cardigliano 	uint32_t offset = 0;
286*a27d9013SAlfredo Cardigliano 	bool start, done;
287*a27d9013SAlfredo Cardigliano 	bool has_vlan = !!(txm->ol_flags & PKT_TX_VLAN_PKT);
288*a27d9013SAlfredo Cardigliano 	uint16_t vlan_tci = txm->vlan_tci;
289*a27d9013SAlfredo Cardigliano 
290*a27d9013SAlfredo Cardigliano 	hdrlen = txm->l2_len + txm->l3_len;
291*a27d9013SAlfredo Cardigliano 
292*a27d9013SAlfredo Cardigliano 	seglen = hdrlen + mss;
293*a27d9013SAlfredo Cardigliano 	left = txm->data_len;
294*a27d9013SAlfredo Cardigliano 
295*a27d9013SAlfredo Cardigliano 	desc = ionic_tx_tso_next(q, &elem);
296*a27d9013SAlfredo Cardigliano 	start = true;
297*a27d9013SAlfredo Cardigliano 
298*a27d9013SAlfredo Cardigliano 	/* Chop data up into desc segments */
299*a27d9013SAlfredo Cardigliano 
300*a27d9013SAlfredo Cardigliano 	while (left > 0) {
301*a27d9013SAlfredo Cardigliano 		len = RTE_MIN(seglen, left);
302*a27d9013SAlfredo Cardigliano 		frag_left = seglen - len;
303*a27d9013SAlfredo Cardigliano 		desc_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
304*a27d9013SAlfredo Cardigliano 		desc_len = len;
305*a27d9013SAlfredo Cardigliano 		desc_nsge = 0;
306*a27d9013SAlfredo Cardigliano 		left -= len;
307*a27d9013SAlfredo Cardigliano 		offset += len;
308*a27d9013SAlfredo Cardigliano 		if (txm->nb_segs > 1 && frag_left > 0)
309*a27d9013SAlfredo Cardigliano 			continue;
310*a27d9013SAlfredo Cardigliano 		done = (txm->nb_segs == 1 && left == 0);
311*a27d9013SAlfredo Cardigliano 		ionic_tx_tso_post(q, desc, txm,
312*a27d9013SAlfredo Cardigliano 			desc_addr, desc_nsge, desc_len,
313*a27d9013SAlfredo Cardigliano 			hdrlen, mss,
314*a27d9013SAlfredo Cardigliano 			vlan_tci, has_vlan,
315*a27d9013SAlfredo Cardigliano 			start, done && not_xmit_more);
316*a27d9013SAlfredo Cardigliano 		desc = ionic_tx_tso_next(q, &elem);
317*a27d9013SAlfredo Cardigliano 		start = false;
318*a27d9013SAlfredo Cardigliano 		seglen = mss;
319*a27d9013SAlfredo Cardigliano 	}
320*a27d9013SAlfredo Cardigliano 
321*a27d9013SAlfredo Cardigliano 	/* Chop frags into desc segments */
322*a27d9013SAlfredo Cardigliano 
323*a27d9013SAlfredo Cardigliano 	txm_seg = txm->next;
324*a27d9013SAlfredo Cardigliano 	while (txm_seg != NULL) {
325*a27d9013SAlfredo Cardigliano 		offset = 0;
326*a27d9013SAlfredo Cardigliano 		left = txm_seg->data_len;
327*a27d9013SAlfredo Cardigliano 		stats->frags++;
328*a27d9013SAlfredo Cardigliano 
329*a27d9013SAlfredo Cardigliano 		while (left > 0) {
330*a27d9013SAlfredo Cardigliano 			rte_iova_t data_iova;
331*a27d9013SAlfredo Cardigliano 			data_iova = rte_mbuf_data_iova(txm_seg);
332*a27d9013SAlfredo Cardigliano 			elem->addr = rte_cpu_to_le_64(data_iova) + offset;
333*a27d9013SAlfredo Cardigliano 			if (frag_left > 0) {
334*a27d9013SAlfredo Cardigliano 				len = RTE_MIN(frag_left, left);
335*a27d9013SAlfredo Cardigliano 				frag_left -= len;
336*a27d9013SAlfredo Cardigliano 				elem->len = len;
337*a27d9013SAlfredo Cardigliano 				elem++;
338*a27d9013SAlfredo Cardigliano 				desc_nsge++;
339*a27d9013SAlfredo Cardigliano 			} else {
340*a27d9013SAlfredo Cardigliano 				len = RTE_MIN(mss, left);
341*a27d9013SAlfredo Cardigliano 				frag_left = mss - len;
342*a27d9013SAlfredo Cardigliano 				data_iova = rte_mbuf_data_iova(txm_seg);
343*a27d9013SAlfredo Cardigliano 				desc_addr = rte_cpu_to_le_64(data_iova);
344*a27d9013SAlfredo Cardigliano 				desc_len = len;
345*a27d9013SAlfredo Cardigliano 				desc_nsge = 0;
346*a27d9013SAlfredo Cardigliano 			}
347*a27d9013SAlfredo Cardigliano 			left -= len;
348*a27d9013SAlfredo Cardigliano 			offset += len;
349*a27d9013SAlfredo Cardigliano 			if (txm_seg->next != NULL && frag_left > 0)
350*a27d9013SAlfredo Cardigliano 				continue;
351*a27d9013SAlfredo Cardigliano 			done = (txm_seg->next == NULL && left == 0);
352*a27d9013SAlfredo Cardigliano 			ionic_tx_tso_post(q, desc, txm_seg,
353*a27d9013SAlfredo Cardigliano 				desc_addr, desc_nsge, desc_len,
354*a27d9013SAlfredo Cardigliano 				hdrlen, mss,
355*a27d9013SAlfredo Cardigliano 				vlan_tci, has_vlan,
356*a27d9013SAlfredo Cardigliano 				start, done && not_xmit_more);
357*a27d9013SAlfredo Cardigliano 			desc = ionic_tx_tso_next(q, &elem);
358*a27d9013SAlfredo Cardigliano 			start = false;
359*a27d9013SAlfredo Cardigliano 		}
360*a27d9013SAlfredo Cardigliano 
361*a27d9013SAlfredo Cardigliano 		txm_seg = txm_seg->next;
362*a27d9013SAlfredo Cardigliano 	}
363*a27d9013SAlfredo Cardigliano 
364*a27d9013SAlfredo Cardigliano 	stats->tso++;
365*a27d9013SAlfredo Cardigliano 
366*a27d9013SAlfredo Cardigliano 	return 0;
367*a27d9013SAlfredo Cardigliano }
368*a27d9013SAlfredo Cardigliano 
369*a27d9013SAlfredo Cardigliano static int
370*a27d9013SAlfredo Cardigliano ionic_tx(struct ionic_queue *q, struct rte_mbuf *txm,
371*a27d9013SAlfredo Cardigliano 		uint64_t offloads __rte_unused, bool not_xmit_more)
372*a27d9013SAlfredo Cardigliano {
373*a27d9013SAlfredo Cardigliano 	struct ionic_txq_desc *desc_base = q->base;
374*a27d9013SAlfredo Cardigliano 	struct ionic_txq_sg_desc *sg_desc_base = q->sg_base;
375*a27d9013SAlfredo Cardigliano 	struct ionic_txq_desc *desc = &desc_base[q->head_idx];
376*a27d9013SAlfredo Cardigliano 	struct ionic_txq_sg_desc *sg_desc = &sg_desc_base[q->head_idx];
377*a27d9013SAlfredo Cardigliano 	struct ionic_txq_sg_elem *elem = sg_desc->elems;
378*a27d9013SAlfredo Cardigliano 	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
379*a27d9013SAlfredo Cardigliano 	struct rte_mbuf *txm_seg;
380*a27d9013SAlfredo Cardigliano 	bool has_vlan;
381*a27d9013SAlfredo Cardigliano 	uint64_t ol_flags = txm->ol_flags;
382*a27d9013SAlfredo Cardigliano 	uint64_t addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(txm));
383*a27d9013SAlfredo Cardigliano 	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
384*a27d9013SAlfredo Cardigliano 	uint8_t flags = 0;
385*a27d9013SAlfredo Cardigliano 
386*a27d9013SAlfredo Cardigliano 	has_vlan = (ol_flags & PKT_TX_VLAN_PKT);
387*a27d9013SAlfredo Cardigliano 
388*a27d9013SAlfredo Cardigliano 	flags |= has_vlan ? IONIC_TXQ_DESC_FLAG_VLAN : 0;
389*a27d9013SAlfredo Cardigliano 
390*a27d9013SAlfredo Cardigliano 	desc->cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
391*a27d9013SAlfredo Cardigliano 	desc->len = txm->data_len;
392*a27d9013SAlfredo Cardigliano 	desc->vlan_tci = txm->vlan_tci;
393*a27d9013SAlfredo Cardigliano 
394*a27d9013SAlfredo Cardigliano 	txm_seg = txm->next;
395*a27d9013SAlfredo Cardigliano 	while (txm_seg != NULL) {
396*a27d9013SAlfredo Cardigliano 		elem->len = txm_seg->data_len;
397*a27d9013SAlfredo Cardigliano 		elem->addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm_seg));
398*a27d9013SAlfredo Cardigliano 		stats->frags++;
399*a27d9013SAlfredo Cardigliano 		elem++;
400*a27d9013SAlfredo Cardigliano 		txm_seg = txm_seg->next;
401*a27d9013SAlfredo Cardigliano 	}
402*a27d9013SAlfredo Cardigliano 
403*a27d9013SAlfredo Cardigliano 	ionic_q_post(q, not_xmit_more, NULL, txm);
404*a27d9013SAlfredo Cardigliano 
405*a27d9013SAlfredo Cardigliano 	return 0;
406*a27d9013SAlfredo Cardigliano }
407*a27d9013SAlfredo Cardigliano 
/*
 * Burst transmit entry point. Reclaims completed descriptors, then posts
 * up to nb_pkts packets (TSO or plain) onto the ring; the doorbell is
 * rung by the 'last' flag on the final posted packet. Returns the number
 * of packets actually queued (0 if the ring lacks room for the whole
 * burst — all-or-nothing admission).
 */
uint16_t
ionic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_qcq *txq = (struct ionic_qcq *)tx_queue;
	struct ionic_queue *q = &txq->q;
	struct ionic_cq *cq = &txq->cq;
	struct ionic_tx_stats *stats = IONIC_Q_TO_TX_STATS(q);
	uint32_t next_q_head_idx;
	uint32_t bytes_tx = 0;
	uint16_t nb_tx = 0;
	int err;
	bool last;

	/* Cleaning old buffers */
	ionic_tx_flush(cq);

	/* Reject the whole burst if it cannot fit after the flush */
	if (unlikely(ionic_q_space_avail(q) < nb_pkts)) {
		stats->stop += nb_pkts;
		return 0;
	}

	while (nb_tx < nb_pkts) {
		/* 'last' tells the post path to ring the doorbell */
		last = (nb_tx == (nb_pkts - 1));

		/* Prefetch the upcoming descriptor/info every 4 slots */
		next_q_head_idx = (q->head_idx + 1) & (q->num_descs - 1);
		if ((next_q_head_idx & 0x3) == 0) {
			struct ionic_txq_desc *desc_base = q->base;
			rte_prefetch0(&desc_base[next_q_head_idx]);
			rte_prefetch0(&q->info[next_q_head_idx]);
		}

		if (tx_pkts[nb_tx]->ol_flags & PKT_TX_TCP_SEG)
			err = ionic_tx_tso(q, tx_pkts[nb_tx], txq->offloads,
				last);
		else
			err = ionic_tx(q, tx_pkts[nb_tx], txq->offloads, last);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			/* Ring the doorbell for what was already posted */
			if (nb_tx > 0)
				ionic_q_flush(q);
			break;
		}

		bytes_tx += tx_pkts[nb_tx]->pkt_len;
		nb_tx++;
	}

	stats->packets += nb_tx;
	stats->bytes += bytes_tx;

	return nb_tx;
}
461*a27d9013SAlfredo Cardigliano 
462*a27d9013SAlfredo Cardigliano /*********************************************************************
463*a27d9013SAlfredo Cardigliano  *
464*a27d9013SAlfredo Cardigliano  *  TX prep functions
465*a27d9013SAlfredo Cardigliano  *
466*a27d9013SAlfredo Cardigliano  **********************************************************************/
467*a27d9013SAlfredo Cardigliano 
/* Tx offload flags this PMD can honor */
#define IONIC_TX_OFFLOAD_MASK (	\
	PKT_TX_IPV4 |		\
	PKT_TX_IPV6 |		\
	PKT_TX_VLAN |		\
	PKT_TX_TCP_SEG |	\
	PKT_TX_L4_MASK)

/* Any requested offload outside the supported set fails tx_prepare */
#define IONIC_TX_OFFLOAD_NOTSUP_MASK \
	(PKT_TX_OFFLOAD_MASK ^ IONIC_TX_OFFLOAD_MASK)
477*a27d9013SAlfredo Cardigliano 
478*a27d9013SAlfredo Cardigliano uint16_t
479*a27d9013SAlfredo Cardigliano ionic_prep_pkts(void *tx_queue __rte_unused, struct rte_mbuf **tx_pkts,
480*a27d9013SAlfredo Cardigliano 		uint16_t nb_pkts)
481*a27d9013SAlfredo Cardigliano {
482*a27d9013SAlfredo Cardigliano 	struct rte_mbuf *txm;
483*a27d9013SAlfredo Cardigliano 	uint64_t offloads;
484*a27d9013SAlfredo Cardigliano 	int i = 0;
485*a27d9013SAlfredo Cardigliano 
486*a27d9013SAlfredo Cardigliano 	for (i = 0; i < nb_pkts; i++) {
487*a27d9013SAlfredo Cardigliano 		txm = tx_pkts[i];
488*a27d9013SAlfredo Cardigliano 
489*a27d9013SAlfredo Cardigliano 		if (txm->nb_segs > IONIC_TX_MAX_SG_ELEMS) {
490*a27d9013SAlfredo Cardigliano 			rte_errno = -EINVAL;
491*a27d9013SAlfredo Cardigliano 			break;
492*a27d9013SAlfredo Cardigliano 		}
493*a27d9013SAlfredo Cardigliano 
494*a27d9013SAlfredo Cardigliano 		offloads = txm->ol_flags;
495*a27d9013SAlfredo Cardigliano 
496*a27d9013SAlfredo Cardigliano 		if (offloads & IONIC_TX_OFFLOAD_NOTSUP_MASK) {
497*a27d9013SAlfredo Cardigliano 			rte_errno = -ENOTSUP;
498*a27d9013SAlfredo Cardigliano 			break;
499*a27d9013SAlfredo Cardigliano 		}
500*a27d9013SAlfredo Cardigliano 	}
501*a27d9013SAlfredo Cardigliano 
502*a27d9013SAlfredo Cardigliano 	return i;
503*a27d9013SAlfredo Cardigliano }
504*a27d9013SAlfredo Cardigliano 
505*a27d9013SAlfredo Cardigliano /*********************************************************************
506*a27d9013SAlfredo Cardigliano  *
507*a27d9013SAlfredo Cardigliano  *  RX functions
508*a27d9013SAlfredo Cardigliano  *
509*a27d9013SAlfredo Cardigliano  **********************************************************************/
510*a27d9013SAlfredo Cardigliano 
511*a27d9013SAlfredo Cardigliano static void ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
512*a27d9013SAlfredo Cardigliano 		struct rte_mbuf *mbuf);
513*a27d9013SAlfredo Cardigliano 
514*a27d9013SAlfredo Cardigliano void
515*a27d9013SAlfredo Cardigliano ionic_rxq_info_get(struct rte_eth_dev *dev, uint16_t queue_id,
516*a27d9013SAlfredo Cardigliano 		struct rte_eth_rxq_info *qinfo)
517*a27d9013SAlfredo Cardigliano {
518*a27d9013SAlfredo Cardigliano 	struct ionic_qcq *rxq = dev->data->rx_queues[queue_id];
519*a27d9013SAlfredo Cardigliano 	struct ionic_queue *q = &rxq->q;
520*a27d9013SAlfredo Cardigliano 
521*a27d9013SAlfredo Cardigliano 	qinfo->mp = rxq->mb_pool;
522*a27d9013SAlfredo Cardigliano 	qinfo->scattered_rx = dev->data->scattered_rx;
523*a27d9013SAlfredo Cardigliano 	qinfo->nb_desc = q->num_descs;
524*a27d9013SAlfredo Cardigliano 	qinfo->conf.rx_deferred_start = rxq->deferred_start;
525*a27d9013SAlfredo Cardigliano 	qinfo->conf.offloads = rxq->offloads;
526*a27d9013SAlfredo Cardigliano }
527*a27d9013SAlfredo Cardigliano 
528*a27d9013SAlfredo Cardigliano static void __attribute__((cold))
529*a27d9013SAlfredo Cardigliano ionic_rx_empty(struct ionic_queue *q)
530*a27d9013SAlfredo Cardigliano {
531*a27d9013SAlfredo Cardigliano 	struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
532*a27d9013SAlfredo Cardigliano 	struct ionic_desc_info *cur;
533*a27d9013SAlfredo Cardigliano 	struct rte_mbuf *mbuf;
534*a27d9013SAlfredo Cardigliano 
535*a27d9013SAlfredo Cardigliano 	while (q->tail_idx != q->head_idx) {
536*a27d9013SAlfredo Cardigliano 		cur = &q->info[q->tail_idx];
537*a27d9013SAlfredo Cardigliano 		mbuf = cur->cb_arg;
538*a27d9013SAlfredo Cardigliano 		rte_mempool_put(rxq->mb_pool, mbuf);
539*a27d9013SAlfredo Cardigliano 
540*a27d9013SAlfredo Cardigliano 		q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
541*a27d9013SAlfredo Cardigliano 	}
542*a27d9013SAlfredo Cardigliano }
543*a27d9013SAlfredo Cardigliano 
544*a27d9013SAlfredo Cardigliano void __attribute__((cold))
545*a27d9013SAlfredo Cardigliano ionic_dev_rx_queue_release(void *rx_queue)
546*a27d9013SAlfredo Cardigliano {
547*a27d9013SAlfredo Cardigliano 	struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
548*a27d9013SAlfredo Cardigliano 
549*a27d9013SAlfredo Cardigliano 	IONIC_PRINT_CALL();
550*a27d9013SAlfredo Cardigliano 
551*a27d9013SAlfredo Cardigliano 	ionic_rx_empty(&rxq->q);
552*a27d9013SAlfredo Cardigliano 
553*a27d9013SAlfredo Cardigliano 	ionic_qcq_free(rxq);
554*a27d9013SAlfredo Cardigliano }
555*a27d9013SAlfredo Cardigliano 
556*a27d9013SAlfredo Cardigliano int __attribute__((cold))
557*a27d9013SAlfredo Cardigliano ionic_dev_rx_queue_setup(struct rte_eth_dev *eth_dev,
558*a27d9013SAlfredo Cardigliano 		uint16_t rx_queue_id,
559*a27d9013SAlfredo Cardigliano 		uint16_t nb_desc,
560*a27d9013SAlfredo Cardigliano 		uint32_t socket_id __rte_unused,
561*a27d9013SAlfredo Cardigliano 		const struct rte_eth_rxconf *rx_conf,
562*a27d9013SAlfredo Cardigliano 		struct rte_mempool *mp)
563*a27d9013SAlfredo Cardigliano {
564*a27d9013SAlfredo Cardigliano 	struct ionic_lif *lif = IONIC_ETH_DEV_TO_LIF(eth_dev);
565*a27d9013SAlfredo Cardigliano 	struct ionic_qcq *rxq;
566*a27d9013SAlfredo Cardigliano 	uint64_t offloads;
567*a27d9013SAlfredo Cardigliano 	int err;
568*a27d9013SAlfredo Cardigliano 
569*a27d9013SAlfredo Cardigliano 	IONIC_PRINT_CALL();
570*a27d9013SAlfredo Cardigliano 
571*a27d9013SAlfredo Cardigliano 	IONIC_PRINT(DEBUG, "Configuring RX queue %u with %u buffers",
572*a27d9013SAlfredo Cardigliano 		rx_queue_id, nb_desc);
573*a27d9013SAlfredo Cardigliano 
574*a27d9013SAlfredo Cardigliano 	if (rx_queue_id >= lif->nrxqcqs) {
575*a27d9013SAlfredo Cardigliano 		IONIC_PRINT(ERR,
576*a27d9013SAlfredo Cardigliano 			"Queue index %u not available (max %u queues)",
577*a27d9013SAlfredo Cardigliano 			rx_queue_id, lif->nrxqcqs);
578*a27d9013SAlfredo Cardigliano 		return -EINVAL;
579*a27d9013SAlfredo Cardigliano 	}
580*a27d9013SAlfredo Cardigliano 
581*a27d9013SAlfredo Cardigliano 	offloads = rx_conf->offloads | eth_dev->data->dev_conf.rxmode.offloads;
582*a27d9013SAlfredo Cardigliano 
583*a27d9013SAlfredo Cardigliano 	/* Validate number of receive descriptors */
584*a27d9013SAlfredo Cardigliano 	if (!rte_is_power_of_2(nb_desc) ||
585*a27d9013SAlfredo Cardigliano 			nb_desc < IONIC_MIN_RING_DESC ||
586*a27d9013SAlfredo Cardigliano 			nb_desc > IONIC_MAX_RING_DESC) {
587*a27d9013SAlfredo Cardigliano 		IONIC_PRINT(ERR,
588*a27d9013SAlfredo Cardigliano 			"Bad number of descriptors (%u) for queue %u (min: %u)",
589*a27d9013SAlfredo Cardigliano 			nb_desc, rx_queue_id, IONIC_MIN_RING_DESC);
590*a27d9013SAlfredo Cardigliano 		return -EINVAL; /* or use IONIC_DEFAULT_RING_DESC */
591*a27d9013SAlfredo Cardigliano 	}
592*a27d9013SAlfredo Cardigliano 
593*a27d9013SAlfredo Cardigliano 	if (rx_conf->offloads & DEV_RX_OFFLOAD_SCATTER)
594*a27d9013SAlfredo Cardigliano 		eth_dev->data->scattered_rx = 1;
595*a27d9013SAlfredo Cardigliano 
596*a27d9013SAlfredo Cardigliano 	/* Free memory prior to re-allocation if needed... */
597*a27d9013SAlfredo Cardigliano 	if (eth_dev->data->rx_queues[rx_queue_id] != NULL) {
598*a27d9013SAlfredo Cardigliano 		void *rx_queue = eth_dev->data->rx_queues[rx_queue_id];
599*a27d9013SAlfredo Cardigliano 		ionic_dev_rx_queue_release(rx_queue);
600*a27d9013SAlfredo Cardigliano 		eth_dev->data->rx_queues[rx_queue_id] = NULL;
601*a27d9013SAlfredo Cardigliano 	}
602*a27d9013SAlfredo Cardigliano 
603*a27d9013SAlfredo Cardigliano 	err = ionic_rx_qcq_alloc(lif, rx_queue_id, nb_desc, &rxq);
604*a27d9013SAlfredo Cardigliano 	if (err) {
605*a27d9013SAlfredo Cardigliano 		IONIC_PRINT(ERR, "Queue allocation failure");
606*a27d9013SAlfredo Cardigliano 		return -EINVAL;
607*a27d9013SAlfredo Cardigliano 	}
608*a27d9013SAlfredo Cardigliano 
609*a27d9013SAlfredo Cardigliano 	rxq->mb_pool = mp;
610*a27d9013SAlfredo Cardigliano 
611*a27d9013SAlfredo Cardigliano 	/*
612*a27d9013SAlfredo Cardigliano 	 * Note: the interface does not currently support
613*a27d9013SAlfredo Cardigliano 	 * DEV_RX_OFFLOAD_KEEP_CRC, please also consider ETHER_CRC_LEN
614*a27d9013SAlfredo Cardigliano 	 * when the adapter will be able to keep the CRC and subtract
615*a27d9013SAlfredo Cardigliano 	 * it to the length for all received packets:
616*a27d9013SAlfredo Cardigliano 	 * if (eth_dev->data->dev_conf.rxmode.offloads &
617*a27d9013SAlfredo Cardigliano 	 *     DEV_RX_OFFLOAD_KEEP_CRC)
618*a27d9013SAlfredo Cardigliano 	 *   rxq->crc_len = ETHER_CRC_LEN;
619*a27d9013SAlfredo Cardigliano 	 */
620*a27d9013SAlfredo Cardigliano 
621*a27d9013SAlfredo Cardigliano 	/* Do not start queue with rte_eth_dev_start() */
622*a27d9013SAlfredo Cardigliano 	rxq->deferred_start = rx_conf->rx_deferred_start;
623*a27d9013SAlfredo Cardigliano 
624*a27d9013SAlfredo Cardigliano 	rxq->offloads = offloads;
625*a27d9013SAlfredo Cardigliano 
626*a27d9013SAlfredo Cardigliano 	eth_dev->data->rx_queues[rx_queue_id] = rxq;
627*a27d9013SAlfredo Cardigliano 
628*a27d9013SAlfredo Cardigliano 	return 0;
629*a27d9013SAlfredo Cardigliano }
630*a27d9013SAlfredo Cardigliano 
/*
 * Per-completion Rx handler, invoked by ionic_rxq_service() for each
 * descriptor retired by the NIC. Validates the completion, fixes up the
 * mbuf (lengths, offload flags, packet type) and appends it to the
 * caller's receive array in service_cb_arg, or recycles the buffer back
 * onto the ring on any error.
 *
 * cb_arg is the mbuf posted for this descriptor; service_cb_arg is a
 * struct ionic_rx_service (NULL when called from the stop-path flush).
 */
static void
ionic_rx_clean(struct ionic_queue *q,
		uint32_t q_desc_index, uint32_t cq_desc_index,
		void *cb_arg, void *service_cb_arg)
{
	struct ionic_rxq_comp *cq_desc_base = q->bound_cq->base;
	struct ionic_rxq_comp *cq_desc = &cq_desc_base[cq_desc_index];
	struct rte_mbuf *rxm = cb_arg;
	struct rte_mbuf *rxm_seg;
	struct ionic_qcq *rxq = IONIC_Q_TO_QCQ(q);
	uint32_t max_frame_size =
		rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	struct ionic_rx_stats *stats = IONIC_Q_TO_RX_STATS(q);
	struct ionic_rx_service *recv_args = (struct ionic_rx_service *)
		service_cb_arg;
	/* Usable bytes per mempool buffer, after the standard headroom */
	uint32_t buf_size = (uint16_t)
		(rte_pktmbuf_data_room_size(rxq->mb_pool) -
		RTE_PKTMBUF_HEADROOM);
	uint32_t left;

	/* No receive array: this is a flush (queue stop), just drop it */
	if (!recv_args) {
		stats->no_cb_arg++;
		/* Flush */
		rte_pktmbuf_free(rxm);
		/*
		 * Note: rte_mempool_put is faster with no segs
		 * rte_mempool_put(rxq->mb_pool, rxm);
		 */
		return;
	}

	/* Hardware reported an error: keep the buffer, re-post it */
	if (cq_desc->status) {
		stats->bad_cq_status++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	/* Caller's rx_pkts array is already full */
	if (recv_args->nb_rx >= recv_args->nb_pkts) {
		stats->no_room++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	/* Reject zero-length and oversized frames */
	if (cq_desc->len > max_frame_size ||
			cq_desc->len == 0) {
		stats->bad_len++;
		ionic_rx_recycle(q, q_desc_index, rxm);
		return;
	}

	rxm->data_off = RTE_PKTMBUF_HEADROOM;
	rte_prefetch1((char *)rxm->buf_addr + rxm->data_off);
	rxm->nb_segs = 1; /* cq_desc->num_sg_elems */
	rxm->pkt_len = cq_desc->len;
	rxm->port = rxq->lif->port_id;

	/* Distribute the reported length across the pre-linked segments */
	left = cq_desc->len;

	rxm->data_len = RTE_MIN(buf_size, left);
	left -= rxm->data_len;

	rxm_seg = rxm->next;
	while (rxm_seg && left) {
		rxm_seg->data_len = RTE_MIN(buf_size, left);
		left -= rxm_seg->data_len;

		rxm_seg = rxm_seg->next;
		rxm->nb_segs++;
	}

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= PKT_RX_VLAN | PKT_RX_VLAN_STRIPPED;
		rxm->vlan_tci = cq_desc->vlan_tci;
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_OK)
			pkt_flags |= PKT_RX_IP_CKSUM_GOOD;
		else if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_IP_BAD)
			pkt_flags |= PKT_RX_IP_CKSUM_BAD;

		if ((cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_TCP_OK) ||
			(cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_UDP_OK))
			pkt_flags |= PKT_RX_L4_CKSUM_GOOD;
		else if ((cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_TCP_BAD) ||
				(cq_desc->csum_flags &
				IONIC_RXQ_COMP_CSUM_F_UDP_BAD))
			pkt_flags |= PKT_RX_L4_CKSUM_BAD;
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	switch (cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK) {
	case IONIC_PKT_TYPE_IPV4:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4;
		break;
	case IONIC_PKT_TYPE_IPV6:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6;
		break;
	case IONIC_PKT_TYPE_IPV4_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV6_TCP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_TCP;
		break;
	case IONIC_PKT_TYPE_IPV4_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 |
			RTE_PTYPE_L4_UDP;
		break;
	case IONIC_PKT_TYPE_IPV6_UDP:
		pkt_type = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 |
			RTE_PTYPE_L4_UDP;
		break;
	default:
		{
			/* Unrecognized by hardware: peek at the Ethertype */
			struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
			uint16_t ether_type = eth_h->ether_type;
			if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
				pkt_type = RTE_PTYPE_L2_ETHER_ARP;
			else
				pkt_type = RTE_PTYPE_UNKNOWN;
			break;
		}
	}

	rxm->packet_type = pkt_type;

	recv_args->rx_pkts[recv_args->nb_rx] = rxm;
	recv_args->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}
773*a27d9013SAlfredo Cardigliano 
774*a27d9013SAlfredo Cardigliano static void
775*a27d9013SAlfredo Cardigliano ionic_rx_recycle(struct ionic_queue *q, uint32_t q_desc_index,
776*a27d9013SAlfredo Cardigliano 		 struct rte_mbuf *mbuf)
777*a27d9013SAlfredo Cardigliano {
778*a27d9013SAlfredo Cardigliano 	struct ionic_rxq_desc *desc_base = q->base;
779*a27d9013SAlfredo Cardigliano 	struct ionic_rxq_desc *old = &desc_base[q_desc_index];
780*a27d9013SAlfredo Cardigliano 	struct ionic_rxq_desc *new = &desc_base[q->head_idx];
781*a27d9013SAlfredo Cardigliano 
782*a27d9013SAlfredo Cardigliano 	new->addr = old->addr;
783*a27d9013SAlfredo Cardigliano 	new->len = old->len;
784*a27d9013SAlfredo Cardigliano 
785*a27d9013SAlfredo Cardigliano 	ionic_q_post(q, true, ionic_rx_clean, mbuf);
786*a27d9013SAlfredo Cardigliano }
787*a27d9013SAlfredo Cardigliano 
788*a27d9013SAlfredo Cardigliano static int __attribute__((cold))
789*a27d9013SAlfredo Cardigliano ionic_rx_fill(struct ionic_qcq *rxq, uint32_t len)
790*a27d9013SAlfredo Cardigliano {
791*a27d9013SAlfredo Cardigliano 	struct ionic_queue *q = &rxq->q;
792*a27d9013SAlfredo Cardigliano 	struct ionic_rxq_desc *desc_base = q->base;
793*a27d9013SAlfredo Cardigliano 	struct ionic_rxq_sg_desc *sg_desc_base = q->sg_base;
794*a27d9013SAlfredo Cardigliano 	struct ionic_rxq_desc *desc;
795*a27d9013SAlfredo Cardigliano 	struct ionic_rxq_sg_desc *sg_desc;
796*a27d9013SAlfredo Cardigliano 	struct ionic_rxq_sg_elem *elem;
797*a27d9013SAlfredo Cardigliano 	rte_iova_t dma_addr;
798*a27d9013SAlfredo Cardigliano 	uint32_t i, j, nsegs, buf_size, size;
799*a27d9013SAlfredo Cardigliano 	bool ring_doorbell;
800*a27d9013SAlfredo Cardigliano 
801*a27d9013SAlfredo Cardigliano 	buf_size = (uint16_t)(rte_pktmbuf_data_room_size(rxq->mb_pool) -
802*a27d9013SAlfredo Cardigliano 		RTE_PKTMBUF_HEADROOM);
803*a27d9013SAlfredo Cardigliano 
804*a27d9013SAlfredo Cardigliano 	/* Initialize software ring entries */
805*a27d9013SAlfredo Cardigliano 	for (i = ionic_q_space_avail(q); i; i--) {
806*a27d9013SAlfredo Cardigliano 		struct rte_mbuf *rxm = rte_mbuf_raw_alloc(rxq->mb_pool);
807*a27d9013SAlfredo Cardigliano 		struct rte_mbuf *prev_rxm_seg;
808*a27d9013SAlfredo Cardigliano 
809*a27d9013SAlfredo Cardigliano 		if (rxm == NULL) {
810*a27d9013SAlfredo Cardigliano 			IONIC_PRINT(ERR, "RX mbuf alloc failed");
811*a27d9013SAlfredo Cardigliano 			return -ENOMEM;
812*a27d9013SAlfredo Cardigliano 		}
813*a27d9013SAlfredo Cardigliano 
814*a27d9013SAlfredo Cardigliano 		nsegs = (len + buf_size - 1) / buf_size;
815*a27d9013SAlfredo Cardigliano 
816*a27d9013SAlfredo Cardigliano 		desc = &desc_base[q->head_idx];
817*a27d9013SAlfredo Cardigliano 		dma_addr = rte_cpu_to_le_64(rte_mbuf_data_iova_default(rxm));
818*a27d9013SAlfredo Cardigliano 		desc->addr = dma_addr;
819*a27d9013SAlfredo Cardigliano 		desc->len = buf_size;
820*a27d9013SAlfredo Cardigliano 		size = buf_size;
821*a27d9013SAlfredo Cardigliano 		desc->opcode = (nsegs > 1) ? IONIC_RXQ_DESC_OPCODE_SG :
822*a27d9013SAlfredo Cardigliano 			IONIC_RXQ_DESC_OPCODE_SIMPLE;
823*a27d9013SAlfredo Cardigliano 		rxm->next = NULL;
824*a27d9013SAlfredo Cardigliano 
825*a27d9013SAlfredo Cardigliano 		prev_rxm_seg = rxm;
826*a27d9013SAlfredo Cardigliano 		sg_desc = &sg_desc_base[q->head_idx];
827*a27d9013SAlfredo Cardigliano 		elem = sg_desc->elems;
828*a27d9013SAlfredo Cardigliano 		for (j = 0; j < nsegs - 1 && j < IONIC_RX_MAX_SG_ELEMS; j++) {
829*a27d9013SAlfredo Cardigliano 			struct rte_mbuf *rxm_seg;
830*a27d9013SAlfredo Cardigliano 			rte_iova_t data_iova;
831*a27d9013SAlfredo Cardigliano 
832*a27d9013SAlfredo Cardigliano 			rxm_seg = rte_mbuf_raw_alloc(rxq->mb_pool);
833*a27d9013SAlfredo Cardigliano 			if (rxm_seg == NULL) {
834*a27d9013SAlfredo Cardigliano 				IONIC_PRINT(ERR, "RX mbuf alloc failed");
835*a27d9013SAlfredo Cardigliano 				return -ENOMEM;
836*a27d9013SAlfredo Cardigliano 			}
837*a27d9013SAlfredo Cardigliano 
838*a27d9013SAlfredo Cardigliano 			data_iova = rte_mbuf_data_iova(rxm_seg);
839*a27d9013SAlfredo Cardigliano 			dma_addr = rte_cpu_to_le_64(data_iova);
840*a27d9013SAlfredo Cardigliano 			elem->addr = dma_addr;
841*a27d9013SAlfredo Cardigliano 			elem->len = buf_size;
842*a27d9013SAlfredo Cardigliano 			size += buf_size;
843*a27d9013SAlfredo Cardigliano 			elem++;
844*a27d9013SAlfredo Cardigliano 			rxm_seg->next = NULL;
845*a27d9013SAlfredo Cardigliano 			prev_rxm_seg->next = rxm_seg;
846*a27d9013SAlfredo Cardigliano 			prev_rxm_seg = rxm_seg;
847*a27d9013SAlfredo Cardigliano 		}
848*a27d9013SAlfredo Cardigliano 
849*a27d9013SAlfredo Cardigliano 		if (size < len)
850*a27d9013SAlfredo Cardigliano 			IONIC_PRINT(ERR, "Rx SG size is not sufficient (%d < %d)",
851*a27d9013SAlfredo Cardigliano 				size, len);
852*a27d9013SAlfredo Cardigliano 
853*a27d9013SAlfredo Cardigliano 		ring_doorbell = ((q->head_idx + 1) &
854*a27d9013SAlfredo Cardigliano 			IONIC_RX_RING_DOORBELL_STRIDE) == 0;
855*a27d9013SAlfredo Cardigliano 
856*a27d9013SAlfredo Cardigliano 		ionic_q_post(q, ring_doorbell, ionic_rx_clean, rxm);
857*a27d9013SAlfredo Cardigliano 	}
858*a27d9013SAlfredo Cardigliano 
859*a27d9013SAlfredo Cardigliano 	return 0;
860*a27d9013SAlfredo Cardigliano }
861*a27d9013SAlfredo Cardigliano 
862*a27d9013SAlfredo Cardigliano /*
863*a27d9013SAlfredo Cardigliano  * Start Receive Units for specified queue.
864*a27d9013SAlfredo Cardigliano  */
865*a27d9013SAlfredo Cardigliano int __attribute__((cold))
866*a27d9013SAlfredo Cardigliano ionic_dev_rx_queue_start(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
867*a27d9013SAlfredo Cardigliano {
868*a27d9013SAlfredo Cardigliano 	uint32_t frame_size = eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
869*a27d9013SAlfredo Cardigliano 	struct ionic_qcq *rxq;
870*a27d9013SAlfredo Cardigliano 	int err;
871*a27d9013SAlfredo Cardigliano 
872*a27d9013SAlfredo Cardigliano 	IONIC_PRINT_CALL();
873*a27d9013SAlfredo Cardigliano 
874*a27d9013SAlfredo Cardigliano 	IONIC_PRINT(DEBUG, "Allocating RX queue buffers (size: %u)",
875*a27d9013SAlfredo Cardigliano 		frame_size);
876*a27d9013SAlfredo Cardigliano 
877*a27d9013SAlfredo Cardigliano 	rxq = eth_dev->data->rx_queues[rx_queue_id];
878*a27d9013SAlfredo Cardigliano 
879*a27d9013SAlfredo Cardigliano 	err = ionic_lif_rxq_init(rxq);
880*a27d9013SAlfredo Cardigliano 	if (err)
881*a27d9013SAlfredo Cardigliano 		return err;
882*a27d9013SAlfredo Cardigliano 
883*a27d9013SAlfredo Cardigliano 	/* Allocate buffers for descriptor rings */
884*a27d9013SAlfredo Cardigliano 	if (ionic_rx_fill(rxq, frame_size) != 0) {
885*a27d9013SAlfredo Cardigliano 		IONIC_PRINT(ERR, "Could not alloc mbuf for queue:%d",
886*a27d9013SAlfredo Cardigliano 			rx_queue_id);
887*a27d9013SAlfredo Cardigliano 		return -1;
888*a27d9013SAlfredo Cardigliano 	}
889*a27d9013SAlfredo Cardigliano 
890*a27d9013SAlfredo Cardigliano 	ionic_qcq_enable(rxq);
891*a27d9013SAlfredo Cardigliano 
892*a27d9013SAlfredo Cardigliano 	eth_dev->data->rx_queue_state[rx_queue_id] =
893*a27d9013SAlfredo Cardigliano 		RTE_ETH_QUEUE_STATE_STARTED;
894*a27d9013SAlfredo Cardigliano 
895*a27d9013SAlfredo Cardigliano 	return 0;
896*a27d9013SAlfredo Cardigliano }
897*a27d9013SAlfredo Cardigliano 
898*a27d9013SAlfredo Cardigliano static inline void __attribute__((cold))
899*a27d9013SAlfredo Cardigliano ionic_rxq_service(struct ionic_cq *cq, uint32_t work_to_do,
900*a27d9013SAlfredo Cardigliano 		void *service_cb_arg)
901*a27d9013SAlfredo Cardigliano {
902*a27d9013SAlfredo Cardigliano 	struct ionic_queue *q = cq->bound_q;
903*a27d9013SAlfredo Cardigliano 	struct ionic_desc_info *q_desc_info;
904*a27d9013SAlfredo Cardigliano 	struct ionic_rxq_comp *cq_desc_base = cq->base;
905*a27d9013SAlfredo Cardigliano 	struct ionic_rxq_comp *cq_desc;
906*a27d9013SAlfredo Cardigliano 	bool more;
907*a27d9013SAlfredo Cardigliano 	uint32_t curr_q_tail_idx, curr_cq_tail_idx;
908*a27d9013SAlfredo Cardigliano 	uint32_t work_done = 0;
909*a27d9013SAlfredo Cardigliano 
910*a27d9013SAlfredo Cardigliano 	if (work_to_do == 0)
911*a27d9013SAlfredo Cardigliano 		return;
912*a27d9013SAlfredo Cardigliano 
913*a27d9013SAlfredo Cardigliano 	cq_desc = &cq_desc_base[cq->tail_idx];
914*a27d9013SAlfredo Cardigliano 	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
915*a27d9013SAlfredo Cardigliano 		curr_cq_tail_idx = cq->tail_idx;
916*a27d9013SAlfredo Cardigliano 		cq->tail_idx = (cq->tail_idx + 1) & (cq->num_descs - 1);
917*a27d9013SAlfredo Cardigliano 
918*a27d9013SAlfredo Cardigliano 		if (cq->tail_idx == 0)
919*a27d9013SAlfredo Cardigliano 			cq->done_color = !cq->done_color;
920*a27d9013SAlfredo Cardigliano 
921*a27d9013SAlfredo Cardigliano 		/* Prefetch the next 4 descriptors */
922*a27d9013SAlfredo Cardigliano 		if ((cq->tail_idx & 0x3) == 0)
923*a27d9013SAlfredo Cardigliano 			rte_prefetch0(&cq_desc_base[cq->tail_idx]);
924*a27d9013SAlfredo Cardigliano 
925*a27d9013SAlfredo Cardigliano 		do {
926*a27d9013SAlfredo Cardigliano 			more = (q->tail_idx != cq_desc->comp_index);
927*a27d9013SAlfredo Cardigliano 
928*a27d9013SAlfredo Cardigliano 			q_desc_info = &q->info[q->tail_idx];
929*a27d9013SAlfredo Cardigliano 
930*a27d9013SAlfredo Cardigliano 			curr_q_tail_idx = q->tail_idx;
931*a27d9013SAlfredo Cardigliano 			q->tail_idx = (q->tail_idx + 1) & (q->num_descs - 1);
932*a27d9013SAlfredo Cardigliano 
933*a27d9013SAlfredo Cardigliano 			/* Prefetch the next 4 descriptors */
934*a27d9013SAlfredo Cardigliano 			if ((q->tail_idx & 0x3) == 0)
935*a27d9013SAlfredo Cardigliano 				/* q desc info */
936*a27d9013SAlfredo Cardigliano 				rte_prefetch0(&q->info[q->tail_idx]);
937*a27d9013SAlfredo Cardigliano 
938*a27d9013SAlfredo Cardigliano 			ionic_rx_clean(q, curr_q_tail_idx, curr_cq_tail_idx,
939*a27d9013SAlfredo Cardigliano 				q_desc_info->cb_arg, service_cb_arg);
940*a27d9013SAlfredo Cardigliano 
941*a27d9013SAlfredo Cardigliano 		} while (more);
942*a27d9013SAlfredo Cardigliano 
943*a27d9013SAlfredo Cardigliano 		if (++work_done == work_to_do)
944*a27d9013SAlfredo Cardigliano 			break;
945*a27d9013SAlfredo Cardigliano 
946*a27d9013SAlfredo Cardigliano 		cq_desc = &cq_desc_base[cq->tail_idx];
947*a27d9013SAlfredo Cardigliano 	}
948*a27d9013SAlfredo Cardigliano }
949*a27d9013SAlfredo Cardigliano 
950*a27d9013SAlfredo Cardigliano /*
951*a27d9013SAlfredo Cardigliano  * Stop Receive Units for specified queue.
952*a27d9013SAlfredo Cardigliano  */
953*a27d9013SAlfredo Cardigliano int __attribute__((cold))
954*a27d9013SAlfredo Cardigliano ionic_dev_rx_queue_stop(struct rte_eth_dev *eth_dev, uint16_t rx_queue_id)
955*a27d9013SAlfredo Cardigliano {
956*a27d9013SAlfredo Cardigliano 	struct ionic_qcq *rxq;
957*a27d9013SAlfredo Cardigliano 
958*a27d9013SAlfredo Cardigliano 	IONIC_PRINT_CALL();
959*a27d9013SAlfredo Cardigliano 
960*a27d9013SAlfredo Cardigliano 	rxq = eth_dev->data->rx_queues[rx_queue_id];
961*a27d9013SAlfredo Cardigliano 
962*a27d9013SAlfredo Cardigliano 	ionic_qcq_disable(rxq);
963*a27d9013SAlfredo Cardigliano 
964*a27d9013SAlfredo Cardigliano 	/* Flush */
965*a27d9013SAlfredo Cardigliano 	ionic_rxq_service(&rxq->cq, -1, NULL);
966*a27d9013SAlfredo Cardigliano 
967*a27d9013SAlfredo Cardigliano 	ionic_lif_rxq_deinit(rxq);
968*a27d9013SAlfredo Cardigliano 
969*a27d9013SAlfredo Cardigliano 	eth_dev->data->rx_queue_state[rx_queue_id] =
970*a27d9013SAlfredo Cardigliano 		RTE_ETH_QUEUE_STATE_STOPPED;
971*a27d9013SAlfredo Cardigliano 
972*a27d9013SAlfredo Cardigliano 	return 0;
973*a27d9013SAlfredo Cardigliano }
974*a27d9013SAlfredo Cardigliano 
975*a27d9013SAlfredo Cardigliano uint16_t
976*a27d9013SAlfredo Cardigliano ionic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
977*a27d9013SAlfredo Cardigliano 		uint16_t nb_pkts)
978*a27d9013SAlfredo Cardigliano {
979*a27d9013SAlfredo Cardigliano 	struct ionic_qcq *rxq = (struct ionic_qcq *)rx_queue;
980*a27d9013SAlfredo Cardigliano 	uint32_t frame_size =
981*a27d9013SAlfredo Cardigliano 		rxq->lif->eth_dev->data->dev_conf.rxmode.max_rx_pkt_len;
982*a27d9013SAlfredo Cardigliano 	struct ionic_cq *cq = &rxq->cq;
983*a27d9013SAlfredo Cardigliano 	struct ionic_rx_service service_cb_arg;
984*a27d9013SAlfredo Cardigliano 
985*a27d9013SAlfredo Cardigliano 	service_cb_arg.rx_pkts = rx_pkts;
986*a27d9013SAlfredo Cardigliano 	service_cb_arg.nb_pkts = nb_pkts;
987*a27d9013SAlfredo Cardigliano 	service_cb_arg.nb_rx = 0;
988*a27d9013SAlfredo Cardigliano 
989*a27d9013SAlfredo Cardigliano 	ionic_rxq_service(cq, nb_pkts, &service_cb_arg);
990*a27d9013SAlfredo Cardigliano 
991*a27d9013SAlfredo Cardigliano 	ionic_rx_fill(rxq, frame_size);
992*a27d9013SAlfredo Cardigliano 
993*a27d9013SAlfredo Cardigliano 	return service_cb_arg.nb_rx;
994*a27d9013SAlfredo Cardigliano }
995