/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018-2022 Advanced Micro Devices, Inc.
 */

#include <stdio.h>
#include <errno.h>
#include <stdint.h>
#include <assert.h>

#include <rte_common.h>
#include <rte_byteorder.h>
#include <rte_atomic.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_prefetch.h>

#include "ionic.h"
#include "ionic_if.h"
#include "ionic_dev.h"
#include "ionic_lif.h"
#include "ionic_rxtx.h"

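/*
 * Walk the Tx completion queue and free the mbufs of every descriptor
 * the device has completed, either returning them directly to their
 * mempool (fast-free) or via rte_pktmbuf_free_seg().
 */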
static __rte_always_inline void
ionic_tx_flush_sg(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct rte_mbuf *txm;
	struct ionic_txq_comp *cq_desc, *cq_desc_base = cq->base;
	void **info;
	uint32_t i;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);

		while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
			/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
			rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2)));

			/* Prefetch next mbuf */
			void **next_info =
				IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1));
			if (next_info[0])
				rte_mbuf_prefetch_part2(next_info[0]);
			if (next_info[1])
				rte_mbuf_prefetch_part2(next_info[1]);

			info = IONIC_INFO_PTR(q, q->tail_idx);
			for (i = 0; i < q->num_segs; i++) {
				txm = info[i];
				if (!txm)
					break;

				if (txq->flags & IONIC_QCQ_F_FAST_FREE)
					rte_mempool_put(txm->pool, txm);
				else
					rte_pktmbuf_free_seg(txm);

				info[i] = NULL;
			}

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
		}

		cq_desc = &cq_desc_base[cq->tail_idx];
	}
}

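/*
 * Post one non-TSO packet: map the offload flags to a descriptor opcode,
 * write the first segment into the Tx descriptor, chain any remaining
 * mbuf segments into the SG element list, and stash the mbuf pointers in
 * the info array so that ionic_tx_flush_sg() can free them on completion.
 */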
static __rte_always_inline int
ionic_tx_sg(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
{
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_txq_desc *desc, *desc_base = q->base;
	struct ionic_txq_sg_desc_v1 *sg_desc, *sg_desc_base = q->sg_base;
	struct ionic_txq_sg_elem *elem;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm_seg;
	rte_iova_t data_iova;
	void **info;
	uint64_t ol_flags = txm->ol_flags;
	uint64_t addr, cmd;
	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
	uint8_t flags = 0;

	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];
	info = IONIC_INFO_PTR(q, q->head_idx);

	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
	}

	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
	}

	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
		stats->no_csum++;

	if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
	    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
	     (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
		flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
	}

	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
		flags |= IONIC_TXQ_DESC_FLAG_VLAN;
		desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
	}

	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));

	cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
	desc->cmd = rte_cpu_to_le_64(cmd);
	desc->len = rte_cpu_to_le_16(txm->data_len);

	info[0] = txm;

	if (txm->nb_segs > 1) {
		txm_seg = txm->next;

		elem = sg_desc->elems;

		while (txm_seg != NULL) {
			/* Stash the mbuf ptr in the array */
			info++;
			*info = txm_seg;

			/* Configure the SGE */
			data_iova = rte_mbuf_data_iova(txm_seg);
			elem->len = rte_cpu_to_le_16(txm_seg->data_len);
			elem->addr = rte_cpu_to_le_64(data_iova);
			elem++;

			txm_seg = txm_seg->next;
		}
	}

	q->head_idx = Q_NEXT_TO_POST(q, 1);

	return 0;
}

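/*
 * Burst transmit entry point for the scatter-gather path.
 * Reclaims completed descriptors when free space drops below the
 * threshold, posts up to nb_pkts packets (TSO or SG), then rings the
 * doorbell once for the whole burst.
 */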
uint16_t
ionic_xmit_pkts_sg(void *tx_queue, struct rte_mbuf **tx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_tx_qcq *txq = tx_queue;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *mbuf;
	uint32_t bytes_tx = 0;
	uint16_t nb_avail, nb_tx = 0;
	int err;

	struct ionic_txq_desc *desc_base = q->base;
	if (!(txq->flags & IONIC_QCQ_F_CMB))
		rte_prefetch0(&desc_base[q->head_idx]);
	rte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));

	if (tx_pkts) {
		rte_mbuf_prefetch_part1(tx_pkts[0]);
		rte_mbuf_prefetch_part2(tx_pkts[0]);
	}

	if (ionic_q_space_avail(q) < txq->free_thresh) {
		/* Cleaning old buffers */
		ionic_tx_flush_sg(txq);
	}

	nb_avail = ionic_q_space_avail(q);
	if (nb_avail < nb_pkts) {
		stats->stop += nb_pkts - nb_avail;
		nb_pkts = nb_avail;
	}

	while (nb_tx < nb_pkts) {
		uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
		if (!(txq->flags & IONIC_QCQ_F_CMB))
			rte_prefetch0(&desc_base[next_idx]);
		rte_prefetch0(IONIC_INFO_PTR(q, next_idx));

		if (nb_tx + 1 < nb_pkts) {
			rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
			rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
		}

		mbuf = tx_pkts[nb_tx];

		if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
			err = ionic_tx_tso(txq, mbuf);
		else
			err = ionic_tx_sg(txq, mbuf);
		if (err) {
			stats->drop += nb_pkts - nb_tx;
			break;
		}

		bytes_tx += mbuf->pkt_len;
		nb_tx++;
	}

	if (nb_tx > 0) {
		rte_wmb();
		ionic_q_flush(q);

		stats->packets += nb_tx;
		stats->bytes += bytes_tx;
	}

	return nb_tx;
}

/*
 * Cleans one descriptor. Connects the filled mbufs into a chain.
 * Does not advance the tail index.
 */
static __rte_always_inline void
ionic_rx_clean_one_sg(struct ionic_rx_qcq *rxq,
		struct ionic_rxq_comp *cq_desc,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm;
	struct rte_mbuf *rxm_seg, *prev_rxm;
	struct ionic_rx_stats *stats = &rxq->stats;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	uint32_t left, i;
	uint16_t cq_desc_len;
	uint8_t ptype, cflags;
	void **info;

	cq_desc_len = rte_le_to_cpu_16(cq_desc->len);

	info = IONIC_INFO_PTR(q, q->tail_idx);

	rxm = info[0];

	if (cq_desc->status) {
		stats->bad_cq_status++;
		return;
	}

	if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
		stats->bad_len++;
		return;
	}

	info[0] = NULL;

	/* Set the mbuf metadata based on the cq entry */
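	/* One 8B store of the precomputed rearm word initializes data_off, refcnt, nb_segs, and port */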
	rxm->rearm_data[0] = rxq->rearm_data;
	rxm->pkt_len = cq_desc_len;
	rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);
	left = cq_desc_len - rxm->data_len;
	rxm->nb_segs = cq_desc->num_sg_elems + 1;

	prev_rxm = rxm;

	for (i = 1; i < rxm->nb_segs && left; i++) {
		rxm_seg = info[i];
		info[i] = NULL;

		/* Set the chained mbuf metadata */
		rxm_seg->rearm_data[0] = rxq->rearm_seg_data;
		rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
		left -= rxm_seg->data_len;

		/* Link the mbuf */
		prev_rxm->next = rxm_seg;
		prev_rxm = rxm_seg;
	}

	/* Terminate the mbuf chain */
	prev_rxm->next = NULL;

	/* RSS */
	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		cflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;
		pkt_flags |= ionic_csum_flags[cflags];
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	ptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
	pkt_type = ionic_ptype_table[ptype];
	if (pkt_type == RTE_PTYPE_UNKNOWN) {
		struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
		uint16_t ether_type = eth_h->ether_type;
		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
			pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))
			pkt_type = RTE_PTYPE_L2_ETHER_LLDP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))
			pkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;
		stats->mtods++;
	} else if (pkt_flags & RTE_MBUF_F_RX_VLAN) {
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	}

	rxm->packet_type = pkt_type;

	rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
	rx_svc->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}

/*
 * Fills one descriptor with mbufs. Does not advance the head index.
 */
static __rte_always_inline int
ionic_rx_fill_one_sg(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm;
	struct rte_mbuf *rxm_seg;
	struct ionic_rxq_desc *desc, *desc_base = q->base;
	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
	rte_iova_t data_iova;
	uint32_t i;
	void **info;
	int ret;

	info = IONIC_INFO_PTR(q, q->head_idx);
	desc = &desc_base[q->head_idx];
	sg_desc = &sg_desc_base[q->head_idx];

	/* mbuf is unused => whole chain is unused */
	if (info[0])
		return 0;

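	/* Refill the queue's local mbuf cache in bulk when it runs empty */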
	if (rxq->mb_idx == 0) {
		ret = rte_mempool_get_bulk(rxq->mb_pool,
					(void **)rxq->mbs,
					IONIC_MBUF_BULK_ALLOC);
		if (ret) {
			assert(0);
			return -ENOMEM;
		}

		rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
	}

	rxm = rxq->mbs[--rxq->mb_idx];
	info[0] = rxm;

	data_iova = rte_mbuf_data_iova_default(rxm);
	desc->addr = rte_cpu_to_le_64(data_iova);

	for (i = 1; i < q->num_segs; i++) {
		/* mbuf is unused => rest of the chain is unused */
		if (info[i])
			return 0;

		if (rxq->mb_idx == 0) {
			ret = rte_mempool_get_bulk(rxq->mb_pool,
					(void **)rxq->mbs,
					IONIC_MBUF_BULK_ALLOC);
			if (ret) {
				assert(0);
				return -ENOMEM;
			}

			rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
		}

		rxm_seg = rxq->mbs[--rxq->mb_idx];
		info[i] = rxm_seg;

		/* The data_off does not get set to 0 until later */
		data_iova = rxm_seg->buf_iova;
		sg_desc->elems[i - 1].addr = rte_cpu_to_le_64(data_iova);
	}

	return 0;
}

/*
 * Walk the CQ to find completed receive descriptors.
 * Any completed descriptor found is refilled.
 */
static __rte_always_inline void
ionic_rxq_service_sg(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *q_desc_base = q->base;
	struct ionic_rxq_comp *cq_desc, *cq_desc_base = cq->base;
	uint32_t work_done = 0;

	cq_desc = &cq_desc_base[cq->tail_idx];

	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 8 x 8B bufinfo */
		rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));
		/* Prefetch 4 x 16B comp */
		rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
		/* Prefetch 4 x 16B descriptors */
		if (!(rxq->flags & IONIC_QCQ_F_CMB))
			rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);

		/* Clean one descriptor */
		ionic_rx_clean_one_sg(rxq, cq_desc, rx_svc);
		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

		/* Fill one descriptor */
		(void)ionic_rx_fill_one_sg(rxq);

		q->head_idx = Q_NEXT_TO_POST(q, 1);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	/* Update the queue indices and ring the doorbell */
	if (work_done)
		ionic_q_flush(q);
}

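/*
 * Burst receive entry point for the scatter-gather path.
 * Services up to nb_pkts completions, storing the received mbufs
 * directly into rx_pkts via the rx_svc context.
 */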
uint16_t
ionic_recv_pkts_sg(void *rx_queue, struct rte_mbuf **rx_pkts,
		uint16_t nb_pkts)
{
	struct ionic_rx_qcq *rxq = rx_queue;
	struct ionic_rx_service rx_svc;

	rx_svc.rx_pkts = rx_pkts;
	rx_svc.nb_rx = 0;

	ionic_rxq_service_sg(rxq, nb_pkts, &rx_svc);

	return rx_svc.nb_rx;
}

/*
 * Fills all descriptors with mbufs.
 */
int __rte_cold
ionic_rx_fill_sg(struct ionic_rx_qcq *rxq)
{
	struct ionic_queue *q = &rxq->qcq.q;
	uint32_t i;
	int err = 0;

	for (i = 0; i < q->num_descs - 1u; i++) {
		err = ionic_rx_fill_one_sg(rxq);
		if (err)
			break;

		q->head_idx = Q_NEXT_TO_POST(q, 1);
	}

	ionic_q_flush(q);

	return err;
}