xref: /dpdk/drivers/net/ionic/ionic_rxtx_sg.c (revision 463ad260d35ee5934ab206d392a1a3e08b5506d0)
1e86a6fccSAndrew Boyer /* SPDX-License-Identifier: BSD-3-Clause
2e86a6fccSAndrew Boyer  * Copyright 2018-2022 Advanced Micro Devices, Inc.
3e86a6fccSAndrew Boyer  */
4e86a6fccSAndrew Boyer 
5e86a6fccSAndrew Boyer #include <stdio.h>
6e86a6fccSAndrew Boyer #include <errno.h>
7e86a6fccSAndrew Boyer #include <stdint.h>
8e86a6fccSAndrew Boyer #include <assert.h>
9e86a6fccSAndrew Boyer 
10e86a6fccSAndrew Boyer #include <rte_common.h>
11e86a6fccSAndrew Boyer #include <rte_byteorder.h>
12e86a6fccSAndrew Boyer #include <rte_atomic.h>
13e86a6fccSAndrew Boyer #include <rte_mempool.h>
14e86a6fccSAndrew Boyer #include <rte_mbuf.h>
15e86a6fccSAndrew Boyer #include <rte_ether.h>
16e86a6fccSAndrew Boyer #include <rte_prefetch.h>
17e86a6fccSAndrew Boyer 
18e86a6fccSAndrew Boyer #include "ionic.h"
19e86a6fccSAndrew Boyer #include "ionic_if.h"
20e86a6fccSAndrew Boyer #include "ionic_dev.h"
21e86a6fccSAndrew Boyer #include "ionic_lif.h"
22e86a6fccSAndrew Boyer #include "ionic_rxtx.h"
23e86a6fccSAndrew Boyer 
/*
 * Service the Tx completion queue, freeing the mbufs of every
 * transmitted descriptor. One completion entry may cover multiple
 * descriptors (coalesced completions): all descriptors up to and
 * including comp_index are considered done.
 */
static __rte_always_inline void
ionic_tx_flush_sg(struct ionic_tx_qcq *txq)
{
	struct ionic_cq *cq = &txq->qcq.cq;
	struct ionic_queue *q = &txq->qcq.q;
	struct ionic_tx_stats *stats = &txq->stats;
	struct rte_mbuf *txm;
	struct ionic_txq_comp *cq_desc_base = cq->base;
	/* volatile: entries are written by the hardware as we poll */
	volatile struct ionic_txq_comp *cq_desc;
	void **info;
	uint32_t i;

	cq_desc = &cq_desc_base[cq->tail_idx];

	/*
	 * The color bit flips on each CQ wrap; a match with done_color
	 * means the hardware has written this entry since the last pass.
	 */
	while (color_match(cq_desc->color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 4 x 16B comp at cq->tail_idx + 4 */
		if ((cq->tail_idx & 0x3) == 0)
			rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);

		/* Free every descriptor covered by this completion */
		while (q->tail_idx != rte_le_to_cpu_16(cq_desc->comp_index)) {
			/* Prefetch 8 mbuf ptrs at q->tail_idx + 2 */
			rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 2)));

			/* Prefetch next mbuf */
			void **next_info =
				IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 1));
			if (next_info[0])
				rte_mbuf_prefetch_part2(next_info[0]);
			if (next_info[1])
				rte_mbuf_prefetch_part2(next_info[1]);

			/* Free the mbuf chain stashed for this descriptor;
			 * a NULL entry terminates the stashed chain early.
			 */
			info = IONIC_INFO_PTR(q, q->tail_idx);
			for (i = 0; i < q->num_segs; i++) {
				txm = info[i];
				if (!txm)
					break;

				if (txq->flags & IONIC_QCQ_F_FAST_FREE)
					rte_mempool_put(txm->pool, txm);
				else
					rte_pktmbuf_free_seg(txm);

				info[i] = NULL;
			}

			q->tail_idx = Q_NEXT_TO_SRVC(q, 1);
		}

		cq_desc = &cq_desc_base[cq->tail_idx];
		stats->comps++;
	}
}
80e86a6fccSAndrew Boyer 
81e86a6fccSAndrew Boyer static __rte_always_inline int
82e86a6fccSAndrew Boyer ionic_tx_sg(struct ionic_tx_qcq *txq, struct rte_mbuf *txm)
83e86a6fccSAndrew Boyer {
84e86a6fccSAndrew Boyer 	struct ionic_queue *q = &txq->qcq.q;
85e86a6fccSAndrew Boyer 	struct ionic_txq_desc *desc, *desc_base = q->base;
86e86a6fccSAndrew Boyer 	struct ionic_txq_sg_desc_v1 *sg_desc, *sg_desc_base = q->sg_base;
87e86a6fccSAndrew Boyer 	struct ionic_txq_sg_elem *elem;
88e86a6fccSAndrew Boyer 	struct ionic_tx_stats *stats = &txq->stats;
89e86a6fccSAndrew Boyer 	struct rte_mbuf *txm_seg;
90e86a6fccSAndrew Boyer 	rte_iova_t data_iova;
91e86a6fccSAndrew Boyer 	void **info;
92e86a6fccSAndrew Boyer 	uint64_t ol_flags = txm->ol_flags;
93e86a6fccSAndrew Boyer 	uint64_t addr, cmd;
94e86a6fccSAndrew Boyer 	uint8_t opcode = IONIC_TXQ_DESC_OPCODE_CSUM_NONE;
95e86a6fccSAndrew Boyer 	uint8_t flags = 0;
96e86a6fccSAndrew Boyer 
97e86a6fccSAndrew Boyer 	desc = &desc_base[q->head_idx];
98e86a6fccSAndrew Boyer 	sg_desc = &sg_desc_base[q->head_idx];
99e86a6fccSAndrew Boyer 	info = IONIC_INFO_PTR(q, q->head_idx);
100e86a6fccSAndrew Boyer 
101e86a6fccSAndrew Boyer 	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) &&
102e86a6fccSAndrew Boyer 	    (txq->flags & IONIC_QCQ_F_CSUM_L3)) {
103e86a6fccSAndrew Boyer 		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
104e86a6fccSAndrew Boyer 		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L3;
105e86a6fccSAndrew Boyer 	}
106e86a6fccSAndrew Boyer 
107e86a6fccSAndrew Boyer 	if (((ol_flags & RTE_MBUF_F_TX_TCP_CKSUM) &&
108e86a6fccSAndrew Boyer 	     (txq->flags & IONIC_QCQ_F_CSUM_TCP)) ||
109e86a6fccSAndrew Boyer 	    ((ol_flags & RTE_MBUF_F_TX_UDP_CKSUM) &&
110e86a6fccSAndrew Boyer 	     (txq->flags & IONIC_QCQ_F_CSUM_UDP))) {
111e86a6fccSAndrew Boyer 		opcode = IONIC_TXQ_DESC_OPCODE_CSUM_HW;
112e86a6fccSAndrew Boyer 		flags |= IONIC_TXQ_DESC_FLAG_CSUM_L4;
113e86a6fccSAndrew Boyer 	}
114e86a6fccSAndrew Boyer 
115e86a6fccSAndrew Boyer 	if (opcode == IONIC_TXQ_DESC_OPCODE_CSUM_NONE)
116e86a6fccSAndrew Boyer 		stats->no_csum++;
117e86a6fccSAndrew Boyer 
118e86a6fccSAndrew Boyer 	if (((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) ||
119e86a6fccSAndrew Boyer 	     (ol_flags & RTE_MBUF_F_TX_OUTER_UDP_CKSUM)) &&
120e86a6fccSAndrew Boyer 	    ((ol_flags & RTE_MBUF_F_TX_OUTER_IPV4) ||
121e86a6fccSAndrew Boyer 	     (ol_flags & RTE_MBUF_F_TX_OUTER_IPV6))) {
122e86a6fccSAndrew Boyer 		flags |= IONIC_TXQ_DESC_FLAG_ENCAP;
123e86a6fccSAndrew Boyer 	}
124e86a6fccSAndrew Boyer 
125e86a6fccSAndrew Boyer 	if (ol_flags & RTE_MBUF_F_TX_VLAN) {
126e86a6fccSAndrew Boyer 		flags |= IONIC_TXQ_DESC_FLAG_VLAN;
127e86a6fccSAndrew Boyer 		desc->vlan_tci = rte_cpu_to_le_16(txm->vlan_tci);
128e86a6fccSAndrew Boyer 	}
129e86a6fccSAndrew Boyer 
130e86a6fccSAndrew Boyer 	addr = rte_cpu_to_le_64(rte_mbuf_data_iova(txm));
131e86a6fccSAndrew Boyer 
132e86a6fccSAndrew Boyer 	cmd = encode_txq_desc_cmd(opcode, flags, txm->nb_segs - 1, addr);
133e86a6fccSAndrew Boyer 	desc->cmd = rte_cpu_to_le_64(cmd);
134e86a6fccSAndrew Boyer 	desc->len = rte_cpu_to_le_16(txm->data_len);
135e86a6fccSAndrew Boyer 
136e86a6fccSAndrew Boyer 	info[0] = txm;
137e86a6fccSAndrew Boyer 
138e86a6fccSAndrew Boyer 	if (txm->nb_segs > 1) {
139e86a6fccSAndrew Boyer 		txm_seg = txm->next;
140e86a6fccSAndrew Boyer 
141e86a6fccSAndrew Boyer 		elem = sg_desc->elems;
142e86a6fccSAndrew Boyer 
143e86a6fccSAndrew Boyer 		while (txm_seg != NULL) {
144e86a6fccSAndrew Boyer 			/* Stash the mbuf ptr in the array */
145e86a6fccSAndrew Boyer 			info++;
146e86a6fccSAndrew Boyer 			*info = txm_seg;
147e86a6fccSAndrew Boyer 
148e86a6fccSAndrew Boyer 			/* Configure the SGE */
149e86a6fccSAndrew Boyer 			data_iova = rte_mbuf_data_iova(txm_seg);
150e86a6fccSAndrew Boyer 			elem->len = rte_cpu_to_le_16(txm_seg->data_len);
151e86a6fccSAndrew Boyer 			elem->addr = rte_cpu_to_le_64(data_iova);
152e86a6fccSAndrew Boyer 			elem++;
153e86a6fccSAndrew Boyer 
154e86a6fccSAndrew Boyer 			txm_seg = txm_seg->next;
155e86a6fccSAndrew Boyer 		}
156e86a6fccSAndrew Boyer 	}
157e86a6fccSAndrew Boyer 
158e86a6fccSAndrew Boyer 	q->head_idx = Q_NEXT_TO_POST(q, 1);
159e86a6fccSAndrew Boyer 
160e86a6fccSAndrew Boyer 	return 0;
161e86a6fccSAndrew Boyer }
162e86a6fccSAndrew Boyer 
163e86a6fccSAndrew Boyer uint16_t
164e86a6fccSAndrew Boyer ionic_xmit_pkts_sg(void *tx_queue, struct rte_mbuf **tx_pkts,
165e86a6fccSAndrew Boyer 		uint16_t nb_pkts)
166e86a6fccSAndrew Boyer {
167e86a6fccSAndrew Boyer 	struct ionic_tx_qcq *txq = tx_queue;
168e86a6fccSAndrew Boyer 	struct ionic_queue *q = &txq->qcq.q;
169e86a6fccSAndrew Boyer 	struct ionic_tx_stats *stats = &txq->stats;
170e86a6fccSAndrew Boyer 	struct rte_mbuf *mbuf;
171e86a6fccSAndrew Boyer 	uint32_t bytes_tx = 0;
172e86a6fccSAndrew Boyer 	uint16_t nb_avail, nb_tx = 0;
173a5b1ffd8SAndrew Boyer 	uint64_t then, now, hz, delta;
174e86a6fccSAndrew Boyer 	int err;
175e86a6fccSAndrew Boyer 
176e86a6fccSAndrew Boyer 	struct ionic_txq_desc *desc_base = q->base;
177e86a6fccSAndrew Boyer 	if (!(txq->flags & IONIC_QCQ_F_CMB))
178e86a6fccSAndrew Boyer 		rte_prefetch0(&desc_base[q->head_idx]);
179e86a6fccSAndrew Boyer 	rte_prefetch0(IONIC_INFO_PTR(q, q->head_idx));
180e86a6fccSAndrew Boyer 
1813c02593cSAndrew Boyer 	if (nb_pkts) {
182e86a6fccSAndrew Boyer 		rte_mbuf_prefetch_part1(tx_pkts[0]);
183e86a6fccSAndrew Boyer 		rte_mbuf_prefetch_part2(tx_pkts[0]);
184e86a6fccSAndrew Boyer 	}
185e86a6fccSAndrew Boyer 
186e86a6fccSAndrew Boyer 	if (ionic_q_space_avail(q) < txq->free_thresh) {
187e86a6fccSAndrew Boyer 		/* Cleaning old buffers */
188e86a6fccSAndrew Boyer 		ionic_tx_flush_sg(txq);
189e86a6fccSAndrew Boyer 	}
190e86a6fccSAndrew Boyer 
191e86a6fccSAndrew Boyer 	nb_avail = ionic_q_space_avail(q);
192e86a6fccSAndrew Boyer 	if (nb_avail < nb_pkts) {
193e86a6fccSAndrew Boyer 		stats->stop += nb_pkts - nb_avail;
194e86a6fccSAndrew Boyer 		nb_pkts = nb_avail;
195e86a6fccSAndrew Boyer 	}
196e86a6fccSAndrew Boyer 
197e86a6fccSAndrew Boyer 	while (nb_tx < nb_pkts) {
198e86a6fccSAndrew Boyer 		uint16_t next_idx = Q_NEXT_TO_POST(q, 1);
199e86a6fccSAndrew Boyer 		if (!(txq->flags & IONIC_QCQ_F_CMB))
200e86a6fccSAndrew Boyer 			rte_prefetch0(&desc_base[next_idx]);
201e86a6fccSAndrew Boyer 		rte_prefetch0(IONIC_INFO_PTR(q, next_idx));
202e86a6fccSAndrew Boyer 
203e86a6fccSAndrew Boyer 		if (nb_tx + 1 < nb_pkts) {
204e86a6fccSAndrew Boyer 			rte_mbuf_prefetch_part1(tx_pkts[nb_tx + 1]);
205e86a6fccSAndrew Boyer 			rte_mbuf_prefetch_part2(tx_pkts[nb_tx + 1]);
206e86a6fccSAndrew Boyer 		}
207e86a6fccSAndrew Boyer 
208e86a6fccSAndrew Boyer 		mbuf = tx_pkts[nb_tx];
209e86a6fccSAndrew Boyer 
210e86a6fccSAndrew Boyer 		if (mbuf->ol_flags & RTE_MBUF_F_TX_TCP_SEG)
211e86a6fccSAndrew Boyer 			err = ionic_tx_tso(txq, mbuf);
212e86a6fccSAndrew Boyer 		else
213e86a6fccSAndrew Boyer 			err = ionic_tx_sg(txq, mbuf);
214e86a6fccSAndrew Boyer 		if (err) {
215e86a6fccSAndrew Boyer 			stats->drop += nb_pkts - nb_tx;
216e86a6fccSAndrew Boyer 			break;
217e86a6fccSAndrew Boyer 		}
218e86a6fccSAndrew Boyer 
219e86a6fccSAndrew Boyer 		bytes_tx += mbuf->pkt_len;
220e86a6fccSAndrew Boyer 		nb_tx++;
221e86a6fccSAndrew Boyer 	}
222e86a6fccSAndrew Boyer 
223e86a6fccSAndrew Boyer 	if (nb_tx > 0) {
224e86a6fccSAndrew Boyer 		rte_wmb();
225e86a6fccSAndrew Boyer 		ionic_q_flush(q);
226e86a6fccSAndrew Boyer 
227a5b1ffd8SAndrew Boyer 		txq->last_wdog_cycles = rte_get_timer_cycles();
228a5b1ffd8SAndrew Boyer 
229e86a6fccSAndrew Boyer 		stats->packets += nb_tx;
230e86a6fccSAndrew Boyer 		stats->bytes += bytes_tx;
231a5b1ffd8SAndrew Boyer 	} else {
232a5b1ffd8SAndrew Boyer 		/*
233a5b1ffd8SAndrew Boyer 		 * Ring the doorbell again if no work could be posted and work
234a5b1ffd8SAndrew Boyer 		 * is still pending after the deadline.
235a5b1ffd8SAndrew Boyer 		 */
236a5b1ffd8SAndrew Boyer 		if (q->head_idx != q->tail_idx) {
237a5b1ffd8SAndrew Boyer 			then = txq->last_wdog_cycles;
238a5b1ffd8SAndrew Boyer 			now = rte_get_timer_cycles();
239a5b1ffd8SAndrew Boyer 			hz = rte_get_timer_hz();
240a5b1ffd8SAndrew Boyer 			delta = (now - then) * 1000;
241a5b1ffd8SAndrew Boyer 
242a5b1ffd8SAndrew Boyer 			if (delta >= hz * IONIC_Q_WDOG_MS) {
243a5b1ffd8SAndrew Boyer 				ionic_q_flush(q);
244a5b1ffd8SAndrew Boyer 				txq->last_wdog_cycles = now;
245a5b1ffd8SAndrew Boyer 			}
246a5b1ffd8SAndrew Boyer 		}
247e86a6fccSAndrew Boyer 	}
248e86a6fccSAndrew Boyer 
249e86a6fccSAndrew Boyer 	return nb_tx;
250e86a6fccSAndrew Boyer }
251e86a6fccSAndrew Boyer 
/*
 * Cleans one descriptor. Connects the filled mbufs into a chain.
 * Does not advance the tail index.
 *
 * On success the assembled packet is appended to rx_svc->rx_pkts.
 * On a bad status or bad length, only the error counter is bumped;
 * the mbufs stay stashed in the info array and are reused by the
 * next fill.
 */
static __rte_always_inline void
ionic_rx_clean_one_sg(struct ionic_rx_qcq *rxq,
		volatile struct ionic_rxq_comp *cq_desc,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_queue *q = &rxq->qcq.q;
	struct rte_mbuf *rxm;
	struct rte_mbuf *rxm_seg, *prev_rxm;
	struct ionic_rx_stats *stats = &rxq->stats;
	uint64_t pkt_flags = 0;
	uint32_t pkt_type;
	uint32_t left, i;
	uint16_t cq_desc_len;
	uint8_t ptype, cflags;
	void **info;

	cq_desc_len = rte_le_to_cpu_16(cq_desc->len);

	info = IONIC_INFO_PTR(q, q->tail_idx);

	rxm = info[0];

	/* Hardware reported a receive error for this descriptor */
	if (cq_desc->status) {
		stats->bad_cq_status++;
		return;
	}

	/* Sanity-check the completion length against the configured MTU */
	if (cq_desc_len > rxq->frame_size || cq_desc_len == 0) {
		stats->bad_len++;
		return;
	}

	/* Ownership of the mbuf chain passes to the application */
	info[0] = NULL;

	/* Set the mbuf metadata based on the cq entry */
	rxm->rearm_data[0] = rxq->rearm_data;
	rxm->pkt_len = cq_desc_len;
	rxm->data_len = RTE_MIN(rxq->hdr_seg_size, cq_desc_len);
	left = cq_desc_len - rxm->data_len;
	rxm->nb_segs = cq_desc->num_sg_elems + 1;

	prev_rxm = rxm;

	/* Distribute the remaining bytes across the SG segments */
	for (i = 1; i < rxm->nb_segs && left; i++) {
		rxm_seg = info[i];
		info[i] = NULL;

		/* Set the chained mbuf metadata */
		rxm_seg->rearm_data[0] = rxq->rearm_seg_data;
		rxm_seg->data_len = RTE_MIN(rxq->seg_size, left);
		left -= rxm_seg->data_len;

		/* Link the mbuf */
		prev_rxm->next = rxm_seg;
		prev_rxm = rxm_seg;
	}

	/* Terminate the mbuf chain */
	prev_rxm->next = NULL;

	/* RSS */
	pkt_flags |= RTE_MBUF_F_RX_RSS_HASH;
	rxm->hash.rss = rte_le_to_cpu_32(cq_desc->rss_hash);

	/* Vlan Strip */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_VLAN) {
		pkt_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		rxm->vlan_tci = rte_le_to_cpu_16(cq_desc->vlan_tci);
	}

	/* Checksum */
	if (cq_desc->csum_flags & IONIC_RXQ_COMP_CSUM_F_CALC) {
		cflags = cq_desc->csum_flags & IONIC_CSUM_FLAG_MASK;
		pkt_flags |= ionic_csum_flags[cflags];
	}

	rxm->ol_flags = pkt_flags;

	/* Packet Type */
	ptype = cq_desc->pkt_type_color & IONIC_RXQ_COMP_PKT_TYPE_MASK;
	pkt_type = ionic_ptype_table[ptype];
	if (pkt_type == RTE_PTYPE_UNKNOWN) {
		/* Hardware could not classify: peek at the Ethertype for
		 * a few well-known L2 protocols (slow path).
		 */
		struct rte_ether_hdr *eth_h = rte_pktmbuf_mtod(rxm,
				struct rte_ether_hdr *);
		uint16_t ether_type = eth_h->ether_type;
		if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_ARP))
			pkt_type = RTE_PTYPE_L2_ETHER_ARP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_LLDP))
			pkt_type = RTE_PTYPE_L2_ETHER_LLDP;
		else if (ether_type == rte_cpu_to_be_16(RTE_ETHER_TYPE_1588))
			pkt_type = RTE_PTYPE_L2_ETHER_TIMESYNC;
		stats->mtods++;
	} else if (pkt_flags & RTE_MBUF_F_RX_VLAN) {
		pkt_type |= RTE_PTYPE_L2_ETHER_VLAN;
	} else {
		pkt_type |= RTE_PTYPE_L2_ETHER;
	}

	rxm->packet_type = pkt_type;

	/* Hand the packet to the caller's receive array */
	rx_svc->rx_pkts[rx_svc->nb_rx] = rxm;
	rx_svc->nb_rx++;

	stats->packets++;
	stats->bytes += rxm->pkt_len;
}
362e86a6fccSAndrew Boyer 
363e86a6fccSAndrew Boyer /*
364e86a6fccSAndrew Boyer  * Fills one descriptor with mbufs. Does not advance the head index.
365e86a6fccSAndrew Boyer  */
366e86a6fccSAndrew Boyer static __rte_always_inline int
367e86a6fccSAndrew Boyer ionic_rx_fill_one_sg(struct ionic_rx_qcq *rxq)
368e86a6fccSAndrew Boyer {
369e86a6fccSAndrew Boyer 	struct ionic_queue *q = &rxq->qcq.q;
370e86a6fccSAndrew Boyer 	struct rte_mbuf *rxm;
371e86a6fccSAndrew Boyer 	struct rte_mbuf *rxm_seg;
372e86a6fccSAndrew Boyer 	struct ionic_rxq_desc *desc, *desc_base = q->base;
373e86a6fccSAndrew Boyer 	struct ionic_rxq_sg_desc *sg_desc, *sg_desc_base = q->sg_base;
374e86a6fccSAndrew Boyer 	rte_iova_t data_iova;
375e86a6fccSAndrew Boyer 	uint32_t i;
376e86a6fccSAndrew Boyer 	void **info;
377e86a6fccSAndrew Boyer 	int ret;
378e86a6fccSAndrew Boyer 
379e86a6fccSAndrew Boyer 	info = IONIC_INFO_PTR(q, q->head_idx);
380e86a6fccSAndrew Boyer 	desc = &desc_base[q->head_idx];
381e86a6fccSAndrew Boyer 	sg_desc = &sg_desc_base[q->head_idx];
382e86a6fccSAndrew Boyer 
383e86a6fccSAndrew Boyer 	/* mbuf is unused => whole chain is unused */
384e86a6fccSAndrew Boyer 	if (info[0])
385e86a6fccSAndrew Boyer 		return 0;
386e86a6fccSAndrew Boyer 
387e86a6fccSAndrew Boyer 	if (rxq->mb_idx == 0) {
388e86a6fccSAndrew Boyer 		ret = rte_mempool_get_bulk(rxq->mb_pool,
389e86a6fccSAndrew Boyer 					(void **)rxq->mbs,
390e86a6fccSAndrew Boyer 					IONIC_MBUF_BULK_ALLOC);
391e86a6fccSAndrew Boyer 		if (ret) {
392e86a6fccSAndrew Boyer 			assert(0);
393e86a6fccSAndrew Boyer 			return -ENOMEM;
394e86a6fccSAndrew Boyer 		}
395e86a6fccSAndrew Boyer 
396e86a6fccSAndrew Boyer 		rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
397e86a6fccSAndrew Boyer 	}
398e86a6fccSAndrew Boyer 
399e86a6fccSAndrew Boyer 	rxm = rxq->mbs[--rxq->mb_idx];
400e86a6fccSAndrew Boyer 	info[0] = rxm;
401e86a6fccSAndrew Boyer 
402e86a6fccSAndrew Boyer 	data_iova = rte_mbuf_data_iova_default(rxm);
403e86a6fccSAndrew Boyer 	desc->addr = rte_cpu_to_le_64(data_iova);
404e86a6fccSAndrew Boyer 
405e86a6fccSAndrew Boyer 	for (i = 1; i < q->num_segs; i++) {
406e86a6fccSAndrew Boyer 		/* mbuf is unused => rest of the chain is unused */
407e86a6fccSAndrew Boyer 		if (info[i])
408e86a6fccSAndrew Boyer 			return 0;
409e86a6fccSAndrew Boyer 
410e86a6fccSAndrew Boyer 		if (rxq->mb_idx == 0) {
411e86a6fccSAndrew Boyer 			ret = rte_mempool_get_bulk(rxq->mb_pool,
412e86a6fccSAndrew Boyer 					(void **)rxq->mbs,
413e86a6fccSAndrew Boyer 					IONIC_MBUF_BULK_ALLOC);
414e86a6fccSAndrew Boyer 			if (ret) {
415e86a6fccSAndrew Boyer 				assert(0);
416e86a6fccSAndrew Boyer 				return -ENOMEM;
417e86a6fccSAndrew Boyer 			}
418e86a6fccSAndrew Boyer 
419e86a6fccSAndrew Boyer 			rxq->mb_idx = IONIC_MBUF_BULK_ALLOC;
420e86a6fccSAndrew Boyer 		}
421e86a6fccSAndrew Boyer 
422e86a6fccSAndrew Boyer 		rxm_seg = rxq->mbs[--rxq->mb_idx];
423e86a6fccSAndrew Boyer 		info[i] = rxm_seg;
424e86a6fccSAndrew Boyer 
425e86a6fccSAndrew Boyer 		/* The data_off does not get set to 0 until later */
426e86a6fccSAndrew Boyer 		data_iova = rxm_seg->buf_iova;
427e86a6fccSAndrew Boyer 		sg_desc->elems[i - 1].addr = rte_cpu_to_le_64(data_iova);
428e86a6fccSAndrew Boyer 	}
429e86a6fccSAndrew Boyer 
430e86a6fccSAndrew Boyer 	return 0;
431e86a6fccSAndrew Boyer }
432e86a6fccSAndrew Boyer 
/*
 * Walk the CQ to find completed receive descriptors.
 * Any completed descriptor found is refilled.
 *
 * Processes at most work_to_do completions, appending received
 * packets to rx_svc. Rings the doorbell when any work was done;
 * otherwise falls back to a watchdog re-ring with exponential
 * backoff so a stalled ring eventually recovers.
 */
static __rte_always_inline void
ionic_rxq_service_sg(struct ionic_rx_qcq *rxq, uint32_t work_to_do,
		struct ionic_rx_service *rx_svc)
{
	struct ionic_cq *cq = &rxq->qcq.cq;
	struct ionic_queue *q = &rxq->qcq.q;
	struct ionic_rxq_desc *q_desc_base = q->base;
	struct ionic_rxq_comp *cq_desc_base = cq->base;
	/* volatile: entries are written by the hardware as we poll */
	volatile struct ionic_rxq_comp *cq_desc;
	uint32_t work_done = 0;
	uint64_t then, now, hz, delta;

	cq_desc = &cq_desc_base[cq->tail_idx];

	/* The color bit in pkt_type_color flips on each CQ wrap; a match
	 * means the hardware has posted this completion.
	 */
	while (color_match(cq_desc->pkt_type_color, cq->done_color)) {
		cq->tail_idx = Q_NEXT_TO_SRVC(cq, 1);
		if (cq->tail_idx == 0)
			cq->done_color = !cq->done_color;

		/* Prefetch 8 x 8B bufinfo */
		rte_prefetch0(IONIC_INFO_PTR(q, Q_NEXT_TO_SRVC(q, 8)));
		/* Prefetch 4 x 16B comp */
		rte_prefetch0(&cq_desc_base[Q_NEXT_TO_SRVC(cq, 4)]);
		/* Prefetch 4 x 16B descriptors */
		if (!(rxq->flags & IONIC_QCQ_F_CMB))
			rte_prefetch0(&q_desc_base[Q_NEXT_TO_POST(q, 4)]);

		/* Clean one descriptor */
		ionic_rx_clean_one_sg(rxq, cq_desc, rx_svc);
		q->tail_idx = Q_NEXT_TO_SRVC(q, 1);

		/* Fill one descriptor */
		(void)ionic_rx_fill_one_sg(rxq);

		q->head_idx = Q_NEXT_TO_POST(q, 1);

		if (++work_done == work_to_do)
			break;

		cq_desc = &cq_desc_base[cq->tail_idx];
	}

	/* Update the queue indices and ring the doorbell */
	if (work_done) {
		ionic_q_flush(q);
		/* Activity: reset the watchdog timestamp and backoff */
		rxq->last_wdog_cycles = rte_get_timer_cycles();
		rxq->wdog_ms = IONIC_Q_WDOG_MS;
	} else {
		/*
		 * Ring the doorbell again if no recvs were posted and the
		 * recv queue is not empty after the deadline.
		 *
		 * Exponentially back off the deadline to avoid excessive
		 * doorbells when the recv queue is idle.
		 */
		if (q->head_idx != q->tail_idx) {
			then = rxq->last_wdog_cycles;
			now = rte_get_timer_cycles();
			hz = rte_get_timer_hz();
			delta = (now - then) * 1000;

			if (delta >= hz * rxq->wdog_ms) {
				ionic_q_flush(q);
				rxq->last_wdog_cycles = now;

				delta = 2 * rxq->wdog_ms;
				if (delta > IONIC_Q_WDOG_MAX_MS)
					delta = IONIC_Q_WDOG_MAX_MS;

				rxq->wdog_ms = delta;
			}
		}
	}
}
511e86a6fccSAndrew Boyer 
512e86a6fccSAndrew Boyer uint16_t
513e86a6fccSAndrew Boyer ionic_recv_pkts_sg(void *rx_queue, struct rte_mbuf **rx_pkts,
514e86a6fccSAndrew Boyer 		uint16_t nb_pkts)
515e86a6fccSAndrew Boyer {
516e86a6fccSAndrew Boyer 	struct ionic_rx_qcq *rxq = rx_queue;
517e86a6fccSAndrew Boyer 	struct ionic_rx_service rx_svc;
518e86a6fccSAndrew Boyer 
519e86a6fccSAndrew Boyer 	rx_svc.rx_pkts = rx_pkts;
520e86a6fccSAndrew Boyer 	rx_svc.nb_rx = 0;
521e86a6fccSAndrew Boyer 
522e86a6fccSAndrew Boyer 	ionic_rxq_service_sg(rxq, nb_pkts, &rx_svc);
523e86a6fccSAndrew Boyer 
524e86a6fccSAndrew Boyer 	return rx_svc.nb_rx;
525e86a6fccSAndrew Boyer }
526e86a6fccSAndrew Boyer 
527e86a6fccSAndrew Boyer /*
528e86a6fccSAndrew Boyer  * Fills all descriptors with mbufs.
529e86a6fccSAndrew Boyer  */
530e86a6fccSAndrew Boyer int __rte_cold
531e86a6fccSAndrew Boyer ionic_rx_fill_sg(struct ionic_rx_qcq *rxq)
532e86a6fccSAndrew Boyer {
533e86a6fccSAndrew Boyer 	struct ionic_queue *q = &rxq->qcq.q;
534e86a6fccSAndrew Boyer 	uint32_t i;
535e86a6fccSAndrew Boyer 	int err = 0;
536e86a6fccSAndrew Boyer 
537e86a6fccSAndrew Boyer 	for (i = 0; i < q->num_descs - 1u; i++) {
538e86a6fccSAndrew Boyer 		err = ionic_rx_fill_one_sg(rxq);
539e86a6fccSAndrew Boyer 		if (err)
540e86a6fccSAndrew Boyer 			break;
541e86a6fccSAndrew Boyer 
542e86a6fccSAndrew Boyer 		q->head_idx = Q_NEXT_TO_POST(q, 1);
543e86a6fccSAndrew Boyer 	}
544e86a6fccSAndrew Boyer 
545e86a6fccSAndrew Boyer 	ionic_q_flush(q);
546e86a6fccSAndrew Boyer 
547e86a6fccSAndrew Boyer 	return err;
548e86a6fccSAndrew Boyer }
549