xref: /dpdk/drivers/net/sfc/sfc_ef10_rx.c (revision a0147be54763c09daca94eec7cb075214788ca65)
144c0947bSAndrew Rybchenko /* SPDX-License-Identifier: BSD-3-Clause
2638bddc9SAndrew Rybchenko  *
3*a0147be5SAndrew Rybchenko  * Copyright(c) 2019-2020 Xilinx, Inc.
4*a0147be5SAndrew Rybchenko  * Copyright(c) 2016-2019 Solarflare Communications Inc.
5638bddc9SAndrew Rybchenko  *
6638bddc9SAndrew Rybchenko  * This software was jointly developed between OKTET Labs (under contract
7638bddc9SAndrew Rybchenko  * for Solarflare) and Solarflare Communications, Inc.
8638bddc9SAndrew Rybchenko  */
9638bddc9SAndrew Rybchenko 
10638bddc9SAndrew Rybchenko /* EF10 native datapath implementation */
11638bddc9SAndrew Rybchenko 
12638bddc9SAndrew Rybchenko #include <stdbool.h>
13638bddc9SAndrew Rybchenko 
14638bddc9SAndrew Rybchenko #include <rte_byteorder.h>
15638bddc9SAndrew Rybchenko #include <rte_mbuf_ptype.h>
16638bddc9SAndrew Rybchenko #include <rte_mbuf.h>
17638bddc9SAndrew Rybchenko #include <rte_io.h>
18638bddc9SAndrew Rybchenko 
19638bddc9SAndrew Rybchenko #include "efx.h"
20638bddc9SAndrew Rybchenko #include "efx_types.h"
21638bddc9SAndrew Rybchenko #include "efx_regs.h"
22638bddc9SAndrew Rybchenko #include "efx_regs_ef10.h"
23638bddc9SAndrew Rybchenko 
24638bddc9SAndrew Rybchenko #include "sfc_tweak.h"
25638bddc9SAndrew Rybchenko #include "sfc_dp_rx.h"
26638bddc9SAndrew Rybchenko #include "sfc_kvargs.h"
27638bddc9SAndrew Rybchenko #include "sfc_ef10.h"
287ee7e57cSAndrew Rybchenko 
297ee7e57cSAndrew Rybchenko #define SFC_EF10_RX_EV_ENCAP_SUPPORT	1
30c121f008SAndrew Rybchenko #include "sfc_ef10_rx_ev.h"
31638bddc9SAndrew Rybchenko 
32638bddc9SAndrew Rybchenko #define sfc_ef10_rx_err(dpq, ...) \
33638bddc9SAndrew Rybchenko 	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
34638bddc9SAndrew Rybchenko 
35638bddc9SAndrew Rybchenko /**
36638bddc9SAndrew Rybchenko  * Maximum number of descriptors/buffers in the Rx ring.
37638bddc9SAndrew Rybchenko  * It should guarantee that the corresponding event queue never overfills.
38638bddc9SAndrew Rybchenko  * The EF10 native datapath uses an event queue of the same size as the Rx queue.
39638bddc9SAndrew Rybchenko  * The maximum number of events on the datapath can be estimated as the number of
40638bddc9SAndrew Rybchenko  * Rx queue entries (one event per Rx buffer in the worst case) plus
41638bddc9SAndrew Rybchenko  * Rx error and flush events.
42638bddc9SAndrew Rybchenko  */
43638bddc9SAndrew Rybchenko #define SFC_EF10_RXQ_LIMIT(_ndesc) \
44638bddc9SAndrew Rybchenko 	((_ndesc) - 1 /* head must not step on tail */ - \
45638bddc9SAndrew Rybchenko 	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
46638bddc9SAndrew Rybchenko 	 1 /* Rx error */ - 1 /* flush */)
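
/*
 * Worked example (a sketch only; the exact value of
 * SFC_EF10_EV_PER_CACHE_LINE depends on the build and is assumed to be
 * 8 here, i.e. a 64-byte cache line holding eight 8-byte events):
 * SFC_EF10_RXQ_LIMIT(512) = 512 - 1 - (8 - 1) - 1 - 1 = 502, so at
 * most 502 Rx descriptors may be outstanding on a 512-entry ring to
 * keep the same-sized event queue from overfilling.
 */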
47638bddc9SAndrew Rybchenko 
48638bddc9SAndrew Rybchenko struct sfc_ef10_rx_sw_desc {
49638bddc9SAndrew Rybchenko 	struct rte_mbuf			*mbuf;
50638bddc9SAndrew Rybchenko };
51638bddc9SAndrew Rybchenko 
52638bddc9SAndrew Rybchenko struct sfc_ef10_rxq {
53638bddc9SAndrew Rybchenko 	/* Used on data path */
54638bddc9SAndrew Rybchenko 	unsigned int			flags;
55638bddc9SAndrew Rybchenko #define SFC_EF10_RXQ_STARTED		0x1
56638bddc9SAndrew Rybchenko #define SFC_EF10_RXQ_NOT_RUNNING	0x2
57638bddc9SAndrew Rybchenko #define SFC_EF10_RXQ_EXCEPTION		0x4
58638bddc9SAndrew Rybchenko #define SFC_EF10_RXQ_RSS_HASH		0x8
591245e3faSGeorgiy Levashov #define SFC_EF10_RXQ_FLAG_INTR_EN	0x10
60638bddc9SAndrew Rybchenko 	unsigned int			ptr_mask;
613be22684SAndrew Rybchenko 	unsigned int			pending;
62638bddc9SAndrew Rybchenko 	unsigned int			completed;
63638bddc9SAndrew Rybchenko 	unsigned int			evq_read_ptr;
641245e3faSGeorgiy Levashov 	unsigned int			evq_read_ptr_primed;
65638bddc9SAndrew Rybchenko 	efx_qword_t			*evq_hw_ring;
66638bddc9SAndrew Rybchenko 	struct sfc_ef10_rx_sw_desc	*sw_ring;
67638bddc9SAndrew Rybchenko 	uint64_t			rearm_data;
6852e10cb0SAndrew Rybchenko 	struct rte_mbuf			*scatter_pkt;
691245e3faSGeorgiy Levashov 	volatile void			*evq_prime;
70638bddc9SAndrew Rybchenko 	uint16_t			prefix_size;
71638bddc9SAndrew Rybchenko 
72638bddc9SAndrew Rybchenko 	/* Used on refill */
73638bddc9SAndrew Rybchenko 	uint16_t			buf_size;
74638bddc9SAndrew Rybchenko 	unsigned int			added;
75e5595ee2SAndrew Rybchenko 	unsigned int			max_fill_level;
76638bddc9SAndrew Rybchenko 	unsigned int			refill_threshold;
77638bddc9SAndrew Rybchenko 	struct rte_mempool		*refill_mb_pool;
78638bddc9SAndrew Rybchenko 	efx_qword_t			*rxq_hw_ring;
79638bddc9SAndrew Rybchenko 	volatile void			*doorbell;
80638bddc9SAndrew Rybchenko 
81638bddc9SAndrew Rybchenko 	/* Datapath receive queue anchor */
82638bddc9SAndrew Rybchenko 	struct sfc_dp_rxq		dp;
83638bddc9SAndrew Rybchenko };
84638bddc9SAndrew Rybchenko 
85638bddc9SAndrew Rybchenko static inline struct sfc_ef10_rxq *
86638bddc9SAndrew Rybchenko sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
87638bddc9SAndrew Rybchenko {
88638bddc9SAndrew Rybchenko 	return container_of(dp_rxq, struct sfc_ef10_rxq, dp);
89638bddc9SAndrew Rybchenko }
90638bddc9SAndrew Rybchenko 
91638bddc9SAndrew Rybchenko static void
921245e3faSGeorgiy Levashov sfc_ef10_rx_qprime(struct sfc_ef10_rxq *rxq)
931245e3faSGeorgiy Levashov {
941245e3faSGeorgiy Levashov 	sfc_ef10_ev_qprime(rxq->evq_prime, rxq->evq_read_ptr, rxq->ptr_mask);
951245e3faSGeorgiy Levashov 	rxq->evq_read_ptr_primed = rxq->evq_read_ptr;
961245e3faSGeorgiy Levashov }
971245e3faSGeorgiy Levashov 
981245e3faSGeorgiy Levashov static void
99638bddc9SAndrew Rybchenko sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
100638bddc9SAndrew Rybchenko {
101638bddc9SAndrew Rybchenko 	const unsigned int ptr_mask = rxq->ptr_mask;
102638bddc9SAndrew Rybchenko 	const uint32_t buf_size = rxq->buf_size;
103638bddc9SAndrew Rybchenko 	unsigned int free_space;
104638bddc9SAndrew Rybchenko 	unsigned int bulks;
105638bddc9SAndrew Rybchenko 	void *objs[SFC_RX_REFILL_BULK];
106638bddc9SAndrew Rybchenko 	unsigned int added = rxq->added;
107638bddc9SAndrew Rybchenko 
108a2443fdfSAndrew Rybchenko 	RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);
109a2443fdfSAndrew Rybchenko 
110e5595ee2SAndrew Rybchenko 	free_space = rxq->max_fill_level - (added - rxq->completed);
111638bddc9SAndrew Rybchenko 
112638bddc9SAndrew Rybchenko 	if (free_space < rxq->refill_threshold)
113638bddc9SAndrew Rybchenko 		return;
114638bddc9SAndrew Rybchenko 
115638bddc9SAndrew Rybchenko 	bulks = free_space / RTE_DIM(objs);
116638bddc9SAndrew Rybchenko 	/* refill_threshold guarantees that bulks is positive */
117638bddc9SAndrew Rybchenko 	SFC_ASSERT(bulks > 0);
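	/*
	 * Illustrative numbers (assumed, not taken from the code): with
	 * max_fill_level == 1014, added - completed == 900 and
	 * SFC_RX_REFILL_BULK == 8, free_space is 114 and bulks is 14, so
	 * the loop below posts up to 14 * 8 == 112 buffers; the remainder
	 * smaller than one bulk is left for the next refill.
	 */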
118638bddc9SAndrew Rybchenko 
119638bddc9SAndrew Rybchenko 	do {
120638bddc9SAndrew Rybchenko 		unsigned int id;
121638bddc9SAndrew Rybchenko 		unsigned int i;
122638bddc9SAndrew Rybchenko 
123638bddc9SAndrew Rybchenko 		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
124638bddc9SAndrew Rybchenko 						  RTE_DIM(objs)) < 0)) {
125638bddc9SAndrew Rybchenko 			struct rte_eth_dev_data *dev_data =
126638bddc9SAndrew Rybchenko 				rte_eth_devices[rxq->dp.dpq.port_id].data;
127638bddc9SAndrew Rybchenko 
128638bddc9SAndrew Rybchenko 			/*
129638bddc9SAndrew Rybchenko 			 * Incrementing the counter from different contexts
130638bddc9SAndrew Rybchenko 			 * is hardly safe, but all PMDs do it.
131638bddc9SAndrew Rybchenko 			 */
132638bddc9SAndrew Rybchenko 			dev_data->rx_mbuf_alloc_failed += RTE_DIM(objs);
133638bddc9SAndrew Rybchenko 			/* Return if we have posted nothing yet */
134638bddc9SAndrew Rybchenko 			if (added == rxq->added)
135638bddc9SAndrew Rybchenko 				return;
136638bddc9SAndrew Rybchenko 			/* Push posted */
137638bddc9SAndrew Rybchenko 			break;
138638bddc9SAndrew Rybchenko 		}
139638bddc9SAndrew Rybchenko 
140638bddc9SAndrew Rybchenko 		for (i = 0, id = added & ptr_mask;
141638bddc9SAndrew Rybchenko 		     i < RTE_DIM(objs);
142638bddc9SAndrew Rybchenko 		     ++i, ++id) {
143638bddc9SAndrew Rybchenko 			struct rte_mbuf *m = objs[i];
144638bddc9SAndrew Rybchenko 			struct sfc_ef10_rx_sw_desc *rxd;
145df6e0a06SSantosh Shukla 			rte_iova_t phys_addr;
146638bddc9SAndrew Rybchenko 
147f3a5fa85SAndrew Rybchenko 			MBUF_RAW_ALLOC_CHECK(m);
148f3a5fa85SAndrew Rybchenko 
149638bddc9SAndrew Rybchenko 			SFC_ASSERT((id & ~ptr_mask) == 0);
150638bddc9SAndrew Rybchenko 			rxd = &rxq->sw_ring[id];
151638bddc9SAndrew Rybchenko 			rxd->mbuf = m;
152638bddc9SAndrew Rybchenko 
153638bddc9SAndrew Rybchenko 			/*
154638bddc9SAndrew Rybchenko 			 * Avoid writing to the mbuf here. It is cheaper to do
155638bddc9SAndrew Rybchenko 			 * it when we receive the packet and fill in nearby
156638bddc9SAndrew Rybchenko 			 * structure members.
157638bddc9SAndrew Rybchenko 			 */
158638bddc9SAndrew Rybchenko 
159bfa9a8a4SThomas Monjalon 			phys_addr = rte_mbuf_data_iova_default(m);
160638bddc9SAndrew Rybchenko 			EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
161638bddc9SAndrew Rybchenko 			    ESF_DZ_RX_KER_BYTE_CNT, buf_size,
162638bddc9SAndrew Rybchenko 			    ESF_DZ_RX_KER_BUF_ADDR, phys_addr);
163638bddc9SAndrew Rybchenko 		}
164638bddc9SAndrew Rybchenko 
165638bddc9SAndrew Rybchenko 		added += RTE_DIM(objs);
166638bddc9SAndrew Rybchenko 	} while (--bulks > 0);
167638bddc9SAndrew Rybchenko 
168638bddc9SAndrew Rybchenko 	SFC_ASSERT(rxq->added != added);
169638bddc9SAndrew Rybchenko 	rxq->added = added;
170a2443fdfSAndrew Rybchenko 	sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask);
171638bddc9SAndrew Rybchenko }
172638bddc9SAndrew Rybchenko 
173638bddc9SAndrew Rybchenko static void
174638bddc9SAndrew Rybchenko sfc_ef10_rx_prefetch_next(struct sfc_ef10_rxq *rxq, unsigned int next_id)
175638bddc9SAndrew Rybchenko {
176638bddc9SAndrew Rybchenko 	struct rte_mbuf *next_mbuf;
177638bddc9SAndrew Rybchenko 
178638bddc9SAndrew Rybchenko 	/* Prefetch next bunch of software descriptors */
179638bddc9SAndrew Rybchenko 	if ((next_id % (RTE_CACHE_LINE_SIZE / sizeof(rxq->sw_ring[0]))) == 0)
180638bddc9SAndrew Rybchenko 		rte_prefetch0(&rxq->sw_ring[next_id]);
181638bddc9SAndrew Rybchenko 
182638bddc9SAndrew Rybchenko 	/*
183638bddc9SAndrew Rybchenko 	 * It looks strange to prefetch based on previously prefetched
184638bddc9SAndrew Rybchenko 	 * data, but measurements show that it is really efficient and
185638bddc9SAndrew Rybchenko 	 * increases packet rate.
186638bddc9SAndrew Rybchenko 	 */
187638bddc9SAndrew Rybchenko 	next_mbuf = rxq->sw_ring[next_id].mbuf;
188638bddc9SAndrew Rybchenko 	if (likely(next_mbuf != NULL)) {
189638bddc9SAndrew Rybchenko 		/* Prefetch the next mbuf structure */
190638bddc9SAndrew Rybchenko 		rte_mbuf_prefetch_part1(next_mbuf);
191638bddc9SAndrew Rybchenko 
192638bddc9SAndrew Rybchenko 		/* Prefetch pseudo header of the next packet */
193638bddc9SAndrew Rybchenko 		/* data_off is not filled in yet */
194638bddc9SAndrew Rybchenko 		/* Yes, the data may not be ready yet, but we hope it is */
195638bddc9SAndrew Rybchenko 		rte_prefetch0((uint8_t *)next_mbuf->buf_addr +
196638bddc9SAndrew Rybchenko 			      RTE_PKTMBUF_HEADROOM);
197638bddc9SAndrew Rybchenko 	}
198638bddc9SAndrew Rybchenko }
199638bddc9SAndrew Rybchenko 
20084b63b5cSAndrew Rybchenko static struct rte_mbuf **
2013be22684SAndrew Rybchenko sfc_ef10_rx_pending(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
202638bddc9SAndrew Rybchenko 		    uint16_t nb_pkts)
203638bddc9SAndrew Rybchenko {
2043be22684SAndrew Rybchenko 	uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->pending - rxq->completed);
2053ac6ddd4SAndrew Rybchenko 
20652e10cb0SAndrew Rybchenko 	SFC_ASSERT(rxq->pending == rxq->completed || rxq->scatter_pkt == NULL);
20752e10cb0SAndrew Rybchenko 
2083ac6ddd4SAndrew Rybchenko 	if (n_rx_pkts != 0) {
209638bddc9SAndrew Rybchenko 		unsigned int completed = rxq->completed;
210638bddc9SAndrew Rybchenko 
211638bddc9SAndrew Rybchenko 		rxq->completed = completed + n_rx_pkts;
212638bddc9SAndrew Rybchenko 
2133ac6ddd4SAndrew Rybchenko 		do {
2143ac6ddd4SAndrew Rybchenko 			*rx_pkts++ =
2153ac6ddd4SAndrew Rybchenko 				rxq->sw_ring[completed++ & rxq->ptr_mask].mbuf;
2163ac6ddd4SAndrew Rybchenko 		} while (completed != rxq->completed);
2173ac6ddd4SAndrew Rybchenko 	}
218638bddc9SAndrew Rybchenko 
21984b63b5cSAndrew Rybchenko 	return rx_pkts;
220638bddc9SAndrew Rybchenko }
221638bddc9SAndrew Rybchenko 
222638bddc9SAndrew Rybchenko static uint16_t
223638bddc9SAndrew Rybchenko sfc_ef10_rx_pseudo_hdr_get_len(const uint8_t *pseudo_hdr)
224638bddc9SAndrew Rybchenko {
225638bddc9SAndrew Rybchenko 	return rte_le_to_cpu_16(*(const uint16_t *)&pseudo_hdr[8]);
226638bddc9SAndrew Rybchenko }
227638bddc9SAndrew Rybchenko 
228638bddc9SAndrew Rybchenko static uint32_t
229638bddc9SAndrew Rybchenko sfc_ef10_rx_pseudo_hdr_get_hash(const uint8_t *pseudo_hdr)
230638bddc9SAndrew Rybchenko {
231638bddc9SAndrew Rybchenko 	return rte_le_to_cpu_32(*(const uint32_t *)pseudo_hdr);
232638bddc9SAndrew Rybchenko }
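
/*
 * Note on the accessors above: as used by this datapath, the Rx
 * pseudo-header (prefix) carries the RSS hash in its first four bytes
 * and the packet length in the two bytes at offset 8, both
 * little-endian. The full prefix layout is defined by the firmware
 * interface headers; only these two fields are read here.
 */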
233638bddc9SAndrew Rybchenko 
23484b63b5cSAndrew Rybchenko static struct rte_mbuf **
235638bddc9SAndrew Rybchenko sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
23684b63b5cSAndrew Rybchenko 			  struct rte_mbuf **rx_pkts,
23784b63b5cSAndrew Rybchenko 			  struct rte_mbuf ** const rx_pkts_end)
238638bddc9SAndrew Rybchenko {
239638bddc9SAndrew Rybchenko 	const unsigned int ptr_mask = rxq->ptr_mask;
2403be22684SAndrew Rybchenko 	unsigned int pending = rxq->pending;
241638bddc9SAndrew Rybchenko 	unsigned int ready;
242638bddc9SAndrew Rybchenko 	struct sfc_ef10_rx_sw_desc *rxd;
243638bddc9SAndrew Rybchenko 	struct rte_mbuf *m;
244638bddc9SAndrew Rybchenko 	struct rte_mbuf *m0;
245638bddc9SAndrew Rybchenko 	const uint8_t *pseudo_hdr;
246c4753858SAndrew Rybchenko 	uint16_t seg_len;
247638bddc9SAndrew Rybchenko 
2483be22684SAndrew Rybchenko 	ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - pending) &
249638bddc9SAndrew Rybchenko 		EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
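	/*
	 * The event carries only the low bits of the Rx descriptor pointer,
	 * so the number of newly completed descriptors is computed modulo
	 * the field width. For illustration, assuming a 4-bit field:
	 * pending low bits 0xe and event value 0x2 give
	 * ready = (0x2 - 0xe) & 0xf = 4.
	 */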
25052e10cb0SAndrew Rybchenko 
25152e10cb0SAndrew Rybchenko 	if (ready == 0) {
25252e10cb0SAndrew Rybchenko 		/* Rx abort - there were not enough descriptors for the Rx packet */
25352e10cb0SAndrew Rybchenko 		rte_pktmbuf_free(rxq->scatter_pkt);
25452e10cb0SAndrew Rybchenko 		rxq->scatter_pkt = NULL;
25552e10cb0SAndrew Rybchenko 		return rx_pkts;
25652e10cb0SAndrew Rybchenko 	}
257638bddc9SAndrew Rybchenko 
2583be22684SAndrew Rybchenko 	rxq->pending = pending + ready;
2593be22684SAndrew Rybchenko 
260638bddc9SAndrew Rybchenko 	if (rx_ev.eq_u64[0] &
261638bddc9SAndrew Rybchenko 	    rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
262638bddc9SAndrew Rybchenko 			     (1ull << ESF_DZ_RX_ECRC_ERR_LBN))) {
2633be22684SAndrew Rybchenko 		SFC_ASSERT(rxq->completed == pending);
2643be22684SAndrew Rybchenko 		do {
2653be22684SAndrew Rybchenko 			rxd = &rxq->sw_ring[pending++ & ptr_mask];
26666e10b8dSAndrew Rybchenko 			rte_mbuf_raw_free(rxd->mbuf);
2673be22684SAndrew Rybchenko 		} while (pending != rxq->pending);
2683be22684SAndrew Rybchenko 		rxq->completed = pending;
26984b63b5cSAndrew Rybchenko 		return rx_pkts;
270638bddc9SAndrew Rybchenko 	}
271638bddc9SAndrew Rybchenko 
27252e10cb0SAndrew Rybchenko 	/* If a scattered packet is in progress */
27352e10cb0SAndrew Rybchenko 	if (rxq->scatter_pkt != NULL) {
27452e10cb0SAndrew Rybchenko 		/* Events for scattered packet frags are not merged */
27552e10cb0SAndrew Rybchenko 		SFC_ASSERT(ready == 1);
27652e10cb0SAndrew Rybchenko 		SFC_ASSERT(rxq->completed == pending);
27752e10cb0SAndrew Rybchenko 
27852e10cb0SAndrew Rybchenko 		/* There is no pseudo-header in scatter segments. */
27952e10cb0SAndrew Rybchenko 		seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES);
28052e10cb0SAndrew Rybchenko 
28152e10cb0SAndrew Rybchenko 		rxd = &rxq->sw_ring[pending++ & ptr_mask];
28252e10cb0SAndrew Rybchenko 		m = rxd->mbuf;
28352e10cb0SAndrew Rybchenko 
28452e10cb0SAndrew Rybchenko 		MBUF_RAW_ALLOC_CHECK(m);
28552e10cb0SAndrew Rybchenko 
28652e10cb0SAndrew Rybchenko 		m->data_off = RTE_PKTMBUF_HEADROOM;
28752e10cb0SAndrew Rybchenko 		rte_pktmbuf_data_len(m) = seg_len;
28852e10cb0SAndrew Rybchenko 		rte_pktmbuf_pkt_len(m) = seg_len;
28952e10cb0SAndrew Rybchenko 
29052e10cb0SAndrew Rybchenko 		rxq->scatter_pkt->nb_segs++;
29152e10cb0SAndrew Rybchenko 		rte_pktmbuf_pkt_len(rxq->scatter_pkt) += seg_len;
29252e10cb0SAndrew Rybchenko 		rte_pktmbuf_lastseg(rxq->scatter_pkt)->next = m;
29352e10cb0SAndrew Rybchenko 
29452e10cb0SAndrew Rybchenko 		if (~rx_ev.eq_u64[0] &
29552e10cb0SAndrew Rybchenko 		    rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) {
29652e10cb0SAndrew Rybchenko 			*rx_pkts++ = rxq->scatter_pkt;
29752e10cb0SAndrew Rybchenko 			rxq->scatter_pkt = NULL;
29852e10cb0SAndrew Rybchenko 		}
29952e10cb0SAndrew Rybchenko 		rxq->completed = pending;
30052e10cb0SAndrew Rybchenko 		return rx_pkts;
30152e10cb0SAndrew Rybchenko 	}
30252e10cb0SAndrew Rybchenko 
3033be22684SAndrew Rybchenko 	rxd = &rxq->sw_ring[pending++ & ptr_mask];
304638bddc9SAndrew Rybchenko 
3053be22684SAndrew Rybchenko 	sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask);
306638bddc9SAndrew Rybchenko 
307638bddc9SAndrew Rybchenko 	m = rxd->mbuf;
308638bddc9SAndrew Rybchenko 
30968de5f6cSAndrew Rybchenko 	RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != sizeof(rxq->rearm_data));
31068de5f6cSAndrew Rybchenko 	m->rearm_data[0] = rxq->rearm_data;
311638bddc9SAndrew Rybchenko 
312638bddc9SAndrew Rybchenko 	/* Classify packet based on Rx event */
313a6539283SAndrew Rybchenko 	/* Mask RSS hash offload flag if RSS is not enabled */
314a6539283SAndrew Rybchenko 	sfc_ef10_rx_ev_to_offloads(rx_ev, m,
315a6539283SAndrew Rybchenko 				   (rxq->flags & SFC_EF10_RXQ_RSS_HASH) ?
316a6539283SAndrew Rybchenko 				   ~0ull : ~PKT_RX_RSS_HASH);
317638bddc9SAndrew Rybchenko 
318638bddc9SAndrew Rybchenko 	/* data_off already moved past pseudo header */
319638bddc9SAndrew Rybchenko 	pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
320638bddc9SAndrew Rybchenko 
321638bddc9SAndrew Rybchenko 	/*
322638bddc9SAndrew Rybchenko 	 * Always get the RSS hash from the pseudo header to avoid
323638bddc9SAndrew Rybchenko 	 * a condition/branch. Whether it is valid depends on
324638bddc9SAndrew Rybchenko 	 * PKT_RX_RSS_HASH in m->ol_flags.
325638bddc9SAndrew Rybchenko 	 */
326638bddc9SAndrew Rybchenko 	m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
327638bddc9SAndrew Rybchenko 
328638bddc9SAndrew Rybchenko 	if (ready == 1)
329c4753858SAndrew Rybchenko 		seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
330638bddc9SAndrew Rybchenko 			rxq->prefix_size;
331638bddc9SAndrew Rybchenko 	else
332c4753858SAndrew Rybchenko 		seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
333c4753858SAndrew Rybchenko 	SFC_ASSERT(seg_len > 0);
334c4753858SAndrew Rybchenko 	rte_pktmbuf_data_len(m) = seg_len;
335c4753858SAndrew Rybchenko 	rte_pktmbuf_pkt_len(m) = seg_len;
336638bddc9SAndrew Rybchenko 
33768de5f6cSAndrew Rybchenko 	SFC_ASSERT(m->next == NULL);
338638bddc9SAndrew Rybchenko 
33952e10cb0SAndrew Rybchenko 	if (~rx_ev.eq_u64[0] & rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) {
34052e10cb0SAndrew Rybchenko 		*rx_pkts++ = m;
34152e10cb0SAndrew Rybchenko 		rxq->completed = pending;
34252e10cb0SAndrew Rybchenko 	} else {
34352e10cb0SAndrew Rybchenko 		/* Events with CONT bit are not merged */
34452e10cb0SAndrew Rybchenko 		SFC_ASSERT(ready == 1);
34552e10cb0SAndrew Rybchenko 		rxq->scatter_pkt = m;
34652e10cb0SAndrew Rybchenko 		rxq->completed = pending;
34752e10cb0SAndrew Rybchenko 		return rx_pkts;
34852e10cb0SAndrew Rybchenko 	}
34952e10cb0SAndrew Rybchenko 
350638bddc9SAndrew Rybchenko 	/* Remember mbuf to copy offload flags and packet type from */
351638bddc9SAndrew Rybchenko 	m0 = m;
3523be22684SAndrew Rybchenko 	while (pending != rxq->pending) {
3533be22684SAndrew Rybchenko 		rxd = &rxq->sw_ring[pending++ & ptr_mask];
354638bddc9SAndrew Rybchenko 
3553be22684SAndrew Rybchenko 		sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask);
356638bddc9SAndrew Rybchenko 
357638bddc9SAndrew Rybchenko 		m = rxd->mbuf;
358638bddc9SAndrew Rybchenko 
3593be22684SAndrew Rybchenko 		if (rx_pkts != rx_pkts_end) {
360638bddc9SAndrew Rybchenko 			*rx_pkts++ = m;
3613be22684SAndrew Rybchenko 			rxq->completed = pending;
3623be22684SAndrew Rybchenko 		}
363638bddc9SAndrew Rybchenko 
36468de5f6cSAndrew Rybchenko 		RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) !=
36568de5f6cSAndrew Rybchenko 				 sizeof(rxq->rearm_data));
36668de5f6cSAndrew Rybchenko 		m->rearm_data[0] = rxq->rearm_data;
367638bddc9SAndrew Rybchenko 
368638bddc9SAndrew Rybchenko 		/* Event-dependent information is the same */
369638bddc9SAndrew Rybchenko 		m->ol_flags = m0->ol_flags;
370638bddc9SAndrew Rybchenko 		m->packet_type = m0->packet_type;
371638bddc9SAndrew Rybchenko 
372638bddc9SAndrew Rybchenko 		/* data_off already moved past pseudo header */
373638bddc9SAndrew Rybchenko 		pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
374638bddc9SAndrew Rybchenko 
375638bddc9SAndrew Rybchenko 		/*
376638bddc9SAndrew Rybchenko 		 * Always get the RSS hash from the pseudo header to avoid
377638bddc9SAndrew Rybchenko 		 * a condition/branch. Whether it is valid depends on
378638bddc9SAndrew Rybchenko 		 * PKT_RX_RSS_HASH in m->ol_flags.
379638bddc9SAndrew Rybchenko 		 */
380638bddc9SAndrew Rybchenko 		m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
381638bddc9SAndrew Rybchenko 
382c4753858SAndrew Rybchenko 		seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
383c4753858SAndrew Rybchenko 		SFC_ASSERT(seg_len > 0);
384c4753858SAndrew Rybchenko 		rte_pktmbuf_data_len(m) = seg_len;
385c4753858SAndrew Rybchenko 		rte_pktmbuf_pkt_len(m) = seg_len;
386638bddc9SAndrew Rybchenko 
38768de5f6cSAndrew Rybchenko 		SFC_ASSERT(m->next == NULL);
388638bddc9SAndrew Rybchenko 	}
389638bddc9SAndrew Rybchenko 
39084b63b5cSAndrew Rybchenko 	return rx_pkts;
391638bddc9SAndrew Rybchenko }
392638bddc9SAndrew Rybchenko 
393638bddc9SAndrew Rybchenko static bool
394638bddc9SAndrew Rybchenko sfc_ef10_rx_get_event(struct sfc_ef10_rxq *rxq, efx_qword_t *rx_ev)
395638bddc9SAndrew Rybchenko {
396638bddc9SAndrew Rybchenko 	*rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->ptr_mask];
397638bddc9SAndrew Rybchenko 
398638bddc9SAndrew Rybchenko 	if (!sfc_ef10_ev_present(*rx_ev))
399638bddc9SAndrew Rybchenko 		return false;
400638bddc9SAndrew Rybchenko 
401638bddc9SAndrew Rybchenko 	if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
402638bddc9SAndrew Rybchenko 		     FSE_AZ_EV_CODE_RX_EV)) {
403638bddc9SAndrew Rybchenko 		/*
404638bddc9SAndrew Rybchenko 		 * Do not move read_ptr to keep the event for exception
405638bddc9SAndrew Rybchenko 		 * handling by the control path.
406638bddc9SAndrew Rybchenko 		 */
407638bddc9SAndrew Rybchenko 		rxq->flags |= SFC_EF10_RXQ_EXCEPTION;
408638bddc9SAndrew Rybchenko 		sfc_ef10_rx_err(&rxq->dp.dpq,
409638bddc9SAndrew Rybchenko 				"RxQ exception at EvQ read ptr %#x",
410638bddc9SAndrew Rybchenko 				rxq->evq_read_ptr);
411638bddc9SAndrew Rybchenko 		return false;
412638bddc9SAndrew Rybchenko 	}
413638bddc9SAndrew Rybchenko 
414638bddc9SAndrew Rybchenko 	rxq->evq_read_ptr++;
415638bddc9SAndrew Rybchenko 	return true;
416638bddc9SAndrew Rybchenko }
417638bddc9SAndrew Rybchenko 
418638bddc9SAndrew Rybchenko static uint16_t
419638bddc9SAndrew Rybchenko sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
420638bddc9SAndrew Rybchenko {
421638bddc9SAndrew Rybchenko 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(rx_queue);
42284b63b5cSAndrew Rybchenko 	struct rte_mbuf ** const rx_pkts_end = &rx_pkts[nb_pkts];
423638bddc9SAndrew Rybchenko 	unsigned int evq_old_read_ptr;
424638bddc9SAndrew Rybchenko 	efx_qword_t rx_ev;
425638bddc9SAndrew Rybchenko 
4263be22684SAndrew Rybchenko 	rx_pkts = sfc_ef10_rx_pending(rxq, rx_pkts, nb_pkts);
427f609ee3fSAndrew Rybchenko 
428638bddc9SAndrew Rybchenko 	if (unlikely(rxq->flags &
429638bddc9SAndrew Rybchenko 		     (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
430f609ee3fSAndrew Rybchenko 		goto done;
431638bddc9SAndrew Rybchenko 
432638bddc9SAndrew Rybchenko 	evq_old_read_ptr = rxq->evq_read_ptr;
43384b63b5cSAndrew Rybchenko 	while (rx_pkts != rx_pkts_end && sfc_ef10_rx_get_event(rxq, &rx_ev)) {
434638bddc9SAndrew Rybchenko 		/*
435638bddc9SAndrew Rybchenko 		 * DROP_EVENT is internal to the NIC; software should
436638bddc9SAndrew Rybchenko 		 * never see it and, therefore, may ignore it.
437638bddc9SAndrew Rybchenko 		 */
438638bddc9SAndrew Rybchenko 
43984b63b5cSAndrew Rybchenko 		rx_pkts = sfc_ef10_rx_process_event(rxq, rx_ev,
44084b63b5cSAndrew Rybchenko 						    rx_pkts, rx_pkts_end);
441638bddc9SAndrew Rybchenko 	}
442638bddc9SAndrew Rybchenko 
443638bddc9SAndrew Rybchenko 	sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->ptr_mask, evq_old_read_ptr,
444638bddc9SAndrew Rybchenko 			   rxq->evq_read_ptr);
445638bddc9SAndrew Rybchenko 
446638bddc9SAndrew Rybchenko 	/* It is not a problem if we refill in the case of an exception */
447638bddc9SAndrew Rybchenko 	sfc_ef10_rx_qrefill(rxq);
448638bddc9SAndrew Rybchenko 
4491245e3faSGeorgiy Levashov 	if ((rxq->flags & SFC_EF10_RXQ_FLAG_INTR_EN) &&
4501245e3faSGeorgiy Levashov 	    rxq->evq_read_ptr_primed != rxq->evq_read_ptr)
4511245e3faSGeorgiy Levashov 		sfc_ef10_rx_qprime(rxq);
4521245e3faSGeorgiy Levashov 
453f609ee3fSAndrew Rybchenko done:
45484b63b5cSAndrew Rybchenko 	return nb_pkts - (rx_pkts_end - rx_pkts);
455638bddc9SAndrew Rybchenko }
456638bddc9SAndrew Rybchenko 
457390f9b8dSAndrew Rybchenko const uint32_t *
458591cbbb1SAndrew Rybchenko sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps)
459638bddc9SAndrew Rybchenko {
460638bddc9SAndrew Rybchenko 	static const uint32_t ef10_native_ptypes[] = {
461638bddc9SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER,
462638bddc9SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER_ARP,
463638bddc9SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER_VLAN,
464638bddc9SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER_QINQ,
465638bddc9SAndrew Rybchenko 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
466638bddc9SAndrew Rybchenko 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
467638bddc9SAndrew Rybchenko 		RTE_PTYPE_L4_FRAG,
468638bddc9SAndrew Rybchenko 		RTE_PTYPE_L4_TCP,
469638bddc9SAndrew Rybchenko 		RTE_PTYPE_L4_UDP,
470638bddc9SAndrew Rybchenko 		RTE_PTYPE_UNKNOWN
471638bddc9SAndrew Rybchenko 	};
472591cbbb1SAndrew Rybchenko 	static const uint32_t ef10_overlay_ptypes[] = {
473591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER,
474591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER_ARP,
475591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER_VLAN,
476591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER_QINQ,
477591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
478591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
479591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L4_FRAG,
480591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L4_TCP,
481591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L4_UDP,
482591cbbb1SAndrew Rybchenko 		RTE_PTYPE_TUNNEL_VXLAN,
483591cbbb1SAndrew Rybchenko 		RTE_PTYPE_TUNNEL_NVGRE,
484591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L2_ETHER,
485591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L2_ETHER_VLAN,
486591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L2_ETHER_QINQ,
487591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
488591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
489591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L4_FRAG,
490591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L4_TCP,
491591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L4_UDP,
492591cbbb1SAndrew Rybchenko 		RTE_PTYPE_UNKNOWN
493591cbbb1SAndrew Rybchenko 	};
494638bddc9SAndrew Rybchenko 
495591cbbb1SAndrew Rybchenko 	/*
496591cbbb1SAndrew Rybchenko 	 * The function returns a static set of supported packet types,
497591cbbb1SAndrew Rybchenko 	 * so we cannot build it dynamically based on the supported tunnel
498591cbbb1SAndrew Rybchenko 	 * encapsulations and must limit ourselves to known sets.
499591cbbb1SAndrew Rybchenko 	 */
500591cbbb1SAndrew Rybchenko 	switch (tunnel_encaps) {
501591cbbb1SAndrew Rybchenko 	case (1u << EFX_TUNNEL_PROTOCOL_VXLAN |
502591cbbb1SAndrew Rybchenko 	      1u << EFX_TUNNEL_PROTOCOL_GENEVE |
503591cbbb1SAndrew Rybchenko 	      1u << EFX_TUNNEL_PROTOCOL_NVGRE):
504591cbbb1SAndrew Rybchenko 		return ef10_overlay_ptypes;
505591cbbb1SAndrew Rybchenko 	default:
506fdceb100SIvan Malov 		SFC_GENERIC_LOG(ERR,
507fdceb100SIvan Malov 			"Unexpected set of supported tunnel encapsulations: %#x",
508591cbbb1SAndrew Rybchenko 			tunnel_encaps);
509591cbbb1SAndrew Rybchenko 		/* FALLTHROUGH */
510591cbbb1SAndrew Rybchenko 	case 0:
511638bddc9SAndrew Rybchenko 		return ef10_native_ptypes;
512638bddc9SAndrew Rybchenko 	}
513591cbbb1SAndrew Rybchenko }
514638bddc9SAndrew Rybchenko 
515638bddc9SAndrew Rybchenko static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
516638bddc9SAndrew Rybchenko static unsigned int
5171a9d944fSIgor Romanov sfc_ef10_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
518638bddc9SAndrew Rybchenko {
5191a9d944fSIgor Romanov 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
5201a9d944fSIgor Romanov 	efx_qword_t rx_ev;
5211a9d944fSIgor Romanov 	const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
5221a9d944fSIgor Romanov 	unsigned int pending = rxq->pending;
5231a9d944fSIgor Romanov 	unsigned int ready;
5241a9d944fSIgor Romanov 
5251a9d944fSIgor Romanov 	if (unlikely(rxq->flags &
5261a9d944fSIgor Romanov 		     (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
5271a9d944fSIgor Romanov 		goto done;
5281a9d944fSIgor Romanov 
5291a9d944fSIgor Romanov 	while (sfc_ef10_rx_get_event(rxq, &rx_ev)) {
5301a9d944fSIgor Romanov 		ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
5311a9d944fSIgor Romanov 			 pending) &
5321a9d944fSIgor Romanov 			EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
5331a9d944fSIgor Romanov 		pending += ready;
5341a9d944fSIgor Romanov 	}
5351a9d944fSIgor Romanov 
536638bddc9SAndrew Rybchenko 	/*
5371a9d944fSIgor Romanov 	 * The function does not process events, so return the event queue
5381a9d944fSIgor Romanov 	 * read pointer to its original position to allow the events that
5391a9d944fSIgor Romanov 	 * were read to be processed later.
540638bddc9SAndrew Rybchenko 	 */
5411a9d944fSIgor Romanov 	rxq->evq_read_ptr = evq_old_read_ptr;
5421a9d944fSIgor Romanov 
5431a9d944fSIgor Romanov done:
5441a9d944fSIgor Romanov 	return pending - rxq->completed;
545638bddc9SAndrew Rybchenko }
546638bddc9SAndrew Rybchenko 
5471d8f3a80SIvan Malov static sfc_dp_rx_qdesc_status_t sfc_ef10_rx_qdesc_status;
5481d8f3a80SIvan Malov static int
5491a9d944fSIgor Romanov sfc_ef10_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
5501d8f3a80SIvan Malov {
5511a9d944fSIgor Romanov 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
5521a9d944fSIgor Romanov 	unsigned int npending = sfc_ef10_rx_qdesc_npending(dp_rxq);
5531a9d944fSIgor Romanov 
5541a9d944fSIgor Romanov 	if (unlikely(offset > rxq->ptr_mask))
5551a9d944fSIgor Romanov 		return -EINVAL;
5561a9d944fSIgor Romanov 
5571a9d944fSIgor Romanov 	if (offset < npending)
5581a9d944fSIgor Romanov 		return RTE_ETH_RX_DESC_DONE;
5591a9d944fSIgor Romanov 
5601a9d944fSIgor Romanov 	if (offset < (rxq->added - rxq->completed))
5611a9d944fSIgor Romanov 		return RTE_ETH_RX_DESC_AVAIL;
5621a9d944fSIgor Romanov 
5631a9d944fSIgor Romanov 	return RTE_ETH_RX_DESC_UNAVAIL;
5641d8f3a80SIvan Malov }
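
/*
 * Illustration of the mapping above (numbers are assumed): if npending
 * is 10 and added - completed is 50, offsets 0..9 report DONE (a packet
 * has been received into the buffer), offsets 10..49 report AVAIL (the
 * buffer is posted to the NIC but nothing has been received yet) and
 * offsets from 50 up to ptr_mask report UNAVAIL.
 */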
5651d8f3a80SIvan Malov 
566638bddc9SAndrew Rybchenko 
5673c335b7fSAndrew Rybchenko static sfc_dp_rx_get_dev_info_t sfc_ef10_rx_get_dev_info;
5683c335b7fSAndrew Rybchenko static void
5693c335b7fSAndrew Rybchenko sfc_ef10_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
5703c335b7fSAndrew Rybchenko {
5713c335b7fSAndrew Rybchenko 	/*
5723c335b7fSAndrew Rybchenko 	 * The number of descriptors just defines the maximum number of
5733c335b7fSAndrew Rybchenko 	 * pushed descriptors (fill level).
5743c335b7fSAndrew Rybchenko 	 */
5753c335b7fSAndrew Rybchenko 	dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
5763c335b7fSAndrew Rybchenko 	dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
5773c335b7fSAndrew Rybchenko }
5783c335b7fSAndrew Rybchenko 
5793c335b7fSAndrew Rybchenko 
580f7da270aSAndrew Rybchenko static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings;
581f7da270aSAndrew Rybchenko static int
582f7da270aSAndrew Rybchenko sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc,
583048a0d1aSIgor Romanov 			   struct sfc_dp_rx_hw_limits *limits,
584d101da1bSAndrew Rybchenko 			   __rte_unused struct rte_mempool *mb_pool,
585f7da270aSAndrew Rybchenko 			   unsigned int *rxq_entries,
586f7da270aSAndrew Rybchenko 			   unsigned int *evq_entries,
587f7da270aSAndrew Rybchenko 			   unsigned int *rxq_max_fill_level)
588f7da270aSAndrew Rybchenko {
5893c335b7fSAndrew Rybchenko 	/*
5903c335b7fSAndrew Rybchenko 	 * The rte_ethdev API guarantees that the number meets the min, max and
5913c335b7fSAndrew Rybchenko 	 * alignment requirements.
5923c335b7fSAndrew Rybchenko 	 */
593048a0d1aSIgor Romanov 	if (nb_rx_desc <= limits->rxq_min_entries)
594048a0d1aSIgor Romanov 		*rxq_entries = limits->rxq_min_entries;
5953c335b7fSAndrew Rybchenko 	else
5963c335b7fSAndrew Rybchenko 		*rxq_entries = rte_align32pow2(nb_rx_desc);
5973c335b7fSAndrew Rybchenko 
5983c335b7fSAndrew Rybchenko 	*evq_entries = *rxq_entries;
5993c335b7fSAndrew Rybchenko 
6003c335b7fSAndrew Rybchenko 	*rxq_max_fill_level = RTE_MIN(nb_rx_desc,
6013c335b7fSAndrew Rybchenko 				      SFC_EF10_RXQ_LIMIT(*evq_entries));
602f7da270aSAndrew Rybchenko 	return 0;
603f7da270aSAndrew Rybchenko }
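
/*
 * Sizing example (a sketch; limits->rxq_min_entries is assumed to be
 * below 1024 and SFC_EF10_EV_PER_CACHE_LINE to be 8): nb_rx_desc == 1000
 * gives rxq_entries == evq_entries == rte_align32pow2(1000) == 1024 and
 * SFC_EF10_RXQ_LIMIT(1024) == 1014, so rxq_max_fill_level == 1000.
 */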
604f7da270aSAndrew Rybchenko 
605f7da270aSAndrew Rybchenko 
606638bddc9SAndrew Rybchenko static uint64_t
607638bddc9SAndrew Rybchenko sfc_ef10_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
608638bddc9SAndrew Rybchenko {
609638bddc9SAndrew Rybchenko 	struct rte_mbuf m;
610638bddc9SAndrew Rybchenko 
611638bddc9SAndrew Rybchenko 	memset(&m, 0, sizeof(m));
612638bddc9SAndrew Rybchenko 
613638bddc9SAndrew Rybchenko 	rte_mbuf_refcnt_set(&m, 1);
614638bddc9SAndrew Rybchenko 	m.data_off = RTE_PKTMBUF_HEADROOM + prefix_size;
615638bddc9SAndrew Rybchenko 	m.nb_segs = 1;
616638bddc9SAndrew Rybchenko 	m.port = port_id;
617638bddc9SAndrew Rybchenko 
618638bddc9SAndrew Rybchenko 	/* rearm_data covers structure members filled in above */
619638bddc9SAndrew Rybchenko 	rte_compiler_barrier();
62068de5f6cSAndrew Rybchenko 	RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));
62168de5f6cSAndrew Rybchenko 	return m.rearm_data[0];
622638bddc9SAndrew Rybchenko }
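
/*
 * The value built above is consumed on the receive path: writing it to
 * m->rearm_data[0] initializes data_off, refcnt, nb_segs and port of a
 * freshly received mbuf with a single 64-bit store instead of four
 * separate field writes (see sfc_ef10_rx_process_event()).
 */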
623638bddc9SAndrew Rybchenko 
624638bddc9SAndrew Rybchenko static sfc_dp_rx_qcreate_t sfc_ef10_rx_qcreate;
625638bddc9SAndrew Rybchenko static int
626638bddc9SAndrew Rybchenko sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id,
627638bddc9SAndrew Rybchenko 		    const struct rte_pci_addr *pci_addr, int socket_id,
628638bddc9SAndrew Rybchenko 		    const struct sfc_dp_rx_qcreate_info *info,
629638bddc9SAndrew Rybchenko 		    struct sfc_dp_rxq **dp_rxqp)
630638bddc9SAndrew Rybchenko {
631638bddc9SAndrew Rybchenko 	struct sfc_ef10_rxq *rxq;
632638bddc9SAndrew Rybchenko 	int rc;
633638bddc9SAndrew Rybchenko 
634638bddc9SAndrew Rybchenko 	rc = EINVAL;
635638bddc9SAndrew Rybchenko 	if (info->rxq_entries != info->evq_entries)
636638bddc9SAndrew Rybchenko 		goto fail_rxq_args;
637638bddc9SAndrew Rybchenko 
638638bddc9SAndrew Rybchenko 	rc = ENOMEM;
639638bddc9SAndrew Rybchenko 	rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
640638bddc9SAndrew Rybchenko 				 RTE_CACHE_LINE_SIZE, socket_id);
641638bddc9SAndrew Rybchenko 	if (rxq == NULL)
642638bddc9SAndrew Rybchenko 		goto fail_rxq_alloc;
643638bddc9SAndrew Rybchenko 
644638bddc9SAndrew Rybchenko 	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
645638bddc9SAndrew Rybchenko 
646638bddc9SAndrew Rybchenko 	rc = ENOMEM;
647638bddc9SAndrew Rybchenko 	rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
648638bddc9SAndrew Rybchenko 					 info->rxq_entries,
649638bddc9SAndrew Rybchenko 					 sizeof(*rxq->sw_ring),
650638bddc9SAndrew Rybchenko 					 RTE_CACHE_LINE_SIZE, socket_id);
651638bddc9SAndrew Rybchenko 	if (rxq->sw_ring == NULL)
652638bddc9SAndrew Rybchenko 		goto fail_desc_alloc;
653638bddc9SAndrew Rybchenko 
654638bddc9SAndrew Rybchenko 	rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
655638bddc9SAndrew Rybchenko 	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
656638bddc9SAndrew Rybchenko 		rxq->flags |= SFC_EF10_RXQ_RSS_HASH;
657638bddc9SAndrew Rybchenko 	rxq->ptr_mask = info->rxq_entries - 1;
658638bddc9SAndrew Rybchenko 	rxq->evq_hw_ring = info->evq_hw_ring;
659e5595ee2SAndrew Rybchenko 	rxq->max_fill_level = info->max_fill_level;
660638bddc9SAndrew Rybchenko 	rxq->refill_threshold = info->refill_threshold;
661638bddc9SAndrew Rybchenko 	rxq->rearm_data =
662638bddc9SAndrew Rybchenko 		sfc_ef10_mk_mbuf_rearm_data(port_id, info->prefix_size);
663638bddc9SAndrew Rybchenko 	rxq->prefix_size = info->prefix_size;
664638bddc9SAndrew Rybchenko 	rxq->buf_size = info->buf_size;
665638bddc9SAndrew Rybchenko 	rxq->refill_mb_pool = info->refill_mb_pool;
666638bddc9SAndrew Rybchenko 	rxq->rxq_hw_ring = info->rxq_hw_ring;
667638bddc9SAndrew Rybchenko 	rxq->doorbell = (volatile uint8_t *)info->mem_bar +
668638bddc9SAndrew Rybchenko 			ER_DZ_RX_DESC_UPD_REG_OFST +
669714bff55SAndrew Rybchenko 			(info->hw_index << info->vi_window_shift);
6701245e3faSGeorgiy Levashov 	rxq->evq_prime = (volatile uint8_t *)info->mem_bar +
6711245e3faSGeorgiy Levashov 		      ER_DZ_EVQ_RPTR_REG_OFST +
6721245e3faSGeorgiy Levashov 		      (info->evq_hw_index << info->vi_window_shift);
673638bddc9SAndrew Rybchenko 
674638bddc9SAndrew Rybchenko 	*dp_rxqp = &rxq->dp;
675638bddc9SAndrew Rybchenko 	return 0;
676638bddc9SAndrew Rybchenko 
677638bddc9SAndrew Rybchenko fail_desc_alloc:
678638bddc9SAndrew Rybchenko 	rte_free(rxq);
679638bddc9SAndrew Rybchenko 
680638bddc9SAndrew Rybchenko fail_rxq_alloc:
681638bddc9SAndrew Rybchenko fail_rxq_args:
682638bddc9SAndrew Rybchenko 	return rc;
683638bddc9SAndrew Rybchenko }
684638bddc9SAndrew Rybchenko 
685638bddc9SAndrew Rybchenko static sfc_dp_rx_qdestroy_t sfc_ef10_rx_qdestroy;
686638bddc9SAndrew Rybchenko static void
687638bddc9SAndrew Rybchenko sfc_ef10_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
688638bddc9SAndrew Rybchenko {
689638bddc9SAndrew Rybchenko 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
690638bddc9SAndrew Rybchenko 
691638bddc9SAndrew Rybchenko 	rte_free(rxq->sw_ring);
692638bddc9SAndrew Rybchenko 	rte_free(rxq);
693638bddc9SAndrew Rybchenko }
694638bddc9SAndrew Rybchenko 
695638bddc9SAndrew Rybchenko static sfc_dp_rx_qstart_t sfc_ef10_rx_qstart;
696638bddc9SAndrew Rybchenko static int
697638bddc9SAndrew Rybchenko sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr)
698638bddc9SAndrew Rybchenko {
699638bddc9SAndrew Rybchenko 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
700638bddc9SAndrew Rybchenko 
701f609ee3fSAndrew Rybchenko 	SFC_ASSERT(rxq->completed == 0);
7023be22684SAndrew Rybchenko 	SFC_ASSERT(rxq->pending == 0);
703f609ee3fSAndrew Rybchenko 	SFC_ASSERT(rxq->added == 0);
704638bddc9SAndrew Rybchenko 
705638bddc9SAndrew Rybchenko 	sfc_ef10_rx_qrefill(rxq);
706638bddc9SAndrew Rybchenko 
707638bddc9SAndrew Rybchenko 	rxq->evq_read_ptr = evq_read_ptr;
708638bddc9SAndrew Rybchenko 
709638bddc9SAndrew Rybchenko 	rxq->flags |= SFC_EF10_RXQ_STARTED;
710638bddc9SAndrew Rybchenko 	rxq->flags &= ~(SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION);
711638bddc9SAndrew Rybchenko 
7121245e3faSGeorgiy Levashov 	if (rxq->flags & SFC_EF10_RXQ_FLAG_INTR_EN)
7131245e3faSGeorgiy Levashov 		sfc_ef10_rx_qprime(rxq);
7141245e3faSGeorgiy Levashov 
715638bddc9SAndrew Rybchenko 	return 0;
716638bddc9SAndrew Rybchenko }
717638bddc9SAndrew Rybchenko 
718638bddc9SAndrew Rybchenko static sfc_dp_rx_qstop_t sfc_ef10_rx_qstop;
719638bddc9SAndrew Rybchenko static void
720638bddc9SAndrew Rybchenko sfc_ef10_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
721638bddc9SAndrew Rybchenko {
722638bddc9SAndrew Rybchenko 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
723638bddc9SAndrew Rybchenko 
724638bddc9SAndrew Rybchenko 	rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
725638bddc9SAndrew Rybchenko 
726638bddc9SAndrew Rybchenko 	*evq_read_ptr = rxq->evq_read_ptr;
727638bddc9SAndrew Rybchenko }
728638bddc9SAndrew Rybchenko 
729638bddc9SAndrew Rybchenko static sfc_dp_rx_qrx_ev_t sfc_ef10_rx_qrx_ev;
730638bddc9SAndrew Rybchenko static bool
731638bddc9SAndrew Rybchenko sfc_ef10_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
732638bddc9SAndrew Rybchenko {
733638bddc9SAndrew Rybchenko 	__rte_unused struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
734638bddc9SAndrew Rybchenko 
735638bddc9SAndrew Rybchenko 	SFC_ASSERT(rxq->flags & SFC_EF10_RXQ_NOT_RUNNING);
736638bddc9SAndrew Rybchenko 
737638bddc9SAndrew Rybchenko 	/*
738638bddc9SAndrew Rybchenko 	 * It is safe to ignore the Rx event since we free all mbufs on
739638bddc9SAndrew Rybchenko 	 * queue purge anyway.
740638bddc9SAndrew Rybchenko 	 */
741638bddc9SAndrew Rybchenko 
742638bddc9SAndrew Rybchenko 	return false;
743638bddc9SAndrew Rybchenko }
744638bddc9SAndrew Rybchenko 
745638bddc9SAndrew Rybchenko static sfc_dp_rx_qpurge_t sfc_ef10_rx_qpurge;
746638bddc9SAndrew Rybchenko static void
747638bddc9SAndrew Rybchenko sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
748638bddc9SAndrew Rybchenko {
749638bddc9SAndrew Rybchenko 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
750638bddc9SAndrew Rybchenko 	unsigned int i;
751638bddc9SAndrew Rybchenko 	struct sfc_ef10_rx_sw_desc *rxd;
752638bddc9SAndrew Rybchenko 
75352e10cb0SAndrew Rybchenko 	rte_pktmbuf_free(rxq->scatter_pkt);
75452e10cb0SAndrew Rybchenko 	rxq->scatter_pkt = NULL;
75552e10cb0SAndrew Rybchenko 
756638bddc9SAndrew Rybchenko 	for (i = rxq->completed; i != rxq->added; ++i) {
757638bddc9SAndrew Rybchenko 		rxd = &rxq->sw_ring[i & rxq->ptr_mask];
75866e10b8dSAndrew Rybchenko 		rte_mbuf_raw_free(rxd->mbuf);
759638bddc9SAndrew Rybchenko 		rxd->mbuf = NULL;
760638bddc9SAndrew Rybchenko 	}
761638bddc9SAndrew Rybchenko 
7623be22684SAndrew Rybchenko 	rxq->completed = rxq->pending = rxq->added = 0;
763f609ee3fSAndrew Rybchenko 
764638bddc9SAndrew Rybchenko 	rxq->flags &= ~SFC_EF10_RXQ_STARTED;
765638bddc9SAndrew Rybchenko }
766638bddc9SAndrew Rybchenko 
7671245e3faSGeorgiy Levashov static sfc_dp_rx_intr_enable_t sfc_ef10_rx_intr_enable;
7681245e3faSGeorgiy Levashov static int
7691245e3faSGeorgiy Levashov sfc_ef10_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
7701245e3faSGeorgiy Levashov {
7711245e3faSGeorgiy Levashov 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
7721245e3faSGeorgiy Levashov 
7731245e3faSGeorgiy Levashov 	rxq->flags |= SFC_EF10_RXQ_FLAG_INTR_EN;
7741245e3faSGeorgiy Levashov 	if (rxq->flags & SFC_EF10_RXQ_STARTED)
7751245e3faSGeorgiy Levashov 		sfc_ef10_rx_qprime(rxq);
7761245e3faSGeorgiy Levashov 	return 0;
7771245e3faSGeorgiy Levashov }
7781245e3faSGeorgiy Levashov 
7791245e3faSGeorgiy Levashov static sfc_dp_rx_intr_disable_t sfc_ef10_rx_intr_disable;
7801245e3faSGeorgiy Levashov static int
7811245e3faSGeorgiy Levashov sfc_ef10_rx_intr_disable(struct sfc_dp_rxq *dp_rxq)
7821245e3faSGeorgiy Levashov {
7831245e3faSGeorgiy Levashov 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
7841245e3faSGeorgiy Levashov 
7851245e3faSGeorgiy Levashov 	/* Cannot disarm, just disable rearm */
7861245e3faSGeorgiy Levashov 	rxq->flags &= ~SFC_EF10_RXQ_FLAG_INTR_EN;
7871245e3faSGeorgiy Levashov 	return 0;
7881245e3faSGeorgiy Levashov }
7891245e3faSGeorgiy Levashov 
790638bddc9SAndrew Rybchenko struct sfc_dp_rx sfc_ef10_rx = {
791638bddc9SAndrew Rybchenko 	.dp = {
792638bddc9SAndrew Rybchenko 		.name		= SFC_KVARG_DATAPATH_EF10,
793638bddc9SAndrew Rybchenko 		.type		= SFC_DP_RX,
794638bddc9SAndrew Rybchenko 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
795638bddc9SAndrew Rybchenko 	},
7961245e3faSGeorgiy Levashov 	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
7971245e3faSGeorgiy Levashov 				  SFC_DP_RX_FEAT_INTR,
798f08d113dSAndrew Rybchenko 	.dev_offload_capa	= DEV_RX_OFFLOAD_CHECKSUM |
7998b945a7fSPavan Nikhilesh 				  DEV_RX_OFFLOAD_OUTER_IPV4_CKSUM |
8008b945a7fSPavan Nikhilesh 				  DEV_RX_OFFLOAD_RSS_HASH,
801f08d113dSAndrew Rybchenko 	.queue_offload_capa	= DEV_RX_OFFLOAD_SCATTER,
8023c335b7fSAndrew Rybchenko 	.get_dev_info		= sfc_ef10_rx_get_dev_info,
803f7da270aSAndrew Rybchenko 	.qsize_up_rings		= sfc_ef10_rx_qsize_up_rings,
804638bddc9SAndrew Rybchenko 	.qcreate		= sfc_ef10_rx_qcreate,
805638bddc9SAndrew Rybchenko 	.qdestroy		= sfc_ef10_rx_qdestroy,
806638bddc9SAndrew Rybchenko 	.qstart			= sfc_ef10_rx_qstart,
807638bddc9SAndrew Rybchenko 	.qstop			= sfc_ef10_rx_qstop,
808638bddc9SAndrew Rybchenko 	.qrx_ev			= sfc_ef10_rx_qrx_ev,
809638bddc9SAndrew Rybchenko 	.qpurge			= sfc_ef10_rx_qpurge,
810638bddc9SAndrew Rybchenko 	.supported_ptypes_get	= sfc_ef10_supported_ptypes_get,
811638bddc9SAndrew Rybchenko 	.qdesc_npending		= sfc_ef10_rx_qdesc_npending,
8121d8f3a80SIvan Malov 	.qdesc_status		= sfc_ef10_rx_qdesc_status,
8131245e3faSGeorgiy Levashov 	.intr_enable		= sfc_ef10_rx_intr_enable,
8141245e3faSGeorgiy Levashov 	.intr_disable		= sfc_ef10_rx_intr_disable,
815638bddc9SAndrew Rybchenko 	.pkt_burst		= sfc_ef10_recv_pkts,
816638bddc9SAndrew Rybchenko };
817