xref: /dpdk/drivers/net/sfc/sfc_ef10_rx.c (revision ba6a168a06581b5b3d523f984722a3e5f65bbb82)
144c0947bSAndrew Rybchenko /* SPDX-License-Identifier: BSD-3-Clause
2638bddc9SAndrew Rybchenko  *
398d26ef7SAndrew Rybchenko  * Copyright(c) 2019-2021 Xilinx, Inc.
4a0147be5SAndrew Rybchenko  * Copyright(c) 2016-2019 Solarflare Communications Inc.
5638bddc9SAndrew Rybchenko  *
6638bddc9SAndrew Rybchenko  * This software was jointly developed between OKTET Labs (under contract
7638bddc9SAndrew Rybchenko  * for Solarflare) and Solarflare Communications, Inc.
8638bddc9SAndrew Rybchenko  */
9638bddc9SAndrew Rybchenko 
10638bddc9SAndrew Rybchenko /* EF10 native datapath implementation */
11638bddc9SAndrew Rybchenko 
12638bddc9SAndrew Rybchenko #include <stdbool.h>
13638bddc9SAndrew Rybchenko 
14638bddc9SAndrew Rybchenko #include <rte_byteorder.h>
15638bddc9SAndrew Rybchenko #include <rte_mbuf_ptype.h>
16638bddc9SAndrew Rybchenko #include <rte_mbuf.h>
17638bddc9SAndrew Rybchenko #include <rte_io.h>
18638bddc9SAndrew Rybchenko 
19638bddc9SAndrew Rybchenko #include "efx.h"
20638bddc9SAndrew Rybchenko #include "efx_types.h"
21638bddc9SAndrew Rybchenko #include "efx_regs.h"
22638bddc9SAndrew Rybchenko #include "efx_regs_ef10.h"
23638bddc9SAndrew Rybchenko 
241b0236e2SAndrew Rybchenko #include "sfc_debug.h"
25638bddc9SAndrew Rybchenko #include "sfc_tweak.h"
26638bddc9SAndrew Rybchenko #include "sfc_dp_rx.h"
27638bddc9SAndrew Rybchenko #include "sfc_kvargs.h"
28638bddc9SAndrew Rybchenko #include "sfc_ef10.h"
297ee7e57cSAndrew Rybchenko 
307ee7e57cSAndrew Rybchenko #define SFC_EF10_RX_EV_ENCAP_SUPPORT	1
31c121f008SAndrew Rybchenko #include "sfc_ef10_rx_ev.h"
32638bddc9SAndrew Rybchenko 
33638bddc9SAndrew Rybchenko #define sfc_ef10_rx_err(dpq, ...) \
34638bddc9SAndrew Rybchenko 	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, ERR, dpq, __VA_ARGS__)
35638bddc9SAndrew Rybchenko 
36e7fbf6f5SAndrew Rybchenko #define sfc_ef10_rx_info(dpq, ...) \
37e7fbf6f5SAndrew Rybchenko 	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF10, INFO, dpq, __VA_ARGS__)
38e7fbf6f5SAndrew Rybchenko 
39638bddc9SAndrew Rybchenko /**
40638bddc9SAndrew Rybchenko  * Maximum number of descriptors/buffers in the Rx ring.
41638bddc9SAndrew Rybchenko  * It should guarantee that the corresponding event queue never overfills.
42638bddc9SAndrew Rybchenko  * The EF10 native datapath uses an event queue of the same size as the
43638bddc9SAndrew Rybchenko  * Rx queue. The maximum number of events on the datapath can be estimated
44638bddc9SAndrew Rybchenko  * as the number of Rx queue entries (one event per Rx buffer in the worst
45638bddc9SAndrew Rybchenko  * case) plus Rx error and flush events.
46638bddc9SAndrew Rybchenko  */
47638bddc9SAndrew Rybchenko #define SFC_EF10_RXQ_LIMIT(_ndesc) \
48638bddc9SAndrew Rybchenko 	((_ndesc) - 1 /* head must not step on tail */ - \
49638bddc9SAndrew Rybchenko 	 (SFC_EF10_EV_PER_CACHE_LINE - 1) /* max unused EvQ entries */ - \
50638bddc9SAndrew Rybchenko 	 1 /* Rx error */ - 1 /* flush */)
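/*
 * Worked example (a sketch, assuming SFC_EF10_EV_PER_CACHE_LINE is 8,
 * i.e. a 64-byte cache line holding 8-byte events):
 * SFC_EF10_RXQ_LIMIT(512) = 512 - 1 - 7 - 1 - 1 = 502, i.e. at most
 * 502 Rx descriptors may be outstanding on a 512-entry ring.
 */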
51638bddc9SAndrew Rybchenko 
52638bddc9SAndrew Rybchenko struct sfc_ef10_rx_sw_desc {
53638bddc9SAndrew Rybchenko 	struct rte_mbuf			*mbuf;
54638bddc9SAndrew Rybchenko };
55638bddc9SAndrew Rybchenko 
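/*
 * Note on the ring bookkeeping below: completed, pending and added are
 * free-running counters (masked by ptr_mask only when indexing sw_ring)
 * and, as used on the datapath, completed <= pending <= added, with
 * added - completed never exceeding max_fill_level.
 */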
56638bddc9SAndrew Rybchenko struct sfc_ef10_rxq {
57638bddc9SAndrew Rybchenko 	/* Used on data path */
58638bddc9SAndrew Rybchenko 	unsigned int			flags;
59638bddc9SAndrew Rybchenko #define SFC_EF10_RXQ_STARTED		0x1
60638bddc9SAndrew Rybchenko #define SFC_EF10_RXQ_NOT_RUNNING	0x2
61638bddc9SAndrew Rybchenko #define SFC_EF10_RXQ_EXCEPTION		0x4
62638bddc9SAndrew Rybchenko #define SFC_EF10_RXQ_RSS_HASH		0x8
631245e3faSGeorgiy Levashov #define SFC_EF10_RXQ_FLAG_INTR_EN	0x10
64638bddc9SAndrew Rybchenko 	unsigned int			ptr_mask;
653be22684SAndrew Rybchenko 	unsigned int			pending;
66638bddc9SAndrew Rybchenko 	unsigned int			completed;
67638bddc9SAndrew Rybchenko 	unsigned int			evq_read_ptr;
681245e3faSGeorgiy Levashov 	unsigned int			evq_read_ptr_primed;
69638bddc9SAndrew Rybchenko 	efx_qword_t			*evq_hw_ring;
70638bddc9SAndrew Rybchenko 	struct sfc_ef10_rx_sw_desc	*sw_ring;
71638bddc9SAndrew Rybchenko 	uint64_t			rearm_data;
7252e10cb0SAndrew Rybchenko 	struct rte_mbuf			*scatter_pkt;
731245e3faSGeorgiy Levashov 	volatile void			*evq_prime;
74638bddc9SAndrew Rybchenko 	uint16_t			prefix_size;
75638bddc9SAndrew Rybchenko 
76638bddc9SAndrew Rybchenko 	/* Used on refill */
77638bddc9SAndrew Rybchenko 	uint16_t			buf_size;
78638bddc9SAndrew Rybchenko 	unsigned int			added;
79e5595ee2SAndrew Rybchenko 	unsigned int			max_fill_level;
80638bddc9SAndrew Rybchenko 	unsigned int			refill_threshold;
81638bddc9SAndrew Rybchenko 	struct rte_mempool		*refill_mb_pool;
82638bddc9SAndrew Rybchenko 	efx_qword_t			*rxq_hw_ring;
83638bddc9SAndrew Rybchenko 	volatile void			*doorbell;
84638bddc9SAndrew Rybchenko 
85638bddc9SAndrew Rybchenko 	/* Datapath receive queue anchor */
86638bddc9SAndrew Rybchenko 	struct sfc_dp_rxq		dp;
87638bddc9SAndrew Rybchenko };
88638bddc9SAndrew Rybchenko 
89638bddc9SAndrew Rybchenko static inline struct sfc_ef10_rxq *
90638bddc9SAndrew Rybchenko sfc_ef10_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
91638bddc9SAndrew Rybchenko {
92638bddc9SAndrew Rybchenko 	return container_of(dp_rxq, struct sfc_ef10_rxq, dp);
93638bddc9SAndrew Rybchenko }
94638bddc9SAndrew Rybchenko 
95638bddc9SAndrew Rybchenko static void
961245e3faSGeorgiy Levashov sfc_ef10_rx_qprime(struct sfc_ef10_rxq *rxq)
971245e3faSGeorgiy Levashov {
981245e3faSGeorgiy Levashov 	sfc_ef10_ev_qprime(rxq->evq_prime, rxq->evq_read_ptr, rxq->ptr_mask);
991245e3faSGeorgiy Levashov 	rxq->evq_read_ptr_primed = rxq->evq_read_ptr;
1001245e3faSGeorgiy Levashov }
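/*
 * The prime above appears to rearm the event queue interrupt by writing
 * the current read pointer to the EvQ prime doorbell; the primed position
 * is recorded so that the receive path re-primes only after new events
 * have been processed.
 */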
1011245e3faSGeorgiy Levashov 
1021245e3faSGeorgiy Levashov static void
103638bddc9SAndrew Rybchenko sfc_ef10_rx_qrefill(struct sfc_ef10_rxq *rxq)
104638bddc9SAndrew Rybchenko {
105638bddc9SAndrew Rybchenko 	const unsigned int ptr_mask = rxq->ptr_mask;
106638bddc9SAndrew Rybchenko 	const uint32_t buf_size = rxq->buf_size;
107638bddc9SAndrew Rybchenko 	unsigned int free_space;
108638bddc9SAndrew Rybchenko 	unsigned int bulks;
109638bddc9SAndrew Rybchenko 	void *objs[SFC_RX_REFILL_BULK];
110638bddc9SAndrew Rybchenko 	unsigned int added = rxq->added;
111638bddc9SAndrew Rybchenko 
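	/*
	 * The EF10 Rx write pointer can presumably be advanced only in
	 * SFC_EF10_RX_WPTR_ALIGN steps, so the refill bulk size must be
	 * a multiple of that alignment; check it at build time.
	 */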
112a2443fdfSAndrew Rybchenko 	RTE_BUILD_BUG_ON(SFC_RX_REFILL_BULK % SFC_EF10_RX_WPTR_ALIGN != 0);
113a2443fdfSAndrew Rybchenko 
114e5595ee2SAndrew Rybchenko 	free_space = rxq->max_fill_level - (added - rxq->completed);
115638bddc9SAndrew Rybchenko 
116638bddc9SAndrew Rybchenko 	if (free_space < rxq->refill_threshold)
117638bddc9SAndrew Rybchenko 		return;
118638bddc9SAndrew Rybchenko 
119638bddc9SAndrew Rybchenko 	bulks = free_space / RTE_DIM(objs);
120638bddc9SAndrew Rybchenko 	/* refill_threshold guarantees that bulks is positive */
121638bddc9SAndrew Rybchenko 	SFC_ASSERT(bulks > 0);
122638bddc9SAndrew Rybchenko 
123638bddc9SAndrew Rybchenko 	do {
124638bddc9SAndrew Rybchenko 		unsigned int id;
125638bddc9SAndrew Rybchenko 		unsigned int i;
126638bddc9SAndrew Rybchenko 
127638bddc9SAndrew Rybchenko 		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
128638bddc9SAndrew Rybchenko 						  RTE_DIM(objs)) < 0)) {
129638bddc9SAndrew Rybchenko 			struct rte_eth_dev_data *dev_data =
130638bddc9SAndrew Rybchenko 				rte_eth_devices[rxq->dp.dpq.port_id].data;
131638bddc9SAndrew Rybchenko 
132638bddc9SAndrew Rybchenko 			/*
133638bddc9SAndrew Rybchenko 			 * Incrementing the counter from different contexts
134638bddc9SAndrew Rybchenko 			 * is hardly safe, but all PMDs do it.
135638bddc9SAndrew Rybchenko 			 */
136638bddc9SAndrew Rybchenko 			dev_data->rx_mbuf_alloc_failed += RTE_DIM(objs);
137638bddc9SAndrew Rybchenko 			/* Return if we have posted nothing yet */
138638bddc9SAndrew Rybchenko 			if (added == rxq->added)
139638bddc9SAndrew Rybchenko 				return;
140638bddc9SAndrew Rybchenko 			/* Push posted */
141638bddc9SAndrew Rybchenko 			break;
142638bddc9SAndrew Rybchenko 		}
143638bddc9SAndrew Rybchenko 
144638bddc9SAndrew Rybchenko 		for (i = 0, id = added & ptr_mask;
145638bddc9SAndrew Rybchenko 		     i < RTE_DIM(objs);
146638bddc9SAndrew Rybchenko 		     ++i, ++id) {
147638bddc9SAndrew Rybchenko 			struct rte_mbuf *m = objs[i];
148638bddc9SAndrew Rybchenko 			struct sfc_ef10_rx_sw_desc *rxd;
149df6e0a06SSantosh Shukla 			rte_iova_t phys_addr;
150638bddc9SAndrew Rybchenko 
1513a35c1c0SMorten Brørup 			__rte_mbuf_raw_sanity_check(m);
152f3a5fa85SAndrew Rybchenko 
153638bddc9SAndrew Rybchenko 			SFC_ASSERT((id & ~ptr_mask) == 0);
154638bddc9SAndrew Rybchenko 			rxd = &rxq->sw_ring[id];
155638bddc9SAndrew Rybchenko 			rxd->mbuf = m;
156638bddc9SAndrew Rybchenko 
157638bddc9SAndrew Rybchenko 			/*
158638bddc9SAndrew Rybchenko 			 * Avoid writing to the mbuf here. It is cheaper to
159638bddc9SAndrew Rybchenko 			 * do it when we receive the packet and fill in
160638bddc9SAndrew Rybchenko 			 * nearby structure members.
161638bddc9SAndrew Rybchenko 			 */
162638bddc9SAndrew Rybchenko 
163bfa9a8a4SThomas Monjalon 			phys_addr = rte_mbuf_data_iova_default(m);
164638bddc9SAndrew Rybchenko 			EFX_POPULATE_QWORD_2(rxq->rxq_hw_ring[id],
165638bddc9SAndrew Rybchenko 			    ESF_DZ_RX_KER_BYTE_CNT, buf_size,
166638bddc9SAndrew Rybchenko 			    ESF_DZ_RX_KER_BUF_ADDR, phys_addr);
167638bddc9SAndrew Rybchenko 		}
168638bddc9SAndrew Rybchenko 
169638bddc9SAndrew Rybchenko 		added += RTE_DIM(objs);
170638bddc9SAndrew Rybchenko 	} while (--bulks > 0);
171638bddc9SAndrew Rybchenko 
172638bddc9SAndrew Rybchenko 	SFC_ASSERT(rxq->added != added);
173638bddc9SAndrew Rybchenko 	rxq->added = added;
17450448dd3SAndrew Rybchenko 	sfc_ef10_rx_qpush(rxq->doorbell, added, ptr_mask, &rxq->dp.dpq.dbells);
175638bddc9SAndrew Rybchenko }
176638bddc9SAndrew Rybchenko 
177638bddc9SAndrew Rybchenko static void
178638bddc9SAndrew Rybchenko sfc_ef10_rx_prefetch_next(struct sfc_ef10_rxq *rxq, unsigned int next_id)
179638bddc9SAndrew Rybchenko {
180638bddc9SAndrew Rybchenko 	struct rte_mbuf *next_mbuf;
181638bddc9SAndrew Rybchenko 
182638bddc9SAndrew Rybchenko 	/* Prefetch next bunch of software descriptors */
183638bddc9SAndrew Rybchenko 	if ((next_id % (RTE_CACHE_LINE_SIZE / sizeof(rxq->sw_ring[0]))) == 0)
184638bddc9SAndrew Rybchenko 		rte_prefetch0(&rxq->sw_ring[next_id]);
185638bddc9SAndrew Rybchenko 
186638bddc9SAndrew Rybchenko 	/*
187638bddc9SAndrew Rybchenko 	 * It looks strange to prefetch depending on previous prefetch
188638bddc9SAndrew Rybchenko 	 * data, but measurements show that it is really efficient and
189638bddc9SAndrew Rybchenko 	 * increases packet rate.
190638bddc9SAndrew Rybchenko 	 */
191638bddc9SAndrew Rybchenko 	next_mbuf = rxq->sw_ring[next_id].mbuf;
192638bddc9SAndrew Rybchenko 	if (likely(next_mbuf != NULL)) {
193638bddc9SAndrew Rybchenko 		/* Prefetch the next mbuf structure */
194638bddc9SAndrew Rybchenko 		rte_mbuf_prefetch_part1(next_mbuf);
195638bddc9SAndrew Rybchenko 
196638bddc9SAndrew Rybchenko 		/* Prefetch pseudo header of the next packet */
197638bddc9SAndrew Rybchenko 		/* data_off is not filled in yet */
198638bddc9SAndrew Rybchenko 		/* Yes, data could be not ready yet, but we hope */
199638bddc9SAndrew Rybchenko 		/* Yes, the data may not be ready yet, but we hope it is */
200638bddc9SAndrew Rybchenko 			      RTE_PKTMBUF_HEADROOM);
201638bddc9SAndrew Rybchenko 	}
202638bddc9SAndrew Rybchenko }
203638bddc9SAndrew Rybchenko 
20484b63b5cSAndrew Rybchenko static struct rte_mbuf **
2053be22684SAndrew Rybchenko sfc_ef10_rx_pending(struct sfc_ef10_rxq *rxq, struct rte_mbuf **rx_pkts,
206638bddc9SAndrew Rybchenko 		    uint16_t nb_pkts)
207638bddc9SAndrew Rybchenko {
2083be22684SAndrew Rybchenko 	uint16_t n_rx_pkts = RTE_MIN(nb_pkts, rxq->pending - rxq->completed);
2093ac6ddd4SAndrew Rybchenko 
21052e10cb0SAndrew Rybchenko 	SFC_ASSERT(rxq->pending == rxq->completed || rxq->scatter_pkt == NULL);
21152e10cb0SAndrew Rybchenko 
2123ac6ddd4SAndrew Rybchenko 	if (n_rx_pkts != 0) {
213638bddc9SAndrew Rybchenko 		unsigned int completed = rxq->completed;
214638bddc9SAndrew Rybchenko 
215638bddc9SAndrew Rybchenko 		rxq->completed = completed + n_rx_pkts;
216638bddc9SAndrew Rybchenko 
2173ac6ddd4SAndrew Rybchenko 		do {
2183ac6ddd4SAndrew Rybchenko 			*rx_pkts++ =
2193ac6ddd4SAndrew Rybchenko 				rxq->sw_ring[completed++ & rxq->ptr_mask].mbuf;
2203ac6ddd4SAndrew Rybchenko 		} while (completed != rxq->completed);
2213ac6ddd4SAndrew Rybchenko 	}
222638bddc9SAndrew Rybchenko 
22384b63b5cSAndrew Rybchenko 	return rx_pkts;
224638bddc9SAndrew Rybchenko }
225638bddc9SAndrew Rybchenko 
226c6845644SAndrew Rybchenko /*
227c6845644SAndrew Rybchenko  * Below Rx pseudo-header (aka Rx prefix) accessors rely on the
228c6845644SAndrew Rybchenko  * following fields layout.
229c6845644SAndrew Rybchenko  */
230c6845644SAndrew Rybchenko static const efx_rx_prefix_layout_t sfc_ef10_rx_prefix_layout = {
231c6845644SAndrew Rybchenko 	.erpl_fields	= {
232c6845644SAndrew Rybchenko 		[EFX_RX_PREFIX_FIELD_RSS_HASH]	=
233c6845644SAndrew Rybchenko 		    { 0, sizeof(uint32_t) * CHAR_BIT, B_FALSE },
234c6845644SAndrew Rybchenko 		[EFX_RX_PREFIX_FIELD_LENGTH]	=
235c6845644SAndrew Rybchenko 		    { 8 * CHAR_BIT, sizeof(uint16_t) * CHAR_BIT, B_FALSE },
236c6845644SAndrew Rybchenko 	}
237c6845644SAndrew Rybchenko };
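/*
 * In byte terms the above means: the little-endian RSS hash occupies
 * bytes [0, 4) and the little-endian packet length bytes [8, 10) of the
 * Rx prefix, which is what the raw accessors below read.
 */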
238638bddc9SAndrew Rybchenko static uint16_t
239638bddc9SAndrew Rybchenko sfc_ef10_rx_pseudo_hdr_get_len(const uint8_t *pseudo_hdr)
240638bddc9SAndrew Rybchenko {
241638bddc9SAndrew Rybchenko 	return rte_le_to_cpu_16(*(const uint16_t *)&pseudo_hdr[8]);
242638bddc9SAndrew Rybchenko }
243638bddc9SAndrew Rybchenko 
244638bddc9SAndrew Rybchenko static uint32_t
245638bddc9SAndrew Rybchenko sfc_ef10_rx_pseudo_hdr_get_hash(const uint8_t *pseudo_hdr)
246638bddc9SAndrew Rybchenko {
247638bddc9SAndrew Rybchenko 	return rte_le_to_cpu_32(*(const uint32_t *)pseudo_hdr);
248638bddc9SAndrew Rybchenko }
249638bddc9SAndrew Rybchenko 
25084b63b5cSAndrew Rybchenko static struct rte_mbuf **
251638bddc9SAndrew Rybchenko sfc_ef10_rx_process_event(struct sfc_ef10_rxq *rxq, efx_qword_t rx_ev,
25284b63b5cSAndrew Rybchenko 			  struct rte_mbuf **rx_pkts,
25384b63b5cSAndrew Rybchenko 			  struct rte_mbuf ** const rx_pkts_end)
254638bddc9SAndrew Rybchenko {
255638bddc9SAndrew Rybchenko 	const unsigned int ptr_mask = rxq->ptr_mask;
2563be22684SAndrew Rybchenko 	unsigned int pending = rxq->pending;
257638bddc9SAndrew Rybchenko 	unsigned int ready;
258638bddc9SAndrew Rybchenko 	struct sfc_ef10_rx_sw_desc *rxd;
259638bddc9SAndrew Rybchenko 	struct rte_mbuf *m;
260638bddc9SAndrew Rybchenko 	struct rte_mbuf *m0;
261638bddc9SAndrew Rybchenko 	const uint8_t *pseudo_hdr;
262c4753858SAndrew Rybchenko 	uint16_t seg_len;
263638bddc9SAndrew Rybchenko 
2643be22684SAndrew Rybchenko 	ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) - pending) &
265638bddc9SAndrew Rybchenko 		EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
26652e10cb0SAndrew Rybchenko 
26752e10cb0SAndrew Rybchenko 	if (ready == 0) {
26852e10cb0SAndrew Rybchenko 		/* Rx abort - there were not enough descriptors for the Rx packet */
26952e10cb0SAndrew Rybchenko 		rte_pktmbuf_free(rxq->scatter_pkt);
27052e10cb0SAndrew Rybchenko 		rxq->scatter_pkt = NULL;
27152e10cb0SAndrew Rybchenko 		return rx_pkts;
27252e10cb0SAndrew Rybchenko 	}
273638bddc9SAndrew Rybchenko 
2743be22684SAndrew Rybchenko 	rxq->pending = pending + ready;
2753be22684SAndrew Rybchenko 
276638bddc9SAndrew Rybchenko 	if (rx_ev.eq_u64[0] &
277638bddc9SAndrew Rybchenko 	    rte_cpu_to_le_64((1ull << ESF_DZ_RX_ECC_ERR_LBN) |
278638bddc9SAndrew Rybchenko 			     (1ull << ESF_DZ_RX_ECRC_ERR_LBN))) {
2793be22684SAndrew Rybchenko 		SFC_ASSERT(rxq->completed == pending);
2803be22684SAndrew Rybchenko 		do {
2813be22684SAndrew Rybchenko 			rxd = &rxq->sw_ring[pending++ & ptr_mask];
28266e10b8dSAndrew Rybchenko 			rte_mbuf_raw_free(rxd->mbuf);
2833be22684SAndrew Rybchenko 		} while (pending != rxq->pending);
2843be22684SAndrew Rybchenko 		rxq->completed = pending;
28584b63b5cSAndrew Rybchenko 		return rx_pkts;
286638bddc9SAndrew Rybchenko 	}
287638bddc9SAndrew Rybchenko 
28852e10cb0SAndrew Rybchenko 	/* If a scattered packet is in progress */
28952e10cb0SAndrew Rybchenko 	if (rxq->scatter_pkt != NULL) {
29052e10cb0SAndrew Rybchenko 		/* Events for scattered packet frags are not merged */
29152e10cb0SAndrew Rybchenko 		SFC_ASSERT(ready == 1);
29252e10cb0SAndrew Rybchenko 		SFC_ASSERT(rxq->completed == pending);
29352e10cb0SAndrew Rybchenko 
29452e10cb0SAndrew Rybchenko 		/* There is no pseudo-header in scatter segments. */
29552e10cb0SAndrew Rybchenko 		seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES);
29652e10cb0SAndrew Rybchenko 
29752e10cb0SAndrew Rybchenko 		rxd = &rxq->sw_ring[pending++ & ptr_mask];
29852e10cb0SAndrew Rybchenko 		m = rxd->mbuf;
29952e10cb0SAndrew Rybchenko 
3003a35c1c0SMorten Brørup 		__rte_mbuf_raw_sanity_check(m);
30152e10cb0SAndrew Rybchenko 
30252e10cb0SAndrew Rybchenko 		m->data_off = RTE_PKTMBUF_HEADROOM;
30352e10cb0SAndrew Rybchenko 		rte_pktmbuf_data_len(m) = seg_len;
30452e10cb0SAndrew Rybchenko 		rte_pktmbuf_pkt_len(m) = seg_len;
30552e10cb0SAndrew Rybchenko 
30652e10cb0SAndrew Rybchenko 		rxq->scatter_pkt->nb_segs++;
30752e10cb0SAndrew Rybchenko 		rte_pktmbuf_pkt_len(rxq->scatter_pkt) += seg_len;
30852e10cb0SAndrew Rybchenko 		rte_pktmbuf_lastseg(rxq->scatter_pkt)->next = m;
30952e10cb0SAndrew Rybchenko 
31052e10cb0SAndrew Rybchenko 		if (~rx_ev.eq_u64[0] &
31152e10cb0SAndrew Rybchenko 		    rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) {
31252e10cb0SAndrew Rybchenko 			*rx_pkts++ = rxq->scatter_pkt;
31352e10cb0SAndrew Rybchenko 			rxq->scatter_pkt = NULL;
31452e10cb0SAndrew Rybchenko 		}
31552e10cb0SAndrew Rybchenko 		rxq->completed = pending;
31652e10cb0SAndrew Rybchenko 		return rx_pkts;
31752e10cb0SAndrew Rybchenko 	}
31852e10cb0SAndrew Rybchenko 
3193be22684SAndrew Rybchenko 	rxd = &rxq->sw_ring[pending++ & ptr_mask];
320638bddc9SAndrew Rybchenko 
3213be22684SAndrew Rybchenko 	sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask);
322638bddc9SAndrew Rybchenko 
323638bddc9SAndrew Rybchenko 	m = rxd->mbuf;
324638bddc9SAndrew Rybchenko 
32568de5f6cSAndrew Rybchenko 	RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) != sizeof(rxq->rearm_data));
32668de5f6cSAndrew Rybchenko 	m->rearm_data[0] = rxq->rearm_data;
327638bddc9SAndrew Rybchenko 
328638bddc9SAndrew Rybchenko 	/* Classify packet based on Rx event */
329a6539283SAndrew Rybchenko 	/* Mask RSS hash offload flag if RSS is not enabled */
330a6539283SAndrew Rybchenko 	sfc_ef10_rx_ev_to_offloads(rx_ev, m,
331a6539283SAndrew Rybchenko 				   (rxq->flags & SFC_EF10_RXQ_RSS_HASH) ?
332daa02b5cSOlivier Matz 				   ~0ull : ~RTE_MBUF_F_RX_RSS_HASH);
333638bddc9SAndrew Rybchenko 
334638bddc9SAndrew Rybchenko 	/* data_off already moved past pseudo header */
335638bddc9SAndrew Rybchenko 	pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
336638bddc9SAndrew Rybchenko 
337638bddc9SAndrew Rybchenko 	/*
338638bddc9SAndrew Rybchenko 	 * Always get the RSS hash from the pseudo header to avoid
339638bddc9SAndrew Rybchenko 	 * a condition/branch. Whether it is valid or not depends on
340daa02b5cSOlivier Matz 	 * RTE_MBUF_F_RX_RSS_HASH in m->ol_flags.
341638bddc9SAndrew Rybchenko 	 */
342638bddc9SAndrew Rybchenko 	m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
343638bddc9SAndrew Rybchenko 
344638bddc9SAndrew Rybchenko 	if (ready == 1)
345c4753858SAndrew Rybchenko 		seg_len = EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_BYTES) -
346638bddc9SAndrew Rybchenko 			rxq->prefix_size;
347638bddc9SAndrew Rybchenko 	else
348c4753858SAndrew Rybchenko 		seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
349c4753858SAndrew Rybchenko 	SFC_ASSERT(seg_len > 0);
350c4753858SAndrew Rybchenko 	rte_pktmbuf_data_len(m) = seg_len;
351c4753858SAndrew Rybchenko 	rte_pktmbuf_pkt_len(m) = seg_len;
352638bddc9SAndrew Rybchenko 
35368de5f6cSAndrew Rybchenko 	SFC_ASSERT(m->next == NULL);
354638bddc9SAndrew Rybchenko 
35552e10cb0SAndrew Rybchenko 	if (~rx_ev.eq_u64[0] & rte_cpu_to_le_64(1ull << ESF_DZ_RX_CONT_LBN)) {
35652e10cb0SAndrew Rybchenko 		*rx_pkts++ = m;
35752e10cb0SAndrew Rybchenko 		rxq->completed = pending;
35852e10cb0SAndrew Rybchenko 	} else {
35952e10cb0SAndrew Rybchenko 		/* Events with CONT bit are not merged */
36052e10cb0SAndrew Rybchenko 		SFC_ASSERT(ready == 1);
36152e10cb0SAndrew Rybchenko 		rxq->scatter_pkt = m;
36252e10cb0SAndrew Rybchenko 		rxq->completed = pending;
36352e10cb0SAndrew Rybchenko 		return rx_pkts;
36452e10cb0SAndrew Rybchenko 	}
36552e10cb0SAndrew Rybchenko 
366638bddc9SAndrew Rybchenko 	/* Remember mbuf to copy offload flags and packet type from */
367638bddc9SAndrew Rybchenko 	m0 = m;
3683be22684SAndrew Rybchenko 	while (pending != rxq->pending) {
3693be22684SAndrew Rybchenko 		rxd = &rxq->sw_ring[pending++ & ptr_mask];
370638bddc9SAndrew Rybchenko 
3713be22684SAndrew Rybchenko 		sfc_ef10_rx_prefetch_next(rxq, pending & ptr_mask);
372638bddc9SAndrew Rybchenko 
373638bddc9SAndrew Rybchenko 		m = rxd->mbuf;
374638bddc9SAndrew Rybchenko 
3753be22684SAndrew Rybchenko 		if (rx_pkts != rx_pkts_end) {
376638bddc9SAndrew Rybchenko 			*rx_pkts++ = m;
3773be22684SAndrew Rybchenko 			rxq->completed = pending;
3783be22684SAndrew Rybchenko 		}
379638bddc9SAndrew Rybchenko 
38068de5f6cSAndrew Rybchenko 		RTE_BUILD_BUG_ON(sizeof(m->rearm_data[0]) !=
38168de5f6cSAndrew Rybchenko 				 sizeof(rxq->rearm_data));
38268de5f6cSAndrew Rybchenko 		m->rearm_data[0] = rxq->rearm_data;
383638bddc9SAndrew Rybchenko 
384638bddc9SAndrew Rybchenko 		/* Event-dependent information is the same */
385638bddc9SAndrew Rybchenko 		m->ol_flags = m0->ol_flags;
386638bddc9SAndrew Rybchenko 		m->packet_type = m0->packet_type;
387638bddc9SAndrew Rybchenko 
388638bddc9SAndrew Rybchenko 		/* data_off already moved past pseudo header */
389638bddc9SAndrew Rybchenko 		pseudo_hdr = (uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
390638bddc9SAndrew Rybchenko 
391638bddc9SAndrew Rybchenko 		/*
392638bddc9SAndrew Rybchenko 		 * Always get the RSS hash from the pseudo header to avoid
393638bddc9SAndrew Rybchenko 		 * a condition/branch. Whether it is valid or not depends on
394daa02b5cSOlivier Matz 		 * RTE_MBUF_F_RX_RSS_HASH in m->ol_flags.
395638bddc9SAndrew Rybchenko 		 */
396638bddc9SAndrew Rybchenko 		m->hash.rss = sfc_ef10_rx_pseudo_hdr_get_hash(pseudo_hdr);
397638bddc9SAndrew Rybchenko 
398c4753858SAndrew Rybchenko 		seg_len = sfc_ef10_rx_pseudo_hdr_get_len(pseudo_hdr);
399c4753858SAndrew Rybchenko 		SFC_ASSERT(seg_len > 0);
400c4753858SAndrew Rybchenko 		rte_pktmbuf_data_len(m) = seg_len;
401c4753858SAndrew Rybchenko 		rte_pktmbuf_pkt_len(m) = seg_len;
402638bddc9SAndrew Rybchenko 
40368de5f6cSAndrew Rybchenko 		SFC_ASSERT(m->next == NULL);
404638bddc9SAndrew Rybchenko 	}
405638bddc9SAndrew Rybchenko 
40684b63b5cSAndrew Rybchenko 	return rx_pkts;
407638bddc9SAndrew Rybchenko }
408638bddc9SAndrew Rybchenko 
409638bddc9SAndrew Rybchenko static bool
410638bddc9SAndrew Rybchenko sfc_ef10_rx_get_event(struct sfc_ef10_rxq *rxq, efx_qword_t *rx_ev)
411638bddc9SAndrew Rybchenko {
412638bddc9SAndrew Rybchenko 	*rx_ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->ptr_mask];
413638bddc9SAndrew Rybchenko 
414638bddc9SAndrew Rybchenko 	if (!sfc_ef10_ev_present(*rx_ev))
415638bddc9SAndrew Rybchenko 		return false;
416638bddc9SAndrew Rybchenko 
417638bddc9SAndrew Rybchenko 	if (unlikely(EFX_QWORD_FIELD(*rx_ev, FSF_AZ_EV_CODE) !=
418638bddc9SAndrew Rybchenko 		     FSE_AZ_EV_CODE_RX_EV)) {
419638bddc9SAndrew Rybchenko 		/*
420638bddc9SAndrew Rybchenko 		 * Do not move read_ptr to keep the event for exception
421638bddc9SAndrew Rybchenko 		 * handling by the control path.
422638bddc9SAndrew Rybchenko 		 */
423638bddc9SAndrew Rybchenko 		rxq->flags |= SFC_EF10_RXQ_EXCEPTION;
424638bddc9SAndrew Rybchenko 		sfc_ef10_rx_err(&rxq->dp.dpq,
425638bddc9SAndrew Rybchenko 				"RxQ exception at EvQ read ptr %#x",
426638bddc9SAndrew Rybchenko 				rxq->evq_read_ptr);
427638bddc9SAndrew Rybchenko 		return false;
428638bddc9SAndrew Rybchenko 	}
429638bddc9SAndrew Rybchenko 
430638bddc9SAndrew Rybchenko 	rxq->evq_read_ptr++;
431638bddc9SAndrew Rybchenko 	return true;
432638bddc9SAndrew Rybchenko }
433638bddc9SAndrew Rybchenko 
434638bddc9SAndrew Rybchenko static uint16_t
435638bddc9SAndrew Rybchenko sfc_ef10_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
436638bddc9SAndrew Rybchenko {
437638bddc9SAndrew Rybchenko 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(rx_queue);
43884b63b5cSAndrew Rybchenko 	struct rte_mbuf ** const rx_pkts_end = &rx_pkts[nb_pkts];
439638bddc9SAndrew Rybchenko 	unsigned int evq_old_read_ptr;
440638bddc9SAndrew Rybchenko 	efx_qword_t rx_ev;
441638bddc9SAndrew Rybchenko 
4423be22684SAndrew Rybchenko 	rx_pkts = sfc_ef10_rx_pending(rxq, rx_pkts, nb_pkts);
443f609ee3fSAndrew Rybchenko 
444638bddc9SAndrew Rybchenko 	if (unlikely(rxq->flags &
445638bddc9SAndrew Rybchenko 		     (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
446f609ee3fSAndrew Rybchenko 		goto done;
447638bddc9SAndrew Rybchenko 
448638bddc9SAndrew Rybchenko 	evq_old_read_ptr = rxq->evq_read_ptr;
44984b63b5cSAndrew Rybchenko 	while (rx_pkts != rx_pkts_end && sfc_ef10_rx_get_event(rxq, &rx_ev)) {
450638bddc9SAndrew Rybchenko 		/*
451638bddc9SAndrew Rybchenko 		 * DROP_EVENT is internal to the NIC; software should
452638bddc9SAndrew Rybchenko 		 * never see it and, therefore, may ignore it.
453638bddc9SAndrew Rybchenko 		 */
454638bddc9SAndrew Rybchenko 
45584b63b5cSAndrew Rybchenko 		rx_pkts = sfc_ef10_rx_process_event(rxq, rx_ev,
45684b63b5cSAndrew Rybchenko 						    rx_pkts, rx_pkts_end);
457638bddc9SAndrew Rybchenko 	}
458638bddc9SAndrew Rybchenko 
459638bddc9SAndrew Rybchenko 	sfc_ef10_ev_qclear(rxq->evq_hw_ring, rxq->ptr_mask, evq_old_read_ptr,
460638bddc9SAndrew Rybchenko 			   rxq->evq_read_ptr);
461638bddc9SAndrew Rybchenko 
462638bddc9SAndrew Rybchenko 	/* It is not a problem if we refill in case of an exception */
463638bddc9SAndrew Rybchenko 	sfc_ef10_rx_qrefill(rxq);
464638bddc9SAndrew Rybchenko 
4651245e3faSGeorgiy Levashov 	if ((rxq->flags & SFC_EF10_RXQ_FLAG_INTR_EN) &&
4661245e3faSGeorgiy Levashov 	    rxq->evq_read_ptr_primed != rxq->evq_read_ptr)
4671245e3faSGeorgiy Levashov 		sfc_ef10_rx_qprime(rxq);
4681245e3faSGeorgiy Levashov 
469f609ee3fSAndrew Rybchenko done:
47084b63b5cSAndrew Rybchenko 	return nb_pkts - (rx_pkts_end - rx_pkts);
471638bddc9SAndrew Rybchenko }
472638bddc9SAndrew Rybchenko 
473390f9b8dSAndrew Rybchenko const uint32_t *
474*ba6a168aSSivaramakrishnan Venkat sfc_ef10_supported_ptypes_get(uint32_t tunnel_encaps, size_t *no_of_elements)
475638bddc9SAndrew Rybchenko {
476638bddc9SAndrew Rybchenko 	static const uint32_t ef10_native_ptypes[] = {
477638bddc9SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER,
478638bddc9SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER_ARP,
479638bddc9SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER_VLAN,
480638bddc9SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER_QINQ,
481638bddc9SAndrew Rybchenko 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
482638bddc9SAndrew Rybchenko 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
483638bddc9SAndrew Rybchenko 		RTE_PTYPE_L4_FRAG,
484638bddc9SAndrew Rybchenko 		RTE_PTYPE_L4_TCP,
485638bddc9SAndrew Rybchenko 		RTE_PTYPE_L4_UDP,
486638bddc9SAndrew Rybchenko 	};
487591cbbb1SAndrew Rybchenko 	static const uint32_t ef10_overlay_ptypes[] = {
488591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER,
489591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER_ARP,
490591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER_VLAN,
491591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER_QINQ,
492591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
493591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
494591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L4_FRAG,
495591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L4_TCP,
496591cbbb1SAndrew Rybchenko 		RTE_PTYPE_L4_UDP,
497591cbbb1SAndrew Rybchenko 		RTE_PTYPE_TUNNEL_VXLAN,
498591cbbb1SAndrew Rybchenko 		RTE_PTYPE_TUNNEL_NVGRE,
499591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L2_ETHER,
500591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L2_ETHER_VLAN,
501591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L2_ETHER_QINQ,
502591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
503591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
504591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L4_FRAG,
505591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L4_TCP,
506591cbbb1SAndrew Rybchenko 		RTE_PTYPE_INNER_L4_UDP,
507591cbbb1SAndrew Rybchenko 	};
508638bddc9SAndrew Rybchenko 
509591cbbb1SAndrew Rybchenko 	/*
510591cbbb1SAndrew Rybchenko 	 * The function returns a static set of supported packet types,
511591cbbb1SAndrew Rybchenko 	 * so we cannot build it dynamically based on supported tunnel
512591cbbb1SAndrew Rybchenko 	 * encapsulations and have to limit ourselves to known sets.
513591cbbb1SAndrew Rybchenko 	 */
514591cbbb1SAndrew Rybchenko 	switch (tunnel_encaps) {
515591cbbb1SAndrew Rybchenko 	case (1u << EFX_TUNNEL_PROTOCOL_VXLAN |
516591cbbb1SAndrew Rybchenko 	      1u << EFX_TUNNEL_PROTOCOL_GENEVE |
517591cbbb1SAndrew Rybchenko 	      1u << EFX_TUNNEL_PROTOCOL_NVGRE):
518*ba6a168aSSivaramakrishnan Venkat 		*no_of_elements = RTE_DIM(ef10_overlay_ptypes);
519591cbbb1SAndrew Rybchenko 		return ef10_overlay_ptypes;
520591cbbb1SAndrew Rybchenko 	default:
521fdceb100SIvan Malov 		SFC_GENERIC_LOG(ERR,
522fdceb100SIvan Malov 			"Unexpected set of supported tunnel encapsulations: %#x",
523591cbbb1SAndrew Rybchenko 			tunnel_encaps);
524591cbbb1SAndrew Rybchenko 		/* FALLTHROUGH */
525591cbbb1SAndrew Rybchenko 	case 0:
526*ba6a168aSSivaramakrishnan Venkat 		*no_of_elements = RTE_DIM(ef10_native_ptypes);
527638bddc9SAndrew Rybchenko 		return ef10_native_ptypes;
528638bddc9SAndrew Rybchenko 	}
529591cbbb1SAndrew Rybchenko }
530638bddc9SAndrew Rybchenko 
531638bddc9SAndrew Rybchenko static sfc_dp_rx_qdesc_npending_t sfc_ef10_rx_qdesc_npending;
532638bddc9SAndrew Rybchenko static unsigned int
5331a9d944fSIgor Romanov sfc_ef10_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
534638bddc9SAndrew Rybchenko {
5351a9d944fSIgor Romanov 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
5361a9d944fSIgor Romanov 	efx_qword_t rx_ev;
5371a9d944fSIgor Romanov 	const unsigned int evq_old_read_ptr = rxq->evq_read_ptr;
5381a9d944fSIgor Romanov 	unsigned int pending = rxq->pending;
5391a9d944fSIgor Romanov 	unsigned int ready;
5401a9d944fSIgor Romanov 
5411a9d944fSIgor Romanov 	if (unlikely(rxq->flags &
5421a9d944fSIgor Romanov 		     (SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION)))
5431a9d944fSIgor Romanov 		goto done;
5441a9d944fSIgor Romanov 
5451a9d944fSIgor Romanov 	while (sfc_ef10_rx_get_event(rxq, &rx_ev)) {
5461a9d944fSIgor Romanov 		ready = (EFX_QWORD_FIELD(rx_ev, ESF_DZ_RX_DSC_PTR_LBITS) -
5471a9d944fSIgor Romanov 			 pending) &
5481a9d944fSIgor Romanov 			EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
5491a9d944fSIgor Romanov 		pending += ready;
5501a9d944fSIgor Romanov 	}
5511a9d944fSIgor Romanov 
552638bddc9SAndrew Rybchenko 	/*
5531a9d944fSIgor Romanov 	 * The function does not process events, so restore the event queue
5541a9d944fSIgor Romanov 	 * read pointer to its original position so that the events that
5551a9d944fSIgor Romanov 	 * were read can be processed later.
556638bddc9SAndrew Rybchenko 	 */
5571a9d944fSIgor Romanov 	rxq->evq_read_ptr = evq_old_read_ptr;
5581a9d944fSIgor Romanov 
5591a9d944fSIgor Romanov done:
5601a9d944fSIgor Romanov 	return pending - rxq->completed;
561638bddc9SAndrew Rybchenko }
562638bddc9SAndrew Rybchenko 
5631d8f3a80SIvan Malov static sfc_dp_rx_qdesc_status_t sfc_ef10_rx_qdesc_status;
5641d8f3a80SIvan Malov static int
5651a9d944fSIgor Romanov sfc_ef10_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
5661d8f3a80SIvan Malov {
5671a9d944fSIgor Romanov 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
5681a9d944fSIgor Romanov 	unsigned int npending = sfc_ef10_rx_qdesc_npending(dp_rxq);
5691a9d944fSIgor Romanov 
5701a9d944fSIgor Romanov 	if (unlikely(offset > rxq->ptr_mask))
5711a9d944fSIgor Romanov 		return -EINVAL;
5721a9d944fSIgor Romanov 
5731a9d944fSIgor Romanov 	if (offset < npending)
5741a9d944fSIgor Romanov 		return RTE_ETH_RX_DESC_DONE;
5751a9d944fSIgor Romanov 
5761a9d944fSIgor Romanov 	if (offset < (rxq->added - rxq->completed))
5771a9d944fSIgor Romanov 		return RTE_ETH_RX_DESC_AVAIL;
5781a9d944fSIgor Romanov 
5791a9d944fSIgor Romanov 	return RTE_ETH_RX_DESC_UNAVAIL;
5801d8f3a80SIvan Malov }
5811d8f3a80SIvan Malov 
582638bddc9SAndrew Rybchenko 
5833c335b7fSAndrew Rybchenko static sfc_dp_rx_get_dev_info_t sfc_ef10_rx_get_dev_info;
5843c335b7fSAndrew Rybchenko static void
5853c335b7fSAndrew Rybchenko sfc_ef10_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
5863c335b7fSAndrew Rybchenko {
5873c335b7fSAndrew Rybchenko 	/*
5883c335b7fSAndrew Rybchenko 	 * The number of descriptors just defines the maximum number of
5893c335b7fSAndrew Rybchenko 	 * pushed descriptors (fill level).
5903c335b7fSAndrew Rybchenko 	 */
5913c335b7fSAndrew Rybchenko 	dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
5923c335b7fSAndrew Rybchenko 	dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
5933c335b7fSAndrew Rybchenko }
5943c335b7fSAndrew Rybchenko 
5953c335b7fSAndrew Rybchenko 
596f7da270aSAndrew Rybchenko static sfc_dp_rx_qsize_up_rings_t sfc_ef10_rx_qsize_up_rings;
597f7da270aSAndrew Rybchenko static int
598f7da270aSAndrew Rybchenko sfc_ef10_rx_qsize_up_rings(uint16_t nb_rx_desc,
599048a0d1aSIgor Romanov 			   struct sfc_dp_rx_hw_limits *limits,
600d101da1bSAndrew Rybchenko 			   __rte_unused struct rte_mempool *mb_pool,
601f7da270aSAndrew Rybchenko 			   unsigned int *rxq_entries,
602f7da270aSAndrew Rybchenko 			   unsigned int *evq_entries,
603f7da270aSAndrew Rybchenko 			   unsigned int *rxq_max_fill_level)
604f7da270aSAndrew Rybchenko {
6053c335b7fSAndrew Rybchenko 	/*
6063c335b7fSAndrew Rybchenko 	 * rte_ethdev API guarantees that the number meets min, max and
6073c335b7fSAndrew Rybchenko 	 * alignment requirements.
6083c335b7fSAndrew Rybchenko 	 */
609048a0d1aSIgor Romanov 	if (nb_rx_desc <= limits->rxq_min_entries)
610048a0d1aSIgor Romanov 		*rxq_entries = limits->rxq_min_entries;
6113c335b7fSAndrew Rybchenko 	else
6123c335b7fSAndrew Rybchenko 		*rxq_entries = rte_align32pow2(nb_rx_desc);
6133c335b7fSAndrew Rybchenko 
6143c335b7fSAndrew Rybchenko 	*evq_entries = *rxq_entries;
6153c335b7fSAndrew Rybchenko 
6163c335b7fSAndrew Rybchenko 	*rxq_max_fill_level = RTE_MIN(nb_rx_desc,
6173c335b7fSAndrew Rybchenko 				      SFC_EF10_RXQ_LIMIT(*evq_entries));
618f7da270aSAndrew Rybchenko 	return 0;
619f7da270aSAndrew Rybchenko }
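/*
 * Worked example (a sketch): with limits->rxq_min_entries of 512 and
 * nb_rx_desc of 1000, rxq_entries = evq_entries = rte_align32pow2(1000)
 * = 1024 and rxq_max_fill_level = RTE_MIN(1000, SFC_EF10_RXQ_LIMIT(1024)).
 */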
620f7da270aSAndrew Rybchenko 
621f7da270aSAndrew Rybchenko 
622638bddc9SAndrew Rybchenko static uint64_t
623638bddc9SAndrew Rybchenko sfc_ef10_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
624638bddc9SAndrew Rybchenko {
625638bddc9SAndrew Rybchenko 	struct rte_mbuf m;
626638bddc9SAndrew Rybchenko 
627638bddc9SAndrew Rybchenko 	memset(&m, 0, sizeof(m));
628638bddc9SAndrew Rybchenko 
629638bddc9SAndrew Rybchenko 	rte_mbuf_refcnt_set(&m, 1);
630638bddc9SAndrew Rybchenko 	m.data_off = RTE_PKTMBUF_HEADROOM + prefix_size;
631638bddc9SAndrew Rybchenko 	m.nb_segs = 1;
632638bddc9SAndrew Rybchenko 	m.port = port_id;
633638bddc9SAndrew Rybchenko 
634638bddc9SAndrew Rybchenko 	/* rearm_data covers structure members filled in above */
635638bddc9SAndrew Rybchenko 	rte_compiler_barrier();
63668de5f6cSAndrew Rybchenko 	RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));
63768de5f6cSAndrew Rybchenko 	return m.rearm_data[0];
638638bddc9SAndrew Rybchenko }
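/*
 * On the datapath the template built above is applied with a single
 * 64-bit store (m->rearm_data[0] = rxq->rearm_data), which is roughly
 * equivalent to, but cheaper than, per-field stores such as:
 *
 *	rte_mbuf_refcnt_set(m, 1);
 *	m->data_off = RTE_PKTMBUF_HEADROOM + prefix_size;
 *	m->nb_segs = 1;
 *	m->port = port_id;
 */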
639638bddc9SAndrew Rybchenko 
640638bddc9SAndrew Rybchenko static sfc_dp_rx_qcreate_t sfc_ef10_rx_qcreate;
641638bddc9SAndrew Rybchenko static int
642638bddc9SAndrew Rybchenko sfc_ef10_rx_qcreate(uint16_t port_id, uint16_t queue_id,
643638bddc9SAndrew Rybchenko 		    const struct rte_pci_addr *pci_addr, int socket_id,
644638bddc9SAndrew Rybchenko 		    const struct sfc_dp_rx_qcreate_info *info,
645638bddc9SAndrew Rybchenko 		    struct sfc_dp_rxq **dp_rxqp)
646638bddc9SAndrew Rybchenko {
647638bddc9SAndrew Rybchenko 	struct sfc_ef10_rxq *rxq;
648638bddc9SAndrew Rybchenko 	int rc;
649638bddc9SAndrew Rybchenko 
650638bddc9SAndrew Rybchenko 	rc = EINVAL;
651638bddc9SAndrew Rybchenko 	if (info->rxq_entries != info->evq_entries)
652638bddc9SAndrew Rybchenko 		goto fail_rxq_args;
653638bddc9SAndrew Rybchenko 
6543037e6cfSViacheslav Galaktionov 	rc = ENOTSUP;
6553037e6cfSViacheslav Galaktionov 	if (info->nic_dma_info->nb_regions > 0)
6563037e6cfSViacheslav Galaktionov 		goto fail_nic_dma;
6573037e6cfSViacheslav Galaktionov 
658638bddc9SAndrew Rybchenko 	rc = ENOMEM;
659638bddc9SAndrew Rybchenko 	rxq = rte_zmalloc_socket("sfc-ef10-rxq", sizeof(*rxq),
660638bddc9SAndrew Rybchenko 				 RTE_CACHE_LINE_SIZE, socket_id);
661638bddc9SAndrew Rybchenko 	if (rxq == NULL)
662638bddc9SAndrew Rybchenko 		goto fail_rxq_alloc;
663638bddc9SAndrew Rybchenko 
664638bddc9SAndrew Rybchenko 	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
665638bddc9SAndrew Rybchenko 
666638bddc9SAndrew Rybchenko 	rc = ENOMEM;
667638bddc9SAndrew Rybchenko 	rxq->sw_ring = rte_calloc_socket("sfc-ef10-rxq-sw_ring",
668638bddc9SAndrew Rybchenko 					 info->rxq_entries,
669638bddc9SAndrew Rybchenko 					 sizeof(*rxq->sw_ring),
670638bddc9SAndrew Rybchenko 					 RTE_CACHE_LINE_SIZE, socket_id);
671638bddc9SAndrew Rybchenko 	if (rxq->sw_ring == NULL)
672638bddc9SAndrew Rybchenko 		goto fail_desc_alloc;
673638bddc9SAndrew Rybchenko 
674638bddc9SAndrew Rybchenko 	rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
675638bddc9SAndrew Rybchenko 	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
676638bddc9SAndrew Rybchenko 		rxq->flags |= SFC_EF10_RXQ_RSS_HASH;
677638bddc9SAndrew Rybchenko 	rxq->ptr_mask = info->rxq_entries - 1;
678638bddc9SAndrew Rybchenko 	rxq->evq_hw_ring = info->evq_hw_ring;
679e5595ee2SAndrew Rybchenko 	rxq->max_fill_level = info->max_fill_level;
680638bddc9SAndrew Rybchenko 	rxq->refill_threshold = info->refill_threshold;
681638bddc9SAndrew Rybchenko 	rxq->rearm_data =
682638bddc9SAndrew Rybchenko 		sfc_ef10_mk_mbuf_rearm_data(port_id, info->prefix_size);
683638bddc9SAndrew Rybchenko 	rxq->prefix_size = info->prefix_size;
684638bddc9SAndrew Rybchenko 	rxq->buf_size = info->buf_size;
685638bddc9SAndrew Rybchenko 	rxq->refill_mb_pool = info->refill_mb_pool;
686638bddc9SAndrew Rybchenko 	rxq->rxq_hw_ring = info->rxq_hw_ring;
687638bddc9SAndrew Rybchenko 	rxq->doorbell = (volatile uint8_t *)info->mem_bar +
688638bddc9SAndrew Rybchenko 			ER_DZ_RX_DESC_UPD_REG_OFST +
689714bff55SAndrew Rybchenko 			(info->hw_index << info->vi_window_shift);
6901245e3faSGeorgiy Levashov 	rxq->evq_prime = (volatile uint8_t *)info->mem_bar +
6911245e3faSGeorgiy Levashov 		      ER_DZ_EVQ_RPTR_REG_OFST +
6921245e3faSGeorgiy Levashov 		      (info->evq_hw_index << info->vi_window_shift);
693638bddc9SAndrew Rybchenko 
694e7fbf6f5SAndrew Rybchenko 	sfc_ef10_rx_info(&rxq->dp.dpq, "RxQ doorbell is %p", rxq->doorbell);
695e7fbf6f5SAndrew Rybchenko 
696638bddc9SAndrew Rybchenko 	*dp_rxqp = &rxq->dp;
697638bddc9SAndrew Rybchenko 	return 0;
698638bddc9SAndrew Rybchenko 
699638bddc9SAndrew Rybchenko fail_desc_alloc:
700638bddc9SAndrew Rybchenko 	rte_free(rxq);
701638bddc9SAndrew Rybchenko 
702638bddc9SAndrew Rybchenko fail_rxq_alloc:
7033037e6cfSViacheslav Galaktionov fail_nic_dma:
704638bddc9SAndrew Rybchenko fail_rxq_args:
705638bddc9SAndrew Rybchenko 	return rc;
706638bddc9SAndrew Rybchenko }
707638bddc9SAndrew Rybchenko 
708638bddc9SAndrew Rybchenko static sfc_dp_rx_qdestroy_t sfc_ef10_rx_qdestroy;
709638bddc9SAndrew Rybchenko static void
710638bddc9SAndrew Rybchenko sfc_ef10_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
711638bddc9SAndrew Rybchenko {
712638bddc9SAndrew Rybchenko 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
713638bddc9SAndrew Rybchenko 
714638bddc9SAndrew Rybchenko 	rte_free(rxq->sw_ring);
715638bddc9SAndrew Rybchenko 	rte_free(rxq);
716638bddc9SAndrew Rybchenko }
717638bddc9SAndrew Rybchenko 
718638bddc9SAndrew Rybchenko static sfc_dp_rx_qstart_t sfc_ef10_rx_qstart;
719638bddc9SAndrew Rybchenko static int
720c6845644SAndrew Rybchenko sfc_ef10_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr,
721c6845644SAndrew Rybchenko 		   const efx_rx_prefix_layout_t *pinfo)
722638bddc9SAndrew Rybchenko {
723638bddc9SAndrew Rybchenko 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
724638bddc9SAndrew Rybchenko 
725f609ee3fSAndrew Rybchenko 	SFC_ASSERT(rxq->completed == 0);
7263be22684SAndrew Rybchenko 	SFC_ASSERT(rxq->pending == 0);
727f609ee3fSAndrew Rybchenko 	SFC_ASSERT(rxq->added == 0);
728638bddc9SAndrew Rybchenko 
729c6845644SAndrew Rybchenko 	if (pinfo->erpl_length != rxq->prefix_size ||
730c6845644SAndrew Rybchenko 	    efx_rx_prefix_layout_check(pinfo, &sfc_ef10_rx_prefix_layout) != 0)
731c6845644SAndrew Rybchenko 		return ENOTSUP;
732c6845644SAndrew Rybchenko 
733638bddc9SAndrew Rybchenko 	sfc_ef10_rx_qrefill(rxq);
734638bddc9SAndrew Rybchenko 
735638bddc9SAndrew Rybchenko 	rxq->evq_read_ptr = evq_read_ptr;
736638bddc9SAndrew Rybchenko 
737638bddc9SAndrew Rybchenko 	rxq->flags |= SFC_EF10_RXQ_STARTED;
738638bddc9SAndrew Rybchenko 	rxq->flags &= ~(SFC_EF10_RXQ_NOT_RUNNING | SFC_EF10_RXQ_EXCEPTION);
739638bddc9SAndrew Rybchenko 
7401245e3faSGeorgiy Levashov 	if (rxq->flags & SFC_EF10_RXQ_FLAG_INTR_EN)
7411245e3faSGeorgiy Levashov 		sfc_ef10_rx_qprime(rxq);
7421245e3faSGeorgiy Levashov 
743638bddc9SAndrew Rybchenko 	return 0;
744638bddc9SAndrew Rybchenko }
745638bddc9SAndrew Rybchenko 
746638bddc9SAndrew Rybchenko static sfc_dp_rx_qstop_t sfc_ef10_rx_qstop;
747638bddc9SAndrew Rybchenko static void
748638bddc9SAndrew Rybchenko sfc_ef10_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
749638bddc9SAndrew Rybchenko {
750638bddc9SAndrew Rybchenko 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
751638bddc9SAndrew Rybchenko 
752638bddc9SAndrew Rybchenko 	rxq->flags |= SFC_EF10_RXQ_NOT_RUNNING;
753638bddc9SAndrew Rybchenko 
754638bddc9SAndrew Rybchenko 	*evq_read_ptr = rxq->evq_read_ptr;
755638bddc9SAndrew Rybchenko }
756638bddc9SAndrew Rybchenko 
757638bddc9SAndrew Rybchenko static sfc_dp_rx_qrx_ev_t sfc_ef10_rx_qrx_ev;
758638bddc9SAndrew Rybchenko static bool
759638bddc9SAndrew Rybchenko sfc_ef10_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
760638bddc9SAndrew Rybchenko {
761638bddc9SAndrew Rybchenko 	__rte_unused struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
762638bddc9SAndrew Rybchenko 
763638bddc9SAndrew Rybchenko 	SFC_ASSERT(rxq->flags & SFC_EF10_RXQ_NOT_RUNNING);
764638bddc9SAndrew Rybchenko 
765638bddc9SAndrew Rybchenko 	/*
766638bddc9SAndrew Rybchenko 	 * It is safe to ignore the Rx event since we free all mbufs on
767638bddc9SAndrew Rybchenko 	 * queue purge anyway.
768638bddc9SAndrew Rybchenko 	 */
769638bddc9SAndrew Rybchenko 
770638bddc9SAndrew Rybchenko 	return false;
771638bddc9SAndrew Rybchenko }
772638bddc9SAndrew Rybchenko 
773638bddc9SAndrew Rybchenko static sfc_dp_rx_qpurge_t sfc_ef10_rx_qpurge;
774638bddc9SAndrew Rybchenko static void
775638bddc9SAndrew Rybchenko sfc_ef10_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
776638bddc9SAndrew Rybchenko {
777638bddc9SAndrew Rybchenko 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
778638bddc9SAndrew Rybchenko 	unsigned int i;
779638bddc9SAndrew Rybchenko 	struct sfc_ef10_rx_sw_desc *rxd;
780638bddc9SAndrew Rybchenko 
78152e10cb0SAndrew Rybchenko 	rte_pktmbuf_free(rxq->scatter_pkt);
78252e10cb0SAndrew Rybchenko 	rxq->scatter_pkt = NULL;
78352e10cb0SAndrew Rybchenko 
784638bddc9SAndrew Rybchenko 	for (i = rxq->completed; i != rxq->added; ++i) {
785638bddc9SAndrew Rybchenko 		rxd = &rxq->sw_ring[i & rxq->ptr_mask];
78666e10b8dSAndrew Rybchenko 		rte_mbuf_raw_free(rxd->mbuf);
787638bddc9SAndrew Rybchenko 		rxd->mbuf = NULL;
788638bddc9SAndrew Rybchenko 	}
789638bddc9SAndrew Rybchenko 
7903be22684SAndrew Rybchenko 	rxq->completed = rxq->pending = rxq->added = 0;
791f609ee3fSAndrew Rybchenko 
792638bddc9SAndrew Rybchenko 	rxq->flags &= ~SFC_EF10_RXQ_STARTED;
793638bddc9SAndrew Rybchenko }
794638bddc9SAndrew Rybchenko 
7951245e3faSGeorgiy Levashov static sfc_dp_rx_intr_enable_t sfc_ef10_rx_intr_enable;
7961245e3faSGeorgiy Levashov static int
7971245e3faSGeorgiy Levashov sfc_ef10_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
7981245e3faSGeorgiy Levashov {
7991245e3faSGeorgiy Levashov 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
8001245e3faSGeorgiy Levashov 
8011245e3faSGeorgiy Levashov 	rxq->flags |= SFC_EF10_RXQ_FLAG_INTR_EN;
8021245e3faSGeorgiy Levashov 	if (rxq->flags & SFC_EF10_RXQ_STARTED)
8031245e3faSGeorgiy Levashov 		sfc_ef10_rx_qprime(rxq);
8041245e3faSGeorgiy Levashov 	return 0;
8051245e3faSGeorgiy Levashov }
8061245e3faSGeorgiy Levashov 
8071245e3faSGeorgiy Levashov static sfc_dp_rx_intr_disable_t sfc_ef10_rx_intr_disable;
8081245e3faSGeorgiy Levashov static int
8091245e3faSGeorgiy Levashov sfc_ef10_rx_intr_disable(struct sfc_dp_rxq *dp_rxq)
8101245e3faSGeorgiy Levashov {
8111245e3faSGeorgiy Levashov 	struct sfc_ef10_rxq *rxq = sfc_ef10_rxq_by_dp_rxq(dp_rxq);
8121245e3faSGeorgiy Levashov 
8131245e3faSGeorgiy Levashov 	/* Cannot disarm, just disable rearm */
8141245e3faSGeorgiy Levashov 	rxq->flags &= ~SFC_EF10_RXQ_FLAG_INTR_EN;
8151245e3faSGeorgiy Levashov 	return 0;
8161245e3faSGeorgiy Levashov }
8171245e3faSGeorgiy Levashov 
818638bddc9SAndrew Rybchenko struct sfc_dp_rx sfc_ef10_rx = {
819638bddc9SAndrew Rybchenko 	.dp = {
820638bddc9SAndrew Rybchenko 		.name		= SFC_KVARG_DATAPATH_EF10,
821638bddc9SAndrew Rybchenko 		.type		= SFC_DP_RX,
822638bddc9SAndrew Rybchenko 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF10,
823638bddc9SAndrew Rybchenko 	},
8241245e3faSGeorgiy Levashov 	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
8251245e3faSGeorgiy Levashov 				  SFC_DP_RX_FEAT_INTR,
826295968d1SFerruh Yigit 	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
827295968d1SFerruh Yigit 				  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
82804a04943SDenis Pryazhennikov 				  RTE_ETH_RX_OFFLOAD_RSS_HASH |
82904a04943SDenis Pryazhennikov 				  RTE_ETH_RX_OFFLOAD_KEEP_CRC,
830295968d1SFerruh Yigit 	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
8313c335b7fSAndrew Rybchenko 	.get_dev_info		= sfc_ef10_rx_get_dev_info,
832f7da270aSAndrew Rybchenko 	.qsize_up_rings		= sfc_ef10_rx_qsize_up_rings,
833638bddc9SAndrew Rybchenko 	.qcreate		= sfc_ef10_rx_qcreate,
834638bddc9SAndrew Rybchenko 	.qdestroy		= sfc_ef10_rx_qdestroy,
835638bddc9SAndrew Rybchenko 	.qstart			= sfc_ef10_rx_qstart,
836638bddc9SAndrew Rybchenko 	.qstop			= sfc_ef10_rx_qstop,
837638bddc9SAndrew Rybchenko 	.qrx_ev			= sfc_ef10_rx_qrx_ev,
838638bddc9SAndrew Rybchenko 	.qpurge			= sfc_ef10_rx_qpurge,
839638bddc9SAndrew Rybchenko 	.supported_ptypes_get	= sfc_ef10_supported_ptypes_get,
840638bddc9SAndrew Rybchenko 	.qdesc_npending		= sfc_ef10_rx_qdesc_npending,
8411d8f3a80SIvan Malov 	.qdesc_status		= sfc_ef10_rx_qdesc_status,
8421245e3faSGeorgiy Levashov 	.intr_enable		= sfc_ef10_rx_intr_enable,
8431245e3faSGeorgiy Levashov 	.intr_disable		= sfc_ef10_rx_intr_disable,
844638bddc9SAndrew Rybchenko 	.pkt_burst		= sfc_ef10_recv_pkts,
845638bddc9SAndrew Rybchenko };
846