/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2018-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* EF100 native datapath implementation */

#include <stdbool.h>

#include <rte_byteorder.h>
#include <rte_mbuf_ptype.h>
#include <rte_mbuf.h>
#include <rte_io.h>

#include "efx_types.h"
#include "efx_regs_ef100.h"
#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_flow_tunnel.h"
#include "sfc_tweak.h"
#include "sfc_dp_rx.h"
#include "sfc_kvargs.h"
#include "sfc_ef100.h"
#include "sfc_nic_dma_dp.h"


#define sfc_ef100_rx_err(_rxq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, ERR, &(_rxq)->dp.dpq, __VA_ARGS__)

#define sfc_ef100_rx_debug(_rxq, ...) \
	SFC_DP_LOG(SFC_KVARG_DATAPATH_EF100, DEBUG, &(_rxq)->dp.dpq, \
		   __VA_ARGS__)

/**
 * Maximum number of descriptors/buffers in the Rx ring.
 * It should guarantee that the corresponding event queue never overfills.
 * The EF100 native datapath uses an event queue of the same size as the
 * Rx queue. The maximum number of events on the datapath can be estimated
 * as the number of Rx queue entries (one event per Rx buffer in the worst
 * case) plus Rx error and flush events.
 */
#define SFC_EF100_RXQ_LIMIT(_ndesc) \
	((_ndesc) - 1 /* head must not step on tail */ - \
	 1 /* Rx error */ - 1 /* flush */)
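
/*
 * Example: for a 512-entry ring, SFC_EF100_RXQ_LIMIT(512) is
 * 512 - 1 - 1 - 1 = 509, i.e. at most 509 Rx buffers may be posted at
 * any time so that the same-sized event queue can always absorb one
 * event per buffer plus the Rx error and flush events.
 */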

/** Invalid user mark value when the mark should be treated as unset */
#define SFC_EF100_USER_MARK_INVALID	0

struct sfc_ef100_rx_sw_desc {
	struct rte_mbuf			*mbuf;
};

struct sfc_ef100_rxq {
	/* Used on data path */
	unsigned int			flags;
#define SFC_EF100_RXQ_STARTED		0x1
#define SFC_EF100_RXQ_NOT_RUNNING	0x2
#define SFC_EF100_RXQ_EXCEPTION		0x4
#define SFC_EF100_RXQ_RSS_HASH		0x10
#define SFC_EF100_RXQ_USER_MARK		0x20
#define SFC_EF100_RXQ_FLAG_INTR_EN	0x40
#define SFC_EF100_RXQ_INGRESS_MPORT	0x80
#define SFC_EF100_RXQ_USER_FLAG		0x100
#define SFC_EF100_RXQ_NIC_DMA_MAP	0x200
#define SFC_EF100_RXQ_VLAN_STRIPPED_TCI	0x400
	unsigned int			ptr_mask;
	unsigned int			evq_phase_bit_shift;
	unsigned int			ready_pkts;
	unsigned int			completed;
	unsigned int			evq_read_ptr;
	unsigned int			evq_read_ptr_primed;
	volatile efx_qword_t		*evq_hw_ring;
	struct sfc_ef100_rx_sw_desc	*sw_ring;
	uint64_t			rearm_data;
	uint16_t			buf_size;
	uint16_t			prefix_size;
	uint32_t			user_mark_mask;

	unsigned int			evq_hw_index;
	volatile void			*evq_prime;

	/* Used on refill */
	unsigned int			added;
	unsigned int			max_fill_level;
	unsigned int			refill_threshold;
	struct rte_mempool		*refill_mb_pool;
	efx_qword_t			*rxq_hw_ring;
	volatile void			*doorbell;

	/* Datapath receive queue anchor */
	struct sfc_dp_rxq		dp;

	const struct sfc_nic_dma_info	*nic_dma_info;
};
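
/*
 * Note: ring sizes are powers of two (see sfc_ef100_rx_qsize_up_rings()
 * below), so ptr_mask is entries - 1 and "index & ptr_mask" wraps the
 * monotonically growing added/completed/evq_read_ptr counters onto the
 * ring without a modulo operation.
 */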

static inline struct sfc_ef100_rxq *
sfc_ef100_rxq_by_dp_rxq(struct sfc_dp_rxq *dp_rxq)
{
	return container_of(dp_rxq, struct sfc_ef100_rxq, dp);
}

static void
sfc_ef100_rx_qprime(struct sfc_ef100_rxq *rxq)
{
	sfc_ef100_evq_prime(rxq->evq_prime, rxq->evq_hw_index,
			    rxq->evq_read_ptr & rxq->ptr_mask);
	rxq->evq_read_ptr_primed = rxq->evq_read_ptr;
}

static inline void
sfc_ef100_rx_qpush(struct sfc_ef100_rxq *rxq, unsigned int added)
{
	efx_dword_t dword;

	EFX_POPULATE_DWORD_1(dword, ERF_GZ_RX_RING_PIDX, added & rxq->ptr_mask);

	/* DMA sync to device is not required */

	/*
	 * rte_write32() has rte_io_wmb() which guarantees that the STORE
	 * operations (i.e. Rx and event descriptor updates) that precede
	 * the rte_io_wmb() call are visible to the NIC before the STORE
	 * operations that follow it (i.e. the doorbell write).
	 */
	rte_write32(dword.ed_u32[0], rxq->doorbell);
	rxq->dp.dpq.dbells++;

	sfc_ef100_rx_debug(rxq, "RxQ pushed doorbell at pidx %u (added=%u)",
			   EFX_DWORD_FIELD(dword, ERF_GZ_RX_RING_PIDX),
			   added);
}

static void
sfc_ef100_rx_qrefill(struct sfc_ef100_rxq *rxq)
{
	const unsigned int ptr_mask = rxq->ptr_mask;
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	unsigned int added = rxq->added;

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(objs);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);

	do {
		unsigned int i;

		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
						  RTE_DIM(objs)) < 0)) {
			struct rte_eth_dev_data *dev_data =
				rte_eth_devices[rxq->dp.dpq.port_id].data;

			/*
			 * It is hardly safe to increment the counter from
			 * different contexts, but all PMDs do it.
			 */
			dev_data->rx_mbuf_alloc_failed += RTE_DIM(objs);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push posted */
			break;
		}

		for (i = 0; i < RTE_DIM(objs); ++i) {
			struct rte_mbuf *m = objs[i];
			struct sfc_ef100_rx_sw_desc *rxd;
			rte_iova_t dma_addr;

			__rte_mbuf_raw_sanity_check(m);

			dma_addr = rte_mbuf_data_iova_default(m);
			if (rxq->flags & SFC_EF100_RXQ_NIC_DMA_MAP) {
				dma_addr = sfc_nic_dma_map(rxq->nic_dma_info,
						dma_addr,
						rte_pktmbuf_data_len(m));
				if (unlikely(dma_addr == RTE_BAD_IOVA)) {
					sfc_ef100_rx_err(rxq,
						"failed to map DMA address on Rx");
					/* Just skip buffer and try to continue */
					rte_mempool_put(rxq->refill_mb_pool, m);
					continue;
				}
			}

			rxd = &rxq->sw_ring[added & ptr_mask];
			rxd->mbuf = m;

			/*
			 * Avoid writing to the mbuf. It is cheaper to do it
			 * when we receive the packet and fill in nearby
			 * structure members.
			 */

			EFX_POPULATE_QWORD_1(rxq->rxq_hw_ring[added & ptr_mask],
			    ESF_GZ_RX_BUF_ADDR, dma_addr);
			added++;
		}
	} while (--bulks > 0);

	SFC_ASSERT(rxq->added != added);
	rxq->added = added;
	sfc_ef100_rx_qpush(rxq, added);
}
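
/*
 * Note on the arithmetic above: added and completed are free-running
 * unsigned counters, so "added - rxq->completed" yields the number of
 * posted but not yet completed buffers even after 32-bit wraparound,
 * and free_space never exceeds max_fill_level.
 */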

static inline uint64_t
sfc_ef100_rx_nt_or_inner_l4_csum(const efx_word_t class)
{
	return EFX_WORD_FIELD(class,
			      ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CSUM) ==
		ESE_GZ_RH_HCLASS_L4_CSUM_GOOD ?
		RTE_MBUF_F_RX_L4_CKSUM_GOOD : RTE_MBUF_F_RX_L4_CKSUM_BAD;
}

static inline uint64_t
sfc_ef100_rx_tun_outer_l4_csum(const efx_word_t class)
{
	return EFX_WORD_FIELD(class,
			      ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L4_CSUM) ==
		ESE_GZ_RH_HCLASS_L4_CSUM_GOOD ?
		RTE_MBUF_F_RX_OUTER_L4_CKSUM_GOOD : RTE_MBUF_F_RX_OUTER_L4_CKSUM_BAD;
}

static uint32_t
sfc_ef100_rx_class_decode(const efx_word_t class, uint64_t *ol_flags)
{
	uint32_t ptype;
	bool no_tunnel = false;

	if (unlikely(EFX_WORD_FIELD(class, ESF_GZ_RX_PREFIX_HCLASS_L2_CLASS) !=
		     ESE_GZ_RH_HCLASS_L2_CLASS_E2_0123VLAN))
		return 0;

	switch (EFX_WORD_FIELD(class, ESF_GZ_RX_PREFIX_HCLASS_L2_N_VLAN)) {
	case 0:
		ptype = RTE_PTYPE_L2_ETHER;
		break;
	case 1:
		ptype = RTE_PTYPE_L2_ETHER_VLAN;
		break;
	default:
		ptype = RTE_PTYPE_L2_ETHER_QINQ;
		break;
	}

	switch (EFX_WORD_FIELD(class, ESF_GZ_RX_PREFIX_HCLASS_TUNNEL_CLASS)) {
	case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NONE:
		no_tunnel = true;
		break;
	case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_VXLAN:
		ptype |= RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_L4_UDP;
		*ol_flags |= sfc_ef100_rx_tun_outer_l4_csum(class);
		break;
	case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_NVGRE:
		ptype |= RTE_PTYPE_TUNNEL_NVGRE;
		break;
	case ESE_GZ_RH_HCLASS_TUNNEL_CLASS_GENEVE:
		ptype |= RTE_PTYPE_TUNNEL_GENEVE | RTE_PTYPE_L4_UDP;
		*ol_flags |= sfc_ef100_rx_tun_outer_l4_csum(class);
		break;
	default:
		/*
		 * The driver does not know the tunnel, but it is
		 * still a tunnel and NT_OR_INNER refers to the
		 * inner frame.
		 */
		no_tunnel = false;
	}

	if (no_tunnel) {
		bool l4_valid = true;

		switch (EFX_WORD_FIELD(class,
			ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS)) {
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD:
			ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
			*ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			break;
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD:
			ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
			*ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
			break;
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP6:
			ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
			break;
		default:
			l4_valid = false;
		}

		if (l4_valid) {
			switch (EFX_WORD_FIELD(class,
				ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS)) {
			case ESE_GZ_RH_HCLASS_L4_CLASS_TCP:
				ptype |= RTE_PTYPE_L4_TCP;
				*ol_flags |=
					sfc_ef100_rx_nt_or_inner_l4_csum(class);
				break;
			case ESE_GZ_RH_HCLASS_L4_CLASS_UDP:
				ptype |= RTE_PTYPE_L4_UDP;
				*ol_flags |=
					sfc_ef100_rx_nt_or_inner_l4_csum(class);
				break;
			case ESE_GZ_RH_HCLASS_L4_CLASS_FRAG:
				ptype |= RTE_PTYPE_L4_FRAG;
				break;
			}
		}
	} else {
		bool l4_valid = true;

		switch (EFX_WORD_FIELD(class,
			ESF_GZ_RX_PREFIX_HCLASS_TUN_OUTER_L3_CLASS)) {
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD:
			ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
			break;
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD:
			ptype |= RTE_PTYPE_L3_IPV4_EXT_UNKNOWN;
			*ol_flags |= RTE_MBUF_F_RX_OUTER_IP_CKSUM_BAD;
			break;
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP6:
			ptype |= RTE_PTYPE_L3_IPV6_EXT_UNKNOWN;
			break;
		}

		switch (EFX_WORD_FIELD(class,
			ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS)) {
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP4GOOD:
			ptype |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
			*ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
			break;
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP4BAD:
			ptype |= RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN;
			*ol_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
			break;
		case ESE_GZ_RH_HCLASS_L3_CLASS_IP6:
			ptype |= RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN;
			break;
		default:
			l4_valid = false;
			break;
		}

		if (l4_valid) {
			switch (EFX_WORD_FIELD(class,
				ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L4_CLASS)) {
			case ESE_GZ_RH_HCLASS_L4_CLASS_TCP:
				ptype |= RTE_PTYPE_INNER_L4_TCP;
				*ol_flags |=
					sfc_ef100_rx_nt_or_inner_l4_csum(class);
				break;
			case ESE_GZ_RH_HCLASS_L4_CLASS_UDP:
				ptype |= RTE_PTYPE_INNER_L4_UDP;
				*ol_flags |=
					sfc_ef100_rx_nt_or_inner_l4_csum(class);
				break;
			case ESE_GZ_RH_HCLASS_L4_CLASS_FRAG:
				ptype |= RTE_PTYPE_INNER_L4_FRAG;
				break;
			}
		}
	}

	return ptype;
}
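
/*
 * Decode example: for a plain (non-tunnel) TCP/IPv4 packet with valid
 * checksums the function above returns RTE_PTYPE_L2_ETHER |
 * RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_TCP and accumulates
 * RTE_MBUF_F_RX_IP_CKSUM_GOOD | RTE_MBUF_F_RX_L4_CKSUM_GOOD in
 * *ol_flags.
 */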

/*
 * The function below relies on the following fields in the Rx prefix.
 * Some fields are mandatory, some fields are optional.
 * See sfc_ef100_rx_qstart() below.
 */
static const efx_rx_prefix_layout_t sfc_ef100_rx_prefix_layout = {
	.erpl_fields	= {
#define	SFC_EF100_RX_PREFIX_FIELD(_name, _big_endian) \
	EFX_RX_PREFIX_FIELD(_name, ESF_GZ_RX_PREFIX_ ## _name, _big_endian)

		SFC_EF100_RX_PREFIX_FIELD(LENGTH, B_FALSE),
		SFC_EF100_RX_PREFIX_FIELD(RSS_HASH_VALID, B_FALSE),
		SFC_EF100_RX_PREFIX_FIELD(CLASS, B_FALSE),
		EFX_RX_PREFIX_FIELD(INGRESS_MPORT,
				    ESF_GZ_RX_PREFIX_INGRESS_MPORT, B_FALSE),
		SFC_EF100_RX_PREFIX_FIELD(RSS_HASH, B_FALSE),
		SFC_EF100_RX_PREFIX_FIELD(USER_FLAG, B_FALSE),
		SFC_EF100_RX_PREFIX_FIELD(USER_MARK, B_FALSE),
		SFC_EF100_RX_PREFIX_FIELD(VLAN_STRIP_TCI, B_FALSE),

#undef	SFC_EF100_RX_PREFIX_FIELD
	}
};

static bool
sfc_ef100_rx_prefix_to_offloads(const struct sfc_ef100_rxq *rxq,
				const efx_xword_t *rx_prefix,
				struct rte_mbuf *m)
{
	const efx_word_t *class;
	uint64_t ol_flags = 0;

	RTE_BUILD_BUG_ON(EFX_LOW_BIT(ESF_GZ_RX_PREFIX_CLASS) % CHAR_BIT != 0);
	RTE_BUILD_BUG_ON(EFX_WIDTH(ESF_GZ_RX_PREFIX_CLASS) % CHAR_BIT != 0);
	RTE_BUILD_BUG_ON(EFX_WIDTH(ESF_GZ_RX_PREFIX_CLASS) / CHAR_BIT !=
			 sizeof(*class));
	class = (const efx_word_t *)((const uint8_t *)rx_prefix +
		EFX_LOW_BIT(ESF_GZ_RX_PREFIX_CLASS) / CHAR_BIT);
	if (unlikely(EFX_WORD_FIELD(*class,
				    ESF_GZ_RX_PREFIX_HCLASS_L2_STATUS) !=
		     ESE_GZ_RH_HCLASS_L2_STATUS_OK))
		return false;

	m->packet_type = sfc_ef100_rx_class_decode(*class, &ol_flags);

	if ((rxq->flags & SFC_EF100_RXQ_RSS_HASH) &&
	    EFX_TEST_XWORD_BIT(rx_prefix[0],
			       ESF_GZ_RX_PREFIX_RSS_HASH_VALID_LBN)) {
		ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
		/* EFX_XWORD_FIELD converts little-endian to CPU */
		m->hash.rss = EFX_XWORD_FIELD(rx_prefix[0],
					      ESF_GZ_RX_PREFIX_RSS_HASH);
	}

	if (rxq->flags & SFC_EF100_RXQ_USER_FLAG) {
		uint32_t user_flag;

		user_flag = EFX_XWORD_FIELD(rx_prefix[0],
					    ESF_GZ_RX_PREFIX_USER_FLAG);
		if (user_flag != 0)
			ol_flags |= RTE_MBUF_F_RX_FDIR;
	}

	if (rxq->flags & SFC_EF100_RXQ_USER_MARK) {
		uint8_t ft_ctx_mark;
		uint32_t user_mark;
		uint32_t mark;

		/* EFX_XWORD_FIELD converts little-endian to CPU */
		mark = EFX_XWORD_FIELD(rx_prefix[0],
				       ESF_GZ_RX_PREFIX_USER_MARK);

		user_mark = mark & rxq->user_mark_mask;
		if (user_mark != SFC_EF100_USER_MARK_INVALID) {
			ol_flags |= RTE_MBUF_F_RX_FDIR | RTE_MBUF_F_RX_FDIR_ID;
			m->hash.fdir.hi = user_mark;
		}

		ft_ctx_mark = SFC_FT_FLOW_MARK_TO_CTX_MARK(mark);
		if (ft_ctx_mark != SFC_FT_CTX_MARK_INVALID) {
			sfc_ft_ctx_id_t ft_ctx_id;

			ft_ctx_id = SFC_FT_CTX_MARK_TO_CTX_ID(ft_ctx_mark);

			ol_flags |= sfc_dp_ft_ctx_id_valid;
			*RTE_MBUF_DYNFIELD(m, sfc_dp_ft_ctx_id_offset,
					   sfc_ft_ctx_id_t *) = ft_ctx_id;
		}
	}

	if (rxq->flags & SFC_EF100_RXQ_INGRESS_MPORT) {
		ol_flags |= sfc_dp_mport_override;
		*RTE_MBUF_DYNFIELD(m,
			sfc_dp_mport_offset,
			typeof(&((efx_mport_id_t *)0)->id)) =
				EFX_XWORD_FIELD(rx_prefix[0],
						ESF_GZ_RX_PREFIX_INGRESS_MPORT);
	}

	if ((rxq->flags & SFC_EF100_RXQ_VLAN_STRIPPED_TCI) &&
	    EFX_TEST_XWORD_BIT(rx_prefix[0],
			       ESF_GZ_RX_PREFIX_VLAN_STRIPPED_LBN)) {
		ol_flags |= RTE_MBUF_F_RX_VLAN | RTE_MBUF_F_RX_VLAN_STRIPPED;
		m->vlan_tci = EFX_XWORD_FIELD(rx_prefix[0],
					      ESF_GZ_RX_PREFIX_VLAN_STRIP_TCI);
	}

	m->ol_flags = ol_flags;
	return true;
}

static const uint8_t *
sfc_ef100_rx_pkt_prefix(const struct rte_mbuf *m)
{
	return (const uint8_t *)m->buf_addr + RTE_PKTMBUF_HEADROOM;
}
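
/*
 * Buffer layout reminder: buffers are posted at
 * buf_addr + RTE_PKTMBUF_HEADROOM (see rte_mbuf_data_iova_default()
 * in sfc_ef100_rx_qrefill()), so the NIC writes the Rx prefix there
 * and the rearm data moves data_off past it (see
 * sfc_ef100_mk_mbuf_rearm_data() below).
 */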

static struct rte_mbuf *
sfc_ef100_rx_next_mbuf(struct sfc_ef100_rxq *rxq)
{
	struct rte_mbuf *m;
	unsigned int id;

	/* mbuf associated with current Rx descriptor */
	m = rxq->sw_ring[rxq->completed++ & rxq->ptr_mask].mbuf;

	/* completed is already moved to the next one */
	if (unlikely(rxq->completed == rxq->added))
		goto done;

	/*
	 * Prefetch the Rx prefix of the next packet. If the current
	 * packet is scattered and the next mbuf is its fragment, this
	 * simply prefetches some data - no harm since the packet rate
	 * should not be high if scatter is used.
	 */
	id = rxq->completed & rxq->ptr_mask;
	rte_prefetch0(sfc_ef100_rx_pkt_prefix(rxq->sw_ring[id].mbuf));

	if (unlikely(rxq->completed + 1 == rxq->added))
		goto done;

	/*
	 * Prefetch the mbuf control structure of the next after next
	 * Rx descriptor.
	 */
	id = (id == rxq->ptr_mask) ? 0 : (id + 1);
	rte_mbuf_prefetch_part1(rxq->sw_ring[id].mbuf);

	/*
	 * If the next time we'll need a SW Rx descriptor from the next
	 * cache line, try to make sure that we have it in cache.
	 */
	if ((id & 0x7) == 0x7)
		rte_prefetch0(&rxq->sw_ring[(id + 1) & rxq->ptr_mask]);

done:
	return m;
}
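
/*
 * Note: each sw_ring entry is a single mbuf pointer, so on a typical
 * 64-bit build with 64-byte cache lines eight descriptors share one
 * line; the "(id & 0x7) == 0x7" check above prefetches the next line
 * one descriptor ahead of its first use.
 */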

static struct rte_mbuf **
sfc_ef100_rx_process_ready_pkts(struct sfc_ef100_rxq *rxq,
				struct rte_mbuf **rx_pkts,
				struct rte_mbuf ** const rx_pkts_end)
{
	while (rxq->ready_pkts > 0 && rx_pkts != rx_pkts_end) {
		struct rte_mbuf *pkt;
		struct rte_mbuf *lastseg;
		const efx_xword_t *rx_prefix;
		uint16_t pkt_len;
		uint16_t seg_len;
		bool deliver;

		rxq->ready_pkts--;

		pkt = sfc_ef100_rx_next_mbuf(rxq);
		__rte_mbuf_raw_sanity_check(pkt);

		RTE_BUILD_BUG_ON(sizeof(pkt->rearm_data[0]) !=
				 sizeof(rxq->rearm_data));
		pkt->rearm_data[0] = rxq->rearm_data;

		/* data_off already moved past the Rx prefix */
		rx_prefix = (const efx_xword_t *)sfc_ef100_rx_pkt_prefix(pkt);

		pkt_len = EFX_XWORD_FIELD(rx_prefix[0],
					  ESF_GZ_RX_PREFIX_LENGTH);
		SFC_ASSERT(pkt_len > 0);
		rte_pktmbuf_pkt_len(pkt) = pkt_len;

		seg_len = RTE_MIN(pkt_len, rxq->buf_size - rxq->prefix_size);
		rte_pktmbuf_data_len(pkt) = seg_len;

		deliver = sfc_ef100_rx_prefix_to_offloads(rxq, rx_prefix, pkt);

		lastseg = pkt;
		while ((pkt_len -= seg_len) > 0) {
			struct rte_mbuf *seg;

			seg = sfc_ef100_rx_next_mbuf(rxq);
			__rte_mbuf_raw_sanity_check(seg);

			seg->data_off = RTE_PKTMBUF_HEADROOM;

			seg_len = RTE_MIN(pkt_len, rxq->buf_size);
			rte_pktmbuf_data_len(seg) = seg_len;
			rte_pktmbuf_pkt_len(seg) = seg_len;

			pkt->nb_segs++;
			lastseg->next = seg;
			lastseg = seg;
		}

		if (likely(deliver)) {
			*rx_pkts++ = pkt;
			sfc_pkts_bytes_add(&rxq->dp.dpq.stats, 1,
					   rte_pktmbuf_pkt_len(pkt));
		} else {
			rte_pktmbuf_free(pkt);
		}
	}

	return rx_pkts;
}
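
/*
 * Scatter example (illustrative numbers): with 2048-byte buffers and
 * a 32-byte Rx prefix, a 5000-byte packet arrives as a 2016-byte head
 * segment (buf_size - prefix_size) followed by 2048-byte and 936-byte
 * fragments, i.e. nb_segs == 3 and pkt_len == 5000.
 */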

static bool
sfc_ef100_rx_get_event(struct sfc_ef100_rxq *rxq, efx_qword_t *ev)
{
	*ev = rxq->evq_hw_ring[rxq->evq_read_ptr & rxq->ptr_mask];

	if (!sfc_ef100_ev_present(ev,
			(rxq->evq_read_ptr >> rxq->evq_phase_bit_shift) & 1))
		return false;

	if (unlikely(!sfc_ef100_ev_type_is(ev, ESE_GZ_EF100_EV_RX_PKTS))) {
		/*
		 * Do not move read_ptr to keep the event for exception
		 * handling by the control path.
		 */
		rxq->flags |= SFC_EF100_RXQ_EXCEPTION;
		sfc_ef100_rx_err(rxq,
			"RxQ exception at EvQ ptr %u(%#x), event %08x:%08x",
			rxq->evq_read_ptr, rxq->evq_read_ptr & rxq->ptr_mask,
			EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
			EFX_QWORD_FIELD(*ev, EFX_DWORD_0));
		return false;
	}

	sfc_ef100_rx_debug(rxq, "RxQ got event %08x:%08x at %u (%#x)",
			   EFX_QWORD_FIELD(*ev, EFX_DWORD_1),
			   EFX_QWORD_FIELD(*ev, EFX_DWORD_0),
			   rxq->evq_read_ptr,
			   rxq->evq_read_ptr & rxq->ptr_mask);

	rxq->evq_read_ptr++;
	return true;
}
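
/*
 * Phase bit example: evq_phase_bit_shift is rte_bsf32(evq_entries),
 * e.g. 10 for a 1024-entry event queue. Bit 10 of evq_read_ptr thus
 * flips on every wrap of the ring, and sfc_ef100_ev_present() uses it
 * to tell freshly written events from stale ones left over from the
 * previous lap.
 */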

static uint16_t
sfc_ef100_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(rx_queue);
	struct rte_mbuf ** const rx_pkts_end = &rx_pkts[nb_pkts];
	efx_qword_t rx_ev;

	rx_pkts = sfc_ef100_rx_process_ready_pkts(rxq, rx_pkts, rx_pkts_end);

	if (unlikely(rxq->flags &
		     (SFC_EF100_RXQ_NOT_RUNNING | SFC_EF100_RXQ_EXCEPTION)))
		goto done;

	while (rx_pkts != rx_pkts_end && sfc_ef100_rx_get_event(rxq, &rx_ev)) {
		rxq->ready_pkts =
			EFX_QWORD_FIELD(rx_ev, ESF_GZ_EV_RXPKTS_NUM_PKT);
		rx_pkts = sfc_ef100_rx_process_ready_pkts(rxq, rx_pkts,
							  rx_pkts_end);
	}

	/* It is not a problem if we refill in the case of exception */
	sfc_ef100_rx_qrefill(rxq);

	if ((rxq->flags & SFC_EF100_RXQ_FLAG_INTR_EN) &&
	    rxq->evq_read_ptr_primed != rxq->evq_read_ptr)
		sfc_ef100_rx_qprime(rxq);

done:
	return nb_pkts - (rx_pkts_end - rx_pkts);
}

static const uint32_t *
sfc_ef100_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps,
			       size_t *no_of_elements)
{
	static const uint32_t ef100_native_ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L2_ETHER_QINQ,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_TUNNEL_VXLAN,
		RTE_PTYPE_TUNNEL_NVGRE,
		RTE_PTYPE_TUNNEL_GENEVE,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
	};

	*no_of_elements = RTE_DIM(ef100_native_ptypes);
	return ef100_native_ptypes;
}

static sfc_dp_rx_qdesc_npending_t sfc_ef100_rx_qdesc_npending;
static unsigned int
sfc_ef100_rx_qdesc_npending(__rte_unused struct sfc_dp_rxq *dp_rxq)
{
	return 0;
}

static sfc_dp_rx_qdesc_status_t sfc_ef100_rx_qdesc_status;
static int
sfc_ef100_rx_qdesc_status(__rte_unused struct sfc_dp_rxq *dp_rxq,
			  __rte_unused uint16_t offset)
{
	return -ENOTSUP;
}


static sfc_dp_rx_get_dev_info_t sfc_ef100_rx_get_dev_info;
static void
sfc_ef100_rx_get_dev_info(struct rte_eth_dev_info *dev_info)
{
	/*
	 * Number of descriptors just defines maximum number of pushed
	 * descriptors (fill level).
	 */
	dev_info->rx_desc_lim.nb_min = SFC_RX_REFILL_BULK;
	dev_info->rx_desc_lim.nb_align = SFC_RX_REFILL_BULK;
}


static sfc_dp_rx_qsize_up_rings_t sfc_ef100_rx_qsize_up_rings;
static int
sfc_ef100_rx_qsize_up_rings(uint16_t nb_rx_desc,
			   struct sfc_dp_rx_hw_limits *limits,
			   __rte_unused struct rte_mempool *mb_pool,
			   unsigned int *rxq_entries,
			   unsigned int *evq_entries,
			   unsigned int *rxq_max_fill_level)
{
	/*
	 * rte_ethdev API guarantees that the number meets min, max and
	 * alignment requirements.
	 */
	if (nb_rx_desc <= limits->rxq_min_entries)
		*rxq_entries = limits->rxq_min_entries;
	else
		*rxq_entries = rte_align32pow2(nb_rx_desc);

	*evq_entries = *rxq_entries;

	*rxq_max_fill_level = RTE_MIN(nb_rx_desc,
				      SFC_EF100_RXQ_LIMIT(*evq_entries));
	return 0;
}
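
/*
 * Example (assuming rxq_min_entries does not exceed the request):
 * nb_rx_desc == 1000 gives *rxq_entries == *evq_entries == 1024 (the
 * next power of two) and *rxq_max_fill_level ==
 * RTE_MIN(1000, SFC_EF100_RXQ_LIMIT(1024)) == RTE_MIN(1000, 1021) ==
 * 1000.
 */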


static uint64_t
sfc_ef100_mk_mbuf_rearm_data(uint16_t port_id, uint16_t prefix_size)
{
	struct rte_mbuf m;

	memset(&m, 0, sizeof(m));

	rte_mbuf_refcnt_set(&m, 1);
	m.data_off = RTE_PKTMBUF_HEADROOM + prefix_size;
	m.nb_segs = 1;
	m.port = port_id;

	/* rearm_data covers structure members filled in above */
	rte_compiler_barrier();
	RTE_BUILD_BUG_ON(sizeof(m.rearm_data[0]) != sizeof(uint64_t));
	return m.rearm_data[0];
}
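
/*
 * The trick above: rearm_data is a marker field in struct rte_mbuf
 * that aliases data_off, refcnt, nb_segs and port as one 64-bit word.
 * Computing that word once per queue lets the hot path reinitialize
 * all four fields with a single store of rxq->rearm_data (see
 * sfc_ef100_rx_process_ready_pkts()).
 */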

static sfc_dp_rx_qcreate_t sfc_ef100_rx_qcreate;
static int
sfc_ef100_rx_qcreate(uint16_t port_id, uint16_t queue_id,
		    const struct rte_pci_addr *pci_addr, int socket_id,
		    const struct sfc_dp_rx_qcreate_info *info,
		    struct sfc_dp_rxq **dp_rxqp)
{
	struct sfc_ef100_rxq *rxq;
	int rc;

	rc = EINVAL;
	if (info->rxq_entries != info->evq_entries)
		goto fail_rxq_args;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-ef100-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_ring = rte_calloc_socket("sfc-ef100-rxq-sw_ring",
					 info->rxq_entries,
					 sizeof(*rxq->sw_ring),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_ring == NULL)
		goto fail_desc_alloc;

	rxq->flags |= SFC_EF100_RXQ_NOT_RUNNING;
	rxq->ptr_mask = info->rxq_entries - 1;
	rxq->evq_phase_bit_shift = rte_bsf32(info->evq_entries);
	rxq->evq_hw_ring = info->evq_hw_ring;
	rxq->max_fill_level = info->max_fill_level;
	rxq->refill_threshold = info->refill_threshold;
	rxq->prefix_size = info->prefix_size;

	SFC_ASSERT(info->user_mark_mask != 0);
	rxq->user_mark_mask = info->user_mark_mask;

	rxq->buf_size = info->buf_size;
	rxq->refill_mb_pool = info->refill_mb_pool;
	rxq->rxq_hw_ring = info->rxq_hw_ring;
	rxq->doorbell = (volatile uint8_t *)info->mem_bar +
			ER_GZ_RX_RING_DOORBELL_OFST +
			(info->hw_index << info->vi_window_shift);

	rxq->evq_hw_index = info->evq_hw_index;
	rxq->evq_prime = (volatile uint8_t *)info->mem_bar +
			 info->fcw_offset +
			 ER_GZ_EVQ_INT_PRIME_OFST;

	rxq->nic_dma_info = info->nic_dma_info;
	if (rxq->nic_dma_info->nb_regions > 0)
		rxq->flags |= SFC_EF100_RXQ_NIC_DMA_MAP;

	if (info->flags & SFC_RXQ_FLAG_INGRESS_MPORT)
		rxq->flags |= SFC_EF100_RXQ_INGRESS_MPORT;

	if (info->flags & SFC_RXQ_FLAG_VLAN_STRIPPED_TCI)
		rxq->flags |= SFC_EF100_RXQ_VLAN_STRIPPED_TCI;

	sfc_ef100_rx_debug(rxq, "RxQ doorbell is %p", rxq->doorbell);

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
fail_rxq_args:
	return rc;
}

static sfc_dp_rx_qdestroy_t sfc_ef100_rx_qdestroy;
static void
sfc_ef100_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_ring);
	rte_free(rxq);
}

static sfc_dp_rx_qstart_t sfc_ef100_rx_qstart;
static int
sfc_ef100_rx_qstart(struct sfc_dp_rxq *dp_rxq, unsigned int evq_read_ptr,
		    const efx_rx_prefix_layout_t *pinfo)
{
	struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);
	uint32_t unsup_rx_prefix_fields;

	SFC_ASSERT(rxq->completed == 0);
	SFC_ASSERT(rxq->added == 0);

	/* Prefix must fit into reserved Rx buffer space */
	if (pinfo->erpl_length > rxq->prefix_size)
		return ENOTSUP;

	unsup_rx_prefix_fields =
		efx_rx_prefix_layout_check(pinfo, &sfc_ef100_rx_prefix_layout);

	/* LENGTH and CLASS fields must always be present */
	if ((unsup_rx_prefix_fields &
	     ((1U << EFX_RX_PREFIX_FIELD_LENGTH) |
	      (1U << EFX_RX_PREFIX_FIELD_CLASS))) != 0)
		return ENOTSUP;

	if ((unsup_rx_prefix_fields &
	     ((1U << EFX_RX_PREFIX_FIELD_RSS_HASH_VALID) |
	      (1U << EFX_RX_PREFIX_FIELD_RSS_HASH))) == 0)
		rxq->flags |= SFC_EF100_RXQ_RSS_HASH;
	else
		rxq->flags &= ~SFC_EF100_RXQ_RSS_HASH;

	if ((unsup_rx_prefix_fields &
	     (1U << EFX_RX_PREFIX_FIELD_USER_FLAG)) == 0)
		rxq->flags |= SFC_EF100_RXQ_USER_FLAG;
	else
		rxq->flags &= ~SFC_EF100_RXQ_USER_FLAG;

	if ((unsup_rx_prefix_fields &
	     (1U << EFX_RX_PREFIX_FIELD_USER_MARK)) == 0)
		rxq->flags |= SFC_EF100_RXQ_USER_MARK;
	else
		rxq->flags &= ~SFC_EF100_RXQ_USER_MARK;

	/*
	 * At the moment, this feature is used only
	 * by the representor proxy Rx queue and is
	 * essential for representor support, so if
	 * it has been requested but is unsupported,
	 * point this inconsistency out to the user.
	 */
	if ((unsup_rx_prefix_fields &
	     (1U << EFX_RX_PREFIX_FIELD_INGRESS_MPORT)) &&
	    (rxq->flags & SFC_EF100_RXQ_INGRESS_MPORT))
		return ENOTSUP;

	rxq->prefix_size = pinfo->erpl_length;
	rxq->rearm_data = sfc_ef100_mk_mbuf_rearm_data(rxq->dp.dpq.port_id,
						       rxq->prefix_size);

	sfc_ef100_rx_qrefill(rxq);

	rxq->evq_read_ptr = evq_read_ptr;

	rxq->flags |= SFC_EF100_RXQ_STARTED;
	rxq->flags &= ~(SFC_EF100_RXQ_NOT_RUNNING | SFC_EF100_RXQ_EXCEPTION);

	if (rxq->flags & SFC_EF100_RXQ_FLAG_INTR_EN)
		sfc_ef100_rx_qprime(rxq);

	return 0;
}
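
/*
 * To summarise the checks above: efx_rx_prefix_layout_check() returns
 * a bitmask of fields from sfc_ef100_rx_prefix_layout that the actual
 * prefix layout does not provide in a compatible form. Missing LENGTH
 * or CLASS is fatal; the optional fields merely set or clear the
 * corresponding SFC_EF100_RXQ_* capability flags.
 */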

static sfc_dp_rx_qstop_t sfc_ef100_rx_qstop;
static void
sfc_ef100_rx_qstop(struct sfc_dp_rxq *dp_rxq, unsigned int *evq_read_ptr)
{
	struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);

	rxq->flags |= SFC_EF100_RXQ_NOT_RUNNING;

	*evq_read_ptr = rxq->evq_read_ptr;
}

static sfc_dp_rx_qrx_ev_t sfc_ef100_rx_qrx_ev;
static bool
sfc_ef100_rx_qrx_ev(struct sfc_dp_rxq *dp_rxq, __rte_unused unsigned int id)
{
	__rte_unused struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);

	SFC_ASSERT(rxq->flags & SFC_EF100_RXQ_NOT_RUNNING);

	/*
	 * It is safe to ignore the Rx event since we free all mbufs on
	 * queue purge anyway.
	 */

	return false;
}

static sfc_dp_rx_qpurge_t sfc_ef100_rx_qpurge;
static void
sfc_ef100_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	struct sfc_ef100_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_ring[i & rxq->ptr_mask];
		rte_mbuf_raw_free(rxd->mbuf);
		rxd->mbuf = NULL;
	}

	rxq->completed = rxq->added = 0;
	rxq->ready_pkts = 0;

	rxq->flags &= ~SFC_EF100_RXQ_STARTED;
}

static sfc_dp_rx_intr_enable_t sfc_ef100_rx_intr_enable;
static int
sfc_ef100_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);

	rxq->flags |= SFC_EF100_RXQ_FLAG_INTR_EN;
	if (rxq->flags & SFC_EF100_RXQ_STARTED)
		sfc_ef100_rx_qprime(rxq);
	return 0;
}

static sfc_dp_rx_intr_disable_t sfc_ef100_rx_intr_disable;
static int
sfc_ef100_rx_intr_disable(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);

	/* Cannot disarm, just disable rearm */
	rxq->flags &= ~SFC_EF100_RXQ_FLAG_INTR_EN;
	return 0;
}

static sfc_dp_rx_get_pushed_t sfc_ef100_rx_get_pushed;
static unsigned int
sfc_ef100_rx_get_pushed(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_ef100_rxq *rxq = sfc_ef100_rxq_by_dp_rxq(dp_rxq);

	/*
	 * The datapath keeps track only of added descriptors, since
	 * the number of pushed descriptors always equals the number
	 * of added descriptors due to enforced alignment.
	 */
	return rxq->added;
}

struct sfc_dp_rx sfc_ef100_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EF100,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_EF100,
	},
	.features		= SFC_DP_RX_FEAT_MULTI_PROCESS |
				  SFC_DP_RX_FEAT_FLOW_FLAG |
				  SFC_DP_RX_FEAT_FLOW_MARK |
				  SFC_DP_RX_FEAT_INTR |
				  SFC_DP_RX_FEAT_STATS,
	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_KEEP_CRC |
				  RTE_ETH_RX_OFFLOAD_VLAN_STRIP,
	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
				  RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM |
				  RTE_ETH_RX_OFFLOAD_OUTER_UDP_CKSUM |
				  RTE_ETH_RX_OFFLOAD_SCATTER |
				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
	.get_dev_info		= sfc_ef100_rx_get_dev_info,
	.qsize_up_rings		= sfc_ef100_rx_qsize_up_rings,
	.qcreate		= sfc_ef100_rx_qcreate,
	.qdestroy		= sfc_ef100_rx_qdestroy,
	.qstart			= sfc_ef100_rx_qstart,
	.qstop			= sfc_ef100_rx_qstop,
	.qrx_ev			= sfc_ef100_rx_qrx_ev,
	.qpurge			= sfc_ef100_rx_qpurge,
	.supported_ptypes_get	= sfc_ef100_supported_ptypes_get,
	.qdesc_npending		= sfc_ef100_rx_qdesc_npending,
	.qdesc_status		= sfc_ef100_rx_qdesc_status,
	.intr_enable		= sfc_ef100_rx_intr_enable,
	.intr_disable		= sfc_ef100_rx_intr_disable,
	.get_pushed		= sfc_ef100_rx_get_pushed,
	.pkt_burst		= sfc_ef100_recv_pkts,
};