xref: /dpdk/drivers/net/sfc/sfc_rx.c (revision ba6a168a06581b5b3d523f984722a3e5f65bbb82)
144c0947bSAndrew Rybchenko /* SPDX-License-Identifier: BSD-3-Clause
2244cfa79SAndrew Rybchenko  *
398d26ef7SAndrew Rybchenko  * Copyright(c) 2019-2021 Xilinx, Inc.
4a0147be5SAndrew Rybchenko  * Copyright(c) 2016-2019 Solarflare Communications Inc.
5a8e64c6bSAndrew Rybchenko  *
6a8e64c6bSAndrew Rybchenko  * This software was jointly developed between OKTET Labs (under contract
7a8e64c6bSAndrew Rybchenko  * for Solarflare) and Solarflare Communications, Inc.
8a8e64c6bSAndrew Rybchenko  */
9a8e64c6bSAndrew Rybchenko 
1028944ac0SAndrew Rybchenko #include <rte_mempool.h>
1128944ac0SAndrew Rybchenko 
12a8e64c6bSAndrew Rybchenko #include "efx.h"
13a8e64c6bSAndrew Rybchenko 
14a8e64c6bSAndrew Rybchenko #include "sfc.h"
15921f6cf1SAndrew Rybchenko #include "sfc_debug.h"
1653a80512SIvan Malov #include "sfc_flow_tunnel.h"
17a8e64c6bSAndrew Rybchenko #include "sfc_log.h"
18ce35b05cSAndrew Rybchenko #include "sfc_ev.h"
19a8e64c6bSAndrew Rybchenko #include "sfc_rx.h"
20983ce116SIgor Romanov #include "sfc_mae_counter.h"
21df1bfde4SAndrew Rybchenko #include "sfc_kvargs.h"
2228944ac0SAndrew Rybchenko #include "sfc_tweak.h"
2328944ac0SAndrew Rybchenko 
2428944ac0SAndrew Rybchenko /*
2528944ac0SAndrew Rybchenko  * Maximum number of Rx queue flush attempts in the case of failure or
2628944ac0SAndrew Rybchenko  * flush timeout
2728944ac0SAndrew Rybchenko  */
2828944ac0SAndrew Rybchenko #define SFC_RX_QFLUSH_ATTEMPTS		(3)
2928944ac0SAndrew Rybchenko 
3028944ac0SAndrew Rybchenko /*
3128944ac0SAndrew Rybchenko  * Time to wait between event queue polling attempts when waiting for Rx
3228944ac0SAndrew Rybchenko  * queue flush done or failed events.
3328944ac0SAndrew Rybchenko  */
3428944ac0SAndrew Rybchenko #define SFC_RX_QFLUSH_POLL_WAIT_MS	(1)
3528944ac0SAndrew Rybchenko 
3628944ac0SAndrew Rybchenko /*
3728944ac0SAndrew Rybchenko  * Maximum number of event queue polling attempts when waiting for Rx queue
3828944ac0SAndrew Rybchenko  * flush done or failed events. It defines Rx queue flush attempt timeout
3928944ac0SAndrew Rybchenko  * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
4028944ac0SAndrew Rybchenko  */
4128944ac0SAndrew Rybchenko #define SFC_RX_QFLUSH_POLL_ATTEMPTS	(2000)
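/*
 * Combined, the constants above bound one flush attempt by roughly
 * SFC_RX_QFLUSH_POLL_ATTEMPTS * SFC_RX_QFLUSH_POLL_WAIT_MS = 2000 * 1 ms = 2 s,
 * and with up to SFC_RX_QFLUSH_ATTEMPTS retries the whole flush may take
 * about 3 * 2 s = 6 s in the worst case (see sfc_rx_qflush()).
 */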
4228944ac0SAndrew Rybchenko 
4328944ac0SAndrew Rybchenko void
442e42d78dSAndrew Rybchenko sfc_rx_qflush_done(struct sfc_rxq_info *rxq_info)
4528944ac0SAndrew Rybchenko {
462e42d78dSAndrew Rybchenko 	rxq_info->state |= SFC_RXQ_FLUSHED;
472e42d78dSAndrew Rybchenko 	rxq_info->state &= ~SFC_RXQ_FLUSHING;
4828944ac0SAndrew Rybchenko }
4928944ac0SAndrew Rybchenko 
5028944ac0SAndrew Rybchenko void
512e42d78dSAndrew Rybchenko sfc_rx_qflush_failed(struct sfc_rxq_info *rxq_info)
5228944ac0SAndrew Rybchenko {
532e42d78dSAndrew Rybchenko 	rxq_info->state |= SFC_RXQ_FLUSH_FAILED;
542e42d78dSAndrew Rybchenko 	rxq_info->state &= ~SFC_RXQ_FLUSHING;
5528944ac0SAndrew Rybchenko }
5628944ac0SAndrew Rybchenko 
57a9a238e9SIgor Romanov /* This returns the running counter, which is not bounded by ring size */
58a9a238e9SIgor Romanov unsigned int
59a9a238e9SIgor Romanov sfc_rx_get_pushed(struct sfc_adapter *sa, struct sfc_dp_rxq *dp_rxq)
60a9a238e9SIgor Romanov {
61a9a238e9SIgor Romanov 	SFC_ASSERT(sa->priv.dp_rx->get_pushed != NULL);
62a9a238e9SIgor Romanov 
63a9a238e9SIgor Romanov 	return sa->priv.dp_rx->get_pushed(dp_rxq);
64a9a238e9SIgor Romanov }
65a9a238e9SIgor Romanov 
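/*
 * Re-arm the event queue interrupt only if the EvQ read pointer has
 * advanced since the last prime; otherwise priming would be redundant.
 */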
664279b54eSGeorgiy Levashov static int
674279b54eSGeorgiy Levashov sfc_efx_rx_qprime(struct sfc_efx_rxq *rxq)
684279b54eSGeorgiy Levashov {
694279b54eSGeorgiy Levashov 	int rc = 0;
704279b54eSGeorgiy Levashov 
714279b54eSGeorgiy Levashov 	if (rxq->evq->read_ptr_primed != rxq->evq->read_ptr) {
724279b54eSGeorgiy Levashov 		rc = efx_ev_qprime(rxq->evq->common, rxq->evq->read_ptr);
734279b54eSGeorgiy Levashov 		if (rc == 0)
744279b54eSGeorgiy Levashov 			rxq->evq->read_ptr_primed = rxq->evq->read_ptr;
754279b54eSGeorgiy Levashov 	}
764279b54eSGeorgiy Levashov 	return rc;
774279b54eSGeorgiy Levashov }
784279b54eSGeorgiy Levashov 
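/*
 * Refill the Rx ring from the mempool in bulks of SFC_RX_REFILL_BULK
 * buffers and push the doorbell once for everything posted.
 */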
7928944ac0SAndrew Rybchenko static void
80df1bfde4SAndrew Rybchenko sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
8128944ac0SAndrew Rybchenko {
8228944ac0SAndrew Rybchenko 	unsigned int free_space;
8328944ac0SAndrew Rybchenko 	unsigned int bulks;
8428944ac0SAndrew Rybchenko 	void *objs[SFC_RX_REFILL_BULK];
8528944ac0SAndrew Rybchenko 	efsys_dma_addr_t addr[RTE_DIM(objs)];
8628944ac0SAndrew Rybchenko 	unsigned int added = rxq->added;
8728944ac0SAndrew Rybchenko 	unsigned int id;
8828944ac0SAndrew Rybchenko 	unsigned int i;
89df1bfde4SAndrew Rybchenko 	struct sfc_efx_rx_sw_desc *rxd;
9028944ac0SAndrew Rybchenko 	struct rte_mbuf *m;
91df1bfde4SAndrew Rybchenko 	uint16_t port_id = rxq->dp.dpq.port_id;
9228944ac0SAndrew Rybchenko 
93e5595ee2SAndrew Rybchenko 	free_space = rxq->max_fill_level - (added - rxq->completed);
949e612223SAndrew Rybchenko 
959e612223SAndrew Rybchenko 	if (free_space < rxq->refill_threshold)
969e612223SAndrew Rybchenko 		return;
979e612223SAndrew Rybchenko 
9828944ac0SAndrew Rybchenko 	bulks = free_space / RTE_DIM(objs);
99f1d2d936SAndrew Rybchenko 	/* refill_threshold guarantees that bulks is positive */
100f1d2d936SAndrew Rybchenko 	SFC_ASSERT(bulks > 0);
10128944ac0SAndrew Rybchenko 
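	/*
	 * 'added' is a running (unwrapped) counter; masking it with ptr_mask
	 * (ring size minus one, the ring size being a power of two) yields
	 * the ring index.
	 */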
10228944ac0SAndrew Rybchenko 	id = added & rxq->ptr_mask;
103f1d2d936SAndrew Rybchenko 	do {
104f1d2d936SAndrew Rybchenko 		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
105f1d2d936SAndrew Rybchenko 						  RTE_DIM(objs)) < 0)) {
10628944ac0SAndrew Rybchenko 			/*
10728944ac0SAndrew Rybchenko 			 * It is hardly a safe way to increment the counter
10828944ac0SAndrew Rybchenko 			 * from different contexts, but all PMDs do it.
10928944ac0SAndrew Rybchenko 			 */
11028944ac0SAndrew Rybchenko 			rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
11128944ac0SAndrew Rybchenko 				RTE_DIM(objs);
112f1d2d936SAndrew Rybchenko 			/* Return if we have posted nothing yet */
113f1d2d936SAndrew Rybchenko 			if (added == rxq->added)
114f1d2d936SAndrew Rybchenko 				return;
115f1d2d936SAndrew Rybchenko 			/* Push posted */
11628944ac0SAndrew Rybchenko 			break;
11728944ac0SAndrew Rybchenko 		}
11828944ac0SAndrew Rybchenko 
11928944ac0SAndrew Rybchenko 		for (i = 0; i < RTE_DIM(objs);
12028944ac0SAndrew Rybchenko 		     ++i, id = (id + 1) & rxq->ptr_mask) {
12128944ac0SAndrew Rybchenko 			m = objs[i];
12228944ac0SAndrew Rybchenko 
1233a35c1c0SMorten Brørup 			__rte_mbuf_raw_sanity_check(m);
124f3a5fa85SAndrew Rybchenko 
12528944ac0SAndrew Rybchenko 			rxd = &rxq->sw_desc[id];
12628944ac0SAndrew Rybchenko 			rxd->mbuf = m;
12728944ac0SAndrew Rybchenko 
12828944ac0SAndrew Rybchenko 			m->data_off = RTE_PKTMBUF_HEADROOM;
12928944ac0SAndrew Rybchenko 			m->port = port_id;
13028944ac0SAndrew Rybchenko 
131bfa9a8a4SThomas Monjalon 			addr[i] = rte_pktmbuf_iova(m);
13228944ac0SAndrew Rybchenko 		}
13328944ac0SAndrew Rybchenko 
13428944ac0SAndrew Rybchenko 		efx_rx_qpost(rxq->common, addr, rxq->buf_size,
13528944ac0SAndrew Rybchenko 			     RTE_DIM(objs), rxq->completed, added);
13628944ac0SAndrew Rybchenko 		added += RTE_DIM(objs);
137f1d2d936SAndrew Rybchenko 	} while (--bulks > 0);
13828944ac0SAndrew Rybchenko 
139f1d2d936SAndrew Rybchenko 	SFC_ASSERT(added != rxq->added);
14028944ac0SAndrew Rybchenko 	rxq->added = added;
14128944ac0SAndrew Rybchenko 	efx_rx_qpush(rxq->common, added, &rxq->pushed);
14250448dd3SAndrew Rybchenko 	rxq->dp.dpq.dbells++;
14328944ac0SAndrew Rybchenko }
14428944ac0SAndrew Rybchenko 
14516e42e91SAndrew Rybchenko static uint64_t
146df1bfde4SAndrew Rybchenko sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
14716e42e91SAndrew Rybchenko {
14816e42e91SAndrew Rybchenko 	uint64_t mbuf_flags = 0;
14916e42e91SAndrew Rybchenko 
15016e42e91SAndrew Rybchenko 	switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
15116e42e91SAndrew Rybchenko 	case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
152daa02b5cSOlivier Matz 		mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
15316e42e91SAndrew Rybchenko 		break;
15416e42e91SAndrew Rybchenko 	case EFX_PKT_IPV4:
155daa02b5cSOlivier Matz 		mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
15616e42e91SAndrew Rybchenko 		break;
15716e42e91SAndrew Rybchenko 	default:
158daa02b5cSOlivier Matz 		RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN != 0);
159daa02b5cSOlivier Matz 		SFC_ASSERT((mbuf_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
160daa02b5cSOlivier Matz 			   RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN);
16116e42e91SAndrew Rybchenko 		break;
16216e42e91SAndrew Rybchenko 	}
16316e42e91SAndrew Rybchenko 
16416e42e91SAndrew Rybchenko 	switch ((desc_flags &
16516e42e91SAndrew Rybchenko 		 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
16616e42e91SAndrew Rybchenko 	case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
16716e42e91SAndrew Rybchenko 	case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
168daa02b5cSOlivier Matz 		mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
16916e42e91SAndrew Rybchenko 		break;
17016e42e91SAndrew Rybchenko 	case EFX_PKT_TCP:
17116e42e91SAndrew Rybchenko 	case EFX_PKT_UDP:
172daa02b5cSOlivier Matz 		mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
17316e42e91SAndrew Rybchenko 		break;
17416e42e91SAndrew Rybchenko 	default:
175daa02b5cSOlivier Matz 		RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN != 0);
176daa02b5cSOlivier Matz 		SFC_ASSERT((mbuf_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
177daa02b5cSOlivier Matz 			   RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN);
17816e42e91SAndrew Rybchenko 		break;
17916e42e91SAndrew Rybchenko 	}
18016e42e91SAndrew Rybchenko 
18116e42e91SAndrew Rybchenko 	return mbuf_flags;
18216e42e91SAndrew Rybchenko }
18316e42e91SAndrew Rybchenko 
18456349dc9SAndrew Rybchenko static uint32_t
185df1bfde4SAndrew Rybchenko sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
18656349dc9SAndrew Rybchenko {
18756349dc9SAndrew Rybchenko 	return RTE_PTYPE_L2_ETHER |
18856349dc9SAndrew Rybchenko 		((desc_flags & EFX_PKT_IPV4) ?
18956349dc9SAndrew Rybchenko 			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
19056349dc9SAndrew Rybchenko 		((desc_flags & EFX_PKT_IPV6) ?
19156349dc9SAndrew Rybchenko 			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
19256349dc9SAndrew Rybchenko 		((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
19356349dc9SAndrew Rybchenko 		((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
19456349dc9SAndrew Rybchenko }
19556349dc9SAndrew Rybchenko 
196df1bfde4SAndrew Rybchenko static const uint32_t *
197*ba6a168aSSivaramakrishnan Venkat sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps,
198*ba6a168aSSivaramakrishnan Venkat 			     size_t *no_of_elements)
199df1bfde4SAndrew Rybchenko {
200df1bfde4SAndrew Rybchenko 	static const uint32_t ptypes[] = {
201df1bfde4SAndrew Rybchenko 		RTE_PTYPE_L2_ETHER,
202df1bfde4SAndrew Rybchenko 		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
203df1bfde4SAndrew Rybchenko 		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
204df1bfde4SAndrew Rybchenko 		RTE_PTYPE_L4_TCP,
205df1bfde4SAndrew Rybchenko 		RTE_PTYPE_L4_UDP,
206df1bfde4SAndrew Rybchenko 	};
207df1bfde4SAndrew Rybchenko 
208*ba6a168aSSivaramakrishnan Venkat 	*no_of_elements = RTE_DIM(ptypes);
209df1bfde4SAndrew Rybchenko 	return ptypes;
210df1bfde4SAndrew Rybchenko }
211df1bfde4SAndrew Rybchenko 
212d9ff551fSIvan Malov static void
213df1bfde4SAndrew Rybchenko sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
214df1bfde4SAndrew Rybchenko 			struct rte_mbuf *m)
215d9ff551fSIvan Malov {
216d9ff551fSIvan Malov 	uint8_t *mbuf_data;
217d9ff551fSIvan Malov 
218d9ff551fSIvan Malov 
219df1bfde4SAndrew Rybchenko 	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
220d9ff551fSIvan Malov 		return;
221d9ff551fSIvan Malov 
222d9ff551fSIvan Malov 	mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);
223d9ff551fSIvan Malov 
224d9ff551fSIvan Malov 	if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
225d9ff551fSIvan Malov 		m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
226d9ff551fSIvan Malov 						      EFX_RX_HASHALG_TOEPLITZ,
227d9ff551fSIvan Malov 						      mbuf_data);
228d9ff551fSIvan Malov 
229daa02b5cSOlivier Matz 		m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
230d9ff551fSIvan Malov 	}
231d9ff551fSIvan Malov }
232d9ff551fSIvan Malov 
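/*
 * libefx-based Rx burst callback: poll the event queue, complete pending
 * descriptors, reassemble scattered packets, refill the ring and, if
 * interrupts are enabled, re-prime the event queue.
 */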
233df1bfde4SAndrew Rybchenko static uint16_t
234df1bfde4SAndrew Rybchenko sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
235921f6cf1SAndrew Rybchenko {
236df1bfde4SAndrew Rybchenko 	struct sfc_dp_rxq *dp_rxq = rx_queue;
237df1bfde4SAndrew Rybchenko 	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
238921f6cf1SAndrew Rybchenko 	unsigned int completed;
239921f6cf1SAndrew Rybchenko 	unsigned int prefix_size = rxq->prefix_size;
240921f6cf1SAndrew Rybchenko 	unsigned int done_pkts = 0;
24109a09b6fSAndrew Rybchenko 	boolean_t discard_next = B_FALSE;
242e0b06394SAndrew Rybchenko 	struct rte_mbuf *scatter_pkt = NULL;
243921f6cf1SAndrew Rybchenko 
244df1bfde4SAndrew Rybchenko 	if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
245921f6cf1SAndrew Rybchenko 		return 0;
246921f6cf1SAndrew Rybchenko 
247921f6cf1SAndrew Rybchenko 	sfc_ev_qpoll(rxq->evq);
248921f6cf1SAndrew Rybchenko 
249921f6cf1SAndrew Rybchenko 	completed = rxq->completed;
250921f6cf1SAndrew Rybchenko 	while (completed != rxq->pending && done_pkts < nb_pkts) {
251921f6cf1SAndrew Rybchenko 		unsigned int id;
252df1bfde4SAndrew Rybchenko 		struct sfc_efx_rx_sw_desc *rxd;
253921f6cf1SAndrew Rybchenko 		struct rte_mbuf *m;
254921f6cf1SAndrew Rybchenko 		unsigned int seg_len;
255921f6cf1SAndrew Rybchenko 		unsigned int desc_flags;
256921f6cf1SAndrew Rybchenko 
257921f6cf1SAndrew Rybchenko 		id = completed++ & rxq->ptr_mask;
258921f6cf1SAndrew Rybchenko 		rxd = &rxq->sw_desc[id];
259921f6cf1SAndrew Rybchenko 		m = rxd->mbuf;
260921f6cf1SAndrew Rybchenko 		desc_flags = rxd->flags;
261921f6cf1SAndrew Rybchenko 
26209a09b6fSAndrew Rybchenko 		if (discard_next)
26309a09b6fSAndrew Rybchenko 			goto discard;
26409a09b6fSAndrew Rybchenko 
265921f6cf1SAndrew Rybchenko 		if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
266921f6cf1SAndrew Rybchenko 			goto discard;
267921f6cf1SAndrew Rybchenko 
268921f6cf1SAndrew Rybchenko 		if (desc_flags & EFX_PKT_PREFIX_LEN) {
269921f6cf1SAndrew Rybchenko 			uint16_t tmp_size;
270921f6cf1SAndrew Rybchenko 			int rc __rte_unused;
271921f6cf1SAndrew Rybchenko 
272921f6cf1SAndrew Rybchenko 			rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
273921f6cf1SAndrew Rybchenko 				rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
274921f6cf1SAndrew Rybchenko 			SFC_ASSERT(rc == 0);
275921f6cf1SAndrew Rybchenko 			seg_len = tmp_size;
276921f6cf1SAndrew Rybchenko 		} else {
277921f6cf1SAndrew Rybchenko 			seg_len = rxd->size - prefix_size;
278921f6cf1SAndrew Rybchenko 		}
279921f6cf1SAndrew Rybchenko 
280921f6cf1SAndrew Rybchenko 		rte_pktmbuf_data_len(m) = seg_len;
281921f6cf1SAndrew Rybchenko 		rte_pktmbuf_pkt_len(m) = seg_len;
282921f6cf1SAndrew Rybchenko 
283e0b06394SAndrew Rybchenko 		if (scatter_pkt != NULL) {
284e0b06394SAndrew Rybchenko 			if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
2859137da46SAndrew Rybchenko 				rte_pktmbuf_free(scatter_pkt);
286e0b06394SAndrew Rybchenko 				goto discard;
287e0b06394SAndrew Rybchenko 			}
288e0b06394SAndrew Rybchenko 			/* The packet to deliver */
289e0b06394SAndrew Rybchenko 			m = scatter_pkt;
290e0b06394SAndrew Rybchenko 		}
291e0b06394SAndrew Rybchenko 
292e0b06394SAndrew Rybchenko 		if (desc_flags & EFX_PKT_CONT) {
293e0b06394SAndrew Rybchenko 			/* The packet is scattered, more fragments to come */
294e0b06394SAndrew Rybchenko 			scatter_pkt = m;
29598a7ea33SJerin Jacob 			/* Further fragments have no prefix */
296e0b06394SAndrew Rybchenko 			prefix_size = 0;
297e0b06394SAndrew Rybchenko 			continue;
298e0b06394SAndrew Rybchenko 		}
299e0b06394SAndrew Rybchenko 
300e0b06394SAndrew Rybchenko 		/* Scattered packet is done */
301e0b06394SAndrew Rybchenko 		scatter_pkt = NULL;
302e0b06394SAndrew Rybchenko 		/* The first fragment of the packet has prefix */
303e0b06394SAndrew Rybchenko 		prefix_size = rxq->prefix_size;
304e0b06394SAndrew Rybchenko 
305df1bfde4SAndrew Rybchenko 		m->ol_flags =
306df1bfde4SAndrew Rybchenko 			sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
307df1bfde4SAndrew Rybchenko 		m->packet_type =
308df1bfde4SAndrew Rybchenko 			sfc_efx_rx_desc_flags_to_packet_type(desc_flags);
309921f6cf1SAndrew Rybchenko 
310d9ff551fSIvan Malov 		/*
311d9ff551fSIvan Malov 		 * Extract RSS hash from the packet prefix and
312d9ff551fSIvan Malov 		 * set the corresponding field (if needed and possible)
313d9ff551fSIvan Malov 		 */
314df1bfde4SAndrew Rybchenko 		sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);
315d9ff551fSIvan Malov 
316d9ff551fSIvan Malov 		m->data_off += prefix_size;
317d9ff551fSIvan Malov 
318921f6cf1SAndrew Rybchenko 		*rx_pkts++ = m;
319921f6cf1SAndrew Rybchenko 		done_pkts++;
320921f6cf1SAndrew Rybchenko 		continue;
321921f6cf1SAndrew Rybchenko 
322921f6cf1SAndrew Rybchenko discard:
32309a09b6fSAndrew Rybchenko 		discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
32466e10b8dSAndrew Rybchenko 		rte_mbuf_raw_free(m);
325921f6cf1SAndrew Rybchenko 		rxd->mbuf = NULL;
326921f6cf1SAndrew Rybchenko 	}
327921f6cf1SAndrew Rybchenko 
328e0b06394SAndrew Rybchenko 	/* pending is only moved when the entire packet is received */
329e0b06394SAndrew Rybchenko 	SFC_ASSERT(scatter_pkt == NULL);
330e0b06394SAndrew Rybchenko 
331921f6cf1SAndrew Rybchenko 	rxq->completed = completed;
332921f6cf1SAndrew Rybchenko 
333df1bfde4SAndrew Rybchenko 	sfc_efx_rx_qrefill(rxq);
334921f6cf1SAndrew Rybchenko 
3354279b54eSGeorgiy Levashov 	if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN)
3364279b54eSGeorgiy Levashov 		sfc_efx_rx_qprime(rxq);
3374279b54eSGeorgiy Levashov 
338921f6cf1SAndrew Rybchenko 	return done_pkts;
339921f6cf1SAndrew Rybchenko }
340921f6cf1SAndrew Rybchenko 
341df1bfde4SAndrew Rybchenko static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
342df1bfde4SAndrew Rybchenko static unsigned int
343df1bfde4SAndrew Rybchenko sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
344df1bfde4SAndrew Rybchenko {
345df1bfde4SAndrew Rybchenko 	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
346df1bfde4SAndrew Rybchenko 
347df1bfde4SAndrew Rybchenko 	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
348df1bfde4SAndrew Rybchenko 		return 0;
349df1bfde4SAndrew Rybchenko 
350df1bfde4SAndrew Rybchenko 	sfc_ev_qpoll(rxq->evq);
351df1bfde4SAndrew Rybchenko 
352df1bfde4SAndrew Rybchenko 	return rxq->pending - rxq->completed;
353df1bfde4SAndrew Rybchenko }
354df1bfde4SAndrew Rybchenko 
3551d8f3a80SIvan Malov static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
3561d8f3a80SIvan Malov static int
3571d8f3a80SIvan Malov sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
3581d8f3a80SIvan Malov {
3591d8f3a80SIvan Malov 	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
3601d8f3a80SIvan Malov 
3611d8f3a80SIvan Malov 	if (unlikely(offset > rxq->ptr_mask))
3621d8f3a80SIvan Malov 		return -EINVAL;
3631d8f3a80SIvan Malov 
3641d8f3a80SIvan Malov 	/*
3651d8f3a80SIvan Malov 	 * Poll the EvQ to derive an up-to-date 'rxq->pending' figure;
3661d8f3a80SIvan Malov 	 * the queue is required to be running, but that check is
3671d8f3a80SIvan Malov 	 * omitted because the API design makes it the caller's duty
3681d8f3a80SIvan Malov 	 * to satisfy all preconditions
3691d8f3a80SIvan Malov 	 */
3701d8f3a80SIvan Malov 	SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
3711d8f3a80SIvan Malov 		   SFC_EFX_RXQ_FLAG_RUNNING);
3721d8f3a80SIvan Malov 	sfc_ev_qpoll(rxq->evq);
3731d8f3a80SIvan Malov 
3741d8f3a80SIvan Malov 	/*
3751d8f3a80SIvan Malov 	 * There is a handful of reserved entries in the ring,
3761d8f3a80SIvan Malov 	 * but an explicit check whether the offset points to
3771d8f3a80SIvan Malov 	 * a reserved entry is omitted since the two checks
3781d8f3a80SIvan Malov 	 * below rely on figures which already take the HW
3791d8f3a80SIvan Malov 	 * limits into account; thus, if an entry is reserved,
3801d8f3a80SIvan Malov 	 * the checks fail and the UNAVAIL code is returned
3811d8f3a80SIvan Malov 	 */
3821d8f3a80SIvan Malov 
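	/*
	 * Ranges relative to 'completed': offsets below (pending - completed)
	 * already carry received packets (DONE), offsets below
	 * (added - completed) are posted to the HW but still empty (AVAIL),
	 * anything beyond that is UNAVAIL.
	 */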
3831d8f3a80SIvan Malov 	if (offset < (rxq->pending - rxq->completed))
3841d8f3a80SIvan Malov 		return RTE_ETH_RX_DESC_DONE;
3851d8f3a80SIvan Malov 
3861d8f3a80SIvan Malov 	if (offset < (rxq->added - rxq->completed))
3871d8f3a80SIvan Malov 		return RTE_ETH_RX_DESC_AVAIL;
3881d8f3a80SIvan Malov 
3891d8f3a80SIvan Malov 	return RTE_ETH_RX_DESC_UNAVAIL;
3901d8f3a80SIvan Malov }
3911d8f3a80SIvan Malov 
3926c0cc77aSIgor Romanov boolean_t
3936c0cc77aSIgor Romanov sfc_rx_check_scatter(size_t pdu, size_t rx_buf_size, uint32_t rx_prefix_size,
394d41a6268SIgor Romanov 		     boolean_t rx_scatter_enabled, uint32_t rx_scatter_max,
395d41a6268SIgor Romanov 		     const char **error)
3966c0cc77aSIgor Romanov {
397d41a6268SIgor Romanov 	uint32_t effective_rx_scatter_max;
398d41a6268SIgor Romanov 	uint32_t rx_scatter_bufs;
399d41a6268SIgor Romanov 
400d41a6268SIgor Romanov 	effective_rx_scatter_max = rx_scatter_enabled ? rx_scatter_max : 1;
401d41a6268SIgor Romanov 	rx_scatter_bufs = EFX_DIV_ROUND_UP(pdu + rx_prefix_size, rx_buf_size);
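	/*
	 * Illustrative arithmetic only (hypothetical numbers): with pdu = 9000,
	 * rx_prefix_size = 22 and rx_buf_size = 2048, rx_scatter_bufs is
	 * ceil(9022 / 2048) = 5, so Rx scatter must be enabled and
	 * rx_scatter_max must be at least 5 for such a PDU to be accepted.
	 */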
402d41a6268SIgor Romanov 
403d41a6268SIgor Romanov 	if (rx_scatter_bufs > effective_rx_scatter_max) {
404d41a6268SIgor Romanov 		if (rx_scatter_enabled)
405d41a6268SIgor Romanov 			*error = "Possible number of Rx scatter buffers exceeds maximum number";
406d41a6268SIgor Romanov 		else
4076c0cc77aSIgor Romanov 			*error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
4086c0cc77aSIgor Romanov 		return B_FALSE;
4096c0cc77aSIgor Romanov 	}
4106c0cc77aSIgor Romanov 
4116c0cc77aSIgor Romanov 	return B_TRUE;
4126c0cc77aSIgor Romanov }
4136c0cc77aSIgor Romanov 
414b76e1b2cSAndrew Rybchenko /** Get Rx datapath ops by the datapath RxQ handle */
415b76e1b2cSAndrew Rybchenko const struct sfc_dp_rx *
416b76e1b2cSAndrew Rybchenko sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
417b76e1b2cSAndrew Rybchenko {
418b76e1b2cSAndrew Rybchenko 	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
419b76e1b2cSAndrew Rybchenko 	struct rte_eth_dev *eth_dev;
420b76e1b2cSAndrew Rybchenko 	struct sfc_adapter_priv *sap;
421b76e1b2cSAndrew Rybchenko 
422b76e1b2cSAndrew Rybchenko 	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
423b76e1b2cSAndrew Rybchenko 	eth_dev = &rte_eth_devices[dpq->port_id];
424b76e1b2cSAndrew Rybchenko 
425b76e1b2cSAndrew Rybchenko 	sap = sfc_adapter_priv_by_eth_dev(eth_dev);
426b76e1b2cSAndrew Rybchenko 
427b76e1b2cSAndrew Rybchenko 	return sap->dp_rx;
428b76e1b2cSAndrew Rybchenko }
429b76e1b2cSAndrew Rybchenko 
4302e42d78dSAndrew Rybchenko struct sfc_rxq_info *
4312e42d78dSAndrew Rybchenko sfc_rxq_info_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
432df1bfde4SAndrew Rybchenko {
433df1bfde4SAndrew Rybchenko 	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
434df1bfde4SAndrew Rybchenko 	struct rte_eth_dev *eth_dev;
435dda791c2SAndrew Rybchenko 	struct sfc_adapter_shared *sas;
436df1bfde4SAndrew Rybchenko 
437df1bfde4SAndrew Rybchenko 	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
438df1bfde4SAndrew Rybchenko 	eth_dev = &rte_eth_devices[dpq->port_id];
439df1bfde4SAndrew Rybchenko 
440dda791c2SAndrew Rybchenko 	sas = sfc_adapter_shared_by_eth_dev(eth_dev);
441df1bfde4SAndrew Rybchenko 
442dda791c2SAndrew Rybchenko 	SFC_ASSERT(dpq->queue_id < sas->rxq_count);
443dda791c2SAndrew Rybchenko 	return &sas->rxq_info[dpq->queue_id];
4442e42d78dSAndrew Rybchenko }
445df1bfde4SAndrew Rybchenko 
4462e42d78dSAndrew Rybchenko struct sfc_rxq *
4472e42d78dSAndrew Rybchenko sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
4482e42d78dSAndrew Rybchenko {
4494e8938ddSAndrew Rybchenko 	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
4504e8938ddSAndrew Rybchenko 	struct rte_eth_dev *eth_dev;
4514e8938ddSAndrew Rybchenko 	struct sfc_adapter *sa;
4522e42d78dSAndrew Rybchenko 
4534e8938ddSAndrew Rybchenko 	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
4544e8938ddSAndrew Rybchenko 	eth_dev = &rte_eth_devices[dpq->port_id];
4552e42d78dSAndrew Rybchenko 
4565313b441SAndrew Rybchenko 	sa = sfc_adapter_by_eth_dev(eth_dev);
4574e8938ddSAndrew Rybchenko 
458dda791c2SAndrew Rybchenko 	SFC_ASSERT(dpq->queue_id < sfc_sa2shared(sa)->rxq_count);
4594e8938ddSAndrew Rybchenko 	return &sa->rxq_ctrl[dpq->queue_id];
460df1bfde4SAndrew Rybchenko }
461df1bfde4SAndrew Rybchenko 
462f7da270aSAndrew Rybchenko static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
463f7da270aSAndrew Rybchenko static int
464f7da270aSAndrew Rybchenko sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
465048a0d1aSIgor Romanov 			  __rte_unused struct sfc_dp_rx_hw_limits *limits,
466d101da1bSAndrew Rybchenko 			  __rte_unused struct rte_mempool *mb_pool,
467f7da270aSAndrew Rybchenko 			  unsigned int *rxq_entries,
468f7da270aSAndrew Rybchenko 			  unsigned int *evq_entries,
469f7da270aSAndrew Rybchenko 			  unsigned int *rxq_max_fill_level)
470f7da270aSAndrew Rybchenko {
471f7da270aSAndrew Rybchenko 	*rxq_entries = nb_rx_desc;
472f7da270aSAndrew Rybchenko 	*evq_entries = nb_rx_desc;
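	/*
	 * The maximum fill level leaves a handful of ring entries reserved
	 * (see also the note about reserved entries in sfc_efx_rx_qdesc_status()).
	 */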
473f7da270aSAndrew Rybchenko 	*rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries);
474f7da270aSAndrew Rybchenko 	return 0;
475f7da270aSAndrew Rybchenko }
476f7da270aSAndrew Rybchenko 
477df1bfde4SAndrew Rybchenko static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
478df1bfde4SAndrew Rybchenko static int
479df1bfde4SAndrew Rybchenko sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
480df1bfde4SAndrew Rybchenko 		   const struct rte_pci_addr *pci_addr, int socket_id,
481df1bfde4SAndrew Rybchenko 		   const struct sfc_dp_rx_qcreate_info *info,
482df1bfde4SAndrew Rybchenko 		   struct sfc_dp_rxq **dp_rxqp)
483df1bfde4SAndrew Rybchenko {
484df1bfde4SAndrew Rybchenko 	struct sfc_efx_rxq *rxq;
485df1bfde4SAndrew Rybchenko 	int rc;
486df1bfde4SAndrew Rybchenko 
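	/*
	 * The libefx-based datapath has no support for NIC DMA regions,
	 * so reject configurations that require them.
	 */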
4873037e6cfSViacheslav Galaktionov 	rc = ENOTSUP;
4883037e6cfSViacheslav Galaktionov 	if (info->nic_dma_info->nb_regions > 0)
4893037e6cfSViacheslav Galaktionov 		goto fail_nic_dma;
4903037e6cfSViacheslav Galaktionov 
491df1bfde4SAndrew Rybchenko 	rc = ENOMEM;
492df1bfde4SAndrew Rybchenko 	rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
493df1bfde4SAndrew Rybchenko 				 RTE_CACHE_LINE_SIZE, socket_id);
494df1bfde4SAndrew Rybchenko 	if (rxq == NULL)
495df1bfde4SAndrew Rybchenko 		goto fail_rxq_alloc;
496df1bfde4SAndrew Rybchenko 
497df1bfde4SAndrew Rybchenko 	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);
498df1bfde4SAndrew Rybchenko 
499df1bfde4SAndrew Rybchenko 	rc = ENOMEM;
500df1bfde4SAndrew Rybchenko 	rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
501df1bfde4SAndrew Rybchenko 					 info->rxq_entries,
502df1bfde4SAndrew Rybchenko 					 sizeof(*rxq->sw_desc),
503df1bfde4SAndrew Rybchenko 					 RTE_CACHE_LINE_SIZE, socket_id);
504df1bfde4SAndrew Rybchenko 	if (rxq->sw_desc == NULL)
505df1bfde4SAndrew Rybchenko 		goto fail_desc_alloc;
506df1bfde4SAndrew Rybchenko 
507df1bfde4SAndrew Rybchenko 	/* efx datapath is bound to efx control path */
508df1bfde4SAndrew Rybchenko 	rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
509df1bfde4SAndrew Rybchenko 	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
510df1bfde4SAndrew Rybchenko 		rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
511df1bfde4SAndrew Rybchenko 	rxq->ptr_mask = info->rxq_entries - 1;
512df1bfde4SAndrew Rybchenko 	rxq->batch_max = info->batch_max;
513df1bfde4SAndrew Rybchenko 	rxq->prefix_size = info->prefix_size;
514e5595ee2SAndrew Rybchenko 	rxq->max_fill_level = info->max_fill_level;
515df1bfde4SAndrew Rybchenko 	rxq->refill_threshold = info->refill_threshold;
516df1bfde4SAndrew Rybchenko 	rxq->buf_size = info->buf_size;
517df1bfde4SAndrew Rybchenko 	rxq->refill_mb_pool = info->refill_mb_pool;
518df1bfde4SAndrew Rybchenko 
519df1bfde4SAndrew Rybchenko 	*dp_rxqp = &rxq->dp;
520df1bfde4SAndrew Rybchenko 	return 0;
521df1bfde4SAndrew Rybchenko 
522df1bfde4SAndrew Rybchenko fail_desc_alloc:
523df1bfde4SAndrew Rybchenko 	rte_free(rxq);
524df1bfde4SAndrew Rybchenko 
525df1bfde4SAndrew Rybchenko fail_rxq_alloc:
5263037e6cfSViacheslav Galaktionov fail_nic_dma:
527df1bfde4SAndrew Rybchenko 	return rc;
528df1bfde4SAndrew Rybchenko }
529df1bfde4SAndrew Rybchenko 
530df1bfde4SAndrew Rybchenko static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
531df1bfde4SAndrew Rybchenko static void
532df1bfde4SAndrew Rybchenko sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
533df1bfde4SAndrew Rybchenko {
534df1bfde4SAndrew Rybchenko 	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
535df1bfde4SAndrew Rybchenko 
536df1bfde4SAndrew Rybchenko 	rte_free(rxq->sw_desc);
537df1bfde4SAndrew Rybchenko 	rte_free(rxq);
538df1bfde4SAndrew Rybchenko }
539df1bfde4SAndrew Rybchenko 
5404279b54eSGeorgiy Levashov 
5414279b54eSGeorgiy Levashov /* Use qstop and qstart functions in the case of qstart failure */
5424279b54eSGeorgiy Levashov static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
5434279b54eSGeorgiy Levashov static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
5444279b54eSGeorgiy Levashov 
5454279b54eSGeorgiy Levashov 
546df1bfde4SAndrew Rybchenko static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
547df1bfde4SAndrew Rybchenko static int
548df1bfde4SAndrew Rybchenko sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
549c6845644SAndrew Rybchenko 		  __rte_unused unsigned int evq_read_ptr,
550c6845644SAndrew Rybchenko 		  const efx_rx_prefix_layout_t *pinfo)
551df1bfde4SAndrew Rybchenko {
552df1bfde4SAndrew Rybchenko 	/* libefx-based datapath is specific to libefx-based PMD */
553df1bfde4SAndrew Rybchenko 	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
554df1bfde4SAndrew Rybchenko 	struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
5554279b54eSGeorgiy Levashov 	int rc;
556df1bfde4SAndrew Rybchenko 
557c6845644SAndrew Rybchenko 	/*
558c6845644SAndrew Rybchenko 	 * libefx API is used to extract information from Rx prefix and
559c6845644SAndrew Rybchenko 	 * it guarantees consistency. Just do a length check to ensure
560c6845644SAndrew Rybchenko 	 * that we reserved space in Rx buffers correctly.
561c6845644SAndrew Rybchenko 	 */
562c6845644SAndrew Rybchenko 	if (rxq->prefix_size != pinfo->erpl_length)
563c6845644SAndrew Rybchenko 		return ENOTSUP;
564c6845644SAndrew Rybchenko 
565df1bfde4SAndrew Rybchenko 	rxq->common = crxq->common;
566df1bfde4SAndrew Rybchenko 
567df1bfde4SAndrew Rybchenko 	rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;
568df1bfde4SAndrew Rybchenko 
569df1bfde4SAndrew Rybchenko 	sfc_efx_rx_qrefill(rxq);
570df1bfde4SAndrew Rybchenko 
571df1bfde4SAndrew Rybchenko 	rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);
572df1bfde4SAndrew Rybchenko 
5734279b54eSGeorgiy Levashov 	if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN) {
5744279b54eSGeorgiy Levashov 		rc = sfc_efx_rx_qprime(rxq);
5754279b54eSGeorgiy Levashov 		if (rc != 0)
5764279b54eSGeorgiy Levashov 			goto fail_rx_qprime;
577df1bfde4SAndrew Rybchenko 	}
578df1bfde4SAndrew Rybchenko 
5794279b54eSGeorgiy Levashov 	return 0;
5804279b54eSGeorgiy Levashov 
5814279b54eSGeorgiy Levashov fail_rx_qprime:
5824279b54eSGeorgiy Levashov 	sfc_efx_rx_qstop(dp_rxq, NULL);
5834279b54eSGeorgiy Levashov 	sfc_efx_rx_qpurge(dp_rxq);
5844279b54eSGeorgiy Levashov 	return rc;
5854279b54eSGeorgiy Levashov }
5864279b54eSGeorgiy Levashov 
587df1bfde4SAndrew Rybchenko static void
588df1bfde4SAndrew Rybchenko sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
589df1bfde4SAndrew Rybchenko 		 __rte_unused unsigned int *evq_read_ptr)
590df1bfde4SAndrew Rybchenko {
591df1bfde4SAndrew Rybchenko 	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
592df1bfde4SAndrew Rybchenko 
593df1bfde4SAndrew Rybchenko 	rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;
594df1bfde4SAndrew Rybchenko 
595df1bfde4SAndrew Rybchenko 	/* The libefx-based datapath is bound to the libefx-based PMD and
596df1bfde4SAndrew Rybchenko 	 * uses the event queue structure directly, so there is no need to
597df1bfde4SAndrew Rybchenko 	 * return the EvQ read pointer.
598df1bfde4SAndrew Rybchenko 	 */
599df1bfde4SAndrew Rybchenko }
600df1bfde4SAndrew Rybchenko 
601df1bfde4SAndrew Rybchenko static void
602df1bfde4SAndrew Rybchenko sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
603df1bfde4SAndrew Rybchenko {
604df1bfde4SAndrew Rybchenko 	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
605df1bfde4SAndrew Rybchenko 	unsigned int i;
606df1bfde4SAndrew Rybchenko 	struct sfc_efx_rx_sw_desc *rxd;
607df1bfde4SAndrew Rybchenko 
608df1bfde4SAndrew Rybchenko 	for (i = rxq->completed; i != rxq->added; ++i) {
609df1bfde4SAndrew Rybchenko 		rxd = &rxq->sw_desc[i & rxq->ptr_mask];
61066e10b8dSAndrew Rybchenko 		rte_mbuf_raw_free(rxd->mbuf);
611df1bfde4SAndrew Rybchenko 		rxd->mbuf = NULL;
612df1bfde4SAndrew Rybchenko 		/* Packed stream relies on 0 in inactive SW desc.
613df1bfde4SAndrew Rybchenko 		 * Rx queue stop is not performance critical, so
614df1bfde4SAndrew Rybchenko 		 * there is no harm in doing it always.
615df1bfde4SAndrew Rybchenko 		 */
616df1bfde4SAndrew Rybchenko 		rxd->flags = 0;
617df1bfde4SAndrew Rybchenko 		rxd->size = 0;
618df1bfde4SAndrew Rybchenko 	}
619df1bfde4SAndrew Rybchenko 
620df1bfde4SAndrew Rybchenko 	rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
621df1bfde4SAndrew Rybchenko }
622df1bfde4SAndrew Rybchenko 
6234279b54eSGeorgiy Levashov static sfc_dp_rx_intr_enable_t sfc_efx_rx_intr_enable;
6244279b54eSGeorgiy Levashov static int
6254279b54eSGeorgiy Levashov sfc_efx_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
6264279b54eSGeorgiy Levashov {
6274279b54eSGeorgiy Levashov 	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
6284279b54eSGeorgiy Levashov 	int rc = 0;
6294279b54eSGeorgiy Levashov 
6304279b54eSGeorgiy Levashov 	rxq->flags |= SFC_EFX_RXQ_FLAG_INTR_EN;
6314279b54eSGeorgiy Levashov 	if (rxq->flags & SFC_EFX_RXQ_FLAG_STARTED) {
6324279b54eSGeorgiy Levashov 		rc = sfc_efx_rx_qprime(rxq);
6334279b54eSGeorgiy Levashov 		if (rc != 0)
6344279b54eSGeorgiy Levashov 			rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN;
6354279b54eSGeorgiy Levashov 	}
6364279b54eSGeorgiy Levashov 	return rc;
6374279b54eSGeorgiy Levashov }
6384279b54eSGeorgiy Levashov 
6394279b54eSGeorgiy Levashov static sfc_dp_rx_intr_disable_t sfc_efx_rx_intr_disable;
6404279b54eSGeorgiy Levashov static int
6414279b54eSGeorgiy Levashov sfc_efx_rx_intr_disable(struct sfc_dp_rxq *dp_rxq)
6424279b54eSGeorgiy Levashov {
6434279b54eSGeorgiy Levashov 	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
6444279b54eSGeorgiy Levashov 
6454279b54eSGeorgiy Levashov 	/* Cannot disarm, just disable rearm */
6464279b54eSGeorgiy Levashov 	rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN;
6474279b54eSGeorgiy Levashov 	return 0;
6484279b54eSGeorgiy Levashov }
6494279b54eSGeorgiy Levashov 
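/*
 * Rx datapath operations for the libefx-based ('efx') datapath,
 * identified by the SFC_KVARG_DATAPATH_EFX name.
 */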
650df1bfde4SAndrew Rybchenko struct sfc_dp_rx sfc_efx_rx = {
651df1bfde4SAndrew Rybchenko 	.dp = {
652df1bfde4SAndrew Rybchenko 		.name		= SFC_KVARG_DATAPATH_EFX,
653df1bfde4SAndrew Rybchenko 		.type		= SFC_DP_RX,
654849c2d91SAndrew Rybchenko 		.hw_fw_caps	= SFC_DP_HW_FW_CAP_RX_EFX,
655df1bfde4SAndrew Rybchenko 	},
6564279b54eSGeorgiy Levashov 	.features		= SFC_DP_RX_FEAT_INTR,
657295968d1SFerruh Yigit 	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
65804a04943SDenis Pryazhennikov 				  RTE_ETH_RX_OFFLOAD_RSS_HASH |
65904a04943SDenis Pryazhennikov 				  RTE_ETH_RX_OFFLOAD_KEEP_CRC,
660295968d1SFerruh Yigit 	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
661f7da270aSAndrew Rybchenko 	.qsize_up_rings		= sfc_efx_rx_qsize_up_rings,
662df1bfde4SAndrew Rybchenko 	.qcreate		= sfc_efx_rx_qcreate,
663df1bfde4SAndrew Rybchenko 	.qdestroy		= sfc_efx_rx_qdestroy,
664df1bfde4SAndrew Rybchenko 	.qstart			= sfc_efx_rx_qstart,
665df1bfde4SAndrew Rybchenko 	.qstop			= sfc_efx_rx_qstop,
666df1bfde4SAndrew Rybchenko 	.qpurge			= sfc_efx_rx_qpurge,
667df1bfde4SAndrew Rybchenko 	.supported_ptypes_get	= sfc_efx_supported_ptypes_get,
668df1bfde4SAndrew Rybchenko 	.qdesc_npending		= sfc_efx_rx_qdesc_npending,
6691d8f3a80SIvan Malov 	.qdesc_status		= sfc_efx_rx_qdesc_status,
6704279b54eSGeorgiy Levashov 	.intr_enable		= sfc_efx_rx_intr_enable,
6714279b54eSGeorgiy Levashov 	.intr_disable		= sfc_efx_rx_intr_disable,
672df1bfde4SAndrew Rybchenko 	.pkt_burst		= sfc_efx_recv_pkts,
673df1bfde4SAndrew Rybchenko };
674df1bfde4SAndrew Rybchenko 
67528944ac0SAndrew Rybchenko static void
67609cafbddSIgor Romanov sfc_rx_qflush(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
67728944ac0SAndrew Rybchenko {
67809cafbddSIgor Romanov 	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
67909cafbddSIgor Romanov 	sfc_ethdev_qid_t ethdev_qid;
6802e42d78dSAndrew Rybchenko 	struct sfc_rxq_info *rxq_info;
68128944ac0SAndrew Rybchenko 	struct sfc_rxq *rxq;
68228944ac0SAndrew Rybchenko 	unsigned int retry_count;
68328944ac0SAndrew Rybchenko 	unsigned int wait_count;
68469d753f4SAndrew Rybchenko 	int rc;
68528944ac0SAndrew Rybchenko 
68609cafbddSIgor Romanov 	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
687dda791c2SAndrew Rybchenko 	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
6882e42d78dSAndrew Rybchenko 	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);
68928944ac0SAndrew Rybchenko 
6904e8938ddSAndrew Rybchenko 	rxq = &sa->rxq_ctrl[sw_index];
6914e8938ddSAndrew Rybchenko 
69228944ac0SAndrew Rybchenko 	/*
69328944ac0SAndrew Rybchenko 	 * Retry Rx queue flushing in the case of flush failure or
69428944ac0SAndrew Rybchenko 	 * timeout. In the worst case it can take up to 6 seconds.
69528944ac0SAndrew Rybchenko 	 */
69628944ac0SAndrew Rybchenko 	for (retry_count = 0;
6972e42d78dSAndrew Rybchenko 	     ((rxq_info->state & SFC_RXQ_FLUSHED) == 0) &&
69828944ac0SAndrew Rybchenko 	     (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
69928944ac0SAndrew Rybchenko 	     ++retry_count) {
70069d753f4SAndrew Rybchenko 		rc = efx_rx_qflush(rxq->common);
70169d753f4SAndrew Rybchenko 		if (rc != 0) {
7022e42d78dSAndrew Rybchenko 			rxq_info->state |= (rc == EALREADY) ?
70369d753f4SAndrew Rybchenko 				SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
70428944ac0SAndrew Rybchenko 			break;
70528944ac0SAndrew Rybchenko 		}
7062e42d78dSAndrew Rybchenko 		rxq_info->state &= ~SFC_RXQ_FLUSH_FAILED;
7072e42d78dSAndrew Rybchenko 		rxq_info->state |= SFC_RXQ_FLUSHING;
70828944ac0SAndrew Rybchenko 
70928944ac0SAndrew Rybchenko 		/*
71028944ac0SAndrew Rybchenko 		 * Wait for Rx queue flush done or failed event at least
71128944ac0SAndrew Rybchenko 		 * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
71228944ac0SAndrew Rybchenko 		 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
71328944ac0SAndrew Rybchenko 		 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
71428944ac0SAndrew Rybchenko 		 */
71528944ac0SAndrew Rybchenko 		wait_count = 0;
71628944ac0SAndrew Rybchenko 		do {
71728944ac0SAndrew Rybchenko 			rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
71828944ac0SAndrew Rybchenko 			sfc_ev_qpoll(rxq->evq);
7192e42d78dSAndrew Rybchenko 		} while ((rxq_info->state & SFC_RXQ_FLUSHING) &&
72028944ac0SAndrew Rybchenko 			 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));
72128944ac0SAndrew Rybchenko 
7222e42d78dSAndrew Rybchenko 		if (rxq_info->state & SFC_RXQ_FLUSHING)
72309cafbddSIgor Romanov 			sfc_err(sa, "RxQ %d (internal %u) flush timed out",
72409cafbddSIgor Romanov 				ethdev_qid, sw_index);
72528944ac0SAndrew Rybchenko 
7262e42d78dSAndrew Rybchenko 		if (rxq_info->state & SFC_RXQ_FLUSH_FAILED)
72709cafbddSIgor Romanov 			sfc_err(sa, "RxQ %d (internal %u) flush failed",
72809cafbddSIgor Romanov 				ethdev_qid, sw_index);
72928944ac0SAndrew Rybchenko 
7302e42d78dSAndrew Rybchenko 		if (rxq_info->state & SFC_RXQ_FLUSHED)
73109cafbddSIgor Romanov 			sfc_notice(sa, "RxQ %d (internal %u) flushed",
73209cafbddSIgor Romanov 				   ethdev_qid, sw_index);
73328944ac0SAndrew Rybchenko 	}
73428944ac0SAndrew Rybchenko 
735bfea01bcSAndrew Rybchenko 	sa->priv.dp_rx->qpurge(rxq_info->dp);
73628944ac0SAndrew Rybchenko }
73728944ac0SAndrew Rybchenko 
738f5258439SIvan Malov static int
739f5258439SIvan Malov sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
740f5258439SIvan Malov {
741e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
742d1482e21SIvan Malov 	boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE;
743f5258439SIvan Malov 	struct sfc_port *port = &sa->port;
744f5258439SIvan Malov 	int rc;
745f5258439SIvan Malov 
746f5258439SIvan Malov 	/*
747f5258439SIvan Malov 	 * If promiscuous or all-multicast mode has been requested, setting
748f5258439SIvan Malov 	 * the filter for the default Rx queue might fail, in particular,
749f5258439SIvan Malov 	 * while running over a PCI function which is not a member of the
750f5258439SIvan Malov 	 * corresponding privilege groups; if this occurs, a few iterations
751f5258439SIvan Malov 	 * will be made to repeat this step without those flags set
752f5258439SIvan Malov 	 */
753f5258439SIvan Malov retry:
754d1482e21SIvan Malov 	rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss);
755f5258439SIvan Malov 	if (rc == 0)
756f5258439SIvan Malov 		return 0;
757f5258439SIvan Malov 	else if (rc != EOPNOTSUPP)
758f5258439SIvan Malov 		return rc;
759f5258439SIvan Malov 
760f5258439SIvan Malov 	if (port->promisc) {
761f5258439SIvan Malov 		sfc_warn(sa, "promiscuous mode has been requested, "
762f5258439SIvan Malov 			     "but the HW rejects it");
763f5258439SIvan Malov 		sfc_warn(sa, "promiscuous mode will be disabled");
764f5258439SIvan Malov 
765f5258439SIvan Malov 		port->promisc = B_FALSE;
7669d28d6b0SAndrew Rybchenko 		sa->eth_dev->data->promiscuous = 0;
76798608e18SIgor Romanov 		rc = sfc_set_rx_mode_unchecked(sa);
768f5258439SIvan Malov 		if (rc != 0)
769f5258439SIvan Malov 			return rc;
770f5258439SIvan Malov 
771f5258439SIvan Malov 		goto retry;
772f5258439SIvan Malov 	}
773f5258439SIvan Malov 
774f5258439SIvan Malov 	if (port->allmulti) {
775f5258439SIvan Malov 		sfc_warn(sa, "all-multicast mode has been requested, "
776f5258439SIvan Malov 			     "but the HW rejects it");
777f5258439SIvan Malov 		sfc_warn(sa, "all-multicast mode will be disabled");
778f5258439SIvan Malov 
779f5258439SIvan Malov 		port->allmulti = B_FALSE;
7809d28d6b0SAndrew Rybchenko 		sa->eth_dev->data->all_multicast = 0;
78198608e18SIgor Romanov 		rc = sfc_set_rx_mode_unchecked(sa);
782f5258439SIvan Malov 		if (rc != 0)
783f5258439SIvan Malov 			return rc;
784f5258439SIvan Malov 
785f5258439SIvan Malov 		goto retry;
786f5258439SIvan Malov 	}
787f5258439SIvan Malov 
788f5258439SIvan Malov 	return rc;
789f5258439SIvan Malov }
790f5258439SIvan Malov 
79128944ac0SAndrew Rybchenko int
79209cafbddSIgor Romanov sfc_rx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
79328944ac0SAndrew Rybchenko {
79409cafbddSIgor Romanov 	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
79509cafbddSIgor Romanov 	sfc_ethdev_qid_t ethdev_qid;
79628944ac0SAndrew Rybchenko 	struct sfc_rxq_info *rxq_info;
79728944ac0SAndrew Rybchenko 	struct sfc_rxq *rxq;
79828944ac0SAndrew Rybchenko 	struct sfc_evq *evq;
799c6845644SAndrew Rybchenko 	efx_rx_prefix_layout_t pinfo;
80028944ac0SAndrew Rybchenko 	int rc;
80128944ac0SAndrew Rybchenko 
802dda791c2SAndrew Rybchenko 	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
80309cafbddSIgor Romanov 	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
80409cafbddSIgor Romanov 
80509cafbddSIgor Romanov 	sfc_log_init(sa, "RxQ %d (internal %u)", ethdev_qid, sw_index);
80628944ac0SAndrew Rybchenko 
807dda791c2SAndrew Rybchenko 	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
8082e42d78dSAndrew Rybchenko 	SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);
80928944ac0SAndrew Rybchenko 
8104e8938ddSAndrew Rybchenko 	rxq = &sa->rxq_ctrl[sw_index];
81128944ac0SAndrew Rybchenko 	evq = rxq->evq;
81228944ac0SAndrew Rybchenko 
81309cafbddSIgor Romanov 	rc = sfc_ev_qstart(evq, sfc_evq_sw_index_by_rxq_sw_index(sa, sw_index));
81428944ac0SAndrew Rybchenko 	if (rc != 0)
81528944ac0SAndrew Rybchenko 		goto fail_ev_qstart;
81628944ac0SAndrew Rybchenko 
817390f9b8dSAndrew Rybchenko 	switch (rxq_info->type) {
818390f9b8dSAndrew Rybchenko 	case EFX_RXQ_TYPE_DEFAULT:
81928944ac0SAndrew Rybchenko 		rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
8205df5d264SAndrew Rybchenko 			rxq->buf_size,
821390f9b8dSAndrew Rybchenko 			&rxq->mem, rxq_info->entries, 0 /* not used on EF10 */,
822390f9b8dSAndrew Rybchenko 			rxq_info->type_flags, evq->common, &rxq->common);
823390f9b8dSAndrew Rybchenko 		break;
824390f9b8dSAndrew Rybchenko 	case EFX_RXQ_TYPE_ES_SUPER_BUFFER: {
8255befcecbSAndrew Rybchenko 		struct rte_mempool *mp = rxq_info->refill_mb_pool;
826390f9b8dSAndrew Rybchenko 		struct rte_mempool_info mp_info;
827390f9b8dSAndrew Rybchenko 
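		/*
		 * Equal-stride super-buffer Rx requires a mempool that hands
		 * out objects in contiguous blocks; query the mempool driver
		 * for the block size and reject pools that cannot provide it.
		 */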
828390f9b8dSAndrew Rybchenko 		rc = rte_mempool_ops_get_info(mp, &mp_info);
829390f9b8dSAndrew Rybchenko 		if (rc != 0) {
830390f9b8dSAndrew Rybchenko 			/* Positive errno is used in the driver */
831390f9b8dSAndrew Rybchenko 			rc = -rc;
832390f9b8dSAndrew Rybchenko 			goto fail_mp_get_info;
833390f9b8dSAndrew Rybchenko 		}
834390f9b8dSAndrew Rybchenko 		if (mp_info.contig_block_size <= 0) {
835390f9b8dSAndrew Rybchenko 			rc = EINVAL;
836390f9b8dSAndrew Rybchenko 			goto fail_bad_contig_block_size;
837390f9b8dSAndrew Rybchenko 		}
838390f9b8dSAndrew Rybchenko 		rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0,
839390f9b8dSAndrew Rybchenko 			mp_info.contig_block_size, rxq->buf_size,
840390f9b8dSAndrew Rybchenko 			mp->header_size + mp->elt_size + mp->trailer_size,
8415a1ae82dSAndrew Rybchenko 			sa->rxd_wait_timeout_ns,
842390f9b8dSAndrew Rybchenko 			&rxq->mem, rxq_info->entries, rxq_info->type_flags,
843d882d617SAndrew Rybchenko 			evq->common, &rxq->common);
844390f9b8dSAndrew Rybchenko 		break;
845390f9b8dSAndrew Rybchenko 	}
846390f9b8dSAndrew Rybchenko 	default:
847390f9b8dSAndrew Rybchenko 		rc = ENOTSUP;
848390f9b8dSAndrew Rybchenko 	}
84928944ac0SAndrew Rybchenko 	if (rc != 0)
85028944ac0SAndrew Rybchenko 		goto fail_rx_qcreate;
85128944ac0SAndrew Rybchenko 
852c6845644SAndrew Rybchenko 	rc = efx_rx_prefix_get_layout(rxq->common, &pinfo);
853c6845644SAndrew Rybchenko 	if (rc != 0)
854c6845644SAndrew Rybchenko 		goto fail_prefix_get_layout;
855c6845644SAndrew Rybchenko 
85628944ac0SAndrew Rybchenko 	efx_rx_qenable(rxq->common);
85728944ac0SAndrew Rybchenko 
858c6845644SAndrew Rybchenko 	rc = sa->priv.dp_rx->qstart(rxq_info->dp, evq->read_ptr, &pinfo);
859df1bfde4SAndrew Rybchenko 	if (rc != 0)
860df1bfde4SAndrew Rybchenko 		goto fail_dp_qstart;
86128944ac0SAndrew Rybchenko 
8622e42d78dSAndrew Rybchenko 	rxq_info->state |= SFC_RXQ_STARTED;
86328944ac0SAndrew Rybchenko 
86409cafbddSIgor Romanov 	if (ethdev_qid == 0 && !sfc_sa2shared(sa)->isolated) {
865f5258439SIvan Malov 		rc = sfc_rx_default_rxq_set_filter(sa, rxq);
86628944ac0SAndrew Rybchenko 		if (rc != 0)
86728944ac0SAndrew Rybchenko 			goto fail_mac_filter_default_rxq_set;
86828944ac0SAndrew Rybchenko 	}
86928944ac0SAndrew Rybchenko 
87028944ac0SAndrew Rybchenko 	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
87109cafbddSIgor Romanov 	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
87209cafbddSIgor Romanov 		sa->eth_dev->data->rx_queue_state[ethdev_qid] =
87328944ac0SAndrew Rybchenko 			RTE_ETH_QUEUE_STATE_STARTED;
87428944ac0SAndrew Rybchenko 
87528944ac0SAndrew Rybchenko 	return 0;
87628944ac0SAndrew Rybchenko 
87728944ac0SAndrew Rybchenko fail_mac_filter_default_rxq_set:
878a8bcd99dSIgor Romanov 	sfc_rx_qflush(sa, sw_index);
879bfea01bcSAndrew Rybchenko 	sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);
880a8bcd99dSIgor Romanov 	rxq_info->state = SFC_RXQ_INITIALIZED;
881df1bfde4SAndrew Rybchenko 
882df1bfde4SAndrew Rybchenko fail_dp_qstart:
883a8bcd99dSIgor Romanov 	efx_rx_qdestroy(rxq->common);
88428944ac0SAndrew Rybchenko 
885c6845644SAndrew Rybchenko fail_prefix_get_layout:
88628944ac0SAndrew Rybchenko fail_rx_qcreate:
887390f9b8dSAndrew Rybchenko fail_bad_contig_block_size:
888390f9b8dSAndrew Rybchenko fail_mp_get_info:
8896caeec47SAndrew Rybchenko 	sfc_ev_qstop(evq);
89028944ac0SAndrew Rybchenko 
89128944ac0SAndrew Rybchenko fail_ev_qstart:
89228944ac0SAndrew Rybchenko 	return rc;
89328944ac0SAndrew Rybchenko }
89428944ac0SAndrew Rybchenko 
89528944ac0SAndrew Rybchenko void
89609cafbddSIgor Romanov sfc_rx_qstop(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
89728944ac0SAndrew Rybchenko {
89809cafbddSIgor Romanov 	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
89909cafbddSIgor Romanov 	sfc_ethdev_qid_t ethdev_qid;
90028944ac0SAndrew Rybchenko 	struct sfc_rxq_info *rxq_info;
90128944ac0SAndrew Rybchenko 	struct sfc_rxq *rxq;
90228944ac0SAndrew Rybchenko 
903dda791c2SAndrew Rybchenko 	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
90409cafbddSIgor Romanov 	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
90509cafbddSIgor Romanov 
90609cafbddSIgor Romanov 	sfc_log_init(sa, "RxQ %d (internal %u)", ethdev_qid, sw_index);
90728944ac0SAndrew Rybchenko 
908dda791c2SAndrew Rybchenko 	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
909ac7af396SAndrew Rybchenko 
9104e8938ddSAndrew Rybchenko 	if (rxq_info->state == SFC_RXQ_INITIALIZED)
911ac7af396SAndrew Rybchenko 		return;
9122e42d78dSAndrew Rybchenko 	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);
91328944ac0SAndrew Rybchenko 
91428944ac0SAndrew Rybchenko 	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
91509cafbddSIgor Romanov 	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
91609cafbddSIgor Romanov 		sa->eth_dev->data->rx_queue_state[ethdev_qid] =
91728944ac0SAndrew Rybchenko 			RTE_ETH_QUEUE_STATE_STOPPED;
91828944ac0SAndrew Rybchenko 
9194e8938ddSAndrew Rybchenko 	rxq = &sa->rxq_ctrl[sw_index];
920bfea01bcSAndrew Rybchenko 	sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);
921921f6cf1SAndrew Rybchenko 
92209cafbddSIgor Romanov 	if (ethdev_qid == 0)
92328944ac0SAndrew Rybchenko 		efx_mac_filter_default_rxq_clear(sa->nic);
92428944ac0SAndrew Rybchenko 
92528944ac0SAndrew Rybchenko 	sfc_rx_qflush(sa, sw_index);
92628944ac0SAndrew Rybchenko 
9272e42d78dSAndrew Rybchenko 	rxq_info->state = SFC_RXQ_INITIALIZED;
92828944ac0SAndrew Rybchenko 
92928944ac0SAndrew Rybchenko 	efx_rx_qdestroy(rxq->common);
93028944ac0SAndrew Rybchenko 
9316caeec47SAndrew Rybchenko 	sfc_ev_qstop(rxq->evq);
93228944ac0SAndrew Rybchenko }
933a8e64c6bSAndrew Rybchenko 
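/*
 * Build a mask of Rx offloads which the NIC can actually provide;
 * device and per-queue capabilities reported by the datapath are
 * ANDed with this mask below.
 */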
934f08d113dSAndrew Rybchenko static uint64_t
935f08d113dSAndrew Rybchenko sfc_rx_get_offload_mask(struct sfc_adapter *sa)
936f08d113dSAndrew Rybchenko {
937f08d113dSAndrew Rybchenko 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
938f08d113dSAndrew Rybchenko 	uint64_t no_caps = 0;
939f08d113dSAndrew Rybchenko 
940f08d113dSAndrew Rybchenko 	if (encp->enc_tunnel_encapsulations_supported == 0)
941295968d1SFerruh Yigit 		no_caps |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
942f08d113dSAndrew Rybchenko 
94304a04943SDenis Pryazhennikov 	if (encp->enc_rx_include_fcs_supported == 0)
94404a04943SDenis Pryazhennikov 		no_caps |= RTE_ETH_RX_OFFLOAD_KEEP_CRC;
94504a04943SDenis Pryazhennikov 
94662082124SArtemii Morozov 	if (encp->enc_rx_vlan_stripping_supported == 0)
94762082124SArtemii Morozov 		no_caps |= RTE_ETH_RX_OFFLOAD_VLAN_STRIP;
94862082124SArtemii Morozov 
949f08d113dSAndrew Rybchenko 	return ~no_caps;
950f08d113dSAndrew Rybchenko }
951f08d113dSAndrew Rybchenko 
952cd8da5e8SIvan Malov uint64_t
953cd8da5e8SIvan Malov sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
954cd8da5e8SIvan Malov {
955f08d113dSAndrew Rybchenko 	uint64_t caps = sa->priv.dp_rx->dev_offload_capa;
956cd8da5e8SIvan Malov 
957f08d113dSAndrew Rybchenko 	return caps & sfc_rx_get_offload_mask(sa);
958cd8da5e8SIvan Malov }
959cd8da5e8SIvan Malov 
960ff6a1197SIvan Malov uint64_t
961ff6a1197SIvan Malov sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
962ff6a1197SIvan Malov {
963f08d113dSAndrew Rybchenko 	return sa->priv.dp_rx->queue_offload_capa & sfc_rx_get_offload_mask(sa);
964ff6a1197SIvan Malov }
965ff6a1197SIvan Malov 
966a8e64c6bSAndrew Rybchenko static int
967f7da270aSAndrew Rybchenko sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
968a4996bd8SWei Dai 		   const struct rte_eth_rxconf *rx_conf,
969eacbad76SAndrew Rybchenko 		   __rte_unused uint64_t offloads)
970ce35b05cSAndrew Rybchenko {
971ce35b05cSAndrew Rybchenko 	int rc = 0;
972ce35b05cSAndrew Rybchenko 
973ce35b05cSAndrew Rybchenko 	if (rx_conf->rx_thresh.pthresh != 0 ||
974ce35b05cSAndrew Rybchenko 	    rx_conf->rx_thresh.hthresh != 0 ||
975ce35b05cSAndrew Rybchenko 	    rx_conf->rx_thresh.wthresh != 0) {
976fdd7361bSAndrew Rybchenko 		sfc_warn(sa,
977ce35b05cSAndrew Rybchenko 			"RxQ prefetch/host/writeback thresholds are not supported");
978ce35b05cSAndrew Rybchenko 	}
979ce35b05cSAndrew Rybchenko 
980f7da270aSAndrew Rybchenko 	if (rx_conf->rx_free_thresh > rxq_max_fill_level) {
9819e612223SAndrew Rybchenko 		sfc_err(sa,
9829e612223SAndrew Rybchenko 			"RxQ free threshold too large: %u vs maximum %u",
983f7da270aSAndrew Rybchenko 			rx_conf->rx_free_thresh, rxq_max_fill_level);
984ce35b05cSAndrew Rybchenko 		rc = EINVAL;
985ce35b05cSAndrew Rybchenko 	}
986ce35b05cSAndrew Rybchenko 
987ce35b05cSAndrew Rybchenko 	if (rx_conf->rx_drop_en == 0) {
988ce35b05cSAndrew Rybchenko 		sfc_err(sa, "RxQ drop disable is not supported");
989ce35b05cSAndrew Rybchenko 		rc = EINVAL;
990ce35b05cSAndrew Rybchenko 	}
991ce35b05cSAndrew Rybchenko 
992ce35b05cSAndrew Rybchenko 	return rc;
993ce35b05cSAndrew Rybchenko }
994ce35b05cSAndrew Rybchenko 
9950c7a0c35SAndrew Rybchenko static unsigned int
9960c7a0c35SAndrew Rybchenko sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
9970c7a0c35SAndrew Rybchenko {
9980c7a0c35SAndrew Rybchenko 	uint32_t data_off;
9990c7a0c35SAndrew Rybchenko 	uint32_t order;
10000c7a0c35SAndrew Rybchenko 
10010c7a0c35SAndrew Rybchenko 	/* The mbuf object itself is always cache line aligned */
10020c7a0c35SAndrew Rybchenko 	order = rte_bsf32(RTE_CACHE_LINE_SIZE);
10030c7a0c35SAndrew Rybchenko 
10040c7a0c35SAndrew Rybchenko 	/* Data offset from mbuf object start */
10050c7a0c35SAndrew Rybchenko 	data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
10060c7a0c35SAndrew Rybchenko 		RTE_PKTMBUF_HEADROOM;
10070c7a0c35SAndrew Rybchenko 
10080c7a0c35SAndrew Rybchenko 	order = MIN(order, rte_bsf32(data_off));
10090c7a0c35SAndrew Rybchenko 
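	/*
	 * Illustrative example (assuming common defaults, not guaranteed):
	 * with a 128-byte struct rte_mbuf, no private area and a 128-byte
	 * RTE_PKTMBUF_HEADROOM, data_off is 256; rte_bsf32(256) = 8 and
	 * rte_bsf32(64) = 6, so the result is 1u << 6 = 64 bytes.
	 */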
1010bd0c7b4dSAndrew Rybchenko 	return 1u << order;
10110c7a0c35SAndrew Rybchenko }
10120c7a0c35SAndrew Rybchenko 
10130c7a0c35SAndrew Rybchenko static uint16_t
10140c7a0c35SAndrew Rybchenko sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
10150c7a0c35SAndrew Rybchenko {
10160c7a0c35SAndrew Rybchenko 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
10170c7a0c35SAndrew Rybchenko 	const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
10180c7a0c35SAndrew Rybchenko 	const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
10190c7a0c35SAndrew Rybchenko 	uint16_t buf_size;
10200c7a0c35SAndrew Rybchenko 	unsigned int buf_aligned;
10210c7a0c35SAndrew Rybchenko 	unsigned int start_alignment;
10220c7a0c35SAndrew Rybchenko 	unsigned int end_padding_alignment;
10230c7a0c35SAndrew Rybchenko 
10240c7a0c35SAndrew Rybchenko 	/* Below it is assumed that both alignments are power of 2 */
10250c7a0c35SAndrew Rybchenko 	SFC_ASSERT(rte_is_power_of_2(nic_align_start));
10260c7a0c35SAndrew Rybchenko 	SFC_ASSERT(rte_is_power_of_2(nic_align_end));
10270c7a0c35SAndrew Rybchenko 
10280c7a0c35SAndrew Rybchenko 	/*
10290c7a0c35SAndrew Rybchenko 	 * mbuf is always cache line aligned, double-check
10300c7a0c35SAndrew Rybchenko 	 * that it meets rx buffer start alignment requirements.
10310c7a0c35SAndrew Rybchenko 	 */
10320c7a0c35SAndrew Rybchenko 
10330c7a0c35SAndrew Rybchenko 	/* Start from mbuf pool data room size */
10340c7a0c35SAndrew Rybchenko 	buf_size = rte_pktmbuf_data_room_size(mb_pool);
10350c7a0c35SAndrew Rybchenko 
10360c7a0c35SAndrew Rybchenko 	/* Remove headroom */
10370c7a0c35SAndrew Rybchenko 	if (buf_size <= RTE_PKTMBUF_HEADROOM) {
10380c7a0c35SAndrew Rybchenko 		sfc_err(sa,
10390c7a0c35SAndrew Rybchenko 			"RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
10400c7a0c35SAndrew Rybchenko 			mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
10410c7a0c35SAndrew Rybchenko 		return 0;
10420c7a0c35SAndrew Rybchenko 	}
10430c7a0c35SAndrew Rybchenko 	buf_size -= RTE_PKTMBUF_HEADROOM;
10440c7a0c35SAndrew Rybchenko 
10450c7a0c35SAndrew Rybchenko 	/* Calculate guaranteed data start alignment */
10460c7a0c35SAndrew Rybchenko 	buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);
10470c7a0c35SAndrew Rybchenko 
10480c7a0c35SAndrew Rybchenko 	/* Reserve space for start alignment */
10490c7a0c35SAndrew Rybchenko 	if (buf_aligned < nic_align_start) {
10500c7a0c35SAndrew Rybchenko 		start_alignment = nic_align_start - buf_aligned;
10510c7a0c35SAndrew Rybchenko 		if (buf_size <= start_alignment) {
10520c7a0c35SAndrew Rybchenko 			sfc_err(sa,
10530c7a0c35SAndrew Rybchenko 				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
10540c7a0c35SAndrew Rybchenko 				mb_pool->name,
10550c7a0c35SAndrew Rybchenko 				rte_pktmbuf_data_room_size(mb_pool),
10560c7a0c35SAndrew Rybchenko 				RTE_PKTMBUF_HEADROOM, start_alignment);
10570c7a0c35SAndrew Rybchenko 			return 0;
10580c7a0c35SAndrew Rybchenko 		}
10590c7a0c35SAndrew Rybchenko 		buf_aligned = nic_align_start;
10600c7a0c35SAndrew Rybchenko 		buf_size -= start_alignment;
10610c7a0c35SAndrew Rybchenko 	} else {
10620c7a0c35SAndrew Rybchenko 		start_alignment = 0;
10630c7a0c35SAndrew Rybchenko 	}
10640c7a0c35SAndrew Rybchenko 
10650c7a0c35SAndrew Rybchenko 	/* Make sure that end padding does not write beyond the buffer */
10660c7a0c35SAndrew Rybchenko 	if (buf_aligned < nic_align_end) {
10670c7a0c35SAndrew Rybchenko 		/*
10687be78d02SJosh Soref 		 * Estimate the space which can be lost. If the guaranteed
10690c7a0c35SAndrew Rybchenko 		 * buffer size is odd, the lost space is (nic_align_end - 1);
10700c7a0c35SAndrew Rybchenko 		 * a more accurate formula is used below.
10710c7a0c35SAndrew Rybchenko 		 */
10720c7a0c35SAndrew Rybchenko 		end_padding_alignment = nic_align_end -
10730c7a0c35SAndrew Rybchenko 			MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
10740c7a0c35SAndrew Rybchenko 		if (buf_size <= end_padding_alignment) {
10750c7a0c35SAndrew Rybchenko 			sfc_err(sa,
10760c7a0c35SAndrew Rybchenko 				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
10770c7a0c35SAndrew Rybchenko 				mb_pool->name,
10780c7a0c35SAndrew Rybchenko 				rte_pktmbuf_data_room_size(mb_pool),
10790c7a0c35SAndrew Rybchenko 				RTE_PKTMBUF_HEADROOM, start_alignment,
10800c7a0c35SAndrew Rybchenko 				end_padding_alignment);
10810c7a0c35SAndrew Rybchenko 			return 0;
10820c7a0c35SAndrew Rybchenko 		}
10830c7a0c35SAndrew Rybchenko 		buf_size -= end_padding_alignment;
10840c7a0c35SAndrew Rybchenko 	} else {
10850c7a0c35SAndrew Rybchenko 		/*
10860c7a0c35SAndrew Rybchenko 		 * Start is aligned the same or better than end,
10870c7a0c35SAndrew Rybchenko 		 * just align length.
10880c7a0c35SAndrew Rybchenko 		 */
1089827ad823SAndrew Rybchenko 		buf_size = EFX_P2ALIGN(uint32_t, buf_size, nic_align_end);
10900c7a0c35SAndrew Rybchenko 	}
10910c7a0c35SAndrew Rybchenko 
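	/*
	 * Worked example with hypothetical NIC alignment requirements: a
	 * 2176-byte data room and 128-byte headroom give an initial
	 * buf_size of 2048 with 64-byte guaranteed alignment; a 256-byte
	 * start alignment requirement would reserve 192 bytes (buf_size
	 * 1856, buf_aligned 256), and with a 64-byte end padding alignment
	 * the length is already a multiple of 64, so 1856 would be kept
	 * (subject to the cap below).
	 */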
1092cd8e2d82SIvan Malov 	/*
1093cd8e2d82SIvan Malov 	 * The buffer length field of an Rx descriptor may not be wide
1094cd8e2d82SIvan Malov 	 * enough to store a 16-bit data count taken from an mbuf.
1095cd8e2d82SIvan Malov 	 */
1096cd8e2d82SIvan Malov 	return MIN(buf_size, encp->enc_rx_dma_desc_size_max);
10970c7a0c35SAndrew Rybchenko }
10980c7a0c35SAndrew Rybchenko 
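/*
 * Set up a driver Rx queue: size the Rx and event rings via the datapath
 * qsize_up_rings callback, validate the queue configuration and the mbuf
 * pool buffer size, create the event queue, allocate the descriptor ring
 * DMA memory and create the datapath Rx queue. On success the queue is
 * left in the SFC_RXQ_INITIALIZED state; it is started separately.
 */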
1099ce35b05cSAndrew Rybchenko int
110009cafbddSIgor Romanov sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
1101ce35b05cSAndrew Rybchenko 	     uint16_t nb_rx_desc, unsigned int socket_id,
1102ce35b05cSAndrew Rybchenko 	     const struct rte_eth_rxconf *rx_conf,
1103ce35b05cSAndrew Rybchenko 	     struct rte_mempool *mb_pool)
1104ce35b05cSAndrew Rybchenko {
110509cafbddSIgor Romanov 	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
110609cafbddSIgor Romanov 	sfc_ethdev_qid_t ethdev_qid;
1107dcc3285fSAndrew Rybchenko 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1108e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
1109ce35b05cSAndrew Rybchenko 	int rc;
1110f7da270aSAndrew Rybchenko 	unsigned int rxq_entries;
1111f7da270aSAndrew Rybchenko 	unsigned int evq_entries;
1112f7da270aSAndrew Rybchenko 	unsigned int rxq_max_fill_level;
1113a4996bd8SWei Dai 	uint64_t offloads;
11140c7a0c35SAndrew Rybchenko 	uint16_t buf_size;
1115ce35b05cSAndrew Rybchenko 	struct sfc_rxq_info *rxq_info;
1116ce35b05cSAndrew Rybchenko 	struct sfc_evq *evq;
1117ce35b05cSAndrew Rybchenko 	struct sfc_rxq *rxq;
1118df1bfde4SAndrew Rybchenko 	struct sfc_dp_rx_qcreate_info info;
1119048a0d1aSIgor Romanov 	struct sfc_dp_rx_hw_limits hw_limits;
112062082124SArtemii Morozov 	struct sfc_port *port = &sa->port;
112106b186a0SAndrew Rybchenko 	uint16_t rx_free_thresh;
11226c0cc77aSIgor Romanov 	const char *error;
1123ce35b05cSAndrew Rybchenko 
1124048a0d1aSIgor Romanov 	memset(&hw_limits, 0, sizeof(hw_limits));
1125048a0d1aSIgor Romanov 	hw_limits.rxq_max_entries = sa->rxq_max_entries;
1126048a0d1aSIgor Romanov 	hw_limits.rxq_min_entries = sa->rxq_min_entries;
1127d5371f3dSIgor Romanov 	hw_limits.evq_max_entries = sa->evq_max_entries;
1128d5371f3dSIgor Romanov 	hw_limits.evq_min_entries = sa->evq_min_entries;
1129048a0d1aSIgor Romanov 
1130048a0d1aSIgor Romanov 	rc = sa->priv.dp_rx->qsize_up_rings(nb_rx_desc, &hw_limits, mb_pool,
1131048a0d1aSIgor Romanov 					    &rxq_entries, &evq_entries,
1132048a0d1aSIgor Romanov 					    &rxq_max_fill_level);
1133f7da270aSAndrew Rybchenko 	if (rc != 0)
1134f7da270aSAndrew Rybchenko 		goto fail_size_up_rings;
1135048a0d1aSIgor Romanov 	SFC_ASSERT(rxq_entries >= sa->rxq_min_entries);
1136048a0d1aSIgor Romanov 	SFC_ASSERT(rxq_entries <= sa->rxq_max_entries);
11373c335b7fSAndrew Rybchenko 	SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);
1138f7da270aSAndrew Rybchenko 
113909cafbddSIgor Romanov 	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
114009cafbddSIgor Romanov 
114109cafbddSIgor Romanov 	offloads = rx_conf->offloads;
114209cafbddSIgor Romanov 	/* Add device level Rx offloads if the queue is an ethdev Rx queue */
114309cafbddSIgor Romanov 	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
114409cafbddSIgor Romanov 		offloads |= sa->eth_dev->data->dev_conf.rxmode.offloads;
114509cafbddSIgor Romanov 
1146a4996bd8SWei Dai 	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
1147ce35b05cSAndrew Rybchenko 	if (rc != 0)
1148ce35b05cSAndrew Rybchenko 		goto fail_bad_conf;
1149ce35b05cSAndrew Rybchenko 
11500c7a0c35SAndrew Rybchenko 	buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
11510c7a0c35SAndrew Rybchenko 	if (buf_size == 0) {
115209cafbddSIgor Romanov 		sfc_err(sa,
115309cafbddSIgor Romanov 			"RxQ %d (internal %u) mbuf pool object size is too small",
115409cafbddSIgor Romanov 			ethdev_qid, sw_index);
1155dcc3285fSAndrew Rybchenko 		rc = EINVAL;
1156dcc3285fSAndrew Rybchenko 		goto fail_bad_conf;
1157dcc3285fSAndrew Rybchenko 	}
1158dcc3285fSAndrew Rybchenko 
11596c0cc77aSIgor Romanov 	if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
11606c0cc77aSIgor Romanov 				  encp->enc_rx_prefix_size,
1161295968d1SFerruh Yigit 				  (offloads & RTE_ETH_RX_OFFLOAD_SCATTER),
1162d41a6268SIgor Romanov 				  encp->enc_rx_scatter_max,
11636c0cc77aSIgor Romanov 				  &error)) {
116409cafbddSIgor Romanov 		sfc_err(sa, "RxQ %d (internal %u) MTU check failed: %s",
116509cafbddSIgor Romanov 			ethdev_qid, sw_index, error);
116609cafbddSIgor Romanov 		sfc_err(sa,
116709cafbddSIgor Romanov 			"RxQ %d (internal %u) calculated Rx buffer size is %u vs "
1168dcc3285fSAndrew Rybchenko 			"PDU size %u plus Rx prefix %u bytes",
116909cafbddSIgor Romanov 			ethdev_qid, sw_index, buf_size,
117009cafbddSIgor Romanov 			(unsigned int)sa->port.pdu, encp->enc_rx_prefix_size);
1171dcc3285fSAndrew Rybchenko 		rc = EINVAL;
1172ce35b05cSAndrew Rybchenko 		goto fail_bad_conf;
1173ce35b05cSAndrew Rybchenko 	}
1174ce35b05cSAndrew Rybchenko 
1175dda791c2SAndrew Rybchenko 	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
1176dda791c2SAndrew Rybchenko 	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
1177ce35b05cSAndrew Rybchenko 
1178f7da270aSAndrew Rybchenko 	SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
1179f7da270aSAndrew Rybchenko 	rxq_info->entries = rxq_entries;
1180390f9b8dSAndrew Rybchenko 
11815dec95e3SAndrew Rybchenko 	if (sa->priv.dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER)
1182390f9b8dSAndrew Rybchenko 		rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER;
1183390f9b8dSAndrew Rybchenko 	else
1184d882d617SAndrew Rybchenko 		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
1185390f9b8dSAndrew Rybchenko 
1186b8cf5ba5SIgor Romanov 	rxq_info->type_flags |=
1187295968d1SFerruh Yigit 		(offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ?
1188d882d617SAndrew Rybchenko 		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;
1189ce35b05cSAndrew Rybchenko 
1190591cbbb1SAndrew Rybchenko 	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
1191f08d113dSAndrew Rybchenko 	    (sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
1192295968d1SFerruh Yigit 	     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
1193591cbbb1SAndrew Rybchenko 		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;
1194591cbbb1SAndrew Rybchenko 
1195295968d1SFerruh Yigit 	if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)
1196eb043628SAndrew Rybchenko 		rxq_info->type_flags |= EFX_RXQ_FLAG_RSS_HASH;
1197eb043628SAndrew Rybchenko 
1198bf38764aSIvan Malov 	if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0)
1199bf38764aSIvan Malov 		rxq_info->type_flags |= EFX_RXQ_FLAG_USER_FLAG;
1200bf38764aSIvan Malov 
120153a80512SIvan Malov 	if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ||
1202f55fe01fSIvan Malov 	    sfc_ft_is_active(sa))
1203a9cc128cSIvan Malov 		rxq_info->type_flags |= EFX_RXQ_FLAG_USER_MARK;
1204a9cc128cSIvan Malov 
120562082124SArtemii Morozov 	if (port->vlan_strip)
120662082124SArtemii Morozov 		rxq_info->type_flags |= EFX_RXQ_FLAG_VLAN_STRIPPED_TCI;
120762082124SArtemii Morozov 
12086caeec47SAndrew Rybchenko 	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
1209f7da270aSAndrew Rybchenko 			  evq_entries, socket_id, &evq);
1210ce35b05cSAndrew Rybchenko 	if (rc != 0)
1211ce35b05cSAndrew Rybchenko 		goto fail_ev_qinit;
1212ce35b05cSAndrew Rybchenko 
12134e8938ddSAndrew Rybchenko 	rxq = &sa->rxq_ctrl[sw_index];
1214df1bfde4SAndrew Rybchenko 	rxq->evq = evq;
1215df1bfde4SAndrew Rybchenko 	rxq->hw_index = sw_index;
121606b186a0SAndrew Rybchenko 	/*
121706b186a0SAndrew Rybchenko 	 * If the Rx refill threshold is specified (non-zero) in the Rx
121806b186a0SAndrew Rybchenko 	 * configuration, use that value. Otherwise use 1/8 of the number
121906b186a0SAndrew Rybchenko 	 * of Rx descriptors as the default. This keeps the Rx ring full
122006b186a0SAndrew Rybchenko 	 * enough without refilling too aggressively when the packet rate
122106b186a0SAndrew Rybchenko 	 * is high.
122206b186a0SAndrew Rybchenko 	 *
122306b186a0SAndrew Rybchenko 	 * Since the PMD refills in bulks, waiting for a full bulk
122406b186a0SAndrew Rybchenko 	 * effectively rounds the threshold down, so round up here to
122506b186a0SAndrew Rybchenko 	 * mitigate that a bit.
122606b186a0SAndrew Rybchenko 	 */
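	/*
	 * For example, with 1024 Rx descriptors and no explicit threshold
	 * the default is EFX_DIV_ROUND_UP(1024, 8) == 128, raised to
	 * SFC_RX_REFILL_BULK below if that constant is larger.
	 */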
122706b186a0SAndrew Rybchenko 	rx_free_thresh = (rx_conf->rx_free_thresh != 0) ?
122806b186a0SAndrew Rybchenko 		rx_conf->rx_free_thresh : EFX_DIV_ROUND_UP(nb_rx_desc, 8);
122906b186a0SAndrew Rybchenko 	/* Rx refill threshold cannot be smaller than refill bulk */
12305befcecbSAndrew Rybchenko 	rxq_info->refill_threshold =
123106b186a0SAndrew Rybchenko 		RTE_MAX(rx_free_thresh, SFC_RX_REFILL_BULK);
12325befcecbSAndrew Rybchenko 	rxq_info->refill_mb_pool = mb_pool;
123392a15fc5SIgor Romanov 
123492a15fc5SIgor Romanov 	if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0 &&
1235295968d1SFerruh Yigit 	    (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
123692a15fc5SIgor Romanov 		rxq_info->rxq_flags = SFC_RXQ_FLAG_RSS_HASH;
123792a15fc5SIgor Romanov 	else
123892a15fc5SIgor Romanov 		rxq_info->rxq_flags = 0;
123992a15fc5SIgor Romanov 
1240462c4f08SIvan Malov 	if (rxq_info->type_flags & EFX_RXQ_FLAG_INGRESS_MPORT)
1241462c4f08SIvan Malov 		rxq_info->rxq_flags |= SFC_RXQ_FLAG_INGRESS_MPORT;
1242462c4f08SIvan Malov 
124362082124SArtemii Morozov 	if (rxq_info->type_flags & EFX_RXQ_FLAG_VLAN_STRIPPED_TCI)
124462082124SArtemii Morozov 		rxq_info->rxq_flags |= SFC_RXQ_FLAG_VLAN_STRIPPED_TCI;
124562082124SArtemii Morozov 
1246390f9b8dSAndrew Rybchenko 	rxq->buf_size = buf_size;
1247df1bfde4SAndrew Rybchenko 
12483037e6cfSViacheslav Galaktionov 	rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_NIC_DMA_ADDR_RX_RING,
12498c5ca0c6SIgor Romanov 			   efx_rxq_size(sa->nic, rxq_info->entries),
1250ce35b05cSAndrew Rybchenko 			   socket_id, &rxq->mem);
1251ce35b05cSAndrew Rybchenko 	if (rc != 0)
1252ce35b05cSAndrew Rybchenko 		goto fail_dma_alloc;
1253ce35b05cSAndrew Rybchenko 
1254df1bfde4SAndrew Rybchenko 	memset(&info, 0, sizeof(info));
12555befcecbSAndrew Rybchenko 	info.refill_mb_pool = rxq_info->refill_mb_pool;
1256e5595ee2SAndrew Rybchenko 	info.max_fill_level = rxq_max_fill_level;
12575befcecbSAndrew Rybchenko 	info.refill_threshold = rxq_info->refill_threshold;
1258df1bfde4SAndrew Rybchenko 	info.buf_size = buf_size;
1259df1bfde4SAndrew Rybchenko 	info.batch_max = encp->enc_rx_batch_max;
1260df1bfde4SAndrew Rybchenko 	info.prefix_size = encp->enc_rx_prefix_size;
126153a80512SIvan Malov 
1262f55fe01fSIvan Malov 	if (sfc_ft_is_active(sa))
126353a80512SIvan Malov 		info.user_mark_mask = SFC_FT_USER_MARK_MASK;
126453a80512SIvan Malov 	else
126553a80512SIvan Malov 		info.user_mark_mask = UINT32_MAX;
126653a80512SIvan Malov 
126792a15fc5SIgor Romanov 	info.flags = rxq_info->rxq_flags;
1268df1bfde4SAndrew Rybchenko 	info.rxq_entries = rxq_info->entries;
1269638bddc9SAndrew Rybchenko 	info.rxq_hw_ring = rxq->mem.esm_base;
127009cafbddSIgor Romanov 	info.evq_hw_index = sfc_evq_sw_index_by_rxq_sw_index(sa, sw_index);
1271f7da270aSAndrew Rybchenko 	info.evq_entries = evq_entries;
1272638bddc9SAndrew Rybchenko 	info.evq_hw_ring = evq->mem.esm_base;
1273638bddc9SAndrew Rybchenko 	info.hw_index = rxq->hw_index;
1274638bddc9SAndrew Rybchenko 	info.mem_bar = sa->mem_bar.esb_base;
1275714bff55SAndrew Rybchenko 	info.vi_window_shift = encp->enc_vi_window_shift;
1276e285f30dSIgor Romanov 	info.fcw_offset = sa->fcw_offset;
1277df1bfde4SAndrew Rybchenko 
12783037e6cfSViacheslav Galaktionov 	info.nic_dma_info = &sas->nic_dma_info;
12793037e6cfSViacheslav Galaktionov 
12805dec95e3SAndrew Rybchenko 	rc = sa->priv.dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
1281c0802544SFerruh Yigit 				     &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
1282bfea01bcSAndrew Rybchenko 				     socket_id, &info, &rxq_info->dp);
1283df1bfde4SAndrew Rybchenko 	if (rc != 0)
1284df1bfde4SAndrew Rybchenko 		goto fail_dp_rx_qcreate;
1285df1bfde4SAndrew Rybchenko 
1286bfea01bcSAndrew Rybchenko 	evq->dp_rxq = rxq_info->dp;
1287df1bfde4SAndrew Rybchenko 
12882e42d78dSAndrew Rybchenko 	rxq_info->state = SFC_RXQ_INITIALIZED;
1289ce35b05cSAndrew Rybchenko 
1290ac7af396SAndrew Rybchenko 	rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);
1291ce35b05cSAndrew Rybchenko 
1292ce35b05cSAndrew Rybchenko 	return 0;
1293ce35b05cSAndrew Rybchenko 
1294df1bfde4SAndrew Rybchenko fail_dp_rx_qcreate:
1295ce35b05cSAndrew Rybchenko 	sfc_dma_free(sa, &rxq->mem);
1296ce35b05cSAndrew Rybchenko 
1297ce35b05cSAndrew Rybchenko fail_dma_alloc:
12986caeec47SAndrew Rybchenko 	sfc_ev_qfini(evq);
1299ce35b05cSAndrew Rybchenko 
1300ce35b05cSAndrew Rybchenko fail_ev_qinit:
1301ce35b05cSAndrew Rybchenko 	rxq_info->entries = 0;
1302ce35b05cSAndrew Rybchenko 
1303ce35b05cSAndrew Rybchenko fail_bad_conf:
1304f7da270aSAndrew Rybchenko fail_size_up_rings:
1305ce35b05cSAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
1306ce35b05cSAndrew Rybchenko 	return rc;
1307ce35b05cSAndrew Rybchenko }
1308ce35b05cSAndrew Rybchenko 
1309ce35b05cSAndrew Rybchenko void
131009cafbddSIgor Romanov sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
1311ce35b05cSAndrew Rybchenko {
131209cafbddSIgor Romanov 	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
131309cafbddSIgor Romanov 	sfc_ethdev_qid_t ethdev_qid;
1314ce35b05cSAndrew Rybchenko 	struct sfc_rxq_info *rxq_info;
1315ce35b05cSAndrew Rybchenko 	struct sfc_rxq *rxq;
1316ce35b05cSAndrew Rybchenko 
1317dda791c2SAndrew Rybchenko 	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
131809cafbddSIgor Romanov 	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
131909cafbddSIgor Romanov 
132009cafbddSIgor Romanov 	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
132109cafbddSIgor Romanov 		sa->eth_dev->data->rx_queues[ethdev_qid] = NULL;
1322ce35b05cSAndrew Rybchenko 
1323dda791c2SAndrew Rybchenko 	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
1324ce35b05cSAndrew Rybchenko 
13252e42d78dSAndrew Rybchenko 	SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);
1326ce35b05cSAndrew Rybchenko 
1327bfea01bcSAndrew Rybchenko 	sa->priv.dp_rx->qdestroy(rxq_info->dp);
1328bfea01bcSAndrew Rybchenko 	rxq_info->dp = NULL;
1329df1bfde4SAndrew Rybchenko 
13304e8938ddSAndrew Rybchenko 	rxq_info->state &= ~SFC_RXQ_INITIALIZED;
1331ce35b05cSAndrew Rybchenko 	rxq_info->entries = 0;
1332ce35b05cSAndrew Rybchenko 
13334e8938ddSAndrew Rybchenko 	rxq = &sa->rxq_ctrl[sw_index];
13344e8938ddSAndrew Rybchenko 
1335ce35b05cSAndrew Rybchenko 	sfc_dma_free(sa, &rxq->mem);
13366774b24eSAndrew Rybchenko 
13376caeec47SAndrew Rybchenko 	sfc_ev_qfini(rxq->evq);
13386caeec47SAndrew Rybchenko 	rxq->evq = NULL;
1339ce35b05cSAndrew Rybchenko }
1340ce35b05cSAndrew Rybchenko 
134101764b20SIvan Malov /*
134201764b20SIvan Malov  * Mapping between RTE RSS hash functions and their EFX counterparts.
134301764b20SIvan Malov  */
1344b74fd6b8SFerruh Yigit static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
1345295968d1SFerruh Yigit 	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
134601764b20SIvan Malov 	  EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
1347295968d1SFerruh Yigit 	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
134801764b20SIvan Malov 	  EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
1349295968d1SFerruh Yigit 	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX,
135001764b20SIvan Malov 	  EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
1351295968d1SFerruh Yigit 	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX,
135201764b20SIvan Malov 	  EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
1353295968d1SFerruh Yigit 	{ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
135401764b20SIvan Malov 	  EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
135501764b20SIvan Malov 	  EFX_RX_HASH(IPV4, 2TUPLE) },
1356295968d1SFerruh Yigit 	{ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
1357295968d1SFerruh Yigit 	  RTE_ETH_RSS_IPV6_EX,
135801764b20SIvan Malov 	  EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
135901764b20SIvan Malov 	  EFX_RX_HASH(IPV6, 2TUPLE) }
136001764b20SIvan Malov };
136101764b20SIvan Malov 
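/*
 * Mask off requested hash type flags which are not supported: for each
 * supported flag, rebuild the corresponding candidate from hash_type
 * (the tuple field masked with EFX_RX_CLASS_HASH_4TUPLE shifted to the
 * class LBN) and keep it only on an exact match.
 */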
136201764b20SIvan Malov static efx_rx_hash_type_t
136301764b20SIvan Malov sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type,
136401764b20SIvan Malov 			    unsigned int *hash_type_flags_supported,
136501764b20SIvan Malov 			    unsigned int nb_hash_type_flags_supported)
13664ec1fc3bSIvan Malov {
136701764b20SIvan Malov 	efx_rx_hash_type_t hash_type_masked = 0;
136801764b20SIvan Malov 	unsigned int i, j;
13694ec1fc3bSIvan Malov 
137001764b20SIvan Malov 	for (i = 0; i < nb_hash_type_flags_supported; ++i) {
137101764b20SIvan Malov 		unsigned int class_tuple_lbn[] = {
137201764b20SIvan Malov 			EFX_RX_CLASS_IPV4_TCP_LBN,
137301764b20SIvan Malov 			EFX_RX_CLASS_IPV4_UDP_LBN,
137401764b20SIvan Malov 			EFX_RX_CLASS_IPV4_LBN,
137501764b20SIvan Malov 			EFX_RX_CLASS_IPV6_TCP_LBN,
137601764b20SIvan Malov 			EFX_RX_CLASS_IPV6_UDP_LBN,
137701764b20SIvan Malov 			EFX_RX_CLASS_IPV6_LBN
137801764b20SIvan Malov 		};
13794ec1fc3bSIvan Malov 
138001764b20SIvan Malov 		for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) {
138101764b20SIvan Malov 			unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE;
138201764b20SIvan Malov 			unsigned int flag;
13834ec1fc3bSIvan Malov 
138401764b20SIvan Malov 			tuple_mask <<= class_tuple_lbn[j];
138501764b20SIvan Malov 			flag = hash_type & tuple_mask;
13864ec1fc3bSIvan Malov 
138701764b20SIvan Malov 			if (flag == hash_type_flags_supported[i])
138801764b20SIvan Malov 				hash_type_masked |= flag;
138901764b20SIvan Malov 		}
139001764b20SIvan Malov 	}
13914ec1fc3bSIvan Malov 
139201764b20SIvan Malov 	return hash_type_masked;
139301764b20SIvan Malov }
139401764b20SIvan Malov 
139501764b20SIvan Malov int
139601764b20SIvan Malov sfc_rx_hash_init(struct sfc_adapter *sa)
139701764b20SIvan Malov {
1398e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
139901764b20SIvan Malov 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
140001764b20SIvan Malov 	uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask;
140101764b20SIvan Malov 	efx_rx_hash_alg_t alg;
140201764b20SIvan Malov 	unsigned int flags_supp[EFX_RX_HASH_NFLAGS];
140301764b20SIvan Malov 	unsigned int nb_flags_supp;
140401764b20SIvan Malov 	struct sfc_rss_hf_rte_to_efx *hf_map;
140501764b20SIvan Malov 	struct sfc_rss_hf_rte_to_efx *entry;
140601764b20SIvan Malov 	efx_rx_hash_type_t efx_hash_types;
140701764b20SIvan Malov 	unsigned int i;
140801764b20SIvan Malov 	int rc;
140901764b20SIvan Malov 
141001764b20SIvan Malov 	if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ))
141101764b20SIvan Malov 		alg = EFX_RX_HASHALG_TOEPLITZ;
141201764b20SIvan Malov 	else if (alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM))
141301764b20SIvan Malov 		alg = EFX_RX_HASHALG_PACKED_STREAM;
141401764b20SIvan Malov 	else
141501764b20SIvan Malov 		return EINVAL;
141601764b20SIvan Malov 
141701764b20SIvan Malov 	rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp,
1418c4ea98c4SIvan Malov 					 RTE_DIM(flags_supp), &nb_flags_supp);
141901764b20SIvan Malov 	if (rc != 0)
142001764b20SIvan Malov 		return rc;
142101764b20SIvan Malov 
142201764b20SIvan Malov 	hf_map = rte_calloc_socket("sfc-rss-hf-map",
142301764b20SIvan Malov 				   RTE_DIM(sfc_rss_hf_map),
142401764b20SIvan Malov 				   sizeof(*hf_map), 0, sa->socket_id);
142501764b20SIvan Malov 	if (hf_map == NULL)
142601764b20SIvan Malov 		return ENOMEM;
142701764b20SIvan Malov 
142801764b20SIvan Malov 	entry = hf_map;
142901764b20SIvan Malov 	efx_hash_types = 0;
143001764b20SIvan Malov 	for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) {
143101764b20SIvan Malov 		efx_rx_hash_type_t ht;
143201764b20SIvan Malov 
143301764b20SIvan Malov 		ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx,
143401764b20SIvan Malov 						 flags_supp, nb_flags_supp);
143501764b20SIvan Malov 		if (ht != 0) {
143601764b20SIvan Malov 			entry->rte = sfc_rss_hf_map[i].rte;
143701764b20SIvan Malov 			entry->efx = ht;
143801764b20SIvan Malov 			efx_hash_types |= ht;
143901764b20SIvan Malov 			++entry;
144001764b20SIvan Malov 		}
144101764b20SIvan Malov 	}
144201764b20SIvan Malov 
144301764b20SIvan Malov 	rss->hash_alg = alg;
144401764b20SIvan Malov 	rss->hf_map_nb_entries = (unsigned int)(entry - hf_map);
144501764b20SIvan Malov 	rss->hf_map = hf_map;
144601764b20SIvan Malov 	rss->hash_types = efx_hash_types;
144701764b20SIvan Malov 
144801764b20SIvan Malov 	return 0;
144901764b20SIvan Malov }
145001764b20SIvan Malov 
145101764b20SIvan Malov void
145201764b20SIvan Malov sfc_rx_hash_fini(struct sfc_adapter *sa)
145301764b20SIvan Malov {
1454e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
145501764b20SIvan Malov 
145601764b20SIvan Malov 	rte_free(rss->hf_map);
145701764b20SIvan Malov }
145801764b20SIvan Malov 
145901764b20SIvan Malov int
146001764b20SIvan Malov sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
146101764b20SIvan Malov 		     efx_rx_hash_type_t *efx)
146201764b20SIvan Malov {
1463e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
146401764b20SIvan Malov 	efx_rx_hash_type_t hash_types = 0;
146501764b20SIvan Malov 	unsigned int i;
146601764b20SIvan Malov 
146701764b20SIvan Malov 	for (i = 0; i < rss->hf_map_nb_entries; ++i) {
146801764b20SIvan Malov 		uint64_t rte_mask = rss->hf_map[i].rte;
146901764b20SIvan Malov 
147001764b20SIvan Malov 		if ((rte & rte_mask) != 0) {
147101764b20SIvan Malov 			rte &= ~rte_mask;
147201764b20SIvan Malov 			hash_types |= rss->hf_map[i].efx;
147301764b20SIvan Malov 		}
147401764b20SIvan Malov 	}
147501764b20SIvan Malov 
147601764b20SIvan Malov 	if (rte != 0) {
147701764b20SIvan Malov 		sfc_err(sa, "unsupported hash functions requested");
147801764b20SIvan Malov 		return EINVAL;
147901764b20SIvan Malov 	}
148001764b20SIvan Malov 
148101764b20SIvan Malov 	*efx = hash_types;
148201764b20SIvan Malov 
148301764b20SIvan Malov 	return 0;
14844ec1fc3bSIvan Malov }
1485088e1721SIvan Malov 
1486088e1721SIvan Malov uint64_t
1487e295f175SAndrew Rybchenko sfc_rx_hf_efx_to_rte(struct sfc_rss *rss, efx_rx_hash_type_t efx)
1488088e1721SIvan Malov {
148901764b20SIvan Malov 	uint64_t rte = 0;
149001764b20SIvan Malov 	unsigned int i;
1491088e1721SIvan Malov 
149201764b20SIvan Malov 	for (i = 0; i < rss->hf_map_nb_entries; ++i) {
149301764b20SIvan Malov 		efx_rx_hash_type_t hash_type = rss->hf_map[i].efx;
1494088e1721SIvan Malov 
149501764b20SIvan Malov 		if ((efx & hash_type) == hash_type)
149601764b20SIvan Malov 			rte |= rss->hf_map[i].rte;
149701764b20SIvan Malov 	}
1498088e1721SIvan Malov 
149901764b20SIvan Malov 	return rte;
1500088e1721SIvan Malov }
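/*
 * The two helpers above translate hash function bits via rss->hf_map:
 * for example, if the map built by sfc_rx_hash_init() contains the
 * RTE_ETH_RSS_NONFRAG_IPV4_TCP entry, sfc_rx_hf_rte_to_efx() turns that
 * bit into EFX_RX_HASH(IPV4_TCP, 4TUPLE) and sfc_rx_hf_efx_to_rte()
 * turns it back; requested bits with no map entry make
 * sfc_rx_hf_rte_to_efx() fail with EINVAL.
 */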
15014ec1fc3bSIvan Malov 
15024ec1fc3bSIvan Malov static int
15037803554aSIvan Malov sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa,
15047803554aSIvan Malov 			    struct rte_eth_rss_conf *conf)
15057803554aSIvan Malov {
1506e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
1507d1482e21SIvan Malov 	efx_rx_hash_type_t efx_hash_types = rss->hash_types;
1508e295f175SAndrew Rybchenko 	uint64_t rss_hf = sfc_rx_hf_efx_to_rte(rss, efx_hash_types);
150901764b20SIvan Malov 	int rc;
15107803554aSIvan Malov 
1511d1482e21SIvan Malov 	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
151201764b20SIvan Malov 		if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) ||
15137803554aSIvan Malov 		    conf->rss_key != NULL)
15147803554aSIvan Malov 			return EINVAL;
15157803554aSIvan Malov 	}
15167803554aSIvan Malov 
15177803554aSIvan Malov 	if (conf->rss_hf != 0) {
151801764b20SIvan Malov 		rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types);
151901764b20SIvan Malov 		if (rc != 0)
152001764b20SIvan Malov 			return rc;
15217803554aSIvan Malov 	}
15227803554aSIvan Malov 
15237803554aSIvan Malov 	if (conf->rss_key != NULL) {
1524d1482e21SIvan Malov 		if (conf->rss_key_len != sizeof(rss->key)) {
15256b9a30d9SFerruh Yigit 			sfc_err(sa, "RSS key size is wrong (should be %zu)",
1526d1482e21SIvan Malov 				sizeof(rss->key));
15277803554aSIvan Malov 			return EINVAL;
15287803554aSIvan Malov 		}
1529d1482e21SIvan Malov 		rte_memcpy(rss->key, conf->rss_key, sizeof(rss->key));
15307803554aSIvan Malov 	}
15317803554aSIvan Malov 
1532d1482e21SIvan Malov 	rss->hash_types = efx_hash_types;
15337803554aSIvan Malov 
15347803554aSIvan Malov 	return 0;
15357803554aSIvan Malov }
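/*
 * The rte_eth_rss_conf processed above normally comes from the
 * application's device configuration. A minimal sketch of the
 * application side (not part of the driver; port_id, nb_rxq and nb_txq
 * are placeholders):
 *
 *	struct rte_eth_conf conf = {
 *		.rxmode = { .mq_mode = RTE_ETH_MQ_RX_RSS },
 *		.rx_adv_conf.rss_conf = {
 *			.rss_hf = RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP,
 *			.rss_key = NULL,
 *		},
 *	};
 *	rte_eth_dev_configure(port_id, nb_rxq, nb_txq, &conf);
 */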
15367803554aSIvan Malov 
15377803554aSIvan Malov static int
15384ec1fc3bSIvan Malov sfc_rx_rss_config(struct sfc_adapter *sa)
15394ec1fc3bSIvan Malov {
1540e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
15414ec1fc3bSIvan Malov 	int rc = 0;
15424ec1fc3bSIvan Malov 
1543d1482e21SIvan Malov 	if (rss->channels > 0) {
154403081632SMark Spender 		rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
154501764b20SIvan Malov 					   rss->hash_alg, rss->hash_types,
154601764b20SIvan Malov 					   B_TRUE);
15474ec1fc3bSIvan Malov 		if (rc != 0)
15484ec1fc3bSIvan Malov 			goto finish;
15494ec1fc3bSIvan Malov 
155003081632SMark Spender 		rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1551d1482e21SIvan Malov 					  rss->key, sizeof(rss->key));
15524ec1fc3bSIvan Malov 		if (rc != 0)
15534ec1fc3bSIvan Malov 			goto finish;
15544ec1fc3bSIvan Malov 
155503081632SMark Spender 		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1556d1482e21SIvan Malov 					  rss->tbl, RTE_DIM(rss->tbl));
15574ec1fc3bSIvan Malov 	}
15584ec1fc3bSIvan Malov 
15594ec1fc3bSIvan Malov finish:
15604ec1fc3bSIvan Malov 	return rc;
15614ec1fc3bSIvan Malov }
15624ec1fc3bSIvan Malov 
156309cafbddSIgor Romanov struct sfc_rxq_info *
156409cafbddSIgor Romanov sfc_rxq_info_by_ethdev_qid(struct sfc_adapter_shared *sas,
156509cafbddSIgor Romanov 			   sfc_ethdev_qid_t ethdev_qid)
156609cafbddSIgor Romanov {
156709cafbddSIgor Romanov 	sfc_sw_index_t sw_index;
156809cafbddSIgor Romanov 
156909cafbddSIgor Romanov 	SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_rxq_count);
157009cafbddSIgor Romanov 	SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);
157109cafbddSIgor Romanov 
157209cafbddSIgor Romanov 	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, ethdev_qid);
157309cafbddSIgor Romanov 	return &sas->rxq_info[sw_index];
157409cafbddSIgor Romanov }
157509cafbddSIgor Romanov 
157609cafbddSIgor Romanov struct sfc_rxq *
157709cafbddSIgor Romanov sfc_rxq_ctrl_by_ethdev_qid(struct sfc_adapter *sa, sfc_ethdev_qid_t ethdev_qid)
157809cafbddSIgor Romanov {
157909cafbddSIgor Romanov 	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
158009cafbddSIgor Romanov 	sfc_sw_index_t sw_index;
158109cafbddSIgor Romanov 
158209cafbddSIgor Romanov 	SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_rxq_count);
158309cafbddSIgor Romanov 	SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);
158409cafbddSIgor Romanov 
158509cafbddSIgor Romanov 	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, ethdev_qid);
158609cafbddSIgor Romanov 	return &sa->rxq_ctrl[sw_index];
158709cafbddSIgor Romanov }
158809cafbddSIgor Romanov 
158928944ac0SAndrew Rybchenko int
159028944ac0SAndrew Rybchenko sfc_rx_start(struct sfc_adapter *sa)
159128944ac0SAndrew Rybchenko {
1592dda791c2SAndrew Rybchenko 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
159309cafbddSIgor Romanov 	sfc_sw_index_t sw_index;
159428944ac0SAndrew Rybchenko 	int rc;
159528944ac0SAndrew Rybchenko 
159609cafbddSIgor Romanov 	sfc_log_init(sa, "rxq_count=%u (internal %u)", sas->ethdev_rxq_count,
159709cafbddSIgor Romanov 		     sas->rxq_count);
159828944ac0SAndrew Rybchenko 
159928944ac0SAndrew Rybchenko 	rc = efx_rx_init(sa->nic);
160028944ac0SAndrew Rybchenko 	if (rc != 0)
160128944ac0SAndrew Rybchenko 		goto fail_rx_init;
160228944ac0SAndrew Rybchenko 
16034ec1fc3bSIvan Malov 	rc = sfc_rx_rss_config(sa);
16044ec1fc3bSIvan Malov 	if (rc != 0)
16054ec1fc3bSIvan Malov 		goto fail_rss_config;
16064ec1fc3bSIvan Malov 
1607dda791c2SAndrew Rybchenko 	for (sw_index = 0; sw_index < sas->rxq_count; ++sw_index) {
1608dda791c2SAndrew Rybchenko 		if (sas->rxq_info[sw_index].state == SFC_RXQ_INITIALIZED &&
1609dda791c2SAndrew Rybchenko 		    (!sas->rxq_info[sw_index].deferred_start ||
1610dda791c2SAndrew Rybchenko 		     sas->rxq_info[sw_index].deferred_started)) {
161128944ac0SAndrew Rybchenko 			rc = sfc_rx_qstart(sa, sw_index);
161228944ac0SAndrew Rybchenko 			if (rc != 0)
161328944ac0SAndrew Rybchenko 				goto fail_rx_qstart;
161428944ac0SAndrew Rybchenko 		}
1615ac7af396SAndrew Rybchenko 	}
161628944ac0SAndrew Rybchenko 
161728944ac0SAndrew Rybchenko 	return 0;
161828944ac0SAndrew Rybchenko 
161928944ac0SAndrew Rybchenko fail_rx_qstart:
162028944ac0SAndrew Rybchenko 	while (sw_index-- > 0)
162128944ac0SAndrew Rybchenko 		sfc_rx_qstop(sa, sw_index);
162228944ac0SAndrew Rybchenko 
16234ec1fc3bSIvan Malov fail_rss_config:
162428944ac0SAndrew Rybchenko 	efx_rx_fini(sa->nic);
162528944ac0SAndrew Rybchenko 
162628944ac0SAndrew Rybchenko fail_rx_init:
162728944ac0SAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
162828944ac0SAndrew Rybchenko 	return rc;
162928944ac0SAndrew Rybchenko }
163028944ac0SAndrew Rybchenko 
163128944ac0SAndrew Rybchenko void
163228944ac0SAndrew Rybchenko sfc_rx_stop(struct sfc_adapter *sa)
163328944ac0SAndrew Rybchenko {
1634dda791c2SAndrew Rybchenko 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
163509cafbddSIgor Romanov 	sfc_sw_index_t sw_index;
163628944ac0SAndrew Rybchenko 
163709cafbddSIgor Romanov 	sfc_log_init(sa, "rxq_count=%u (internal %u)", sas->ethdev_rxq_count,
163809cafbddSIgor Romanov 		     sas->rxq_count);
163928944ac0SAndrew Rybchenko 
1640dda791c2SAndrew Rybchenko 	sw_index = sas->rxq_count;
164128944ac0SAndrew Rybchenko 	while (sw_index-- > 0) {
1642dda791c2SAndrew Rybchenko 		if (sas->rxq_info[sw_index].state & SFC_RXQ_STARTED)
164328944ac0SAndrew Rybchenko 			sfc_rx_qstop(sa, sw_index);
164428944ac0SAndrew Rybchenko 	}
164528944ac0SAndrew Rybchenko 
164628944ac0SAndrew Rybchenko 	efx_rx_fini(sa->nic);
164728944ac0SAndrew Rybchenko }
164828944ac0SAndrew Rybchenko 
1649b8cf5ba5SIgor Romanov int
1650b8cf5ba5SIgor Romanov sfc_rx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
1651b8cf5ba5SIgor Romanov 		  unsigned int extra_efx_type_flags)
1652a8e64c6bSAndrew Rybchenko {
1653dda791c2SAndrew Rybchenko 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1654dda791c2SAndrew Rybchenko 	struct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index];
1655048a0d1aSIgor Romanov 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1656a8e64c6bSAndrew Rybchenko 	unsigned int max_entries;
1657a8e64c6bSAndrew Rybchenko 
1658048a0d1aSIgor Romanov 	max_entries = encp->enc_rxq_max_ndescs;
1659a8e64c6bSAndrew Rybchenko 	SFC_ASSERT(rte_is_power_of_2(max_entries));
1660a8e64c6bSAndrew Rybchenko 
1661a8e64c6bSAndrew Rybchenko 	rxq_info->max_entries = max_entries;
1662b8cf5ba5SIgor Romanov 	rxq_info->type_flags = extra_efx_type_flags;
1663a8e64c6bSAndrew Rybchenko 
1664a8e64c6bSAndrew Rybchenko 	return 0;
1665a8e64c6bSAndrew Rybchenko }
1666a8e64c6bSAndrew Rybchenko 
1667976f2e5aSAndrew Rybchenko static int
1668976f2e5aSAndrew Rybchenko sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
1669976f2e5aSAndrew Rybchenko {
1670e295f175SAndrew Rybchenko 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1671eacbad76SAndrew Rybchenko 	uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
1672eacbad76SAndrew Rybchenko 				      sfc_rx_get_queue_offload_caps(sa);
1673e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sas->rss;
1674976f2e5aSAndrew Rybchenko 	int rc = 0;
1675976f2e5aSAndrew Rybchenko 
1676976f2e5aSAndrew Rybchenko 	switch (rxmode->mq_mode) {
1677295968d1SFerruh Yigit 	case RTE_ETH_MQ_RX_NONE:
1678976f2e5aSAndrew Rybchenko 		/* No special checks are required */
1679976f2e5aSAndrew Rybchenko 		break;
1680295968d1SFerruh Yigit 	case RTE_ETH_MQ_RX_RSS:
1681d1482e21SIvan Malov 		if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
16824ec1fc3bSIvan Malov 			sfc_err(sa, "RSS is not available");
16834ec1fc3bSIvan Malov 			rc = EINVAL;
16844ec1fc3bSIvan Malov 		}
16854ec1fc3bSIvan Malov 		break;
1686976f2e5aSAndrew Rybchenko 	default:
1687976f2e5aSAndrew Rybchenko 		sfc_err(sa, "Rx multi-queue mode %u not supported",
1688976f2e5aSAndrew Rybchenko 			rxmode->mq_mode);
1689976f2e5aSAndrew Rybchenko 		rc = EINVAL;
1690976f2e5aSAndrew Rybchenko 	}
1691976f2e5aSAndrew Rybchenko 
16924a61f164SAndrew Rybchenko 	/*
16934a61f164SAndrew Rybchenko 	 * Requested offloads have been validated against those supported
16944a61f164SAndrew Rybchenko 	 * by the ethdev layer, so unsupported offloads cannot be added as
16954a61f164SAndrew Rybchenko 	 * a result of the check below.
16964a61f164SAndrew Rybchenko 	 */
1697295968d1SFerruh Yigit 	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM) !=
1698295968d1SFerruh Yigit 	    (offloads_supported & RTE_ETH_RX_OFFLOAD_CHECKSUM)) {
1699eacbad76SAndrew Rybchenko 		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
1700295968d1SFerruh Yigit 		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
1701eacbad76SAndrew Rybchenko 	}
1702eacbad76SAndrew Rybchenko 
1703295968d1SFerruh Yigit 	if ((offloads_supported & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
1704295968d1SFerruh Yigit 	    (~rxmode->offloads & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
1705eacbad76SAndrew Rybchenko 		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
1706295968d1SFerruh Yigit 		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
1707eacbad76SAndrew Rybchenko 	}
1708eacbad76SAndrew Rybchenko 
1709976f2e5aSAndrew Rybchenko 	return rc;
1710976f2e5aSAndrew Rybchenko }
1711976f2e5aSAndrew Rybchenko 
1712a8e64c6bSAndrew Rybchenko /**
171355a53900SAndrew Rybchenko  * Destroy excess queues that are no longer needed after reconfiguration
171455a53900SAndrew Rybchenko  * or complete close.
171555a53900SAndrew Rybchenko  */
171655a53900SAndrew Rybchenko static void
171755a53900SAndrew Rybchenko sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
171855a53900SAndrew Rybchenko {
1719dda791c2SAndrew Rybchenko 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
172009cafbddSIgor Romanov 	sfc_sw_index_t sw_index;
172109cafbddSIgor Romanov 	sfc_ethdev_qid_t ethdev_qid;
172255a53900SAndrew Rybchenko 
172309cafbddSIgor Romanov 	SFC_ASSERT(nb_rx_queues <= sas->ethdev_rxq_count);
172455a53900SAndrew Rybchenko 
172509cafbddSIgor Romanov 	/*
172609cafbddSIgor Romanov 	 * Finalize only ethdev queues since other ones are finalized only
17277be78d02SJosh Soref 	 * on device close and they may require additional deinitialization.
172809cafbddSIgor Romanov 	 */
172909cafbddSIgor Romanov 	ethdev_qid = sas->ethdev_rxq_count;
173009cafbddSIgor Romanov 	while (--ethdev_qid >= (int)nb_rx_queues) {
173109cafbddSIgor Romanov 		struct sfc_rxq_info *rxq_info;
173209cafbddSIgor Romanov 
173309cafbddSIgor Romanov 		rxq_info = sfc_rxq_info_by_ethdev_qid(sas, ethdev_qid);
173409cafbddSIgor Romanov 		if (rxq_info->state & SFC_RXQ_INITIALIZED) {
173509cafbddSIgor Romanov 			sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas,
173609cafbddSIgor Romanov 								ethdev_qid);
173755a53900SAndrew Rybchenko 			sfc_rx_qfini(sa, sw_index);
173855a53900SAndrew Rybchenko 		}
173955a53900SAndrew Rybchenko 
174009cafbddSIgor Romanov 	}
174109cafbddSIgor Romanov 
174209cafbddSIgor Romanov 	sas->ethdev_rxq_count = nb_rx_queues;
174355a53900SAndrew Rybchenko }
174455a53900SAndrew Rybchenko 
174555a53900SAndrew Rybchenko /**
1746a8e64c6bSAndrew Rybchenko  * Initialize Rx subsystem.
1747a8e64c6bSAndrew Rybchenko  *
174855a53900SAndrew Rybchenko  * Called at the device (re)configuration stage when the number of receive
1749a8e64c6bSAndrew Rybchenko  * queues is specified together with other device-level receive configuration.
1750a8e64c6bSAndrew Rybchenko  *
1751a8e64c6bSAndrew Rybchenko  * It should be used to allocate NUMA-unaware resources.
1752a8e64c6bSAndrew Rybchenko  */
1753a8e64c6bSAndrew Rybchenko int
1754f7637d4dSAndrew Rybchenko sfc_rx_configure(struct sfc_adapter *sa)
1755a8e64c6bSAndrew Rybchenko {
1756dda791c2SAndrew Rybchenko 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
1757e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sas->rss;
1758976f2e5aSAndrew Rybchenko 	struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
175955a53900SAndrew Rybchenko 	const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
1760983ce116SIgor Romanov 	const unsigned int nb_rsrv_rx_queues = sfc_nb_reserved_rxq(sas);
1761983ce116SIgor Romanov 	const unsigned int nb_rxq_total = nb_rx_queues + nb_rsrv_rx_queues;
1762983ce116SIgor Romanov 	bool reconfigure;
1763a8e64c6bSAndrew Rybchenko 	int rc;
1764a8e64c6bSAndrew Rybchenko 
176555a53900SAndrew Rybchenko 	sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
176609cafbddSIgor Romanov 		     nb_rx_queues, sas->ethdev_rxq_count);
176755a53900SAndrew Rybchenko 
1768976f2e5aSAndrew Rybchenko 	rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
1769976f2e5aSAndrew Rybchenko 	if (rc != 0)
1770976f2e5aSAndrew Rybchenko 		goto fail_check_mode;
1771976f2e5aSAndrew Rybchenko 
1772983ce116SIgor Romanov 	if (nb_rxq_total == sas->rxq_count) {
1773983ce116SIgor Romanov 		reconfigure = true;
17745205c436SIvan Malov 		goto configure_rss;
1775983ce116SIgor Romanov 	}
1776a8e64c6bSAndrew Rybchenko 
1777dda791c2SAndrew Rybchenko 	if (sas->rxq_info == NULL) {
1778983ce116SIgor Romanov 		reconfigure = false;
1779a8e64c6bSAndrew Rybchenko 		rc = ENOMEM;
1780983ce116SIgor Romanov 		sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rxq_total,
1781dda791c2SAndrew Rybchenko 						  sizeof(sas->rxq_info[0]), 0,
1782a8e64c6bSAndrew Rybchenko 						  sa->socket_id);
1783dda791c2SAndrew Rybchenko 		if (sas->rxq_info == NULL)
1784a8e64c6bSAndrew Rybchenko 			goto fail_rxqs_alloc;
17854e8938ddSAndrew Rybchenko 
17864e8938ddSAndrew Rybchenko 		/*
17874e8938ddSAndrew Rybchenko 		 * Allocate the primary-process-only RxQ control array from
17884e8938ddSAndrew Rybchenko 		 * the heap since it should not be shared.
17894e8938ddSAndrew Rybchenko 		 */
17904e8938ddSAndrew Rybchenko 		rc = ENOMEM;
1791983ce116SIgor Romanov 		sa->rxq_ctrl = calloc(nb_rxq_total, sizeof(sa->rxq_ctrl[0]));
17924e8938ddSAndrew Rybchenko 		if (sa->rxq_ctrl == NULL)
17934e8938ddSAndrew Rybchenko 			goto fail_rxqs_ctrl_alloc;
179455a53900SAndrew Rybchenko 	} else {
179555a53900SAndrew Rybchenko 		struct sfc_rxq_info *new_rxq_info;
17964e8938ddSAndrew Rybchenko 		struct sfc_rxq *new_rxq_ctrl;
1797a8e64c6bSAndrew Rybchenko 
1798983ce116SIgor Romanov 		reconfigure = true;
1799983ce116SIgor Romanov 
18007be78d02SJosh Soref 		/* Do not uninitialize reserved queues */
180109cafbddSIgor Romanov 		if (nb_rx_queues < sas->ethdev_rxq_count)
180255a53900SAndrew Rybchenko 			sfc_rx_fini_queues(sa, nb_rx_queues);
180355a53900SAndrew Rybchenko 
180455a53900SAndrew Rybchenko 		rc = ENOMEM;
180555a53900SAndrew Rybchenko 		new_rxq_info =
1806dda791c2SAndrew Rybchenko 			rte_realloc(sas->rxq_info,
1807983ce116SIgor Romanov 				    nb_rxq_total * sizeof(sas->rxq_info[0]), 0);
1808983ce116SIgor Romanov 		if (new_rxq_info == NULL && nb_rxq_total > 0)
180955a53900SAndrew Rybchenko 			goto fail_rxqs_realloc;
181055a53900SAndrew Rybchenko 
18114e8938ddSAndrew Rybchenko 		rc = ENOMEM;
18124e8938ddSAndrew Rybchenko 		new_rxq_ctrl = realloc(sa->rxq_ctrl,
1813983ce116SIgor Romanov 				       nb_rxq_total * sizeof(sa->rxq_ctrl[0]));
1814983ce116SIgor Romanov 		if (new_rxq_ctrl == NULL && nb_rxq_total > 0)
18154e8938ddSAndrew Rybchenko 			goto fail_rxqs_ctrl_realloc;
18164e8938ddSAndrew Rybchenko 
1817dda791c2SAndrew Rybchenko 		sas->rxq_info = new_rxq_info;
18184e8938ddSAndrew Rybchenko 		sa->rxq_ctrl = new_rxq_ctrl;
1819983ce116SIgor Romanov 		if (nb_rxq_total > sas->rxq_count) {
182009cafbddSIgor Romanov 			unsigned int rxq_count = sas->rxq_count;
182109cafbddSIgor Romanov 
182209cafbddSIgor Romanov 			memset(&sas->rxq_info[rxq_count], 0,
1823983ce116SIgor Romanov 			       (nb_rxq_total - rxq_count) *
1824dda791c2SAndrew Rybchenko 			       sizeof(sas->rxq_info[0]));
182509cafbddSIgor Romanov 			memset(&sa->rxq_ctrl[rxq_count], 0,
1826983ce116SIgor Romanov 			       (nb_rxq_total - rxq_count) *
18274e8938ddSAndrew Rybchenko 			       sizeof(sa->rxq_ctrl[0]));
18284e8938ddSAndrew Rybchenko 		}
182955a53900SAndrew Rybchenko 	}
183055a53900SAndrew Rybchenko 
183109cafbddSIgor Romanov 	while (sas->ethdev_rxq_count < nb_rx_queues) {
183209cafbddSIgor Romanov 		sfc_sw_index_t sw_index;
183309cafbddSIgor Romanov 
183409cafbddSIgor Romanov 		sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas,
183509cafbddSIgor Romanov 							sas->ethdev_rxq_count);
1836b8cf5ba5SIgor Romanov 		rc = sfc_rx_qinit_info(sa, sw_index, 0);
1837a8e64c6bSAndrew Rybchenko 		if (rc != 0)
1838a8e64c6bSAndrew Rybchenko 			goto fail_rx_qinit_info;
183955a53900SAndrew Rybchenko 
184009cafbddSIgor Romanov 		sas->ethdev_rxq_count++;
1841a8e64c6bSAndrew Rybchenko 	}
1842a8e64c6bSAndrew Rybchenko 
1843983ce116SIgor Romanov 	sas->rxq_count = sas->ethdev_rxq_count + nb_rsrv_rx_queues;
1844983ce116SIgor Romanov 
1845983ce116SIgor Romanov 	if (!reconfigure) {
1846983ce116SIgor Romanov 		rc = sfc_mae_counter_rxq_init(sa);
1847983ce116SIgor Romanov 		if (rc != 0)
1848983ce116SIgor Romanov 			goto fail_count_rxq_init;
1849983ce116SIgor Romanov 	}
185009cafbddSIgor Romanov 
18515205c436SIvan Malov configure_rss:
1852295968d1SFerruh Yigit 	rss->channels = (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) ?
185309cafbddSIgor Romanov 			 MIN(sas->ethdev_rxq_count, EFX_MAXRSS) : 0;
18544ec1fc3bSIvan Malov 
1855d1482e21SIvan Malov 	if (rss->channels > 0) {
18567803554aSIvan Malov 		struct rte_eth_rss_conf *adv_conf_rss;
185709cafbddSIgor Romanov 		sfc_sw_index_t sw_index;
1858879b01bdSIvan Malov 
18594ec1fc3bSIvan Malov 		for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
1860d1482e21SIvan Malov 			rss->tbl[sw_index] = sw_index % rss->channels;
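		/*
		 * For example, with rss->channels == 4 the default
		 * indirection table becomes 0, 1, 2, 3, 0, 1, 2, 3, ...
		 * across all EFX_RSS_TBL_SIZE entries.
		 */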
18617803554aSIvan Malov 
18627803554aSIvan Malov 		adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf;
18637803554aSIvan Malov 		rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss);
18647803554aSIvan Malov 		if (rc != 0)
18657803554aSIvan Malov 			goto fail_rx_process_adv_conf_rss;
18664ec1fc3bSIvan Malov 	}
18674ec1fc3bSIvan Malov 
1868a8e64c6bSAndrew Rybchenko 	return 0;
1869a8e64c6bSAndrew Rybchenko 
18707803554aSIvan Malov fail_rx_process_adv_conf_rss:
1871983ce116SIgor Romanov 	if (!reconfigure)
1872983ce116SIgor Romanov 		sfc_mae_counter_rxq_fini(sa);
1873983ce116SIgor Romanov 
1874983ce116SIgor Romanov fail_count_rxq_init:
1875a8e64c6bSAndrew Rybchenko fail_rx_qinit_info:
18764e8938ddSAndrew Rybchenko fail_rxqs_ctrl_realloc:
187755a53900SAndrew Rybchenko fail_rxqs_realloc:
18784e8938ddSAndrew Rybchenko fail_rxqs_ctrl_alloc:
1879a8e64c6bSAndrew Rybchenko fail_rxqs_alloc:
188055a53900SAndrew Rybchenko 	sfc_rx_close(sa);
188155a53900SAndrew Rybchenko 
1882976f2e5aSAndrew Rybchenko fail_check_mode:
1883a8e64c6bSAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
1884a8e64c6bSAndrew Rybchenko 	return rc;
1885a8e64c6bSAndrew Rybchenko }
1886a8e64c6bSAndrew Rybchenko 
1887a8e64c6bSAndrew Rybchenko /**
1888a8e64c6bSAndrew Rybchenko  * Shutdown Rx subsystem.
1889a8e64c6bSAndrew Rybchenko  *
189055a53900SAndrew Rybchenko  * Called at device close stage, for example, before device shutdown.
1891a8e64c6bSAndrew Rybchenko  */
1892a8e64c6bSAndrew Rybchenko void
1893f7637d4dSAndrew Rybchenko sfc_rx_close(struct sfc_adapter *sa)
1894a8e64c6bSAndrew Rybchenko {
1895e295f175SAndrew Rybchenko 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
1896d1482e21SIvan Malov 
189755a53900SAndrew Rybchenko 	sfc_rx_fini_queues(sa, 0);
1898983ce116SIgor Romanov 	sfc_mae_counter_rxq_fini(sa);
1899ce35b05cSAndrew Rybchenko 
1900d1482e21SIvan Malov 	rss->channels = 0;
19012617ec3fSAndrew Rybchenko 
19024e8938ddSAndrew Rybchenko 	free(sa->rxq_ctrl);
19034e8938ddSAndrew Rybchenko 	sa->rxq_ctrl = NULL;
19044e8938ddSAndrew Rybchenko 
1905dda791c2SAndrew Rybchenko 	rte_free(sfc_sa2shared(sa)->rxq_info);
1906dda791c2SAndrew Rybchenko 	sfc_sa2shared(sa)->rxq_info = NULL;
1907a8e64c6bSAndrew Rybchenko }
1908