/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_flow_tunnel.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_mae_counter.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"

/*
 * Maximum number of Rx queue flush attempts in the case of failure or
 * flush timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS		(3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS	(1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS	(2000)
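
/*
 * Taken together, these values bound the flush wait: each attempt polls
 * for at most SFC_RX_QFLUSH_POLL_ATTEMPTS * SFC_RX_QFLUSH_POLL_WAIT_MS =
 * 2000 * 1 ms = 2 seconds, and with SFC_RX_QFLUSH_ATTEMPTS = 3 retries
 * sfc_rx_qflush() below may delay for roughly 6 seconds in the worst case.
 */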

void
sfc_rx_qflush_done(struct sfc_rxq_info *rxq_info)
{
	rxq_info->state |= SFC_RXQ_FLUSHED;
	rxq_info->state &= ~SFC_RXQ_FLUSHING;
}

void
sfc_rx_qflush_failed(struct sfc_rxq_info *rxq_info)
{
	rxq_info->state |= SFC_RXQ_FLUSH_FAILED;
	rxq_info->state &= ~SFC_RXQ_FLUSHING;
}

/* This returns the running counter, which is not bounded by ring size */
unsigned int
sfc_rx_get_pushed(struct sfc_adapter *sa, struct sfc_dp_rxq *dp_rxq)
{
	SFC_ASSERT(sa->priv.dp_rx->get_pushed != NULL);

	return sa->priv.dp_rx->get_pushed(dp_rxq);
}

static int
sfc_efx_rx_qprime(struct sfc_efx_rxq *rxq)
{
	int rc = 0;

	if (rxq->evq->read_ptr_primed != rxq->evq->read_ptr) {
		rc = efx_ev_qprime(rxq->evq->common, rxq->evq->read_ptr);
		if (rc == 0)
			rxq->evq->read_ptr_primed = rxq->evq->read_ptr;
	}
	return rc;
}

static void
sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
{
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	efsys_dma_addr_t addr[RTE_DIM(objs)];
	unsigned int added = rxq->added;
	unsigned int id;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;
	struct rte_mbuf *m;
	uint16_t port_id = rxq->dp.dpq.port_id;

	free_space = rxq->max_fill_level - (added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(objs);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);

	id = added & rxq->ptr_mask;
	do {
		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
						  RTE_DIM(objs)) < 0)) {
			/*
			 * It is hardly safe to increment the counter from
			 * different contexts, but all PMDs do it.
			 */
			rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
				RTE_DIM(objs);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push posted */
			break;
		}

		for (i = 0; i < RTE_DIM(objs);
		     ++i, id = (id + 1) & rxq->ptr_mask) {
			m = objs[i];

			__rte_mbuf_raw_sanity_check(m);

			rxd = &rxq->sw_desc[id];
			rxd->mbuf = m;

			m->data_off = RTE_PKTMBUF_HEADROOM;
			m->port = port_id;

			addr[i] = rte_pktmbuf_iova(m);
		}

		efx_rx_qpost(rxq->common, addr, rxq->buf_size,
			     RTE_DIM(objs), rxq->completed, added);
		added += RTE_DIM(objs);
	} while (--bulks > 0);

	SFC_ASSERT(added != rxq->added);
	rxq->added = added;
	efx_rx_qpush(rxq->common, added, &rxq->pushed);
	rxq->dp.dpq.dbells++;
}
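
/*
 * Note that 'added', 'pushed' and 'completed' above are free-running
 * counters masked by 'ptr_mask' only when used as ring indices. For
 * example, with 512 ring entries (ptr_mask = 511), added = 1030 and
 * completed = 700 mean that 330 descriptors are in flight and the next
 * descriptor is filled at sw_desc[1030 & 511] = sw_desc[6].
 */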

static uint64_t
sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
	uint64_t mbuf_flags = 0;

	switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
	case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
		mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_GOOD;
		break;
	case EFX_PKT_IPV4:
		mbuf_flags |= RTE_MBUF_F_RX_IP_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & RTE_MBUF_F_RX_IP_CKSUM_MASK) ==
			   RTE_MBUF_F_RX_IP_CKSUM_UNKNOWN);
		break;
	}

	switch ((desc_flags &
		 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
	case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
	case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
		mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_GOOD;
		break;
	case EFX_PKT_TCP:
	case EFX_PKT_UDP:
		mbuf_flags |= RTE_MBUF_F_RX_L4_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & RTE_MBUF_F_RX_L4_CKSUM_MASK) ==
			   RTE_MBUF_F_RX_L4_CKSUM_UNKNOWN);
		break;
	}

	return mbuf_flags;
}

static uint32_t
sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
	return RTE_PTYPE_L2_ETHER |
		((desc_flags & EFX_PKT_IPV4) ?
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_IPV6) ?
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
		((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}
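
/*
 * For example, a descriptor carrying EFX_PKT_IPV4 | EFX_CKSUM_IPV4 |
 * EFX_PKT_TCP | EFX_CKSUM_TCPUDP is reported by the two helpers above as
 * packet type RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN |
 * RTE_PTYPE_L4_TCP with RTE_MBUF_F_RX_IP_CKSUM_GOOD and
 * RTE_MBUF_F_RX_L4_CKSUM_GOOD offload flags.
 */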

static const uint32_t *
sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
			struct rte_mbuf *m)
{
	uint8_t *mbuf_data;

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
		return;

	mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);

	if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
		m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
						      EFX_RX_HASHALG_TOEPLITZ,
						      mbuf_data);

		m->ol_flags |= RTE_MBUF_F_RX_RSS_HASH;
	}
}

static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_dp_rxq *dp_rxq = rx_queue;
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int completed;
	unsigned int prefix_size = rxq->prefix_size;
	unsigned int done_pkts = 0;
	boolean_t discard_next = B_FALSE;
	struct rte_mbuf *scatter_pkt = NULL;

	if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
		return 0;

	sfc_ev_qpoll(rxq->evq);

	completed = rxq->completed;
	while (completed != rxq->pending && done_pkts < nb_pkts) {
		unsigned int id;
		struct sfc_efx_rx_sw_desc *rxd;
		struct rte_mbuf *m;
		unsigned int seg_len;
		unsigned int desc_flags;

		id = completed++ & rxq->ptr_mask;
		rxd = &rxq->sw_desc[id];
		m = rxd->mbuf;
		desc_flags = rxd->flags;

		if (discard_next)
			goto discard;

		if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
			goto discard;

		if (desc_flags & EFX_PKT_PREFIX_LEN) {
			uint16_t tmp_size;
			int rc __rte_unused;

			rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
				rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
			SFC_ASSERT(rc == 0);
			seg_len = tmp_size;
		} else {
			seg_len = rxd->size - prefix_size;
		}

		rte_pktmbuf_data_len(m) = seg_len;
		rte_pktmbuf_pkt_len(m) = seg_len;

		if (scatter_pkt != NULL) {
			if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
				rte_pktmbuf_free(scatter_pkt);
				goto discard;
			}
			/* The packet to deliver */
			m = scatter_pkt;
		}

		if (desc_flags & EFX_PKT_CONT) {
			/* The packet is scattered, more fragments to come */
			scatter_pkt = m;
			/* Further fragments have no prefix */
			prefix_size = 0;
			continue;
		}

		/* Scattered packet is done */
		scatter_pkt = NULL;
		/* The first fragment of the packet has prefix */
		prefix_size = rxq->prefix_size;

		m->ol_flags =
			sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
		m->packet_type =
			sfc_efx_rx_desc_flags_to_packet_type(desc_flags);

		/*
		 * Extract RSS hash from the packet prefix and
		 * set the corresponding field (if needed and possible)
		 */
		sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);

		m->data_off += prefix_size;

		*rx_pkts++ = m;
		done_pkts++;
		continue;

discard:
		discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
		rte_mbuf_raw_free(m);
		rxd->mbuf = NULL;
	}

	/* pending is only moved when an entire packet is received */
	SFC_ASSERT(scatter_pkt == NULL);

	rxq->completed = completed;

	sfc_efx_rx_qrefill(rxq);

	if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN)
		sfc_efx_rx_qprime(rxq);

	return done_pkts;
}

static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
static unsigned int
sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
		return 0;

	sfc_ev_qpoll(rxq->evq);

	return rxq->pending - rxq->completed;
}

static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
static int
sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if (unlikely(offset > rxq->ptr_mask))
		return -EINVAL;

	/*
	 * Poll the EvQ to derive an up-to-date 'rxq->pending' figure;
	 * the queue is required to be running, but the check is omitted
	 * because the API design makes it the caller's duty to satisfy
	 * all preconditions
	 */
	SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
		   SFC_EFX_RXQ_FLAG_RUNNING);
	sfc_ev_qpoll(rxq->evq);

	/*
	 * There is a handful of reserved entries in the ring,
	 * but an explicit check whether the offset points to
	 * a reserved entry is omitted: the two checks below
	 * rely on figures which already take the HW limits
	 * into account, so if an entry is reserved, the checks
	 * fail and the UNAVAIL code is returned
	 */

	if (offset < (rxq->pending - rxq->completed))
		return RTE_ETH_RX_DESC_DONE;

	if (offset < (rxq->added - rxq->completed))
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_UNAVAIL;
}
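
/*
 * To illustrate the two checks above: with completed = 100, pending = 104
 * and added = 112, offsets 0-3 report RTE_ETH_RX_DESC_DONE, offsets 4-11
 * report RTE_ETH_RX_DESC_AVAIL and larger offsets report
 * RTE_ETH_RX_DESC_UNAVAIL.
 */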

boolean_t
sfc_rx_check_scatter(size_t pdu, size_t rx_buf_size, uint32_t rx_prefix_size,
		     boolean_t rx_scatter_enabled, uint32_t rx_scatter_max,
		     const char **error)
{
	uint32_t effective_rx_scatter_max;
	uint32_t rx_scatter_bufs;

	effective_rx_scatter_max = rx_scatter_enabled ? rx_scatter_max : 1;
	rx_scatter_bufs = EFX_DIV_ROUND_UP(pdu + rx_prefix_size, rx_buf_size);

	if (rx_scatter_bufs > effective_rx_scatter_max) {
		if (rx_scatter_enabled)
			*error = "Possible number of Rx scatter buffers exceeds maximum number";
		else
			*error = "Rx scatter is disabled and RxQ mbuf pool object size is too small";
		return B_FALSE;
	}

	return B_TRUE;
}
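
/*
 * For example, a 9000 byte PDU with a 64 byte Rx prefix and 2048 byte Rx
 * buffers needs EFX_DIV_ROUND_UP(9064, 2048) = 5 buffers, so the check
 * passes only if Rx scatter is enabled and rx_scatter_max is at least 5.
 */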

/** Get Rx datapath ops by the datapath RxQ handle */
const struct sfc_dp_rx *
sfc_dp_rx_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter_priv *sap;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sap = sfc_adapter_priv_by_eth_dev(eth_dev);

	return sap->dp_rx;
}

struct sfc_rxq_info *
sfc_rxq_info_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter_shared *sas;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sas = sfc_adapter_shared_by_eth_dev(eth_dev);

	SFC_ASSERT(dpq->queue_id < sas->rxq_count);
	return &sas->rxq_info[dpq->queue_id];
}

struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter *sa;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sa = sfc_adapter_by_eth_dev(eth_dev);

	SFC_ASSERT(dpq->queue_id < sfc_sa2shared(sa)->rxq_count);
	return &sa->rxq_ctrl[dpq->queue_id];
}

static sfc_dp_rx_qsize_up_rings_t sfc_efx_rx_qsize_up_rings;
static int
sfc_efx_rx_qsize_up_rings(uint16_t nb_rx_desc,
			  __rte_unused struct sfc_dp_rx_hw_limits *limits,
			  __rte_unused struct rte_mempool *mb_pool,
			  unsigned int *rxq_entries,
			  unsigned int *evq_entries,
			  unsigned int *rxq_max_fill_level)
{
	*rxq_entries = nb_rx_desc;
	*evq_entries = nb_rx_desc;
	*rxq_max_fill_level = EFX_RXQ_LIMIT(*rxq_entries);
	return 0;
}
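
/*
 * E.g. nb_rx_desc = 512 requests 512-entry Rx and event rings here; the
 * maximum fill level is capped below the ring size by EFX_RXQ_LIMIT()
 * so that the ring is never filled completely.
 */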

static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
static int
sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
		   const struct rte_pci_addr *pci_addr, int socket_id,
		   const struct sfc_dp_rx_qcreate_info *info,
		   struct sfc_dp_rxq **dp_rxqp)
{
	struct sfc_efx_rxq *rxq;
	int rc;

	rc = ENOTSUP;
	if (info->nic_dma_info->nb_regions > 0)
		goto fail_nic_dma;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
					 info->rxq_entries,
					 sizeof(*rxq->sw_desc),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_desc == NULL)
		goto fail_desc_alloc;

	/* efx datapath is bound to efx control path */
	rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
		rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
	rxq->ptr_mask = info->rxq_entries - 1;
	rxq->batch_max = info->batch_max;
	rxq->prefix_size = info->prefix_size;
	rxq->max_fill_level = info->max_fill_level;
	rxq->refill_threshold = info->refill_threshold;
	rxq->buf_size = info->buf_size;
	rxq->refill_mb_pool = info->refill_mb_pool;

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
fail_nic_dma:
	return rc;
}

static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
static void
sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_desc);
	rte_free(rxq);
}

/* qstop and qpurge are forward-declared for use on the qstart failure path */
static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;

static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
static int
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
		  __rte_unused unsigned int evq_read_ptr,
		  const efx_rx_prefix_layout_t *pinfo)
{
	/* libefx-based datapath is specific to libefx-based PMD */
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);
	int rc;

	/*
	 * The libefx API is used to extract information from the Rx prefix
	 * and it guarantees consistency. Just do a length check to ensure
	 * that we reserved space in Rx buffers correctly.
	 */
	if (rxq->prefix_size != pinfo->erpl_length)
		return ENOTSUP;

	rxq->common = crxq->common;

	rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

	sfc_efx_rx_qrefill(rxq);

	rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);

	if (rxq->flags & SFC_EFX_RXQ_FLAG_INTR_EN) {
		rc = sfc_efx_rx_qprime(rxq);
		if (rc != 0)
			goto fail_rx_qprime;
	}

	return 0;

fail_rx_qprime:
	sfc_efx_rx_qstop(dp_rxq, NULL);
	sfc_efx_rx_qpurge(dp_rxq);
	return rc;
}

static void
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
		 __rte_unused unsigned int *evq_read_ptr)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;

	/*
	 * The libefx-based datapath is bound to the libefx-based PMD and
	 * uses the event queue structure directly, so there is no need
	 * to return the EvQ read pointer.
	 */
}

static void
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_desc[i & rxq->ptr_mask];
		rte_mbuf_raw_free(rxd->mbuf);
		rxd->mbuf = NULL;
		/*
		 * Packed stream relies on 0 in inactive SW desc.
		 * Rx queue stop is not performance critical, so
		 * there is no harm in always doing it.
		 */
		rxd->flags = 0;
		rxd->size = 0;
	}

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
}

static sfc_dp_rx_intr_enable_t sfc_efx_rx_intr_enable;
static int
sfc_efx_rx_intr_enable(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	int rc = 0;

	rxq->flags |= SFC_EFX_RXQ_FLAG_INTR_EN;
	if (rxq->flags & SFC_EFX_RXQ_FLAG_STARTED) {
		rc = sfc_efx_rx_qprime(rxq);
		if (rc != 0)
			rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN;
	}
	return rc;
}

static sfc_dp_rx_intr_disable_t sfc_efx_rx_intr_disable;
static int
sfc_efx_rx_intr_disable(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	/* Cannot disarm, just disable rearm */
	rxq->flags &= ~SFC_EFX_RXQ_FLAG_INTR_EN;
	return 0;
}

struct sfc_dp_rx sfc_efx_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EFX,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= SFC_DP_HW_FW_CAP_RX_EFX,
	},
	.features		= SFC_DP_RX_FEAT_INTR,
	.dev_offload_capa	= RTE_ETH_RX_OFFLOAD_CHECKSUM |
				  RTE_ETH_RX_OFFLOAD_RSS_HASH,
	.queue_offload_capa	= RTE_ETH_RX_OFFLOAD_SCATTER,
	.qsize_up_rings		= sfc_efx_rx_qsize_up_rings,
	.qcreate		= sfc_efx_rx_qcreate,
	.qdestroy		= sfc_efx_rx_qdestroy,
	.qstart			= sfc_efx_rx_qstart,
	.qstop			= sfc_efx_rx_qstop,
	.qpurge			= sfc_efx_rx_qpurge,
	.supported_ptypes_get	= sfc_efx_supported_ptypes_get,
	.qdesc_npending		= sfc_efx_rx_qdesc_npending,
	.qdesc_status		= sfc_efx_rx_qdesc_status,
	.intr_enable		= sfc_efx_rx_intr_enable,
	.intr_disable		= sfc_efx_rx_intr_disable,
	.pkt_burst		= sfc_efx_recv_pkts,
};

static void
sfc_rx_qflush(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_ethdev_qid_t ethdev_qid;
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;
	unsigned int retry_count;
	unsigned int wait_count;
	int rc;

	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);
	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);

	rxq = &sa->rxq_ctrl[sw_index];

	/*
	 * Retry Rx queue flushing if a flush fails or times out.
	 * In the worst case it can delay for 6 seconds.
	 */
	for (retry_count = 0;
	     ((rxq_info->state & SFC_RXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		rc = efx_rx_qflush(rxq->common);
		if (rc != 0) {
			rxq_info->state |= (rc == EALREADY) ?
				SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
			break;
		}
		rxq_info->state &= ~SFC_RXQ_FLUSH_FAILED;
		rxq_info->state |= SFC_RXQ_FLUSHING;

		/*
		 * Wait at least SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds
		 * and at most 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS
		 * multiplied by SFC_RX_QFLUSH_POLL_ATTEMPTS) for the Rx
		 * queue flush done or failed event.
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(rxq->evq);
		} while ((rxq_info->state & SFC_RXQ_FLUSHING) &&
			 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

		if (rxq_info->state & SFC_RXQ_FLUSHING)
			sfc_err(sa, "RxQ %d (internal %u) flush timed out",
				ethdev_qid, sw_index);

		if (rxq_info->state & SFC_RXQ_FLUSH_FAILED)
			sfc_err(sa, "RxQ %d (internal %u) flush failed",
				ethdev_qid, sw_index);

		if (rxq_info->state & SFC_RXQ_FLUSHED)
			sfc_notice(sa, "RxQ %d (internal %u) flushed",
				   ethdev_qid, sw_index);
	}

	sa->priv.dp_rx->qpurge(rxq_info->dp);
}

static int
sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	boolean_t need_rss = (rss->channels > 0) ? B_TRUE : B_FALSE;
	struct sfc_port *port = &sa->port;
	int rc;

	/*
	 * If promiscuous or all-multicast mode has been requested, setting
	 * the filter for the default Rx queue might fail, in particular
	 * when running over a PCI function which is not a member of the
	 * corresponding privilege groups; if this occurs, a few iterations
	 * will be made to repeat this step without the promiscuous and
	 * all-multicast flags set
	 */
retry:
	rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, need_rss);
	if (rc == 0)
		return 0;
	else if (rc != EOPNOTSUPP)
		return rc;

	if (port->promisc) {
		sfc_warn(sa, "promiscuous mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "promiscuous mode will be disabled");

		port->promisc = B_FALSE;
		sa->eth_dev->data->promiscuous = 0;
		rc = sfc_set_rx_mode_unchecked(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	if (port->allmulti) {
		sfc_warn(sa, "all-multicast mode has been requested, "
			     "but the HW rejects it");
		sfc_warn(sa, "all-multicast mode will be disabled");

		port->allmulti = B_FALSE;
		sa->eth_dev->data->all_multicast = 0;
		rc = sfc_set_rx_mode_unchecked(sa);
		if (rc != 0)
			return rc;

		goto retry;
	}

	return rc;
}

int
sfc_rx_qstart(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_ethdev_qid_t ethdev_qid;
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;
	struct sfc_evq *evq;
	efx_rx_prefix_layout_t pinfo;
	int rc;

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);

	sfc_log_init(sa, "RxQ %d (internal %u)", ethdev_qid, sw_index);

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];
	SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);

	rxq = &sa->rxq_ctrl[sw_index];
	evq = rxq->evq;

	rc = sfc_ev_qstart(evq, sfc_evq_sw_index_by_rxq_sw_index(sa, sw_index));
	if (rc != 0)
		goto fail_ev_qstart;

	switch (rxq_info->type) {
	case EFX_RXQ_TYPE_DEFAULT:
		rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
			rxq->buf_size,
			&rxq->mem, rxq_info->entries, 0 /* not used on EF10 */,
			rxq_info->type_flags, evq->common, &rxq->common);
		break;
	case EFX_RXQ_TYPE_ES_SUPER_BUFFER: {
		struct rte_mempool *mp = rxq_info->refill_mb_pool;
		struct rte_mempool_info mp_info;

		rc = rte_mempool_ops_get_info(mp, &mp_info);
		if (rc != 0) {
			/* Positive errno is used in the driver */
			rc = -rc;
			goto fail_mp_get_info;
		}
		if (mp_info.contig_block_size <= 0) {
			rc = EINVAL;
			goto fail_bad_contig_block_size;
		}
		rc = efx_rx_qcreate_es_super_buffer(sa->nic, rxq->hw_index, 0,
			mp_info.contig_block_size, rxq->buf_size,
			mp->header_size + mp->elt_size + mp->trailer_size,
			sa->rxd_wait_timeout_ns,
			&rxq->mem, rxq_info->entries, rxq_info->type_flags,
			evq->common, &rxq->common);
		break;
	}
	default:
		rc = ENOTSUP;
	}
	if (rc != 0)
		goto fail_rx_qcreate;

	rc = efx_rx_prefix_get_layout(rxq->common, &pinfo);
	if (rc != 0)
		goto fail_prefix_get_layout;

	efx_rx_qenable(rxq->common);

	rc = sa->priv.dp_rx->qstart(rxq_info->dp, evq->read_ptr, &pinfo);
	if (rc != 0)
		goto fail_dp_qstart;

	rxq_info->state |= SFC_RXQ_STARTED;

	if (ethdev_qid == 0 && !sfc_sa2shared(sa)->isolated) {
		rc = sfc_rx_default_rxq_set_filter(sa, rxq);
		if (rc != 0)
			goto fail_mac_filter_default_rxq_set;
	}

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
		sa->eth_dev->data->rx_queue_state[ethdev_qid] =
			RTE_ETH_QUEUE_STATE_STARTED;

	return 0;

fail_mac_filter_default_rxq_set:
	sfc_rx_qflush(sa, sw_index);
	sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);
	rxq_info->state = SFC_RXQ_INITIALIZED;

fail_dp_qstart:
	efx_rx_qdestroy(rxq->common);

fail_prefix_get_layout:
fail_rx_qcreate:
fail_bad_contig_block_size:
fail_mp_get_info:
	sfc_ev_qstop(evq);

fail_ev_qstart:
	return rc;
}

void
sfc_rx_qstop(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_ethdev_qid_t ethdev_qid;
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);

	sfc_log_init(sa, "RxQ %d (internal %u)", ethdev_qid, sw_index);

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];

	if (rxq_info->state == SFC_RXQ_INITIALIZED)
		return;
	SFC_ASSERT(rxq_info->state & SFC_RXQ_STARTED);

	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
		sa->eth_dev->data->rx_queue_state[ethdev_qid] =
			RTE_ETH_QUEUE_STATE_STOPPED;

	rxq = &sa->rxq_ctrl[sw_index];
	sa->priv.dp_rx->qstop(rxq_info->dp, &rxq->evq->read_ptr);

	if (ethdev_qid == 0)
		efx_mac_filter_default_rxq_clear(sa->nic);

	sfc_rx_qflush(sa, sw_index);

	rxq_info->state = SFC_RXQ_INITIALIZED;

	efx_rx_qdestroy(rxq->common);

	sfc_ev_qstop(rxq->evq);
}

static uint64_t
sfc_rx_get_offload_mask(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t no_caps = 0;

	if (encp->enc_tunnel_encapsulations_supported == 0)
		no_caps |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;

	return ~no_caps;
}

uint64_t
sfc_rx_get_dev_offload_caps(struct sfc_adapter *sa)
{
	uint64_t caps = sa->priv.dp_rx->dev_offload_capa;

	return caps & sfc_rx_get_offload_mask(sa);
}

uint64_t
sfc_rx_get_queue_offload_caps(struct sfc_adapter *sa)
{
	return sa->priv.dp_rx->queue_offload_capa & sfc_rx_get_offload_mask(sa);
}

static int
sfc_rx_qcheck_conf(struct sfc_adapter *sa, unsigned int rxq_max_fill_level,
		   const struct rte_eth_rxconf *rx_conf,
		   __rte_unused uint64_t offloads)
{
	int rc = 0;

	if (rx_conf->rx_thresh.pthresh != 0 ||
	    rx_conf->rx_thresh.hthresh != 0 ||
	    rx_conf->rx_thresh.wthresh != 0) {
		sfc_warn(sa,
			"RxQ prefetch/host/writeback thresholds are not supported");
	}

	if (rx_conf->rx_free_thresh > rxq_max_fill_level) {
		sfc_err(sa,
			"RxQ free threshold too large: %u vs maximum %u",
			rx_conf->rx_free_thresh, rxq_max_fill_level);
		rc = EINVAL;
	}

	if (rx_conf->rx_drop_en == 0) {
		sfc_err(sa, "RxQ drop disable is not supported");
		rc = EINVAL;
	}

	return rc;
}

static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
	uint32_t data_off;
	uint32_t order;

	/* The mbuf object itself is always cache line aligned */
	order = rte_bsf32(RTE_CACHE_LINE_SIZE);

	/* Data offset from mbuf object start */
	data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
		RTE_PKTMBUF_HEADROOM;

	order = MIN(order, rte_bsf32(data_off));

	return 1u << order;
}
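
/*
 * Worked example: on a typical 64-bit build with RTE_CACHE_LINE_SIZE = 64,
 * sizeof(struct rte_mbuf) = 128, no mbuf private area and
 * RTE_PKTMBUF_HEADROOM = 128, data_off = 128 + 0 + 128 = 256 and the
 * guaranteed alignment is min(2^6, 2^8) = 64 bytes.
 */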

static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
	const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
	uint16_t buf_size;
	unsigned int buf_aligned;
	unsigned int start_alignment;
	unsigned int end_padding_alignment;

	/* Below it is assumed that both alignments are powers of 2 */
	SFC_ASSERT(rte_is_power_of_2(nic_align_start));
	SFC_ASSERT(rte_is_power_of_2(nic_align_end));

	/*
	 * mbuf is always cache line aligned; double-check
	 * that it meets Rx buffer start alignment requirements.
	 */

	/* Start from mbuf pool data room size */
	buf_size = rte_pktmbuf_data_room_size(mb_pool);

	/* Remove headroom */
	if (buf_size <= RTE_PKTMBUF_HEADROOM) {
		sfc_err(sa,
			"RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
			mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
		return 0;
	}
	buf_size -= RTE_PKTMBUF_HEADROOM;

	/* Calculate guaranteed data start alignment */
	buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

	/* Reserve space for start alignment */
	if (buf_aligned < nic_align_start) {
		start_alignment = nic_align_start - buf_aligned;
		if (buf_size <= start_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment);
			return 0;
		}
		buf_aligned = nic_align_start;
		buf_size -= start_alignment;
	} else {
		start_alignment = 0;
	}

	/* Make sure that end padding does not write beyond the buffer */
	if (buf_aligned < nic_align_end) {
		/*
		 * Estimate the space which can be lost. If the guaranteed
		 * buffer size is odd, the lost space is (nic_align_end - 1).
		 * A more accurate formula is used below.
		 */
		end_padding_alignment = nic_align_end -
			MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
		if (buf_size <= end_padding_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment,
				end_padding_alignment);
			return 0;
		}
		buf_size -= end_padding_alignment;
	} else {
		/*
		 * Start is aligned the same or better than end,
		 * just align length.
		 */
		buf_size = EFX_P2ALIGN(uint32_t, buf_size, nic_align_end);
	}

	return buf_size;
}
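
/*
 * Worked example, assuming nic_align_start = 256, nic_align_end = 64, a
 * 2176 byte data room and a 64 byte guaranteed mbuf data alignment:
 * removing the default 128 byte RTE_PKTMBUF_HEADROOM leaves 2048 bytes,
 * 256 - 64 = 192 bytes are reserved to reach the start alignment, and the
 * remaining 1856 bytes are already a multiple of nic_align_end, so 1856
 * is reported as the Rx buffer size.
 */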

int
sfc_rx_qinit(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
	     uint16_t nb_rx_desc, unsigned int socket_id,
	     const struct rte_eth_rxconf *rx_conf,
	     struct rte_mempool *mb_pool)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_ethdev_qid_t ethdev_qid;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	int rc;
	unsigned int rxq_entries;
	unsigned int evq_entries;
	unsigned int rxq_max_fill_level;
	uint64_t offloads;
	uint16_t buf_size;
	struct sfc_rxq_info *rxq_info;
	struct sfc_evq *evq;
	struct sfc_rxq *rxq;
	struct sfc_dp_rx_qcreate_info info;
	struct sfc_dp_rx_hw_limits hw_limits;
	uint16_t rx_free_thresh;
	const char *error;

	memset(&hw_limits, 0, sizeof(hw_limits));
	hw_limits.rxq_max_entries = sa->rxq_max_entries;
	hw_limits.rxq_min_entries = sa->rxq_min_entries;
	hw_limits.evq_max_entries = sa->evq_max_entries;
	hw_limits.evq_min_entries = sa->evq_min_entries;

	rc = sa->priv.dp_rx->qsize_up_rings(nb_rx_desc, &hw_limits, mb_pool,
					    &rxq_entries, &evq_entries,
					    &rxq_max_fill_level);
	if (rc != 0)
		goto fail_size_up_rings;
	SFC_ASSERT(rxq_entries >= sa->rxq_min_entries);
	SFC_ASSERT(rxq_entries <= sa->rxq_max_entries);
	SFC_ASSERT(rxq_max_fill_level <= nb_rx_desc);

	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);

	offloads = rx_conf->offloads;
	/* Add device level Rx offloads if the queue is an ethdev Rx queue */
	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
		offloads |= sa->eth_dev->data->dev_conf.rxmode.offloads;

	rc = sfc_rx_qcheck_conf(sa, rxq_max_fill_level, rx_conf, offloads);
	if (rc != 0)
		goto fail_bad_conf;

	buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
	if (buf_size == 0) {
		sfc_err(sa,
			"RxQ %d (internal %u) mbuf pool object size is too small",
			ethdev_qid, sw_index);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	if (!sfc_rx_check_scatter(sa->port.pdu, buf_size,
				  encp->enc_rx_prefix_size,
				  (offloads & RTE_ETH_RX_OFFLOAD_SCATTER),
				  encp->enc_rx_scatter_max,
				  &error)) {
		sfc_err(sa, "RxQ %d (internal %u) MTU check failed: %s",
			ethdev_qid, sw_index, error);
		sfc_err(sa,
			"RxQ %d (internal %u) calculated Rx buffer size is %u vs "
			"PDU size %u plus Rx prefix %u bytes",
			ethdev_qid, sw_index, buf_size,
			(unsigned int)sa->port.pdu, encp->enc_rx_prefix_size);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];

	SFC_ASSERT(rxq_entries <= rxq_info->max_entries);
	rxq_info->entries = rxq_entries;

	if (sa->priv.dp_rx->dp.hw_fw_caps & SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER)
		rxq_info->type = EFX_RXQ_TYPE_ES_SUPER_BUFFER;
	else
		rxq_info->type = EFX_RXQ_TYPE_DEFAULT;

	rxq_info->type_flags |=
		(offloads & RTE_ETH_RX_OFFLOAD_SCATTER) ?
		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;

	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
	    (sfc_dp_rx_offload_capa(sa->priv.dp_rx) &
	     RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) != 0)
		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;

	if (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH)
		rxq_info->type_flags |= EFX_RXQ_FLAG_RSS_HASH;

	if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG) != 0)
		rxq_info->type_flags |= EFX_RXQ_FLAG_USER_FLAG;

	if ((sa->negotiated_rx_metadata & RTE_ETH_RX_METADATA_USER_MARK) != 0 ||
	    sfc_flow_tunnel_is_active(sa))
		rxq_info->type_flags |= EFX_RXQ_FLAG_USER_MARK;

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
			  evq_entries, socket_id, &evq);
	if (rc != 0)
		goto fail_ev_qinit;

	rxq = &sa->rxq_ctrl[sw_index];
	rxq->evq = evq;
	rxq->hw_index = sw_index;
	/*
	 * If the Rx refill threshold is specified (i.e. non-zero) in the
	 * Rx configuration, use the specified value. Otherwise use 1/8 of
	 * the number of Rx descriptors as the default. This keeps the Rx
	 * ring full enough without refilling too aggressively when the
	 * packet rate is high.
	 *
	 * Since the PMD refills in bulks and waits for a full bulk before
	 * refilling (which basically rounds the threshold down), it is
	 * better to round up here to mitigate that a bit.
	 */
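	/*
	 * E.g. nb_rx_desc = 1000 with rx_free_thresh left at zero gives
	 * EFX_DIV_ROUND_UP(1000, 8) = 125 below, which is then clamped to
	 * at least SFC_RX_REFILL_BULK descriptors.
	 */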
	rx_free_thresh = (rx_conf->rx_free_thresh != 0) ?
		rx_conf->rx_free_thresh : EFX_DIV_ROUND_UP(nb_rx_desc, 8);
	/* Rx refill threshold cannot be smaller than refill bulk */
	rxq_info->refill_threshold =
		RTE_MAX(rx_free_thresh, SFC_RX_REFILL_BULK);
	rxq_info->refill_mb_pool = mb_pool;

	if (rss->hash_support == EFX_RX_HASH_AVAILABLE && rss->channels > 0 &&
	    (offloads & RTE_ETH_RX_OFFLOAD_RSS_HASH))
		rxq_info->rxq_flags = SFC_RXQ_FLAG_RSS_HASH;
	else
		rxq_info->rxq_flags = 0;

	rxq->buf_size = buf_size;

	rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_NIC_DMA_ADDR_RX_RING,
			   efx_rxq_size(sa->nic, rxq_info->entries),
			   socket_id, &rxq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	memset(&info, 0, sizeof(info));
	info.refill_mb_pool = rxq_info->refill_mb_pool;
	info.max_fill_level = rxq_max_fill_level;
	info.refill_threshold = rxq_info->refill_threshold;
	info.buf_size = buf_size;
	info.batch_max = encp->enc_rx_batch_max;
	info.prefix_size = encp->enc_rx_prefix_size;

	if (sfc_flow_tunnel_is_active(sa))
		info.user_mark_mask = SFC_FT_USER_MARK_MASK;
	else
		info.user_mark_mask = UINT32_MAX;

	info.flags = rxq_info->rxq_flags;
	info.rxq_entries = rxq_info->entries;
	info.rxq_hw_ring = rxq->mem.esm_base;
	info.evq_hw_index = sfc_evq_sw_index_by_rxq_sw_index(sa, sw_index);
	info.evq_entries = evq_entries;
	info.evq_hw_ring = evq->mem.esm_base;
	info.hw_index = rxq->hw_index;
	info.mem_bar = sa->mem_bar.esb_base;
	info.vi_window_shift = encp->enc_vi_window_shift;
	info.fcw_offset = sa->fcw_offset;

	info.nic_dma_info = &sas->nic_dma_info;

	rc = sa->priv.dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
				     &RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
				     socket_id, &info, &rxq_info->dp);
	if (rc != 0)
		goto fail_dp_rx_qcreate;

	evq->dp_rxq = rxq_info->dp;

	rxq_info->state = SFC_RXQ_INITIALIZED;

	rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

	return 0;

fail_dp_rx_qcreate:
	sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
	sfc_ev_qfini(evq);

fail_ev_qinit:
	rxq_info->entries = 0;

fail_bad_conf:
fail_size_up_rings:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_rx_qfini(struct sfc_adapter *sa, sfc_sw_index_t sw_index)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_ethdev_qid_t ethdev_qid;
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sfc_sa2shared(sa)->rxq_count);
	ethdev_qid = sfc_ethdev_rx_qid_by_rxq_sw_index(sas, sw_index);

	if (ethdev_qid != SFC_ETHDEV_QID_INVALID)
		sa->eth_dev->data->rx_queues[ethdev_qid] = NULL;

	rxq_info = &sfc_sa2shared(sa)->rxq_info[sw_index];

	SFC_ASSERT(rxq_info->state == SFC_RXQ_INITIALIZED);

	sa->priv.dp_rx->qdestroy(rxq_info->dp);
	rxq_info->dp = NULL;

	rxq_info->state &= ~SFC_RXQ_INITIALIZED;
	rxq_info->entries = 0;

	rxq = &sa->rxq_ctrl[sw_index];

	sfc_dma_free(sa, &rxq->mem);

	sfc_ev_qfini(rxq->evq);
	rxq->evq = NULL;
}

/*
 * Mapping between RTE RSS hash functions and their EFX counterparts.
 */
static const struct sfc_rss_hf_rte_to_efx sfc_rss_hf_map[] = {
	{ RTE_ETH_RSS_NONFRAG_IPV4_TCP,
	  EFX_RX_HASH(IPV4_TCP, 4TUPLE) },
	{ RTE_ETH_RSS_NONFRAG_IPV4_UDP,
	  EFX_RX_HASH(IPV4_UDP, 4TUPLE) },
	{ RTE_ETH_RSS_NONFRAG_IPV6_TCP | RTE_ETH_RSS_IPV6_TCP_EX,
	  EFX_RX_HASH(IPV6_TCP, 4TUPLE) },
	{ RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_UDP_EX,
	  EFX_RX_HASH(IPV6_UDP, 4TUPLE) },
	{ RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_OTHER,
	  EFX_RX_HASH(IPV4_TCP, 2TUPLE) | EFX_RX_HASH(IPV4_UDP, 2TUPLE) |
	  EFX_RX_HASH(IPV4, 2TUPLE) },
	{ RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_OTHER |
	  RTE_ETH_RSS_IPV6_EX,
	  EFX_RX_HASH(IPV6_TCP, 2TUPLE) | EFX_RX_HASH(IPV6_UDP, 2TUPLE) |
	  EFX_RX_HASH(IPV6, 2TUPLE) }
};

static efx_rx_hash_type_t
sfc_rx_hash_types_mask_supp(efx_rx_hash_type_t hash_type,
			    unsigned int *hash_type_flags_supported,
			    unsigned int nb_hash_type_flags_supported)
{
	efx_rx_hash_type_t hash_type_masked = 0;
	unsigned int i, j;

	for (i = 0; i < nb_hash_type_flags_supported; ++i) {
		unsigned int class_tuple_lbn[] = {
			EFX_RX_CLASS_IPV4_TCP_LBN,
			EFX_RX_CLASS_IPV4_UDP_LBN,
			EFX_RX_CLASS_IPV4_LBN,
			EFX_RX_CLASS_IPV6_TCP_LBN,
			EFX_RX_CLASS_IPV6_UDP_LBN,
			EFX_RX_CLASS_IPV6_LBN
		};

		for (j = 0; j < RTE_DIM(class_tuple_lbn); ++j) {
			unsigned int tuple_mask = EFX_RX_CLASS_HASH_4TUPLE;
			unsigned int flag;

			tuple_mask <<= class_tuple_lbn[j];
			flag = hash_type & tuple_mask;

			if (flag == hash_type_flags_supported[i])
				hash_type_masked |= flag;
		}
	}

	return hash_type_masked;
}

int
sfc_rx_hash_init(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint32_t alg_mask = encp->enc_rx_scale_hash_alg_mask;
	efx_rx_hash_alg_t alg;
	unsigned int flags_supp[EFX_RX_HASH_NFLAGS];
	unsigned int nb_flags_supp;
	struct sfc_rss_hf_rte_to_efx *hf_map;
	struct sfc_rss_hf_rte_to_efx *entry;
	efx_rx_hash_type_t efx_hash_types;
	unsigned int i;
	int rc;

	if (alg_mask & (1U << EFX_RX_HASHALG_TOEPLITZ))
		alg = EFX_RX_HASHALG_TOEPLITZ;
	else if (alg_mask & (1U << EFX_RX_HASHALG_PACKED_STREAM))
		alg = EFX_RX_HASHALG_PACKED_STREAM;
	else
		return EINVAL;

	rc = efx_rx_scale_hash_flags_get(sa->nic, alg, flags_supp,
					 RTE_DIM(flags_supp), &nb_flags_supp);
	if (rc != 0)
		return rc;

	hf_map = rte_calloc_socket("sfc-rss-hf-map",
				   RTE_DIM(sfc_rss_hf_map),
				   sizeof(*hf_map), 0, sa->socket_id);
	if (hf_map == NULL)
		return ENOMEM;

	entry = hf_map;
	efx_hash_types = 0;
	for (i = 0; i < RTE_DIM(sfc_rss_hf_map); ++i) {
		efx_rx_hash_type_t ht;

		ht = sfc_rx_hash_types_mask_supp(sfc_rss_hf_map[i].efx,
						 flags_supp, nb_flags_supp);
		if (ht != 0) {
			entry->rte = sfc_rss_hf_map[i].rte;
			entry->efx = ht;
			efx_hash_types |= ht;
			++entry;
		}
	}

	rss->hash_alg = alg;
	rss->hf_map_nb_entries = (unsigned int)(entry - hf_map);
	rss->hf_map = hf_map;
	rss->hash_types = efx_hash_types;

	return 0;
}

void
sfc_rx_hash_fini(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;

	rte_free(rss->hf_map);
}

int
sfc_rx_hf_rte_to_efx(struct sfc_adapter *sa, uint64_t rte,
		     efx_rx_hash_type_t *efx)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	efx_rx_hash_type_t hash_types = 0;
	unsigned int i;

	for (i = 0; i < rss->hf_map_nb_entries; ++i) {
		uint64_t rte_mask = rss->hf_map[i].rte;

		if ((rte & rte_mask) != 0) {
			rte &= ~rte_mask;
			hash_types |= rss->hf_map[i].efx;
		}
	}

	if (rte != 0) {
		sfc_err(sa, "unsupported hash functions requested");
		return EINVAL;
	}

	*efx = hash_types;

	return 0;
}
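
/*
 * For instance, a request for RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_NONFRAG_IPV4_TCP
 * ORs in the EFX hash type bits of both matching map entries built in
 * sfc_rx_hash_init(), while a request containing a hash function absent
 * from the map leaves bits set in 'rte' and is rejected with EINVAL.
 */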

uint64_t
sfc_rx_hf_efx_to_rte(struct sfc_rss *rss, efx_rx_hash_type_t efx)
{
	uint64_t rte = 0;
	unsigned int i;

	for (i = 0; i < rss->hf_map_nb_entries; ++i) {
		efx_rx_hash_type_t hash_type = rss->hf_map[i].efx;

		if ((efx & hash_type) == hash_type)
			rte |= rss->hf_map[i].rte;
	}

	return rte;
}

static int
sfc_rx_process_adv_conf_rss(struct sfc_adapter *sa,
			    struct rte_eth_rss_conf *conf)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	efx_rx_hash_type_t efx_hash_types = rss->hash_types;
	uint64_t rss_hf = sfc_rx_hf_efx_to_rte(rss, efx_hash_types);
	int rc;

	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
		if ((conf->rss_hf != 0 && conf->rss_hf != rss_hf) ||
		    conf->rss_key != NULL)
			return EINVAL;
	}

	if (conf->rss_hf != 0) {
		rc = sfc_rx_hf_rte_to_efx(sa, conf->rss_hf, &efx_hash_types);
		if (rc != 0)
			return rc;
	}

	if (conf->rss_key != NULL) {
		if (conf->rss_key_len != sizeof(rss->key)) {
			sfc_err(sa, "RSS key size is wrong (should be %zu)",
				sizeof(rss->key));
			return EINVAL;
		}
		rte_memcpy(rss->key, conf->rss_key, sizeof(rss->key));
	}

	rss->hash_types = efx_hash_types;

	return 0;
}

static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	int rc = 0;

	if (rss->channels > 0) {
		rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					   rss->hash_alg, rss->hash_types,
					   B_TRUE);
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  rss->key, sizeof(rss->key));
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  rss->tbl, RTE_DIM(rss->tbl));
	}

finish:
	return rc;
}

struct sfc_rxq_info *
sfc_rxq_info_by_ethdev_qid(struct sfc_adapter_shared *sas,
			   sfc_ethdev_qid_t ethdev_qid)
{
	sfc_sw_index_t sw_index;

	SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_rxq_count);
	SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);

	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, ethdev_qid);
	return &sas->rxq_info[sw_index];
}

struct sfc_rxq *
sfc_rxq_ctrl_by_ethdev_qid(struct sfc_adapter *sa, sfc_ethdev_qid_t ethdev_qid)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	sfc_sw_index_t sw_index;

	SFC_ASSERT((unsigned int)ethdev_qid < sas->ethdev_rxq_count);
	SFC_ASSERT(ethdev_qid != SFC_ETHDEV_QID_INVALID);

	sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas, ethdev_qid);
	return &sa->rxq_ctrl[sw_index];
}

int
sfc_rx_start(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	sfc_sw_index_t sw_index;
	int rc;

	sfc_log_init(sa, "rxq_count=%u (internal %u)", sas->ethdev_rxq_count,
		     sas->rxq_count);

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	rc = sfc_rx_rss_config(sa);
	if (rc != 0)
		goto fail_rss_config;

	for (sw_index = 0; sw_index < sas->rxq_count; ++sw_index) {
		if (sas->rxq_info[sw_index].state == SFC_RXQ_INITIALIZED &&
		    (!sas->rxq_info[sw_index].deferred_start ||
		     sas->rxq_info[sw_index].deferred_started)) {
			rc = sfc_rx_qstart(sa, sw_index);
			if (rc != 0)
				goto fail_rx_qstart;
		}
	}

	return 0;

fail_rx_qstart:
	while (sw_index-- > 0)
		sfc_rx_qstop(sa, sw_index);

fail_rss_config:
	efx_rx_fini(sa->nic);

fail_rx_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_rx_stop(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	sfc_sw_index_t sw_index;

	sfc_log_init(sa, "rxq_count=%u (internal %u)", sas->ethdev_rxq_count,
		     sas->rxq_count);

	sw_index = sas->rxq_count;
	while (sw_index-- > 0) {
		if (sas->rxq_info[sw_index].state & SFC_RXQ_STARTED)
			sfc_rx_qstop(sa, sw_index);
	}

	efx_rx_fini(sa->nic);
}

int
sfc_rx_qinit_info(struct sfc_adapter *sa, sfc_sw_index_t sw_index,
		  unsigned int extra_efx_type_flags)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rxq_info *rxq_info = &sas->rxq_info[sw_index];
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	unsigned int max_entries;

	max_entries = encp->enc_rxq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	rxq_info->max_entries = max_entries;
	rxq_info->type_flags = extra_efx_type_flags;

	return 0;
}

static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	uint64_t offloads_supported = sfc_rx_get_dev_offload_caps(sa) |
				      sfc_rx_get_queue_offload_caps(sa);
	struct sfc_rss *rss = &sas->rss;
	int rc = 0;

	switch (rxmode->mq_mode) {
	case RTE_ETH_MQ_RX_NONE:
		/* No special checks are required */
		break;
	case RTE_ETH_MQ_RX_RSS:
		if (rss->context_type == EFX_RX_SCALE_UNAVAILABLE) {
			sfc_err(sa, "RSS is not available");
			rc = EINVAL;
		}
		break;
	default:
		sfc_err(sa, "Rx multi-queue mode %u not supported",
			rxmode->mq_mode);
		rc = EINVAL;
	}

	/*
	 * Requested offloads are validated against those supported by
	 * the ethdev, so the checks below cannot end up adding an
	 * unsupported offload.
	 */
	if ((rxmode->offloads & RTE_ETH_RX_OFFLOAD_CHECKSUM) !=
	    (offloads_supported & RTE_ETH_RX_OFFLOAD_CHECKSUM)) {
		sfc_warn(sa, "Rx checksum offloads cannot be disabled - always on (IPv4/TCP/UDP)");
		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_CHECKSUM;
	}

	if ((offloads_supported & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM) &&
	    (~rxmode->offloads & RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM)) {
		sfc_warn(sa, "Rx outer IPv4 checksum offload cannot be disabled - always on");
		rxmode->offloads |= RTE_ETH_RX_OFFLOAD_OUTER_IPV4_CKSUM;
	}

	return rc;
}

/**
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
static void
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	sfc_sw_index_t sw_index;
	sfc_ethdev_qid_t ethdev_qid;

	SFC_ASSERT(nb_rx_queues <= sas->ethdev_rxq_count);

	/*
	 * Finalize only ethdev queues since the other ones are finalized
	 * only on device close and may require additional deinitialization.
	 */
	ethdev_qid = sas->ethdev_rxq_count;
	while (--ethdev_qid >= (int)nb_rx_queues) {
		struct sfc_rxq_info *rxq_info;

		rxq_info = sfc_rxq_info_by_ethdev_qid(sas, ethdev_qid);
		if (rxq_info->state & SFC_RXQ_INITIALIZED) {
			sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas,
								ethdev_qid);
			sfc_rx_qfini(sa, sw_index);
		}
	}

	sas->ethdev_rxq_count = nb_rx_queues;
}

/**
 * Initialize Rx subsystem.
 *
 * Called at device (re)configuration stage when number of receive queues is
 * specified together with other device level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_configure(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
	struct sfc_rss *rss = &sas->rss;
	struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
	const unsigned int nb_rsrv_rx_queues = sfc_nb_reserved_rxq(sas);
	const unsigned int nb_rxq_total = nb_rx_queues + nb_rsrv_rx_queues;
	bool reconfigure;
	int rc;

	sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
		     nb_rx_queues, sas->ethdev_rxq_count);

	rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
	if (rc != 0)
		goto fail_check_mode;

	if (nb_rxq_total == sas->rxq_count) {
		reconfigure = true;
		goto configure_rss;
	}

	if (sas->rxq_info == NULL) {
		reconfigure = false;
		rc = ENOMEM;
		sas->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rxq_total,
						  sizeof(sas->rxq_info[0]), 0,
						  sa->socket_id);
		if (sas->rxq_info == NULL)
			goto fail_rxqs_alloc;

		/*
		 * Allocate the primary-process-only RxQ control structures
		 * from the heap since they should not be shared.
		 */
		rc = ENOMEM;
		sa->rxq_ctrl = calloc(nb_rxq_total, sizeof(sa->rxq_ctrl[0]));
		if (sa->rxq_ctrl == NULL)
			goto fail_rxqs_ctrl_alloc;
	} else {
		struct sfc_rxq_info *new_rxq_info;
		struct sfc_rxq *new_rxq_ctrl;

		reconfigure = true;

		/* Do not uninitialize reserved queues */
		if (nb_rx_queues < sas->ethdev_rxq_count)
			sfc_rx_fini_queues(sa, nb_rx_queues);

		rc = ENOMEM;
		new_rxq_info =
			rte_realloc(sas->rxq_info,
				    nb_rxq_total * sizeof(sas->rxq_info[0]), 0);
		if (new_rxq_info == NULL && nb_rxq_total > 0)
			goto fail_rxqs_realloc;

		rc = ENOMEM;
		new_rxq_ctrl = realloc(sa->rxq_ctrl,
				       nb_rxq_total * sizeof(sa->rxq_ctrl[0]));
		if (new_rxq_ctrl == NULL && nb_rxq_total > 0)
			goto fail_rxqs_ctrl_realloc;

		sas->rxq_info = new_rxq_info;
		sa->rxq_ctrl = new_rxq_ctrl;
		if (nb_rxq_total > sas->rxq_count) {
			unsigned int rxq_count = sas->rxq_count;

			memset(&sas->rxq_info[rxq_count], 0,
			       (nb_rxq_total - rxq_count) *
			       sizeof(sas->rxq_info[0]));
			memset(&sa->rxq_ctrl[rxq_count], 0,
			       (nb_rxq_total - rxq_count) *
			       sizeof(sa->rxq_ctrl[0]));
		}
	}

	while (sas->ethdev_rxq_count < nb_rx_queues) {
		sfc_sw_index_t sw_index;

		sw_index = sfc_rxq_sw_index_by_ethdev_rx_qid(sas,
							sas->ethdev_rxq_count);
		rc = sfc_rx_qinit_info(sa, sw_index, 0);
		if (rc != 0)
			goto fail_rx_qinit_info;

		sas->ethdev_rxq_count++;
	}

	sas->rxq_count = sas->ethdev_rxq_count + nb_rsrv_rx_queues;

	if (!reconfigure) {
		rc = sfc_mae_counter_rxq_init(sa);
		if (rc != 0)
			goto fail_count_rxq_init;
	}

configure_rss:
	rss->channels = (dev_conf->rxmode.mq_mode == RTE_ETH_MQ_RX_RSS) ?
			 MIN(sas->ethdev_rxq_count, EFX_MAXRSS) : 0;

	if (rss->channels > 0) {
		struct rte_eth_rss_conf *adv_conf_rss;
		sfc_sw_index_t sw_index;

		for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
			rss->tbl[sw_index] = sw_index % rss->channels;
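		/*
		 * E.g. with 4 Rx channels the default indirection table is
		 * the repeating pattern 0, 1, 2, 3, 0, 1, ... across all
		 * EFX_RSS_TBL_SIZE entries.
		 */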

		adv_conf_rss = &dev_conf->rx_adv_conf.rss_conf;
		rc = sfc_rx_process_adv_conf_rss(sa, adv_conf_rss);
		if (rc != 0)
			goto fail_rx_process_adv_conf_rss;
	}

	return 0;

fail_rx_process_adv_conf_rss:
	if (!reconfigure)
		sfc_mae_counter_rxq_fini(sa);

fail_count_rxq_init:
fail_rx_qinit_info:
fail_rxqs_ctrl_realloc:
fail_rxqs_realloc:
fail_rxqs_ctrl_alloc:
fail_rxqs_alloc:
	sfc_rx_close(sa);

fail_check_mode:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

/**
 * Shutdown Rx subsystem.
 *
 * Called at device close stage, for example, before device shutdown.
 */
void
sfc_rx_close(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;

	sfc_rx_fini_queues(sa, 0);
	sfc_mae_counter_rxq_fini(sa);

	rss->channels = 0;

	free(sa->rxq_ctrl);
	sa->rxq_ctrl = NULL;

	rte_free(sfc_sa2shared(sa)->rxq_info);
	sfc_sa2shared(sa)->rxq_info = NULL;
}