/*-
 *   BSD LICENSE
 *
 * Copyright (c) 2016-2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_mempool.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"

/*
 * Maximum number of Rx queue flush attempts in the case of a flush failure
 * or timeout
 */
#define SFC_RX_QFLUSH_ATTEMPTS		(3)

/*
 * Time to wait between event queue polling attempts when waiting for Rx
 * queue flush done or failed events.
 */
#define SFC_RX_QFLUSH_POLL_WAIT_MS	(1)

/*
 * Maximum number of event queue polling attempts when waiting for Rx queue
 * flush done or failed events. It defines the Rx queue flush attempt timeout
 * together with SFC_RX_QFLUSH_POLL_WAIT_MS.
 */
#define SFC_RX_QFLUSH_POLL_ATTEMPTS	(2000)

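/*
 * Rx queue flush state notifications, expected to be invoked from event
 * queue processing (sfc_ev) when the corresponding flush done or flush
 * failed event is received.
 */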
void
sfc_rx_qflush_done(struct sfc_rxq *rxq)
{
	rxq->state |= SFC_RXQ_FLUSHED;
	rxq->state &= ~SFC_RXQ_FLUSHING;
}

void
sfc_rx_qflush_failed(struct sfc_rxq *rxq)
{
	rxq->state |= SFC_RXQ_FLUSH_FAILED;
	rxq->state &= ~SFC_RXQ_FLUSHING;
}

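/*
 * Refill the Rx ring from the mempool in bulks of SFC_RX_REFILL_BULK
 * buffers once at least refill_threshold descriptors are free.
 */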
static void
sfc_efx_rx_qrefill(struct sfc_efx_rxq *rxq)
{
	unsigned int free_space;
	unsigned int bulks;
	void *objs[SFC_RX_REFILL_BULK];
	efsys_dma_addr_t addr[RTE_DIM(objs)];
	unsigned int added = rxq->added;
	unsigned int id;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;
	struct rte_mbuf *m;
	uint16_t port_id = rxq->dp.dpq.port_id;

	free_space = EFX_RXQ_LIMIT(rxq->ptr_mask + 1) -
		(added - rxq->completed);

	if (free_space < rxq->refill_threshold)
		return;

	bulks = free_space / RTE_DIM(objs);
	/* refill_threshold guarantees that bulks is positive */
	SFC_ASSERT(bulks > 0);

	id = added & rxq->ptr_mask;
	do {
		if (unlikely(rte_mempool_get_bulk(rxq->refill_mb_pool, objs,
						  RTE_DIM(objs)) < 0)) {
			/*
			 * It is hardly a safe way to increment a counter
			 * from different contexts, but all PMDs do it.
			 */
			rxq->evq->sa->eth_dev->data->rx_mbuf_alloc_failed +=
				RTE_DIM(objs);
			/* Return if we have posted nothing yet */
			if (added == rxq->added)
				return;
			/* Push posted */
			break;
		}

		for (i = 0; i < RTE_DIM(objs);
		     ++i, id = (id + 1) & rxq->ptr_mask) {
			m = objs[i];

			rxd = &rxq->sw_desc[id];
			rxd->mbuf = m;

			SFC_ASSERT(rte_mbuf_refcnt_read(m) == 1);
			m->data_off = RTE_PKTMBUF_HEADROOM;
			SFC_ASSERT(m->next == NULL);
			SFC_ASSERT(m->nb_segs == 1);
			m->port = port_id;

			addr[i] = rte_pktmbuf_iova(m);
		}

		efx_rx_qpost(rxq->common, addr, rxq->buf_size,
			     RTE_DIM(objs), rxq->completed, added);
		added += RTE_DIM(objs);
	} while (--bulks > 0);

	SFC_ASSERT(added != rxq->added);
	rxq->added = added;
	efx_rx_qpush(rxq->common, added, &rxq->pushed);
}

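/*
 * Convert libefx Rx descriptor flags to mbuf offload flags (IPv4 and L4
 * checksum validation results).
 */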
static uint64_t
sfc_efx_rx_desc_flags_to_offload_flags(const unsigned int desc_flags)
{
	uint64_t mbuf_flags = 0;

	switch (desc_flags & (EFX_PKT_IPV4 | EFX_CKSUM_IPV4)) {
	case (EFX_PKT_IPV4 | EFX_CKSUM_IPV4):
		mbuf_flags |= PKT_RX_IP_CKSUM_GOOD;
		break;
	case EFX_PKT_IPV4:
		mbuf_flags |= PKT_RX_IP_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_IP_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_IP_CKSUM_MASK) ==
			   PKT_RX_IP_CKSUM_UNKNOWN);
		break;
	}

	switch ((desc_flags &
		 (EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP))) {
	case (EFX_PKT_TCP | EFX_CKSUM_TCPUDP):
	case (EFX_PKT_UDP | EFX_CKSUM_TCPUDP):
		mbuf_flags |= PKT_RX_L4_CKSUM_GOOD;
		break;
	case EFX_PKT_TCP:
	case EFX_PKT_UDP:
		mbuf_flags |= PKT_RX_L4_CKSUM_BAD;
		break;
	default:
		RTE_BUILD_BUG_ON(PKT_RX_L4_CKSUM_UNKNOWN != 0);
		SFC_ASSERT((mbuf_flags & PKT_RX_L4_CKSUM_MASK) ==
			   PKT_RX_L4_CKSUM_UNKNOWN);
		break;
	}

	return mbuf_flags;
}

static uint32_t
sfc_efx_rx_desc_flags_to_packet_type(const unsigned int desc_flags)
{
	return RTE_PTYPE_L2_ETHER |
		((desc_flags & EFX_PKT_IPV4) ?
			RTE_PTYPE_L3_IPV4_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_IPV6) ?
			RTE_PTYPE_L3_IPV6_EXT_UNKNOWN : 0) |
		((desc_flags & EFX_PKT_TCP) ? RTE_PTYPE_L4_TCP : 0) |
		((desc_flags & EFX_PKT_UDP) ? RTE_PTYPE_L4_UDP : 0);
}

static const uint32_t *
sfc_efx_supported_ptypes_get(__rte_unused uint32_t tunnel_encaps)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_UNKNOWN
	};

	return ptypes;
}

#if EFSYS_OPT_RX_SCALE
static void
sfc_efx_rx_set_rss_hash(struct sfc_efx_rxq *rxq, unsigned int flags,
			struct rte_mbuf *m)
{
	uint8_t *mbuf_data;

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RSS_HASH) == 0)
		return;

	mbuf_data = rte_pktmbuf_mtod(m, uint8_t *);

	if (flags & (EFX_PKT_IPV4 | EFX_PKT_IPV6)) {
		m->hash.rss = efx_pseudo_hdr_hash_get(rxq->common,
						      EFX_RX_HASHALG_TOEPLITZ,
						      mbuf_data);

		m->ol_flags |= PKT_RX_RSS_HASH;
	}
}
#else
static void
sfc_efx_rx_set_rss_hash(__rte_unused struct sfc_efx_rxq *rxq,
			__rte_unused unsigned int flags,
			__rte_unused struct rte_mbuf *m)
{
}
#endif

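/*
 * The libefx-based burst receive callback: polls the event queue, completes
 * pending descriptors into mbufs (handling scatter chains and discards) and
 * refills the Rx ring.
 */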
static uint16_t
sfc_efx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
{
	struct sfc_dp_rxq *dp_rxq = rx_queue;
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int completed;
	unsigned int prefix_size = rxq->prefix_size;
	unsigned int done_pkts = 0;
	boolean_t discard_next = B_FALSE;
	struct rte_mbuf *scatter_pkt = NULL;

	if (unlikely((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0))
		return 0;

	sfc_ev_qpoll(rxq->evq);

	completed = rxq->completed;
	while (completed != rxq->pending && done_pkts < nb_pkts) {
		unsigned int id;
		struct sfc_efx_rx_sw_desc *rxd;
		struct rte_mbuf *m;
		unsigned int seg_len;
		unsigned int desc_flags;

		id = completed++ & rxq->ptr_mask;
		rxd = &rxq->sw_desc[id];
		m = rxd->mbuf;
		desc_flags = rxd->flags;

		if (discard_next)
			goto discard;

		if (desc_flags & (EFX_ADDR_MISMATCH | EFX_DISCARD))
			goto discard;

		if (desc_flags & EFX_PKT_PREFIX_LEN) {
			uint16_t tmp_size;
			int rc __rte_unused;

			rc = efx_pseudo_hdr_pkt_length_get(rxq->common,
				rte_pktmbuf_mtod(m, uint8_t *), &tmp_size);
			SFC_ASSERT(rc == 0);
			seg_len = tmp_size;
		} else {
			seg_len = rxd->size - prefix_size;
		}

		rte_pktmbuf_data_len(m) = seg_len;
		rte_pktmbuf_pkt_len(m) = seg_len;

		if (scatter_pkt != NULL) {
			if (rte_pktmbuf_chain(scatter_pkt, m) != 0) {
				rte_pktmbuf_free(scatter_pkt);
				goto discard;
			}
			/* The packet to deliver */
			m = scatter_pkt;
		}

		if (desc_flags & EFX_PKT_CONT) {
			/* The packet is scattered, more fragments to come */
			scatter_pkt = m;
			/* Further fragments have no prefix */
			prefix_size = 0;
			continue;
		}

		/* Scattered packet is done */
		scatter_pkt = NULL;
		/* The first fragment of the packet has the prefix */
		prefix_size = rxq->prefix_size;

		m->ol_flags =
			sfc_efx_rx_desc_flags_to_offload_flags(desc_flags);
		m->packet_type =
			sfc_efx_rx_desc_flags_to_packet_type(desc_flags);

		/*
		 * Extract the RSS hash from the packet prefix and
		 * set the corresponding field (if needed and possible)
		 */
		sfc_efx_rx_set_rss_hash(rxq, desc_flags, m);

		m->data_off += prefix_size;

		*rx_pkts++ = m;
		done_pkts++;
		continue;

discard:
		discard_next = ((desc_flags & EFX_PKT_CONT) != 0);
		rte_mempool_put(rxq->refill_mb_pool, m);
		rxd->mbuf = NULL;
	}

	/* pending is only moved when the entire packet is received */
	SFC_ASSERT(scatter_pkt == NULL);

	rxq->completed = completed;

	sfc_efx_rx_qrefill(rxq);

	return done_pkts;
}

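/*
 * Count Rx descriptors which hardware has completed but the burst receive
 * function has not delivered to the application yet.
 */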
static sfc_dp_rx_qdesc_npending_t sfc_efx_rx_qdesc_npending;
static unsigned int
sfc_efx_rx_qdesc_npending(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if ((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) == 0)
		return 0;

	sfc_ev_qpoll(rxq->evq);

	return rxq->pending - rxq->completed;
}

static sfc_dp_rx_qdesc_status_t sfc_efx_rx_qdesc_status;
static int
sfc_efx_rx_qdesc_status(struct sfc_dp_rxq *dp_rxq, uint16_t offset)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	if (unlikely(offset > rxq->ptr_mask))
		return -EINVAL;

	/*
	 * Poll EvQ to derive up-to-date 'rxq->pending' figure;
	 * it is required for the queue to be running, but the
	 * check is omitted because API design assumes that it
	 * is the duty of the caller to satisfy all conditions
	 */
	SFC_ASSERT((rxq->flags & SFC_EFX_RXQ_FLAG_RUNNING) ==
		   SFC_EFX_RXQ_FLAG_RUNNING);
	sfc_ev_qpoll(rxq->evq);

	/*
	 * There is a handful of reserved entries in the ring,
	 * but an explicit check whether the offset points to
	 * a reserved entry is neglected since the two checks
	 * below rely on the figures which take the HW limits
	 * into account and thus if an entry is reserved, the
	 * checks will fail and UNAVAIL code will be returned
	 */

	if (offset < (rxq->pending - rxq->completed))
		return RTE_ETH_RX_DESC_DONE;

	if (offset < (rxq->added - rxq->completed))
		return RTE_ETH_RX_DESC_AVAIL;

	return RTE_ETH_RX_DESC_UNAVAIL;
}

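/*
 * Look up the control path Rx queue which corresponds to a datapath Rx
 * queue using the port and queue IDs stored in the generic datapath queue
 * data.
 */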
struct sfc_rxq *
sfc_rxq_by_dp_rxq(const struct sfc_dp_rxq *dp_rxq)
{
	const struct sfc_dp_queue *dpq = &dp_rxq->dpq;
	struct rte_eth_dev *eth_dev;
	struct sfc_adapter *sa;
	struct sfc_rxq *rxq;

	SFC_ASSERT(rte_eth_dev_is_valid_port(dpq->port_id));
	eth_dev = &rte_eth_devices[dpq->port_id];

	sa = eth_dev->data->dev_private;

	SFC_ASSERT(dpq->queue_id < sa->rxq_count);
	rxq = sa->rxq_info[dpq->queue_id].rxq;

	SFC_ASSERT(rxq != NULL);
	return rxq;
}

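/*
 * libefx datapath Rx queue creation: allocate the per-queue software state
 * and capture creation-time parameters provided by the control path.
 */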
static sfc_dp_rx_qcreate_t sfc_efx_rx_qcreate;
static int
sfc_efx_rx_qcreate(uint16_t port_id, uint16_t queue_id,
		   const struct rte_pci_addr *pci_addr, int socket_id,
		   const struct sfc_dp_rx_qcreate_info *info,
		   struct sfc_dp_rxq **dp_rxqp)
{
	struct sfc_efx_rxq *rxq;
	int rc;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-efx-rxq", sizeof(*rxq),
				 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	sfc_dp_queue_init(&rxq->dp.dpq, port_id, queue_id, pci_addr);

	rc = ENOMEM;
	rxq->sw_desc = rte_calloc_socket("sfc-efx-rxq-sw_desc",
					 info->rxq_entries,
					 sizeof(*rxq->sw_desc),
					 RTE_CACHE_LINE_SIZE, socket_id);
	if (rxq->sw_desc == NULL)
		goto fail_desc_alloc;

	/* efx datapath is bound to efx control path */
	rxq->evq = sfc_rxq_by_dp_rxq(&rxq->dp)->evq;
	if (info->flags & SFC_RXQ_FLAG_RSS_HASH)
		rxq->flags |= SFC_EFX_RXQ_FLAG_RSS_HASH;
	rxq->ptr_mask = info->rxq_entries - 1;
	rxq->batch_max = info->batch_max;
	rxq->prefix_size = info->prefix_size;
	rxq->refill_threshold = info->refill_threshold;
	rxq->buf_size = info->buf_size;
	rxq->refill_mb_pool = info->refill_mb_pool;

	*dp_rxqp = &rxq->dp;
	return 0;

fail_desc_alloc:
	rte_free(rxq);

fail_rxq_alloc:
	return rc;
}

static sfc_dp_rx_qdestroy_t sfc_efx_rx_qdestroy;
static void
sfc_efx_rx_qdestroy(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rte_free(rxq->sw_desc);
	rte_free(rxq);
}

static sfc_dp_rx_qstart_t sfc_efx_rx_qstart;
static int
sfc_efx_rx_qstart(struct sfc_dp_rxq *dp_rxq,
		  __rte_unused unsigned int evq_read_ptr)
{
	/* libefx-based datapath is specific to libefx-based PMD */
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	struct sfc_rxq *crxq = sfc_rxq_by_dp_rxq(dp_rxq);

	rxq->common = crxq->common;

	rxq->pending = rxq->completed = rxq->added = rxq->pushed = 0;

	sfc_efx_rx_qrefill(rxq);

	rxq->flags |= (SFC_EFX_RXQ_FLAG_STARTED | SFC_EFX_RXQ_FLAG_RUNNING);

	return 0;
}

static sfc_dp_rx_qstop_t sfc_efx_rx_qstop;
static void
sfc_efx_rx_qstop(struct sfc_dp_rxq *dp_rxq,
		 __rte_unused unsigned int *evq_read_ptr)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_RUNNING;

	/* libefx-based datapath is bound to libefx-based PMD and uses
	 * the event queue structure directly. So, there is no need to
	 * return the EvQ read pointer.
	 */
}

static sfc_dp_rx_qpurge_t sfc_efx_rx_qpurge;
static void
sfc_efx_rx_qpurge(struct sfc_dp_rxq *dp_rxq)
{
	struct sfc_efx_rxq *rxq = sfc_efx_rxq_by_dp_rxq(dp_rxq);
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	for (i = rxq->completed; i != rxq->added; ++i) {
		rxd = &rxq->sw_desc[i & rxq->ptr_mask];
		rte_mempool_put(rxq->refill_mb_pool, rxd->mbuf);
		rxd->mbuf = NULL;
		/* Packed stream relies on 0 in inactive SW desc.
		 * Rx queue stop is not performance critical, so
		 * there is no harm in doing it always.
		 */
		rxd->flags = 0;
		rxd->size = 0;
	}

	rxq->flags &= ~SFC_EFX_RXQ_FLAG_STARTED;
}

struct sfc_dp_rx sfc_efx_rx = {
	.dp = {
		.name		= SFC_KVARG_DATAPATH_EFX,
		.type		= SFC_DP_RX,
		.hw_fw_caps	= 0,
	},
	.features		= SFC_DP_RX_FEAT_SCATTER,
	.qcreate		= sfc_efx_rx_qcreate,
	.qdestroy		= sfc_efx_rx_qdestroy,
	.qstart			= sfc_efx_rx_qstart,
	.qstop			= sfc_efx_rx_qstop,
	.qpurge			= sfc_efx_rx_qpurge,
	.supported_ptypes_get	= sfc_efx_supported_ptypes_get,
	.qdesc_npending		= sfc_efx_rx_qdesc_npending,
	.qdesc_status		= sfc_efx_rx_qdesc_status,
	.pkt_burst		= sfc_efx_recv_pkts,
};

unsigned int
sfc_rx_qdesc_npending(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq = sa->rxq_info[sw_index].rxq;

	if (rxq == NULL || (rxq->state & SFC_RXQ_STARTED) == 0)
		return 0;

	return sa->dp_rx->qdesc_npending(rxq->dp);
}

int
sfc_rx_qdesc_done(struct sfc_dp_rxq *dp_rxq, unsigned int offset)
{
	struct sfc_rxq *rxq = sfc_rxq_by_dp_rxq(dp_rxq);

	return offset < rxq->evq->sa->dp_rx->qdesc_npending(dp_rxq);
}

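/*
 * Flush the Rx queue: request the flush, wait for the flush done or failed
 * event and finally purge the buffers still held by the datapath.
 */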
static void
sfc_rx_qflush(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq *rxq;
	unsigned int retry_count;
	unsigned int wait_count;
	int rc;

	rxq = sa->rxq_info[sw_index].rxq;
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	/*
	 * Retry Rx queue flushing in the case of flush failure or
	 * timeout. In the worst case it can delay for 6 seconds:
	 * SFC_RX_QFLUSH_ATTEMPTS * SFC_RX_QFLUSH_POLL_ATTEMPTS *
	 * SFC_RX_QFLUSH_POLL_WAIT_MS = 3 * 2000 * 1 ms.
	 */
	for (retry_count = 0;
	     ((rxq->state & SFC_RXQ_FLUSHED) == 0) &&
	     (retry_count < SFC_RX_QFLUSH_ATTEMPTS);
	     ++retry_count) {
		rc = efx_rx_qflush(rxq->common);
		if (rc != 0) {
			rxq->state |= (rc == EALREADY) ?
				SFC_RXQ_FLUSHED : SFC_RXQ_FLUSH_FAILED;
			break;
		}
		rxq->state &= ~SFC_RXQ_FLUSH_FAILED;
		rxq->state |= SFC_RXQ_FLUSHING;

		/*
		 * Wait for Rx queue flush done or failed event at least
		 * SFC_RX_QFLUSH_POLL_WAIT_MS milliseconds and not more
		 * than 2 seconds (SFC_RX_QFLUSH_POLL_WAIT_MS multiplied
		 * by SFC_RX_QFLUSH_POLL_ATTEMPTS).
		 */
		wait_count = 0;
		do {
			rte_delay_ms(SFC_RX_QFLUSH_POLL_WAIT_MS);
			sfc_ev_qpoll(rxq->evq);
		} while ((rxq->state & SFC_RXQ_FLUSHING) &&
			 (wait_count++ < SFC_RX_QFLUSH_POLL_ATTEMPTS));

		if (rxq->state & SFC_RXQ_FLUSHING)
			sfc_err(sa, "RxQ %u flush timed out", sw_index);

		if (rxq->state & SFC_RXQ_FLUSH_FAILED)
			sfc_err(sa, "RxQ %u flush failed", sw_index);

		if (rxq->state & SFC_RXQ_FLUSHED)
			sfc_info(sa, "RxQ %u flushed", sw_index);
	}

	sa->dp_rx->qpurge(rxq->dp);
}

624 
625 static int
626 sfc_rx_default_rxq_set_filter(struct sfc_adapter *sa, struct sfc_rxq *rxq)
627 {
628 	boolean_t rss = (sa->rss_channels > 0) ? B_TRUE : B_FALSE;
629 	struct sfc_port *port = &sa->port;
630 	int rc;
631 
632 	/*
633 	 * If promiscuous or all-multicast mode has been requested, setting
634 	 * filter for the default Rx queue might fail, in particular, while
635 	 * running over PCI function which is not a member of corresponding
636 	 * privilege groups; if this occurs, few iterations will be made to
637 	 * repeat this step without promiscuous and all-multicast flags set
638 	 */
639 retry:
640 	rc = efx_mac_filter_default_rxq_set(sa->nic, rxq->common, rss);
641 	if (rc == 0)
642 		return 0;
643 	else if (rc != EOPNOTSUPP)
644 		return rc;
645 
646 	if (port->promisc) {
647 		sfc_warn(sa, "promiscuous mode has been requested, "
648 			     "but the HW rejects it");
649 		sfc_warn(sa, "promiscuous mode will be disabled");
650 
651 		port->promisc = B_FALSE;
652 		rc = sfc_set_rx_mode(sa);
653 		if (rc != 0)
654 			return rc;
655 
656 		goto retry;
657 	}
658 
659 	if (port->allmulti) {
660 		sfc_warn(sa, "all-multicast mode has been requested, "
661 			     "but the HW rejects it");
662 		sfc_warn(sa, "all-multicast mode will be disabled");
663 
664 		port->allmulti = B_FALSE;
665 		rc = sfc_set_rx_mode(sa);
666 		if (rc != 0)
667 			return rc;
668 
669 		goto retry;
670 	}
671 
672 	return rc;
673 }
674 
675 int
676 sfc_rx_qstart(struct sfc_adapter *sa, unsigned int sw_index)
677 {
678 	struct sfc_port *port = &sa->port;
679 	struct sfc_rxq_info *rxq_info;
680 	struct sfc_rxq *rxq;
681 	struct sfc_evq *evq;
682 	int rc;
683 
684 	sfc_log_init(sa, "sw_index=%u", sw_index);
685 
686 	SFC_ASSERT(sw_index < sa->rxq_count);
687 
688 	rxq_info = &sa->rxq_info[sw_index];
689 	rxq = rxq_info->rxq;
690 	SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);
691 
692 	evq = rxq->evq;
693 
694 	rc = sfc_ev_qstart(evq, sfc_evq_index_by_rxq_sw_index(sa, sw_index));
695 	if (rc != 0)
696 		goto fail_ev_qstart;
697 
698 	rc = efx_rx_qcreate(sa->nic, rxq->hw_index, 0, rxq_info->type,
699 			    &rxq->mem, rxq_info->entries,
700 			    0 /* not used on EF10 */, rxq_info->type_flags,
701 			    evq->common, &rxq->common);
702 	if (rc != 0)
703 		goto fail_rx_qcreate;
704 
705 	efx_rx_qenable(rxq->common);
706 
707 	rc = sa->dp_rx->qstart(rxq->dp, evq->read_ptr);
708 	if (rc != 0)
709 		goto fail_dp_qstart;
710 
711 	rxq->state |= SFC_RXQ_STARTED;
712 
713 	if ((sw_index == 0) && !port->isolated) {
714 		rc = sfc_rx_default_rxq_set_filter(sa, rxq);
715 		if (rc != 0)
716 			goto fail_mac_filter_default_rxq_set;
717 	}
718 
719 	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
720 	sa->eth_dev->data->rx_queue_state[sw_index] =
721 		RTE_ETH_QUEUE_STATE_STARTED;
722 
723 	return 0;
724 
725 fail_mac_filter_default_rxq_set:
726 	sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
727 
728 fail_dp_qstart:
729 	sfc_rx_qflush(sa, sw_index);
730 
731 fail_rx_qcreate:
732 	sfc_ev_qstop(evq);
733 
734 fail_ev_qstart:
735 	return rc;
736 }
737 
738 void
739 sfc_rx_qstop(struct sfc_adapter *sa, unsigned int sw_index)
740 {
741 	struct sfc_rxq_info *rxq_info;
742 	struct sfc_rxq *rxq;
743 
744 	sfc_log_init(sa, "sw_index=%u", sw_index);
745 
746 	SFC_ASSERT(sw_index < sa->rxq_count);
747 
748 	rxq_info = &sa->rxq_info[sw_index];
749 	rxq = rxq_info->rxq;
750 
751 	if (rxq->state == SFC_RXQ_INITIALIZED)
752 		return;
753 	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);
754 
755 	/* It seems to be used by DPDK for debug purposes only ('rte_ether') */
756 	sa->eth_dev->data->rx_queue_state[sw_index] =
757 		RTE_ETH_QUEUE_STATE_STOPPED;
758 
759 	sa->dp_rx->qstop(rxq->dp, &rxq->evq->read_ptr);
760 
761 	if (sw_index == 0)
762 		efx_mac_filter_default_rxq_clear(sa->nic);
763 
764 	sfc_rx_qflush(sa, sw_index);
765 
766 	rxq->state = SFC_RXQ_INITIALIZED;
767 
768 	efx_rx_qdestroy(rxq->common);
769 
770 	sfc_ev_qstop(rxq->evq);
771 }
772 
773 static int
774 sfc_rx_qcheck_conf(struct sfc_adapter *sa, uint16_t nb_rx_desc,
775 		   const struct rte_eth_rxconf *rx_conf)
776 {
777 	const uint16_t rx_free_thresh_max = EFX_RXQ_LIMIT(nb_rx_desc);
778 	int rc = 0;
779 
780 	if (rx_conf->rx_thresh.pthresh != 0 ||
781 	    rx_conf->rx_thresh.hthresh != 0 ||
782 	    rx_conf->rx_thresh.wthresh != 0) {
783 		sfc_warn(sa,
784 			"RxQ prefetch/host/writeback thresholds are not supported");
785 	}
786 
787 	if (rx_conf->rx_free_thresh > rx_free_thresh_max) {
788 		sfc_err(sa,
789 			"RxQ free threshold too large: %u vs maximum %u",
790 			rx_conf->rx_free_thresh, rx_free_thresh_max);
791 		rc = EINVAL;
792 	}
793 
794 	if (rx_conf->rx_drop_en == 0) {
795 		sfc_err(sa, "RxQ drop disable is not supported");
796 		rc = EINVAL;
797 	}
798 
799 	return rc;
800 }
801 
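/*
 * Calculate the data start alignment (a power of 2) guaranteed for mbufs
 * from the given pool, derived from the mbuf cache line alignment and the
 * fixed offset to the data area.
 */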
static unsigned int
sfc_rx_mbuf_data_alignment(struct rte_mempool *mb_pool)
{
	uint32_t data_off;
	uint32_t order;

	/* The mbuf object itself is always cache line aligned */
	order = rte_bsf32(RTE_CACHE_LINE_SIZE);

	/* Data offset from mbuf object start */
	data_off = sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mb_pool) +
		RTE_PKTMBUF_HEADROOM;

	order = MIN(order, rte_bsf32(data_off));

	return 1u << (order - 1);
}

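/*
 * Derive the usable Rx buffer size from the mempool data room, reserving
 * space for the mbuf headroom and for the buffer start/end alignment
 * required by the NIC. Returns 0 if the pool objects are too small.
 */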
static uint16_t
sfc_rx_mb_pool_buf_size(struct sfc_adapter *sa, struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	const uint32_t nic_align_start = MAX(1, encp->enc_rx_buf_align_start);
	const uint32_t nic_align_end = MAX(1, encp->enc_rx_buf_align_end);
	uint16_t buf_size;
	unsigned int buf_aligned;
	unsigned int start_alignment;
	unsigned int end_padding_alignment;

	/* Below it is assumed that both alignments are powers of 2 */
	SFC_ASSERT(rte_is_power_of_2(nic_align_start));
	SFC_ASSERT(rte_is_power_of_2(nic_align_end));

	/*
	 * mbuf is always cache line aligned, double-check
	 * that it meets Rx buffer start alignment requirements.
	 */

	/* Start from mbuf pool data room size */
	buf_size = rte_pktmbuf_data_room_size(mb_pool);

	/* Remove headroom */
	if (buf_size <= RTE_PKTMBUF_HEADROOM) {
		sfc_err(sa,
			"RxQ mbuf pool %s object data room size %u is smaller than headroom %u",
			mb_pool->name, buf_size, RTE_PKTMBUF_HEADROOM);
		return 0;
	}
	buf_size -= RTE_PKTMBUF_HEADROOM;

	/* Calculate guaranteed data start alignment */
	buf_aligned = sfc_rx_mbuf_data_alignment(mb_pool);

	/* Reserve space for start alignment */
	if (buf_aligned < nic_align_start) {
		start_alignment = nic_align_start - buf_aligned;
		if (buf_size <= start_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u and buffer start alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment);
			return 0;
		}
		buf_aligned = nic_align_start;
		buf_size -= start_alignment;
	} else {
		start_alignment = 0;
	}

	/* Make sure that end padding does not write beyond the buffer */
	if (buf_aligned < nic_align_end) {
		/*
		 * Estimate the space which can be lost. If the guaranteed
		 * buffer size is odd, the lost space is (nic_align_end - 1).
		 * A more accurate formula is used below.
		 */
		end_padding_alignment = nic_align_end -
			MIN(buf_aligned, 1u << (rte_bsf32(buf_size) - 1));
		if (buf_size <= end_padding_alignment) {
			sfc_err(sa,
				"RxQ mbuf pool %s object data room size %u is insufficient for headroom %u, buffer start alignment %u and end padding alignment %u required by NIC",
				mb_pool->name,
				rte_pktmbuf_data_room_size(mb_pool),
				RTE_PKTMBUF_HEADROOM, start_alignment,
				end_padding_alignment);
			return 0;
		}
		buf_size -= end_padding_alignment;
	} else {
		/*
		 * Start is aligned the same as or better than the end,
		 * just align the length.
		 */
		buf_size = P2ALIGN(buf_size, nic_align_end);
	}

	return buf_size;
}

int
sfc_rx_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     uint16_t nb_rx_desc, unsigned int socket_id,
	     const struct rte_eth_rxconf *rx_conf,
	     struct rte_mempool *mb_pool)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	int rc;
	uint16_t buf_size;
	struct sfc_rxq_info *rxq_info;
	struct sfc_evq *evq;
	struct sfc_rxq *rxq;
	struct sfc_dp_rx_qcreate_info info;

	rc = sfc_rx_qcheck_conf(sa, nb_rx_desc, rx_conf);
	if (rc != 0)
		goto fail_bad_conf;

	buf_size = sfc_rx_mb_pool_buf_size(sa, mb_pool);
	if (buf_size == 0) {
		sfc_err(sa, "RxQ %u mbuf pool object size is too small",
			sw_index);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	if ((buf_size < sa->port.pdu + encp->enc_rx_prefix_size) &&
	    !sa->eth_dev->data->dev_conf.rxmode.enable_scatter) {
		sfc_err(sa, "Rx scatter is disabled and RxQ %u mbuf pool "
			"object size is too small", sw_index);
		sfc_err(sa, "RxQ %u calculated Rx buffer size is %u vs "
			"PDU size %u plus Rx prefix %u bytes",
			sw_index, buf_size, (unsigned int)sa->port.pdu,
			encp->enc_rx_prefix_size);
		rc = EINVAL;
		goto fail_bad_conf;
	}

	SFC_ASSERT(sw_index < sa->rxq_count);
	rxq_info = &sa->rxq_info[sw_index];

	SFC_ASSERT(nb_rx_desc <= rxq_info->max_entries);
	rxq_info->entries = nb_rx_desc;
	rxq_info->type = EFX_RXQ_TYPE_DEFAULT;
	rxq_info->type_flags =
		sa->eth_dev->data->dev_conf.rxmode.enable_scatter ?
		EFX_RXQ_FLAG_SCATTER : EFX_RXQ_FLAG_NONE;

	if ((encp->enc_tunnel_encapsulations_supported != 0) &&
	    (sa->dp_rx->features & SFC_DP_RX_FEAT_TUNNELS))
		rxq_info->type_flags |= EFX_RXQ_FLAG_INNER_CLASSES;

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index,
			  rxq_info->entries, socket_id, &evq);
	if (rc != 0)
		goto fail_ev_qinit;

	rc = ENOMEM;
	rxq = rte_zmalloc_socket("sfc-rxq", sizeof(*rxq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (rxq == NULL)
		goto fail_rxq_alloc;

	rxq_info->rxq = rxq;

	rxq->evq = evq;
	rxq->hw_index = sw_index;
	rxq->refill_threshold =
		RTE_MAX(rx_conf->rx_free_thresh, SFC_RX_REFILL_BULK);
	rxq->refill_mb_pool = mb_pool;

	rc = sfc_dma_alloc(sa, "rxq", sw_index, EFX_RXQ_SIZE(rxq_info->entries),
			   socket_id, &rxq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	memset(&info, 0, sizeof(info));
	info.refill_mb_pool = rxq->refill_mb_pool;
	info.refill_threshold = rxq->refill_threshold;
	info.buf_size = buf_size;
	info.batch_max = encp->enc_rx_batch_max;
	info.prefix_size = encp->enc_rx_prefix_size;

#if EFSYS_OPT_RX_SCALE
	if (sa->hash_support == EFX_RX_HASH_AVAILABLE && sa->rss_channels > 0)
		info.flags |= SFC_RXQ_FLAG_RSS_HASH;
#endif

	info.rxq_entries = rxq_info->entries;
	info.rxq_hw_ring = rxq->mem.esm_base;
	info.evq_entries = rxq_info->entries;
	info.evq_hw_ring = evq->mem.esm_base;
	info.hw_index = rxq->hw_index;
	info.mem_bar = sa->mem_bar.esb_base;

	rc = sa->dp_rx->qcreate(sa->eth_dev->data->port_id, sw_index,
				&RTE_ETH_DEV_TO_PCI(sa->eth_dev)->addr,
				socket_id, &info, &rxq->dp);
	if (rc != 0)
		goto fail_dp_rx_qcreate;

	evq->dp_rxq = rxq->dp;

	rxq->state = SFC_RXQ_INITIALIZED;

	rxq_info->deferred_start = (rx_conf->rx_deferred_start != 0);

	return 0;

fail_dp_rx_qcreate:
	sfc_dma_free(sa, &rxq->mem);

fail_dma_alloc:
	rxq_info->rxq = NULL;
	rte_free(rxq);

fail_rxq_alloc:
	sfc_ev_qfini(evq);

fail_ev_qinit:
	rxq_info->entries = 0;

fail_bad_conf:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_rx_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info;
	struct sfc_rxq *rxq;

	SFC_ASSERT(sw_index < sa->rxq_count);

	rxq_info = &sa->rxq_info[sw_index];

	rxq = rxq_info->rxq;
	SFC_ASSERT(rxq->state == SFC_RXQ_INITIALIZED);

	sa->dp_rx->qdestroy(rxq->dp);
	rxq->dp = NULL;

	rxq_info->rxq = NULL;
	rxq_info->entries = 0;

	sfc_dma_free(sa, &rxq->mem);

	sfc_ev_qfini(rxq->evq);
	rxq->evq = NULL;

	rte_free(rxq);
}

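/*
 * Translation between DPDK RSS hash field selectors (ETH_RSS_*) and libefx
 * hash types used when configuring receive scaling.
 */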
#if EFSYS_OPT_RX_SCALE
efx_rx_hash_type_t
sfc_rte_to_efx_hash_type(uint64_t rss_hf)
{
	efx_rx_hash_type_t efx_hash_types = 0;

	if ((rss_hf & (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
		       ETH_RSS_NONFRAG_IPV4_OTHER)) != 0)
		efx_hash_types |= EFX_RX_HASH_IPV4;

	if ((rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) != 0)
		efx_hash_types |= EFX_RX_HASH_TCPIPV4;

	if ((rss_hf & (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX)) != 0)
		efx_hash_types |= EFX_RX_HASH_IPV6;

	if ((rss_hf & (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX)) != 0)
		efx_hash_types |= EFX_RX_HASH_TCPIPV6;

	return efx_hash_types;
}

uint64_t
sfc_efx_to_rte_hash_type(efx_rx_hash_type_t efx_hash_types)
{
	uint64_t rss_hf = 0;

	if ((efx_hash_types & EFX_RX_HASH_IPV4) != 0)
		rss_hf |= (ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 |
			   ETH_RSS_NONFRAG_IPV4_OTHER);

	if ((efx_hash_types & EFX_RX_HASH_TCPIPV4) != 0)
		rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP;

	if ((efx_hash_types & EFX_RX_HASH_IPV6) != 0)
		rss_hf |= (ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 |
			   ETH_RSS_NONFRAG_IPV6_OTHER | ETH_RSS_IPV6_EX);

	if ((efx_hash_types & EFX_RX_HASH_TCPIPV6) != 0)
		rss_hf |= (ETH_RSS_NONFRAG_IPV6_TCP | ETH_RSS_IPV6_TCP_EX);

	return rss_hf;
}
#endif

#if EFSYS_OPT_RX_SCALE
static int
sfc_rx_rss_config(struct sfc_adapter *sa)
{
	int rc = 0;

	if (sa->rss_channels > 0) {
		rc = efx_rx_scale_mode_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					   EFX_RX_HASHALG_TOEPLITZ,
					   sa->rss_hash_types, B_TRUE);
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_key_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  sa->rss_key,
					  sizeof(sa->rss_key));
		if (rc != 0)
			goto finish;

		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
					  sa->rss_tbl, RTE_DIM(sa->rss_tbl));
	}

finish:
	return rc;
}
#else
static int
sfc_rx_rss_config(__rte_unused struct sfc_adapter *sa)
{
	return 0;
}
#endif

int
sfc_rx_start(struct sfc_adapter *sa)
{
	unsigned int sw_index;
	int rc;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	rc = sfc_rx_rss_config(sa);
	if (rc != 0)
		goto fail_rss_config;

	for (sw_index = 0; sw_index < sa->rxq_count; ++sw_index) {
		if ((!sa->rxq_info[sw_index].deferred_start ||
		     sa->rxq_info[sw_index].deferred_started)) {
			rc = sfc_rx_qstart(sa, sw_index);
			if (rc != 0)
				goto fail_rx_qstart;
		}
	}

	return 0;

fail_rx_qstart:
	while (sw_index-- > 0)
		sfc_rx_qstop(sa, sw_index);

fail_rss_config:
	efx_rx_fini(sa->nic);

fail_rx_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_rx_stop(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sfc_log_init(sa, "rxq_count=%u", sa->rxq_count);

	sw_index = sa->rxq_count;
	while (sw_index-- > 0) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qstop(sa, sw_index);
	}

	efx_rx_fini(sa->nic);
}

static int
sfc_rx_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_rxq_info *rxq_info = &sa->rxq_info[sw_index];
	unsigned int max_entries;

	max_entries = EFX_RXQ_MAXNDESCS;
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	rxq_info->max_entries = max_entries;

	return 0;
}

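/*
 * Validate the device-level Rx mode (multi-queue mode, VLAN/CRC offload
 * requests, scatter, LRO) against what the hardware and the selected Rx
 * datapath support.
 */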
static int
sfc_rx_check_mode(struct sfc_adapter *sa, struct rte_eth_rxmode *rxmode)
{
	int rc = 0;

	switch (rxmode->mq_mode) {
	case ETH_MQ_RX_NONE:
		/* No special checks are required */
		break;
#if EFSYS_OPT_RX_SCALE
	case ETH_MQ_RX_RSS:
		if (sa->rss_support == EFX_RX_SCALE_UNAVAILABLE) {
			sfc_err(sa, "RSS is not available");
			rc = EINVAL;
		}
		break;
#endif
	default:
		sfc_err(sa, "Rx multi-queue mode %u not supported",
			rxmode->mq_mode);
		rc = EINVAL;
	}

	if (rxmode->header_split) {
		sfc_err(sa, "Header split on Rx not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_filter) {
		sfc_err(sa, "HW VLAN filtering not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_strip) {
		sfc_err(sa, "HW VLAN stripping not supported");
		rc = EINVAL;
	}

	if (rxmode->hw_vlan_extend) {
		sfc_err(sa,
			"Q-in-Q HW VLAN stripping not supported");
		rc = EINVAL;
	}

	if (!rxmode->hw_strip_crc) {
		sfc_warn(sa,
			 "FCS stripping control not supported - always stripped");
		rxmode->hw_strip_crc = 1;
	}

	if (rxmode->enable_scatter &&
	    (~sa->dp_rx->features & SFC_DP_RX_FEAT_SCATTER)) {
		sfc_err(sa, "Rx scatter not supported by %s datapath",
			sa->dp_rx->dp.name);
		rc = EINVAL;
	}

	if (rxmode->enable_lro) {
		sfc_err(sa, "LRO not supported");
		rc = EINVAL;
	}

	return rc;
}

/**
 * Destroy excess queues that are no longer needed after reconfiguration
 * or complete close.
 */
static void
sfc_rx_fini_queues(struct sfc_adapter *sa, unsigned int nb_rx_queues)
{
	int sw_index;

	SFC_ASSERT(nb_rx_queues <= sa->rxq_count);

	sw_index = sa->rxq_count;
	while (--sw_index >= (int)nb_rx_queues) {
		if (sa->rxq_info[sw_index].rxq != NULL)
			sfc_rx_qfini(sa, sw_index);
	}

	sa->rxq_count = nb_rx_queues;
}

/**
 * Initialize the Rx subsystem.
 *
 * Called at the device (re)configuration stage when the number of receive
 * queues is specified together with other device-level receive configuration.
 *
 * It should be used to allocate NUMA-unaware resources.
 */
int
sfc_rx_configure(struct sfc_adapter *sa)
{
	struct rte_eth_conf *dev_conf = &sa->eth_dev->data->dev_conf;
	const unsigned int nb_rx_queues = sa->eth_dev->data->nb_rx_queues;
	int rc;

	sfc_log_init(sa, "nb_rx_queues=%u (old %u)",
		     nb_rx_queues, sa->rxq_count);

	rc = sfc_rx_check_mode(sa, &dev_conf->rxmode);
	if (rc != 0)
		goto fail_check_mode;

	if (nb_rx_queues == sa->rxq_count)
		goto done;

	if (sa->rxq_info == NULL) {
		rc = ENOMEM;
		sa->rxq_info = rte_calloc_socket("sfc-rxqs", nb_rx_queues,
						 sizeof(sa->rxq_info[0]), 0,
						 sa->socket_id);
		if (sa->rxq_info == NULL)
			goto fail_rxqs_alloc;
	} else {
		struct sfc_rxq_info *new_rxq_info;

		if (nb_rx_queues < sa->rxq_count)
			sfc_rx_fini_queues(sa, nb_rx_queues);

		rc = ENOMEM;
		new_rxq_info =
			rte_realloc(sa->rxq_info,
				    nb_rx_queues * sizeof(sa->rxq_info[0]), 0);
		if (new_rxq_info == NULL && nb_rx_queues > 0)
			goto fail_rxqs_realloc;

		sa->rxq_info = new_rxq_info;
		if (nb_rx_queues > sa->rxq_count)
			memset(&sa->rxq_info[sa->rxq_count], 0,
			       (nb_rx_queues - sa->rxq_count) *
			       sizeof(sa->rxq_info[0]));
	}

	while (sa->rxq_count < nb_rx_queues) {
		rc = sfc_rx_qinit_info(sa, sa->rxq_count);
		if (rc != 0)
			goto fail_rx_qinit_info;

		sa->rxq_count++;
	}

#if EFSYS_OPT_RX_SCALE
	sa->rss_channels = (dev_conf->rxmode.mq_mode == ETH_MQ_RX_RSS) ?
			   MIN(sa->rxq_count, EFX_MAXRSS) : 0;

	if (sa->rss_channels > 0) {
		unsigned int sw_index;

		for (sw_index = 0; sw_index < EFX_RSS_TBL_SIZE; ++sw_index)
			sa->rss_tbl[sw_index] = sw_index % sa->rss_channels;
	}
#endif

done:
	return 0;

fail_rx_qinit_info:
fail_rxqs_realloc:
fail_rxqs_alloc:
	sfc_rx_close(sa);

fail_check_mode:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

/**
 * Shut down the Rx subsystem.
 *
 * Called at the device close stage, for example, before device shutdown.
 */
void
sfc_rx_close(struct sfc_adapter *sa)
{
	sfc_rx_fini_queues(sa, 0);

	sa->rss_channels = 0;

	rte_free(sa->rxq_info);
	sa->rxq_info = NULL;
}