xref: /dpdk/drivers/net/sfc/sfc_ethdev.c (revision bc8e32473cc3978d763a1387eaa8244bcf75e77d)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2020 Xilinx, Inc.
4  * Copyright(c) 2016-2019 Solarflare Communications Inc.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9 
10 #include <rte_dev.h>
11 #include <rte_ethdev_driver.h>
12 #include <rte_ethdev_pci.h>
13 #include <rte_pci.h>
14 #include <rte_bus_pci.h>
15 #include <rte_errno.h>
16 #include <rte_string_fns.h>
17 #include <rte_ether.h>
18 
19 #include "efx.h"
20 
21 #include "sfc.h"
22 #include "sfc_debug.h"
23 #include "sfc_log.h"
24 #include "sfc_kvargs.h"
25 #include "sfc_ev.h"
26 #include "sfc_rx.h"
27 #include "sfc_tx.h"
28 #include "sfc_flow.h"
29 #include "sfc_dp.h"
30 #include "sfc_dp_rx.h"
31 
32 uint32_t sfc_logtype_driver;
33 
34 static struct sfc_dp_list sfc_dp_head =
35 	TAILQ_HEAD_INITIALIZER(sfc_dp_head);
36 
37 
38 static void sfc_eth_dev_clear_ops(struct rte_eth_dev *dev);
39 
40 
41 static int
42 sfc_fw_version_get(struct rte_eth_dev *dev, char *fw_version, size_t fw_size)
43 {
44 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
45 	efx_nic_fw_info_t enfi;
46 	int ret;
47 	int rc;
48 
49 	/*
50 	 * The return value of the callback is supposed to be non-negative;
51 	 * nevertheless, if an error occurs, it is desirable to pass it
52 	 * to the caller.
53 	 */
54 	if ((fw_version == NULL) || (fw_size == 0))
55 		return -EINVAL;
56 
57 	rc = efx_nic_get_fw_version(sa->nic, &enfi);
58 	if (rc != 0)
59 		return -rc;
60 
61 	ret = snprintf(fw_version, fw_size,
62 		       "%" PRIu16 ".%" PRIu16 ".%" PRIu16 ".%" PRIu16,
63 		       enfi.enfi_mc_fw_version[0], enfi.enfi_mc_fw_version[1],
64 		       enfi.enfi_mc_fw_version[2], enfi.enfi_mc_fw_version[3]);
65 	if (ret < 0)
66 		return ret;
67 
68 	if (enfi.enfi_dpcpu_fw_ids_valid) {
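		/*
		 * Note: ret may exceed fw_size if the base version string was
		 * truncated; clamp the append offset so that it stays within
		 * the buffer and leaves room for the terminating NUL.
		 */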
69 		size_t dpcpu_fw_ids_offset = MIN(fw_size - 1, (size_t)ret);
70 		int ret_extra;
71 
72 		ret_extra = snprintf(fw_version + dpcpu_fw_ids_offset,
73 				     fw_size - dpcpu_fw_ids_offset,
74 				     " rx%" PRIx16 " tx%" PRIx16,
75 				     enfi.enfi_rx_dpcpu_fw_id,
76 				     enfi.enfi_tx_dpcpu_fw_id);
77 		if (ret_extra < 0)
78 			return ret_extra;
79 
80 		ret += ret_extra;
81 	}
82 
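	/*
	 * ret holds the full length of the string excluding the terminating
	 * NUL; ++ret accounts for the NUL, so if the buffer is too small the
	 * total number of bytes required is returned to the caller.
	 */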
83 	if (fw_size < (size_t)(++ret))
84 		return ret;
85 	else
86 		return 0;
87 }
88 
89 static int
90 sfc_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
91 {
92 	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
93 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
94 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
95 	struct sfc_rss *rss = &sas->rss;
96 	struct sfc_mae *mae = &sa->mae;
97 	uint64_t txq_offloads_def = 0;
98 
99 	sfc_log_init(sa, "entry");
100 
101 	dev_info->min_mtu = RTE_ETHER_MIN_MTU;
102 	dev_info->max_mtu = EFX_MAC_SDU_MAX;
103 
104 	dev_info->max_rx_pktlen = EFX_MAC_PDU_MAX;
105 
106 	dev_info->max_vfs = sa->sriov.num_vfs;
107 
108 	/* Autonegotiation may be disabled */
109 	dev_info->speed_capa = ETH_LINK_SPEED_FIXED;
110 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_1000FDX))
111 		dev_info->speed_capa |= ETH_LINK_SPEED_1G;
112 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_10000FDX))
113 		dev_info->speed_capa |= ETH_LINK_SPEED_10G;
114 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_25000FDX))
115 		dev_info->speed_capa |= ETH_LINK_SPEED_25G;
116 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_40000FDX))
117 		dev_info->speed_capa |= ETH_LINK_SPEED_40G;
118 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_50000FDX))
119 		dev_info->speed_capa |= ETH_LINK_SPEED_50G;
120 	if (sa->port.phy_adv_cap_mask & (1u << EFX_PHY_CAP_100000FDX))
121 		dev_info->speed_capa |= ETH_LINK_SPEED_100G;
122 
123 	dev_info->max_rx_queues = sa->rxq_max;
124 	dev_info->max_tx_queues = sa->txq_max;
125 
126 	/* By default packets are dropped if no descriptors are available */
127 	dev_info->default_rxconf.rx_drop_en = 1;
128 
129 	dev_info->rx_queue_offload_capa = sfc_rx_get_queue_offload_caps(sa);
130 
131 	/*
132 	 * rx_offload_capa includes both device and queue offloads since
133 	 * the latter may be requested on a per-device basis, which makes
134 	 * sense when some offloads need to be set on all queues.
135 	 */
136 	dev_info->rx_offload_capa = sfc_rx_get_dev_offload_caps(sa) |
137 				    dev_info->rx_queue_offload_capa;
138 
139 	dev_info->tx_queue_offload_capa = sfc_tx_get_queue_offload_caps(sa);
140 
141 	/*
142 	 * tx_offload_capa includes both device and queue offloads since
143 	 * the latter may be requested on a per-device basis, which makes
144 	 * sense when some offloads need to be set on all queues.
145 	 */
146 	dev_info->tx_offload_capa = sfc_tx_get_dev_offload_caps(sa) |
147 				    dev_info->tx_queue_offload_capa;
148 
149 	if (dev_info->tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
150 		txq_offloads_def |= DEV_TX_OFFLOAD_MBUF_FAST_FREE;
151 
152 	dev_info->default_txconf.offloads |= txq_offloads_def;
153 
154 	if (rss->context_type != EFX_RX_SCALE_UNAVAILABLE) {
155 		uint64_t rte_hf = 0;
156 		unsigned int i;
157 
158 		for (i = 0; i < rss->hf_map_nb_entries; ++i)
159 			rte_hf |= rss->hf_map[i].rte;
160 
161 		dev_info->reta_size = EFX_RSS_TBL_SIZE;
162 		dev_info->hash_key_size = EFX_RSS_KEY_SIZE;
163 		dev_info->flow_type_rss_offloads = rte_hf;
164 	}
165 
166 	/* Initialize to hardware limits */
167 	dev_info->rx_desc_lim.nb_max = sa->rxq_max_entries;
168 	dev_info->rx_desc_lim.nb_min = sa->rxq_min_entries;
169 	/* The RXQ hardware requires that the descriptor count is a power
170 	 * of 2, but rx_desc_lim cannot properly describe that constraint.
171 	 */
172 	dev_info->rx_desc_lim.nb_align = sa->rxq_min_entries;
173 
174 	/* Initialize to hardware limits */
175 	dev_info->tx_desc_lim.nb_max = sa->txq_max_entries;
176 	dev_info->tx_desc_lim.nb_min = sa->txq_min_entries;
177 	/*
178 	 * The TXQ hardware requires that the descriptor count is a power
179 	 * of 2, but tx_desc_lim cannot properly describe that constraint
180 	 */
181 	dev_info->tx_desc_lim.nb_align = sa->txq_min_entries;
182 
183 	if (sap->dp_rx->get_dev_info != NULL)
184 		sap->dp_rx->get_dev_info(dev_info);
185 	if (sap->dp_tx->get_dev_info != NULL)
186 		sap->dp_tx->get_dev_info(dev_info);
187 
188 	dev_info->dev_capa = RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
189 			     RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP;
190 
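	/*
	 * Switch information is reported only when the MAE (Match-Action
	 * Engine) is supported by the adapter.
	 */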
191 	if (mae->status == SFC_MAE_STATUS_SUPPORTED) {
192 		dev_info->switch_info.name = dev->device->driver->name;
193 		dev_info->switch_info.domain_id = mae->switch_domain_id;
194 		dev_info->switch_info.port_id = mae->switch_port_id;
195 	}
196 
197 	return 0;
198 }
199 
200 static const uint32_t *
201 sfc_dev_supported_ptypes_get(struct rte_eth_dev *dev)
202 {
203 	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
204 
205 	return sap->dp_rx->supported_ptypes_get(sap->shared->tunnel_encaps);
206 }
207 
208 static int
209 sfc_dev_configure(struct rte_eth_dev *dev)
210 {
211 	struct rte_eth_dev_data *dev_data = dev->data;
212 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
213 	int rc;
214 
215 	sfc_log_init(sa, "entry n_rxq=%u n_txq=%u",
216 		     dev_data->nb_rx_queues, dev_data->nb_tx_queues);
217 
218 	sfc_adapter_lock(sa);
219 	switch (sa->state) {
220 	case SFC_ADAPTER_CONFIGURED:
221 		/* FALLTHROUGH */
222 	case SFC_ADAPTER_INITIALIZED:
223 		rc = sfc_configure(sa);
224 		break;
225 	default:
226 		sfc_err(sa, "unexpected adapter state %u to configure",
227 			sa->state);
228 		rc = EINVAL;
229 		break;
230 	}
231 	sfc_adapter_unlock(sa);
232 
233 	sfc_log_init(sa, "done %d", rc);
234 	SFC_ASSERT(rc >= 0);
235 	return -rc;
236 }
237 
238 static int
239 sfc_dev_start(struct rte_eth_dev *dev)
240 {
241 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
242 	int rc;
243 
244 	sfc_log_init(sa, "entry");
245 
246 	sfc_adapter_lock(sa);
247 	rc = sfc_start(sa);
248 	sfc_adapter_unlock(sa);
249 
250 	sfc_log_init(sa, "done %d", rc);
251 	SFC_ASSERT(rc >= 0);
252 	return -rc;
253 }
254 
255 static int
256 sfc_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
257 {
258 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
259 	struct rte_eth_link current_link;
260 	int ret;
261 
262 	sfc_log_init(sa, "entry");
263 
264 	if (sa->state != SFC_ADAPTER_STARTED) {
265 		sfc_port_link_mode_to_info(EFX_LINK_UNKNOWN, &current_link);
266 	} else if (wait_to_complete) {
267 		efx_link_mode_t link_mode;
268 
269 		if (efx_port_poll(sa->nic, &link_mode) != 0)
270 			link_mode = EFX_LINK_UNKNOWN;
271 		sfc_port_link_mode_to_info(link_mode, &current_link);
272 
273 	} else {
274 		sfc_ev_mgmt_qpoll(sa);
275 		rte_eth_linkstatus_get(dev, &current_link);
276 	}
277 
278 	ret = rte_eth_linkstatus_set(dev, &current_link);
279 	if (ret == 0)
280 		sfc_notice(sa, "Link status is %s",
281 			   current_link.link_status ? "UP" : "DOWN");
282 
283 	return ret;
284 }
285 
286 static int
287 sfc_dev_stop(struct rte_eth_dev *dev)
288 {
289 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
290 
291 	sfc_log_init(sa, "entry");
292 
293 	sfc_adapter_lock(sa);
294 	sfc_stop(sa);
295 	sfc_adapter_unlock(sa);
296 
297 	sfc_log_init(sa, "done");
298 
299 	return 0;
300 }
301 
302 static int
303 sfc_dev_set_link_up(struct rte_eth_dev *dev)
304 {
305 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
306 	int rc;
307 
308 	sfc_log_init(sa, "entry");
309 
310 	sfc_adapter_lock(sa);
311 	rc = sfc_start(sa);
312 	sfc_adapter_unlock(sa);
313 
314 	SFC_ASSERT(rc >= 0);
315 	return -rc;
316 }
317 
318 static int
319 sfc_dev_set_link_down(struct rte_eth_dev *dev)
320 {
321 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
322 
323 	sfc_log_init(sa, "entry");
324 
325 	sfc_adapter_lock(sa);
326 	sfc_stop(sa);
327 	sfc_adapter_unlock(sa);
328 
329 	return 0;
330 }
331 
332 static void
333 sfc_eth_dev_secondary_clear_ops(struct rte_eth_dev *dev)
334 {
335 	free(dev->process_private);
336 	rte_eth_dev_release_port(dev);
337 }
338 
339 static int
340 sfc_dev_close(struct rte_eth_dev *dev)
341 {
342 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
343 
344 	sfc_log_init(sa, "entry");
345 
346 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
347 		sfc_eth_dev_secondary_clear_ops(dev);
348 		return 0;
349 	}
350 
351 	sfc_adapter_lock(sa);
352 	switch (sa->state) {
353 	case SFC_ADAPTER_STARTED:
354 		sfc_stop(sa);
355 		SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
356 		/* FALLTHROUGH */
357 	case SFC_ADAPTER_CONFIGURED:
358 		sfc_close(sa);
359 		SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED);
360 		/* FALLTHROUGH */
361 	case SFC_ADAPTER_INITIALIZED:
362 		break;
363 	default:
364 		sfc_err(sa, "unexpected adapter state %u on close", sa->state);
365 		break;
366 	}
367 
368 	/*
369 	 * Cleanup all resources.
370 	 * Rollback primary process sfc_eth_dev_init() below.
371 	 */
372 
373 	sfc_eth_dev_clear_ops(dev);
374 
375 	sfc_detach(sa);
376 	sfc_unprobe(sa);
377 
378 	sfc_kvargs_cleanup(sa);
379 
380 	sfc_adapter_unlock(sa);
381 	sfc_adapter_lock_fini(sa);
382 
383 	sfc_log_init(sa, "done");
384 
385 	/* Required for logging, so clean up last */
386 	sa->eth_dev = NULL;
387 
388 	free(sa);
389 
390 	return 0;
391 }
392 
393 static int
394 sfc_dev_filter_set(struct rte_eth_dev *dev, enum sfc_dev_filter_mode mode,
395 		   boolean_t enabled)
396 {
397 	struct sfc_port *port;
398 	boolean_t *toggle;
399 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
400 	boolean_t allmulti = (mode == SFC_DEV_FILTER_MODE_ALLMULTI);
401 	const char *desc = (allmulti) ? "all-multi" : "promiscuous";
402 	int rc = 0;
403 
404 	sfc_adapter_lock(sa);
405 
406 	port = &sa->port;
407 	toggle = (allmulti) ? (&port->allmulti) : (&port->promisc);
408 
409 	if (*toggle != enabled) {
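	/*
	 * The toggle is updated optimistically; if applying the new Rx mode
	 * on a started adapter fails, the previous value is restored below.
	 */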
410 		*toggle = enabled;
411 
412 		if (sfc_sa2shared(sa)->isolated) {
413 			sfc_warn(sa, "isolated mode is active on the port");
414 			sfc_warn(sa, "the change is to be applied on the next "
415 				     "start provided that isolated mode is "
416 			     "disabled prior to the next start");
417 		} else if ((sa->state == SFC_ADAPTER_STARTED) &&
418 			   ((rc = sfc_set_rx_mode(sa)) != 0)) {
419 			*toggle = !(enabled);
420 			sfc_warn(sa, "Failed to %s %s mode, rc = %d",
421 				 ((enabled) ? "enable" : "disable"), desc, rc);
422 
423 			/*
424 			 * For promiscuous and all-multicast filters a
425 			 * permission failure should be reported as an
426 			 * unsupported filter.
427 			 */
428 			if (rc == EPERM)
429 				rc = ENOTSUP;
430 		}
431 	}
432 
433 	sfc_adapter_unlock(sa);
434 	return rc;
435 }
436 
437 static int
438 sfc_dev_promisc_enable(struct rte_eth_dev *dev)
439 {
440 	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_TRUE);
441 
442 	SFC_ASSERT(rc >= 0);
443 	return -rc;
444 }
445 
446 static int
447 sfc_dev_promisc_disable(struct rte_eth_dev *dev)
448 {
449 	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_PROMISC, B_FALSE);
450 
451 	SFC_ASSERT(rc >= 0);
452 	return -rc;
453 }
454 
455 static int
456 sfc_dev_allmulti_enable(struct rte_eth_dev *dev)
457 {
458 	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_TRUE);
459 
460 	SFC_ASSERT(rc >= 0);
461 	return -rc;
462 }
463 
464 static int
465 sfc_dev_allmulti_disable(struct rte_eth_dev *dev)
466 {
467 	int rc = sfc_dev_filter_set(dev, SFC_DEV_FILTER_MODE_ALLMULTI, B_FALSE);
468 
469 	SFC_ASSERT(rc >= 0);
470 	return -rc;
471 }
472 
473 static int
474 sfc_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
475 		   uint16_t nb_rx_desc, unsigned int socket_id,
476 		   const struct rte_eth_rxconf *rx_conf,
477 		   struct rte_mempool *mb_pool)
478 {
479 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
480 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
481 	int rc;
482 
483 	sfc_log_init(sa, "RxQ=%u nb_rx_desc=%u socket_id=%u",
484 		     rx_queue_id, nb_rx_desc, socket_id);
485 
486 	sfc_adapter_lock(sa);
487 
488 	rc = sfc_rx_qinit(sa, rx_queue_id, nb_rx_desc, socket_id,
489 			  rx_conf, mb_pool);
490 	if (rc != 0)
491 		goto fail_rx_qinit;
492 
493 	dev->data->rx_queues[rx_queue_id] = sas->rxq_info[rx_queue_id].dp;
494 
495 	sfc_adapter_unlock(sa);
496 
497 	return 0;
498 
499 fail_rx_qinit:
500 	sfc_adapter_unlock(sa);
501 	SFC_ASSERT(rc > 0);
502 	return -rc;
503 }
504 
505 static void
506 sfc_rx_queue_release(void *queue)
507 {
508 	struct sfc_dp_rxq *dp_rxq = queue;
509 	struct sfc_rxq *rxq;
510 	struct sfc_adapter *sa;
511 	unsigned int sw_index;
512 
513 	if (dp_rxq == NULL)
514 		return;
515 
516 	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
517 	sa = rxq->evq->sa;
518 	sfc_adapter_lock(sa);
519 
520 	sw_index = dp_rxq->dpq.queue_id;
521 
522 	sfc_log_init(sa, "RxQ=%u", sw_index);
523 
524 	sfc_rx_qfini(sa, sw_index);
525 
526 	sfc_adapter_unlock(sa);
527 }
528 
529 static int
530 sfc_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
531 		   uint16_t nb_tx_desc, unsigned int socket_id,
532 		   const struct rte_eth_txconf *tx_conf)
533 {
534 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
535 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
536 	int rc;
537 
538 	sfc_log_init(sa, "TxQ = %u, nb_tx_desc = %u, socket_id = %u",
539 		     tx_queue_id, nb_tx_desc, socket_id);
540 
541 	sfc_adapter_lock(sa);
542 
543 	rc = sfc_tx_qinit(sa, tx_queue_id, nb_tx_desc, socket_id, tx_conf);
544 	if (rc != 0)
545 		goto fail_tx_qinit;
546 
547 	dev->data->tx_queues[tx_queue_id] = sas->txq_info[tx_queue_id].dp;
548 
549 	sfc_adapter_unlock(sa);
550 	return 0;
551 
552 fail_tx_qinit:
553 	sfc_adapter_unlock(sa);
554 	SFC_ASSERT(rc > 0);
555 	return -rc;
556 }
557 
558 static void
559 sfc_tx_queue_release(void *queue)
560 {
561 	struct sfc_dp_txq *dp_txq = queue;
562 	struct sfc_txq *txq;
563 	unsigned int sw_index;
564 	struct sfc_adapter *sa;
565 
566 	if (dp_txq == NULL)
567 		return;
568 
569 	txq = sfc_txq_by_dp_txq(dp_txq);
570 	sw_index = dp_txq->dpq.queue_id;
571 
572 	SFC_ASSERT(txq->evq != NULL);
573 	sa = txq->evq->sa;
574 
575 	sfc_log_init(sa, "TxQ = %u", sw_index);
576 
577 	sfc_adapter_lock(sa);
578 
579 	sfc_tx_qfini(sa, sw_index);
580 
581 	sfc_adapter_unlock(sa);
582 }
583 
584 /*
585  * Some statistics are computed as A - B where A and B each increase
586  * monotonically with some hardware counter(s) and the counters are read
587  * asynchronously.
588  *
589  * If packet X is counted in A, but not counted in B yet, computed value is
590  * greater than real.
591  *
592  * If packet X is not counted in A at the moment of reading the counter,
593  * but counted in B at the moment of reading the counter, computed value
594  * is less than real.
595  *
596  * However, a counter which goes backwards is a worse evil than a slightly
597  * wrong value. So, let's try to guarantee that it never happens, except
598  * possibly when the MAC stats are zeroed as a result of a NIC reset.
599  */
600 static void
601 sfc_update_diff_stat(uint64_t *stat, uint64_t newval)
602 {
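	/*
	 * The signed difference handles 64-bit wrap-around, so the stat only
	 * moves forward, except when the hardware counters have been reset
	 * (newval == 0).
	 */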
603 	if ((int64_t)(newval - *stat) > 0 || newval == 0)
604 		*stat = newval;
605 }
606 
607 static int
608 sfc_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
609 {
610 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
611 	struct sfc_port *port = &sa->port;
612 	uint64_t *mac_stats;
613 	int ret;
614 
615 	rte_spinlock_lock(&port->mac_stats_lock);
616 
617 	ret = sfc_port_update_mac_stats(sa);
618 	if (ret != 0)
619 		goto unlock;
620 
621 	mac_stats = port->mac_stats_buf;
622 
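	/*
	 * Prefer per-vadapter counters when the firmware exposes them;
	 * otherwise fall back to the port-level MAC statistics.
	 */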
623 	if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask,
624 				   EFX_MAC_VADAPTER_RX_UNICAST_PACKETS)) {
625 		stats->ipackets =
626 			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_PACKETS] +
627 			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_PACKETS] +
628 			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_PACKETS];
629 		stats->opackets =
630 			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_PACKETS] +
631 			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_PACKETS] +
632 			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_PACKETS];
633 		stats->ibytes =
634 			mac_stats[EFX_MAC_VADAPTER_RX_UNICAST_BYTES] +
635 			mac_stats[EFX_MAC_VADAPTER_RX_MULTICAST_BYTES] +
636 			mac_stats[EFX_MAC_VADAPTER_RX_BROADCAST_BYTES];
637 		stats->obytes =
638 			mac_stats[EFX_MAC_VADAPTER_TX_UNICAST_BYTES] +
639 			mac_stats[EFX_MAC_VADAPTER_TX_MULTICAST_BYTES] +
640 			mac_stats[EFX_MAC_VADAPTER_TX_BROADCAST_BYTES];
641 		stats->imissed = mac_stats[EFX_MAC_VADAPTER_RX_BAD_PACKETS];
642 		stats->oerrors = mac_stats[EFX_MAC_VADAPTER_TX_BAD_PACKETS];
643 	} else {
644 		stats->opackets = mac_stats[EFX_MAC_TX_PKTS];
645 		stats->ibytes = mac_stats[EFX_MAC_RX_OCTETS];
646 		stats->obytes = mac_stats[EFX_MAC_TX_OCTETS];
647 		/*
648 		 * Take into account all stats which may be supported
649 		 * on EF10. If some stat is not supported by the current
650 		 * firmware variant or HW revision, it is guaranteed
651 		 * to be zero in mac_stats.
652 		 */
653 		stats->imissed =
654 			mac_stats[EFX_MAC_RX_NODESC_DROP_CNT] +
655 			mac_stats[EFX_MAC_PM_TRUNC_BB_OVERFLOW] +
656 			mac_stats[EFX_MAC_PM_DISCARD_BB_OVERFLOW] +
657 			mac_stats[EFX_MAC_PM_TRUNC_VFIFO_FULL] +
658 			mac_stats[EFX_MAC_PM_DISCARD_VFIFO_FULL] +
659 			mac_stats[EFX_MAC_PM_TRUNC_QBB] +
660 			mac_stats[EFX_MAC_PM_DISCARD_QBB] +
661 			mac_stats[EFX_MAC_PM_DISCARD_MAPPING] +
662 			mac_stats[EFX_MAC_RXDP_Q_DISABLED_PKTS] +
663 			mac_stats[EFX_MAC_RXDP_DI_DROPPED_PKTS];
664 		stats->ierrors =
665 			mac_stats[EFX_MAC_RX_FCS_ERRORS] +
666 			mac_stats[EFX_MAC_RX_ALIGN_ERRORS] +
667 			mac_stats[EFX_MAC_RX_JABBER_PKTS];
668 		/* no oerrors counters supported on EF10 */
669 
670 		/* Exclude missed, error and pause packets from Rx packets */
671 		sfc_update_diff_stat(&port->ipackets,
672 			mac_stats[EFX_MAC_RX_PKTS] -
673 			mac_stats[EFX_MAC_RX_PAUSE_PKTS] -
674 			stats->imissed - stats->ierrors);
675 		stats->ipackets = port->ipackets;
676 	}
677 
678 unlock:
679 	rte_spinlock_unlock(&port->mac_stats_lock);
680 	SFC_ASSERT(ret >= 0);
681 	return -ret;
682 }
683 
684 static int
685 sfc_stats_reset(struct rte_eth_dev *dev)
686 {
687 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
688 	struct sfc_port *port = &sa->port;
689 	int rc;
690 
691 	if (sa->state != SFC_ADAPTER_STARTED) {
692 		/*
693 		 * The operation cannot be done if the port is not started;
694 		 * it will be scheduled to be done during the next port start
695 		 */
696 		port->mac_stats_reset_pending = B_TRUE;
697 		return 0;
698 	}
699 
700 	rc = sfc_port_reset_mac_stats(sa);
701 	if (rc != 0)
702 		sfc_err(sa, "failed to reset statistics (rc = %d)", rc);
703 
704 	SFC_ASSERT(rc >= 0);
705 	return -rc;
706 }
707 
708 static int
709 sfc_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats,
710 	       unsigned int xstats_count)
711 {
712 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
713 	struct sfc_port *port = &sa->port;
714 	uint64_t *mac_stats;
715 	int rc;
716 	unsigned int i;
717 	int nstats = 0;
718 
719 	rte_spinlock_lock(&port->mac_stats_lock);
720 
721 	rc = sfc_port_update_mac_stats(sa);
722 	if (rc != 0) {
723 		SFC_ASSERT(rc > 0);
724 		nstats = -rc;
725 		goto unlock;
726 	}
727 
728 	mac_stats = port->mac_stats_buf;
729 
730 	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
731 		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
732 			if (xstats != NULL && nstats < (int)xstats_count) {
733 				xstats[nstats].id = nstats;
734 				xstats[nstats].value = mac_stats[i];
735 			}
736 			nstats++;
737 		}
738 	}
739 
740 unlock:
741 	rte_spinlock_unlock(&port->mac_stats_lock);
742 
743 	return nstats;
744 }
745 
746 static int
747 sfc_xstats_get_names(struct rte_eth_dev *dev,
748 		     struct rte_eth_xstat_name *xstats_names,
749 		     unsigned int xstats_count)
750 {
751 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
752 	struct sfc_port *port = &sa->port;
753 	unsigned int i;
754 	unsigned int nstats = 0;
755 
756 	for (i = 0; i < EFX_MAC_NSTATS; ++i) {
757 		if (EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i)) {
758 			if (xstats_names != NULL && nstats < xstats_count)
759 				strlcpy(xstats_names[nstats].name,
760 					efx_mac_stat_name(sa->nic, i),
761 					sizeof(xstats_names[0].name));
762 			nstats++;
763 		}
764 	}
765 
766 	return nstats;
767 }
768 
769 static int
770 sfc_xstats_get_by_id(struct rte_eth_dev *dev, const uint64_t *ids,
771 		     uint64_t *values, unsigned int n)
772 {
773 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
774 	struct sfc_port *port = &sa->port;
775 	uint64_t *mac_stats;
776 	unsigned int nb_supported = 0;
777 	unsigned int nb_written = 0;
778 	unsigned int i;
779 	int ret;
780 	int rc;
781 
782 	if (unlikely(values == NULL) ||
783 	    unlikely((ids == NULL) && (n < port->mac_stats_nb_supported)))
784 		return port->mac_stats_nb_supported;
785 
786 	rte_spinlock_lock(&port->mac_stats_lock);
787 
788 	rc = sfc_port_update_mac_stats(sa);
789 	if (rc != 0) {
790 		SFC_ASSERT(rc > 0);
791 		ret = -rc;
792 		goto unlock;
793 	}
794 
795 	mac_stats = port->mac_stats_buf;
796 
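	/*
	 * xstat IDs are dense indices over the supported MAC stats in EFX
	 * enumeration order; walk all stats, counting the supported ones,
	 * to match the requested IDs (which the loop expects in ascending
	 * order).
	 */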
797 	for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < n); ++i) {
798 		if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
799 			continue;
800 
801 		if ((ids == NULL) || (ids[nb_written] == nb_supported))
802 			values[nb_written++] = mac_stats[i];
803 
804 		++nb_supported;
805 	}
806 
807 	ret = nb_written;
808 
809 unlock:
810 	rte_spinlock_unlock(&port->mac_stats_lock);
811 
812 	return ret;
813 }
814 
815 static int
816 sfc_xstats_get_names_by_id(struct rte_eth_dev *dev,
817 			   struct rte_eth_xstat_name *xstats_names,
818 			   const uint64_t *ids, unsigned int size)
819 {
820 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
821 	struct sfc_port *port = &sa->port;
822 	unsigned int nb_supported = 0;
823 	unsigned int nb_written = 0;
824 	unsigned int i;
825 
826 	if (unlikely(xstats_names == NULL) ||
827 	    unlikely((ids == NULL) && (size < port->mac_stats_nb_supported)))
828 		return port->mac_stats_nb_supported;
829 
830 	for (i = 0; (i < EFX_MAC_NSTATS) && (nb_written < size); ++i) {
831 		if (!EFX_MAC_STAT_SUPPORTED(port->mac_stats_mask, i))
832 			continue;
833 
834 		if ((ids == NULL) || (ids[nb_written] == nb_supported)) {
835 			char *name = xstats_names[nb_written++].name;
836 
837 			strlcpy(name, efx_mac_stat_name(sa->nic, i),
838 				sizeof(xstats_names[0].name));
839 		}
840 
841 		++nb_supported;
842 	}
843 
844 	return nb_written;
845 }
846 
847 static int
848 sfc_flow_ctrl_get(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
849 {
850 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
851 	unsigned int wanted_fc, link_fc;
852 
853 	memset(fc_conf, 0, sizeof(*fc_conf));
854 
855 	sfc_adapter_lock(sa);
856 
857 	if (sa->state == SFC_ADAPTER_STARTED)
858 		efx_mac_fcntl_get(sa->nic, &wanted_fc, &link_fc);
859 	else
860 		link_fc = sa->port.flow_ctrl;
861 
862 	switch (link_fc) {
863 	case 0:
864 		fc_conf->mode = RTE_FC_NONE;
865 		break;
866 	case EFX_FCNTL_RESPOND:
867 		fc_conf->mode = RTE_FC_RX_PAUSE;
868 		break;
869 	case EFX_FCNTL_GENERATE:
870 		fc_conf->mode = RTE_FC_TX_PAUSE;
871 		break;
872 	case (EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE):
873 		fc_conf->mode = RTE_FC_FULL;
874 		break;
875 	default:
876 		sfc_err(sa, "%s: unexpected flow control value %#x",
877 			__func__, link_fc);
878 	}
879 
880 	fc_conf->autoneg = sa->port.flow_ctrl_autoneg;
881 
882 	sfc_adapter_unlock(sa);
883 
884 	return 0;
885 }
886 
887 static int
888 sfc_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf)
889 {
890 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
891 	struct sfc_port *port = &sa->port;
892 	unsigned int fcntl;
893 	int rc;
894 
895 	if (fc_conf->high_water != 0 || fc_conf->low_water != 0 ||
896 	    fc_conf->pause_time != 0 || fc_conf->send_xon != 0 ||
897 	    fc_conf->mac_ctrl_frame_fwd != 0) {
898 		sfc_err(sa, "unsupported flow control settings specified");
899 		rc = EINVAL;
900 		goto fail_inval;
901 	}
902 
903 	switch (fc_conf->mode) {
904 	case RTE_FC_NONE:
905 		fcntl = 0;
906 		break;
907 	case RTE_FC_RX_PAUSE:
908 		fcntl = EFX_FCNTL_RESPOND;
909 		break;
910 	case RTE_FC_TX_PAUSE:
911 		fcntl = EFX_FCNTL_GENERATE;
912 		break;
913 	case RTE_FC_FULL:
914 		fcntl = EFX_FCNTL_RESPOND | EFX_FCNTL_GENERATE;
915 		break;
916 	default:
917 		rc = EINVAL;
918 		goto fail_inval;
919 	}
920 
921 	sfc_adapter_lock(sa);
922 
923 	if (sa->state == SFC_ADAPTER_STARTED) {
924 		rc = efx_mac_fcntl_set(sa->nic, fcntl, fc_conf->autoneg);
925 		if (rc != 0)
926 			goto fail_mac_fcntl_set;
927 	}
928 
929 	port->flow_ctrl = fcntl;
930 	port->flow_ctrl_autoneg = fc_conf->autoneg;
931 
932 	sfc_adapter_unlock(sa);
933 
934 	return 0;
935 
936 fail_mac_fcntl_set:
937 	sfc_adapter_unlock(sa);
938 fail_inval:
939 	SFC_ASSERT(rc > 0);
940 	return -rc;
941 }
942 
943 static int
944 sfc_check_scatter_on_all_rx_queues(struct sfc_adapter *sa, size_t pdu)
945 {
946 	struct sfc_adapter_shared * const sas = sfc_sa2shared(sa);
947 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
948 	boolean_t scatter_enabled;
949 	const char *error;
950 	unsigned int i;
951 
952 	for (i = 0; i < sas->rxq_count; i++) {
953 		if ((sas->rxq_info[i].state & SFC_RXQ_INITIALIZED) == 0)
954 			continue;
955 
956 		scatter_enabled = (sas->rxq_info[i].type_flags &
957 				   EFX_RXQ_FLAG_SCATTER);
958 
959 		if (!sfc_rx_check_scatter(pdu, sa->rxq_ctrl[i].buf_size,
960 					  encp->enc_rx_prefix_size,
961 					  scatter_enabled,
962 					  encp->enc_rx_scatter_max, &error)) {
963 			sfc_err(sa, "MTU check for RxQ %u failed: %s", i,
964 				error);
965 			return EINVAL;
966 		}
967 	}
968 
969 	return 0;
970 }
971 
972 static int
973 sfc_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
974 {
975 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
976 	size_t pdu = EFX_MAC_PDU(mtu);
977 	size_t old_pdu;
978 	int rc;
979 
980 	sfc_log_init(sa, "mtu=%u", mtu);
981 
982 	rc = EINVAL;
983 	if (pdu < EFX_MAC_PDU_MIN) {
984 		sfc_err(sa, "too small MTU %u (PDU size %u less than min %u)",
985 			(unsigned int)mtu, (unsigned int)pdu,
986 			EFX_MAC_PDU_MIN);
987 		goto fail_inval;
988 	}
989 	if (pdu > EFX_MAC_PDU_MAX) {
990 		sfc_err(sa, "too big MTU %u (PDU size %u greater than max %u)",
991 			(unsigned int)mtu, (unsigned int)pdu,
992 			(unsigned int)EFX_MAC_PDU_MAX);
993 		goto fail_inval;
994 	}
995 
996 	sfc_adapter_lock(sa);
997 
998 	rc = sfc_check_scatter_on_all_rx_queues(sa, pdu);
999 	if (rc != 0)
1000 		goto fail_check_scatter;
1001 
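	/*
	 * Changing the PDU size requires a port restart: if the adapter is
	 * started, stop it, update the PDU and start it again.
	 */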
1002 	if (pdu != sa->port.pdu) {
1003 		if (sa->state == SFC_ADAPTER_STARTED) {
1004 			sfc_stop(sa);
1005 
1006 			old_pdu = sa->port.pdu;
1007 			sa->port.pdu = pdu;
1008 			rc = sfc_start(sa);
1009 			if (rc != 0)
1010 				goto fail_start;
1011 		} else {
1012 			sa->port.pdu = pdu;
1013 		}
1014 	}
1015 
1016 	/*
1017 	 * The driver does not use it, but other PMDs update the jumbo
1018 	 * frame flag and max_rx_pkt_len when the MTU is set.
1019 	 */
1020 	if (mtu > RTE_ETHER_MAX_LEN) {
1021 		struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode;
1022 		rxmode->offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
1023 	}
1024 
1025 	dev->data->dev_conf.rxmode.max_rx_pkt_len = sa->port.pdu;
1026 
1027 	sfc_adapter_unlock(sa);
1028 
1029 	sfc_log_init(sa, "done");
1030 	return 0;
1031 
1032 fail_start:
1033 	sa->port.pdu = old_pdu;
1034 	if (sfc_start(sa) != 0)
1035 		sfc_err(sa, "cannot start with neither new (%u) nor old (%u) "
1036 			"PDU max size - port is stopped",
1037 			(unsigned int)pdu, (unsigned int)old_pdu);
1038 
1039 fail_check_scatter:
1040 	sfc_adapter_unlock(sa);
1041 
1042 fail_inval:
1043 	sfc_log_init(sa, "failed %d", rc);
1044 	SFC_ASSERT(rc > 0);
1045 	return -rc;
1046 }

1047 static int
1048 sfc_mac_addr_set(struct rte_eth_dev *dev, struct rte_ether_addr *mac_addr)
1049 {
1050 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1051 	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
1052 	struct sfc_port *port = &sa->port;
1053 	struct rte_ether_addr *old_addr = &dev->data->mac_addrs[0];
1054 	int rc = 0;
1055 
1056 	sfc_adapter_lock(sa);
1057 
1058 	if (rte_is_same_ether_addr(mac_addr, &port->default_mac_addr))
1059 		goto unlock;
1060 
1061 	/*
1062 	 * Copy the address to the device private data so that
1063 	 * it can be recalled in the case of an adapter restart.
1064 	 */
1065 	rte_ether_addr_copy(mac_addr, &port->default_mac_addr);
1066 
1067 	/*
1068 	 * Neither of the two following checks can return
1069 	 * an error. The new MAC address is preserved in
1070 	 * the device private data and can be activated
1071 	 * on the next port start if the user prevents
1072 	 * isolated mode from being enabled.
1073 	 */
1074 	if (sfc_sa2shared(sa)->isolated) {
1075 		sfc_warn(sa, "isolated mode is active on the port");
1076 		sfc_warn(sa, "will not set MAC address");
1077 		goto unlock;
1078 	}
1079 
1080 	if (sa->state != SFC_ADAPTER_STARTED) {
1081 		sfc_notice(sa, "the port is not started");
1082 		sfc_notice(sa, "the new MAC address will be set on port start");
1083 
1084 		goto unlock;
1085 	}
1086 
1087 	if (encp->enc_allow_set_mac_with_installed_filters) {
1088 		rc = efx_mac_addr_set(sa->nic, mac_addr->addr_bytes);
1089 		if (rc != 0) {
1090 			sfc_err(sa, "cannot set MAC address (rc = %u)", rc);
1091 			goto unlock;
1092 		}
1093 
1094 		/*
1095 		 * Changing the MAC address by means of an MCDI request
1096 		 * has no effect on received traffic, therefore
1097 		 * we also need to update the unicast filters.
1098 		 */
1099 		rc = sfc_set_rx_mode_unchecked(sa);
1100 		if (rc != 0) {
1101 			sfc_err(sa, "cannot set filter (rc = %u)", rc);
1102 			/* Rollback the old address */
1103 			(void)efx_mac_addr_set(sa->nic, old_addr->addr_bytes);
1104 			(void)sfc_set_rx_mode_unchecked(sa);
1105 		}
1106 	} else {
1107 		sfc_warn(sa, "cannot set MAC address with filters installed");
1108 		sfc_warn(sa, "adapter will be restarted to pick the new MAC");
1109 		sfc_warn(sa, "(some traffic may be dropped)");
1110 
1111 		/*
1112 		 * Since setting MAC address with filters installed is not
1113 		 * allowed on the adapter, the new MAC address will be set
1114 		 * by means of adapter restart. sfc_start() shall retrieve
1115 		 * the new address from the device private data and set it.
1116 		 */
1117 		sfc_stop(sa);
1118 		rc = sfc_start(sa);
1119 		if (rc != 0)
1120 			sfc_err(sa, "cannot restart adapter (rc = %u)", rc);
1121 	}
1122 
1123 unlock:
1124 	if (rc != 0)
1125 		rte_ether_addr_copy(old_addr, &port->default_mac_addr);
1126 
1127 	sfc_adapter_unlock(sa);
1128 
1129 	SFC_ASSERT(rc >= 0);
1130 	return -rc;
1131 }
1132 
1133 
1134 static int
1135 sfc_set_mc_addr_list(struct rte_eth_dev *dev,
1136 		struct rte_ether_addr *mc_addr_set, uint32_t nb_mc_addr)
1137 {
1138 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1139 	struct sfc_port *port = &sa->port;
1140 	uint8_t *mc_addrs = port->mcast_addrs;
1141 	int rc;
1142 	unsigned int i;
1143 
1144 	if (sfc_sa2shared(sa)->isolated) {
1145 		sfc_err(sa, "isolated mode is active on the port");
1146 		sfc_err(sa, "will not set multicast address list");
1147 		return -ENOTSUP;
1148 	}
1149 
1150 	if (mc_addrs == NULL)
1151 		return -ENOBUFS;
1152 
1153 	if (nb_mc_addr > port->max_mcast_addrs) {
1154 		sfc_err(sa, "too many multicast addresses: %u > %u",
1155 			 nb_mc_addr, port->max_mcast_addrs);
1156 		return -EINVAL;
1157 	}
1158 
1159 	for (i = 0; i < nb_mc_addr; ++i) {
1160 		rte_memcpy(mc_addrs, mc_addr_set[i].addr_bytes,
1161 				 EFX_MAC_ADDR_LEN);
1162 		mc_addrs += EFX_MAC_ADDR_LEN;
1163 	}
1164 
1165 	port->nb_mcast_addrs = nb_mc_addr;
1166 
1167 	if (sa->state != SFC_ADAPTER_STARTED)
1168 		return 0;
1169 
1170 	rc = efx_mac_multicast_list_set(sa->nic, port->mcast_addrs,
1171 					port->nb_mcast_addrs);
1172 	if (rc != 0)
1173 		sfc_err(sa, "cannot set multicast address list (rc = %u)", rc);
1174 
1175 	SFC_ASSERT(rc >= 0);
1176 	return -rc;
1177 }
1178 
1179 /*
1180  * The function is used by the secondary process as well. It must not
1181  * use any process-local pointers from the adapter data.
1182  */
1183 static void
1184 sfc_rx_queue_info_get(struct rte_eth_dev *dev, uint16_t rx_queue_id,
1185 		      struct rte_eth_rxq_info *qinfo)
1186 {
1187 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1188 	struct sfc_rxq_info *rxq_info;
1189 
1190 	SFC_ASSERT(rx_queue_id < sas->rxq_count);
1191 
1192 	rxq_info = &sas->rxq_info[rx_queue_id];
1193 
1194 	qinfo->mp = rxq_info->refill_mb_pool;
1195 	qinfo->conf.rx_free_thresh = rxq_info->refill_threshold;
1196 	qinfo->conf.rx_drop_en = 1;
1197 	qinfo->conf.rx_deferred_start = rxq_info->deferred_start;
1198 	qinfo->conf.offloads = dev->data->dev_conf.rxmode.offloads;
1199 	if (rxq_info->type_flags & EFX_RXQ_FLAG_SCATTER) {
1200 		qinfo->conf.offloads |= DEV_RX_OFFLOAD_SCATTER;
1201 		qinfo->scattered_rx = 1;
1202 	}
1203 	qinfo->nb_desc = rxq_info->entries;
1204 }
1205 
1206 /*
1207  * The function is used by the secondary process as well. It must not
1208  * use any process-local pointers from the adapter data.
1209  */
1210 static void
1211 sfc_tx_queue_info_get(struct rte_eth_dev *dev, uint16_t tx_queue_id,
1212 		      struct rte_eth_txq_info *qinfo)
1213 {
1214 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1215 	struct sfc_txq_info *txq_info;
1216 
1217 	SFC_ASSERT(tx_queue_id < sas->txq_count);
1218 
1219 	txq_info = &sas->txq_info[tx_queue_id];
1220 
1221 	memset(qinfo, 0, sizeof(*qinfo));
1222 
1223 	qinfo->conf.offloads = txq_info->offloads;
1224 	qinfo->conf.tx_free_thresh = txq_info->free_thresh;
1225 	qinfo->conf.tx_deferred_start = txq_info->deferred_start;
1226 	qinfo->nb_desc = txq_info->entries;
1227 }
1228 
1229 /*
1230  * The function is used by the secondary process as well. It must not
1231  * use any process-local pointers from the adapter data.
1232  */
1233 static uint32_t
1234 sfc_rx_queue_count(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1235 {
1236 	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
1237 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1238 	struct sfc_rxq_info *rxq_info;
1239 
1240 	SFC_ASSERT(rx_queue_id < sas->rxq_count);
1241 	rxq_info = &sas->rxq_info[rx_queue_id];
1242 
1243 	if ((rxq_info->state & SFC_RXQ_STARTED) == 0)
1244 		return 0;
1245 
1246 	return sap->dp_rx->qdesc_npending(rxq_info->dp);
1247 }
1248 
1249 /*
1250  * The function is used by the secondary process as well. It must not
1251  * use any process-local pointers from the adapter data.
1252  */
1253 static int
1254 sfc_rx_descriptor_done(void *queue, uint16_t offset)
1255 {
1256 	struct sfc_dp_rxq *dp_rxq = queue;
1257 	const struct sfc_dp_rx *dp_rx;
1258 
1259 	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
1260 
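	/* The descriptor is done if its offset falls within the pending count. */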
1261 	return offset < dp_rx->qdesc_npending(dp_rxq);
1262 }
1263 
1264 /*
1265  * The function is used by the secondary process as well. It must not
1266  * use any process-local pointers from the adapter data.
1267  */
1268 static int
1269 sfc_rx_descriptor_status(void *queue, uint16_t offset)
1270 {
1271 	struct sfc_dp_rxq *dp_rxq = queue;
1272 	const struct sfc_dp_rx *dp_rx;
1273 
1274 	dp_rx = sfc_dp_rx_by_dp_rxq(dp_rxq);
1275 
1276 	return dp_rx->qdesc_status(dp_rxq, offset);
1277 }
1278 
1279 /*
1280  * The function is used by the secondary process as well. It must not
1281  * use any process-local pointers from the adapter data.
1282  */
1283 static int
1284 sfc_tx_descriptor_status(void *queue, uint16_t offset)
1285 {
1286 	struct sfc_dp_txq *dp_txq = queue;
1287 	const struct sfc_dp_tx *dp_tx;
1288 
1289 	dp_tx = sfc_dp_tx_by_dp_txq(dp_txq);
1290 
1291 	return dp_tx->qdesc_status(dp_txq, offset);
1292 }
1293 
1294 static int
1295 sfc_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1296 {
1297 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1298 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1299 	int rc;
1300 
1301 	sfc_log_init(sa, "RxQ=%u", rx_queue_id);
1302 
1303 	sfc_adapter_lock(sa);
1304 
1305 	rc = EINVAL;
1306 	if (sa->state != SFC_ADAPTER_STARTED)
1307 		goto fail_not_started;
1308 
1309 	if (sas->rxq_info[rx_queue_id].state != SFC_RXQ_INITIALIZED)
1310 		goto fail_not_setup;
1311 
1312 	rc = sfc_rx_qstart(sa, rx_queue_id);
1313 	if (rc != 0)
1314 		goto fail_rx_qstart;
1315 
1316 	sas->rxq_info[rx_queue_id].deferred_started = B_TRUE;
1317 
1318 	sfc_adapter_unlock(sa);
1319 
1320 	return 0;
1321 
1322 fail_rx_qstart:
1323 fail_not_setup:
1324 fail_not_started:
1325 	sfc_adapter_unlock(sa);
1326 	SFC_ASSERT(rc > 0);
1327 	return -rc;
1328 }
1329 
1330 static int
1331 sfc_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
1332 {
1333 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1334 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1335 
1336 	sfc_log_init(sa, "RxQ=%u", rx_queue_id);
1337 
1338 	sfc_adapter_lock(sa);
1339 	sfc_rx_qstop(sa, rx_queue_id);
1340 
1341 	sas->rxq_info[rx_queue_id].deferred_started = B_FALSE;
1342 
1343 	sfc_adapter_unlock(sa);
1344 
1345 	return 0;
1346 }
1347 
1348 static int
1349 sfc_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1350 {
1351 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1352 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1353 	int rc;
1354 
1355 	sfc_log_init(sa, "TxQ = %u", tx_queue_id);
1356 
1357 	sfc_adapter_lock(sa);
1358 
1359 	rc = EINVAL;
1360 	if (sa->state != SFC_ADAPTER_STARTED)
1361 		goto fail_not_started;
1362 
1363 	if (sas->txq_info[tx_queue_id].state != SFC_TXQ_INITIALIZED)
1364 		goto fail_not_setup;
1365 
1366 	rc = sfc_tx_qstart(sa, tx_queue_id);
1367 	if (rc != 0)
1368 		goto fail_tx_qstart;
1369 
1370 	sas->txq_info[tx_queue_id].deferred_started = B_TRUE;
1371 
1372 	sfc_adapter_unlock(sa);
1373 	return 0;
1374 
1375 fail_tx_qstart:
1376 
1377 fail_not_setup:
1378 fail_not_started:
1379 	sfc_adapter_unlock(sa);
1380 	SFC_ASSERT(rc > 0);
1381 	return -rc;
1382 }
1383 
1384 static int
1385 sfc_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
1386 {
1387 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1388 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1389 
1390 	sfc_log_init(sa, "TxQ = %u", tx_queue_id);
1391 
1392 	sfc_adapter_lock(sa);
1393 
1394 	sfc_tx_qstop(sa, tx_queue_id);
1395 
1396 	sas->txq_info[tx_queue_id].deferred_started = B_FALSE;
1397 
1398 	sfc_adapter_unlock(sa);
1399 	return 0;
1400 }
1401 
1402 static efx_tunnel_protocol_t
1403 sfc_tunnel_rte_type_to_efx_udp_proto(enum rte_eth_tunnel_type rte_type)
1404 {
1405 	switch (rte_type) {
1406 	case RTE_TUNNEL_TYPE_VXLAN:
1407 		return EFX_TUNNEL_PROTOCOL_VXLAN;
1408 	case RTE_TUNNEL_TYPE_GENEVE:
1409 		return EFX_TUNNEL_PROTOCOL_GENEVE;
1410 	default:
1411 		return EFX_TUNNEL_NPROTOS;
1412 	}
1413 }
1414 
1415 enum sfc_udp_tunnel_op_e {
1416 	SFC_UDP_TUNNEL_ADD_PORT,
1417 	SFC_UDP_TUNNEL_DEL_PORT,
1418 };
1419 
1420 static int
1421 sfc_dev_udp_tunnel_op(struct rte_eth_dev *dev,
1422 		      struct rte_eth_udp_tunnel *tunnel_udp,
1423 		      enum sfc_udp_tunnel_op_e op)
1424 {
1425 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1426 	efx_tunnel_protocol_t tunnel_proto;
1427 	int rc;
1428 
1429 	sfc_log_init(sa, "%s udp_port=%u prot_type=%u",
1430 		     (op == SFC_UDP_TUNNEL_ADD_PORT) ? "add" :
1431 		     (op == SFC_UDP_TUNNEL_DEL_PORT) ? "delete" : "unknown",
1432 		     tunnel_udp->udp_port, tunnel_udp->prot_type);
1433 
1434 	tunnel_proto =
1435 		sfc_tunnel_rte_type_to_efx_udp_proto(tunnel_udp->prot_type);
1436 	if (tunnel_proto >= EFX_TUNNEL_NPROTOS) {
1437 		rc = ENOTSUP;
1438 		goto fail_bad_proto;
1439 	}
1440 
1441 	sfc_adapter_lock(sa);
1442 
1443 	switch (op) {
1444 	case SFC_UDP_TUNNEL_ADD_PORT:
1445 		rc = efx_tunnel_config_udp_add(sa->nic,
1446 					       tunnel_udp->udp_port,
1447 					       tunnel_proto);
1448 		break;
1449 	case SFC_UDP_TUNNEL_DEL_PORT:
1450 		rc = efx_tunnel_config_udp_remove(sa->nic,
1451 						  tunnel_udp->udp_port,
1452 						  tunnel_proto);
1453 		break;
1454 	default:
1455 		rc = EINVAL;
1456 		goto fail_bad_op;
1457 	}
1458 
1459 	if (rc != 0)
1460 		goto fail_op;
1461 
1462 	if (sa->state == SFC_ADAPTER_STARTED) {
1463 		rc = efx_tunnel_reconfigure(sa->nic);
1464 		if (rc == EAGAIN) {
1465 			/*
1466 			 * The configuration is accepted by the FW and an MC
1467 			 * reboot is initiated to apply the changes. The MC
1468 			 * reboot will be handled in the usual way (MC reboot
1469 			 * event on the management event queue and adapter
1470 			 * restart).
1471 			 */
1472 			rc = 0;
1473 		} else if (rc != 0) {
1474 			goto fail_reconfigure;
1475 		}
1476 	}
1477 
1478 	sfc_adapter_unlock(sa);
1479 	return 0;
1480 
1481 fail_reconfigure:
1482 	/* Remove/restore the entry since the change caused the trouble */
1483 	switch (op) {
1484 	case SFC_UDP_TUNNEL_ADD_PORT:
1485 		(void)efx_tunnel_config_udp_remove(sa->nic,
1486 						   tunnel_udp->udp_port,
1487 						   tunnel_proto);
1488 		break;
1489 	case SFC_UDP_TUNNEL_DEL_PORT:
1490 		(void)efx_tunnel_config_udp_add(sa->nic,
1491 						tunnel_udp->udp_port,
1492 						tunnel_proto);
1493 		break;
1494 	}
1495 
1496 fail_op:
1497 fail_bad_op:
1498 	sfc_adapter_unlock(sa);
1499 
1500 fail_bad_proto:
1501 	SFC_ASSERT(rc > 0);
1502 	return -rc;
1503 }
1504 
1505 static int
1506 sfc_dev_udp_tunnel_port_add(struct rte_eth_dev *dev,
1507 			    struct rte_eth_udp_tunnel *tunnel_udp)
1508 {
1509 	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_ADD_PORT);
1510 }
1511 
1512 static int
1513 sfc_dev_udp_tunnel_port_del(struct rte_eth_dev *dev,
1514 			    struct rte_eth_udp_tunnel *tunnel_udp)
1515 {
1516 	return sfc_dev_udp_tunnel_op(dev, tunnel_udp, SFC_UDP_TUNNEL_DEL_PORT);
1517 }
1518 
1519 /*
1520  * The function is used by the secondary process as well. It must not
1521  * use any process-local pointers from the adapter data.
1522  */
1523 static int
1524 sfc_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
1525 			  struct rte_eth_rss_conf *rss_conf)
1526 {
1527 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1528 	struct sfc_rss *rss = &sas->rss;
1529 
1530 	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE)
1531 		return -ENOTSUP;
1532 
1533 	/*
1534 	 * Mapping of hash configuration between RTE and EFX is not one-to-one,
1535 	 * hence, conversion is done here to derive a correct set of ETH_RSS
1536 	 * flags which corresponds to the active EFX configuration stored
1537 	 * locally in 'sfc_adapter' and kept up-to-date
1538 	 */
1539 	rss_conf->rss_hf = sfc_rx_hf_efx_to_rte(rss, rss->hash_types);
1540 	rss_conf->rss_key_len = EFX_RSS_KEY_SIZE;
1541 	if (rss_conf->rss_key != NULL)
1542 		rte_memcpy(rss_conf->rss_key, rss->key, EFX_RSS_KEY_SIZE);
1543 
1544 	return 0;
1545 }
1546 
1547 static int
1548 sfc_dev_rss_hash_update(struct rte_eth_dev *dev,
1549 			struct rte_eth_rss_conf *rss_conf)
1550 {
1551 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1552 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
1553 	unsigned int efx_hash_types;
1554 	uint32_t contexts[] = {EFX_RSS_CONTEXT_DEFAULT, rss->dummy_rss_context};
1555 	unsigned int n_contexts;
1556 	unsigned int mode_i = 0;
1557 	unsigned int key_i = 0;
1558 	unsigned int i = 0;
1559 	int rc = 0;
1560 
1561 	n_contexts = rss->dummy_rss_context == EFX_RSS_CONTEXT_DEFAULT ? 1 : 2;
1562 
1563 	if (sfc_sa2shared(sa)->isolated)
1564 		return -ENOTSUP;
1565 
1566 	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
1567 		sfc_err(sa, "RSS is not available");
1568 		return -ENOTSUP;
1569 	}
1570 
1571 	if (rss->channels == 0) {
1572 		sfc_err(sa, "RSS is not configured");
1573 		return -EINVAL;
1574 	}
1575 
1576 	if ((rss_conf->rss_key != NULL) &&
1577 	    (rss_conf->rss_key_len != sizeof(rss->key))) {
1578 		sfc_err(sa, "RSS key size is wrong (should be %zu)",
1579 			sizeof(rss->key));
1580 		return -EINVAL;
1581 	}
1582 
1583 	sfc_adapter_lock(sa);
1584 
1585 	rc = sfc_rx_hf_rte_to_efx(sa, rss_conf->rss_hf, &efx_hash_types);
1586 	if (rc != 0)
1587 		goto fail_rx_hf_rte_to_efx;
1588 
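	/*
	 * Apply the new hash configuration to the default RSS context and,
	 * if one has been allocated, to the dummy RSS context as well.
	 */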
1589 	for (mode_i = 0; mode_i < n_contexts; mode_i++) {
1590 		rc = efx_rx_scale_mode_set(sa->nic, contexts[mode_i],
1591 					   rss->hash_alg, efx_hash_types,
1592 					   B_TRUE);
1593 		if (rc != 0)
1594 			goto fail_scale_mode_set;
1595 	}
1596 
1597 	if (rss_conf->rss_key != NULL) {
1598 		if (sa->state == SFC_ADAPTER_STARTED) {
1599 			for (key_i = 0; key_i < n_contexts; key_i++) {
1600 				rc = efx_rx_scale_key_set(sa->nic,
1601 							  contexts[key_i],
1602 							  rss_conf->rss_key,
1603 							  sizeof(rss->key));
1604 				if (rc != 0)
1605 					goto fail_scale_key_set;
1606 			}
1607 		}
1608 
1609 		rte_memcpy(rss->key, rss_conf->rss_key, sizeof(rss->key));
1610 	}
1611 
1612 	rss->hash_types = efx_hash_types;
1613 
1614 	sfc_adapter_unlock(sa);
1615 
1616 	return 0;
1617 
1618 fail_scale_key_set:
1619 	for (i = 0; i < key_i; i++) {
1620 		if (efx_rx_scale_key_set(sa->nic, contexts[i], rss->key,
1621 					 sizeof(rss->key)) != 0)
1622 			sfc_err(sa, "failed to restore RSS key");
1623 	}
1624 
1625 fail_scale_mode_set:
1626 	for (i = 0; i < mode_i; i++) {
1627 		if (efx_rx_scale_mode_set(sa->nic, contexts[i],
1628 					  EFX_RX_HASHALG_TOEPLITZ,
1629 					  rss->hash_types, B_TRUE) != 0)
1630 			sfc_err(sa, "failed to restore RSS mode");
1631 	}
1632 
1633 fail_rx_hf_rte_to_efx:
1634 	sfc_adapter_unlock(sa);
1635 	return -rc;
1636 }
1637 
1638 /*
1639  * The function is used by the secondary process as well. It must not
1640  * use any process-local pointers from the adapter data.
1641  */
1642 static int
1643 sfc_dev_rss_reta_query(struct rte_eth_dev *dev,
1644 		       struct rte_eth_rss_reta_entry64 *reta_conf,
1645 		       uint16_t reta_size)
1646 {
1647 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1648 	struct sfc_rss *rss = &sas->rss;
1649 	int entry;
1650 
1651 	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE || sas->isolated)
1652 		return -ENOTSUP;
1653 
1654 	if (rss->channels == 0)
1655 		return -EINVAL;
1656 
1657 	if (reta_size != EFX_RSS_TBL_SIZE)
1658 		return -EINVAL;
1659 
1660 	for (entry = 0; entry < reta_size; entry++) {
1661 		int grp = entry / RTE_RETA_GROUP_SIZE;
1662 		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
1663 
1664 		if ((reta_conf[grp].mask >> grp_idx) & 1)
1665 			reta_conf[grp].reta[grp_idx] = rss->tbl[entry];
1666 	}
1667 
1668 	return 0;
1669 }
1670 
1671 static int
1672 sfc_dev_rss_reta_update(struct rte_eth_dev *dev,
1673 			struct rte_eth_rss_reta_entry64 *reta_conf,
1674 			uint16_t reta_size)
1675 {
1676 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1677 	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
1678 	unsigned int *rss_tbl_new;
1679 	uint16_t entry;
1680 	int rc = 0;
1681 
1682 
1683 	if (sfc_sa2shared(sa)->isolated)
1684 		return -ENOTSUP;
1685 
1686 	if (rss->context_type != EFX_RX_SCALE_EXCLUSIVE) {
1687 		sfc_err(sa, "RSS is not available");
1688 		return -ENOTSUP;
1689 	}
1690 
1691 	if (rss->channels == 0) {
1692 		sfc_err(sa, "RSS is not configured");
1693 		return -EINVAL;
1694 	}
1695 
1696 	if (reta_size != EFX_RSS_TBL_SIZE) {
1697 		sfc_err(sa, "RETA size is wrong (should be %u)",
1698 			EFX_RSS_TBL_SIZE);
1699 		return -EINVAL;
1700 	}
1701 
1702 	rss_tbl_new = rte_zmalloc("rss_tbl_new", sizeof(rss->tbl), 0);
1703 	if (rss_tbl_new == NULL)
1704 		return -ENOMEM;
1705 
1706 	sfc_adapter_lock(sa);
1707 
1708 	rte_memcpy(rss_tbl_new, rss->tbl, sizeof(rss->tbl));
1709 
1710 	for (entry = 0; entry < reta_size; entry++) {
1711 		int grp_idx = entry % RTE_RETA_GROUP_SIZE;
1712 		struct rte_eth_rss_reta_entry64 *grp;
1713 
1714 		grp = &reta_conf[entry / RTE_RETA_GROUP_SIZE];
1715 
1716 		if (grp->mask & (1ull << grp_idx)) {
1717 			if (grp->reta[grp_idx] >= rss->channels) {
1718 				rc = EINVAL;
1719 				goto bad_reta_entry;
1720 			}
1721 			rss_tbl_new[entry] = grp->reta[grp_idx];
1722 		}
1723 	}
1724 
1725 	if (sa->state == SFC_ADAPTER_STARTED) {
1726 		rc = efx_rx_scale_tbl_set(sa->nic, EFX_RSS_CONTEXT_DEFAULT,
1727 					  rss_tbl_new, EFX_RSS_TBL_SIZE);
1728 		if (rc != 0)
1729 			goto fail_scale_tbl_set;
1730 	}
1731 
1732 	rte_memcpy(rss->tbl, rss_tbl_new, sizeof(rss->tbl));
1733 
1734 fail_scale_tbl_set:
1735 bad_reta_entry:
1736 	sfc_adapter_unlock(sa);
1737 
1738 	rte_free(rss_tbl_new);
1739 
1740 	SFC_ASSERT(rc >= 0);
1741 	return -rc;
1742 }
1743 
1744 static int
1745 sfc_dev_filter_ctrl(struct rte_eth_dev *dev, enum rte_filter_type filter_type,
1746 		    enum rte_filter_op filter_op,
1747 		    void *arg)
1748 {
1749 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1750 	int rc = ENOTSUP;
1751 
1752 	sfc_log_init(sa, "entry");
1753 
1754 	switch (filter_type) {
1755 	case RTE_ETH_FILTER_GENERIC:
1756 		if (filter_op != RTE_ETH_FILTER_GET) {
1757 			rc = EINVAL;
1758 		} else {
1759 			*(const void **)arg = &sfc_flow_ops;
1760 			rc = 0;
1761 		}
1762 		break;
1763 	default:
1764 		sfc_err(sa, "Unknown filter type %u", filter_type);
1765 		break;
1766 	}
1767 
1768 	sfc_log_init(sa, "exit: %d", -rc);
1769 	SFC_ASSERT(rc >= 0);
1770 	return -rc;
1771 }
1772 
1773 static int
1774 sfc_pool_ops_supported(struct rte_eth_dev *dev, const char *pool)
1775 {
1776 	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
1777 
1778 	/*
1779 	 * If the Rx datapath does not provide a callback to check the
1780 	 * mempool, all pools are supported.
1781 	 */
1782 	if (sap->dp_rx->pool_ops_supported == NULL)
1783 		return 1;
1784 
1785 	return sap->dp_rx->pool_ops_supported(pool);
1786 }
1787 
1788 static int
1789 sfc_rx_queue_intr_enable(struct rte_eth_dev *dev, uint16_t queue_id)
1790 {
1791 	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
1792 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1793 	struct sfc_rxq_info *rxq_info;
1794 
1795 	SFC_ASSERT(queue_id < sas->rxq_count);
1796 	rxq_info = &sas->rxq_info[queue_id];
1797 
1798 	return sap->dp_rx->intr_enable(rxq_info->dp);
1799 }
1800 
1801 static int
1802 sfc_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
1803 {
1804 	const struct sfc_adapter_priv *sap = sfc_adapter_priv_by_eth_dev(dev);
1805 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1806 	struct sfc_rxq_info *rxq_info;
1807 
1808 	SFC_ASSERT(queue_id < sas->rxq_count);
1809 	rxq_info = &sas->rxq_info[queue_id];
1810 
1811 	return sap->dp_rx->intr_disable(rxq_info->dp);
1812 }
1813 
1814 static const struct eth_dev_ops sfc_eth_dev_ops = {
1815 	.dev_configure			= sfc_dev_configure,
1816 	.dev_start			= sfc_dev_start,
1817 	.dev_stop			= sfc_dev_stop,
1818 	.dev_set_link_up		= sfc_dev_set_link_up,
1819 	.dev_set_link_down		= sfc_dev_set_link_down,
1820 	.dev_close			= sfc_dev_close,
1821 	.promiscuous_enable		= sfc_dev_promisc_enable,
1822 	.promiscuous_disable		= sfc_dev_promisc_disable,
1823 	.allmulticast_enable		= sfc_dev_allmulti_enable,
1824 	.allmulticast_disable		= sfc_dev_allmulti_disable,
1825 	.link_update			= sfc_dev_link_update,
1826 	.stats_get			= sfc_stats_get,
1827 	.stats_reset			= sfc_stats_reset,
1828 	.xstats_get			= sfc_xstats_get,
1829 	.xstats_reset			= sfc_stats_reset,
1830 	.xstats_get_names		= sfc_xstats_get_names,
1831 	.dev_infos_get			= sfc_dev_infos_get,
1832 	.dev_supported_ptypes_get	= sfc_dev_supported_ptypes_get,
1833 	.mtu_set			= sfc_dev_set_mtu,
1834 	.rx_queue_start			= sfc_rx_queue_start,
1835 	.rx_queue_stop			= sfc_rx_queue_stop,
1836 	.tx_queue_start			= sfc_tx_queue_start,
1837 	.tx_queue_stop			= sfc_tx_queue_stop,
1838 	.rx_queue_setup			= sfc_rx_queue_setup,
1839 	.rx_queue_release		= sfc_rx_queue_release,
1840 	.rx_queue_intr_enable		= sfc_rx_queue_intr_enable,
1841 	.rx_queue_intr_disable		= sfc_rx_queue_intr_disable,
1842 	.tx_queue_setup			= sfc_tx_queue_setup,
1843 	.tx_queue_release		= sfc_tx_queue_release,
1844 	.flow_ctrl_get			= sfc_flow_ctrl_get,
1845 	.flow_ctrl_set			= sfc_flow_ctrl_set,
1846 	.mac_addr_set			= sfc_mac_addr_set,
1847 	.udp_tunnel_port_add		= sfc_dev_udp_tunnel_port_add,
1848 	.udp_tunnel_port_del		= sfc_dev_udp_tunnel_port_del,
1849 	.reta_update			= sfc_dev_rss_reta_update,
1850 	.reta_query			= sfc_dev_rss_reta_query,
1851 	.rss_hash_update		= sfc_dev_rss_hash_update,
1852 	.rss_hash_conf_get		= sfc_dev_rss_hash_conf_get,
1853 	.filter_ctrl			= sfc_dev_filter_ctrl,
1854 	.set_mc_addr_list		= sfc_set_mc_addr_list,
1855 	.rxq_info_get			= sfc_rx_queue_info_get,
1856 	.txq_info_get			= sfc_tx_queue_info_get,
1857 	.fw_version_get			= sfc_fw_version_get,
1858 	.xstats_get_by_id		= sfc_xstats_get_by_id,
1859 	.xstats_get_names_by_id		= sfc_xstats_get_names_by_id,
1860 	.pool_ops_supported		= sfc_pool_ops_supported,
1861 };
1862 
1863 /**
1864  * Duplicate a string in potentially shared memory required for
1865  * multi-process support.
1866  *
1867  * strdup() allocates from process-local heap/memory.
1868  */
1869 static char *
1870 sfc_strdup(const char *str)
1871 {
1872 	size_t size;
1873 	char *copy;
1874 
1875 	if (str == NULL)
1876 		return NULL;
1877 
1878 	size = strlen(str) + 1;
1879 	copy = rte_malloc(__func__, size, 0);
1880 	if (copy != NULL)
1881 		rte_memcpy(copy, str, size);
1882 
1883 	return copy;
1884 }
1885 
1886 static int
1887 sfc_eth_dev_set_ops(struct rte_eth_dev *dev)
1888 {
1889 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
1890 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
1891 	const struct sfc_dp_rx *dp_rx;
1892 	const struct sfc_dp_tx *dp_tx;
1893 	const efx_nic_cfg_t *encp;
1894 	unsigned int avail_caps = 0;
1895 	const char *rx_name = NULL;
1896 	const char *tx_name = NULL;
1897 	int rc;
1898 
1899 	switch (sa->family) {
1900 	case EFX_FAMILY_HUNTINGTON:
1901 	case EFX_FAMILY_MEDFORD:
1902 	case EFX_FAMILY_MEDFORD2:
1903 		avail_caps |= SFC_DP_HW_FW_CAP_EF10;
1904 		avail_caps |= SFC_DP_HW_FW_CAP_RX_EFX;
1905 		avail_caps |= SFC_DP_HW_FW_CAP_TX_EFX;
1906 		break;
1907 	case EFX_FAMILY_RIVERHEAD:
1908 		avail_caps |= SFC_DP_HW_FW_CAP_EF100;
1909 		break;
1910 	default:
1911 		break;
1912 	}
1913 
1914 	encp = efx_nic_cfg_get(sa->nic);
1915 	if (encp->enc_rx_es_super_buffer_supported)
1916 		avail_caps |= SFC_DP_HW_FW_CAP_RX_ES_SUPER_BUFFER;
1917 
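	/*
	 * Select the Rx datapath: honour an explicit choice passed via the
	 * rx_datapath device argument if present, otherwise pick a datapath
	 * that matches the available HW/FW capabilities.
	 */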
1918 	rc = sfc_kvargs_process(sa, SFC_KVARG_RX_DATAPATH,
1919 				sfc_kvarg_string_handler, &rx_name);
1920 	if (rc != 0)
1921 		goto fail_kvarg_rx_datapath;
1922 
1923 	if (rx_name != NULL) {
1924 		dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, rx_name);
1925 		if (dp_rx == NULL) {
1926 			sfc_err(sa, "Rx datapath %s not found", rx_name);
1927 			rc = ENOENT;
1928 			goto fail_dp_rx;
1929 		}
1930 		if (!sfc_dp_match_hw_fw_caps(&dp_rx->dp, avail_caps)) {
1931 			sfc_err(sa,
1932 				"Insufficient HW/FW capabilities to use Rx datapath %s",
1933 				rx_name);
1934 			rc = EINVAL;
1935 			goto fail_dp_rx_caps;
1936 		}
1937 	} else {
1938 		dp_rx = sfc_dp_find_rx_by_caps(&sfc_dp_head, avail_caps);
1939 		if (dp_rx == NULL) {
1940 			sfc_err(sa, "Rx datapath by caps %#x not found",
1941 				avail_caps);
1942 			rc = ENOENT;
1943 			goto fail_dp_rx;
1944 		}
1945 	}
1946 
1947 	sas->dp_rx_name = sfc_strdup(dp_rx->dp.name);
1948 	if (sas->dp_rx_name == NULL) {
1949 		rc = ENOMEM;
1950 		goto fail_dp_rx_name;
1951 	}
1952 
1953 	sfc_notice(sa, "use %s Rx datapath", sas->dp_rx_name);
1954 
1955 	rc = sfc_kvargs_process(sa, SFC_KVARG_TX_DATAPATH,
1956 				sfc_kvarg_string_handler, &tx_name);
1957 	if (rc != 0)
1958 		goto fail_kvarg_tx_datapath;
1959 
1960 	if (tx_name != NULL) {
1961 		dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, tx_name);
1962 		if (dp_tx == NULL) {
1963 			sfc_err(sa, "Tx datapath %s not found", tx_name);
1964 			rc = ENOENT;
1965 			goto fail_dp_tx;
1966 		}
1967 		if (!sfc_dp_match_hw_fw_caps(&dp_tx->dp, avail_caps)) {
1968 			sfc_err(sa,
1969 				"Insufficient HW/FW capabilities to use Tx datapath %s",
1970 				tx_name);
1971 			rc = EINVAL;
1972 			goto fail_dp_tx_caps;
1973 		}
1974 	} else {
1975 		dp_tx = sfc_dp_find_tx_by_caps(&sfc_dp_head, avail_caps);
1976 		if (dp_tx == NULL) {
1977 			sfc_err(sa, "Tx datapath by caps %#x not found",
1978 				avail_caps);
1979 			rc = ENOENT;
1980 			goto fail_dp_tx;
1981 		}
1982 	}
1983 
1984 	sas->dp_tx_name = sfc_strdup(dp_tx->dp.name);
1985 	if (sas->dp_tx_name == NULL) {
1986 		rc = ENOMEM;
1987 		goto fail_dp_tx_name;
1988 	}
1989 
1990 	sfc_notice(sa, "use %s Tx datapath", sas->dp_tx_name);
1991 
1992 	sa->priv.dp_rx = dp_rx;
1993 	sa->priv.dp_tx = dp_tx;
1994 
1995 	dev->rx_pkt_burst = dp_rx->pkt_burst;
1996 	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
1997 	dev->tx_pkt_burst = dp_tx->pkt_burst;
1998 
1999 	dev->rx_queue_count = sfc_rx_queue_count;
2000 	dev->rx_descriptor_done = sfc_rx_descriptor_done;
2001 	dev->rx_descriptor_status = sfc_rx_descriptor_status;
2002 	dev->tx_descriptor_status = sfc_tx_descriptor_status;
2003 	dev->dev_ops = &sfc_eth_dev_ops;
2004 
2005 	return 0;
2006 
2007 fail_dp_tx_name:
2008 fail_dp_tx_caps:
2009 fail_dp_tx:
2010 fail_kvarg_tx_datapath:
2011 	rte_free(sas->dp_rx_name);
2012 	sas->dp_rx_name = NULL;
2013 
2014 fail_dp_rx_name:
2015 fail_dp_rx_caps:
2016 fail_dp_rx:
2017 fail_kvarg_rx_datapath:
2018 	return rc;
2019 }
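
/*
 * Datapath selection above: a datapath explicitly requested via
 * SFC_KVARG_RX_DATAPATH / SFC_KVARG_TX_DATAPATH is looked up by name and
 * must still match the HW/FW capabilities collected from the NIC family
 * and config; without a kvarg, sfc_dp_find_rx_by_caps() and
 * sfc_dp_find_tx_by_caps() pick a compatible datapath from the registered
 * list. The chosen burst routines are plugged directly into the
 * rte_eth_dev structure.
 */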
2020 
2021 static void
2022 sfc_eth_dev_clear_ops(struct rte_eth_dev *dev)
2023 {
2024 	struct sfc_adapter *sa = sfc_adapter_by_eth_dev(dev);
2025 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
2026 
2027 	dev->dev_ops = NULL;
2028 	dev->tx_pkt_prepare = NULL;
2029 	dev->rx_pkt_burst = NULL;
2030 	dev->tx_pkt_burst = NULL;
2031 
2032 	rte_free(sas->dp_tx_name);
2033 	sas->dp_tx_name = NULL;
2034 	sa->priv.dp_tx = NULL;
2035 
2036 	rte_free(sas->dp_rx_name);
2037 	sas->dp_rx_name = NULL;
2038 	sa->priv.dp_rx = NULL;
2039 }
2040 
2041 static const struct eth_dev_ops sfc_eth_dev_secondary_ops = {
2042 	.dev_supported_ptypes_get	= sfc_dev_supported_ptypes_get,
2043 	.reta_query			= sfc_dev_rss_reta_query,
2044 	.rss_hash_conf_get		= sfc_dev_rss_hash_conf_get,
2045 	.rxq_info_get			= sfc_rx_queue_info_get,
2046 	.txq_info_get			= sfc_tx_queue_info_get,
2047 };
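
/*
 * The table above is intentionally a small subset of sfc_eth_dev_ops:
 * a secondary process only gets callbacks that read state already set up
 * by the primary process. Callbacks left unset are rejected by the ethdev
 * layer (typically with -ENOTSUP).
 */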
2048 
2049 static int
2050 sfc_eth_dev_secondary_init(struct rte_eth_dev *dev, uint32_t logtype_main)
2051 {
2052 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
2053 	struct sfc_adapter_priv *sap;
2054 	const struct sfc_dp_rx *dp_rx;
2055 	const struct sfc_dp_tx *dp_tx;
2056 	int rc;
2057 
2058 	/*
2059 	 * Allocate process private data from heap, since it should not
2060 	 * be located in shared memory allocated using rte_malloc() API.
2061 	 */
2062 	sap = calloc(1, sizeof(*sap));
2063 	if (sap == NULL) {
2064 		rc = ENOMEM;
2065 		goto fail_alloc_priv;
2066 	}
2067 
2068 	sap->logtype_main = logtype_main;
2069 
2070 	dp_rx = sfc_dp_find_rx_by_name(&sfc_dp_head, sas->dp_rx_name);
2071 	if (dp_rx == NULL) {
2072 		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
2073 			"cannot find %s Rx datapath", sas->dp_rx_name);
2074 		rc = ENOENT;
2075 		goto fail_dp_rx;
2076 	}
2077 	if (~dp_rx->features & SFC_DP_RX_FEAT_MULTI_PROCESS) {
2078 		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
2079 			"%s Rx datapath does not support multi-process",
2080 			sas->dp_rx_name);
2081 		rc = EINVAL;
2082 		goto fail_dp_rx_multi_process;
2083 	}
2084 
2085 	dp_tx = sfc_dp_find_tx_by_name(&sfc_dp_head, sas->dp_tx_name);
2086 	if (dp_tx == NULL) {
2087 		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
2088 			"cannot find %s Tx datapath", sas->dp_tx_name);
2089 		rc = ENOENT;
2090 		goto fail_dp_tx;
2091 	}
2092 	if (~dp_tx->features & SFC_DP_TX_FEAT_MULTI_PROCESS) {
2093 		SFC_LOG(sas, RTE_LOG_ERR, logtype_main,
2094 			"%s Tx datapath does not support multi-process",
2095 			sas->dp_tx_name);
2096 		rc = EINVAL;
2097 		goto fail_dp_tx_multi_process;
2098 	}
2099 
2100 	sap->dp_rx = dp_rx;
2101 	sap->dp_tx = dp_tx;
2102 
2103 	dev->process_private = sap;
2104 	dev->rx_pkt_burst = dp_rx->pkt_burst;
2105 	dev->tx_pkt_prepare = dp_tx->pkt_prepare;
2106 	dev->tx_pkt_burst = dp_tx->pkt_burst;
2107 	dev->rx_queue_count = sfc_rx_queue_count;
2108 	dev->rx_descriptor_done = sfc_rx_descriptor_done;
2109 	dev->rx_descriptor_status = sfc_rx_descriptor_status;
2110 	dev->tx_descriptor_status = sfc_tx_descriptor_status;
2111 	dev->dev_ops = &sfc_eth_dev_secondary_ops;
2112 
2113 	return 0;
2114 
2115 fail_dp_tx_multi_process:
2116 fail_dp_tx:
2117 fail_dp_rx_multi_process:
2118 fail_dp_rx:
2119 	free(sap);
2120 
2121 fail_alloc_priv:
2122 	return rc;
2123 }
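
/*
 * Note on the feature checks above: "~features & FLAG" is non-zero exactly
 * when FLAG is not set, i.e. it is equivalent to "(features & FLAG) == 0".
 * Only datapaths advertising SFC_DP_RX_FEAT_MULTI_PROCESS /
 * SFC_DP_TX_FEAT_MULTI_PROCESS may be used from a secondary process.
 */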
2124 
2125 static void
2126 sfc_register_dp(void)
2127 {
2128 	/* Register once */
2129 	if (TAILQ_EMPTY(&sfc_dp_head)) {
2130 		/* Prefer EF10 datapath */
2131 		sfc_dp_register(&sfc_dp_head, &sfc_ef100_rx.dp);
2132 		sfc_dp_register(&sfc_dp_head, &sfc_ef10_essb_rx.dp);
2133 		sfc_dp_register(&sfc_dp_head, &sfc_ef10_rx.dp);
2134 		sfc_dp_register(&sfc_dp_head, &sfc_efx_rx.dp);
2135 
2136 		sfc_dp_register(&sfc_dp_head, &sfc_ef100_tx.dp);
2137 		sfc_dp_register(&sfc_dp_head, &sfc_ef10_tx.dp);
2138 		sfc_dp_register(&sfc_dp_head, &sfc_efx_tx.dp);
2139 		sfc_dp_register(&sfc_dp_head, &sfc_ef10_simple_tx.dp);
2140 	}
2141 }
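
/*
 * The registration order above matters when a datapath is chosen by
 * capabilities rather than by name: sfc_dp_find_rx_by_caps() and
 * sfc_dp_find_tx_by_caps() appear to return the first compatible entry,
 * so the more specialised datapaths are registered ahead of the generic
 * libefx-based ones.
 */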
2142 
2143 static int
2144 sfc_eth_dev_init(struct rte_eth_dev *dev)
2145 {
2146 	struct sfc_adapter_shared *sas = sfc_adapter_shared_by_eth_dev(dev);
2147 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
2148 	uint32_t logtype_main;
2149 	struct sfc_adapter *sa;
2150 	int rc;
2151 	const efx_nic_cfg_t *encp;
2152 	const struct rte_ether_addr *from;
2153 	int ret;
2154 
2155 	sfc_register_dp();
2156 
2157 	logtype_main = sfc_register_logtype(&pci_dev->addr,
2158 					    SFC_LOGTYPE_MAIN_STR,
2159 					    RTE_LOG_NOTICE);
2160 
2161 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
2162 		return -sfc_eth_dev_secondary_init(dev, logtype_main);
2163 
2164 	/* Required for logging */
2165 	ret = snprintf(sas->log_prefix, sizeof(sas->log_prefix),
2166 			"PMD: sfc_efx " PCI_PRI_FMT " #%" PRIu16 ": ",
2167 			pci_dev->addr.domain, pci_dev->addr.bus,
2168 			pci_dev->addr.devid, pci_dev->addr.function,
2169 			dev->data->port_id);
2170 	if (ret < 0 || ret >= (int)sizeof(sas->log_prefix)) {
2171 		SFC_GENERIC_LOG(ERR,
2172 			"reserved log prefix is too short for " PCI_PRI_FMT,
2173 			pci_dev->addr.domain, pci_dev->addr.bus,
2174 			pci_dev->addr.devid, pci_dev->addr.function);
2175 		return -EINVAL;
2176 	}
2177 	sas->pci_addr = pci_dev->addr;
2178 	sas->port_id = dev->data->port_id;
2179 
2180 	/*
2181 	 * Allocate process private data from heap, since it should not
2182 	 * be located in shared memory allocated using rte_malloc() API.
2183 	 */
2184 	sa = calloc(1, sizeof(*sa));
2185 	if (sa == NULL) {
2186 		rc = ENOMEM;
2187 		goto fail_alloc_sa;
2188 	}
2189 
2190 	dev->process_private = sa;
2191 
2192 	/* Required for logging */
2193 	sa->priv.shared = sas;
2194 	sa->priv.logtype_main = logtype_main;
2195 
2196 	sa->eth_dev = dev;
2197 
2198 	/* Copy PCI device info to the dev->data */
2199 	rte_eth_copy_pci_info(dev, pci_dev);
2200 	dev->data->dev_flags |= RTE_ETH_DEV_AUTOFILL_QUEUE_XSTATS;
2201 
2202 	rc = sfc_kvargs_parse(sa);
2203 	if (rc != 0)
2204 		goto fail_kvargs_parse;
2205 
2206 	sfc_log_init(sa, "entry");
2207 
2208 	dev->data->mac_addrs = rte_zmalloc("sfc", RTE_ETHER_ADDR_LEN, 0);
2209 	if (dev->data->mac_addrs == NULL) {
2210 		rc = ENOMEM;
2211 		goto fail_mac_addrs;
2212 	}
2213 
2214 	sfc_adapter_lock_init(sa);
2215 	sfc_adapter_lock(sa);
2216 
2217 	sfc_log_init(sa, "probing");
2218 	rc = sfc_probe(sa);
2219 	if (rc != 0)
2220 		goto fail_probe;
2221 
2222 	sfc_log_init(sa, "set device ops");
2223 	rc = sfc_eth_dev_set_ops(dev);
2224 	if (rc != 0)
2225 		goto fail_set_ops;
2226 
2227 	sfc_log_init(sa, "attaching");
2228 	rc = sfc_attach(sa);
2229 	if (rc != 0)
2230 		goto fail_attach;
2231 
2232 	encp = efx_nic_cfg_get(sa->nic);
2233 
2234 	/*
2235 	 * The arguments are really in reverse order in comparison to the
2236 	 * Linux kernel: copy from the NIC config to the Ethernet device data.
2237 	 */
2238 	from = (const struct rte_ether_addr *)(encp->enc_mac_addr);
2239 	rte_ether_addr_copy(from, &dev->data->mac_addrs[0]);
2240 
2241 	sfc_adapter_unlock(sa);
2242 
2243 	sfc_log_init(sa, "done");
2244 	return 0;
2245 
2246 fail_attach:
2247 	sfc_eth_dev_clear_ops(dev);
2248 
2249 fail_set_ops:
2250 	sfc_unprobe(sa);
2251 
2252 fail_probe:
2253 	sfc_adapter_unlock(sa);
2254 	sfc_adapter_lock_fini(sa);
2255 	rte_free(dev->data->mac_addrs);
2256 	dev->data->mac_addrs = NULL;
2257 
2258 fail_mac_addrs:
2259 	sfc_kvargs_cleanup(sa);
2260 
2261 fail_kvargs_parse:
2262 	sfc_log_init(sa, "failed %d", rc);
2263 	dev->process_private = NULL;
2264 	free(sa);
2265 
2266 fail_alloc_sa:
2267 	SFC_ASSERT(rc > 0);
2268 	return -rc;
2269 }
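
/*
 * Error handling in sfc_eth_dev_init() follows the usual driver pattern:
 * each fail_* label unwinds, in reverse order, only what had already been
 * set up before the failure. rc is kept as a positive errno internally and
 * negated on return, since the ethdev init callback expects a negative
 * errno.
 */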
2270 
2271 static int
2272 sfc_eth_dev_uninit(struct rte_eth_dev *dev)
2273 {
2274 	sfc_dev_close(dev);
2275 
2276 	return 0;
2277 }
2278 
2279 static const struct rte_pci_id pci_id_sfc_efx_map[] = {
2280 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE) },
2281 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_FARMINGDALE_VF) },
2282 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT) },
2283 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_GREENPORT_VF) },
2284 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD) },
2285 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD_VF) },
2286 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2) },
2287 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_SFC, EFX_PCI_DEVID_MEDFORD2_VF) },
2288 	{ RTE_PCI_DEVICE(EFX_PCI_VENID_XILINX, EFX_PCI_DEVID_RIVERHEAD) },
2289 	{ .vendor_id = 0 /* sentinel */ }
2290 };
2291 
2292 static int sfc_eth_dev_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
2293 	struct rte_pci_device *pci_dev)
2294 {
2295 	return rte_eth_dev_pci_generic_probe(pci_dev,
2296 		sizeof(struct sfc_adapter_shared), sfc_eth_dev_init);
2297 }
2298 
2299 static int sfc_eth_dev_pci_remove(struct rte_pci_device *pci_dev)
2300 {
2301 	return rte_eth_dev_pci_generic_remove(pci_dev, sfc_eth_dev_uninit);
2302 }
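
/*
 * rte_eth_dev_pci_generic_probe() allocates the ethdev together with
 * sizeof(struct sfc_adapter_shared) bytes of private data (in rte_malloc()
 * memory in the primary process, hence visible to secondaries) and then
 * invokes sfc_eth_dev_init() for the port; rte_eth_dev_pci_generic_remove()
 * calls sfc_eth_dev_uninit() and releases the port.
 */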
2303 
2304 static struct rte_pci_driver sfc_efx_pmd = {
2305 	.id_table = pci_id_sfc_efx_map,
2306 	.drv_flags =
2307 		RTE_PCI_DRV_INTR_LSC |
2308 		RTE_PCI_DRV_NEED_MAPPING,
2309 	.probe = sfc_eth_dev_pci_probe,
2310 	.remove = sfc_eth_dev_pci_remove,
2311 };
2312 
2313 RTE_PMD_REGISTER_PCI(net_sfc_efx, sfc_efx_pmd);
2314 RTE_PMD_REGISTER_PCI_TABLE(net_sfc_efx, pci_id_sfc_efx_map);
2315 RTE_PMD_REGISTER_KMOD_DEP(net_sfc_efx, "* igb_uio | uio_pci_generic | vfio-pci");
2316 RTE_PMD_REGISTER_PARAM_STRING(net_sfc_efx,
2317 	SFC_KVARG_RX_DATAPATH "=" SFC_KVARG_VALUES_RX_DATAPATH " "
2318 	SFC_KVARG_TX_DATAPATH "=" SFC_KVARG_VALUES_TX_DATAPATH " "
2319 	SFC_KVARG_PERF_PROFILE "=" SFC_KVARG_VALUES_PERF_PROFILE " "
2320 	SFC_KVARG_FW_VARIANT "=" SFC_KVARG_VALUES_FW_VARIANT " "
2321 	SFC_KVARG_RXD_WAIT_TIMEOUT_NS "=<long> "
2322 	SFC_KVARG_STATS_UPDATE_PERIOD_MS "=<long>");
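
/*
 * Illustrative devargs (the PCI address and the "ef10" value are examples
 * only; see SFC_KVARG_VALUES_RX_DATAPATH / SFC_KVARG_VALUES_TX_DATAPATH
 * for the accepted values):
 *
 *	dpdk-testpmd -a 0000:01:00.0,rx_datapath=ef10,tx_datapath=ef10 -- -i
 */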
2323 
2324 RTE_INIT(sfc_driver_register_logtype)
2325 {
2326 	int ret;
2327 
2328 	ret = rte_log_register_type_and_pick_level(SFC_LOGTYPE_PREFIX "driver",
2329 						   RTE_LOG_NOTICE);
2330 	sfc_logtype_driver = (ret < 0) ? RTE_LOGTYPE_PMD : ret;
2331 }
2332