xref: /dpdk/drivers/net/sfc/sfc.c (revision 89f0711f9ddfb5822da9d34f384b92f72a61c4dc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright (c) 2016-2018 Solarflare Communications Inc.
4  * All rights reserved.
5  *
6  * This software was jointly developed between OKTET Labs (under contract
7  * for Solarflare) and Solarflare Communications, Inc.
8  */
9 
10 /* sysconf() */
11 #include <unistd.h>
12 
13 #include <rte_errno.h>
14 #include <rte_alarm.h>
15 
16 #include "efx.h"
17 
18 #include "sfc.h"
19 #include "sfc_log.h"
20 #include "sfc_ev.h"
21 #include "sfc_rx.h"
22 #include "sfc_tx.h"
23 
24 
25 int
26 sfc_dma_alloc(const struct sfc_adapter *sa, const char *name, uint16_t id,
27 	      size_t len, int socket_id, efsys_mem_t *esmp)
28 {
29 	const struct rte_memzone *mz;
30 
31 	sfc_log_init(sa, "name=%s id=%u len=%lu socket_id=%d",
32 		     name, id, len, socket_id);
33 
34 	mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
35 				      sysconf(_SC_PAGESIZE), socket_id);
36 	if (mz == NULL) {
37 		sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
38 			name, (unsigned int)id, (unsigned int)len, socket_id,
39 			rte_strerror(rte_errno));
40 		return ENOMEM;
41 	}
42 
43 	esmp->esm_addr = mz->iova;
44 	if (esmp->esm_addr == RTE_BAD_IOVA) {
45 		(void)rte_memzone_free(mz);
46 		return EFAULT;
47 	}
48 
49 	esmp->esm_mz = mz;
50 	esmp->esm_base = mz->addr;
51 
52 	return 0;
53 }
54 
55 void
56 sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
57 {
58 	int rc;
59 
60 	sfc_log_init(sa, "name=%s", esmp->esm_mz->name);
61 
62 	rc = rte_memzone_free(esmp->esm_mz);
63 	if (rc != 0)
64 		sfc_err(sa, "rte_memzone_free(() failed: %d", rc);
65 
66 	memset(esmp, 0, sizeof(*esmp));
67 }
68 
69 static uint32_t
70 sfc_phy_cap_from_link_speeds(uint32_t speeds)
71 {
72 	uint32_t phy_caps = 0;
73 
74 	if (~speeds & ETH_LINK_SPEED_FIXED) {
75 		phy_caps |= (1 << EFX_PHY_CAP_AN);
76 		/*
77 		 * If no speeds are specified in the mask, any supported
78 		 * may be negotiated
79 		 */
80 		if (speeds == ETH_LINK_SPEED_AUTONEG)
81 			phy_caps |=
82 				(1 << EFX_PHY_CAP_1000FDX) |
83 				(1 << EFX_PHY_CAP_10000FDX) |
84 				(1 << EFX_PHY_CAP_40000FDX);
85 	}
86 	if (speeds & ETH_LINK_SPEED_1G)
87 		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
88 	if (speeds & ETH_LINK_SPEED_10G)
89 		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
90 	if (speeds & ETH_LINK_SPEED_40G)
91 		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
92 
93 	return phy_caps;
94 }
95 
96 /*
97  * Check requested device level configuration.
98  * Receive and transmit configuration is checked in corresponding
99  * modules.
100  */
101 static int
102 sfc_check_conf(struct sfc_adapter *sa)
103 {
104 	const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
105 	int rc = 0;
106 
107 	sa->port.phy_adv_cap =
108 		sfc_phy_cap_from_link_speeds(conf->link_speeds) &
109 		sa->port.phy_adv_cap_mask;
110 	if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
111 		sfc_err(sa, "No link speeds from mask %#x are supported",
112 			conf->link_speeds);
113 		rc = EINVAL;
114 	}
115 
116 	if (conf->lpbk_mode != 0) {
117 		sfc_err(sa, "Loopback not supported");
118 		rc = EINVAL;
119 	}
120 
121 	if (conf->dcb_capability_en != 0) {
122 		sfc_err(sa, "Priority-based flow control not supported");
123 		rc = EINVAL;
124 	}
125 
126 	if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) {
127 		sfc_err(sa, "Flow Director not supported");
128 		rc = EINVAL;
129 	}
130 
131 	if ((conf->intr_conf.lsc != 0) &&
132 	    (sa->intr.type != EFX_INTR_LINE) &&
133 	    (sa->intr.type != EFX_INTR_MESSAGE)) {
134 		sfc_err(sa, "Link status change interrupt not supported");
135 		rc = EINVAL;
136 	}
137 
138 	if (conf->intr_conf.rxq != 0) {
139 		sfc_err(sa, "Receive queue interrupt not supported");
140 		rc = EINVAL;
141 	}
142 
143 	return rc;
144 }
145 
146 /*
147  * Find out maximum number of receive and transmit queues which could be
148  * advertised.
149  *
150  * NIC is kept initialized on success to allow other modules acquire
151  * defaults and capabilities.
152  */
static int
sfc_estimate_resource_limits(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	efx_drv_limits_t limits;
	int rc;
	uint32_t evq_allocated;
	uint32_t rxq_allocated;
	uint32_t txq_allocated;

	memset(&limits, 0, sizeof(limits));

	/* Request at least one Rx and Tx queue */
	limits.edl_min_rxq_count = 1;
	limits.edl_min_txq_count = 1;
	/* Management event queue plus event queue for each Tx and Rx queue */
	limits.edl_min_evq_count =
		1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;

	/* Divide by number of functions to guarantee that all functions
	 * will get promised resources
	 */
	/* FIXME Divide by number of functions (not 2) below */
	limits.edl_max_evq_count = encp->enc_evq_limit / 2;
	SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);

	/* Split equally between receive and transmit */
	limits.edl_max_rxq_count =
		MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
	SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);

	/* Tx gets whatever event queues remain after Rx and management */
	limits.edl_max_txq_count =
		MIN(encp->enc_txq_limit,
		    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);

	/*
	 * With TSO enabled, do not promise more Tx queues than FATSOv2
	 * contexts per PF (assumes an even split of the context pool
	 * between PFs - TODO confirm against firmware documentation)
	 */
	if (sa->tso)
		limits.edl_max_txq_count =
			MIN(limits.edl_max_txq_count,
			    encp->enc_fw_assisted_tso_v2_n_contexts /
			    encp->enc_hw_pf_count);

	SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);

	/* Configure the minimum required resources needed for the
	 * driver to operate, and the maximum desired resources that the
	 * driver is capable of using.
	 */
	efx_nic_set_drv_limits(sa->nic, &limits);

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	if (rc != 0)
		goto fail_nic_init;

	/* Find resource dimensions assigned by firmware to this function */
	rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
				 &txq_allocated);
	if (rc != 0)
		goto fail_get_vi_pool;

	/* It still may allocate more than maximum, ensure limit */
	evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);

	/* Subtract management EVQ not used for traffic */
	SFC_ASSERT(evq_allocated > 0);
	evq_allocated--;

	/* Right now we use separate EVQ for Rx and Tx */
	sa->rxq_max = MIN(rxq_allocated, evq_allocated / 2);
	sa->txq_max = MIN(txq_allocated, evq_allocated - sa->rxq_max);

	/* Keep NIC initialized */
	return 0;

fail_get_vi_pool:
fail_nic_init:
	efx_nic_fini(sa->nic);
	return rc;
}
234 
235 static int
236 sfc_set_drv_limits(struct sfc_adapter *sa)
237 {
238 	const struct rte_eth_dev_data *data = sa->eth_dev->data;
239 	efx_drv_limits_t lim;
240 
241 	memset(&lim, 0, sizeof(lim));
242 
243 	/* Limits are strict since take into account initial estimation */
244 	lim.edl_min_evq_count = lim.edl_max_evq_count =
245 		1 + data->nb_rx_queues + data->nb_tx_queues;
246 	lim.edl_min_rxq_count = lim.edl_max_rxq_count = data->nb_rx_queues;
247 	lim.edl_min_txq_count = lim.edl_max_txq_count = data->nb_tx_queues;
248 
249 	return efx_nic_set_drv_limits(sa->nic, &lim);
250 }
251 
/*
 * Single attempt to bring the adapter up: apply strict driver limits,
 * initialize the NIC and start all datapath modules in dependency
 * order (intr -> ev -> port -> rx -> tx -> flows). On any failure the
 * modules already started are torn down in reverse order via the goto
 * ladder below.
 *
 * Must be called with the adapter lock held in the STARTING state.
 * Returns 0 on success, a positive errno value otherwise.
 */
static int
sfc_try_start(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(sa->state == SFC_ADAPTER_STARTING);

	sfc_log_init(sa, "set resource limits");
	rc = sfc_set_drv_limits(sa);
	if (rc != 0)
		goto fail_set_drv_limits;

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	if (rc != 0)
		goto fail_nic_init;

	/* Re-apply tunnel (UDP port) configuration lost across NIC init */
	encp = efx_nic_cfg_get(sa->nic);
	if (encp->enc_tunnel_encapsulations_supported != 0) {
		sfc_log_init(sa, "apply tunnel config");
		rc = efx_tunnel_reconfigure(sa->nic);
		if (rc != 0)
			goto fail_tunnel_reconfigure;
	}

	rc = sfc_intr_start(sa);
	if (rc != 0)
		goto fail_intr_start;

	rc = sfc_ev_start(sa);
	if (rc != 0)
		goto fail_ev_start;

	rc = sfc_port_start(sa);
	if (rc != 0)
		goto fail_port_start;

	rc = sfc_rx_start(sa);
	if (rc != 0)
		goto fail_rx_start;

	rc = sfc_tx_start(sa);
	if (rc != 0)
		goto fail_tx_start;

	rc = sfc_flow_start(sa);
	if (rc != 0)
		goto fail_flows_insert;

	sfc_log_init(sa, "done");
	return 0;

fail_flows_insert:
	sfc_tx_stop(sa);

fail_tx_start:
	sfc_rx_stop(sa);

fail_rx_start:
	sfc_port_stop(sa);

fail_port_start:
	sfc_ev_stop(sa);

fail_ev_start:
	sfc_intr_stop(sa);

fail_intr_start:
fail_tunnel_reconfigure:
	efx_nic_fini(sa->nic);

fail_nic_init:
fail_set_drv_limits:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
332 
333 int
334 sfc_start(struct sfc_adapter *sa)
335 {
336 	unsigned int start_tries = 3;
337 	int rc;
338 
339 	sfc_log_init(sa, "entry");
340 
341 	SFC_ASSERT(sfc_adapter_is_locked(sa));
342 
343 	switch (sa->state) {
344 	case SFC_ADAPTER_CONFIGURED:
345 		break;
346 	case SFC_ADAPTER_STARTED:
347 		sfc_info(sa, "already started");
348 		return 0;
349 	default:
350 		rc = EINVAL;
351 		goto fail_bad_state;
352 	}
353 
354 	sa->state = SFC_ADAPTER_STARTING;
355 
356 	do {
357 		rc = sfc_try_start(sa);
358 	} while ((--start_tries > 0) &&
359 		 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));
360 
361 	if (rc != 0)
362 		goto fail_try_start;
363 
364 	sa->state = SFC_ADAPTER_STARTED;
365 	sfc_log_init(sa, "done");
366 	return 0;
367 
368 fail_try_start:
369 	sa->state = SFC_ADAPTER_CONFIGURED;
370 fail_bad_state:
371 	sfc_log_init(sa, "failed %d", rc);
372 	return rc;
373 }
374 
375 void
376 sfc_stop(struct sfc_adapter *sa)
377 {
378 	sfc_log_init(sa, "entry");
379 
380 	SFC_ASSERT(sfc_adapter_is_locked(sa));
381 
382 	switch (sa->state) {
383 	case SFC_ADAPTER_STARTED:
384 		break;
385 	case SFC_ADAPTER_CONFIGURED:
386 		sfc_info(sa, "already stopped");
387 		return;
388 	default:
389 		sfc_err(sa, "stop in unexpected state %u", sa->state);
390 		SFC_ASSERT(B_FALSE);
391 		return;
392 	}
393 
394 	sa->state = SFC_ADAPTER_STOPPING;
395 
396 	sfc_flow_stop(sa);
397 	sfc_tx_stop(sa);
398 	sfc_rx_stop(sa);
399 	sfc_port_stop(sa);
400 	sfc_ev_stop(sa);
401 	sfc_intr_stop(sa);
402 	efx_nic_fini(sa->nic);
403 
404 	sa->state = SFC_ADAPTER_CONFIGURED;
405 	sfc_log_init(sa, "done");
406 }
407 
408 static int
409 sfc_restart(struct sfc_adapter *sa)
410 {
411 	int rc;
412 
413 	SFC_ASSERT(sfc_adapter_is_locked(sa));
414 
415 	if (sa->state != SFC_ADAPTER_STARTED)
416 		return EINVAL;
417 
418 	sfc_stop(sa);
419 
420 	rc = sfc_start(sa);
421 	if (rc != 0)
422 		sfc_err(sa, "restart failed");
423 
424 	return rc;
425 }
426 
427 static void
428 sfc_restart_if_required(void *arg)
429 {
430 	struct sfc_adapter *sa = arg;
431 
432 	/* If restart is scheduled, clear the flag and do it */
433 	if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
434 				1, 0)) {
435 		sfc_adapter_lock(sa);
436 		if (sa->state == SFC_ADAPTER_STARTED)
437 			(void)sfc_restart(sa);
438 		sfc_adapter_unlock(sa);
439 	}
440 }
441 
442 void
443 sfc_schedule_restart(struct sfc_adapter *sa)
444 {
445 	int rc;
446 
447 	/* Schedule restart alarm if it is not scheduled yet */
448 	if (!rte_atomic32_test_and_set(&sa->restart_required))
449 		return;
450 
451 	rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
452 	if (rc == -ENOTSUP)
453 		sfc_warn(sa, "alarms are not supported, restart is pending");
454 	else if (rc != 0)
455 		sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
456 	else
457 		sfc_info(sa, "restart scheduled");
458 }
459 
/*
 * Apply device level configuration: validate it, then configure the
 * interrupt, port, Rx and Tx modules. On failure, modules configured
 * so far are closed in reverse order and the adapter returns to the
 * INITIALIZED state.
 *
 * Must be called with the adapter lock held.
 * Returns 0 on success, a positive errno value otherwise.
 */
int
sfc_configure(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	/* Reconfiguration of an already configured adapter is allowed */
	SFC_ASSERT(sa->state == SFC_ADAPTER_INITIALIZED ||
		   sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CONFIGURING;

	rc = sfc_check_conf(sa);
	if (rc != 0)
		goto fail_check_conf;

	rc = sfc_intr_configure(sa);
	if (rc != 0)
		goto fail_intr_configure;

	rc = sfc_port_configure(sa);
	if (rc != 0)
		goto fail_port_configure;

	rc = sfc_rx_configure(sa);
	if (rc != 0)
		goto fail_rx_configure;

	rc = sfc_tx_configure(sa);
	if (rc != 0)
		goto fail_tx_configure;

	sa->state = SFC_ADAPTER_CONFIGURED;
	sfc_log_init(sa, "done");
	return 0;

fail_tx_configure:
	sfc_rx_close(sa);

fail_rx_configure:
	sfc_port_close(sa);

fail_port_configure:
	sfc_intr_close(sa);

fail_intr_configure:
fail_check_conf:
	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
512 
/*
 * Undo sfc_configure(): close Tx, Rx, port and interrupt modules in
 * the reverse order of configuration. Must be called in the CONFIGURED
 * state with the adapter lock held; leaves the adapter INITIALIZED.
 */
void
sfc_close(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ADAPTER_CONFIGURED);
	sa->state = SFC_ADAPTER_CLOSING;

	sfc_tx_close(sa);
	sfc_rx_close(sa);
	sfc_port_close(sa);
	sfc_intr_close(sa);

	sa->state = SFC_ADAPTER_INITIALIZED;
	sfc_log_init(sa, "done");
}
531 
532 static int
533 sfc_mem_bar_init(struct sfc_adapter *sa)
534 {
535 	struct rte_eth_dev *eth_dev = sa->eth_dev;
536 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
537 	efsys_bar_t *ebp = &sa->mem_bar;
538 	unsigned int i;
539 	struct rte_mem_resource *res;
540 
541 	for (i = 0; i < RTE_DIM(pci_dev->mem_resource); i++) {
542 		res = &pci_dev->mem_resource[i];
543 		if ((res->len != 0) && (res->phys_addr != 0)) {
544 			/* Found first memory BAR */
545 			SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
546 			ebp->esb_rid = i;
547 			ebp->esb_dev = pci_dev;
548 			ebp->esb_base = res->addr;
549 			return 0;
550 		}
551 	}
552 
553 	return EFAULT;
554 }
555 
556 static void
557 sfc_mem_bar_fini(struct sfc_adapter *sa)
558 {
559 	efsys_bar_t *ebp = &sa->mem_bar;
560 
561 	SFC_BAR_LOCK_DESTROY(ebp);
562 	memset(ebp, 0, sizeof(*ebp));
563 }
564 
#if EFSYS_OPT_RX_SCALE
/*
 * A fixed RSS key which has a property of being symmetric
 * (symmetrical flows are distributed to the same CPU)
 * and also known to give a uniform distribution
 * (a good distribution of traffic between different CPUs).
 *
 * The key is the 16-bit pattern 0x6d5a repeated to fill all
 * EFX_RSS_KEY_SIZE bytes.
 */
static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
};
#endif
580 
581 #if EFSYS_OPT_RX_SCALE
582 static int
583 sfc_set_rss_defaults(struct sfc_adapter *sa)
584 {
585 	int rc;
586 
587 	rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
588 	if (rc != 0)
589 		goto fail_intr_init;
590 
591 	rc = efx_ev_init(sa->nic);
592 	if (rc != 0)
593 		goto fail_ev_init;
594 
595 	rc = efx_rx_init(sa->nic);
596 	if (rc != 0)
597 		goto fail_rx_init;
598 
599 	rc = efx_rx_scale_default_support_get(sa->nic, &sa->rss_support);
600 	if (rc != 0)
601 		goto fail_scale_support_get;
602 
603 	rc = efx_rx_hash_default_support_get(sa->nic, &sa->hash_support);
604 	if (rc != 0)
605 		goto fail_hash_support_get;
606 
607 	efx_rx_fini(sa->nic);
608 	efx_ev_fini(sa->nic);
609 	efx_intr_fini(sa->nic);
610 
611 	sa->rss_hash_types = sfc_rte_to_efx_hash_type(SFC_RSS_OFFLOADS);
612 
613 	rte_memcpy(sa->rss_key, default_rss_key, sizeof(sa->rss_key));
614 
615 	return 0;
616 
617 fail_hash_support_get:
618 fail_scale_support_get:
619 fail_rx_init:
620 	efx_ev_fini(sa->nic);
621 
622 fail_ev_init:
623 	efx_intr_fini(sa->nic);
624 
625 fail_intr_init:
626 	return rc;
627 }
628 #else
629 static int
630 sfc_set_rss_defaults(__rte_unused struct sfc_adapter *sa)
631 {
632 	return 0;
633 }
634 #endif
635 
/*
 * Attach the driver to the adapter: reset the NIC, initialize tunnel
 * support, estimate resource limits and attach the interrupt, event,
 * port, RSS and filter modules. sfc_estimate_resource_limits() leaves
 * the NIC initialized, so it is finalized again before returning,
 * leaving the adapter in the INITIALIZED state.
 *
 * Must be called with the adapter lock held.
 * Returns 0 on success, a positive errno value otherwise.
 */
int
sfc_attach(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp;
	efx_nic_t *enp = sa->nic;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	efx_mcdi_new_epoch(enp);

	sfc_log_init(sa, "reset nic");
	rc = efx_nic_reset(enp);
	if (rc != 0)
		goto fail_nic_reset;

	/*
	 * Probed NIC is sufficient for tunnel init.
	 * Initialize tunnel support to be able to use libefx
	 * efx_tunnel_config_udp_{add,remove}() in any state and
	 * efx_tunnel_reconfigure() on start up.
	 */
	rc = efx_tunnel_init(enp);
	if (rc != 0)
		goto fail_tunnel_init;

	encp = efx_nic_cfg_get(sa->nic);

	/* TSO needs support from both the datapath and the firmware */
	if (sa->dp_tx->features & SFC_DP_TX_FEAT_TSO) {
		sa->tso = encp->enc_fw_assisted_tso_v2_enabled;
		if (!sa->tso)
			sfc_warn(sa,
				 "TSO support isn't available on this adapter");
	}

	sfc_log_init(sa, "estimate resource limits");
	rc = sfc_estimate_resource_limits(sa);
	if (rc != 0)
		goto fail_estimate_rsrc_limits;

	sa->txq_max_entries = encp->enc_txq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));

	rc = sfc_intr_attach(sa);
	if (rc != 0)
		goto fail_intr_attach;

	rc = sfc_ev_attach(sa);
	if (rc != 0)
		goto fail_ev_attach;

	rc = sfc_port_attach(sa);
	if (rc != 0)
		goto fail_port_attach;

	rc = sfc_set_rss_defaults(sa);
	if (rc != 0)
		goto fail_set_rss_defaults;

	rc = sfc_filter_attach(sa);
	if (rc != 0)
		goto fail_filter_attach;

	/* NIC was left initialized by sfc_estimate_resource_limits() */
	sfc_log_init(sa, "fini nic");
	efx_nic_fini(enp);

	sfc_flow_init(sa);

	sa->state = SFC_ADAPTER_INITIALIZED;

	sfc_log_init(sa, "done");
	return 0;

fail_filter_attach:
fail_set_rss_defaults:
	sfc_port_detach(sa);

fail_port_attach:
	sfc_ev_detach(sa);

fail_ev_attach:
	sfc_intr_detach(sa);

fail_intr_attach:
	efx_nic_fini(sa->nic);

fail_estimate_rsrc_limits:
fail_tunnel_init:
	efx_tunnel_fini(sa->nic);

fail_nic_reset:

	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
733 
/*
 * Undo sfc_attach(): tear down flows, filter, port, event, interrupt
 * and tunnel support in the reverse order of attachment. Must be
 * called with the adapter lock held; leaves the adapter UNINITIALIZED.
 */
void
sfc_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_flow_fini(sa);

	sfc_filter_detach(sa);
	sfc_port_detach(sa);
	sfc_ev_detach(sa);
	sfc_intr_detach(sa);
	efx_tunnel_fini(sa->nic);

	sa->state = SFC_ADAPTER_UNINITIALIZED;
}
751 
/*
 * Probe the device: map the memory BAR, determine the controller
 * family from PCI IDs, create the libefx NIC object, initialize the
 * MCDI transport and probe the NIC. On failure everything done so far
 * is undone in reverse order.
 *
 * Must be called with the adapter lock held.
 * Returns 0 on success, a positive errno value otherwise.
 */
int
sfc_probe(struct sfc_adapter *sa)
{
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(sa->eth_dev);
	efx_nic_t *enp;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sa->socket_id = rte_socket_id();
	rte_atomic32_init(&sa->restart_required);

	sfc_log_init(sa, "init mem bar");
	rc = sfc_mem_bar_init(sa);
	if (rc != 0)
		goto fail_mem_bar_init;

	sfc_log_init(sa, "get family");
	rc = efx_family(pci_dev->id.vendor_id, pci_dev->id.device_id,
			&sa->family);
	if (rc != 0)
		goto fail_family;
	sfc_log_init(sa, "family is %u", sa->family);

	sfc_log_init(sa, "create nic");
	rte_spinlock_init(&sa->nic_lock);
	rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
			    &sa->mem_bar, &sa->nic_lock, &enp);
	if (rc != 0)
		goto fail_nic_create;
	sa->nic = enp;

	/* MCDI transport must be up before the NIC can be probed */
	rc = sfc_mcdi_init(sa);
	if (rc != 0)
		goto fail_mcdi_init;

	sfc_log_init(sa, "probe nic");
	rc = efx_nic_probe(enp);
	if (rc != 0)
		goto fail_nic_probe;

	sfc_log_init(sa, "done");
	return 0;

fail_nic_probe:
	sfc_mcdi_fini(sa);

fail_mcdi_init:
	sfc_log_init(sa, "destroy nic");
	sa->nic = NULL;
	efx_nic_destroy(enp);

fail_nic_create:
fail_family:
	sfc_mem_bar_fini(sa);

fail_mem_bar_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
814 
/*
 * Undo sfc_probe(): unprobe and destroy the NIC, shut down MCDI,
 * cancel any pending restart alarm and unmap the memory BAR. Must be
 * called with the adapter lock held; leaves the adapter UNINITIALIZED.
 */
void
sfc_unprobe(struct sfc_adapter *sa)
{
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_log_init(sa, "unprobe nic");
	efx_nic_unprobe(enp);

	sfc_mcdi_fini(sa);

	/*
	 * Make sure there is no pending alarm to restart since we are
	 * going to free device private which is passed as the callback
	 * opaque data. A new alarm cannot be scheduled since MCDI is
	 * shut down.
	 */
	rte_eal_alarm_cancel(sfc_restart_if_required, sa);

	sfc_log_init(sa, "destroy nic");
	sa->nic = NULL;
	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sfc_flow_fini(sa);
	sa->state = SFC_ADAPTER_UNINITIALIZED;
}
846