/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

/* sysconf() */
#include <unistd.h>

#include <rte_errno.h>
#include <rte_alarm.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_mae_counter.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"
#include "sfc_tweak.h"
#include "sfc_sw_stats.h"
#include "sfc_switch.h"
#include "sfc_nic_dma.h"

bool
sfc_repr_supported(const struct sfc_adapter *sa)
{
	if (!sa->switchdev)
		return false;

	/*
	 * Representor proxy should use a service lcore on the PF's socket
	 * (sa->socket_id) to be efficient, but the proxy will fall back
	 * to any socket if it is not possible to get a service core
	 * on the same socket. Check that at least one service core is
	 * available on any socket.
	 */
	if (sfc_get_service_lcore(SOCKET_ID_ANY) == RTE_MAX_LCORE)
		return false;

	return true;
}

bool
sfc_repr_available(const struct sfc_adapter_shared *sas)
{
	return sas->nb_repr_rxq > 0 && sas->nb_repr_txq > 0;
}

int
sfc_dma_alloc(struct sfc_adapter *sa, const char *name, uint16_t id,
	      efx_nic_dma_addr_type_t addr_type, size_t len, int socket_id,
	      efsys_mem_t *esmp)
{
	const struct rte_memzone *mz;
	int rc;

	sfc_log_init(sa, "name=%s id=%u len=%zu socket_id=%d",
		     name, id, len, socket_id);

	mz = rte_eth_dma_zone_reserve(sa->eth_dev, name, id, len,
				      sysconf(_SC_PAGESIZE), socket_id);
	if (mz == NULL) {
		sfc_err(sa, "cannot reserve DMA zone for %s:%u %#x@%d: %s",
			name, (unsigned int)id, (unsigned int)len, socket_id,
			rte_strerror(rte_errno));
		return ENOMEM;
	}
	if (mz->iova == RTE_BAD_IOVA) {
		(void)rte_memzone_free(mz);
		return EFAULT;
	}

	rc = sfc_nic_dma_mz_map(sa, mz, addr_type, &esmp->esm_addr);
	if (rc != 0) {
		(void)rte_memzone_free(mz);
		return rc;
	}

	esmp->esm_mz = mz;
	esmp->esm_base = mz->addr;

	sfc_info(sa,
		 "DMA name=%s id=%u len=%zu socket_id=%d => virt=%p iova=%lx",
		 name, id, len, socket_id, esmp->esm_base,
		 (unsigned long)esmp->esm_addr);

	return 0;
}

void
sfc_dma_free(const struct sfc_adapter *sa, efsys_mem_t *esmp)
{
	int rc;

	sfc_log_init(sa, "name=%s", esmp->esm_mz->name);

	rc = rte_memzone_free(esmp->esm_mz);
	if (rc != 0)
		sfc_err(sa, "rte_memzone_free() failed: %d", rc);

	memset(esmp, 0, sizeof(*esmp));
}

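/*
 * Illustrative use of the DMA helpers above (a minimal sketch, not taken
 * from a real caller; "ring_size", "evq_index" and the
 * EFX_NIC_DMA_ADDR_EVENT_RING address type are assumptions made for the
 * example):
 *
 *	efsys_mem_t mem;
 *	int rc;
 *
 *	// Reserve an IOVA-contiguous memzone and record its NIC DMA view
 *	rc = sfc_dma_alloc(sa, "evq", evq_index, EFX_NIC_DMA_ADDR_EVENT_RING,
 *			   ring_size, sa->socket_id, &mem);
 *	if (rc != 0)
 *		return rc;
 *	// ... hand mem.esm_base / mem.esm_addr to libefx ...
 *	sfc_dma_free(sa, &mem);
 */
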
static uint32_t
sfc_phy_cap_from_link_speeds(uint32_t speeds)
{
	uint32_t phy_caps = 0;

	if (~speeds & RTE_ETH_LINK_SPEED_FIXED) {
		phy_caps |= (1 << EFX_PHY_CAP_AN);
		/*
		 * If no speeds are specified in the mask, any supported
		 * speed may be negotiated
		 */
		if (speeds == RTE_ETH_LINK_SPEED_AUTONEG)
			phy_caps |=
				(1 << EFX_PHY_CAP_1000FDX) |
				(1 << EFX_PHY_CAP_10000FDX) |
				(1 << EFX_PHY_CAP_25000FDX) |
				(1 << EFX_PHY_CAP_40000FDX) |
				(1 << EFX_PHY_CAP_50000FDX) |
				(1 << EFX_PHY_CAP_100000FDX);
	}
	if (speeds & RTE_ETH_LINK_SPEED_1G)
		phy_caps |= (1 << EFX_PHY_CAP_1000FDX);
	if (speeds & RTE_ETH_LINK_SPEED_10G)
		phy_caps |= (1 << EFX_PHY_CAP_10000FDX);
	if (speeds & RTE_ETH_LINK_SPEED_25G)
		phy_caps |= (1 << EFX_PHY_CAP_25000FDX);
	if (speeds & RTE_ETH_LINK_SPEED_40G)
		phy_caps |= (1 << EFX_PHY_CAP_40000FDX);
	if (speeds & RTE_ETH_LINK_SPEED_50G)
		phy_caps |= (1 << EFX_PHY_CAP_50000FDX);
	if (speeds & RTE_ETH_LINK_SPEED_100G)
		phy_caps |= (1 << EFX_PHY_CAP_100000FDX);

	return phy_caps;
}

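/*
 * Worked example for sfc_phy_cap_from_link_speeds() (illustration only):
 * an application that fixes the link at 25G would configure
 *
 *	conf->link_speeds = RTE_ETH_LINK_SPEED_25G | RTE_ETH_LINK_SPEED_FIXED;
 *
 * which yields just (1 << EFX_PHY_CAP_25000FDX): the FIXED bit suppresses
 * EFX_PHY_CAP_AN. With RTE_ETH_LINK_SPEED_AUTONEG (i.e. 0), the result is
 * EFX_PHY_CAP_AN plus every speed capability listed above, to be narrowed
 * by the adapter's phy_adv_cap_mask in sfc_check_conf() below.
 */
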
/*
 * Check requested device level configuration.
 * Receive and transmit configuration is checked in corresponding
 * modules.
 */
static int
sfc_check_conf(struct sfc_adapter *sa)
{
	const struct rte_eth_conf *conf = &sa->eth_dev->data->dev_conf;
	int rc = 0;

	sa->port.phy_adv_cap =
		sfc_phy_cap_from_link_speeds(conf->link_speeds) &
		sa->port.phy_adv_cap_mask;
	if ((sa->port.phy_adv_cap & ~(1 << EFX_PHY_CAP_AN)) == 0) {
		sfc_err(sa, "No link speeds from mask %#x are supported",
			conf->link_speeds);
		rc = EINVAL;
	}

#if !EFSYS_OPT_LOOPBACK
	if (conf->lpbk_mode != 0) {
		sfc_err(sa, "Loopback not supported");
		rc = EINVAL;
	}
#endif

	if (conf->dcb_capability_en != 0) {
		sfc_err(sa, "Priority-based flow control not supported");
		rc = EINVAL;
	}

	if ((conf->intr_conf.lsc != 0) &&
	    (sa->intr.type != EFX_INTR_LINE) &&
	    (sa->intr.type != EFX_INTR_MESSAGE)) {
		sfc_err(sa, "Link status change interrupt not supported");
		rc = EINVAL;
	}

	if (conf->intr_conf.rxq != 0 &&
	    (sa->priv.dp_rx->features & SFC_DP_RX_FEAT_INTR) == 0) {
		sfc_err(sa, "Receive queue interrupt not supported");
		rc = EINVAL;
	}

	return rc;
}

/*
 * Find out maximum number of receive and transmit queues which could be
 * advertised.
 *
 * NIC is kept initialized on success to allow other modules to acquire
 * defaults and capabilities.
 */
static int
sfc_estimate_resource_limits(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	efx_drv_limits_t limits;
	int rc;
	uint32_t evq_allocated;
	uint32_t rxq_allocated;
	uint32_t txq_allocated;

	memset(&limits, 0, sizeof(limits));

	/* Request at least one Rx and Tx queue */
	limits.edl_min_rxq_count = 1;
	limits.edl_min_txq_count = 1;
	/* Management event queue plus event queue for each Tx and Rx queue */
	limits.edl_min_evq_count =
		1 + limits.edl_min_rxq_count + limits.edl_min_txq_count;

	/*
	 * Divide by number of functions to guarantee that all functions
	 * will get promised resources
	 */
	/* FIXME Divide by number of functions (not 2) below */
	limits.edl_max_evq_count = encp->enc_evq_limit / 2;
	SFC_ASSERT(limits.edl_max_evq_count >= limits.edl_min_rxq_count);

	/* Split equally between receive and transmit */
	limits.edl_max_rxq_count =
		MIN(encp->enc_rxq_limit, (limits.edl_max_evq_count - 1) / 2);
	SFC_ASSERT(limits.edl_max_rxq_count >= limits.edl_min_rxq_count);

	limits.edl_max_txq_count =
		MIN(encp->enc_txq_limit,
		    limits.edl_max_evq_count - 1 - limits.edl_max_rxq_count);

	if (sa->tso && encp->enc_fw_assisted_tso_v2_enabled)
		limits.edl_max_txq_count =
			MIN(limits.edl_max_txq_count,
			    encp->enc_fw_assisted_tso_v2_n_contexts /
			    encp->enc_hw_pf_count);

	SFC_ASSERT(limits.edl_max_txq_count >= limits.edl_min_rxq_count);

	/*
	 * Configure the minimum required resources needed for the
	 * driver to operate, and the maximum desired resources that the
	 * driver is capable of using.
	 */
	efx_nic_set_drv_limits(sa->nic, &limits);

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	if (rc != 0)
		goto fail_nic_init;

	/* Find resource dimensions assigned by firmware to this function */
	rc = efx_nic_get_vi_pool(sa->nic, &evq_allocated, &rxq_allocated,
				 &txq_allocated);
	if (rc != 0)
		goto fail_get_vi_pool;

	/* It still may allocate more than maximum, ensure limit */
	evq_allocated = MIN(evq_allocated, limits.edl_max_evq_count);
	rxq_allocated = MIN(rxq_allocated, limits.edl_max_rxq_count);
	txq_allocated = MIN(txq_allocated, limits.edl_max_txq_count);

	/*
	 * Subtract management EVQ not used for traffic.
	 * The resource allocation strategy is as follows:
	 * - one EVQ for management
	 * - one EVQ for each ethdev RXQ
	 * - one EVQ for each ethdev TXQ
	 * - one EVQ and one RXQ for optional MAE counters.
	 */
	if (evq_allocated == 0) {
		sfc_err(sa, "count of allocated EvQ is 0");
		rc = ENOMEM;
		goto fail_allocate_evq;
	}
	evq_allocated--;

	/*
	 * Reserve absolutely required minimum.
	 * Right now we use separate EVQ for Rx and Tx.
	 */
	if (rxq_allocated > 0 && evq_allocated > 0) {
		sa->rxq_max = 1;
		rxq_allocated--;
		evq_allocated--;
	}
	if (txq_allocated > 0 && evq_allocated > 0) {
		sa->txq_max = 1;
		txq_allocated--;
		evq_allocated--;
	}

	if (sfc_mae_counter_rxq_required(sa) &&
	    rxq_allocated > 0 && evq_allocated > 0) {
		rxq_allocated--;
		evq_allocated--;
		sas->counters_rxq_allocated = true;
	} else {
		sas->counters_rxq_allocated = false;
	}

	if (sfc_repr_supported(sa) &&
	    evq_allocated >= SFC_REPR_PROXY_NB_RXQ_MIN +
	    SFC_REPR_PROXY_NB_TXQ_MIN &&
	    rxq_allocated >= SFC_REPR_PROXY_NB_RXQ_MIN &&
	    txq_allocated >= SFC_REPR_PROXY_NB_TXQ_MIN) {
		unsigned int extra;

		txq_allocated -= SFC_REPR_PROXY_NB_TXQ_MIN;
		rxq_allocated -= SFC_REPR_PROXY_NB_RXQ_MIN;
		evq_allocated -= SFC_REPR_PROXY_NB_RXQ_MIN +
			SFC_REPR_PROXY_NB_TXQ_MIN;

		sas->nb_repr_rxq = SFC_REPR_PROXY_NB_RXQ_MIN;
		sas->nb_repr_txq = SFC_REPR_PROXY_NB_TXQ_MIN;

		/* Allocate extra representor RxQs up to the maximum */
		extra = MIN(evq_allocated, rxq_allocated);
		extra = MIN(extra,
			    SFC_REPR_PROXY_NB_RXQ_MAX - sas->nb_repr_rxq);
		evq_allocated -= extra;
		rxq_allocated -= extra;
		sas->nb_repr_rxq += extra;

		/* Allocate extra representor TxQs up to the maximum */
		extra = MIN(evq_allocated, txq_allocated);
		extra = MIN(extra,
			    SFC_REPR_PROXY_NB_TXQ_MAX - sas->nb_repr_txq);
		evq_allocated -= extra;
		txq_allocated -= extra;
		sas->nb_repr_txq += extra;
	} else {
		sas->nb_repr_rxq = 0;
		sas->nb_repr_txq = 0;
	}

	/* Add remaining allocated queues */
	sa->rxq_max += MIN(rxq_allocated, evq_allocated / 2);
	sa->txq_max += MIN(txq_allocated, evq_allocated - sa->rxq_max);

	/* Keep NIC initialized */
	return 0;

fail_allocate_evq:
fail_get_vi_pool:
	efx_nic_fini(sa->nic);
fail_nic_init:
	return rc;
}
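
/*
 * Worked example for sfc_estimate_resource_limits() above (the numbers
 * are purely illustrative): suppose the firmware grants a VI pool of
 * 12 EVQs, 8 RxQs and 8 TxQs, MAE counters are required, and representors
 * are supported with SFC_REPR_PROXY_NB_RXQ_MIN/MAX and
 * SFC_REPR_PROXY_NB_TXQ_MIN/MAX all equal to 1. Then:
 *  - management EVQ: 11 EVQs left;
 *  - minimum of one RxQ and one TxQ (plus their EVQs): 9 EVQs, 7 RxQs and
 *    7 TxQs left, rxq_max = txq_max = 1;
 *  - MAE counters RxQ: 8 EVQs and 6 RxQs left;
 *  - representor proxy queues: 6 EVQs, 5 RxQs and 6 TxQs left;
 *  - remainder: rxq_max += MIN(5, 6 / 2) = 3 and
 *    txq_max += MIN(6, 6 - rxq_max) = 2, i.e. rxq_max = 4, txq_max = 3.
 */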

static int
sfc_set_drv_limits(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	const struct rte_eth_dev_data *data = sa->eth_dev->data;
	uint32_t rxq_reserved = sfc_nb_reserved_rxq(sas);
	uint32_t txq_reserved = sfc_nb_txq_reserved(sas);
	efx_drv_limits_t lim;

	memset(&lim, 0, sizeof(lim));

	/*
	 * Limits are strict since they take the initial estimation into
	 * account. The resource allocation strategy is described in
	 * sfc_estimate_resource_limits().
	 */
	lim.edl_min_evq_count = lim.edl_max_evq_count =
		1 + data->nb_rx_queues + data->nb_tx_queues +
		rxq_reserved + txq_reserved;
	lim.edl_min_rxq_count = lim.edl_max_rxq_count =
		data->nb_rx_queues + rxq_reserved;
	lim.edl_min_txq_count = lim.edl_max_txq_count =
		data->nb_tx_queues + txq_reserved;

	return efx_nic_set_drv_limits(sa->nic, &lim);
}

static int
sfc_set_fw_subvariant(struct sfc_adapter *sa)
{
	struct sfc_adapter_shared *sas = sfc_sa2shared(sa);
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(sa->nic);
	uint64_t tx_offloads = sa->eth_dev->data->dev_conf.txmode.offloads;
	unsigned int txq_index;
	efx_nic_fw_subvariant_t req_fw_subvariant;
	efx_nic_fw_subvariant_t cur_fw_subvariant;
	int rc;

	if (!encp->enc_fw_subvariant_no_tx_csum_supported) {
		sfc_info(sa, "no-Tx-checksum subvariant not supported");
		return 0;
	}

	for (txq_index = 0; txq_index < sas->txq_count; ++txq_index) {
		struct sfc_txq_info *txq_info = &sas->txq_info[txq_index];

		if (txq_info->state & SFC_TXQ_INITIALIZED)
			tx_offloads |= txq_info->offloads;
	}

	if (tx_offloads & (RTE_ETH_TX_OFFLOAD_IPV4_CKSUM |
			   RTE_ETH_TX_OFFLOAD_TCP_CKSUM |
			   RTE_ETH_TX_OFFLOAD_UDP_CKSUM |
			   RTE_ETH_TX_OFFLOAD_OUTER_IPV4_CKSUM))
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_DEFAULT;
	else
		req_fw_subvariant = EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM;

	rc = efx_nic_get_fw_subvariant(sa->nic, &cur_fw_subvariant);
	if (rc != 0) {
		sfc_err(sa, "failed to get FW subvariant: %d", rc);
		return rc;
	}
	sfc_info(sa, "FW subvariant is %u vs required %u",
		 cur_fw_subvariant, req_fw_subvariant);

	if (cur_fw_subvariant == req_fw_subvariant)
		return 0;

	rc = efx_nic_set_fw_subvariant(sa->nic, req_fw_subvariant);
	if (rc != 0) {
		sfc_err(sa, "failed to set FW subvariant %u: %d",
			req_fw_subvariant, rc);
		return rc;
	}
	sfc_info(sa, "FW subvariant set to %u", req_fw_subvariant);

	return 0;
}
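
/*
 * Decision rule in sfc_set_fw_subvariant() above, by example: with
 * txmode.offloads == 0 and a single initialized TxQ that requested only
 * RTE_ETH_TX_OFFLOAD_MULTI_SEGS, no checksum offload is in use, so the
 * EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM subvariant is requested; adding, say,
 * RTE_ETH_TX_OFFLOAD_TCP_CKSUM on any queue switches the request back to
 * EFX_NIC_FW_SUBVARIANT_DEFAULT.
 */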

static int
sfc_try_start(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));
	SFC_ASSERT(sa->state == SFC_ETHDEV_STARTING);

	sfc_log_init(sa, "set FW subvariant");
	rc = sfc_set_fw_subvariant(sa);
	if (rc != 0)
		goto fail_set_fw_subvariant;

	sfc_log_init(sa, "set resource limits");
	rc = sfc_set_drv_limits(sa);
	if (rc != 0)
		goto fail_set_drv_limits;

	sfc_log_init(sa, "init nic");
	rc = efx_nic_init(sa->nic);
	if (rc != 0)
		goto fail_nic_init;

	sfc_log_init(sa, "reconfigure NIC DMA");
	rc = efx_nic_dma_reconfigure(sa->nic);
	if (rc != 0) {
		sfc_err(sa, "cannot reconfigure NIC DMA: %s", rte_strerror(rc));
		goto fail_nic_dma_reconfigure;
	}

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * Refresh (since it may change on NIC reset/restart) a copy of
	 * supported tunnel encapsulations in shared memory to be used
	 * when getting supported Rx packet type classes.
	 */
	sa->priv.shared->tunnel_encaps =
		encp->enc_tunnel_encapsulations_supported;

	if (encp->enc_tunnel_encapsulations_supported != 0) {
		sfc_log_init(sa, "apply tunnel config");
		rc = efx_tunnel_reconfigure(sa->nic);
		if (rc != 0)
			goto fail_tunnel_reconfigure;
	}

	rc = sfc_intr_start(sa);
	if (rc != 0)
		goto fail_intr_start;

	rc = sfc_ev_start(sa);
	if (rc != 0)
		goto fail_ev_start;

	rc = sfc_tbls_start(sa);
	if (rc != 0)
		goto fail_tbls_start;

	rc = sfc_port_start(sa);
	if (rc != 0)
		goto fail_port_start;

	rc = sfc_rx_start(sa);
	if (rc != 0)
		goto fail_rx_start;

	rc = sfc_tx_start(sa);
	if (rc != 0)
		goto fail_tx_start;

	rc = sfc_flow_start(sa);
	if (rc != 0)
		goto fail_flows_insert;

	rc = sfc_repr_proxy_start(sa);
	if (rc != 0)
		goto fail_repr_proxy_start;

	sfc_log_init(sa, "done");
	return 0;

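	/* Unwind in reverse order of the start sequence above */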
fail_repr_proxy_start:
	sfc_flow_stop(sa);

fail_flows_insert:
	sfc_tx_stop(sa);

fail_tx_start:
	sfc_rx_stop(sa);

fail_rx_start:
	sfc_port_stop(sa);

fail_port_start:
	sfc_tbls_stop(sa);

fail_tbls_start:
	sfc_ev_stop(sa);

fail_ev_start:
	sfc_intr_stop(sa);

fail_intr_start:
fail_tunnel_reconfigure:
fail_nic_dma_reconfigure:
	efx_nic_fini(sa->nic);

fail_nic_init:
fail_set_drv_limits:
fail_set_fw_subvariant:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

int
sfc_start(struct sfc_adapter *sa)
{
	unsigned int start_tries = 3;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	switch (sa->state) {
	case SFC_ETHDEV_CONFIGURED:
		break;
	case SFC_ETHDEV_STARTED:
		sfc_notice(sa, "already started");
		return 0;
	default:
		rc = EINVAL;
		goto fail_bad_state;
	}

	sa->state = SFC_ETHDEV_STARTING;

	rc = 0;
	do {
		/*
		 * FIXME Try to recreate vSwitch on start retry.
		 * vSwitch is absent after MC-reboot-like events and
		 * we should recreate it. Maybe we need a proper
		 * indication instead of guessing.
		 */
		if (rc != 0) {
			sfc_sriov_vswitch_destroy(sa);
			rc = sfc_sriov_vswitch_create(sa);
			if (rc != 0)
				goto fail_sriov_vswitch_create;
		}
		rc = sfc_try_start(sa);
	} while ((--start_tries > 0) &&
		 (rc == EIO || rc == EAGAIN || rc == ENOENT || rc == EINVAL));

	if (rc != 0)
		goto fail_try_start;

	sa->state = SFC_ETHDEV_STARTED;
	sfc_log_init(sa, "done");
	return 0;

fail_try_start:
fail_sriov_vswitch_create:
	sa->state = SFC_ETHDEV_CONFIGURED;
fail_bad_state:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_stop(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	switch (sa->state) {
	case SFC_ETHDEV_STARTED:
		break;
	case SFC_ETHDEV_CONFIGURED:
		sfc_notice(sa, "already stopped");
		return;
	default:
		sfc_err(sa, "stop in unexpected state %u", sa->state);
		SFC_ASSERT(B_FALSE);
		return;
	}

	sa->state = SFC_ETHDEV_STOPPING;

	sfc_repr_proxy_stop(sa);
	sfc_flow_stop(sa);
	sfc_tx_stop(sa);
	sfc_rx_stop(sa);
	sfc_port_stop(sa);
	sfc_tbls_stop(sa);
	sfc_ev_stop(sa);
	sfc_intr_stop(sa);
	efx_nic_fini(sa->nic);

	sa->state = SFC_ETHDEV_CONFIGURED;
	sfc_log_init(sa, "done");
}

static int
sfc_restart(struct sfc_adapter *sa)
{
	int rc;

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	if (sa->state != SFC_ETHDEV_STARTED)
		return EINVAL;

	sfc_stop(sa);

	rc = sfc_start(sa);
	if (rc != 0)
		sfc_err(sa, "restart failed");

	return rc;
}

static void
sfc_restart_if_required(void *arg)
{
	struct sfc_adapter *sa = arg;

	/* If restart is scheduled, clear the flag and do it */
	if (rte_atomic32_cmpset((volatile uint32_t *)&sa->restart_required,
				1, 0)) {
		sfc_adapter_lock(sa);
		if (sa->state == SFC_ETHDEV_STARTED)
			(void)sfc_restart(sa);
		sfc_adapter_unlock(sa);
	}
}

void
sfc_schedule_restart(struct sfc_adapter *sa)
{
	int rc;

	/* Schedule restart alarm if it is not scheduled yet */
	if (!rte_atomic32_test_and_set(&sa->restart_required))
		return;

	rc = rte_eal_alarm_set(1, sfc_restart_if_required, sa);
	if (rc == -ENOTSUP)
		sfc_warn(sa, "alarms are not supported, restart is pending");
	else if (rc != 0)
		sfc_err(sa, "cannot arm restart alarm (rc=%d)", rc);
	else
		sfc_notice(sa, "restart scheduled");
}
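
/*
 * Flow of the deferred restart above, for reference: an event handler
 * (e.g. on an MC reboot) calls sfc_schedule_restart(), which arms a
 * one-shot EAL alarm; the alarm callback sfc_restart_if_required() then
 * clears the flag, takes the adapter lock and performs the restart
 * outside of the event processing context.
 */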

int
sfc_configure(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ETHDEV_INITIALIZED ||
		   sa->state == SFC_ETHDEV_CONFIGURED);
	sa->state = SFC_ETHDEV_CONFIGURING;

	rc = sfc_check_conf(sa);
	if (rc != 0)
		goto fail_check_conf;

	rc = sfc_intr_configure(sa);
	if (rc != 0)
		goto fail_intr_configure;

	rc = sfc_port_configure(sa);
	if (rc != 0)
		goto fail_port_configure;

	rc = sfc_rx_configure(sa);
	if (rc != 0)
		goto fail_rx_configure;

	rc = sfc_tx_configure(sa);
	if (rc != 0)
		goto fail_tx_configure;

	rc = sfc_sw_xstats_configure(sa);
	if (rc != 0)
		goto fail_sw_xstats_configure;

	sa->state = SFC_ETHDEV_CONFIGURED;
	sfc_log_init(sa, "done");
	return 0;

fail_sw_xstats_configure:
	sfc_tx_close(sa);

fail_tx_configure:
	sfc_rx_close(sa);

fail_rx_configure:
	sfc_port_close(sa);

fail_port_configure:
	sfc_intr_close(sa);

fail_intr_configure:
fail_check_conf:
	sa->state = SFC_ETHDEV_INITIALIZED;
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_close(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	SFC_ASSERT(sa->state == SFC_ETHDEV_CONFIGURED);
	sa->state = SFC_ETHDEV_CLOSING;

	sfc_sw_xstats_close(sa);
	sfc_tx_close(sa);
	sfc_rx_close(sa);
	sfc_port_close(sa);
	sfc_intr_close(sa);

	sa->state = SFC_ETHDEV_INITIALIZED;
	sfc_log_init(sa, "done");
}

static int
sfc_mem_bar_init(struct sfc_adapter *sa, const efx_bar_region_t *mem_ebrp)
{
	struct rte_eth_dev *eth_dev = sa->eth_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	efsys_bar_t *ebp = &sa->mem_bar;
	struct rte_mem_resource *res =
		&pci_dev->mem_resource[mem_ebrp->ebr_index];

	SFC_BAR_LOCK_INIT(ebp, eth_dev->data->name);
	ebp->esb_rid = mem_ebrp->ebr_index;
	ebp->esb_dev = pci_dev;
	ebp->esb_base = res->addr;

	sa->fcw_offset = mem_ebrp->ebr_offset;

	return 0;
}

static void
sfc_mem_bar_fini(struct sfc_adapter *sa)
{
	efsys_bar_t *ebp = &sa->mem_bar;

	SFC_BAR_LOCK_DESTROY(ebp);
	memset(ebp, 0, sizeof(*ebp));
}

/*
 * A fixed RSS key which has a property of being symmetric
 * (symmetrical flows are distributed to the same CPU)
 * and also known to give a uniform distribution
 * (a good distribution of traffic between different CPUs)
 */
static const uint8_t default_rss_key[EFX_RSS_KEY_SIZE] = {
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
	0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a, 0x6d, 0x5a,
};
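
/*
 * The key above is the well-known symmetric Toeplitz key: the 16-bit
 * pattern 0x6d5a repeated across the whole key. With a repeating 16-bit
 * pattern the Toeplitz hash satisfies hash(src, dst) == hash(dst, src)
 * for swapped IP addresses and L4 ports, so both directions of a
 * connection land on the same queue/CPU.
 */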

static int
sfc_rss_attach(struct sfc_adapter *sa)
{
	struct sfc_rss *rss = &sfc_sa2shared(sa)->rss;
	int rc;

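	/*
	 * Bring up just enough of libefx (interrupts, events and Rx) to
	 * query the RSS defaults; it is all torn down again below once
	 * the defaults have been cached in the shared adapter state.
	 */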
	rc = efx_intr_init(sa->nic, sa->intr.type, NULL);
	if (rc != 0)
		goto fail_intr_init;

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	rc = efx_rx_init(sa->nic);
	if (rc != 0)
		goto fail_rx_init;

	rc = efx_rx_scale_default_support_get(sa->nic, &rss->context_type);
	if (rc != 0)
		goto fail_scale_support_get;

	rc = efx_rx_hash_default_support_get(sa->nic, &rss->hash_support);
	if (rc != 0)
		goto fail_hash_support_get;

	rc = sfc_rx_hash_init(sa);
	if (rc != 0)
		goto fail_rx_hash_init;

	efx_rx_fini(sa->nic);
	efx_ev_fini(sa->nic);
	efx_intr_fini(sa->nic);

	rte_memcpy(rss->key, default_rss_key, sizeof(rss->key));
	memset(&rss->dummy_ctx, 0, sizeof(rss->dummy_ctx));
	rss->dummy_ctx.conf.qid_span = 1;
	rss->dummy_ctx.dummy = true;

	return 0;

fail_rx_hash_init:
fail_hash_support_get:
fail_scale_support_get:
	efx_rx_fini(sa->nic);

fail_rx_init:
	efx_ev_fini(sa->nic);

fail_ev_init:
	efx_intr_fini(sa->nic);

fail_intr_init:
	return rc;
}

static void
sfc_rss_detach(struct sfc_adapter *sa)
{
	sfc_rx_hash_fini(sa);
}

int
sfc_attach(struct sfc_adapter *sa)
{
	const efx_nic_cfg_t *encp;
	efx_nic_t *enp = sa->nic;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	efx_mcdi_new_epoch(enp);

	sfc_log_init(sa, "reset nic");
	rc = efx_nic_reset(enp);
	if (rc != 0)
		goto fail_nic_reset;

	rc = sfc_sriov_attach(sa);
	if (rc != 0)
		goto fail_sriov_attach;

	/*
	 * Probed NIC is sufficient for tunnel init.
	 * Initialize tunnel support to be able to use libefx
	 * efx_tunnel_config_udp_{add,remove}() in any state and
	 * efx_tunnel_reconfigure() on start up.
	 */
	rc = efx_tunnel_init(enp);
	if (rc != 0)
		goto fail_tunnel_init;

	encp = efx_nic_cfg_get(sa->nic);

	/*
	 * Make a copy of supported tunnel encapsulations in shared
	 * memory to be used when getting supported Rx packet type
	 * classes.
	 */
	sa->priv.shared->tunnel_encaps =
		encp->enc_tunnel_encapsulations_supported;

	if (sfc_dp_tx_offload_capa(sa->priv.dp_tx) & RTE_ETH_TX_OFFLOAD_TCP_TSO) {
		sa->tso = encp->enc_fw_assisted_tso_v2_enabled ||
			  encp->enc_tso_v3_enabled;
		if (!sa->tso)
			sfc_info(sa, "TSO support isn't available on this adapter");
	}

	if (sa->tso &&
	    (sfc_dp_tx_offload_capa(sa->priv.dp_tx) &
	     (RTE_ETH_TX_OFFLOAD_VXLAN_TNL_TSO |
	      RTE_ETH_TX_OFFLOAD_GENEVE_TNL_TSO)) != 0) {
		sa->tso_encap = encp->enc_fw_assisted_tso_v2_encap_enabled ||
				encp->enc_tso_v3_enabled;
		if (!sa->tso_encap)
			sfc_info(sa, "Encapsulated TSO support isn't available on this adapter");
	}

	sfc_log_init(sa, "estimate resource limits");
	rc = sfc_estimate_resource_limits(sa);
	if (rc != 0)
		goto fail_estimate_rsrc_limits;

	sa->evq_max_entries = encp->enc_evq_max_nevs;
	SFC_ASSERT(rte_is_power_of_2(sa->evq_max_entries));

	sa->evq_min_entries = encp->enc_evq_min_nevs;
	SFC_ASSERT(rte_is_power_of_2(sa->evq_min_entries));

	sa->rxq_max_entries = encp->enc_rxq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->rxq_max_entries));

	sa->rxq_min_entries = encp->enc_rxq_min_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->rxq_min_entries));

	sa->txq_max_entries = encp->enc_txq_max_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_max_entries));

	sa->txq_min_entries = encp->enc_txq_min_ndescs;
	SFC_ASSERT(rte_is_power_of_2(sa->txq_min_entries));

	rc = sfc_intr_attach(sa);
	if (rc != 0)
		goto fail_intr_attach;

	rc = sfc_ev_attach(sa);
	if (rc != 0)
		goto fail_ev_attach;

	rc = sfc_port_attach(sa);
	if (rc != 0)
		goto fail_port_attach;

	rc = sfc_rss_attach(sa);
	if (rc != 0)
		goto fail_rss_attach;

	sfc_flow_init(sa);

	rc = sfc_flow_rss_attach(sa);
	if (rc != 0)
		goto fail_flow_rss_attach;

	rc = sfc_filter_attach(sa);
	if (rc != 0)
		goto fail_filter_attach;

	rc = sfc_mae_counter_rxq_attach(sa);
	if (rc != 0)
		goto fail_mae_counter_rxq_attach;

	rc = sfc_mae_attach(sa);
	if (rc != 0)
		goto fail_mae_attach;

	rc = sfc_tbls_attach(sa);
	if (rc != 0)
		goto fail_tables_attach;

	rc = sfc_mae_switchdev_init(sa);
	if (rc != 0)
		goto fail_mae_switchdev_init;

	rc = sfc_repr_proxy_attach(sa);
	if (rc != 0)
		goto fail_repr_proxy_attach;

	sfc_log_init(sa, "fini nic");
	efx_nic_fini(enp);

	rc = sfc_sw_xstats_init(sa);
	if (rc != 0)
		goto fail_sw_xstats_init;

	/*
	 * Create vSwitch to be able to use VFs when PF is not started yet
	 * as DPDK port. VFs should be able to talk to each other even
	 * if PF is down.
	 */
	rc = sfc_sriov_vswitch_create(sa);
	if (rc != 0)
		goto fail_sriov_vswitch_create;

	sa->state = SFC_ETHDEV_INITIALIZED;

	sfc_log_init(sa, "done");
	return 0;

fail_sriov_vswitch_create:
	sfc_sw_xstats_close(sa);

fail_sw_xstats_init:
	sfc_repr_proxy_detach(sa);

fail_repr_proxy_attach:
	sfc_mae_switchdev_fini(sa);

fail_mae_switchdev_init:
	sfc_tbls_detach(sa);

fail_tables_attach:
	sfc_mae_detach(sa);

fail_mae_attach:
	sfc_mae_counter_rxq_detach(sa);

fail_mae_counter_rxq_attach:
	sfc_filter_detach(sa);

fail_filter_attach:
	sfc_flow_rss_detach(sa);

fail_flow_rss_attach:
	sfc_flow_fini(sa);
	sfc_rss_detach(sa);

fail_rss_attach:
	sfc_port_detach(sa);

fail_port_attach:
	sfc_ev_detach(sa);

fail_ev_attach:
	sfc_intr_detach(sa);

fail_intr_attach:
	efx_nic_fini(sa->nic);

fail_estimate_rsrc_limits:
fail_tunnel_init:
	efx_tunnel_fini(sa->nic);
	sfc_sriov_detach(sa);

fail_sriov_attach:
fail_nic_reset:

	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_pre_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(!sfc_adapter_is_locked(sa));

	sfc_repr_proxy_pre_detach(sa);

	sfc_log_init(sa, "done");
}

void
sfc_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

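	/* Teardown mirrors sfc_attach() in reverse order */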
	sfc_sriov_vswitch_destroy(sa);

	sfc_repr_proxy_detach(sa);
	sfc_mae_switchdev_fini(sa);
	sfc_tbls_detach(sa);
	sfc_mae_detach(sa);
	sfc_mae_counter_rxq_detach(sa);
	sfc_filter_detach(sa);
	sfc_flow_rss_detach(sa);
	sfc_flow_fini(sa);
	sfc_rss_detach(sa);
	sfc_port_detach(sa);
	sfc_ev_detach(sa);
	sfc_intr_detach(sa);
	efx_tunnel_fini(sa->nic);
	sfc_sriov_detach(sa);

	sa->state = SFC_ETHDEV_UNINITIALIZED;
}

static int
sfc_kvarg_fv_variant_handler(__rte_unused const char *key,
			     const char *value_str, void *opaque)
{
	uint32_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DONT_CARE) == 0)
		*value = EFX_FW_VARIANT_DONT_CARE;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_FULL_FEATURED) == 0)
		*value = EFX_FW_VARIANT_FULL_FEATURED;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_LOW_LATENCY) == 0)
		*value = EFX_FW_VARIANT_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_PACKED_STREAM) == 0)
		*value = EFX_FW_VARIANT_PACKED_STREAM;
	else if (strcasecmp(value_str, SFC_KVARG_FW_VARIANT_DPDK) == 0)
		*value = EFX_FW_VARIANT_DPDK;
	else
		return -EINVAL;

	return 0;
}
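
/*
 * Example (assuming the kvarg strings documented for this driver, e.g.
 * "dont-care", "full-feature", "ull", "packed-stream" and "dpdk"; see
 * sfc_kvargs.h for the authoritative values): a device argument such as
 *
 *	-a 0000:02:00.0,fw_variant=dpdk
 *
 * makes the handler above store EFX_FW_VARIANT_DPDK into the opaque
 * uint32_t, which sfc_nic_probe() then passes to efx_nic_probe().
 */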

static int
sfc_get_fw_variant(struct sfc_adapter *sa, efx_fw_variant_t *efv)
{
	efx_nic_fw_info_t enfi;
	int rc;

	rc = efx_nic_get_fw_version(sa->nic, &enfi);
	if (rc != 0)
		return rc;
	else if (!enfi.enfi_dpcpu_fw_ids_valid)
		return ENOTSUP;

	/*
	 * Firmware variant can be uniquely identified by the RxDPCPU
	 * firmware id
	 */
	switch (enfi.enfi_rx_dpcpu_fw_id) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		*efv = EFX_FW_VARIANT_FULL_FEATURED;
		break;

	case EFX_RXDP_LOW_LATENCY_FW_ID:
		*efv = EFX_FW_VARIANT_LOW_LATENCY;
		break;

	case EFX_RXDP_PACKED_STREAM_FW_ID:
		*efv = EFX_FW_VARIANT_PACKED_STREAM;
		break;

	case EFX_RXDP_DPDK_FW_ID:
		*efv = EFX_FW_VARIANT_DPDK;
		break;

	default:
		/*
		 * Other firmware variants are not considered, since they are
		 * not supported in the device parameters
		 */
		*efv = EFX_FW_VARIANT_DONT_CARE;
		break;
	}

	return 0;
}

static const char *
sfc_fw_variant2str(efx_fw_variant_t efv)
{
	switch (efv) {
	case EFX_RXDP_FULL_FEATURED_FW_ID:
		return SFC_KVARG_FW_VARIANT_FULL_FEATURED;
	case EFX_RXDP_LOW_LATENCY_FW_ID:
		return SFC_KVARG_FW_VARIANT_LOW_LATENCY;
	case EFX_RXDP_PACKED_STREAM_FW_ID:
		return SFC_KVARG_FW_VARIANT_PACKED_STREAM;
	case EFX_RXDP_DPDK_FW_ID:
		return SFC_KVARG_FW_VARIANT_DPDK;
	default:
		return "unknown";
	}
}

static int
sfc_kvarg_rxd_wait_timeout_ns(struct sfc_adapter *sa)
{
	int rc;
	long value;

	value = SFC_RXD_WAIT_TIMEOUT_NS_DEF;

	rc = sfc_kvargs_process(sa, SFC_KVARG_RXD_WAIT_TIMEOUT_NS,
				sfc_kvarg_long_handler, &value);
	if (rc != 0)
		return rc;

	if (value < 0 ||
	    (unsigned long)value > EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX) {
		sfc_err(sa, "wrong '" SFC_KVARG_RXD_WAIT_TIMEOUT_NS "' "
			    "was set (%ld);", value);
		sfc_err(sa, "it must not be less than 0 or greater than %u",
			EFX_RXQ_ES_SUPER_BUFFER_HOL_BLOCK_MAX);
		return EINVAL;
	}

	sa->rxd_wait_timeout_ns = value;
	return 0;
}

static int
sfc_nic_probe(struct sfc_adapter *sa)
{
	efx_nic_t *enp = sa->nic;
	efx_fw_variant_t preferred_efv;
	efx_fw_variant_t efv;
	int rc;

	preferred_efv = EFX_FW_VARIANT_DONT_CARE;
	rc = sfc_kvargs_process(sa, SFC_KVARG_FW_VARIANT,
				sfc_kvarg_fv_variant_handler,
				&preferred_efv);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value", SFC_KVARG_FW_VARIANT);
		return rc;
	}

	rc = sfc_kvarg_rxd_wait_timeout_ns(sa);
	if (rc != 0)
		return rc;

	rc = efx_nic_probe(enp, preferred_efv);
	if (rc == EACCES) {
		/* Unprivileged functions cannot set FW variant */
		rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
	}
	if (rc != 0)
		return rc;

	rc = sfc_get_fw_variant(sa, &efv);
	if (rc == ENOTSUP) {
		sfc_warn(sa, "FW variant can not be obtained");
		return 0;
	}
	if (rc != 0)
		return rc;

	/* Check that firmware variant was changed to the requested one */
	if (preferred_efv != EFX_FW_VARIANT_DONT_CARE && preferred_efv != efv) {
		sfc_warn(sa, "FW variant has not changed to the requested %s",
			 sfc_fw_variant2str(preferred_efv));
	}

	sfc_notice(sa, "running FW variant is %s", sfc_fw_variant2str(efv));

	return 0;
}

int
sfc_probe(struct sfc_adapter *sa)
{
	efx_bar_region_t mem_ebrp;
	struct rte_eth_dev *eth_dev = sa->eth_dev;
	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
	efx_nic_t *enp;
	int rc;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sa->socket_id = rte_socket_id();
	rte_atomic32_init(&sa->restart_required);

	sfc_log_init(sa, "get family");
	rc = sfc_efx_family(pci_dev, &mem_ebrp, &sa->family);
	if (rc != 0)
		goto fail_family;
	sfc_log_init(sa,
		     "family is %u, membar is %u, function control window offset is %lu",
		     sa->family, mem_ebrp.ebr_index, mem_ebrp.ebr_offset);

	sfc_log_init(sa, "init mem bar");
	rc = sfc_mem_bar_init(sa, &mem_ebrp);
	if (rc != 0)
		goto fail_mem_bar_init;

	sfc_log_init(sa, "create nic");
	rte_spinlock_init(&sa->nic_lock);
	rc = efx_nic_create(sa->family, (efsys_identifier_t *)sa,
			    &sa->mem_bar, mem_ebrp.ebr_offset,
			    &sa->nic_lock, &enp);
	if (rc != 0)
		goto fail_nic_create;
	sa->nic = enp;

	rc = sfc_mcdi_init(sa);
	if (rc != 0)
		goto fail_mcdi_init;

	sfc_log_init(sa, "probe nic");
	rc = sfc_nic_probe(sa);
	if (rc != 0)
		goto fail_nic_probe;

	sfc_log_init(sa, "done");
	return 0;

fail_nic_probe:
	sfc_mcdi_fini(sa);

fail_mcdi_init:
	sfc_log_init(sa, "destroy nic");
	sa->nic = NULL;
	efx_nic_destroy(enp);

fail_nic_create:
	sfc_mem_bar_fini(sa);

fail_mem_bar_init:
fail_family:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_unprobe(struct sfc_adapter *sa)
{
	efx_nic_t *enp = sa->nic;

	sfc_log_init(sa, "entry");

	SFC_ASSERT(sfc_adapter_is_locked(sa));

	sfc_log_init(sa, "unprobe nic");
	efx_nic_unprobe(enp);

	sfc_mcdi_fini(sa);

	/*
	 * Make sure there is no pending alarm to restart since we are
	 * going to free device private which is passed as the callback
	 * opaque data. A new alarm cannot be scheduled since MCDI is
	 * shut down.
	 */
	rte_eal_alarm_cancel(sfc_restart_if_required, sa);

	sfc_mae_clear_switch_port(sa->mae.switch_domain_id,
				  sa->mae.switch_port_id);

	sfc_log_init(sa, "destroy nic");
	sa->nic = NULL;
	efx_nic_destroy(enp);

	sfc_mem_bar_fini(sa);

	sfc_flow_fini(sa);
	sa->state = SFC_ETHDEV_UNINITIALIZED;
}

uint32_t
sfc_register_logtype(const struct rte_pci_addr *pci_addr,
		     const char *lt_prefix_str, uint32_t ll_default)
{
	size_t lt_prefix_str_size = strlen(lt_prefix_str);
	size_t lt_str_size_max;
	char *lt_str = NULL;
	int ret;

	if (SIZE_MAX - PCI_PRI_STR_SIZE - 1 > lt_prefix_str_size) {
		++lt_prefix_str_size; /* Reserve space for prefix separator */
		lt_str_size_max = lt_prefix_str_size + PCI_PRI_STR_SIZE + 1;
	} else {
		return sfc_logtype_driver;
	}

	lt_str = rte_zmalloc("logtype_str", lt_str_size_max, 0);
	if (lt_str == NULL)
		return sfc_logtype_driver;

	strncpy(lt_str, lt_prefix_str, lt_prefix_str_size);
	lt_str[lt_prefix_str_size - 1] = '.';
	rte_pci_device_name(pci_addr, lt_str + lt_prefix_str_size,
			    lt_str_size_max - lt_prefix_str_size);
	lt_str[lt_str_size_max - 1] = '\0';

	ret = rte_log_register_type_and_pick_level(lt_str, ll_default);
	rte_free(lt_str);

	if (ret < 0)
		return sfc_logtype_driver;

	return ret;
}
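
/*
 * Example (hypothetical values): sfc_register_logtype(&pci_addr,
 * "pmd.net.sfc", RTE_LOG_NOTICE) for device 0000:02:00.0 registers the
 * dynamic log type "pmd.net.sfc.0000:02:00.0" (prefix, separator, PCI
 * device name); on any failure the generic sfc_logtype_driver type is
 * returned instead.
 */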

struct sfc_hw_switch_id {
	char board_sn[RTE_SIZEOF_FIELD(efx_nic_board_info_t, enbi_serial)];
};

int
sfc_hw_switch_id_init(struct sfc_adapter *sa,
		      struct sfc_hw_switch_id **idp)
{
	efx_nic_board_info_t board_info;
	struct sfc_hw_switch_id *id;
	int rc;

	if (idp == NULL)
		return EINVAL;

	id = rte_zmalloc("sfc_hw_switch_id", sizeof(*id), 0);
	if (id == NULL)
		return ENOMEM;

	rc = efx_nic_get_board_info(sa->nic, &board_info);
	if (rc != 0) {
		rte_free(id);
		return rc;
	}

	memcpy(id->board_sn, board_info.enbi_serial, sizeof(id->board_sn));

	*idp = id;

	return 0;
}

void
sfc_hw_switch_id_fini(__rte_unused struct sfc_adapter *sa,
		      struct sfc_hw_switch_id *id)
{
	rte_free(id);
}

bool
sfc_hw_switch_ids_equal(const struct sfc_hw_switch_id *left,
			const struct sfc_hw_switch_id *right)
{
	return strncmp(left->board_sn, right->board_sn,
		       sizeof(left->board_sn)) == 0;
}