1 /* SPDX-License-Identifier: BSD-3-Clause
2 *
3 * Copyright(c) 2019-2021 Xilinx, Inc.
4 * Copyright(c) 2007-2019 Solarflare Communications Inc.
5 */
6
7 #include "efx.h"
8 #include "efx_impl.h"
9
10
11 __checkReturn efx_rc_t
efx_family(__in uint16_t venid,__in uint16_t devid,__out efx_family_t * efp,__out unsigned int * membarp)12 efx_family(
13 __in uint16_t venid,
14 __in uint16_t devid,
15 __out efx_family_t *efp,
16 __out unsigned int *membarp)
17 {
18 if (venid == EFX_PCI_VENID_SFC) {
19 switch (devid) {
20 #if EFSYS_OPT_SIENA
21 case EFX_PCI_DEVID_SIENA_F1_UNINIT:
22 /*
23 * Hardware default for PF0 of uninitialised Siena.
24 * manftest must be able to cope with this device id.
25 */
26 case EFX_PCI_DEVID_BETHPAGE:
27 case EFX_PCI_DEVID_SIENA:
28 *efp = EFX_FAMILY_SIENA;
29 *membarp = EFX_MEM_BAR_SIENA;
30 return (0);
31 #endif /* EFSYS_OPT_SIENA */
32
33 #if EFSYS_OPT_HUNTINGTON
34 case EFX_PCI_DEVID_HUNTINGTON_PF_UNINIT:
35 /*
36 * Hardware default for PF0 of uninitialised Huntington.
37 * manftest must be able to cope with this device id.
38 */
39 case EFX_PCI_DEVID_FARMINGDALE:
40 case EFX_PCI_DEVID_GREENPORT:
41 *efp = EFX_FAMILY_HUNTINGTON;
42 *membarp = EFX_MEM_BAR_HUNTINGTON_PF;
43 return (0);
44
45 case EFX_PCI_DEVID_FARMINGDALE_VF:
46 case EFX_PCI_DEVID_GREENPORT_VF:
47 *efp = EFX_FAMILY_HUNTINGTON;
48 *membarp = EFX_MEM_BAR_HUNTINGTON_VF;
49 return (0);
50 #endif /* EFSYS_OPT_HUNTINGTON */
51
52 #if EFSYS_OPT_MEDFORD
53 case EFX_PCI_DEVID_MEDFORD_PF_UNINIT:
54 /*
55 * Hardware default for PF0 of uninitialised Medford.
56 * manftest must be able to cope with this device id.
57 */
58 case EFX_PCI_DEVID_MEDFORD:
59 *efp = EFX_FAMILY_MEDFORD;
60 *membarp = EFX_MEM_BAR_MEDFORD_PF;
61 return (0);
62
63 case EFX_PCI_DEVID_MEDFORD_VF:
64 *efp = EFX_FAMILY_MEDFORD;
65 *membarp = EFX_MEM_BAR_MEDFORD_VF;
66 return (0);
67 #endif /* EFSYS_OPT_MEDFORD */
68
69 #if EFSYS_OPT_MEDFORD2
70 case EFX_PCI_DEVID_MEDFORD2_PF_UNINIT:
71 /*
72 * Hardware default for PF0 of uninitialised Medford2.
73 * manftest must be able to cope with this device id.
74 */
75 case EFX_PCI_DEVID_MEDFORD2:
76 case EFX_PCI_DEVID_MEDFORD2_VF:
77 *efp = EFX_FAMILY_MEDFORD2;
78 *membarp = EFX_MEM_BAR_MEDFORD2;
79 return (0);
80 #endif /* EFSYS_OPT_MEDFORD2 */
81
82 case EFX_PCI_DEVID_FALCON: /* Obsolete, not supported */
83 default:
84 break;
85 }
86 }
87
88 if (venid == EFX_PCI_VENID_XILINX) {
89 switch (devid) {
90 #if EFSYS_OPT_RIVERHEAD
91 case EFX_PCI_DEVID_RIVERHEAD:
92 case EFX_PCI_DEVID_RIVERHEAD_VF:
93 *efp = EFX_FAMILY_RIVERHEAD;
94 *membarp = EFX_MEM_BAR_RIVERHEAD;
95 return (0);
96 #endif /* EFSYS_OPT_RIVERHEAD */
97 default:
98 break;
99 }
100 }
101
102 *efp = EFX_FAMILY_INVALID;
103 return (ENOTSUP);
104 }
105
106 #if EFSYS_OPT_PCI
107
108 __checkReturn efx_rc_t
efx_family_probe_bar(__in uint16_t venid,__in uint16_t devid,__in efsys_pci_config_t * espcp,__in const efx_pci_ops_t * epop,__out efx_family_t * efp,__out efx_bar_region_t * ebrp)109 efx_family_probe_bar(
110 __in uint16_t venid,
111 __in uint16_t devid,
112 __in efsys_pci_config_t *espcp,
113 __in const efx_pci_ops_t *epop,
114 __out efx_family_t *efp,
115 __out efx_bar_region_t *ebrp)
116 {
117 efx_rc_t rc;
118 unsigned int membar;
119
120 if (venid == EFX_PCI_VENID_XILINX) {
121 switch (devid) {
122 #if EFSYS_OPT_RIVERHEAD
123 case EFX_PCI_DEVID_RIVERHEAD:
124 case EFX_PCI_DEVID_RIVERHEAD_VF:
125 rc = rhead_pci_nic_membar_lookup(espcp, epop, ebrp);
126 if (rc == 0)
127 *efp = EFX_FAMILY_RIVERHEAD;
128
129 return (rc);
130 #endif /* EFSYS_OPT_RIVERHEAD */
131 default:
132 break;
133 }
134 }
135
136 rc = efx_family(venid, devid, efp, &membar);
137 if (rc == 0) {
138 ebrp->ebr_type = EFX_BAR_TYPE_MEM;
139 ebrp->ebr_index = membar;
140 ebrp->ebr_offset = 0;
141 ebrp->ebr_length = 0;
142 }
143
144 return (rc);
145 }
146
147 #endif /* EFSYS_OPT_PCI */
148
149 #if EFSYS_OPT_SIENA
150
/* Family-specific NIC method table for Siena controllers. */
static const efx_nic_ops_t	__efx_nic_siena_ops = {
	siena_nic_probe,		/* eno_probe */
	NULL,				/* eno_board_cfg */
	NULL,				/* eno_set_drv_limits */
	siena_nic_reset,		/* eno_reset */
	siena_nic_init,			/* eno_init */
	NULL,				/* eno_get_vi_pool */
	NULL,				/* eno_get_bar_region */
	NULL,				/* eno_hw_unavailable */
	NULL,				/* eno_set_hw_unavailable */
#if EFSYS_OPT_DIAG
	siena_nic_register_test,	/* eno_register_test */
#endif	/* EFSYS_OPT_DIAG */
	siena_nic_fini,			/* eno_fini */
	siena_nic_unprobe,		/* eno_unprobe */
};
167
168 #endif /* EFSYS_OPT_SIENA */
169
170 #if EFSYS_OPT_HUNTINGTON
171
/* Family-specific NIC method table for Huntington (EF10) controllers. */
static const efx_nic_ops_t	__efx_nic_hunt_ops = {
	ef10_nic_probe,			/* eno_probe */
	hunt_board_cfg,			/* eno_board_cfg */
	ef10_nic_set_drv_limits,	/* eno_set_drv_limits */
	ef10_nic_reset,			/* eno_reset */
	ef10_nic_init,			/* eno_init */
	ef10_nic_get_vi_pool,		/* eno_get_vi_pool */
	ef10_nic_get_bar_region,	/* eno_get_bar_region */
	ef10_nic_hw_unavailable,	/* eno_hw_unavailable */
	ef10_nic_set_hw_unavailable,	/* eno_set_hw_unavailable */
#if EFSYS_OPT_DIAG
	ef10_nic_register_test,		/* eno_register_test */
#endif	/* EFSYS_OPT_DIAG */
	ef10_nic_fini,			/* eno_fini */
	ef10_nic_unprobe,		/* eno_unprobe */
};
188
189 #endif /* EFSYS_OPT_HUNTINGTON */
190
191 #if EFSYS_OPT_MEDFORD
192
/* Family-specific NIC method table for Medford (EF10) controllers. */
static const efx_nic_ops_t	__efx_nic_medford_ops = {
	ef10_nic_probe,			/* eno_probe */
	medford_board_cfg,		/* eno_board_cfg */
	ef10_nic_set_drv_limits,	/* eno_set_drv_limits */
	ef10_nic_reset,			/* eno_reset */
	ef10_nic_init,			/* eno_init */
	ef10_nic_get_vi_pool,		/* eno_get_vi_pool */
	ef10_nic_get_bar_region,	/* eno_get_bar_region */
	ef10_nic_hw_unavailable,	/* eno_hw_unavailable */
	ef10_nic_set_hw_unavailable,	/* eno_set_hw_unavailable */
#if EFSYS_OPT_DIAG
	ef10_nic_register_test,		/* eno_register_test */
#endif	/* EFSYS_OPT_DIAG */
	ef10_nic_fini,			/* eno_fini */
	ef10_nic_unprobe,		/* eno_unprobe */
};
209
210 #endif /* EFSYS_OPT_MEDFORD */
211
212 #if EFSYS_OPT_MEDFORD2
213
/* Family-specific NIC method table for Medford2 (EF10) controllers. */
static const efx_nic_ops_t	__efx_nic_medford2_ops = {
	ef10_nic_probe,			/* eno_probe */
	medford2_board_cfg,		/* eno_board_cfg */
	ef10_nic_set_drv_limits,	/* eno_set_drv_limits */
	ef10_nic_reset,			/* eno_reset */
	ef10_nic_init,			/* eno_init */
	ef10_nic_get_vi_pool,		/* eno_get_vi_pool */
	ef10_nic_get_bar_region,	/* eno_get_bar_region */
	ef10_nic_hw_unavailable,	/* eno_hw_unavailable */
	ef10_nic_set_hw_unavailable,	/* eno_set_hw_unavailable */
#if EFSYS_OPT_DIAG
	ef10_nic_register_test,		/* eno_register_test */
#endif	/* EFSYS_OPT_DIAG */
	ef10_nic_fini,			/* eno_fini */
	ef10_nic_unprobe,		/* eno_unprobe */
};
230
231 #endif /* EFSYS_OPT_MEDFORD2 */
232
233 #if EFSYS_OPT_RIVERHEAD
234
/* Family-specific NIC method table for Riverhead (EF100) controllers. */
static const efx_nic_ops_t	__efx_nic_riverhead_ops = {
	rhead_nic_probe,		/* eno_probe */
	rhead_board_cfg,		/* eno_board_cfg */
	rhead_nic_set_drv_limits,	/* eno_set_drv_limits */
	rhead_nic_reset,		/* eno_reset */
	rhead_nic_init,			/* eno_init */
	rhead_nic_get_vi_pool,		/* eno_get_vi_pool */
	rhead_nic_get_bar_region,	/* eno_get_bar_region */
	rhead_nic_hw_unavailable,	/* eno_hw_unavailable */
	rhead_nic_set_hw_unavailable,	/* eno_set_hw_unavailable */
#if EFSYS_OPT_DIAG
	rhead_nic_register_test,	/* eno_register_test */
#endif	/* EFSYS_OPT_DIAG */
	rhead_nic_fini,			/* eno_fini */
	rhead_nic_unprobe,		/* eno_unprobe */
};
251
252 #endif /* EFSYS_OPT_RIVERHEAD */
253
254
255 __checkReturn efx_rc_t
efx_nic_create(__in efx_family_t family,__in efsys_identifier_t * esip,__in efsys_bar_t * esbp,__in uint32_t fcw_offset,__in efsys_lock_t * eslp,__deref_out efx_nic_t ** enpp)256 efx_nic_create(
257 __in efx_family_t family,
258 __in efsys_identifier_t *esip,
259 __in efsys_bar_t *esbp,
260 __in uint32_t fcw_offset,
261 __in efsys_lock_t *eslp,
262 __deref_out efx_nic_t **enpp)
263 {
264 efx_nic_t *enp;
265 efx_rc_t rc;
266
267 EFSYS_ASSERT3U(family, >, EFX_FAMILY_INVALID);
268 EFSYS_ASSERT3U(family, <, EFX_FAMILY_NTYPES);
269
270 /* Allocate a NIC object */
271 EFSYS_KMEM_ALLOC(esip, sizeof (efx_nic_t), enp);
272
273 if (enp == NULL) {
274 rc = ENOMEM;
275 goto fail1;
276 }
277
278 enp->en_magic = EFX_NIC_MAGIC;
279
280 switch (family) {
281 #if EFSYS_OPT_SIENA
282 case EFX_FAMILY_SIENA:
283 enp->en_enop = &__efx_nic_siena_ops;
284 enp->en_features =
285 EFX_FEATURE_IPV6 |
286 EFX_FEATURE_LFSR_HASH_INSERT |
287 EFX_FEATURE_LINK_EVENTS |
288 EFX_FEATURE_PERIODIC_MAC_STATS |
289 EFX_FEATURE_MCDI |
290 EFX_FEATURE_LOOKAHEAD_SPLIT |
291 EFX_FEATURE_MAC_HEADER_FILTERS |
292 EFX_FEATURE_TX_SRC_FILTERS;
293 break;
294 #endif /* EFSYS_OPT_SIENA */
295
296 #if EFSYS_OPT_HUNTINGTON
297 case EFX_FAMILY_HUNTINGTON:
298 enp->en_enop = &__efx_nic_hunt_ops;
299 enp->en_features =
300 EFX_FEATURE_IPV6 |
301 EFX_FEATURE_LINK_EVENTS |
302 EFX_FEATURE_PERIODIC_MAC_STATS |
303 EFX_FEATURE_MCDI |
304 EFX_FEATURE_MAC_HEADER_FILTERS |
305 EFX_FEATURE_MCDI_DMA |
306 EFX_FEATURE_PIO_BUFFERS |
307 EFX_FEATURE_FW_ASSISTED_TSO |
308 EFX_FEATURE_FW_ASSISTED_TSO_V2 |
309 EFX_FEATURE_PACKED_STREAM |
310 EFX_FEATURE_TXQ_CKSUM_OP_DESC;
311 break;
312 #endif /* EFSYS_OPT_HUNTINGTON */
313
314 #if EFSYS_OPT_MEDFORD
315 case EFX_FAMILY_MEDFORD:
316 enp->en_enop = &__efx_nic_medford_ops;
317 /*
318 * FW_ASSISTED_TSO omitted as Medford only supports firmware
319 * assisted TSO version 2, not the v1 scheme used on Huntington.
320 */
321 enp->en_features =
322 EFX_FEATURE_IPV6 |
323 EFX_FEATURE_LINK_EVENTS |
324 EFX_FEATURE_PERIODIC_MAC_STATS |
325 EFX_FEATURE_MCDI |
326 EFX_FEATURE_MAC_HEADER_FILTERS |
327 EFX_FEATURE_MCDI_DMA |
328 EFX_FEATURE_PIO_BUFFERS |
329 EFX_FEATURE_FW_ASSISTED_TSO_V2 |
330 EFX_FEATURE_PACKED_STREAM |
331 EFX_FEATURE_TXQ_CKSUM_OP_DESC;
332 break;
333 #endif /* EFSYS_OPT_MEDFORD */
334
335 #if EFSYS_OPT_MEDFORD2
336 case EFX_FAMILY_MEDFORD2:
337 enp->en_enop = &__efx_nic_medford2_ops;
338 enp->en_features =
339 EFX_FEATURE_IPV6 |
340 EFX_FEATURE_LINK_EVENTS |
341 EFX_FEATURE_PERIODIC_MAC_STATS |
342 EFX_FEATURE_MCDI |
343 EFX_FEATURE_MAC_HEADER_FILTERS |
344 EFX_FEATURE_MCDI_DMA |
345 EFX_FEATURE_PIO_BUFFERS |
346 EFX_FEATURE_FW_ASSISTED_TSO_V2 |
347 EFX_FEATURE_PACKED_STREAM |
348 EFX_FEATURE_TXQ_CKSUM_OP_DESC;
349 break;
350 #endif /* EFSYS_OPT_MEDFORD2 */
351
352 #if EFSYS_OPT_RIVERHEAD
353 case EFX_FAMILY_RIVERHEAD:
354 enp->en_enop = &__efx_nic_riverhead_ops;
355 enp->en_features =
356 EFX_FEATURE_IPV6 |
357 EFX_FEATURE_LINK_EVENTS |
358 EFX_FEATURE_PERIODIC_MAC_STATS |
359 EFX_FEATURE_MCDI |
360 EFX_FEATURE_MAC_HEADER_FILTERS |
361 EFX_FEATURE_MCDI_DMA;
362 enp->en_arch.ef10.ena_fcw_base = fcw_offset;
363 break;
364 #endif /* EFSYS_OPT_RIVERHEAD */
365
366 default:
367 rc = ENOTSUP;
368 goto fail2;
369 }
370
371 if ((family != EFX_FAMILY_RIVERHEAD) && (fcw_offset != 0)) {
372 rc = EINVAL;
373 goto fail3;
374 }
375
376 enp->en_family = family;
377 enp->en_esip = esip;
378 enp->en_esbp = esbp;
379 enp->en_eslp = eslp;
380
381 *enpp = enp;
382
383 return (0);
384
385 fail3:
386 EFSYS_PROBE(fail3);
387 fail2:
388 EFSYS_PROBE(fail2);
389
390 enp->en_magic = 0;
391
392 /* Free the NIC object */
393 EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);
394
395 fail1:
396 EFSYS_PROBE1(fail1, efx_rc_t, rc);
397
398 return (rc);
399 }
400
401 __checkReturn efx_rc_t
efx_nic_probe(__in efx_nic_t * enp,__in efx_fw_variant_t efv)402 efx_nic_probe(
403 __in efx_nic_t *enp,
404 __in efx_fw_variant_t efv)
405 {
406 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
407 const efx_nic_ops_t *enop;
408 efx_rc_t rc;
409
410 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
411 #if EFSYS_OPT_MCDI
412 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
413 #endif /* EFSYS_OPT_MCDI */
414 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE));
415
416 /* Ensure FW variant codes match with MC_CMD_FW codes */
417 EFX_STATIC_ASSERT(EFX_FW_VARIANT_FULL_FEATURED ==
418 MC_CMD_FW_FULL_FEATURED);
419 EFX_STATIC_ASSERT(EFX_FW_VARIANT_LOW_LATENCY ==
420 MC_CMD_FW_LOW_LATENCY);
421 EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM ==
422 MC_CMD_FW_PACKED_STREAM);
423 EFX_STATIC_ASSERT(EFX_FW_VARIANT_HIGH_TX_RATE ==
424 MC_CMD_FW_HIGH_TX_RATE);
425 EFX_STATIC_ASSERT(EFX_FW_VARIANT_PACKED_STREAM_HASH_MODE_1 ==
426 MC_CMD_FW_PACKED_STREAM_HASH_MODE_1);
427 EFX_STATIC_ASSERT(EFX_FW_VARIANT_RULES_ENGINE ==
428 MC_CMD_FW_RULES_ENGINE);
429 EFX_STATIC_ASSERT(EFX_FW_VARIANT_DPDK ==
430 MC_CMD_FW_DPDK);
431 EFX_STATIC_ASSERT(EFX_FW_VARIANT_DONT_CARE ==
432 (int)MC_CMD_FW_DONT_CARE);
433
434 enop = enp->en_enop;
435 enp->efv = efv;
436
437 if ((rc = enop->eno_probe(enp)) != 0)
438 goto fail1;
439
440 encp->enc_features = enp->en_features;
441
442 if ((rc = efx_phy_probe(enp)) != 0)
443 goto fail2;
444
445 enp->en_mod_flags |= EFX_MOD_PROBE;
446
447 return (0);
448
449 fail2:
450 EFSYS_PROBE(fail2);
451
452 enop->eno_unprobe(enp);
453
454 fail1:
455 EFSYS_PROBE1(fail1, efx_rc_t, rc);
456
457 return (rc);
458 }
459
460 __checkReturn efx_rc_t
efx_nic_set_drv_limits(__inout efx_nic_t * enp,__in efx_drv_limits_t * edlp)461 efx_nic_set_drv_limits(
462 __inout efx_nic_t *enp,
463 __in efx_drv_limits_t *edlp)
464 {
465 const efx_nic_ops_t *enop = enp->en_enop;
466 efx_rc_t rc;
467
468 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
469 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
470
471 if (enop->eno_set_drv_limits != NULL) {
472 if ((rc = enop->eno_set_drv_limits(enp, edlp)) != 0)
473 goto fail1;
474 }
475
476 return (0);
477
478 fail1:
479 EFSYS_PROBE1(fail1, efx_rc_t, rc);
480
481 return (rc);
482 }
483
484 __checkReturn efx_rc_t
efx_nic_set_drv_version(__inout efx_nic_t * enp,__in_ecount (length)char const * verp,__in size_t length)485 efx_nic_set_drv_version(
486 __inout efx_nic_t *enp,
487 __in_ecount(length) char const *verp,
488 __in size_t length)
489 {
490 efx_rc_t rc;
491
492 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
493 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_PROBE));
494
495 /*
496 * length is the string content length in bytes.
497 * Accept any content which fits into the version
498 * buffer, excluding the last byte. This is reserved
499 * for an appended NUL terminator.
500 */
501 if (length >= sizeof (enp->en_drv_version)) {
502 rc = E2BIG;
503 goto fail1;
504 }
505
506 (void) memset(enp->en_drv_version, 0,
507 sizeof (enp->en_drv_version));
508 memcpy(enp->en_drv_version, verp, length);
509
510 return (0);
511
512 fail1:
513 EFSYS_PROBE1(fail1, efx_rc_t, rc);
514
515 return (rc);
516 }
517
518
519 __checkReturn efx_rc_t
efx_nic_get_bar_region(__in efx_nic_t * enp,__in efx_nic_region_t region,__out uint32_t * offsetp,__out size_t * sizep)520 efx_nic_get_bar_region(
521 __in efx_nic_t *enp,
522 __in efx_nic_region_t region,
523 __out uint32_t *offsetp,
524 __out size_t *sizep)
525 {
526 const efx_nic_ops_t *enop = enp->en_enop;
527 efx_rc_t rc;
528
529 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
530 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
531 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
532
533 if (enop->eno_get_bar_region == NULL) {
534 rc = ENOTSUP;
535 goto fail1;
536 }
537 if ((rc = (enop->eno_get_bar_region)(enp,
538 region, offsetp, sizep)) != 0) {
539 goto fail2;
540 }
541
542 return (0);
543
544 fail2:
545 EFSYS_PROBE(fail2);
546
547 fail1:
548 EFSYS_PROBE1(fail1, efx_rc_t, rc);
549
550 return (rc);
551 }
552
553
554 __checkReturn efx_rc_t
efx_nic_get_vi_pool(__in efx_nic_t * enp,__out uint32_t * evq_countp,__out uint32_t * rxq_countp,__out uint32_t * txq_countp)555 efx_nic_get_vi_pool(
556 __in efx_nic_t *enp,
557 __out uint32_t *evq_countp,
558 __out uint32_t *rxq_countp,
559 __out uint32_t *txq_countp)
560 {
561 const efx_nic_ops_t *enop = enp->en_enop;
562 efx_nic_cfg_t *encp = &enp->en_nic_cfg;
563 efx_rc_t rc;
564
565 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
566 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
567 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_NIC);
568
569 if (enop->eno_get_vi_pool != NULL) {
570 uint32_t vi_count = 0;
571
572 if ((rc = (enop->eno_get_vi_pool)(enp, &vi_count)) != 0)
573 goto fail1;
574
575 *evq_countp = vi_count;
576 *rxq_countp = vi_count;
577 *txq_countp = vi_count;
578 } else {
579 /* Use NIC limits as default value */
580 *evq_countp = encp->enc_evq_limit;
581 *rxq_countp = encp->enc_rxq_limit;
582 *txq_countp = encp->enc_txq_limit;
583 }
584
585 return (0);
586
587 fail1:
588 EFSYS_PROBE1(fail1, efx_rc_t, rc);
589
590 return (rc);
591 }
592
593
594 __checkReturn efx_rc_t
efx_nic_init(__in efx_nic_t * enp)595 efx_nic_init(
596 __in efx_nic_t *enp)
597 {
598 const efx_nic_ops_t *enop = enp->en_enop;
599 efx_rc_t rc;
600
601 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
602 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
603
604 if (enp->en_mod_flags & EFX_MOD_NIC) {
605 rc = EINVAL;
606 goto fail1;
607 }
608
609 if ((rc = enop->eno_init(enp)) != 0)
610 goto fail2;
611
612 enp->en_mod_flags |= EFX_MOD_NIC;
613
614 return (0);
615
616 fail2:
617 EFSYS_PROBE(fail2);
618 fail1:
619 EFSYS_PROBE1(fail1, efx_rc_t, rc);
620
621 return (rc);
622 }
623
624 void
efx_nic_fini(__in efx_nic_t * enp)625 efx_nic_fini(
626 __in efx_nic_t *enp)
627 {
628 const efx_nic_ops_t *enop = enp->en_enop;
629
630 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
631 EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
632 EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_NIC);
633 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
634 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
635 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
636 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
637
638 enop->eno_fini(enp);
639
640 enp->en_mod_flags &= ~EFX_MOD_NIC;
641 }
642
643 void
efx_nic_unprobe(__in efx_nic_t * enp)644 efx_nic_unprobe(
645 __in efx_nic_t *enp)
646 {
647 const efx_nic_ops_t *enop = enp->en_enop;
648
649 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
650 #if EFSYS_OPT_MCDI
651 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
652 #endif /* EFSYS_OPT_MCDI */
653 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
654 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
655 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_INTR));
656 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_EV));
657 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
658 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
659
660 efx_phy_unprobe(enp);
661
662 enop->eno_unprobe(enp);
663
664 enp->en_mod_flags &= ~EFX_MOD_PROBE;
665 }
666
667 void
efx_nic_destroy(__in efx_nic_t * enp)668 efx_nic_destroy(
669 __in efx_nic_t *enp)
670 {
671 efsys_identifier_t *esip = enp->en_esip;
672
673 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
674 EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);
675
676 enp->en_family = EFX_FAMILY_INVALID;
677 enp->en_esip = NULL;
678 enp->en_esbp = NULL;
679 enp->en_eslp = NULL;
680
681 enp->en_enop = NULL;
682
683 enp->en_magic = 0;
684
685 /* Free the NIC object */
686 EFSYS_KMEM_FREE(esip, sizeof (efx_nic_t), enp);
687 }
688
689 __checkReturn efx_rc_t
efx_nic_reset(__in efx_nic_t * enp)690 efx_nic_reset(
691 __in efx_nic_t *enp)
692 {
693 const efx_nic_ops_t *enop = enp->en_enop;
694 unsigned int mod_flags;
695 efx_rc_t rc;
696
697 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
698 EFSYS_ASSERT(enp->en_mod_flags & EFX_MOD_PROBE);
699 /*
700 * All modules except the MCDI, PROBE, NVRAM, VPD, MON, TUNNEL
701 * (which we do not reset here) must have been shut down or never
702 * initialized.
703 *
704 * A rule of thumb here is: If the controller or MC reboots, is *any*
705 * state lost. If it's lost and needs reapplying, then the module
706 * *must* not be initialised during the reset.
707 */
708 mod_flags = enp->en_mod_flags;
709 mod_flags &= ~(EFX_MOD_MCDI | EFX_MOD_PROBE | EFX_MOD_NVRAM |
710 EFX_MOD_VPD | EFX_MOD_MON);
711 #if EFSYS_OPT_TUNNEL
712 mod_flags &= ~EFX_MOD_TUNNEL;
713 #endif /* EFSYS_OPT_TUNNEL */
714 EFSYS_ASSERT3U(mod_flags, ==, 0);
715 if (mod_flags != 0) {
716 rc = EINVAL;
717 goto fail1;
718 }
719
720 if ((rc = enop->eno_reset(enp)) != 0)
721 goto fail2;
722
723 return (0);
724
725 fail2:
726 EFSYS_PROBE(fail2);
727 fail1:
728 EFSYS_PROBE1(fail1, efx_rc_t, rc);
729
730 return (rc);
731 }
732
733 const efx_nic_cfg_t *
efx_nic_cfg_get(__in const efx_nic_t * enp)734 efx_nic_cfg_get(
735 __in const efx_nic_t *enp)
736 {
737 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
738 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
739
740 return (&(enp->en_nic_cfg));
741 }
742
743 __checkReturn efx_rc_t
efx_nic_get_fw_version(__in efx_nic_t * enp,__out efx_nic_fw_info_t * enfip)744 efx_nic_get_fw_version(
745 __in efx_nic_t *enp,
746 __out efx_nic_fw_info_t *enfip)
747 {
748 uint16_t mc_fw_version[4];
749 efx_rc_t rc;
750
751 if (enfip == NULL) {
752 rc = EINVAL;
753 goto fail1;
754 }
755
756 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
757 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
758
759 /* Ensure RXDP_FW_ID codes match with MC_CMD_GET_CAPABILITIES codes */
760 EFX_STATIC_ASSERT(EFX_RXDP_FULL_FEATURED_FW_ID ==
761 MC_CMD_GET_CAPABILITIES_OUT_RXDP);
762 EFX_STATIC_ASSERT(EFX_RXDP_LOW_LATENCY_FW_ID ==
763 MC_CMD_GET_CAPABILITIES_OUT_RXDP_LOW_LATENCY);
764 EFX_STATIC_ASSERT(EFX_RXDP_PACKED_STREAM_FW_ID ==
765 MC_CMD_GET_CAPABILITIES_OUT_RXDP_PACKED_STREAM);
766 EFX_STATIC_ASSERT(EFX_RXDP_RULES_ENGINE_FW_ID ==
767 MC_CMD_GET_CAPABILITIES_OUT_RXDP_RULES_ENGINE);
768 EFX_STATIC_ASSERT(EFX_RXDP_DPDK_FW_ID ==
769 MC_CMD_GET_CAPABILITIES_OUT_RXDP_DPDK);
770
771 rc = efx_mcdi_version(enp, mc_fw_version, NULL, NULL);
772 if (rc != 0)
773 goto fail2;
774
775 rc = efx_mcdi_get_capabilities(enp, NULL,
776 &enfip->enfi_rx_dpcpu_fw_id,
777 &enfip->enfi_tx_dpcpu_fw_id,
778 NULL, NULL);
779 if (rc == 0) {
780 enfip->enfi_dpcpu_fw_ids_valid = B_TRUE;
781 } else if (rc == ENOTSUP) {
782 enfip->enfi_dpcpu_fw_ids_valid = B_FALSE;
783 enfip->enfi_rx_dpcpu_fw_id = 0;
784 enfip->enfi_tx_dpcpu_fw_id = 0;
785 } else {
786 goto fail3;
787 }
788
789 memcpy(enfip->enfi_mc_fw_version, mc_fw_version,
790 sizeof (mc_fw_version));
791
792 return (0);
793
794 fail3:
795 EFSYS_PROBE(fail3);
796 fail2:
797 EFSYS_PROBE(fail2);
798 fail1:
799 EFSYS_PROBE1(fail1, efx_rc_t, rc);
800
801 return (rc);
802 }
803
804 __checkReturn efx_rc_t
efx_nic_get_board_info(__in efx_nic_t * enp,__out efx_nic_board_info_t * board_infop)805 efx_nic_get_board_info(
806 __in efx_nic_t *enp,
807 __out efx_nic_board_info_t *board_infop)
808 {
809 efx_mcdi_version_t ver;
810 efx_rc_t rc;
811
812 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
813 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);
814
815 rc = efx_mcdi_get_version(enp, EFX_MCDI_VERSION_BOARD_INFO, &ver);
816 if (rc == EMSGSIZE) {
817 /*
818 * Typically, EMSGSIZE is returned by above call in the
819 * case when the NIC does not provide extra information.
820 */
821 rc = ENOTSUP;
822 goto fail1;
823 } else if (rc != 0) {
824 goto fail2;
825 }
826
827 if ((ver.emv_flags & EFX_MCDI_VERSION_BOARD_INFO) == 0) {
828 rc = ENOTSUP;
829 goto fail3;
830 }
831
832 memcpy(board_infop, &ver.emv_board_info, sizeof (*board_infop));
833
834 /* MCDI should provide NUL-terminated strings, but stay vigilant. */
835 board_infop->enbi_serial[sizeof (board_infop->enbi_serial) - 1] = '\0';
836 board_infop->enbi_name[sizeof (board_infop->enbi_name) - 1] = '\0';
837
838 return (0);
839
840 fail3:
841 EFSYS_PROBE(fail3);
842 fail2:
843 EFSYS_PROBE(fail2);
844 fail1:
845 EFSYS_PROBE1(fail1, efx_rc_t, rc);
846
847 return (rc);
848 }
849
850 __checkReturn boolean_t
efx_nic_hw_unavailable(__in efx_nic_t * enp)851 efx_nic_hw_unavailable(
852 __in efx_nic_t *enp)
853 {
854 const efx_nic_ops_t *enop = enp->en_enop;
855
856 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
857 /* NOTE: can be used by MCDI before NIC probe */
858
859 if (enop->eno_hw_unavailable != NULL) {
860 if ((enop->eno_hw_unavailable)(enp) != B_FALSE)
861 goto unavail;
862 }
863
864 return (B_FALSE);
865
866 unavail:
867 return (B_TRUE);
868 }
869
870 void
efx_nic_set_hw_unavailable(__in efx_nic_t * enp)871 efx_nic_set_hw_unavailable(
872 __in efx_nic_t *enp)
873 {
874 const efx_nic_ops_t *enop = enp->en_enop;
875
876 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
877
878 if (enop->eno_set_hw_unavailable != NULL)
879 enop->eno_set_hw_unavailable(enp);
880 }
881
882
883 #if EFSYS_OPT_DIAG
884
885 __checkReturn efx_rc_t
efx_nic_register_test(__in efx_nic_t * enp)886 efx_nic_register_test(
887 __in efx_nic_t *enp)
888 {
889 const efx_nic_ops_t *enop = enp->en_enop;
890 efx_rc_t rc;
891
892 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
893 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
894 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_NIC));
895
896 if ((rc = enop->eno_register_test(enp)) != 0)
897 goto fail1;
898
899 return (0);
900
901 fail1:
902 EFSYS_PROBE1(fail1, efx_rc_t, rc);
903
904 return (rc);
905 }
906
907 #endif /* EFSYS_OPT_DIAG */
908
909 #if EFSYS_OPT_LOOPBACK
910
911 extern void
efx_loopback_mask(__in efx_loopback_kind_t loopback_kind,__out efx_qword_t * maskp)912 efx_loopback_mask(
913 __in efx_loopback_kind_t loopback_kind,
914 __out efx_qword_t *maskp)
915 {
916 efx_qword_t mask;
917
918 EFSYS_ASSERT3U(loopback_kind, <, EFX_LOOPBACK_NKINDS);
919 EFSYS_ASSERT(maskp != NULL);
920
921 /* Assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree */
922 #define LOOPBACK_CHECK(_mcdi, _efx) \
923 EFX_STATIC_ASSERT(MC_CMD_LOOPBACK_##_mcdi == EFX_LOOPBACK_##_efx)
924
925 LOOPBACK_CHECK(NONE, OFF);
926 LOOPBACK_CHECK(DATA, DATA);
927 LOOPBACK_CHECK(GMAC, GMAC);
928 LOOPBACK_CHECK(XGMII, XGMII);
929 LOOPBACK_CHECK(XGXS, XGXS);
930 LOOPBACK_CHECK(XAUI, XAUI);
931 LOOPBACK_CHECK(GMII, GMII);
932 LOOPBACK_CHECK(SGMII, SGMII);
933 LOOPBACK_CHECK(XGBR, XGBR);
934 LOOPBACK_CHECK(XFI, XFI);
935 LOOPBACK_CHECK(XAUI_FAR, XAUI_FAR);
936 LOOPBACK_CHECK(GMII_FAR, GMII_FAR);
937 LOOPBACK_CHECK(SGMII_FAR, SGMII_FAR);
938 LOOPBACK_CHECK(XFI_FAR, XFI_FAR);
939 LOOPBACK_CHECK(GPHY, GPHY);
940 LOOPBACK_CHECK(PHYXS, PHY_XS);
941 LOOPBACK_CHECK(PCS, PCS);
942 LOOPBACK_CHECK(PMAPMD, PMA_PMD);
943 LOOPBACK_CHECK(XPORT, XPORT);
944 LOOPBACK_CHECK(XGMII_WS, XGMII_WS);
945 LOOPBACK_CHECK(XAUI_WS, XAUI_WS);
946 LOOPBACK_CHECK(XAUI_WS_FAR, XAUI_WS_FAR);
947 LOOPBACK_CHECK(XAUI_WS_NEAR, XAUI_WS_NEAR);
948 LOOPBACK_CHECK(GMII_WS, GMII_WS);
949 LOOPBACK_CHECK(XFI_WS, XFI_WS);
950 LOOPBACK_CHECK(XFI_WS_FAR, XFI_WS_FAR);
951 LOOPBACK_CHECK(PHYXS_WS, PHYXS_WS);
952 LOOPBACK_CHECK(PMA_INT, PMA_INT);
953 LOOPBACK_CHECK(SD_NEAR, SD_NEAR);
954 LOOPBACK_CHECK(SD_FAR, SD_FAR);
955 LOOPBACK_CHECK(PMA_INT_WS, PMA_INT_WS);
956 LOOPBACK_CHECK(SD_FEP2_WS, SD_FEP2_WS);
957 LOOPBACK_CHECK(SD_FEP1_5_WS, SD_FEP1_5_WS);
958 LOOPBACK_CHECK(SD_FEP_WS, SD_FEP_WS);
959 LOOPBACK_CHECK(SD_FES_WS, SD_FES_WS);
960 LOOPBACK_CHECK(AOE_INT_NEAR, AOE_INT_NEAR);
961 LOOPBACK_CHECK(DATA_WS, DATA_WS);
962 LOOPBACK_CHECK(FORCE_EXT_LINK, FORCE_EXT_LINK);
963 #undef LOOPBACK_CHECK
964
965 /* Build bitmask of possible loopback types */
966 EFX_ZERO_QWORD(mask);
967
968 if ((loopback_kind == EFX_LOOPBACK_KIND_OFF) ||
969 (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
970 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_OFF);
971 }
972
973 if ((loopback_kind == EFX_LOOPBACK_KIND_MAC) ||
974 (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
975 /*
976 * The "MAC" grouping has historically been used by drivers to
977 * mean loopbacks supported by on-chip hardware. Keep that
978 * meaning here, and include on-chip PHY layer loopbacks.
979 */
980 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_DATA);
981 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMAC);
982 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGMII);
983 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGXS);
984 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI);
985 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII);
986 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII);
987 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XGBR);
988 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI);
989 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XAUI_FAR);
990 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GMII_FAR);
991 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SGMII_FAR);
992 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_XFI_FAR);
993 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_INT);
994 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_NEAR);
995 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_SD_FAR);
996 }
997
998 if ((loopback_kind == EFX_LOOPBACK_KIND_PHY) ||
999 (loopback_kind == EFX_LOOPBACK_KIND_ALL)) {
1000 /*
1001 * The "PHY" grouping has historically been used by drivers to
1002 * mean loopbacks supported by off-chip hardware. Keep that
1003 * meaning here.
1004 */
1005 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_GPHY);
1006 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PHY_XS);
1007 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PCS);
1008 EFX_SET_QWORD_BIT(mask, EFX_LOOPBACK_PMA_PMD);
1009 }
1010
1011 *maskp = mask;
1012 }
1013
1014 __checkReturn efx_rc_t
efx_mcdi_get_loopback_modes(__in efx_nic_t * enp)1015 efx_mcdi_get_loopback_modes(
1016 __in efx_nic_t *enp)
1017 {
1018 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1019 efx_mcdi_req_t req;
1020 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_LOOPBACK_MODES_IN_LEN,
1021 MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN);
1022 efx_qword_t mask;
1023 efx_qword_t modes;
1024 efx_rc_t rc;
1025
1026 req.emr_cmd = MC_CMD_GET_LOOPBACK_MODES;
1027 req.emr_in_buf = payload;
1028 req.emr_in_length = MC_CMD_GET_LOOPBACK_MODES_IN_LEN;
1029 req.emr_out_buf = payload;
1030 req.emr_out_length = MC_CMD_GET_LOOPBACK_MODES_OUT_V2_LEN;
1031
1032 efx_mcdi_execute(enp, &req);
1033
1034 if (req.emr_rc != 0) {
1035 rc = req.emr_rc;
1036 goto fail1;
1037 }
1038
1039 if (req.emr_out_length_used <
1040 MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_OFST +
1041 MC_CMD_GET_LOOPBACK_MODES_OUT_SUGGESTED_LEN) {
1042 rc = EMSGSIZE;
1043 goto fail2;
1044 }
1045
1046 /*
1047 * We assert the MC_CMD_LOOPBACK and EFX_LOOPBACK namespaces agree
1048 * in efx_loopback_mask() and in siena_phy.c:siena_phy_get_link().
1049 */
1050 efx_loopback_mask(EFX_LOOPBACK_KIND_ALL, &mask);
1051
1052 EFX_AND_QWORD(mask,
1053 *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_SUGGESTED));
1054
1055 modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_100M);
1056 EFX_AND_QWORD(modes, mask);
1057 encp->enc_loopback_types[EFX_LINK_100FDX] = modes;
1058
1059 modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_1G);
1060 EFX_AND_QWORD(modes, mask);
1061 encp->enc_loopback_types[EFX_LINK_1000FDX] = modes;
1062
1063 modes = *MCDI_OUT2(req, efx_qword_t, GET_LOOPBACK_MODES_OUT_10G);
1064 EFX_AND_QWORD(modes, mask);
1065 encp->enc_loopback_types[EFX_LINK_10000FDX] = modes;
1066
1067 if (req.emr_out_length_used >=
1068 MC_CMD_GET_LOOPBACK_MODES_OUT_40G_OFST +
1069 MC_CMD_GET_LOOPBACK_MODES_OUT_40G_LEN) {
1070 /* Response includes 40G loopback modes */
1071 modes = *MCDI_OUT2(req, efx_qword_t,
1072 GET_LOOPBACK_MODES_OUT_40G);
1073 EFX_AND_QWORD(modes, mask);
1074 encp->enc_loopback_types[EFX_LINK_40000FDX] = modes;
1075 }
1076
1077 if (req.emr_out_length_used >=
1078 MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_OFST +
1079 MC_CMD_GET_LOOPBACK_MODES_OUT_V2_25G_LEN) {
1080 /* Response includes 25G loopback modes */
1081 modes = *MCDI_OUT2(req, efx_qword_t,
1082 GET_LOOPBACK_MODES_OUT_V2_25G);
1083 EFX_AND_QWORD(modes, mask);
1084 encp->enc_loopback_types[EFX_LINK_25000FDX] = modes;
1085 }
1086
1087 if (req.emr_out_length_used >=
1088 MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_OFST +
1089 MC_CMD_GET_LOOPBACK_MODES_OUT_V2_50G_LEN) {
1090 /* Response includes 50G loopback modes */
1091 modes = *MCDI_OUT2(req, efx_qword_t,
1092 GET_LOOPBACK_MODES_OUT_V2_50G);
1093 EFX_AND_QWORD(modes, mask);
1094 encp->enc_loopback_types[EFX_LINK_50000FDX] = modes;
1095 }
1096
1097 if (req.emr_out_length_used >=
1098 MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_OFST +
1099 MC_CMD_GET_LOOPBACK_MODES_OUT_V2_100G_LEN) {
1100 /* Response includes 100G loopback modes */
1101 modes = *MCDI_OUT2(req, efx_qword_t,
1102 GET_LOOPBACK_MODES_OUT_V2_100G);
1103 EFX_AND_QWORD(modes, mask);
1104 encp->enc_loopback_types[EFX_LINK_100000FDX] = modes;
1105 }
1106
1107 EFX_ZERO_QWORD(modes);
1108 EFX_SET_QWORD_BIT(modes, EFX_LOOPBACK_OFF);
1109 EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100FDX]);
1110 EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_1000FDX]);
1111 EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_10000FDX]);
1112 EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_40000FDX]);
1113 EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_25000FDX]);
1114 EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_50000FDX]);
1115 EFX_OR_QWORD(modes, encp->enc_loopback_types[EFX_LINK_100000FDX]);
1116 encp->enc_loopback_types[EFX_LINK_UNKNOWN] = modes;
1117
1118 return (0);
1119
1120 fail2:
1121 EFSYS_PROBE(fail2);
1122 fail1:
1123 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1124
1125 return (rc);
1126 }
1127
1128 #endif /* EFSYS_OPT_LOOPBACK */
1129
1130 __checkReturn efx_rc_t
efx_nic_calculate_pcie_link_bandwidth(__in uint32_t pcie_link_width,__in uint32_t pcie_link_gen,__out uint32_t * bandwidth_mbpsp)1131 efx_nic_calculate_pcie_link_bandwidth(
1132 __in uint32_t pcie_link_width,
1133 __in uint32_t pcie_link_gen,
1134 __out uint32_t *bandwidth_mbpsp)
1135 {
1136 uint32_t lane_bandwidth;
1137 uint32_t total_bandwidth;
1138 efx_rc_t rc;
1139
1140 if ((pcie_link_width == 0) || (pcie_link_width > 16) ||
1141 !ISP2(pcie_link_width)) {
1142 rc = EINVAL;
1143 goto fail1;
1144 }
1145
1146 switch (pcie_link_gen) {
1147 case EFX_PCIE_LINK_SPEED_GEN1:
1148 /* 2.5 Gb/s raw bandwidth with 8b/10b encoding */
1149 lane_bandwidth = 2000;
1150 break;
1151 case EFX_PCIE_LINK_SPEED_GEN2:
1152 /* 5.0 Gb/s raw bandwidth with 8b/10b encoding */
1153 lane_bandwidth = 4000;
1154 break;
1155 case EFX_PCIE_LINK_SPEED_GEN3:
1156 /* 8.0 Gb/s raw bandwidth with 128b/130b encoding */
1157 lane_bandwidth = 7877;
1158 break;
1159 default:
1160 rc = EINVAL;
1161 goto fail2;
1162 }
1163
1164 total_bandwidth = lane_bandwidth * pcie_link_width;
1165 *bandwidth_mbpsp = total_bandwidth;
1166
1167 return (0);
1168
1169 fail2:
1170 EFSYS_PROBE(fail2);
1171 fail1:
1172 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1173
1174 return (rc);
1175 }
1176
1177 #if EFSYS_OPT_FW_SUBVARIANT_AWARE
1178
1179 __checkReturn efx_rc_t
efx_nic_get_fw_subvariant(__in efx_nic_t * enp,__out efx_nic_fw_subvariant_t * subvariantp)1180 efx_nic_get_fw_subvariant(
1181 __in efx_nic_t *enp,
1182 __out efx_nic_fw_subvariant_t *subvariantp)
1183 {
1184 efx_rc_t rc;
1185 uint32_t value;
1186
1187 rc = efx_mcdi_get_nic_global(enp,
1188 MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT, &value);
1189 if (rc != 0)
1190 goto fail1;
1191
1192 /* Mapping is not required since values match MCDI */
1193 EFX_STATIC_ASSERT(EFX_NIC_FW_SUBVARIANT_DEFAULT ==
1194 MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT);
1195 EFX_STATIC_ASSERT(EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM ==
1196 MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM);
1197
1198 switch (value) {
1199 case MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_DEFAULT:
1200 case MC_CMD_SET_NIC_GLOBAL_IN_FW_SUBVARIANT_NO_TX_CSUM:
1201 *subvariantp = value;
1202 break;
1203 default:
1204 rc = EINVAL;
1205 goto fail2;
1206 }
1207
1208 return (0);
1209
1210 fail2:
1211 EFSYS_PROBE(fail2);
1212
1213 fail1:
1214 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1215
1216 return (rc);
1217 }
1218
1219 __checkReturn efx_rc_t
efx_nic_set_fw_subvariant(__in efx_nic_t * enp,__in efx_nic_fw_subvariant_t subvariant)1220 efx_nic_set_fw_subvariant(
1221 __in efx_nic_t *enp,
1222 __in efx_nic_fw_subvariant_t subvariant)
1223 {
1224 efx_rc_t rc;
1225
1226 switch (subvariant) {
1227 case EFX_NIC_FW_SUBVARIANT_DEFAULT:
1228 case EFX_NIC_FW_SUBVARIANT_NO_TX_CSUM:
1229 /* Mapping is not required since values match MCDI */
1230 break;
1231 default:
1232 rc = EINVAL;
1233 goto fail1;
1234 }
1235
1236 rc = efx_mcdi_set_nic_global(enp,
1237 MC_CMD_SET_NIC_GLOBAL_IN_FIRMWARE_SUBVARIANT, subvariant);
1238 if (rc != 0)
1239 goto fail2;
1240
1241 return (0);
1242
1243 fail2:
1244 EFSYS_PROBE(fail2);
1245
1246 fail1:
1247 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1248
1249 return (rc);
1250 }
1251
1252 #endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */
1253
1254 __checkReturn efx_rc_t
efx_nic_check_pcie_link_speed(__in efx_nic_t * enp,__in uint32_t pcie_link_width,__in uint32_t pcie_link_gen,__out efx_pcie_link_performance_t * resultp)1255 efx_nic_check_pcie_link_speed(
1256 __in efx_nic_t *enp,
1257 __in uint32_t pcie_link_width,
1258 __in uint32_t pcie_link_gen,
1259 __out efx_pcie_link_performance_t *resultp)
1260 {
1261 efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1262 uint32_t bandwidth;
1263 efx_pcie_link_performance_t result;
1264 efx_rc_t rc;
1265
1266 if ((encp->enc_required_pcie_bandwidth_mbps == 0) ||
1267 (pcie_link_width == 0) || (pcie_link_width == 32) ||
1268 (pcie_link_gen == 0)) {
1269 /*
1270 * No usable info on what is required and/or in use. In virtual
1271 * machines, sometimes the PCIe link width is reported as 0 or
1272 * 32, or the speed as 0.
1273 */
1274 result = EFX_PCIE_LINK_PERFORMANCE_UNKNOWN_BANDWIDTH;
1275 goto out;
1276 }
1277
1278 /* Calculate the available bandwidth in megabits per second */
1279 rc = efx_nic_calculate_pcie_link_bandwidth(pcie_link_width,
1280 pcie_link_gen, &bandwidth);
1281 if (rc != 0)
1282 goto fail1;
1283
1284 if (bandwidth < encp->enc_required_pcie_bandwidth_mbps) {
1285 result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_BANDWIDTH;
1286 } else if (pcie_link_gen < encp->enc_max_pcie_link_gen) {
1287 /* The link provides enough bandwidth but not optimal latency */
1288 result = EFX_PCIE_LINK_PERFORMANCE_SUBOPTIMAL_LATENCY;
1289 } else {
1290 result = EFX_PCIE_LINK_PERFORMANCE_OPTIMAL;
1291 }
1292
1293 out:
1294 *resultp = result;
1295
1296 return (0);
1297
1298 fail1:
1299 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1300
1301 return (rc);
1302 }
1303
1304 /* Required en_eslp lock held */
1305 static __checkReturn efx_rc_t
efx_nic_dma_config_regioned_find_region(__in const efx_nic_t * enp,__in efsys_dma_addr_t trgt_addr,__in size_t len,__out const efx_nic_dma_region_t ** regionp)1306 efx_nic_dma_config_regioned_find_region(
1307 __in const efx_nic_t *enp,
1308 __in efsys_dma_addr_t trgt_addr,
1309 __in size_t len,
1310 __out const efx_nic_dma_region_t **regionp)
1311 {
1312 const efx_nic_dma_region_info_t *region_info;
1313 const efx_nic_dma_region_t *region;
1314 unsigned int i;
1315 efx_rc_t rc;
1316
1317 if (efx_nic_cfg_get(enp)->enc_dma_mapping !=
1318 EFX_NIC_DMA_MAPPING_REGIONED) {
1319 rc = EINVAL;
1320 goto fail1;
1321 }
1322
1323 region_info = &enp->en_dma.end_u.endu_region_info;
1324
1325 for (i = 0; i < region_info->endri_count; ++i) {
1326 efsys_dma_addr_t offset;
1327
1328 region = ®ion_info->endri_regions[i];
1329 if (region->endr_inuse == B_FALSE)
1330 continue;
1331
1332 if (trgt_addr < region->endr_trgt_base)
1333 continue;
1334
1335 EFSYS_ASSERT3U(region->endr_window_log2, <, 64);
1336 offset = trgt_addr - region->endr_trgt_base;
1337 if (offset + len > (1ULL << region->endr_window_log2))
1338 continue;
1339
1340 *regionp = region;
1341 return (0);
1342 }
1343
1344 rc = ENOENT;
1345 goto fail2;
1346
1347 fail2:
1348 EFSYS_PROBE(fail2);
1349 fail1:
1350 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1351
1352 return (rc);
1353 }
1354
1355 static __checkReturn efx_rc_t
efx_nic_dma_config_regioned_add_region(__in efx_nic_t * enp,__in efsys_dma_addr_t trgt_addr,__in size_t len,__out const efx_nic_dma_region_t ** regionp)1356 efx_nic_dma_config_regioned_add_region(
1357 __in efx_nic_t *enp,
1358 __in efsys_dma_addr_t trgt_addr,
1359 __in size_t len,
1360 __out const efx_nic_dma_region_t **regionp)
1361 {
1362 efx_nic_dma_region_info_t *region_info;
1363 efx_nic_dma_region_t *region;
1364 unsigned int i;
1365 efx_rc_t rc;
1366
1367 if (efx_nic_cfg_get(enp)->enc_dma_mapping !=
1368 EFX_NIC_DMA_MAPPING_REGIONED) {
1369 rc = EINVAL;
1370 goto fail1;
1371 }
1372
1373 region_info = &enp->en_dma.end_u.endu_region_info;
1374
1375 for (i = 0; i < region_info->endri_count; ++i) {
1376 efsys_dma_addr_t trgt_base;
1377 efsys_dma_addr_t offset;
1378
1379 region = ®ion_info->endri_regions[i];
1380 if (region->endr_inuse == B_TRUE)
1381 continue;
1382
1383 /*
1384 * Align target address base in accordance with
1385 * the region requirements.
1386 */
1387 EFSYS_ASSERT3U(region->endr_align_log2, <, 64);
1388 trgt_base = EFX_P2ALIGN(efsys_dma_addr_t, trgt_addr,
1389 (1ULL << region->endr_align_log2));
1390
1391 offset = trgt_addr - trgt_base;
1392
1393 /* Check if region window is sufficient */
1394 EFSYS_ASSERT3U(region->endr_window_log2, <, 64);
1395 if (offset + len > (1ULL << region->endr_window_log2))
1396 continue;
1397
1398 region->endr_trgt_base = trgt_base;
1399 region->endr_inuse = B_TRUE;
1400
1401 *regionp = region;
1402 return (0);
1403 }
1404
1405 /* No suitable free region found */
1406 rc = ENOMEM;
1407 goto fail2;
1408
1409 fail2:
1410 EFSYS_PROBE(fail2);
1411 fail1:
1412 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1413
1414 return (rc);
1415 }
1416
1417 static __checkReturn efx_rc_t
efx_nic_dma_config_regioned_add(__in efx_nic_t * enp,__in efsys_dma_addr_t trgt_addr,__in size_t len,__out_opt efsys_dma_addr_t * nic_basep,__out_opt efsys_dma_addr_t * trgt_basep,__out_opt size_t * map_lenp)1418 efx_nic_dma_config_regioned_add(
1419 __in efx_nic_t *enp,
1420 __in efsys_dma_addr_t trgt_addr,
1421 __in size_t len,
1422 __out_opt efsys_dma_addr_t *nic_basep,
1423 __out_opt efsys_dma_addr_t *trgt_basep,
1424 __out_opt size_t *map_lenp)
1425 {
1426 const efx_nic_dma_region_t *region;
1427 efsys_lock_state_t state;
1428 efx_rc_t rc;
1429
1430 EFSYS_LOCK(enp->en_eslp, state);
1431
1432 rc = efx_nic_dma_config_regioned_find_region(enp, trgt_addr, len,
1433 ®ion);
1434 switch (rc) {
1435 case 0:
1436 /* Already covered by existing mapping */
1437 break;
1438 case ENOENT:
1439 /* No existing mapping found */
1440 rc = efx_nic_dma_config_regioned_add_region(enp,
1441 trgt_addr, len, ®ion);
1442 if (rc != 0)
1443 goto fail1;
1444 break;
1445 default:
1446 goto fail2;
1447 }
1448
1449 if (nic_basep != NULL)
1450 *nic_basep = region->endr_nic_base;
1451 if (trgt_basep != NULL)
1452 *trgt_basep = region->endr_trgt_base;
1453 if (map_lenp != NULL)
1454 *map_lenp = 1ULL << region->endr_window_log2;
1455
1456 EFSYS_UNLOCK(enp->en_eslp, state);
1457
1458 return (0);
1459
1460 fail2:
1461 EFSYS_PROBE(fail2);
1462 fail1:
1463 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1464
1465 EFSYS_UNLOCK(enp->en_eslp, state);
1466
1467 return (rc);
1468 }
1469
1470 __checkReturn efx_rc_t
efx_nic_dma_config_add(__in efx_nic_t * enp,__in efsys_dma_addr_t trgt_addr,__in size_t len,__out_opt efsys_dma_addr_t * nic_basep,__out_opt efsys_dma_addr_t * trgt_basep,__out_opt size_t * map_lenp)1471 efx_nic_dma_config_add(
1472 __in efx_nic_t *enp,
1473 __in efsys_dma_addr_t trgt_addr,
1474 __in size_t len,
1475 __out_opt efsys_dma_addr_t *nic_basep,
1476 __out_opt efsys_dma_addr_t *trgt_basep,
1477 __out_opt size_t *map_lenp)
1478 {
1479 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
1480 efx_rc_t rc;
1481
1482 switch (encp->enc_dma_mapping) {
1483 case EFX_NIC_DMA_MAPPING_FLAT:
1484 /* No mapping is required */
1485 if (nic_basep != NULL)
1486 *nic_basep = 0;
1487 if (trgt_basep != NULL)
1488 *trgt_basep = 0;
1489 if (map_lenp != NULL)
1490 *map_lenp = 0;
1491 break;
1492 case EFX_NIC_DMA_MAPPING_REGIONED:
1493 rc = efx_nic_dma_config_regioned_add(enp, trgt_addr, len,
1494 nic_basep, trgt_basep, map_lenp);
1495 if (rc != 0)
1496 goto fail1;
1497 break;
1498 case EFX_NIC_DMA_MAPPING_UNKNOWN:
1499 default:
1500 rc = ENOTSUP;
1501 goto fail2;
1502 }
1503
1504 return (0);
1505
1506 fail2:
1507 EFSYS_PROBE(fail2);
1508 fail1:
1509 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1510
1511 return (rc);
1512 }
1513
1514 static __checkReturn efx_rc_t
efx_nic_dma_reconfigure_regioned(__in efx_nic_t * enp)1515 efx_nic_dma_reconfigure_regioned(
1516 __in efx_nic_t *enp)
1517 {
1518 efx_rc_t rc;
1519
1520 rc = efx_mcdi_set_nic_addr_regions(enp,
1521 &enp->en_dma.end_u.endu_region_info);
1522 if (rc != 0)
1523 goto fail1;
1524
1525 return (0);
1526
1527 fail1:
1528 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1529
1530 return (rc);
1531
1532 }
1533
1534 __checkReturn efx_rc_t
efx_nic_dma_reconfigure(__in efx_nic_t * enp)1535 efx_nic_dma_reconfigure(
1536 __in efx_nic_t *enp)
1537 {
1538 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
1539 efx_rc_t rc;
1540
1541 switch (encp->enc_dma_mapping) {
1542 case EFX_NIC_DMA_MAPPING_UNKNOWN:
1543 case EFX_NIC_DMA_MAPPING_FLAT:
1544 /* Nothing to do */
1545 break;
1546 case EFX_NIC_DMA_MAPPING_REGIONED:
1547 rc = efx_nic_dma_reconfigure_regioned(enp);
1548 if (rc != 0)
1549 goto fail1;
1550 break;
1551 default:
1552 rc = ENOTSUP;
1553 goto fail2;
1554 }
1555
1556 return (0);
1557
1558 fail2:
1559 EFSYS_PROBE(fail2);
1560 fail1:
1561 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1562
1563 return (rc);
1564 }
1565
1566 static __checkReturn efx_rc_t
efx_nic_dma_unknown_map(__in efx_nic_t * enp,__in efx_nic_dma_addr_type_t addr_type,__in efsys_dma_addr_t trgt_addr,__in size_t len,__out efsys_dma_addr_t * nic_addrp)1567 efx_nic_dma_unknown_map(
1568 __in efx_nic_t *enp,
1569 __in efx_nic_dma_addr_type_t addr_type,
1570 __in efsys_dma_addr_t trgt_addr,
1571 __in size_t len,
1572 __out efsys_dma_addr_t *nic_addrp)
1573 {
1574 efx_rc_t rc;
1575
1576 /* This function may be called before the NIC has been probed. */
1577 if (enp->en_mod_flags & EFX_MOD_PROBE) {
1578 EFSYS_ASSERT3U(efx_nic_cfg_get(enp)->enc_dma_mapping, ==,
1579 EFX_NIC_DMA_MAPPING_UNKNOWN);
1580 }
1581
1582 switch (addr_type) {
1583 case EFX_NIC_DMA_ADDR_MCDI_BUF:
1584 /*
1585 * MC cares about MCDI buffer mapping itself since it cannot
1586 * be really mapped using MCDI because mapped MCDI
1587 * buffer is required to execute MCDI commands.
1588 */
1589 *nic_addrp = trgt_addr;
1590 break;
1591
1592 case EFX_NIC_DMA_ADDR_MAC_STATS_BUF:
1593 case EFX_NIC_DMA_ADDR_EVENT_RING:
1594 case EFX_NIC_DMA_ADDR_RX_RING:
1595 case EFX_NIC_DMA_ADDR_TX_RING:
1596 case EFX_NIC_DMA_ADDR_RX_BUF:
1597 case EFX_NIC_DMA_ADDR_TX_BUF:
1598 /* Mapping type must be discovered first */
1599 rc = EFAULT;
1600 goto fail1;
1601
1602 default:
1603 rc = EINVAL;
1604 goto fail2;
1605 }
1606
1607 return (0);
1608
1609 fail2:
1610 EFSYS_PROBE(fail2);
1611 fail1:
1612 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1613
1614 return (rc);
1615 }
1616
1617 static __checkReturn efx_rc_t
efx_nic_dma_flat_map(__in efx_nic_t * enp,__in efx_nic_dma_addr_type_t addr_type,__in efsys_dma_addr_t trgt_addr,__in size_t len,__out efsys_dma_addr_t * nic_addrp)1618 efx_nic_dma_flat_map(
1619 __in efx_nic_t *enp,
1620 __in efx_nic_dma_addr_type_t addr_type,
1621 __in efsys_dma_addr_t trgt_addr,
1622 __in size_t len,
1623 __out efsys_dma_addr_t *nic_addrp)
1624 {
1625 _NOTE(ARGUNUSED(addr_type, len))
1626
1627 EFSYS_ASSERT3U(efx_nic_cfg_get(enp)->enc_dma_mapping, ==,
1628 EFX_NIC_DMA_MAPPING_FLAT);
1629
1630 /* No re-mapping is required */
1631 *nic_addrp = trgt_addr;
1632
1633 return (0);
1634 }
1635
1636 static __checkReturn efx_rc_t
efx_nic_dma_regioned_map(__in efx_nic_t * enp,__in efx_nic_dma_addr_type_t addr_type,__in efsys_dma_addr_t trgt_addr,__in size_t len,__out efsys_dma_addr_t * nic_addrp)1637 efx_nic_dma_regioned_map(
1638 __in efx_nic_t *enp,
1639 __in efx_nic_dma_addr_type_t addr_type,
1640 __in efsys_dma_addr_t trgt_addr,
1641 __in size_t len,
1642 __out efsys_dma_addr_t *nic_addrp)
1643 {
1644 const efx_nic_dma_region_t *region;
1645 efsys_lock_state_t state;
1646 efx_rc_t rc;
1647
1648 if (efx_nic_cfg_get(enp)->enc_dma_mapping !=
1649 EFX_NIC_DMA_MAPPING_REGIONED) {
1650 rc = EINVAL;
1651 goto fail1;
1652 }
1653
1654 switch (addr_type) {
1655 case EFX_NIC_DMA_ADDR_MCDI_BUF:
1656 case EFX_NIC_DMA_ADDR_MAC_STATS_BUF:
1657 /*
1658 * MC cares about MCDI buffer mapping itself since it cannot
1659 * be really mapped using MCDI because mapped MCDI buffer is
1660 * required to execute MCDI commands. It is not a problem
1661 * for MAC stats buffer, but since MC can care about mapping
1662 * itself, it may be done for MAC stats buffer as well.
1663 */
1664 *nic_addrp = trgt_addr;
1665 goto out;
1666
1667 case EFX_NIC_DMA_ADDR_EVENT_RING:
1668 case EFX_NIC_DMA_ADDR_RX_RING:
1669 case EFX_NIC_DMA_ADDR_TX_RING:
1670 case EFX_NIC_DMA_ADDR_RX_BUF:
1671 case EFX_NIC_DMA_ADDR_TX_BUF:
1672 /* Rings and buffer addresses should be mapped */
1673 break;
1674
1675 default:
1676 rc = EINVAL;
1677 goto fail2;
1678 }
1679
1680 EFSYS_LOCK(enp->en_eslp, state);
1681
1682 rc = efx_nic_dma_config_regioned_find_region(enp, trgt_addr, len,
1683 ®ion);
1684 if (rc != 0)
1685 goto fail3;
1686
1687 *nic_addrp = region->endr_nic_base +
1688 (trgt_addr - region->endr_trgt_base);
1689
1690 EFSYS_UNLOCK(enp->en_eslp, state);
1691
1692 out:
1693 return (0);
1694
1695 fail3:
1696 EFSYS_PROBE(fail3);
1697 EFSYS_UNLOCK(enp->en_eslp, state);
1698 fail2:
1699 EFSYS_PROBE(fail2);
1700 fail1:
1701 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1702
1703 return (rc);
1704 }
1705
1706 __checkReturn efx_rc_t
efx_nic_dma_map(__in efx_nic_t * enp,__in efx_nic_dma_addr_type_t addr_type,__in efsys_dma_addr_t trgt_addr,__in size_t len,__out efsys_dma_addr_t * nic_addrp)1707 efx_nic_dma_map(
1708 __in efx_nic_t *enp,
1709 __in efx_nic_dma_addr_type_t addr_type,
1710 __in efsys_dma_addr_t trgt_addr,
1711 __in size_t len,
1712 __out efsys_dma_addr_t *nic_addrp)
1713 {
1714 efx_nic_dma_mapping_t mapping;
1715 efx_rc_t rc;
1716
1717 /*
1718 * We cannot check configuration of a NIC that hasn't been probed.
1719 * Use EFX_NIC_DMA_MAPPING_UNKNOWN by default.
1720 */
1721 if ((enp->en_mod_flags & EFX_MOD_PROBE) == 0)
1722 mapping = EFX_NIC_DMA_MAPPING_UNKNOWN;
1723 else
1724 mapping = efx_nic_cfg_get(enp)->enc_dma_mapping;
1725
1726 switch (mapping) {
1727 case EFX_NIC_DMA_MAPPING_UNKNOWN:
1728 rc = efx_nic_dma_unknown_map(enp, addr_type, trgt_addr,
1729 len, nic_addrp);
1730 if (rc != 0)
1731 goto fail1;
1732 break;
1733 case EFX_NIC_DMA_MAPPING_FLAT:
1734 rc = efx_nic_dma_flat_map(enp, addr_type, trgt_addr,
1735 len, nic_addrp);
1736 if (rc != 0)
1737 goto fail2;
1738 break;
1739 case EFX_NIC_DMA_MAPPING_REGIONED:
1740 rc = efx_nic_dma_regioned_map(enp, addr_type, trgt_addr,
1741 len, nic_addrp);
1742 if (rc != 0)
1743 goto fail3;
1744 break;
1745 default:
1746 rc = ENOTSUP;
1747 goto fail4;
1748 }
1749
1750 return (0);
1751
1752 fail4:
1753 EFSYS_PROBE(fail4);
1754 fail3:
1755 EFSYS_PROBE(fail3);
1756 fail2:
1757 EFSYS_PROBE(fail2);
1758 fail1:
1759 EFSYS_PROBE1(fail1, efx_rc_t, rc);
1760
1761 return (rc);
1762 }
1763