/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2012-2019 Solarflare Communications Inc.
 */

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_MCDI
#include "mcdi_mon.h"
#endif

#if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()

#include "ef10_tlv_layout.h"

	__checkReturn	efx_rc_t
efx_mcdi_get_port_assignment(
	__in		efx_nic_t *enp,
	__out		uint32_t *portp)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN,
		MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN);
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));

	req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	*portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_get_port_modes(
	__in		efx_nic_t *enp,
	__out		uint32_t *modesp,
	__out_opt	uint32_t *current_modep,
	__out_opt	uint32_t *default_modep)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_MODES_IN_LEN,
		MC_CMD_GET_PORT_MODES_OUT_LEN);
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));

	req.emr_cmd = MC_CMD_GET_PORT_MODES;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	/*
	 * Require only Modes and DefaultMode fields, unless the current mode
	 * was requested (CurrentMode field was added for Medford).
	 */
	if (req.emr_out_length_used <
	    MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) {
		rc = EMSGSIZE;
		goto fail2;
	}
	if ((current_modep != NULL) && (req.emr_out_length_used <
	    MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) {
		rc = EMSGSIZE;
		goto fail3;
	}

	*modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES);

	if (current_modep != NULL) {
		*current_modep = MCDI_OUT_DWORD(req,
		    GET_PORT_MODES_OUT_CURRENT_MODE);
	}

	if (default_modep != NULL) {
		*default_modep = MCDI_OUT_DWORD(req,
		    GET_PORT_MODES_OUT_DEFAULT_MODE);
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
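
/*
 * Illustrative sketch only (not part of libefx, compiled out): how a caller
 * might combine the two MCDI wrappers above to discover its assigned port
 * and the available port modes. The optional outputs of
 * efx_mcdi_get_port_modes() may be NULL when the caller does not need the
 * current or default mode; older firmware may not report the current mode,
 * in which case the call fails with EMSGSIZE. Names below are hypothetical.
 */
#if 0
static	__checkReturn	efx_rc_t
example_query_port_info(
	__in		efx_nic_t *enp)
{
	uint32_t port;
	uint32_t modes;
	uint32_t current_mode;
	efx_rc_t rc;

	if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0)
		return (rc);

	/* Ask for the current mode but not the default mode. */
	if ((rc = efx_mcdi_get_port_modes(enp, &modes, &current_mode,
	    NULL)) != 0)
		return (rc);

	/* ... use port, modes and current_mode ... */
	(void) port;
	(void) modes;
	(void) current_mode;

	return (0);
}
#endif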

	__checkReturn	efx_rc_t
ef10_nic_get_port_mode_bandwidth(
	__in		efx_nic_t *enp,
	__out		uint32_t *bandwidth_mbpsp)
{
	uint32_t port_modes;
	uint32_t current_mode;
	efx_port_t *epp = &(enp->en_port);

	uint32_t single_lane;
	uint32_t dual_lane;
	uint32_t quad_lane;
	uint32_t bandwidth;
	efx_rc_t rc;

	if ((rc = efx_mcdi_get_port_modes(enp, &port_modes,
	    &current_mode, NULL)) != 0) {
		/* No port mode info available. */
		goto fail1;
	}

	if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_25000FDX))
		single_lane = 25000;
	else
		single_lane = 10000;

	if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_50000FDX))
		dual_lane = 50000;
	else
		dual_lane = 20000;

	if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_100000FDX))
		quad_lane = 100000;
	else
		quad_lane = 40000;

	switch (current_mode) {
	case TLV_PORT_MODE_1x1_NA:			/* mode 0 */
		bandwidth = single_lane;
		break;
	case TLV_PORT_MODE_1x2_NA:			/* mode 10 */
	case TLV_PORT_MODE_NA_1x2:			/* mode 11 */
		bandwidth = dual_lane;
		break;
	case TLV_PORT_MODE_1x1_1x1:			/* mode 2 */
		bandwidth = single_lane + single_lane;
		break;
	case TLV_PORT_MODE_4x1_NA:			/* mode 4 */
	case TLV_PORT_MODE_NA_4x1:			/* mode 8 */
		bandwidth = 4 * single_lane;
		break;
	case TLV_PORT_MODE_2x1_2x1:			/* mode 5 */
		bandwidth = (2 * single_lane) + (2 * single_lane);
		break;
	case TLV_PORT_MODE_1x2_1x2:			/* mode 12 */
		bandwidth = dual_lane + dual_lane;
		break;
	case TLV_PORT_MODE_1x2_2x1:			/* mode 17 */
	case TLV_PORT_MODE_2x1_1x2:			/* mode 18 */
		bandwidth = dual_lane + (2 * single_lane);
		break;
	/* Legacy Medford-only mode. Do not use (see bug63270) */
	case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2:	/* mode 9 */
		bandwidth = 4 * single_lane;
		break;
	case TLV_PORT_MODE_1x4_NA:			/* mode 1 */
	case TLV_PORT_MODE_NA_1x4:			/* mode 22 */
		bandwidth = quad_lane;
		break;
	case TLV_PORT_MODE_2x2_NA:			/* mode 13 */
	case TLV_PORT_MODE_NA_2x2:			/* mode 14 */
		bandwidth = 2 * dual_lane;
		break;
	case TLV_PORT_MODE_1x4_2x1:			/* mode 6 */
	case TLV_PORT_MODE_2x1_1x4:			/* mode 7 */
		bandwidth = quad_lane + (2 * single_lane);
		break;
	case TLV_PORT_MODE_1x4_1x2:			/* mode 15 */
	case TLV_PORT_MODE_1x2_1x4:			/* mode 16 */
		bandwidth = quad_lane + dual_lane;
		break;
	case TLV_PORT_MODE_1x4_1x4:			/* mode 3 */
		bandwidth = quad_lane + quad_lane;
		break;
	default:
		rc = EINVAL;
		goto fail2;
	}

	*bandwidth_mbpsp = bandwidth;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
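
/*
 * Worked example (illustrative note): with a PHY that advertises 25G full
 * duplex but neither 50G nor 100G, the lane speeds above resolve to
 * single_lane = 25000, dual_lane = 20000 and quad_lane = 40000 Mbps, so:
 *
 *   TLV_PORT_MODE_1x1_1x1 (mode 2):  25000 + 25000             =  50000 Mbps
 *   TLV_PORT_MODE_2x1_2x1 (mode 5):  (2 * 25000) + (2 * 25000) = 100000 Mbps
 *   TLV_PORT_MODE_1x4_1x2 (mode 15): 40000 + 20000             =  60000 Mbps
 */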

#endif	/* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */

#if EFX_OPTS_EF10()

	__checkReturn	efx_rc_t
efx_mcdi_vadaptor_alloc(
	__in		efx_nic_t *enp,
	__in		uint32_t port_id)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_ALLOC_IN_LEN,
		MC_CMD_VADAPTOR_ALLOC_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_VADAPTOR_ALLOC;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN;

	MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
	MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS,
	    VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED,
	    enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 1 : 0);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_vadaptor_free(
	__in		efx_nic_t *enp,
	__in		uint32_t port_id)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_FREE_IN_LEN,
		MC_CMD_VADAPTOR_FREE_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_VADAPTOR_FREE;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN;

	MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
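
/*
 * Illustrative sketch only (not part of libefx, compiled out): a hypothetical
 * caller pairing the vadaptor alloc/free wrappers above. EVB_PORT_ID_ASSIGNED
 * is the firmware-assigned upstream port id also used elsewhere in this file;
 * error handling is reduced to the bare minimum.
 */
#if 0
static	__checkReturn	efx_rc_t
example_with_vadaptor(
	__in		efx_nic_t *enp)
{
	efx_rc_t rc;

	if ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0)
		return (rc);

	/* ... datapath setup using the vadaptor would go here ... */

	(void) efx_mcdi_vadaptor_free(enp, EVB_PORT_ID_ASSIGNED);

	return (0);
}
#endif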

#endif	/* EFX_OPTS_EF10() */

#if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()

	__checkReturn	efx_rc_t
efx_mcdi_get_mac_address_pf(
	__in			efx_nic_t *enp,
	__out_ecount_opt(6)	uint8_t mac_addrp[6])
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_MAC_ADDRESSES_IN_LEN,
		MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));

	req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) {
		rc = ENOENT;
		goto fail3;
	}

	if (mac_addrp != NULL) {
		uint8_t *addrp;

		addrp = MCDI_OUT2(req, uint8_t,
		    GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE);

		EFX_MAC_ADDR_COPY(mac_addrp, addrp);
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_get_mac_address_vf(
	__in			efx_nic_t *enp,
	__out_ecount_opt(6)	uint8_t mac_addrp[6])
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN,
		MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));

	req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX;

	MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
	    EVB_PORT_ID_ASSIGNED);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used <
	    MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	if (MCDI_OUT_DWORD(req,
	    VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) {
		rc = ENOENT;
		goto fail3;
	}

	if (mac_addrp != NULL) {
		uint8_t *addrp;

		addrp = MCDI_OUT2(req, uint8_t,
		    VPORT_GET_MAC_ADDRESSES_OUT_MACADDR);

		EFX_MAC_ADDR_COPY(mac_addrp, addrp);
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_get_clock(
	__in		efx_nic_t *enp,
	__out		uint32_t *sys_freqp,
	__out		uint32_t *dpcpu_freqp)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CLOCK_IN_LEN,
		MC_CMD_GET_CLOCK_OUT_LEN);
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp));

	req.emr_cmd = MC_CMD_GET_CLOCK;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	*sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ);
	if (*sys_freqp == 0) {
		rc = EINVAL;
		goto fail3;
	}
	*dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ);
	if (*dpcpu_freqp == 0) {
		rc = EINVAL;
		goto fail4;
	}

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_get_rxdp_config(
	__in		efx_nic_t *enp,
	__out		uint32_t *end_paddingp)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RXDP_CONFIG_IN_LEN,
		MC_CMD_GET_RXDP_CONFIG_OUT_LEN);
	uint32_t end_padding;
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_GET_RXDP_CONFIG;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN;

	efx_mcdi_execute(enp, &req);
	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
	    GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) {
		/* RX DMA end padding is disabled */
		end_padding = 0;
	} else {
		switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA,
		    GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) {
		case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64:
			end_padding = 64;
			break;
		case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128:
			end_padding = 128;
			break;
		case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256:
			end_padding = 256;
			break;
		default:
			rc = ENOTSUP;
			goto fail2;
		}
	}

	*end_paddingp = end_padding;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_get_vector_cfg(
	__in		efx_nic_t *enp,
	__out_opt	uint32_t *vec_basep,
	__out_opt	uint32_t *pf_nvecp,
	__out_opt	uint32_t *vf_nvecp)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_VECTOR_CFG_IN_LEN,
		MC_CMD_GET_VECTOR_CFG_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_GET_VECTOR_CFG;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	if (vec_basep != NULL)
		*vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE);
	if (pf_nvecp != NULL)
		*pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF);
	if (vf_nvecp != NULL)
		*vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_alloc_vis(
	__in		efx_nic_t *enp,
	__in		uint32_t min_vi_count,
	__in		uint32_t max_vi_count,
	__out		uint32_t *vi_basep,
	__out		uint32_t *vi_countp,
	__out		uint32_t *vi_shiftp)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_VIS_IN_LEN,
		MC_CMD_ALLOC_VIS_EXT_OUT_LEN);
	efx_rc_t rc;

	if (vi_countp == NULL) {
		rc = EINVAL;
		goto fail1;
	}

	req.emr_cmd = MC_CMD_ALLOC_VIS;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count);
	MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail3;
	}

	*vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE);
	*vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT);

	/* Report VI_SHIFT if available (always zero for Huntington) */
	if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN)
		*vi_shiftp = 0;
	else
		*vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT);

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


	__checkReturn	efx_rc_t
efx_mcdi_free_vis(
	__in		efx_nic_t *enp)
{
	efx_mcdi_req_t req;
	efx_rc_t rc;

	EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0);
	EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0);

	req.emr_cmd = MC_CMD_FREE_VIS;
	req.emr_in_buf = NULL;
	req.emr_in_length = 0;
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	efx_mcdi_execute_quiet(enp, &req);

	/* Ignore EALREADY (no allocated VIs, so nothing to free) */
	if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
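
/*
 * Illustrative sketch only (not part of libefx, compiled out): a hypothetical
 * caller using the VI allocation wrappers above. The min/max counts here are
 * arbitrary; the firmware reports the number actually granted in vi_count,
 * and vi_shift is reported as zero when the extended response is not
 * available.
 */
#if 0
static	__checkReturn	efx_rc_t
example_alloc_vis(
	__in		efx_nic_t *enp)
{
	uint32_t vi_base, vi_count, vi_shift;
	efx_rc_t rc;

	if ((rc = efx_mcdi_alloc_vis(enp, 8, 64,
	    &vi_base, &vi_count, &vi_shift)) != 0)
		return (rc);

	/* ... set up queues in VIs [vi_base, vi_base + vi_count) ... */

	(void) efx_mcdi_free_vis(enp);

	return (0);
}
#endif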

#endif	/* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */

#if EFX_OPTS_EF10()

static	__checkReturn	efx_rc_t
efx_mcdi_alloc_piobuf(
	__in		efx_nic_t *enp,
	__out		efx_piobuf_handle_t *handlep)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_PIOBUF_IN_LEN,
		MC_CMD_ALLOC_PIOBUF_OUT_LEN);
	efx_rc_t rc;

	if (handlep == NULL) {
		rc = EINVAL;
		goto fail1;
	}

	req.emr_cmd = MC_CMD_ALLOC_PIOBUF;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN;

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail2;
	}

	if (req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail3;
	}

	*handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_free_piobuf(
	__in		efx_nic_t *enp,
	__in		efx_piobuf_handle_t handle)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FREE_PIOBUF_IN_LEN,
		MC_CMD_FREE_PIOBUF_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_FREE_PIOBUF;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN;

	MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle);

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_link_piobuf(
	__in		efx_nic_t *enp,
	__in		uint32_t vi_index,
	__in		efx_piobuf_handle_t handle)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LINK_PIOBUF_IN_LEN,
		MC_CMD_LINK_PIOBUF_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_LINK_PIOBUF;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN;

	MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle);
	MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_unlink_piobuf(
	__in		efx_nic_t *enp,
	__in		uint32_t vi_index)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_UNLINK_PIOBUF_IN_LEN,
		MC_CMD_UNLINK_PIOBUF_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_UNLINK_PIOBUF;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN;

	MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index);

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static			void
ef10_nic_alloc_piobufs(
	__in		efx_nic_t *enp,
	__in		uint32_t max_piobuf_count)
{
	efx_piobuf_handle_t *handlep;
	unsigned int i;

	EFSYS_ASSERT3U(max_piobuf_count, <=,
	    EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle));

	enp->en_arch.ef10.ena_piobuf_count = 0;

	for (i = 0; i < max_piobuf_count; i++) {
		handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];

		if (efx_mcdi_alloc_piobuf(enp, handlep) != 0)
			goto fail1;

		enp->en_arch.ef10.ena_pio_alloc_map[i] = 0;
		enp->en_arch.ef10.ena_piobuf_count++;
	}

	return;

fail1:
	for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
		handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];

		(void) efx_mcdi_free_piobuf(enp, *handlep);
		*handlep = EFX_PIOBUF_HANDLE_INVALID;
	}
	enp->en_arch.ef10.ena_piobuf_count = 0;
}


static			void
ef10_nic_free_piobufs(
	__in		efx_nic_t *enp)
{
	efx_piobuf_handle_t *handlep;
	unsigned int i;

	for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
		handlep = &enp->en_arch.ef10.ena_piobuf_handle[i];

		(void) efx_mcdi_free_piobuf(enp, *handlep);
		*handlep = EFX_PIOBUF_HANDLE_INVALID;
	}
	enp->en_arch.ef10.ena_piobuf_count = 0;
}

/* Sub-allocate a block from a piobuf */
	__checkReturn	efx_rc_t
ef10_nic_pio_alloc(
	__inout		efx_nic_t *enp,
	__out		uint32_t *bufnump,
	__out		efx_piobuf_handle_t *handlep,
	__out		uint32_t *blknump,
	__out		uint32_t *offsetp,
	__out		size_t *sizep)
{
	efx_nic_cfg_t *encp = &enp->en_nic_cfg;
	efx_drv_cfg_t *edcp = &enp->en_drv_cfg;
	uint32_t blk_per_buf;
	uint32_t buf, blk;
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
	EFSYS_ASSERT(bufnump);
	EFSYS_ASSERT(handlep);
	EFSYS_ASSERT(blknump);
	EFSYS_ASSERT(offsetp);
	EFSYS_ASSERT(sizep);

	if ((edcp->edc_pio_alloc_size == 0) ||
	    (enp->en_arch.ef10.ena_piobuf_count == 0)) {
		rc = ENOMEM;
		goto fail1;
	}
	blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size;

	for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) {
		uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf];

		if (~(*map) == 0)
			continue;

		EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map)));
		for (blk = 0; blk < blk_per_buf; blk++) {
			if ((*map & (1u << blk)) == 0) {
				*map |= (1u << blk);
				goto done;
			}
		}
	}
	rc = ENOMEM;
	goto fail2;

done:
	*handlep = enp->en_arch.ef10.ena_piobuf_handle[buf];
	*bufnump = buf;
	*blknump = blk;
	*sizep = edcp->edc_pio_alloc_size;
	*offsetp = blk * (*sizep);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

/* Free a piobuf sub-allocated block */
	__checkReturn	efx_rc_t
ef10_nic_pio_free(
	__inout		efx_nic_t *enp,
	__in		uint32_t bufnum,
	__in		uint32_t blknum)
{
	uint32_t *map;
	efx_rc_t rc;

	if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) ||
	    (blknum >= (8 * sizeof (*map)))) {
		rc = EINVAL;
		goto fail1;
	}

	map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum];
	if ((*map & (1u << blknum)) == 0) {
		rc = ENOENT;
		goto fail2;
	}
	*map &= ~(1u << blknum);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_nic_pio_link(
	__inout		efx_nic_t *enp,
	__in		uint32_t vi_index,
	__in		efx_piobuf_handle_t handle)
{
	return (efx_mcdi_link_piobuf(enp, vi_index, handle));
}

	__checkReturn	efx_rc_t
ef10_nic_pio_unlink(
	__inout		efx_nic_t *enp,
	__in		uint32_t vi_index)
{
	return (efx_mcdi_unlink_piobuf(enp, vi_index));
}
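
/*
 * Illustrative sketch only (not part of libefx, compiled out): the expected
 * life cycle of a PIO block as a hypothetical client driver might drive it.
 * The piobufs themselves are allocated by ef10_nic_alloc_piobufs(); a client
 * that wants PIO must first raise edc_max_piobuf_count and
 * edc_pio_alloc_size above their zero defaults (see ef10_nic_probe() later
 * in this file), otherwise ef10_nic_pio_alloc() fails with ENOMEM. A TxQ
 * sub-allocates a block, links the containing piobuf to its VI, and tears
 * both down in reverse order.
 */
#if 0
static	__checkReturn	efx_rc_t
example_pio_attach(
	__in		efx_nic_t *enp,
	__in		uint32_t vi_index)
{
	efx_piobuf_handle_t handle;
	uint32_t bufnum, blknum, offset;
	size_t size;
	efx_rc_t rc;

	if ((rc = ef10_nic_pio_alloc(enp, &bufnum, &handle, &blknum,
	    &offset, &size)) != 0)
		return (rc);

	if ((rc = ef10_nic_pio_link(enp, vi_index, handle)) != 0) {
		(void) ef10_nic_pio_free(enp, bufnum, blknum);
		return (rc);
	}

	/* ... push descriptors via the PIO window at (offset, size) ... */

	(void) ef10_nic_pio_unlink(enp, vi_index);
	(void) ef10_nic_pio_free(enp, bufnum, blknum);

	return (0);
}
#endif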

#endif	/* EFX_OPTS_EF10() */

#if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()

static	__checkReturn	efx_rc_t
ef10_mcdi_get_pf_count(
	__in		efx_nic_t *enp,
	__out		uint32_t *pf_countp)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PF_COUNT_IN_LEN,
		MC_CMD_GET_PF_COUNT_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_GET_PF_COUNT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	*pf_countp = *MCDI_OUT(req, uint8_t,
	    MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST);

	EFSYS_ASSERT(*pf_countp != 0);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
ef10_get_datapath_caps(
	__in		efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN,
		MC_CMD_GET_CAPABILITIES_V7_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_GET_CAPABILITIES;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_CAPABILITIES_V7_OUT_LEN;

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

#define	CAP_FLAGS1(_req, _flag)						\
	(MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) &		\
	(1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))

#define	CAP_FLAGS2(_req, _flag)						\
	(((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \
	    (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) &	\
	    (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))))

#define	CAP_FLAGS3(_req, _flag)						\
	(((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V7_OUT_LEN) && \
	    (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V7_OUT_FLAGS3) &	\
	    (1u << (MC_CMD_GET_CAPABILITIES_V7_OUT_ ## _flag ## _LBN))))
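
/*
 * For example, CAP_FLAGS2(req, TX_TSO_V2) tests bit
 * MC_CMD_GET_CAPABILITIES_V2_OUT_TX_TSO_V2_LBN of the FLAGS2 dword, and is
 * guarded by a response-length check so that firmware returning only the
 * shorter pre-V2 response reports the capability as absent instead of the
 * check reading fields the firmware did not provide. CAP_FLAGS3 applies the
 * same pattern against the V7 response length for the FLAGS3 dword.
 */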

	/* Check if RXDP firmware inserts 14 byte prefix */
	if (CAP_FLAGS1(req, RX_PREFIX_LEN_14))
		encp->enc_rx_prefix_size = 14;
	else
		encp->enc_rx_prefix_size = 0;

#if EFSYS_OPT_RX_SCALE
	/* Check if the firmware supports additional RSS modes */
	if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES))
		encp->enc_rx_scale_additional_modes_supported = B_TRUE;
	else
		encp->enc_rx_scale_additional_modes_supported = B_FALSE;
#endif	/* EFSYS_OPT_RX_SCALE */

	/* Check if the firmware supports TSO */
	if (CAP_FLAGS1(req, TX_TSO))
		encp->enc_fw_assisted_tso_enabled = B_TRUE;
	else
		encp->enc_fw_assisted_tso_enabled = B_FALSE;

	/* Check if the firmware supports FATSOv2 */
	if (CAP_FLAGS2(req, TX_TSO_V2)) {
		encp->enc_fw_assisted_tso_v2_enabled = B_TRUE;
		encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req,
		    GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS);
	} else {
		encp->enc_fw_assisted_tso_v2_enabled = B_FALSE;
		encp->enc_fw_assisted_tso_v2_n_contexts = 0;
	}

	/* Check if the firmware supports FATSOv2 encap */
	if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP))
		encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE;
	else
		encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE;

	/* Check if TSOv3 is supported */
	if (CAP_FLAGS2(req, TX_TSO_V3))
		encp->enc_tso_v3_enabled = B_TRUE;
	else
		encp->enc_tso_v3_enabled = B_FALSE;

	/* Check if the firmware has vadapter/vport/vswitch support */
	if (CAP_FLAGS1(req, EVB))
		encp->enc_datapath_cap_evb = B_TRUE;
	else
		encp->enc_datapath_cap_evb = B_FALSE;

	/* Check if the firmware supports vport reconfiguration */
	if (CAP_FLAGS1(req, VPORT_RECONFIGURE))
		encp->enc_vport_reconfigure_supported = B_TRUE;
	else
		encp->enc_vport_reconfigure_supported = B_FALSE;

	/* Check if the firmware supports VLAN insertion */
	if (CAP_FLAGS1(req, TX_VLAN_INSERTION))
		encp->enc_hw_tx_insert_vlan_enabled = B_TRUE;
	else
		encp->enc_hw_tx_insert_vlan_enabled = B_FALSE;

	/* Check if the firmware supports RX event batching */
	if (CAP_FLAGS1(req, RX_BATCHING))
		encp->enc_rx_batching_enabled = B_TRUE;
	else
		encp->enc_rx_batching_enabled = B_FALSE;

	/*
	 * Even if batching isn't reported as supported, we may still get
	 * batched events (see bug61153).
	 */
	encp->enc_rx_batch_max = 16;

	/* Check if the firmware supports disabling scatter on RXQs */
	if (CAP_FLAGS1(req, RX_DISABLE_SCATTER))
		encp->enc_rx_disable_scatter_supported = B_TRUE;
	else
		encp->enc_rx_disable_scatter_supported = B_FALSE;

	/* Check if the firmware supports packed stream mode */
	if (CAP_FLAGS1(req, RX_PACKED_STREAM))
		encp->enc_rx_packed_stream_supported = B_TRUE;
	else
		encp->enc_rx_packed_stream_supported = B_FALSE;

	/*
	 * Check if the firmware supports configurable buffer sizes
	 * for packed stream mode (otherwise buffer size is 1Mbyte)
	 */
	if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS))
		encp->enc_rx_var_packed_stream_supported = B_TRUE;
	else
		encp->enc_rx_var_packed_stream_supported = B_FALSE;

	/* Check if the firmware supports equal stride super-buffer mode */
	if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER))
		encp->enc_rx_es_super_buffer_supported = B_TRUE;
	else
		encp->enc_rx_es_super_buffer_supported = B_FALSE;

	/* Check if the firmware supports FW subvariant w/o Tx checksumming */
	if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM))
		encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE;
	else
		encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE;

	/* Check if the firmware supports set mac with running filters */
	if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED))
		encp->enc_allow_set_mac_with_installed_filters = B_TRUE;
	else
		encp->enc_allow_set_mac_with_installed_filters = B_FALSE;

	/*
	 * Check if firmware supports the extended MC_CMD_SET_MAC, which allows
	 * specifying which parameters to configure.
	 */
	if (CAP_FLAGS1(req, SET_MAC_ENHANCED))
		encp->enc_enhanced_set_mac_supported = B_TRUE;
	else
		encp->enc_enhanced_set_mac_supported = B_FALSE;

	/*
	 * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which
	 * allows us to let the firmware choose the settings to use on an EVQ.
	 */
	if (CAP_FLAGS2(req, INIT_EVQ_V2))
		encp->enc_init_evq_v2_supported = B_TRUE;
	else
		encp->enc_init_evq_v2_supported = B_FALSE;

	/*
	 * Check if firmware supports extended width event queues, which have
	 * a different event descriptor layout.
	 */
	if (CAP_FLAGS3(req, EXTENDED_WIDTH_EVQS_SUPPORTED))
		encp->enc_init_evq_extended_width_supported = B_TRUE;
	else
		encp->enc_init_evq_extended_width_supported = B_FALSE;

	/*
	 * Check if the NO_CONT_EV mode for RX events is supported.
	 */
	if (CAP_FLAGS2(req, INIT_RXQ_NO_CONT_EV))
		encp->enc_no_cont_ev_mode_supported = B_TRUE;
	else
		encp->enc_no_cont_ev_mode_supported = B_FALSE;

	/*
	 * Check if buffer size may and must be specified on INIT_RXQ.
	 * It may always be specified to efx_rx_qcreate(), but will be
	 * just kept libefx internal if MCDI does not support it.
	 */
	if (CAP_FLAGS2(req, INIT_RXQ_WITH_BUFFER_SIZE))
		encp->enc_init_rxq_with_buffer_size = B_TRUE;
	else
		encp->enc_init_rxq_with_buffer_size = B_FALSE;

	/*
	 * Check if firmware-verified NVRAM updates must be used.
	 *
	 * The firmware trusted installer requires all NVRAM updates to use
	 * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update)
	 * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated
	 * partition and report the result).
	 */
	if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT))
		encp->enc_nvram_update_verify_result_supported = B_TRUE;
	else
		encp->enc_nvram_update_verify_result_supported = B_FALSE;

	if (CAP_FLAGS2(req, NVRAM_UPDATE_POLL_VERIFY_RESULT))
		encp->enc_nvram_update_poll_verify_result_supported = B_TRUE;
	else
		encp->enc_nvram_update_poll_verify_result_supported = B_FALSE;

	/*
	 * Check if firmware update via the BUNDLE partition is supported
	 */
	if (CAP_FLAGS2(req, BUNDLE_UPDATE))
		encp->enc_nvram_bundle_update_supported = B_TRUE;
	else
		encp->enc_nvram_bundle_update_supported = B_FALSE;

	/*
	 * Check if firmware provides packet memory and Rx datapath
	 * counters.
	 */
	if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS))
		encp->enc_pm_and_rxdp_counters = B_TRUE;
	else
		encp->enc_pm_and_rxdp_counters = B_FALSE;

	/*
	 * Check if the 40G MAC hardware is capable of reporting
	 * statistics for Tx size bins.
	 */
	if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS))
		encp->enc_mac_stats_40g_tx_size_bins = B_TRUE;
	else
		encp->enc_mac_stats_40g_tx_size_bins = B_FALSE;

	/*
	 * Check if firmware supports VXLAN and NVGRE tunnels.
	 * The capability indicates Geneve protocol support as well.
	 */
	if (CAP_FLAGS1(req, VXLAN_NVGRE)) {
		encp->enc_tunnel_encapsulations_supported =
		    (1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
		    (1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
		    (1u << EFX_TUNNEL_PROTOCOL_NVGRE);

		EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES ==
		    MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM);
		encp->enc_tunnel_config_udp_entries_max =
		    EFX_TUNNEL_MAXNENTRIES;
	} else {
		encp->enc_tunnel_config_udp_entries_max = 0;
	}

	/*
	 * Check if firmware reports the VI window mode.
	 * Medford2 has a variable VI window size (8K, 16K or 64K).
	 * Medford and Huntington have a fixed 8K VI window size.
	 */
	if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) {
		uint8_t mode =
		    MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);

		switch (mode) {
		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K:
			encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
			break;
		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K:
			encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K;
			break;
		case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K:
			encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K;
			break;
		default:
			encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
			break;
		}
	} else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) ||
	    (enp->en_family == EFX_FAMILY_MEDFORD)) {
		/* Huntington and Medford have fixed 8K window size */
		encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K;
	} else {
		encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID;
	}

	/* Check if firmware supports extended MAC stats. */
	if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
		/* Extended stats buffer supported */
		encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req,
		    GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
	} else {
		/* Use Siena-compatible legacy MAC stats */
		encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS;
	}

	if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2)
		encp->enc_fec_counters = B_TRUE;
	else
		encp->enc_fec_counters = B_FALSE;

	/* Check if the firmware provides head-of-line blocking counters */
	if (CAP_FLAGS2(req, RXDP_HLB_IDLE))
		encp->enc_hlb_counters = B_TRUE;
	else
		encp->enc_hlb_counters = B_FALSE;

#if EFSYS_OPT_RX_SCALE
	if (CAP_FLAGS1(req, RX_RSS_LIMITED)) {
		/* Only one exclusive RSS context is available per port. */
		encp->enc_rx_scale_max_exclusive_contexts = 1;

		switch (enp->en_family) {
		case EFX_FAMILY_MEDFORD2:
			encp->enc_rx_scale_hash_alg_mask =
			    (1U << EFX_RX_HASHALG_TOEPLITZ);
			break;

		case EFX_FAMILY_MEDFORD:
		case EFX_FAMILY_HUNTINGTON:
			/*
			 * Packed stream firmware variant maintains a
			 * non-standard algorithm for hash computation.
			 * It implies explicit XORing together
			 * source + destination IP addresses (or last
			 * four bytes in the case of IPv6) and using the
			 * resulting value as the input to a Toeplitz hash.
			 */
			encp->enc_rx_scale_hash_alg_mask =
			    (1U << EFX_RX_HASHALG_PACKED_STREAM);
			break;

		default:
			rc = EINVAL;
			goto fail3;
		}

		/* Port numbers cannot contribute to the hash value */
		encp->enc_rx_scale_l4_hash_supported = B_FALSE;
	} else {
		/*
		 * Maximum number of exclusive RSS contexts.
		 * EF10 hardware supports 64 in total, but 6 are reserved
		 * for shared contexts. They are a global resource so
		 * not all may be available.
		 */
		encp->enc_rx_scale_max_exclusive_contexts = 64 - 6;

		encp->enc_rx_scale_hash_alg_mask =
		    (1U << EFX_RX_HASHALG_TOEPLITZ);

		/*
		 * It is possible to use port numbers as
		 * the input data for hash computation.
		 */
		encp->enc_rx_scale_l4_hash_supported = B_TRUE;
	}
#endif	/* EFSYS_OPT_RX_SCALE */

	/* Check if the firmware supports "FLAG" and "MARK" filter actions */
	if (CAP_FLAGS2(req, FILTER_ACTION_FLAG))
		encp->enc_filter_action_flag_supported = B_TRUE;
	else
		encp->enc_filter_action_flag_supported = B_FALSE;

	if (CAP_FLAGS2(req, FILTER_ACTION_MARK))
		encp->enc_filter_action_mark_supported = B_TRUE;
	else
		encp->enc_filter_action_mark_supported = B_FALSE;

	/* Get maximum supported value for "MARK" filter action */
	if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN)
		encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req,
		    GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX);
	else
		encp->enc_filter_action_mark_max = 0;

#undef CAP_FLAGS1
#undef CAP_FLAGS2
#undef CAP_FLAGS3

	return (0);

#if EFSYS_OPT_RX_SCALE
fail3:
	EFSYS_PROBE(fail3);
#endif	/* EFSYS_OPT_RX_SCALE */
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


#define	EF10_LEGACY_PF_PRIVILEGE_MASK					\
	(MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN |				\
	MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK |				\
	MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD |				\
	MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP |				\
	MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS |			\
	MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING |			\
	MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST |				\
	MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST |			\
	MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST |			\
	MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST |			\
	MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS)

#define	EF10_LEGACY_VF_PRIVILEGE_MASK	0


	__checkReturn	efx_rc_t
ef10_get_privilege_mask(
	__in		efx_nic_t *enp,
	__out		uint32_t *maskp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t mask;
	efx_rc_t rc;

	if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf,
	    &mask)) != 0) {
		if (rc != ENOTSUP)
			goto fail1;

		/* Fallback for old firmware without privilege mask support */
		if (EFX_PCI_FUNCTION_IS_PF(encp)) {
			/* Assume PF has admin privilege */
			mask = EF10_LEGACY_PF_PRIVILEGE_MASK;
		} else {
			/* VF is always unprivileged by default */
			mask = EF10_LEGACY_VF_PRIVILEGE_MASK;
		}
	}

	*maskp = mask;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


#define	EFX_EXT_PORT_MAX	4
#define	EFX_EXT_PORT_NA		0xFF

/*
 * Table of mapping schemes from port number to external number.
 *
 * Each port number ultimately corresponds to a connector: either as part of
 * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on
 * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T
 * "Salina"). In general:
 *
 * Port number (0-based)
 *     |
 *   port mapping (n:1)
 *     |
 *     v
 * External port number (1-based)
 *     |
 *   fixed (1:1) or cable assembly (1:m)
 *     |
 *     v
 * Connector
 *
 * The external numbering refers to the cages or magjacks on the board,
 * as visibly annotated on the board or back panel. This table describes
 * how to determine which external cage/magjack corresponds to the port
 * numbers used by the driver.
 *
 * The count of consecutive port numbers that map to each external number
 * is determined by the chip family and the current port mode.
 *
 * For the Huntington family, the current port mode cannot be discovered,
 * but a single mapping is used by all modes for a given chip variant,
 * so the mapping used is instead the last match in the table to the full
 * set of port modes to which the NIC can be configured. Therefore the
 * ordering of entries in the mapping table is significant.
 */
static struct ef10_external_port_map_s {
	efx_family_t	family;
	uint32_t	modes_mask;
	uint8_t		base_port[EFX_EXT_PORT_MAX];
} __ef10_external_port_mappings[] = {
	/*
	 * Modes used by Huntington family controllers where each port
	 * number maps to a separate cage.
	 * SFN7x22F (Torino):
	 *	port 0 -> cage 1
	 *	port 1 -> cage 2
	 * SFN7xx4F (Pavia):
	 *	port 0 -> cage 1
	 *	port 1 -> cage 2
	 *	port 2 -> cage 3
	 *	port 3 -> cage 4
	 */
	{
		EFX_FAMILY_HUNTINGTON,
		(1U << TLV_PORT_MODE_10G) |			/* mode 0 */
		(1U << TLV_PORT_MODE_10G_10G) |			/* mode 2 */
		(1U << TLV_PORT_MODE_10G_10G_10G_10G),		/* mode 4 */
		{ 0, 1, 2, 3 }
	},
	/*
	 * Modes which for Huntington identify a chip variant where 2
	 * adjacent port numbers map to each cage.
	 * SFN7x42Q (Monza):
	 *	port 0 -> cage 1
	 *	port 1 -> cage 1
	 *	port 2 -> cage 2
	 *	port 3 -> cage 2
	 */
	{
		EFX_FAMILY_HUNTINGTON,
		(1U << TLV_PORT_MODE_40G) |			/* mode 1 */
		(1U << TLV_PORT_MODE_40G_40G) |			/* mode 3 */
		(1U << TLV_PORT_MODE_40G_10G_10G) |		/* mode 6 */
		(1U << TLV_PORT_MODE_10G_10G_40G),		/* mode 7 */
		{ 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
	},
	/*
	 * Modes that on Medford allocate each port number to a separate
	 * cage.
	 *	port 0 -> cage 1
	 *	port 1 -> cage 2
	 *	port 2 -> cage 3
	 *	port 3 -> cage 4
	 */
	{
		EFX_FAMILY_MEDFORD,
		(1U << TLV_PORT_MODE_1x1_NA) |			/* mode 0 */
		(1U << TLV_PORT_MODE_1x4_NA) |			/* mode 1 */
		(1U << TLV_PORT_MODE_1x1_1x1),			/* mode 2 */
		{ 0, 1, 2, 3 }
	},
	/*
	 * Modes that on Medford allocate 2 adjacent port numbers to each
	 * cage.
	 *	port 0 -> cage 1
	 *	port 1 -> cage 1
	 *	port 2 -> cage 2
	 *	port 3 -> cage 2
	 */
	{
		EFX_FAMILY_MEDFORD,
		(1U << TLV_PORT_MODE_1x4_1x4) |			/* mode 3 */
		(1U << TLV_PORT_MODE_2x1_2x1) |			/* mode 5 */
		(1U << TLV_PORT_MODE_1x4_2x1) |			/* mode 6 */
		(1U << TLV_PORT_MODE_2x1_1x4) |			/* mode 7 */
		/* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */
		(1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2),	/* mode 9 */
		{ 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
	},
	/*
	 * Modes that on Medford allocate 4 adjacent port numbers to
	 * cage 1.
	 *	port 0 -> cage 1
	 *	port 1 -> cage 1
	 *	port 2 -> cage 1
	 *	port 3 -> cage 1
	 */
	{
		EFX_FAMILY_MEDFORD,
		/* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */
		(1U << TLV_PORT_MODE_4x1_NA),			/* mode 4 */
		{ 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
	},
	/*
	 * Modes that on Medford allocate 4 adjacent port numbers to
	 * cage 2.
	 *	port 0 -> cage 2
	 *	port 1 -> cage 2
	 *	port 2 -> cage 2
	 *	port 3 -> cage 2
	 */
	{
		EFX_FAMILY_MEDFORD,
		(1U << TLV_PORT_MODE_NA_4x1),			/* mode 8 */
		{ EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
	},
	/*
	 * Modes that on Medford2 allocate each port number to a separate
	 * cage.
	 *	port 0 -> cage 1
	 *	port 1 -> cage 2
	 *	port 2 -> cage 3
	 *	port 3 -> cage 4
	 */
	{
		EFX_FAMILY_MEDFORD2,
		(1U << TLV_PORT_MODE_1x1_NA) |			/* mode 0 */
		(1U << TLV_PORT_MODE_1x4_NA) |			/* mode 1 */
		(1U << TLV_PORT_MODE_1x1_1x1) |			/* mode 2 */
		(1U << TLV_PORT_MODE_1x4_1x4) |			/* mode 3 */
		(1U << TLV_PORT_MODE_1x2_NA) |			/* mode 10 */
		(1U << TLV_PORT_MODE_1x2_1x2) |			/* mode 12 */
		(1U << TLV_PORT_MODE_1x4_1x2) |			/* mode 15 */
		(1U << TLV_PORT_MODE_1x2_1x4),			/* mode 16 */
		{ 0, 1, 2, 3 }
	},
	/*
	 * Modes that on Medford2 allocate 1 port to cage 1 and the rest
	 * to cage 2.
	 *	port 0 -> cage 1
	 *	port 1 -> cage 2
	 *	port 2 -> cage 2
	 */
	{
		EFX_FAMILY_MEDFORD2,
		(1U << TLV_PORT_MODE_1x2_2x1) |			/* mode 17 */
		(1U << TLV_PORT_MODE_1x4_2x1),			/* mode 6 */
		{ 0, 1, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
	},
	/*
	 * Modes that on Medford2 allocate 2 adjacent port numbers to cage 1
	 * and the rest to cage 2.
	 *	port 0 -> cage 1
	 *	port 1 -> cage 1
	 *	port 2 -> cage 2
	 *	port 3 -> cage 2
	 */
	{
		EFX_FAMILY_MEDFORD2,
		(1U << TLV_PORT_MODE_2x1_2x1) |			/* mode 4 */
		(1U << TLV_PORT_MODE_2x1_1x4) |			/* mode 7 */
		(1U << TLV_PORT_MODE_2x2_NA) |			/* mode 13 */
		(1U << TLV_PORT_MODE_2x1_1x2),			/* mode 18 */
		{ 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
	},
	/*
	 * Modes that on Medford2 allocate up to 4 adjacent port numbers
	 * to cage 1.
	 *	port 0 -> cage 1
	 *	port 1 -> cage 1
	 *	port 2 -> cage 1
	 *	port 3 -> cage 1
	 */
	{
		EFX_FAMILY_MEDFORD2,
		(1U << TLV_PORT_MODE_4x1_NA),			/* mode 5 */
		{ 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
	},
	/*
	 * Modes that on Medford2 allocate up to 4 adjacent port numbers
	 * to cage 2.
	 *	port 0 -> cage 2
	 *	port 1 -> cage 2
	 *	port 2 -> cage 2
	 *	port 3 -> cage 2
	 */
	{
		EFX_FAMILY_MEDFORD2,
		(1U << TLV_PORT_MODE_NA_4x1) |			/* mode 8 */
		(1U << TLV_PORT_MODE_NA_1x2) |			/* mode 11 */
		(1U << TLV_PORT_MODE_NA_2x2),			/* mode 14 */
		{ EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
	},
	/*
	 * Modes that on Riverhead allocate each port number to a separate
	 * cage.
	 *	port 0 -> cage 1
	 *	port 1 -> cage 2
	 */
	{
		EFX_FAMILY_RIVERHEAD,
		(1U << TLV_PORT_MODE_1x1_NA) |			/* mode 0 */
		(1U << TLV_PORT_MODE_1x4_NA) |			/* mode 1 */
		(1U << TLV_PORT_MODE_1x1_1x1),			/* mode 2 */
		{ 0, 1, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA }
	},
};
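
/*
 * Worked example (illustrative note): a Medford2 NIC in port mode
 * TLV_PORT_MODE_2x1_2x1 matches the entry above with base_port
 * { 0, 2, NA, NA }. ef10_external_port_mapping() below picks, for a given
 * port, the array slot whose base_port is closest without exceeding the port
 * number: port 1 is closest to base 0 (slot 0) and port 2 matches base 2
 * exactly (slot 1), so ports 0-1 report external port 1 (cage 1) and
 * ports 2-3 report external port 2 (cage 2).
 */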
Huntington) 1738 * - infer mapping from available modes 1739 */ 1740 if ((rc = efx_mcdi_get_port_modes(enp, 1741 &port_modes, NULL, NULL)) != 0) { 1742 /* 1743 * No port mode information available 1744 * - use default mapping 1745 */ 1746 goto out; 1747 } 1748 } else { 1749 /* Only need to scan the current mode */ 1750 port_modes = 1 << current; 1751 } 1752 1753 /* 1754 * Infer the internal port -> external number mapping from 1755 * the possible port modes for this NIC. 1756 */ 1757 for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) { 1758 struct ef10_external_port_map_s *eepmp = 1759 &__ef10_external_port_mappings[i]; 1760 if (eepmp->family != enp->en_family) 1761 continue; 1762 matches = (eepmp->modes_mask & port_modes); 1763 if (matches != 0) { 1764 /* 1765 * Some modes match. For some Huntington boards 1766 * there will be multiple matches. The mapping on the 1767 * last match is used. 1768 */ 1769 mapp = eepmp; 1770 port_modes &= ~matches; 1771 } 1772 } 1773 1774 if (port_modes != 0) { 1775 /* Some advertised modes are not supported */ 1776 rc = ENOTSUP; 1777 goto fail1; 1778 } 1779 1780 out: 1781 if (mapp != NULL) { 1782 /* 1783 * External ports are assigned a sequence of consecutive 1784 * port numbers, so find the one with the closest base_port. 1785 */ 1786 uint32_t delta = EFX_EXT_PORT_NA; 1787 1788 for (i = 0; i < EFX_EXT_PORT_MAX; i++) { 1789 uint32_t base = mapp->base_port[i]; 1790 if ((base != EFX_EXT_PORT_NA) && (base <= port)) { 1791 if ((port - base) < delta) { 1792 delta = (port - base); 1793 ext_index = i; 1794 } 1795 } 1796 } 1797 } 1798 *external_portp = (uint8_t)(ext_index + 1); 1799 1800 return (0); 1801 1802 fail1: 1803 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1804 1805 return (rc); 1806 } 1807 1808 __checkReturn efx_rc_t 1809 efx_mcdi_nic_board_cfg( 1810 __in efx_nic_t *enp) 1811 { 1812 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 1813 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 1814 ef10_link_state_t els; 1815 efx_port_t *epp = &(enp->en_port); 1816 uint32_t board_type = 0; 1817 uint32_t base, nvec; 1818 uint32_t port; 1819 uint32_t mask; 1820 uint32_t pf; 1821 uint32_t vf; 1822 uint8_t mac_addr[6] = { 0 }; 1823 efx_rc_t rc; 1824 1825 /* Get the (zero-based) MCDI port number */ 1826 if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0) 1827 goto fail1; 1828 1829 /* EFX MCDI interface uses one-based port numbers */ 1830 emip->emi_port = port + 1; 1831 1832 encp->enc_assigned_port = port; 1833 1834 if ((rc = ef10_external_port_mapping(enp, port, 1835 &encp->enc_external_port)) != 0) 1836 goto fail2; 1837 1838 /* 1839 * Get PCIe function number from firmware (used for 1840 * per-function privilege and dynamic config info). 1841 * - PCIe PF: pf = PF number, vf = 0xffff. 1842 * - PCIe VF: pf = parent PF, vf = VF number. 1843 */ 1844 if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0) 1845 goto fail3; 1846 1847 encp->enc_pf = pf; 1848 encp->enc_vf = vf; 1849 1850 if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0) 1851 goto fail4; 1852 1853 /* MAC address for this function */ 1854 if (EFX_PCI_FUNCTION_IS_PF(encp)) { 1855 rc = efx_mcdi_get_mac_address_pf(enp, mac_addr); 1856 #if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 1857 /* 1858 * Disable static config checking, ONLY for manufacturing test 1859 * and setup at the factory, to allow the static config to be 1860 * installed. 
1861 */ 1862 #else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ 1863 if ((rc == 0) && (mac_addr[0] & 0x02)) { 1864 /* 1865 * If the static config does not include a global MAC 1866 * address pool then the board may return a locally 1867 * administered MAC address (this should only happen on 1868 * incorrectly programmed boards). 1869 */ 1870 rc = EINVAL; 1871 } 1872 #endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ 1873 } else { 1874 rc = efx_mcdi_get_mac_address_vf(enp, mac_addr); 1875 } 1876 if (rc != 0) 1877 goto fail5; 1878 1879 EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr); 1880 1881 /* Board configuration (legacy) */ 1882 rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL); 1883 if (rc != 0) { 1884 /* Unprivileged functions may not be able to read board cfg */ 1885 if (rc == EACCES) 1886 board_type = 0; 1887 else 1888 goto fail6; 1889 } 1890 1891 encp->enc_board_type = board_type; 1892 1893 /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */ 1894 if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0) 1895 goto fail7; 1896 1897 /* 1898 * Firmware with support for *_FEC capability bits does not 1899 * report that the corresponding *_FEC_REQUESTED bits are supported. 1900 * Add them here so that drivers understand that they are supported. 1901 */ 1902 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_BASER_FEC)) 1903 epp->ep_phy_cap_mask |= 1904 (1u << EFX_PHY_CAP_BASER_FEC_REQUESTED); 1905 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_RS_FEC)) 1906 epp->ep_phy_cap_mask |= 1907 (1u << EFX_PHY_CAP_RS_FEC_REQUESTED); 1908 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_25G_BASER_FEC)) 1909 epp->ep_phy_cap_mask |= 1910 (1u << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED); 1911 1912 /* Obtain the default PHY advertised capabilities */ 1913 if ((rc = ef10_phy_get_link(enp, &els)) != 0) 1914 goto fail8; 1915 epp->ep_default_adv_cap_mask = els.epls.epls_adv_cap_mask; 1916 epp->ep_adv_cap_mask = els.epls.epls_adv_cap_mask; 1917 1918 /* Check capabilities of running datapath firmware */ 1919 if ((rc = ef10_get_datapath_caps(enp)) != 0) 1920 goto fail9; 1921 1922 /* Get interrupt vector limits */ 1923 if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) { 1924 if (EFX_PCI_FUNCTION_IS_PF(encp)) 1925 goto fail10; 1926 1927 /* Ignore error (cannot query vector limits from a VF). */ 1928 base = 0; 1929 nvec = 1024; 1930 } 1931 encp->enc_intr_vec_base = base; 1932 encp->enc_intr_limit = nvec; 1933 1934 /* 1935 * Get the current privilege mask. Note that this may be modified 1936 * dynamically, so this value is informational only. DO NOT use 1937 * the privilege mask to check for sufficient privileges, as that 1938 * can result in time-of-check/time-of-use bugs. 
1939 */ 1940 if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0) 1941 goto fail11; 1942 encp->enc_privilege_mask = mask; 1943 1944 return (0); 1945 1946 fail11: 1947 EFSYS_PROBE(fail11); 1948 fail10: 1949 EFSYS_PROBE(fail10); 1950 fail9: 1951 EFSYS_PROBE(fail9); 1952 fail8: 1953 EFSYS_PROBE(fail8); 1954 fail7: 1955 EFSYS_PROBE(fail7); 1956 fail6: 1957 EFSYS_PROBE(fail6); 1958 fail5: 1959 EFSYS_PROBE(fail5); 1960 fail4: 1961 EFSYS_PROBE(fail4); 1962 fail3: 1963 EFSYS_PROBE(fail3); 1964 fail2: 1965 EFSYS_PROBE(fail2); 1966 fail1: 1967 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1968 1969 return (rc); 1970 } 1971 1972 __checkReturn efx_rc_t 1973 efx_mcdi_entity_reset( 1974 __in efx_nic_t *enp) 1975 { 1976 efx_mcdi_req_t req; 1977 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ENTITY_RESET_IN_LEN, 1978 MC_CMD_ENTITY_RESET_OUT_LEN); 1979 efx_rc_t rc; 1980 1981 req.emr_cmd = MC_CMD_ENTITY_RESET; 1982 req.emr_in_buf = payload; 1983 req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN; 1984 req.emr_out_buf = payload; 1985 req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN; 1986 1987 MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG, 1988 ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1); 1989 1990 efx_mcdi_execute(enp, &req); 1991 1992 if (req.emr_rc != 0) { 1993 rc = req.emr_rc; 1994 goto fail1; 1995 } 1996 1997 return (0); 1998 1999 fail1: 2000 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2001 2002 return (rc); 2003 } 2004 2005 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */ 2006 2007 #if EFX_OPTS_EF10() 2008 2009 static __checkReturn efx_rc_t 2010 ef10_set_workaround_bug26807( 2011 __in efx_nic_t *enp) 2012 { 2013 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 2014 uint32_t flags; 2015 efx_rc_t rc; 2016 2017 /* 2018 * If the bug26807 workaround is enabled, then firmware has enabled 2019 * support for chained multicast filters. Firmware will reset (FLR) 2020 * functions which have filters in the hardware filter table when the 2021 * workaround is enabled/disabled. 2022 * 2023 * We must recheck if the workaround is enabled after inserting the 2024 * first hardware filter, in case it has been changed since this check. 2025 */ 2026 rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807, 2027 B_TRUE, &flags); 2028 if (rc == 0) { 2029 encp->enc_bug26807_workaround = B_TRUE; 2030 if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) { 2031 /* 2032 * Other functions had installed filters before the 2033 * workaround was enabled, and they have been reset 2034 * by firmware. 2035 */ 2036 EFSYS_PROBE(bug26807_workaround_flr_done); 2037 /* FIXME: bump MC warm boot count ? */ 2038 } 2039 } else if (rc == EACCES) { 2040 /* 2041 * Unprivileged functions cannot enable the workaround in older 2042 * firmware. 2043 */ 2044 encp->enc_bug26807_workaround = B_FALSE; 2045 } else if ((rc == ENOTSUP) || (rc == ENOENT)) { 2046 encp->enc_bug26807_workaround = B_FALSE; 2047 } else { 2048 goto fail1; 2049 } 2050 2051 return (0); 2052 2053 fail1: 2054 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2055 2056 return (rc); 2057 } 2058 2059 static __checkReturn efx_rc_t 2060 ef10_nic_board_cfg( 2061 __in efx_nic_t *enp) 2062 { 2063 const efx_nic_ops_t *enop = enp->en_enop; 2064 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 2065 efx_rc_t rc; 2066 2067 if ((rc = efx_mcdi_nic_board_cfg(enp)) != 0) 2068 goto fail1; 2069 2070 /* 2071 * Huntington RXDP firmware inserts a 0 or 14 byte prefix. 2072 * We only support the 14 byte prefix here. 
2073 */ 2074 if (encp->enc_rx_prefix_size != 14) { 2075 rc = ENOTSUP; 2076 goto fail2; 2077 } 2078 2079 encp->enc_clk_mult = 1; /* not used for EF10 */ 2080 2081 /* Alignment for WPTR updates */ 2082 encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN; 2083 2084 encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT); 2085 /* No boundary crossing limits */ 2086 encp->enc_tx_dma_desc_boundary = 0; 2087 2088 /* 2089 * Maximum number of bytes into the frame the TCP header can start for 2090 * firmware assisted TSO to work. 2091 */ 2092 encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT; 2093 2094 /* EF10 TSO engine demands that packet header be contiguous. */ 2095 encp->enc_tx_tso_max_header_ndescs = 1; 2096 2097 /* The overall TSO header length is not limited. */ 2098 encp->enc_tx_tso_max_header_length = UINT32_MAX; 2099 2100 /* 2101 * There are no specific limitations on the number of 2102 * TSO payload descriptors. 2103 */ 2104 encp->enc_tx_tso_max_payload_ndescs = UINT32_MAX; 2105 2106 /* TSO superframe payload length is not limited. */ 2107 encp->enc_tx_tso_max_payload_length = UINT32_MAX; 2108 2109 /* 2110 * Limitation on the maximum number of outgoing packets per 2111 * TSO transaction described in SF-108452-SW. 2112 */ 2113 encp->enc_tx_tso_max_nframes = 32767; 2114 2115 /* 2116 * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use 2117 * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available 2118 * resources (allocated to this PCIe function), which is zero until 2119 * after we have allocated VIs. 2120 */ 2121 encp->enc_evq_limit = 1024; 2122 encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET; 2123 encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET; 2124 2125 encp->enc_buftbl_limit = UINT32_MAX; 2126 2127 if ((rc = ef10_set_workaround_bug26807(enp)) != 0) 2128 goto fail3; 2129 2130 /* Get remaining controller-specific board config */ 2131 if ((rc = enop->eno_board_cfg(enp)) != 0) 2132 if (rc != EACCES) 2133 goto fail4; 2134 2135 return (0); 2136 2137 fail4: 2138 EFSYS_PROBE(fail4); 2139 fail3: 2140 EFSYS_PROBE(fail3); 2141 fail2: 2142 EFSYS_PROBE(fail2); 2143 fail1: 2144 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2145 2146 return (rc); 2147 } 2148 2149 __checkReturn efx_rc_t 2150 ef10_nic_probe( 2151 __in efx_nic_t *enp) 2152 { 2153 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 2154 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg); 2155 efx_rc_t rc; 2156 2157 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); 2158 2159 /* Read and clear any assertion state */ 2160 if ((rc = efx_mcdi_read_assertion(enp)) != 0) 2161 goto fail1; 2162 2163 /* Exit the assertion handler */ 2164 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0) 2165 if (rc != EACCES) 2166 goto fail2; 2167 2168 if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0) 2169 goto fail3; 2170 2171 if ((rc = ef10_nic_board_cfg(enp)) != 0) 2172 goto fail4; 2173 2174 /* 2175 * Set default driver config limits (based on board config). 2176 * 2177 * FIXME: For now allocate a fixed number of VIs which is likely to be 2178 * sufficient and small enough to allow multiple functions on the same 2179 * port. 
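 *
 * Worked example of the clamp below (queue limits purely illustrative):
 * with enc_rxq_limit = 512 and enc_txq_limit = 256,
 * MIN(128, MAX(512, 256)) = 128, so both edc_min_vi_count and
 * edc_max_vi_count are pinned at 128 VIs for this function.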
2180 */ 2181 edcp->edc_min_vi_count = edcp->edc_max_vi_count = 2182 MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit)); 2183 2184 /* The client driver must configure and enable PIO buffer support */ 2185 edcp->edc_max_piobuf_count = 0; 2186 edcp->edc_pio_alloc_size = 0; 2187 2188 #if EFSYS_OPT_MAC_STATS 2189 /* Wipe the MAC statistics */ 2190 if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0) 2191 goto fail5; 2192 #endif 2193 2194 #if EFSYS_OPT_LOOPBACK 2195 if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0) 2196 goto fail6; 2197 #endif 2198 2199 #if EFSYS_OPT_MON_STATS 2200 if ((rc = mcdi_mon_cfg_build(enp)) != 0) { 2201 /* Unprivileged functions do not have access to sensors */ 2202 if (rc != EACCES) 2203 goto fail7; 2204 } 2205 #endif 2206 2207 return (0); 2208 2209 #if EFSYS_OPT_MON_STATS 2210 fail7: 2211 EFSYS_PROBE(fail7); 2212 #endif 2213 #if EFSYS_OPT_LOOPBACK 2214 fail6: 2215 EFSYS_PROBE(fail6); 2216 #endif 2217 #if EFSYS_OPT_MAC_STATS 2218 fail5: 2219 EFSYS_PROBE(fail5); 2220 #endif 2221 fail4: 2222 EFSYS_PROBE(fail4); 2223 fail3: 2224 EFSYS_PROBE(fail3); 2225 fail2: 2226 EFSYS_PROBE(fail2); 2227 fail1: 2228 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2229 2230 return (rc); 2231 } 2232 2233 __checkReturn efx_rc_t 2234 ef10_nic_set_drv_limits( 2235 __inout efx_nic_t *enp, 2236 __in efx_drv_limits_t *edlp) 2237 { 2238 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 2239 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg); 2240 uint32_t min_evq_count, max_evq_count; 2241 uint32_t min_rxq_count, max_rxq_count; 2242 uint32_t min_txq_count, max_txq_count; 2243 efx_rc_t rc; 2244 2245 if (edlp == NULL) { 2246 rc = EINVAL; 2247 goto fail1; 2248 } 2249 2250 /* Get minimum required and maximum usable VI limits */ 2251 min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit); 2252 min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit); 2253 min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit); 2254 2255 edcp->edc_min_vi_count = 2256 MAX(min_evq_count, MAX(min_rxq_count, min_txq_count)); 2257 2258 max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit); 2259 max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit); 2260 max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit); 2261 2262 edcp->edc_max_vi_count = 2263 MAX(max_evq_count, MAX(max_rxq_count, max_txq_count)); 2264 2265 /* 2266 * Check limits for sub-allocated piobuf blocks. 2267 * PIO is optional, so don't fail if the limits are incorrect. 
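 *
 * Worked example of the sub-allocation arithmetic below (all values
 * purely illustrative): enc_piobuf_size = 2048, enc_piobuf_limit = 16,
 * edl_min_pio_alloc_size = 64, enc_piobuf_min_alloc_size = 64:
 *	blk_size		= MAX(64, 64)		= 64 bytes
 *	blks_per_piobuf		= 2048 / 64		= 32 blocks
 *	blk_count		= 16 * 32		= 512 blocks
 *	edc_max_piobuf_count	= ceil(512 / 32)	= 16 piobufs
 *	edc_pio_alloc_size	= 64 bytes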
2268 */ 2269 if ((encp->enc_piobuf_size == 0) || 2270 (encp->enc_piobuf_limit == 0) || 2271 (edlp->edl_min_pio_alloc_size == 0) || 2272 (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) { 2273 /* Disable PIO */ 2274 edcp->edc_max_piobuf_count = 0; 2275 edcp->edc_pio_alloc_size = 0; 2276 } else { 2277 uint32_t blk_size, blk_count, blks_per_piobuf; 2278 2279 blk_size = 2280 MAX(edlp->edl_min_pio_alloc_size, 2281 encp->enc_piobuf_min_alloc_size); 2282 2283 blks_per_piobuf = encp->enc_piobuf_size / blk_size; 2284 EFSYS_ASSERT3U(blks_per_piobuf, <=, 32); 2285 2286 blk_count = (encp->enc_piobuf_limit * blks_per_piobuf); 2287 2288 /* A zero max pio alloc count means unlimited */ 2289 if ((edlp->edl_max_pio_alloc_count > 0) && 2290 (edlp->edl_max_pio_alloc_count < blk_count)) { 2291 blk_count = edlp->edl_max_pio_alloc_count; 2292 } 2293 2294 edcp->edc_pio_alloc_size = blk_size; 2295 edcp->edc_max_piobuf_count = 2296 (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf; 2297 } 2298 2299 return (0); 2300 2301 fail1: 2302 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2303 2304 return (rc); 2305 } 2306 2307 2308 __checkReturn efx_rc_t 2309 ef10_nic_reset( 2310 __in efx_nic_t *enp) 2311 { 2312 efx_rc_t rc; 2313 2314 /* ef10_nic_reset() is called to recover from BADASSERT failures. */ 2315 if ((rc = efx_mcdi_read_assertion(enp)) != 0) 2316 goto fail1; 2317 if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0) 2318 goto fail2; 2319 2320 if ((rc = efx_mcdi_entity_reset(enp)) != 0) 2321 goto fail3; 2322 2323 /* Clear RX/TX DMA queue errors */ 2324 enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR); 2325 2326 return (0); 2327 2328 fail3: 2329 EFSYS_PROBE(fail3); 2330 fail2: 2331 EFSYS_PROBE(fail2); 2332 fail1: 2333 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2334 2335 return (rc); 2336 } 2337 2338 #endif /* EFX_OPTS_EF10() */ 2339 2340 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() 2341 2342 __checkReturn efx_rc_t 2343 ef10_upstream_port_vadaptor_alloc( 2344 __in efx_nic_t *enp) 2345 { 2346 uint32_t retry; 2347 uint32_t delay_us; 2348 efx_rc_t rc; 2349 2350 /* 2351 * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the PF 2352 * driver has yet to bring up the EVB port. See bug 56147. In this case, 2353 * retry the request several times after waiting a while. The wait time 2354 * between retries starts small (10ms) and exponentially increases. 2355 * Total wait time is a little over two seconds. Retry logic in the 2356 * client driver may mean this whole loop is repeated if it continues to 2357 * fail. 2358 */ 2359 retry = 0; 2360 delay_us = 10000; 2361 while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) { 2362 if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) || 2363 (rc != ENOENT)) { 2364 /* 2365 * Do not retry alloc for PF, or for other errors on 2366 * a VF. 2367 */ 2368 goto fail1; 2369 } 2370 2371 /* VF startup before PF is ready. Retry allocation. 
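 *
 * With the initial 10ms delay quadrupling while it is below the 500ms
 * cap, the sleep schedule is 10, 40, 160, 640, 640 and 640 ms: at most
 * seven allocation attempts and roughly 2.1 seconds of total sleep
 * before giving up with EINVAL.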
*/ 2372 if (retry > 5) { 2373 /* Too many attempts */ 2374 rc = EINVAL; 2375 goto fail2; 2376 } 2377 EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry); 2378 EFSYS_SLEEP(delay_us); 2379 retry++; 2380 if (delay_us < 500000) 2381 delay_us <<= 2; 2382 } 2383 2384 return (0); 2385 2386 fail2: 2387 EFSYS_PROBE(fail2); 2388 fail1: 2389 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2390 2391 return (rc); 2392 } 2393 2394 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */ 2395 2396 #if EFX_OPTS_EF10() 2397 2398 __checkReturn efx_rc_t 2399 ef10_nic_init( 2400 __in efx_nic_t *enp) 2401 { 2402 efx_drv_cfg_t *edcp = &(enp->en_drv_cfg); 2403 uint32_t min_vi_count, max_vi_count; 2404 uint32_t vi_count, vi_base, vi_shift; 2405 uint32_t i; 2406 uint32_t vi_window_size; 2407 efx_rc_t rc; 2408 boolean_t alloc_vadaptor = B_TRUE; 2409 2410 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); 2411 2412 /* Enable reporting of some events (e.g. link change) */ 2413 if ((rc = efx_mcdi_log_ctrl(enp)) != 0) 2414 goto fail1; 2415 2416 /* Allocate (optional) on-chip PIO buffers */ 2417 ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count); 2418 2419 /* 2420 * For best performance, PIO writes should use a write-combined 2421 * (WC) memory mapping. Using a separate WC mapping for the PIO 2422 * aperture of each VI would be a burden to drivers (and not 2423 * possible if the host page size is >4Kbyte). 2424 * 2425 * To avoid this we use a single uncached (UC) mapping for VI 2426 * register access, and a single WC mapping for extra VIs used 2427 * for PIO writes. 2428 * 2429 * Each piobuf must be linked to a VI in the WC mapping, and to 2430 * each VI that is using a sub-allocated block from the piobuf. 2431 */ 2432 min_vi_count = edcp->edc_min_vi_count; 2433 max_vi_count = 2434 edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count; 2435 2436 /* Ensure that the previously attached driver's VIs are freed */ 2437 if ((rc = efx_mcdi_free_vis(enp)) != 0) 2438 goto fail2; 2439 2440 /* 2441 * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this 2442 * fails then retrying the request for fewer VI resources may succeed. 
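 *
 * The retry is left to the client driver: if this function fails with
 * ENOMEM, the driver can lower its limits and re-initialise, e.g.
 * (illustrative sketch using the public libefx entry points):
 *
 *	limits.edl_max_evq_count /= 2;
 *	limits.edl_max_rxq_count /= 2;
 *	limits.edl_max_txq_count /= 2;
 *	(void) efx_nic_set_drv_limits(enp, &limits);
 *	rc = efx_nic_init(enp);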
2443 */ 2444 vi_count = 0; 2445 if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count, 2446 &vi_base, &vi_count, &vi_shift)) != 0) 2447 goto fail3; 2448 2449 EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count); 2450 2451 if (vi_count < min_vi_count) { 2452 rc = ENOMEM; 2453 goto fail4; 2454 } 2455 2456 enp->en_arch.ef10.ena_vi_base = vi_base; 2457 enp->en_arch.ef10.ena_vi_count = vi_count; 2458 enp->en_arch.ef10.ena_vi_shift = vi_shift; 2459 2460 if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) { 2461 /* Not enough extra VIs to map piobufs */ 2462 ef10_nic_free_piobufs(enp); 2463 } 2464 2465 enp->en_arch.ef10.ena_pio_write_vi_base = 2466 vi_count - enp->en_arch.ef10.ena_piobuf_count; 2467 2468 EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=, 2469 EFX_VI_WINDOW_SHIFT_INVALID); 2470 EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=, 2471 EFX_VI_WINDOW_SHIFT_64K); 2472 vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift; 2473 2474 /* Save UC memory mapping details */ 2475 enp->en_arch.ef10.ena_uc_mem_map_offset = 0; 2476 if (enp->en_arch.ef10.ena_piobuf_count > 0) { 2477 enp->en_arch.ef10.ena_uc_mem_map_size = 2478 (vi_window_size * 2479 enp->en_arch.ef10.ena_pio_write_vi_base); 2480 } else { 2481 enp->en_arch.ef10.ena_uc_mem_map_size = 2482 (vi_window_size * 2483 enp->en_arch.ef10.ena_vi_count); 2484 } 2485 2486 /* Save WC memory mapping details */ 2487 enp->en_arch.ef10.ena_wc_mem_map_offset = 2488 enp->en_arch.ef10.ena_uc_mem_map_offset + 2489 enp->en_arch.ef10.ena_uc_mem_map_size; 2490 2491 enp->en_arch.ef10.ena_wc_mem_map_size = 2492 (vi_window_size * 2493 enp->en_arch.ef10.ena_piobuf_count); 2494 2495 /* Link piobufs to extra VIs in WC mapping */ 2496 if (enp->en_arch.ef10.ena_piobuf_count > 0) { 2497 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) { 2498 rc = efx_mcdi_link_piobuf(enp, 2499 enp->en_arch.ef10.ena_pio_write_vi_base + i, 2500 enp->en_arch.ef10.ena_piobuf_handle[i]); 2501 if (rc != 0) 2502 break; 2503 } 2504 } 2505 2506 /* 2507 * For SR-IOV use case, vAdaptor is allocated for PF and associated VFs 2508 * during NIC initialization when vSwitch is created and vports are 2509 * allocated. Hence, skip vAdaptor allocation for EVB and update vport 2510 * id in NIC structure with the one allocated for PF. 2511 */ 2512 2513 enp->en_vport_id = EVB_PORT_ID_ASSIGNED; 2514 #if EFSYS_OPT_EVB 2515 if ((enp->en_vswitchp != NULL) && (enp->en_vswitchp->ev_evcp != NULL)) { 2516 /* For EVB use vport allocated on vswitch */ 2517 enp->en_vport_id = enp->en_vswitchp->ev_evcp->evc_vport_id; 2518 alloc_vadaptor = B_FALSE; 2519 } 2520 #endif 2521 if (alloc_vadaptor != B_FALSE) { 2522 /* Allocate a vAdaptor attached to our upstream vPort/pPort */ 2523 if ((rc = ef10_upstream_port_vadaptor_alloc(enp)) != 0) 2524 goto fail5; 2525 } 2526 enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2; 2527 2528 return (0); 2529 2530 fail5: 2531 EFSYS_PROBE(fail5); 2532 fail4: 2533 EFSYS_PROBE(fail4); 2534 fail3: 2535 EFSYS_PROBE(fail3); 2536 fail2: 2537 EFSYS_PROBE(fail2); 2538 2539 ef10_nic_free_piobufs(enp); 2540 2541 fail1: 2542 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2543 2544 return (rc); 2545 } 2546 2547 __checkReturn efx_rc_t 2548 ef10_nic_get_vi_pool( 2549 __in efx_nic_t *enp, 2550 __out uint32_t *vi_countp) 2551 { 2552 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); 2553 2554 /* 2555 * Report VIs that the client driver can use. 2556 * Do not include VIs used for PIO buffer writes. 
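 *
 * Worked example (numbers illustrative): if 36 VIs were allocated and
 * 4 piobufs are linked, ena_pio_write_vi_base = 36 - 4 = 32, so the
 * client driver is told it may use VIs 0..31 while VIs 32..35 remain
 * reserved for PIO writes.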
2557 */ 2558 *vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base; 2559 2560 return (0); 2561 } 2562 2563 __checkReturn efx_rc_t 2564 ef10_nic_get_bar_region( 2565 __in efx_nic_t *enp, 2566 __in efx_nic_region_t region, 2567 __out uint32_t *offsetp, 2568 __out size_t *sizep) 2569 { 2570 efx_rc_t rc; 2571 2572 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); 2573 2574 /* 2575 * TODO: Specify host memory mapping alignment and granularity 2576 * in efx_drv_limits_t so that they can be taken into account 2577 * when allocating extra VIs for PIO writes. 2578 */ 2579 switch (region) { 2580 case EFX_REGION_VI: 2581 /* UC mapped memory BAR region for VI registers */ 2582 *offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset; 2583 *sizep = enp->en_arch.ef10.ena_uc_mem_map_size; 2584 break; 2585 2586 case EFX_REGION_PIO_WRITE_VI: 2587 /* WC mapped memory BAR region for piobuf writes */ 2588 *offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset; 2589 *sizep = enp->en_arch.ef10.ena_wc_mem_map_size; 2590 break; 2591 2592 default: 2593 rc = EINVAL; 2594 goto fail1; 2595 } 2596 2597 return (0); 2598 2599 fail1: 2600 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2601 2602 return (rc); 2603 } 2604 2605 __checkReturn boolean_t 2606 ef10_nic_hw_unavailable( 2607 __in efx_nic_t *enp) 2608 { 2609 efx_dword_t dword; 2610 2611 if (enp->en_reset_flags & EFX_RESET_HW_UNAVAIL) 2612 return (B_TRUE); 2613 2614 EFX_BAR_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, &dword, B_FALSE); 2615 if (EFX_DWORD_FIELD(dword, EFX_DWORD_0) == 0xffffffff) 2616 goto unavail; 2617 2618 return (B_FALSE); 2619 2620 unavail: 2621 ef10_nic_set_hw_unavailable(enp); 2622 2623 return (B_TRUE); 2624 } 2625 2626 void 2627 ef10_nic_set_hw_unavailable( 2628 __in efx_nic_t *enp) 2629 { 2630 EFSYS_PROBE(hw_unavail); 2631 enp->en_reset_flags |= EFX_RESET_HW_UNAVAIL; 2632 } 2633 2634 2635 void 2636 ef10_nic_fini( 2637 __in efx_nic_t *enp) 2638 { 2639 uint32_t i; 2640 efx_rc_t rc; 2641 boolean_t do_vadaptor_free = B_TRUE; 2642 2643 #if EFSYS_OPT_EVB 2644 if (enp->en_vswitchp != NULL) { 2645 /* 2646 * For SR-IOV the vAdaptor is freed with the vswitch, 2647 * so do not free it here. 
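 *
 * This mirrors the EVB branch in ef10_nic_init(): when a vswitch
 * exists the vport is owned by it and no per-function vAdaptor was
 * allocated by this module, so there is nothing to free here.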
2648 */ 2649 do_vadaptor_free = B_FALSE; 2650 } 2651 #endif 2652 if (do_vadaptor_free != B_FALSE) { 2653 (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id); 2654 enp->en_vport_id = EVB_PORT_ID_NULL; 2655 } 2656 2657 /* Unlink piobufs from extra VIs in WC mapping */ 2658 if (enp->en_arch.ef10.ena_piobuf_count > 0) { 2659 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) { 2660 rc = efx_mcdi_unlink_piobuf(enp, 2661 enp->en_arch.ef10.ena_pio_write_vi_base + i); 2662 if (rc != 0) 2663 break; 2664 } 2665 } 2666 2667 ef10_nic_free_piobufs(enp); 2668 2669 (void) efx_mcdi_free_vis(enp); 2670 enp->en_arch.ef10.ena_vi_count = 0; 2671 } 2672 2673 void 2674 ef10_nic_unprobe( 2675 __in efx_nic_t *enp) 2676 { 2677 #if EFSYS_OPT_MON_STATS 2678 mcdi_mon_cfg_free(enp); 2679 #endif /* EFSYS_OPT_MON_STATS */ 2680 (void) efx_mcdi_drv_attach(enp, B_FALSE); 2681 } 2682 2683 #if EFSYS_OPT_DIAG 2684 2685 __checkReturn efx_rc_t 2686 ef10_nic_register_test( 2687 __in efx_nic_t *enp) 2688 { 2689 efx_rc_t rc; 2690 2691 /* FIXME */ 2692 _NOTE(ARGUNUSED(enp)) 2693 _NOTE(CONSTANTCONDITION) 2694 if (B_FALSE) { 2695 rc = ENOTSUP; 2696 goto fail1; 2697 } 2698 /* FIXME */ 2699 2700 return (0); 2701 2702 fail1: 2703 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2704 2705 return (rc); 2706 } 2707 2708 #endif /* EFSYS_OPT_DIAG */ 2709 2710 #if EFSYS_OPT_FW_SUBVARIANT_AWARE 2711 2712 __checkReturn efx_rc_t 2713 efx_mcdi_get_nic_global( 2714 __in efx_nic_t *enp, 2715 __in uint32_t key, 2716 __out uint32_t *valuep) 2717 { 2718 efx_mcdi_req_t req; 2719 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_NIC_GLOBAL_IN_LEN, 2720 MC_CMD_GET_NIC_GLOBAL_OUT_LEN); 2721 efx_rc_t rc; 2722 2723 req.emr_cmd = MC_CMD_GET_NIC_GLOBAL; 2724 req.emr_in_buf = payload; 2725 req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN; 2726 req.emr_out_buf = payload; 2727 req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN; 2728 2729 MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key); 2730 2731 efx_mcdi_execute(enp, &req); 2732 2733 if (req.emr_rc != 0) { 2734 rc = req.emr_rc; 2735 goto fail1; 2736 } 2737 2738 if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) { 2739 rc = EMSGSIZE; 2740 goto fail2; 2741 } 2742 2743 *valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE); 2744 2745 return (0); 2746 2747 fail2: 2748 EFSYS_PROBE(fail2); 2749 fail1: 2750 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2751 2752 return (rc); 2753 } 2754 2755 __checkReturn efx_rc_t 2756 efx_mcdi_set_nic_global( 2757 __in efx_nic_t *enp, 2758 __in uint32_t key, 2759 __in uint32_t value) 2760 { 2761 efx_mcdi_req_t req; 2762 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_NIC_GLOBAL_IN_LEN, 0); 2763 efx_rc_t rc; 2764 2765 req.emr_cmd = MC_CMD_SET_NIC_GLOBAL; 2766 req.emr_in_buf = payload; 2767 req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN; 2768 req.emr_out_buf = NULL; 2769 req.emr_out_length = 0; 2770 2771 MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key); 2772 MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value); 2773 2774 efx_mcdi_execute(enp, &req); 2775 2776 if (req.emr_rc != 0) { 2777 rc = req.emr_rc; 2778 goto fail1; 2779 } 2780 2781 return (0); 2782 2783 fail1: 2784 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2785 2786 return (rc); 2787 } 2788 2789 #endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */ 2790 2791 #endif /* EFX_OPTS_EF10() */ 2792
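
/*
 * Usage sketch for the NIC_GLOBAL helpers above (illustrative only;
 * HYPOTHETICAL_KEY stands for one of the key values defined in the MCDI
 * protocol headers, such as the firmware subvariant key used by the
 * EFSYS_OPT_FW_SUBVARIANT_AWARE support):
 *
 *	uint32_t value;
 *
 *	rc = efx_mcdi_get_nic_global(enp, HYPOTHETICAL_KEY, &value);
 *	if ((rc == 0) && (value != wanted))
 *		rc = efx_mcdi_set_nic_global(enp, HYPOTHETICAL_KEY, wanted);
 */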