1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright(c) 2019-2020 Xilinx, Inc. 4 * Copyright(c) 2012-2019 Solarflare Communications Inc. 5 */ 6 7 #include "efx.h" 8 #include "efx_impl.h" 9 #if EFSYS_OPT_MON_MCDI 10 #include "mcdi_mon.h" 11 #endif 12 13 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() 14 15 #include "ef10_tlv_layout.h" 16 17 __checkReturn efx_rc_t 18 efx_mcdi_get_port_assignment( 19 __in efx_nic_t *enp, 20 __out uint32_t *portp) 21 { 22 efx_mcdi_req_t req; 23 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN, 24 MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN); 25 efx_rc_t rc; 26 27 EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp)); 28 29 req.emr_cmd = MC_CMD_GET_PORT_ASSIGNMENT; 30 req.emr_in_buf = payload; 31 req.emr_in_length = MC_CMD_GET_PORT_ASSIGNMENT_IN_LEN; 32 req.emr_out_buf = payload; 33 req.emr_out_length = MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN; 34 35 efx_mcdi_execute(enp, &req); 36 37 if (req.emr_rc != 0) { 38 rc = req.emr_rc; 39 goto fail1; 40 } 41 42 if (req.emr_out_length_used < MC_CMD_GET_PORT_ASSIGNMENT_OUT_LEN) { 43 rc = EMSGSIZE; 44 goto fail2; 45 } 46 47 *portp = MCDI_OUT_DWORD(req, GET_PORT_ASSIGNMENT_OUT_PORT); 48 49 return (0); 50 51 fail2: 52 EFSYS_PROBE(fail2); 53 fail1: 54 EFSYS_PROBE1(fail1, efx_rc_t, rc); 55 56 return (rc); 57 } 58 59 __checkReturn efx_rc_t 60 efx_mcdi_get_port_modes( 61 __in efx_nic_t *enp, 62 __out uint32_t *modesp, 63 __out_opt uint32_t *current_modep, 64 __out_opt uint32_t *default_modep) 65 { 66 efx_mcdi_req_t req; 67 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PORT_MODES_IN_LEN, 68 MC_CMD_GET_PORT_MODES_OUT_LEN); 69 efx_rc_t rc; 70 71 EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp)); 72 73 req.emr_cmd = MC_CMD_GET_PORT_MODES; 74 req.emr_in_buf = payload; 75 req.emr_in_length = MC_CMD_GET_PORT_MODES_IN_LEN; 76 req.emr_out_buf = payload; 77 req.emr_out_length = MC_CMD_GET_PORT_MODES_OUT_LEN; 78 79 efx_mcdi_execute(enp, &req); 80 81 if (req.emr_rc != 0) { 82 rc = req.emr_rc; 83 goto fail1; 84 } 85 86 /* 87 * Require only Modes and DefaultMode fields, unless the current mode 88 * was requested (CurrentMode field was added for Medford). 89 */ 90 if (req.emr_out_length_used < 91 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST) { 92 rc = EMSGSIZE; 93 goto fail2; 94 } 95 if ((current_modep != NULL) && (req.emr_out_length_used < 96 MC_CMD_GET_PORT_MODES_OUT_CURRENT_MODE_OFST + 4)) { 97 rc = EMSGSIZE; 98 goto fail3; 99 } 100 101 *modesp = MCDI_OUT_DWORD(req, GET_PORT_MODES_OUT_MODES); 102 103 if (current_modep != NULL) { 104 *current_modep = MCDI_OUT_DWORD(req, 105 GET_PORT_MODES_OUT_CURRENT_MODE); 106 } 107 108 if (default_modep != NULL) { 109 *default_modep = MCDI_OUT_DWORD(req, 110 GET_PORT_MODES_OUT_DEFAULT_MODE); 111 } 112 113 return (0); 114 115 fail3: 116 EFSYS_PROBE(fail3); 117 fail2: 118 EFSYS_PROBE(fail2); 119 fail1: 120 EFSYS_PROBE1(fail1, efx_rc_t, rc); 121 122 return (rc); 123 } 124 125 __checkReturn efx_rc_t 126 ef10_nic_get_port_mode_bandwidth( 127 __in efx_nic_t *enp, 128 __out uint32_t *bandwidth_mbpsp) 129 { 130 uint32_t port_modes; 131 uint32_t current_mode; 132 efx_port_t *epp = &(enp->en_port); 133 134 uint32_t single_lane; 135 uint32_t dual_lane; 136 uint32_t quad_lane; 137 uint32_t bandwidth; 138 efx_rc_t rc; 139 140 if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, 141 &current_mode, NULL)) != 0) { /* No port mode info available.
*/ 143 goto fail1; 144 } 145 146 if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_25000FDX)) 147 single_lane = 25000; 148 else 149 single_lane = 10000; 150 151 if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_50000FDX)) 152 dual_lane = 50000; 153 else 154 dual_lane = 20000; 155 156 if (epp->ep_phy_cap_mask & (1 << EFX_PHY_CAP_100000FDX)) 157 quad_lane = 100000; 158 else 159 quad_lane = 40000; 160 161 switch (current_mode) { 162 case TLV_PORT_MODE_1x1_NA: /* mode 0 */ 163 bandwidth = single_lane; 164 break; 165 case TLV_PORT_MODE_1x2_NA: /* mode 10 */ 166 case TLV_PORT_MODE_NA_1x2: /* mode 11 */ 167 bandwidth = dual_lane; 168 break; 169 case TLV_PORT_MODE_1x1_1x1: /* mode 2 */ 170 bandwidth = single_lane + single_lane; 171 break; 172 case TLV_PORT_MODE_4x1_NA: /* mode 4 */ 173 case TLV_PORT_MODE_NA_4x1: /* mode 8 */ 174 bandwidth = 4 * single_lane; 175 break; 176 case TLV_PORT_MODE_2x1_2x1: /* mode 5 */ 177 bandwidth = (2 * single_lane) + (2 * single_lane); 178 break; 179 case TLV_PORT_MODE_1x2_1x2: /* mode 12 */ 180 bandwidth = dual_lane + dual_lane; 181 break; 182 case TLV_PORT_MODE_1x2_2x1: /* mode 17 */ 183 case TLV_PORT_MODE_2x1_1x2: /* mode 18 */ 184 bandwidth = dual_lane + (2 * single_lane); 185 break; 186 /* Legacy Medford-only mode. Do not use (see bug63270) */ 187 case TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2: /* mode 9 */ 188 bandwidth = 4 * single_lane; 189 break; 190 case TLV_PORT_MODE_1x4_NA: /* mode 1 */ 191 case TLV_PORT_MODE_NA_1x4: /* mode 22 */ 192 bandwidth = quad_lane; 193 break; 194 case TLV_PORT_MODE_2x2_NA: /* mode 13 */ 195 case TLV_PORT_MODE_NA_2x2: /* mode 14 */ 196 bandwidth = 2 * dual_lane; 197 break; 198 case TLV_PORT_MODE_1x4_2x1: /* mode 6 */ 199 case TLV_PORT_MODE_2x1_1x4: /* mode 7 */ 200 bandwidth = quad_lane + (2 * single_lane); 201 break; 202 case TLV_PORT_MODE_1x4_1x2: /* mode 15 */ 203 case TLV_PORT_MODE_1x2_1x4: /* mode 16 */ 204 bandwidth = quad_lane + dual_lane; 205 break; 206 case TLV_PORT_MODE_1x4_1x4: /* mode 3 */ 207 bandwidth = quad_lane + quad_lane; 208 break; 209 default: 210 rc = EINVAL; 211 goto fail2; 212 } 213 214 *bandwidth_mbpsp = bandwidth; 215 216 return (0); 217 218 fail2: 219 EFSYS_PROBE(fail2); 220 fail1: 221 EFSYS_PROBE1(fail1, efx_rc_t, rc); 222 223 return (rc); 224 } 225 226 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */ 227 228 #if EFX_OPTS_EF10() 229 230 __checkReturn efx_rc_t 231 efx_mcdi_vadaptor_alloc( 232 __in efx_nic_t *enp, 233 __in uint32_t port_id) 234 { 235 efx_mcdi_req_t req; 236 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_ALLOC_IN_LEN, 237 MC_CMD_VADAPTOR_ALLOC_OUT_LEN); 238 efx_rc_t rc; 239 240 req.emr_cmd = MC_CMD_VADAPTOR_ALLOC; 241 req.emr_in_buf = payload; 242 req.emr_in_length = MC_CMD_VADAPTOR_ALLOC_IN_LEN; 243 req.emr_out_buf = payload; 244 req.emr_out_length = MC_CMD_VADAPTOR_ALLOC_OUT_LEN; 245 246 MCDI_IN_SET_DWORD(req, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id); 247 MCDI_IN_POPULATE_DWORD_1(req, VADAPTOR_ALLOC_IN_FLAGS, 248 VADAPTOR_ALLOC_IN_FLAG_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED, 249 enp->en_nic_cfg.enc_allow_set_mac_with_installed_filters ? 
1 : 0); 250 251 efx_mcdi_execute(enp, &req); 252 253 if (req.emr_rc != 0) { 254 rc = req.emr_rc; 255 goto fail1; 256 } 257 258 return (0); 259 260 fail1: 261 EFSYS_PROBE1(fail1, efx_rc_t, rc); 262 263 return (rc); 264 } 265 266 __checkReturn efx_rc_t 267 efx_mcdi_vadaptor_free( 268 __in efx_nic_t *enp, 269 __in uint32_t port_id) 270 { 271 efx_mcdi_req_t req; 272 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VADAPTOR_FREE_IN_LEN, 273 MC_CMD_VADAPTOR_FREE_OUT_LEN); 274 efx_rc_t rc; 275 276 req.emr_cmd = MC_CMD_VADAPTOR_FREE; 277 req.emr_in_buf = payload; 278 req.emr_in_length = MC_CMD_VADAPTOR_FREE_IN_LEN; 279 req.emr_out_buf = payload; 280 req.emr_out_length = MC_CMD_VADAPTOR_FREE_OUT_LEN; 281 282 MCDI_IN_SET_DWORD(req, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id); 283 284 efx_mcdi_execute(enp, &req); 285 286 if (req.emr_rc != 0) { 287 rc = req.emr_rc; 288 goto fail1; 289 } 290 291 return (0); 292 293 fail1: 294 EFSYS_PROBE1(fail1, efx_rc_t, rc); 295 296 return (rc); 297 } 298 299 #endif /* EFX_OPTS_EF10() */ 300 301 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() 302 303 __checkReturn efx_rc_t 304 efx_mcdi_get_mac_address_pf( 305 __in efx_nic_t *enp, 306 __out_ecount_opt(6) uint8_t mac_addrp[6]) 307 { 308 efx_mcdi_req_t req; 309 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_MAC_ADDRESSES_IN_LEN, 310 MC_CMD_GET_MAC_ADDRESSES_OUT_LEN); 311 efx_rc_t rc; 312 313 EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp)); 314 315 req.emr_cmd = MC_CMD_GET_MAC_ADDRESSES; 316 req.emr_in_buf = payload; 317 req.emr_in_length = MC_CMD_GET_MAC_ADDRESSES_IN_LEN; 318 req.emr_out_buf = payload; 319 req.emr_out_length = MC_CMD_GET_MAC_ADDRESSES_OUT_LEN; 320 321 efx_mcdi_execute(enp, &req); 322 323 if (req.emr_rc != 0) { 324 rc = req.emr_rc; 325 goto fail1; 326 } 327 328 if (req.emr_out_length_used < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN) { 329 rc = EMSGSIZE; 330 goto fail2; 331 } 332 333 if (MCDI_OUT_DWORD(req, GET_MAC_ADDRESSES_OUT_MAC_COUNT) < 1) { 334 rc = ENOENT; 335 goto fail3; 336 } 337 338 if (mac_addrp != NULL) { 339 uint8_t *addrp; 340 341 addrp = MCDI_OUT2(req, uint8_t, 342 GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE); 343 344 EFX_MAC_ADDR_COPY(mac_addrp, addrp); 345 } 346 347 return (0); 348 349 fail3: 350 EFSYS_PROBE(fail3); 351 fail2: 352 EFSYS_PROBE(fail2); 353 fail1: 354 EFSYS_PROBE1(fail1, efx_rc_t, rc); 355 356 return (rc); 357 } 358 359 __checkReturn efx_rc_t 360 efx_mcdi_get_mac_address_vf( 361 __in efx_nic_t *enp, 362 __out_ecount_opt(6) uint8_t mac_addrp[6]) 363 { 364 efx_mcdi_req_t req; 365 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN, 366 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX); 367 efx_rc_t rc; 368 369 EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp)); 370 371 req.emr_cmd = MC_CMD_VPORT_GET_MAC_ADDRESSES; 372 req.emr_in_buf = payload; 373 req.emr_in_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN; 374 req.emr_out_buf = payload; 375 req.emr_out_length = MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX; 376 377 MCDI_IN_SET_DWORD(req, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID, 378 EVB_PORT_ID_ASSIGNED); 379 380 efx_mcdi_execute(enp, &req); 381 382 if (req.emr_rc != 0) { 383 rc = req.emr_rc; 384 goto fail1; 385 } 386 387 if (req.emr_out_length_used < 388 MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN) { 389 rc = EMSGSIZE; 390 goto fail2; 391 } 392 393 if (MCDI_OUT_DWORD(req, 394 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT) < 1) { 395 rc = ENOENT; 396 goto fail3; 397 } 398 399 if (mac_addrp != NULL) { 400 uint8_t *addrp; 401 402 addrp = MCDI_OUT2(req, uint8_t, 403 
VPORT_GET_MAC_ADDRESSES_OUT_MACADDR); 404 405 EFX_MAC_ADDR_COPY(mac_addrp, addrp); 406 } 407 408 return (0); 409 410 fail3: 411 EFSYS_PROBE(fail3); 412 fail2: 413 EFSYS_PROBE(fail2); 414 fail1: 415 EFSYS_PROBE1(fail1, efx_rc_t, rc); 416 417 return (rc); 418 } 419 420 __checkReturn efx_rc_t 421 efx_mcdi_get_clock( 422 __in efx_nic_t *enp, 423 __out uint32_t *sys_freqp, 424 __out uint32_t *dpcpu_freqp) 425 { 426 efx_mcdi_req_t req; 427 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CLOCK_IN_LEN, 428 MC_CMD_GET_CLOCK_OUT_LEN); 429 efx_rc_t rc; 430 431 EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp) || EFX_FAMILY_IS_EF10(enp)); 432 433 req.emr_cmd = MC_CMD_GET_CLOCK; 434 req.emr_in_buf = payload; 435 req.emr_in_length = MC_CMD_GET_CLOCK_IN_LEN; 436 req.emr_out_buf = payload; 437 req.emr_out_length = MC_CMD_GET_CLOCK_OUT_LEN; 438 439 efx_mcdi_execute(enp, &req); 440 441 if (req.emr_rc != 0) { 442 rc = req.emr_rc; 443 goto fail1; 444 } 445 446 if (req.emr_out_length_used < MC_CMD_GET_CLOCK_OUT_LEN) { 447 rc = EMSGSIZE; 448 goto fail2; 449 } 450 451 *sys_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_SYS_FREQ); 452 if (*sys_freqp == 0) { 453 rc = EINVAL; 454 goto fail3; 455 } 456 *dpcpu_freqp = MCDI_OUT_DWORD(req, GET_CLOCK_OUT_DPCPU_FREQ); 457 if (*dpcpu_freqp == 0) { 458 rc = EINVAL; 459 goto fail4; 460 } 461 462 return (0); 463 464 fail4: 465 EFSYS_PROBE(fail4); 466 fail3: 467 EFSYS_PROBE(fail3); 468 fail2: 469 EFSYS_PROBE(fail2); 470 fail1: 471 EFSYS_PROBE1(fail1, efx_rc_t, rc); 472 473 return (rc); 474 } 475 476 __checkReturn efx_rc_t 477 efx_mcdi_get_rxdp_config( 478 __in efx_nic_t *enp, 479 __out uint32_t *end_paddingp) 480 { 481 efx_mcdi_req_t req; 482 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RXDP_CONFIG_IN_LEN, 483 MC_CMD_GET_RXDP_CONFIG_OUT_LEN); 484 uint32_t end_padding; 485 efx_rc_t rc; 486 487 req.emr_cmd = MC_CMD_GET_RXDP_CONFIG; 488 req.emr_in_buf = payload; 489 req.emr_in_length = MC_CMD_GET_RXDP_CONFIG_IN_LEN; 490 req.emr_out_buf = payload; 491 req.emr_out_length = MC_CMD_GET_RXDP_CONFIG_OUT_LEN; 492 493 efx_mcdi_execute(enp, &req); 494 if (req.emr_rc != 0) { 495 rc = req.emr_rc; 496 goto fail1; 497 } 498 499 if (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA, 500 GET_RXDP_CONFIG_OUT_PAD_HOST_DMA) == 0) { 501 /* RX DMA end padding is disabled */ 502 end_padding = 0; 503 } else { 504 switch (MCDI_OUT_DWORD_FIELD(req, GET_RXDP_CONFIG_OUT_DATA, 505 GET_RXDP_CONFIG_OUT_PAD_HOST_LEN)) { 506 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_64: 507 end_padding = 64; 508 break; 509 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_128: 510 end_padding = 128; 511 break; 512 case MC_CMD_SET_RXDP_CONFIG_IN_PAD_HOST_256: 513 end_padding = 256; 514 break; 515 default: 516 rc = ENOTSUP; 517 goto fail2; 518 } 519 } 520 521 *end_paddingp = end_padding; 522 523 return (0); 524 525 fail2: 526 EFSYS_PROBE(fail2); 527 fail1: 528 EFSYS_PROBE1(fail1, efx_rc_t, rc); 529 530 return (rc); 531 } 532 533 __checkReturn efx_rc_t 534 efx_mcdi_get_vector_cfg( 535 __in efx_nic_t *enp, 536 __out_opt uint32_t *vec_basep, 537 __out_opt uint32_t *pf_nvecp, 538 __out_opt uint32_t *vf_nvecp) 539 { 540 efx_mcdi_req_t req; 541 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_VECTOR_CFG_IN_LEN, 542 MC_CMD_GET_VECTOR_CFG_OUT_LEN); 543 efx_rc_t rc; 544 545 req.emr_cmd = MC_CMD_GET_VECTOR_CFG; 546 req.emr_in_buf = payload; 547 req.emr_in_length = MC_CMD_GET_VECTOR_CFG_IN_LEN; 548 req.emr_out_buf = payload; 549 req.emr_out_length = MC_CMD_GET_VECTOR_CFG_OUT_LEN; 550 551 efx_mcdi_execute(enp, &req); 552 553 if (req.emr_rc != 0) { 554 rc = req.emr_rc; 555 goto fail1; 
556 } 557 558 if (req.emr_out_length_used < MC_CMD_GET_VECTOR_CFG_OUT_LEN) { 559 rc = EMSGSIZE; 560 goto fail2; 561 } 562 563 if (vec_basep != NULL) 564 *vec_basep = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VEC_BASE); 565 if (pf_nvecp != NULL) 566 *pf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_PF); 567 if (vf_nvecp != NULL) 568 *vf_nvecp = MCDI_OUT_DWORD(req, GET_VECTOR_CFG_OUT_VECS_PER_VF); 569 570 return (0); 571 572 fail2: 573 EFSYS_PROBE(fail2); 574 fail1: 575 EFSYS_PROBE1(fail1, efx_rc_t, rc); 576 577 return (rc); 578 } 579 580 __checkReturn efx_rc_t 581 efx_mcdi_alloc_vis( 582 __in efx_nic_t *enp, 583 __in uint32_t min_vi_count, 584 __in uint32_t max_vi_count, 585 __out uint32_t *vi_basep, 586 __out uint32_t *vi_countp, 587 __out uint32_t *vi_shiftp) 588 { 589 efx_mcdi_req_t req; 590 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_VIS_IN_LEN, 591 MC_CMD_ALLOC_VIS_EXT_OUT_LEN); 592 efx_rc_t rc; 593 594 if (vi_countp == NULL) { 595 rc = EINVAL; 596 goto fail1; 597 } 598 599 req.emr_cmd = MC_CMD_ALLOC_VIS; 600 req.emr_in_buf = payload; 601 req.emr_in_length = MC_CMD_ALLOC_VIS_IN_LEN; 602 req.emr_out_buf = payload; 603 req.emr_out_length = MC_CMD_ALLOC_VIS_EXT_OUT_LEN; 604 605 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MIN_VI_COUNT, min_vi_count); 606 MCDI_IN_SET_DWORD(req, ALLOC_VIS_IN_MAX_VI_COUNT, max_vi_count); 607 608 efx_mcdi_execute(enp, &req); 609 610 if (req.emr_rc != 0) { 611 rc = req.emr_rc; 612 goto fail2; 613 } 614 615 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_OUT_LEN) { 616 rc = EMSGSIZE; 617 goto fail3; 618 } 619 620 *vi_basep = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_BASE); 621 *vi_countp = MCDI_OUT_DWORD(req, ALLOC_VIS_OUT_VI_COUNT); 622 623 /* Report VI_SHIFT if available (always zero for Huntington) */ 624 if (req.emr_out_length_used < MC_CMD_ALLOC_VIS_EXT_OUT_LEN) 625 *vi_shiftp = 0; 626 else 627 *vi_shiftp = MCDI_OUT_DWORD(req, ALLOC_VIS_EXT_OUT_VI_SHIFT); 628 629 return (0); 630 631 fail3: 632 EFSYS_PROBE(fail3); 633 fail2: 634 EFSYS_PROBE(fail2); 635 fail1: 636 EFSYS_PROBE1(fail1, efx_rc_t, rc); 637 638 return (rc); 639 } 640 641 642 __checkReturn efx_rc_t 643 efx_mcdi_free_vis( 644 __in efx_nic_t *enp) 645 { 646 efx_mcdi_req_t req; 647 efx_rc_t rc; 648 649 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_IN_LEN == 0); 650 EFX_STATIC_ASSERT(MC_CMD_FREE_VIS_OUT_LEN == 0); 651 652 req.emr_cmd = MC_CMD_FREE_VIS; 653 req.emr_in_buf = NULL; 654 req.emr_in_length = 0; 655 req.emr_out_buf = NULL; 656 req.emr_out_length = 0; 657 658 efx_mcdi_execute_quiet(enp, &req); 659 660 /* Ignore ELREADY (no allocated VIs, so nothing to free) */ 661 if ((req.emr_rc != 0) && (req.emr_rc != EALREADY)) { 662 rc = req.emr_rc; 663 goto fail1; 664 } 665 666 return (0); 667 668 fail1: 669 EFSYS_PROBE1(fail1, efx_rc_t, rc); 670 671 return (rc); 672 } 673 674 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */ 675 676 #if EFX_OPTS_EF10() 677 678 static __checkReturn efx_rc_t 679 efx_mcdi_alloc_piobuf( 680 __in efx_nic_t *enp, 681 __out efx_piobuf_handle_t *handlep) 682 { 683 efx_mcdi_req_t req; 684 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ALLOC_PIOBUF_IN_LEN, 685 MC_CMD_ALLOC_PIOBUF_OUT_LEN); 686 efx_rc_t rc; 687 688 if (handlep == NULL) { 689 rc = EINVAL; 690 goto fail1; 691 } 692 693 req.emr_cmd = MC_CMD_ALLOC_PIOBUF; 694 req.emr_in_buf = payload; 695 req.emr_in_length = MC_CMD_ALLOC_PIOBUF_IN_LEN; 696 req.emr_out_buf = payload; 697 req.emr_out_length = MC_CMD_ALLOC_PIOBUF_OUT_LEN; 698 699 efx_mcdi_execute_quiet(enp, &req); 700 701 if (req.emr_rc != 0) { 702 rc = req.emr_rc; 703 goto fail2; 704 } 705 706 if 
(req.emr_out_length_used < MC_CMD_ALLOC_PIOBUF_OUT_LEN) { 707 rc = EMSGSIZE; 708 goto fail3; 709 } 710 711 *handlep = MCDI_OUT_DWORD(req, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE); 712 713 return (0); 714 715 fail3: 716 EFSYS_PROBE(fail3); 717 fail2: 718 EFSYS_PROBE(fail2); 719 fail1: 720 EFSYS_PROBE1(fail1, efx_rc_t, rc); 721 722 return (rc); 723 } 724 725 static __checkReturn efx_rc_t 726 efx_mcdi_free_piobuf( 727 __in efx_nic_t *enp, 728 __in efx_piobuf_handle_t handle) 729 { 730 efx_mcdi_req_t req; 731 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FREE_PIOBUF_IN_LEN, 732 MC_CMD_FREE_PIOBUF_OUT_LEN); 733 efx_rc_t rc; 734 735 req.emr_cmd = MC_CMD_FREE_PIOBUF; 736 req.emr_in_buf = payload; 737 req.emr_in_length = MC_CMD_FREE_PIOBUF_IN_LEN; 738 req.emr_out_buf = payload; 739 req.emr_out_length = MC_CMD_FREE_PIOBUF_OUT_LEN; 740 741 MCDI_IN_SET_DWORD(req, FREE_PIOBUF_IN_PIOBUF_HANDLE, handle); 742 743 efx_mcdi_execute_quiet(enp, &req); 744 745 if (req.emr_rc != 0) { 746 rc = req.emr_rc; 747 goto fail1; 748 } 749 750 return (0); 751 752 fail1: 753 EFSYS_PROBE1(fail1, efx_rc_t, rc); 754 755 return (rc); 756 } 757 758 static __checkReturn efx_rc_t 759 efx_mcdi_link_piobuf( 760 __in efx_nic_t *enp, 761 __in uint32_t vi_index, 762 __in efx_piobuf_handle_t handle) 763 { 764 efx_mcdi_req_t req; 765 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LINK_PIOBUF_IN_LEN, 766 MC_CMD_LINK_PIOBUF_OUT_LEN); 767 efx_rc_t rc; 768 769 req.emr_cmd = MC_CMD_LINK_PIOBUF; 770 req.emr_in_buf = payload; 771 req.emr_in_length = MC_CMD_LINK_PIOBUF_IN_LEN; 772 req.emr_out_buf = payload; 773 req.emr_out_length = MC_CMD_LINK_PIOBUF_OUT_LEN; 774 775 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_PIOBUF_HANDLE, handle); 776 MCDI_IN_SET_DWORD(req, LINK_PIOBUF_IN_TXQ_INSTANCE, vi_index); 777 778 efx_mcdi_execute(enp, &req); 779 780 if (req.emr_rc != 0) { 781 rc = req.emr_rc; 782 goto fail1; 783 } 784 785 return (0); 786 787 fail1: 788 EFSYS_PROBE1(fail1, efx_rc_t, rc); 789 790 return (rc); 791 } 792 793 static __checkReturn efx_rc_t 794 efx_mcdi_unlink_piobuf( 795 __in efx_nic_t *enp, 796 __in uint32_t vi_index) 797 { 798 efx_mcdi_req_t req; 799 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_UNLINK_PIOBUF_IN_LEN, 800 MC_CMD_UNLINK_PIOBUF_OUT_LEN); 801 efx_rc_t rc; 802 803 req.emr_cmd = MC_CMD_UNLINK_PIOBUF; 804 req.emr_in_buf = payload; 805 req.emr_in_length = MC_CMD_UNLINK_PIOBUF_IN_LEN; 806 req.emr_out_buf = payload; 807 req.emr_out_length = MC_CMD_UNLINK_PIOBUF_OUT_LEN; 808 809 MCDI_IN_SET_DWORD(req, UNLINK_PIOBUF_IN_TXQ_INSTANCE, vi_index); 810 811 efx_mcdi_execute_quiet(enp, &req); 812 813 if (req.emr_rc != 0) { 814 rc = req.emr_rc; 815 goto fail1; 816 } 817 818 return (0); 819 820 fail1: 821 EFSYS_PROBE1(fail1, efx_rc_t, rc); 822 823 return (rc); 824 } 825 826 static void 827 ef10_nic_alloc_piobufs( 828 __in efx_nic_t *enp, 829 __in uint32_t max_piobuf_count) 830 { 831 efx_piobuf_handle_t *handlep; 832 unsigned int i; 833 834 EFSYS_ASSERT3U(max_piobuf_count, <=, 835 EFX_ARRAY_SIZE(enp->en_arch.ef10.ena_piobuf_handle)); 836 837 enp->en_arch.ef10.ena_piobuf_count = 0; 838 839 for (i = 0; i < max_piobuf_count; i++) { 840 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i]; 841 842 if (efx_mcdi_alloc_piobuf(enp, handlep) != 0) 843 goto fail1; 844 845 enp->en_arch.ef10.ena_pio_alloc_map[i] = 0; 846 enp->en_arch.ef10.ena_piobuf_count++; 847 } 848 849 return; 850 851 fail1: 852 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) { 853 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i]; 854 855 (void) efx_mcdi_free_piobuf(enp, *handlep); 856 *handlep = 
EFX_PIOBUF_HANDLE_INVALID; 857 } 858 enp->en_arch.ef10.ena_piobuf_count = 0; 859 } 860 861 862 static void 863 ef10_nic_free_piobufs( 864 __in efx_nic_t *enp) 865 { 866 efx_piobuf_handle_t *handlep; 867 unsigned int i; 868 869 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) { 870 handlep = &enp->en_arch.ef10.ena_piobuf_handle[i]; 871 872 (void) efx_mcdi_free_piobuf(enp, *handlep); 873 *handlep = EFX_PIOBUF_HANDLE_INVALID; 874 } 875 enp->en_arch.ef10.ena_piobuf_count = 0; 876 } 877 878 /* Sub-allocate a block from a piobuf */ 879 __checkReturn efx_rc_t 880 ef10_nic_pio_alloc( 881 __inout efx_nic_t *enp, 882 __out uint32_t *bufnump, 883 __out efx_piobuf_handle_t *handlep, 884 __out uint32_t *blknump, 885 __out uint32_t *offsetp, 886 __out size_t *sizep) 887 { 888 efx_nic_cfg_t *encp = &enp->en_nic_cfg; 889 efx_drv_cfg_t *edcp = &enp->en_drv_cfg; 890 uint32_t blk_per_buf; 891 uint32_t buf, blk; 892 efx_rc_t rc; 893 894 EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp)); 895 EFSYS_ASSERT(bufnump); 896 EFSYS_ASSERT(handlep); 897 EFSYS_ASSERT(blknump); 898 EFSYS_ASSERT(offsetp); 899 EFSYS_ASSERT(sizep); 900 901 if ((edcp->edc_pio_alloc_size == 0) || 902 (enp->en_arch.ef10.ena_piobuf_count == 0)) { 903 rc = ENOMEM; 904 goto fail1; 905 } 906 blk_per_buf = encp->enc_piobuf_size / edcp->edc_pio_alloc_size; 907 908 for (buf = 0; buf < enp->en_arch.ef10.ena_piobuf_count; buf++) { 909 uint32_t *map = &enp->en_arch.ef10.ena_pio_alloc_map[buf]; 910 911 if (~(*map) == 0) 912 continue; 913 914 EFSYS_ASSERT3U(blk_per_buf, <=, (8 * sizeof (*map))); 915 for (blk = 0; blk < blk_per_buf; blk++) { 916 if ((*map & (1u << blk)) == 0) { 917 *map |= (1u << blk); 918 goto done; 919 } 920 } 921 } 922 rc = ENOMEM; 923 goto fail2; 924 925 done: 926 *handlep = enp->en_arch.ef10.ena_piobuf_handle[buf]; 927 *bufnump = buf; 928 *blknump = blk; 929 *sizep = edcp->edc_pio_alloc_size; 930 *offsetp = blk * (*sizep); 931 932 return (0); 933 934 fail2: 935 EFSYS_PROBE(fail2); 936 fail1: 937 EFSYS_PROBE1(fail1, efx_rc_t, rc); 938 939 return (rc); 940 } 941 942 /* Free a piobuf sub-allocated block */ 943 __checkReturn efx_rc_t 944 ef10_nic_pio_free( 945 __inout efx_nic_t *enp, 946 __in uint32_t bufnum, 947 __in uint32_t blknum) 948 { 949 uint32_t *map; 950 efx_rc_t rc; 951 952 if ((bufnum >= enp->en_arch.ef10.ena_piobuf_count) || 953 (blknum >= (8 * sizeof (*map)))) { 954 rc = EINVAL; 955 goto fail1; 956 } 957 958 map = &enp->en_arch.ef10.ena_pio_alloc_map[bufnum]; 959 if ((*map & (1u << blknum)) == 0) { 960 rc = ENOENT; 961 goto fail2; 962 } 963 *map &= ~(1u << blknum); 964 965 return (0); 966 967 fail2: 968 EFSYS_PROBE(fail2); 969 fail1: 970 EFSYS_PROBE1(fail1, efx_rc_t, rc); 971 972 return (rc); 973 } 974 975 __checkReturn efx_rc_t 976 ef10_nic_pio_link( 977 __inout efx_nic_t *enp, 978 __in uint32_t vi_index, 979 __in efx_piobuf_handle_t handle) 980 { 981 return (efx_mcdi_link_piobuf(enp, vi_index, handle)); 982 } 983 984 __checkReturn efx_rc_t 985 ef10_nic_pio_unlink( 986 __inout efx_nic_t *enp, 987 __in uint32_t vi_index) 988 { 989 return (efx_mcdi_unlink_piobuf(enp, vi_index)); 990 } 991 992 #endif /* EFX_OPTS_EF10() */ 993 994 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() 995 996 static __checkReturn efx_rc_t 997 ef10_mcdi_get_pf_count( 998 __in efx_nic_t *enp, 999 __out uint32_t *pf_countp) 1000 { 1001 efx_mcdi_req_t req; 1002 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PF_COUNT_IN_LEN, 1003 MC_CMD_GET_PF_COUNT_OUT_LEN); 1004 efx_rc_t rc; 1005 1006 req.emr_cmd = MC_CMD_GET_PF_COUNT; 1007 req.emr_in_buf = payload; 1008 
req.emr_in_length = MC_CMD_GET_PF_COUNT_IN_LEN; 1009 req.emr_out_buf = payload; 1010 req.emr_out_length = MC_CMD_GET_PF_COUNT_OUT_LEN; 1011 1012 efx_mcdi_execute(enp, &req); 1013 1014 if (req.emr_rc != 0) { 1015 rc = req.emr_rc; 1016 goto fail1; 1017 } 1018 1019 if (req.emr_out_length_used < MC_CMD_GET_PF_COUNT_OUT_LEN) { 1020 rc = EMSGSIZE; 1021 goto fail2; 1022 } 1023 1024 *pf_countp = *MCDI_OUT(req, uint8_t, 1025 MC_CMD_GET_PF_COUNT_OUT_PF_COUNT_OFST); 1026 1027 EFSYS_ASSERT(*pf_countp != 0); 1028 1029 return (0); 1030 1031 fail2: 1032 EFSYS_PROBE(fail2); 1033 fail1: 1034 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1035 1036 return (rc); 1037 } 1038 1039 static __checkReturn efx_rc_t 1040 ef10_get_datapath_caps( 1041 __in efx_nic_t *enp) 1042 { 1043 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 1044 efx_mcdi_req_t req; 1045 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN, 1046 MC_CMD_GET_CAPABILITIES_V7_OUT_LEN); 1047 efx_rc_t rc; 1048 1049 req.emr_cmd = MC_CMD_GET_CAPABILITIES; 1050 req.emr_in_buf = payload; 1051 req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN; 1052 req.emr_out_buf = payload; 1053 req.emr_out_length = MC_CMD_GET_CAPABILITIES_V7_OUT_LEN; 1054 1055 efx_mcdi_execute_quiet(enp, &req); 1056 1057 if (req.emr_rc != 0) { 1058 rc = req.emr_rc; 1059 goto fail1; 1060 } 1061 1062 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) { 1063 rc = EMSGSIZE; 1064 goto fail2; 1065 } 1066 1067 #define CAP_FLAGS1(_req, _flag) \ 1068 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_OUT_FLAGS1) & \ 1069 (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN))) 1070 1071 #define CAP_FLAGS2(_req, _flag) \ 1072 (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) && \ 1073 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V2_OUT_FLAGS2) & \ 1074 (1u << (MC_CMD_GET_CAPABILITIES_V2_OUT_ ## _flag ## _LBN)))) 1075 1076 #define CAP_FLAGS3(_req, _flag) \ 1077 (((_req).emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V7_OUT_LEN) && \ 1078 (MCDI_OUT_DWORD((_req), GET_CAPABILITIES_V7_OUT_FLAGS3) & \ 1079 (1u << (MC_CMD_GET_CAPABILITIES_V7_OUT_ ## _flag ## _LBN)))) 1080 1081 /* Check if RXDP firmware inserts 14 byte prefix */ 1082 if (CAP_FLAGS1(req, RX_PREFIX_LEN_14)) 1083 encp->enc_rx_prefix_size = 14; 1084 else 1085 encp->enc_rx_prefix_size = 0; 1086 1087 #if EFSYS_OPT_RX_SCALE 1088 /* Check if the firmware supports additional RSS modes */ 1089 if (CAP_FLAGS1(req, ADDITIONAL_RSS_MODES)) 1090 encp->enc_rx_scale_additional_modes_supported = B_TRUE; 1091 else 1092 encp->enc_rx_scale_additional_modes_supported = B_FALSE; 1093 #endif /* EFSYS_OPT_RX_SCALE */ 1094 1095 /* Check if the firmware supports TSO */ 1096 if (CAP_FLAGS1(req, TX_TSO)) 1097 encp->enc_fw_assisted_tso_enabled = B_TRUE; 1098 else 1099 encp->enc_fw_assisted_tso_enabled = B_FALSE; 1100 1101 /* Check if the firmware supports FATSOv2 */ 1102 if (CAP_FLAGS2(req, TX_TSO_V2)) { 1103 encp->enc_fw_assisted_tso_v2_enabled = B_TRUE; 1104 encp->enc_fw_assisted_tso_v2_n_contexts = MCDI_OUT_WORD(req, 1105 GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS); 1106 } else { 1107 encp->enc_fw_assisted_tso_v2_enabled = B_FALSE; 1108 encp->enc_fw_assisted_tso_v2_n_contexts = 0; 1109 } 1110 1111 /* Check if the firmware supports FATSOv2 encap */ 1112 if (CAP_FLAGS2(req, TX_TSO_V2_ENCAP)) 1113 encp->enc_fw_assisted_tso_v2_encap_enabled = B_TRUE; 1114 else 1115 encp->enc_fw_assisted_tso_v2_encap_enabled = B_FALSE; 1116 1117 /* Check if TSOv3 is supported */ 1118 if (CAP_FLAGS2(req, TX_TSO_V3)) 1119 encp->enc_tso_v3_enabled = B_TRUE; 1120 else 1121 
encp->enc_tso_v3_enabled = B_FALSE; 1122 1123 /* Check if the firmware has vadapter/vport/vswitch support */ 1124 if (CAP_FLAGS1(req, EVB)) 1125 encp->enc_datapath_cap_evb = B_TRUE; 1126 else 1127 encp->enc_datapath_cap_evb = B_FALSE; 1128 1129 /* Check if the firmware supports vport reconfiguration */ 1130 if (CAP_FLAGS1(req, VPORT_RECONFIGURE)) 1131 encp->enc_vport_reconfigure_supported = B_TRUE; 1132 else 1133 encp->enc_vport_reconfigure_supported = B_FALSE; 1134 1135 /* Check if the firmware supports VLAN insertion */ 1136 if (CAP_FLAGS1(req, TX_VLAN_INSERTION)) 1137 encp->enc_hw_tx_insert_vlan_enabled = B_TRUE; 1138 else 1139 encp->enc_hw_tx_insert_vlan_enabled = B_FALSE; 1140 1141 /* Check if the firmware supports RX event batching */ 1142 if (CAP_FLAGS1(req, RX_BATCHING)) 1143 encp->enc_rx_batching_enabled = B_TRUE; 1144 else 1145 encp->enc_rx_batching_enabled = B_FALSE; 1146 1147 /* 1148 * Even if batching isn't reported as supported, we may still get 1149 * batched events (see bug61153). 1150 */ 1151 encp->enc_rx_batch_max = 16; 1152 1153 /* Check if the firmware supports disabling scatter on RXQs */ 1154 if (CAP_FLAGS1(req, RX_DISABLE_SCATTER)) 1155 encp->enc_rx_disable_scatter_supported = B_TRUE; 1156 else 1157 encp->enc_rx_disable_scatter_supported = B_FALSE; 1158 1159 /* No limit on maximum number of Rx scatter elements per packet. */ 1160 encp->enc_rx_scatter_max = -1; 1161 1162 /* Check if the firmware supports packed stream mode */ 1163 if (CAP_FLAGS1(req, RX_PACKED_STREAM)) 1164 encp->enc_rx_packed_stream_supported = B_TRUE; 1165 else 1166 encp->enc_rx_packed_stream_supported = B_FALSE; 1167 1168 /* 1169 * Check if the firmware supports configurable buffer sizes 1170 * for packed stream mode (otherwise buffer size is 1Mbyte) 1171 */ 1172 if (CAP_FLAGS1(req, RX_PACKED_STREAM_VAR_BUFFERS)) 1173 encp->enc_rx_var_packed_stream_supported = B_TRUE; 1174 else 1175 encp->enc_rx_var_packed_stream_supported = B_FALSE; 1176 1177 /* Check if the firmware supports equal stride super-buffer mode */ 1178 if (CAP_FLAGS2(req, EQUAL_STRIDE_SUPER_BUFFER)) 1179 encp->enc_rx_es_super_buffer_supported = B_TRUE; 1180 else 1181 encp->enc_rx_es_super_buffer_supported = B_FALSE; 1182 1183 /* Check if the firmware supports FW subvariant w/o Tx checksumming */ 1184 if (CAP_FLAGS2(req, FW_SUBVARIANT_NO_TX_CSUM)) 1185 encp->enc_fw_subvariant_no_tx_csum_supported = B_TRUE; 1186 else 1187 encp->enc_fw_subvariant_no_tx_csum_supported = B_FALSE; 1188 1189 /* Check if the firmware supports set mac with running filters */ 1190 if (CAP_FLAGS1(req, VADAPTOR_PERMIT_SET_MAC_WHEN_FILTERS_INSTALLED)) 1191 encp->enc_allow_set_mac_with_installed_filters = B_TRUE; 1192 else 1193 encp->enc_allow_set_mac_with_installed_filters = B_FALSE; 1194 1195 /* 1196 * Check if firmware supports the extended MC_CMD_SET_MAC, which allows 1197 * specifying which parameters to configure. 1198 */ 1199 if (CAP_FLAGS1(req, SET_MAC_ENHANCED)) 1200 encp->enc_enhanced_set_mac_supported = B_TRUE; 1201 else 1202 encp->enc_enhanced_set_mac_supported = B_FALSE; 1203 1204 /* 1205 * Check if firmware supports version 2 of MC_CMD_INIT_EVQ, which allows 1206 * us to let the firmware choose the settings to use on an EVQ. 1207 */ 1208 if (CAP_FLAGS2(req, INIT_EVQ_V2)) 1209 encp->enc_init_evq_v2_supported = B_TRUE; 1210 else 1211 encp->enc_init_evq_v2_supported = B_FALSE; 1212 1213 /* 1214 * Check if firmware supports extended width event queues, which have 1215 * a different event descriptor layout. 
1216 */ 1217 if (CAP_FLAGS3(req, EXTENDED_WIDTH_EVQS_SUPPORTED)) 1218 encp->enc_init_evq_extended_width_supported = B_TRUE; 1219 else 1220 encp->enc_init_evq_extended_width_supported = B_FALSE; 1221 1222 /* 1223 * Check if the NO_CONT_EV mode for RX events is supported. 1224 */ 1225 if (CAP_FLAGS2(req, INIT_RXQ_NO_CONT_EV)) 1226 encp->enc_no_cont_ev_mode_supported = B_TRUE; 1227 else 1228 encp->enc_no_cont_ev_mode_supported = B_FALSE; 1229 1230 /* 1231 * Check if buffer size may and must be specified on INIT_RXQ. 1232 * It may be always specified to efx_rx_qcreate(), but will be 1233 * just kept libefx internal if MCDI does not support it. 1234 */ 1235 if (CAP_FLAGS2(req, INIT_RXQ_WITH_BUFFER_SIZE)) 1236 encp->enc_init_rxq_with_buffer_size = B_TRUE; 1237 else 1238 encp->enc_init_rxq_with_buffer_size = B_FALSE; 1239 1240 /* 1241 * Check if firmware-verified NVRAM updates must be used. 1242 * 1243 * The firmware trusted installer requires all NVRAM updates to use 1244 * version 2 of MC_CMD_NVRAM_UPDATE_START (to enable verified update) 1245 * and version 2 of MC_CMD_NVRAM_UPDATE_FINISH (to verify the updated 1246 * partition and report the result). 1247 */ 1248 if (CAP_FLAGS2(req, NVRAM_UPDATE_REPORT_VERIFY_RESULT)) 1249 encp->enc_nvram_update_verify_result_supported = B_TRUE; 1250 else 1251 encp->enc_nvram_update_verify_result_supported = B_FALSE; 1252 1253 if (CAP_FLAGS2(req, NVRAM_UPDATE_POLL_VERIFY_RESULT)) 1254 encp->enc_nvram_update_poll_verify_result_supported = B_TRUE; 1255 else 1256 encp->enc_nvram_update_poll_verify_result_supported = B_FALSE; 1257 1258 /* 1259 * Check if firmware update via the BUNDLE partition is supported 1260 */ 1261 if (CAP_FLAGS2(req, BUNDLE_UPDATE)) 1262 encp->enc_nvram_bundle_update_supported = B_TRUE; 1263 else 1264 encp->enc_nvram_bundle_update_supported = B_FALSE; 1265 1266 /* 1267 * Check if firmware provides packet memory and Rx datapath 1268 * counters. 1269 */ 1270 if (CAP_FLAGS1(req, PM_AND_RXDP_COUNTERS)) 1271 encp->enc_pm_and_rxdp_counters = B_TRUE; 1272 else 1273 encp->enc_pm_and_rxdp_counters = B_FALSE; 1274 1275 /* 1276 * Check if the 40G MAC hardware is capable of reporting 1277 * statistics for Tx size bins. 1278 */ 1279 if (CAP_FLAGS2(req, MAC_STATS_40G_TX_SIZE_BINS)) 1280 encp->enc_mac_stats_40g_tx_size_bins = B_TRUE; 1281 else 1282 encp->enc_mac_stats_40g_tx_size_bins = B_FALSE; 1283 1284 /* 1285 * Check if firmware supports VXLAN and NVGRE tunnels. 1286 * The capability indicates Geneve protocol support as well. 1287 */ 1288 if (CAP_FLAGS1(req, VXLAN_NVGRE)) { 1289 encp->enc_tunnel_encapsulations_supported = 1290 (1u << EFX_TUNNEL_PROTOCOL_VXLAN) | 1291 (1u << EFX_TUNNEL_PROTOCOL_GENEVE) | 1292 (1u << EFX_TUNNEL_PROTOCOL_NVGRE); 1293 1294 EFX_STATIC_ASSERT(EFX_TUNNEL_MAXNENTRIES == 1295 MC_CMD_SET_TUNNEL_ENCAP_UDP_PORTS_IN_ENTRIES_MAXNUM); 1296 encp->enc_tunnel_config_udp_entries_max = 1297 EFX_TUNNEL_MAXNENTRIES; 1298 } else { 1299 encp->enc_tunnel_config_udp_entries_max = 0; 1300 } 1301 1302 /* 1303 * Check if firmware reports the VI window mode. 1304 * Medford2 has a variable VI window size (8K, 16K or 64K). 1305 * Medford and Huntington have a fixed 8K VI window size. 
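 * For example, the shift is just log2 of the window size (8K -> 13,
 * 64K -> 16); the register window for VI N starts at byte offset
 * (N << enc_vi_window_shift) in the function's BAR, which is where its
 * TX/RX doorbells and event queue read pointer registers live.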
1306 */ 1307 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V3_OUT_LEN) { 1308 uint8_t mode = 1309 MCDI_OUT_BYTE(req, GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE); 1310 1311 switch (mode) { 1312 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K: 1313 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K; 1314 break; 1315 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K: 1316 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_16K; 1317 break; 1318 case MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K: 1319 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_64K; 1320 break; 1321 default: 1322 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID; 1323 break; 1324 } 1325 } else if ((enp->en_family == EFX_FAMILY_HUNTINGTON) || 1326 (enp->en_family == EFX_FAMILY_MEDFORD)) { 1327 /* Huntington and Medford have fixed 8K window size */ 1328 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_8K; 1329 } else { 1330 encp->enc_vi_window_shift = EFX_VI_WINDOW_SHIFT_INVALID; 1331 } 1332 1333 /* Check if firmware supports extended MAC stats. */ 1334 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) { 1335 /* Extended stats buffer supported */ 1336 encp->enc_mac_stats_nstats = MCDI_OUT_WORD(req, 1337 GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS); 1338 } else { 1339 /* Use Siena-compatible legacy MAC stats */ 1340 encp->enc_mac_stats_nstats = MC_CMD_MAC_NSTATS; 1341 } 1342 1343 if (encp->enc_mac_stats_nstats >= MC_CMD_MAC_NSTATS_V2) 1344 encp->enc_fec_counters = B_TRUE; 1345 else 1346 encp->enc_fec_counters = B_FALSE; 1347 1348 /* Check if the firmware provides head-of-line blocking counters */ 1349 if (CAP_FLAGS2(req, RXDP_HLB_IDLE)) 1350 encp->enc_hlb_counters = B_TRUE; 1351 else 1352 encp->enc_hlb_counters = B_FALSE; 1353 1354 #if EFSYS_OPT_RX_SCALE 1355 if (CAP_FLAGS1(req, RX_RSS_LIMITED)) { 1356 /* Only one exclusive RSS context is available per port. */ 1357 encp->enc_rx_scale_max_exclusive_contexts = 1; 1358 1359 switch (enp->en_family) { 1360 case EFX_FAMILY_MEDFORD2: 1361 encp->enc_rx_scale_hash_alg_mask = 1362 (1U << EFX_RX_HASHALG_TOEPLITZ); 1363 break; 1364 1365 case EFX_FAMILY_MEDFORD: 1366 case EFX_FAMILY_HUNTINGTON: 1367 /* 1368 * Packed stream firmware variant maintains a 1369 * non-standard algorithm for hash computation. 1370 * It implies explicit XORing together 1371 * source + destination IP addresses (or last 1372 * four bytes in the case of IPv6) and using the 1373 * resulting value as the input to a Toeplitz hash. 1374 */ 1375 encp->enc_rx_scale_hash_alg_mask = 1376 (1U << EFX_RX_HASHALG_PACKED_STREAM); 1377 break; 1378 1379 default: 1380 rc = EINVAL; 1381 goto fail3; 1382 } 1383 1384 /* Port numbers cannot contribute to the hash value */ 1385 encp->enc_rx_scale_l4_hash_supported = B_FALSE; 1386 } else { 1387 /* 1388 * Maximum number of exclusive RSS contexts. 1389 * EF10 hardware supports 64 in total, but 6 are reserved 1390 * for shared contexts. They are a global resource so 1391 * not all may be available. 1392 */ 1393 encp->enc_rx_scale_max_exclusive_contexts = 64 - 6; 1394 1395 encp->enc_rx_scale_hash_alg_mask = 1396 (1U << EFX_RX_HASHALG_TOEPLITZ); 1397 1398 /* 1399 * It is possible to use port numbers as 1400 * the input data for hash computation. 
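 * That is, TCP/UDP source and destination ports can be included in the
 * Toeplitz hash input alongside the IP addresses, giving a 4-tuple RSS
 * hash rather than an address-only 2-tuple hash.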
1401 */ 1402 encp->enc_rx_scale_l4_hash_supported = B_TRUE; 1403 } 1404 #endif /* EFSYS_OPT_RX_SCALE */ 1405 1406 /* Check if the firmware supports "FLAG" and "MARK" filter actions */ 1407 if (CAP_FLAGS2(req, FILTER_ACTION_FLAG)) 1408 encp->enc_filter_action_flag_supported = B_TRUE; 1409 else 1410 encp->enc_filter_action_flag_supported = B_FALSE; 1411 1412 if (CAP_FLAGS2(req, FILTER_ACTION_MARK)) 1413 encp->enc_filter_action_mark_supported = B_TRUE; 1414 else 1415 encp->enc_filter_action_mark_supported = B_FALSE; 1416 1417 /* Get maximum supported value for "MARK" filter action */ 1418 if (req.emr_out_length_used >= MC_CMD_GET_CAPABILITIES_V5_OUT_LEN) 1419 encp->enc_filter_action_mark_max = MCDI_OUT_DWORD(req, 1420 GET_CAPABILITIES_V5_OUT_FILTER_ACTION_MARK_MAX); 1421 else 1422 encp->enc_filter_action_mark_max = 0; 1423 1424 #undef CAP_FLAGS1 1425 #undef CAP_FLAGS2 1426 1427 return (0); 1428 1429 #if EFSYS_OPT_RX_SCALE 1430 fail3: 1431 EFSYS_PROBE(fail3); 1432 #endif /* EFSYS_OPT_RX_SCALE */ 1433 fail2: 1434 EFSYS_PROBE(fail2); 1435 fail1: 1436 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1437 1438 return (rc); 1439 } 1440 1441 1442 #define EF10_LEGACY_PF_PRIVILEGE_MASK \ 1443 (MC_CMD_PRIVILEGE_MASK_IN_GRP_ADMIN | \ 1444 MC_CMD_PRIVILEGE_MASK_IN_GRP_LINK | \ 1445 MC_CMD_PRIVILEGE_MASK_IN_GRP_ONLOAD | \ 1446 MC_CMD_PRIVILEGE_MASK_IN_GRP_PTP | \ 1447 MC_CMD_PRIVILEGE_MASK_IN_GRP_INSECURE_FILTERS | \ 1448 MC_CMD_PRIVILEGE_MASK_IN_GRP_MAC_SPOOFING | \ 1449 MC_CMD_PRIVILEGE_MASK_IN_GRP_UNICAST | \ 1450 MC_CMD_PRIVILEGE_MASK_IN_GRP_MULTICAST | \ 1451 MC_CMD_PRIVILEGE_MASK_IN_GRP_BROADCAST | \ 1452 MC_CMD_PRIVILEGE_MASK_IN_GRP_ALL_MULTICAST | \ 1453 MC_CMD_PRIVILEGE_MASK_IN_GRP_PROMISCUOUS) 1454 1455 #define EF10_LEGACY_VF_PRIVILEGE_MASK 0 1456 1457 1458 __checkReturn efx_rc_t 1459 ef10_get_privilege_mask( 1460 __in efx_nic_t *enp, 1461 __out uint32_t *maskp) 1462 { 1463 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 1464 uint32_t mask; 1465 efx_rc_t rc; 1466 1467 if ((rc = efx_mcdi_privilege_mask(enp, encp->enc_pf, encp->enc_vf, 1468 &mask)) != 0) { 1469 if (rc != ENOTSUP) 1470 goto fail1; 1471 1472 /* Fallback for old firmware without privilege mask support */ 1473 if (EFX_PCI_FUNCTION_IS_PF(encp)) { 1474 /* Assume PF has admin privilege */ 1475 mask = EF10_LEGACY_PF_PRIVILEGE_MASK; 1476 } else { 1477 /* VF is always unprivileged by default */ 1478 mask = EF10_LEGACY_VF_PRIVILEGE_MASK; 1479 } 1480 } 1481 1482 *maskp = mask; 1483 1484 return (0); 1485 1486 fail1: 1487 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1488 1489 return (rc); 1490 } 1491 1492 1493 #define EFX_EXT_PORT_MAX 4 1494 #define EFX_EXT_PORT_NA 0xFF 1495 1496 /* 1497 * Table of mapping schemes from port number to external number. 1498 * 1499 * Each port number ultimately corresponds to a connector: either as part of 1500 * a cable assembly attached to a module inserted in an SFP+/QSFP+ cage on 1501 * the board, or fixed to the board (e.g. 10GBASE-T magjack on SFN5121T 1502 * "Salina"). In general: 1503 * 1504 * Port number (0-based) 1505 * | 1506 * port mapping (n:1) 1507 * | 1508 * v 1509 * External port number (1-based) 1510 * | 1511 * fixed (1:1) or cable assembly (1:m) 1512 * | 1513 * v 1514 * Connector 1515 * 1516 * The external numbering refers to the cages or magjacks on the board, 1517 * as visibly annotated on the board or back panel. This table describes 1518 * how to determine which external cage/magjack corresponds to the port 1519 * numbers used by the driver. 
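 * For example, a Medford running in port mode 2x1_2x1 uses the
 * base_port map { 0, 2, NA, NA } from the table below: driver ports 0
 * and 1 resolve to external port 1 (the first cage), while driver ports
 * 2 and 3 resolve to external port 2 (the second cage).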
1520 * 1521 * The count of consecutive port numbers that map to each external number, 1522 * is determined by the chip family and the current port mode. 1523 * 1524 * For the Huntington family, the current port mode cannot be discovered, 1525 * but a single mapping is used by all modes for a given chip variant, 1526 * so the mapping used is instead the last match in the table to the full 1527 * set of port modes to which the NIC can be configured. Therefore the 1528 * ordering of entries in the mapping table is significant. 1529 */ 1530 static struct ef10_external_port_map_s { 1531 efx_family_t family; 1532 uint32_t modes_mask; 1533 uint8_t base_port[EFX_EXT_PORT_MAX]; 1534 } __ef10_external_port_mappings[] = { 1535 /* 1536 * Modes used by Huntington family controllers where each port 1537 * number maps to a separate cage. 1538 * SFN7x22F (Torino): 1539 * port 0 -> cage 1 1540 * port 1 -> cage 2 1541 * SFN7xx4F (Pavia): 1542 * port 0 -> cage 1 1543 * port 1 -> cage 2 1544 * port 2 -> cage 3 1545 * port 3 -> cage 4 1546 */ 1547 { 1548 EFX_FAMILY_HUNTINGTON, 1549 (1U << TLV_PORT_MODE_10G) | /* mode 0 */ 1550 (1U << TLV_PORT_MODE_10G_10G) | /* mode 2 */ 1551 (1U << TLV_PORT_MODE_10G_10G_10G_10G), /* mode 4 */ 1552 { 0, 1, 2, 3 } 1553 }, 1554 /* 1555 * Modes which for Huntington identify a chip variant where 2 1556 * adjacent port numbers map to each cage. 1557 * SFN7x42Q (Monza): 1558 * port 0 -> cage 1 1559 * port 1 -> cage 1 1560 * port 2 -> cage 2 1561 * port 3 -> cage 2 1562 */ 1563 { 1564 EFX_FAMILY_HUNTINGTON, 1565 (1U << TLV_PORT_MODE_40G) | /* mode 1 */ 1566 (1U << TLV_PORT_MODE_40G_40G) | /* mode 3 */ 1567 (1U << TLV_PORT_MODE_40G_10G_10G) | /* mode 6 */ 1568 (1U << TLV_PORT_MODE_10G_10G_40G), /* mode 7 */ 1569 { 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } 1570 }, 1571 /* 1572 * Modes that on Medford allocate each port number to a separate 1573 * cage. 1574 * port 0 -> cage 1 1575 * port 1 -> cage 2 1576 * port 2 -> cage 3 1577 * port 3 -> cage 4 1578 */ 1579 { 1580 EFX_FAMILY_MEDFORD, 1581 (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */ 1582 (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */ 1583 (1U << TLV_PORT_MODE_1x1_1x1), /* mode 2 */ 1584 { 0, 1, 2, 3 } 1585 }, 1586 /* 1587 * Modes that on Medford allocate 2 adjacent port numbers to each 1588 * cage. 1589 * port 0 -> cage 1 1590 * port 1 -> cage 1 1591 * port 2 -> cage 2 1592 * port 3 -> cage 2 1593 */ 1594 { 1595 EFX_FAMILY_MEDFORD, 1596 (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */ 1597 (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 5 */ 1598 (1U << TLV_PORT_MODE_1x4_2x1) | /* mode 6 */ 1599 (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */ 1600 /* Do not use 10G_10G_10G_10G_Q1_Q2 (see bug63270) */ 1601 (1U << TLV_PORT_MODE_10G_10G_10G_10G_Q1_Q2), /* mode 9 */ 1602 { 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } 1603 }, 1604 /* 1605 * Modes that on Medford allocate 4 adjacent port numbers to 1606 * cage 1. 1607 * port 0 -> cage 1 1608 * port 1 -> cage 1 1609 * port 2 -> cage 1 1610 * port 3 -> cage 1 1611 */ 1612 { 1613 EFX_FAMILY_MEDFORD, 1614 /* Do not use 10G_10G_10G_10G_Q1 (see bug63270) */ 1615 (1U << TLV_PORT_MODE_4x1_NA), /* mode 4 */ 1616 { 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } 1617 }, 1618 /* 1619 * Modes that on Medford allocate 4 adjacent port numbers to 1620 * cage 2. 
1621 * port 0 -> cage 2 1622 * port 1 -> cage 2 1623 * port 2 -> cage 2 1624 * port 3 -> cage 2 1625 */ 1626 { 1627 EFX_FAMILY_MEDFORD, 1628 (1U << TLV_PORT_MODE_NA_4x1), /* mode 8 */ 1629 { EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } 1630 }, 1631 /* 1632 * Modes that on Medford2 allocate each port number to a separate 1633 * cage. 1634 * port 0 -> cage 1 1635 * port 1 -> cage 2 1636 * port 2 -> cage 3 1637 * port 3 -> cage 4 1638 */ 1639 { 1640 EFX_FAMILY_MEDFORD2, 1641 (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */ 1642 (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */ 1643 (1U << TLV_PORT_MODE_1x1_1x1) | /* mode 2 */ 1644 (1U << TLV_PORT_MODE_1x4_1x4) | /* mode 3 */ 1645 (1U << TLV_PORT_MODE_1x2_NA) | /* mode 10 */ 1646 (1U << TLV_PORT_MODE_1x2_1x2) | /* mode 12 */ 1647 (1U << TLV_PORT_MODE_1x4_1x2) | /* mode 15 */ 1648 (1U << TLV_PORT_MODE_1x2_1x4), /* mode 16 */ 1649 { 0, 1, 2, 3 } 1650 }, 1651 /* 1652 * Modes that on Medford2 allocate 1 port to cage 1 and the rest 1653 * to cage 2. 1654 * port 0 -> cage 1 1655 * port 1 -> cage 2 1656 * port 2 -> cage 2 1657 */ 1658 { 1659 EFX_FAMILY_MEDFORD2, 1660 (1U << TLV_PORT_MODE_1x2_2x1) | /* mode 17 */ 1661 (1U << TLV_PORT_MODE_1x4_2x1), /* mode 6 */ 1662 { 0, 1, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } 1663 }, 1664 /* 1665 * Modes that on Medford2 allocate 2 adjacent port numbers to cage 1 1666 * and the rest to cage 2. 1667 * port 0 -> cage 1 1668 * port 1 -> cage 1 1669 * port 2 -> cage 2 1670 * port 3 -> cage 2 1671 */ 1672 { 1673 EFX_FAMILY_MEDFORD2, 1674 (1U << TLV_PORT_MODE_2x1_2x1) | /* mode 4 */ 1675 (1U << TLV_PORT_MODE_2x1_1x4) | /* mode 7 */ 1676 (1U << TLV_PORT_MODE_2x2_NA) | /* mode 13 */ 1677 (1U << TLV_PORT_MODE_2x1_1x2), /* mode 18 */ 1678 { 0, 2, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } 1679 }, 1680 /* 1681 * Modes that on Medford2 allocate up to 4 adjacent port numbers 1682 * to cage 1. 1683 * port 0 -> cage 1 1684 * port 1 -> cage 1 1685 * port 2 -> cage 1 1686 * port 3 -> cage 1 1687 */ 1688 { 1689 EFX_FAMILY_MEDFORD2, 1690 (1U << TLV_PORT_MODE_4x1_NA), /* mode 5 */ 1691 { 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } 1692 }, 1693 /* 1694 * Modes that on Medford2 allocate up to 4 adjacent port numbers 1695 * to cage 2. 1696 * port 0 -> cage 2 1697 * port 1 -> cage 2 1698 * port 2 -> cage 2 1699 * port 3 -> cage 2 1700 */ 1701 { 1702 EFX_FAMILY_MEDFORD2, 1703 (1U << TLV_PORT_MODE_NA_4x1) | /* mode 8 */ 1704 (1U << TLV_PORT_MODE_NA_1x2) | /* mode 11 */ 1705 (1U << TLV_PORT_MODE_NA_2x2), /* mode 14 */ 1706 { EFX_EXT_PORT_NA, 0, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } 1707 }, 1708 /* 1709 * Modes that on Riverhead allocate each port number to a separate 1710 * cage. 1711 * port 0 -> cage 1 1712 * port 1 -> cage 2 1713 */ 1714 { 1715 EFX_FAMILY_RIVERHEAD, 1716 (1U << TLV_PORT_MODE_1x1_NA) | /* mode 0 */ 1717 (1U << TLV_PORT_MODE_1x4_NA) | /* mode 1 */ 1718 (1U << TLV_PORT_MODE_1x1_1x1), /* mode 2 */ 1719 { 0, 1, EFX_EXT_PORT_NA, EFX_EXT_PORT_NA } 1720 }, 1721 }; 1722 1723 static __checkReturn efx_rc_t 1724 ef10_external_port_mapping( 1725 __in efx_nic_t *enp, 1726 __in uint32_t port, 1727 __out uint8_t *external_portp) 1728 { 1729 efx_rc_t rc; 1730 int i; 1731 uint32_t port_modes; 1732 uint32_t matches; 1733 uint32_t current; 1734 struct ef10_external_port_map_s *mapp = NULL; 1735 int ext_index = port; /* Default 1-1 mapping */ 1736 1737 if ((rc = efx_mcdi_get_port_modes(enp, &port_modes, &current, 1738 NULL)) != 0) { 1739 /* 1740 * No current port mode information (i.e.
Huntington) 1741 * - infer mapping from available modes 1742 */ 1743 if ((rc = efx_mcdi_get_port_modes(enp, 1744 &port_modes, NULL, NULL)) != 0) { 1745 /* 1746 * No port mode information available 1747 * - use default mapping 1748 */ 1749 goto out; 1750 } 1751 } else { 1752 /* Only need to scan the current mode */ 1753 port_modes = 1 << current; 1754 } 1755 1756 /* 1757 * Infer the internal port -> external number mapping from 1758 * the possible port modes for this NIC. 1759 */ 1760 for (i = 0; i < EFX_ARRAY_SIZE(__ef10_external_port_mappings); ++i) { 1761 struct ef10_external_port_map_s *eepmp = 1762 &__ef10_external_port_mappings[i]; 1763 if (eepmp->family != enp->en_family) 1764 continue; 1765 matches = (eepmp->modes_mask & port_modes); 1766 if (matches != 0) { 1767 /* 1768 * Some modes match. For some Huntington boards 1769 * there will be multiple matches. The mapping on the 1770 * last match is used. 1771 */ 1772 mapp = eepmp; 1773 port_modes &= ~matches; 1774 } 1775 } 1776 1777 if (port_modes != 0) { 1778 /* Some advertised modes are not supported */ 1779 rc = ENOTSUP; 1780 goto fail1; 1781 } 1782 1783 out: 1784 if (mapp != NULL) { 1785 /* 1786 * External ports are assigned a sequence of consecutive 1787 * port numbers, so find the one with the closest base_port. 1788 */ 1789 uint32_t delta = EFX_EXT_PORT_NA; 1790 1791 for (i = 0; i < EFX_EXT_PORT_MAX; i++) { 1792 uint32_t base = mapp->base_port[i]; 1793 if ((base != EFX_EXT_PORT_NA) && (base <= port)) { 1794 if ((port - base) < delta) { 1795 delta = (port - base); 1796 ext_index = i; 1797 } 1798 } 1799 } 1800 } 1801 *external_portp = (uint8_t)(ext_index + 1); 1802 1803 return (0); 1804 1805 fail1: 1806 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1807 1808 return (rc); 1809 } 1810 1811 __checkReturn efx_rc_t 1812 efx_mcdi_nic_board_cfg( 1813 __in efx_nic_t *enp) 1814 { 1815 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 1816 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 1817 ef10_link_state_t els; 1818 efx_port_t *epp = &(enp->en_port); 1819 uint32_t board_type = 0; 1820 uint32_t base, nvec; 1821 uint32_t port; 1822 uint32_t mask; 1823 uint32_t pf; 1824 uint32_t vf; 1825 uint8_t mac_addr[6] = { 0 }; 1826 efx_rc_t rc; 1827 1828 /* Get the (zero-based) MCDI port number */ 1829 if ((rc = efx_mcdi_get_port_assignment(enp, &port)) != 0) 1830 goto fail1; 1831 1832 /* EFX MCDI interface uses one-based port numbers */ 1833 emip->emi_port = port + 1; 1834 1835 encp->enc_assigned_port = port; 1836 1837 if ((rc = ef10_external_port_mapping(enp, port, 1838 &encp->enc_external_port)) != 0) 1839 goto fail2; 1840 1841 /* 1842 * Get PCIe function number from firmware (used for 1843 * per-function privilege and dynamic config info). 1844 * - PCIe PF: pf = PF number, vf = 0xffff. 1845 * - PCIe VF: pf = parent PF, vf = VF number. 1846 */ 1847 if ((rc = efx_mcdi_get_function_info(enp, &pf, &vf)) != 0) 1848 goto fail3; 1849 1850 encp->enc_pf = pf; 1851 encp->enc_vf = vf; 1852 1853 if ((rc = ef10_mcdi_get_pf_count(enp, &encp->enc_hw_pf_count)) != 0) 1854 goto fail4; 1855 1856 /* MAC address for this function */ 1857 if (EFX_PCI_FUNCTION_IS_PF(encp)) { 1858 rc = efx_mcdi_get_mac_address_pf(enp, mac_addr); 1859 #if EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 1860 /* 1861 * Disable static config checking, ONLY for manufacturing test 1862 * and setup at the factory, to allow the static config to be 1863 * installed. 
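 * (With this option enabled the locally administered address check in
 * the #else branch below is compiled out, so an unconfigured board can
 * still complete probe and have its static config written.)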
1864 */ 1865 #else /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ 1866 if ((rc == 0) && (mac_addr[0] & 0x02)) { 1867 /* 1868 * If the static config does not include a global MAC 1869 * address pool then the board may return a locally 1870 * administered MAC address (this should only happen on 1871 * incorrectly programmed boards). 1872 */ 1873 rc = EINVAL; 1874 } 1875 #endif /* EFSYS_OPT_ALLOW_UNCONFIGURED_NIC */ 1876 } else { 1877 rc = efx_mcdi_get_mac_address_vf(enp, mac_addr); 1878 } 1879 if (rc != 0) 1880 goto fail5; 1881 1882 EFX_MAC_ADDR_COPY(encp->enc_mac_addr, mac_addr); 1883 1884 /* Board configuration (legacy) */ 1885 rc = efx_mcdi_get_board_cfg(enp, &board_type, NULL, NULL); 1886 if (rc != 0) { 1887 /* Unprivileged functions may not be able to read board cfg */ 1888 if (rc == EACCES) 1889 board_type = 0; 1890 else 1891 goto fail6; 1892 } 1893 1894 encp->enc_board_type = board_type; 1895 1896 /* Fill out fields in enp->en_port and enp->en_nic_cfg from MCDI */ 1897 if ((rc = efx_mcdi_get_phy_cfg(enp)) != 0) 1898 goto fail7; 1899 1900 /* 1901 * Firmware with support for *_FEC capability bits does not 1902 * report that the corresponding *_FEC_REQUESTED bits are supported. 1903 * Add them here so that drivers understand that they are supported. 1904 */ 1905 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_BASER_FEC)) 1906 epp->ep_phy_cap_mask |= 1907 (1u << EFX_PHY_CAP_BASER_FEC_REQUESTED); 1908 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_RS_FEC)) 1909 epp->ep_phy_cap_mask |= 1910 (1u << EFX_PHY_CAP_RS_FEC_REQUESTED); 1911 if (epp->ep_phy_cap_mask & (1u << EFX_PHY_CAP_25G_BASER_FEC)) 1912 epp->ep_phy_cap_mask |= 1913 (1u << EFX_PHY_CAP_25G_BASER_FEC_REQUESTED); 1914 1915 /* Obtain the default PHY advertised capabilities */ 1916 if ((rc = ef10_phy_get_link(enp, &els)) != 0) 1917 goto fail8; 1918 epp->ep_default_adv_cap_mask = els.epls.epls_adv_cap_mask; 1919 epp->ep_adv_cap_mask = els.epls.epls_adv_cap_mask; 1920 1921 /* Check capabilities of running datapath firmware */ 1922 if ((rc = ef10_get_datapath_caps(enp)) != 0) 1923 goto fail9; 1924 1925 /* Get interrupt vector limits */ 1926 if ((rc = efx_mcdi_get_vector_cfg(enp, &base, &nvec, NULL)) != 0) { 1927 if (EFX_PCI_FUNCTION_IS_PF(encp)) 1928 goto fail10; 1929 1930 /* Ignore error (cannot query vector limits from a VF). */ 1931 base = 0; 1932 nvec = 1024; 1933 } 1934 encp->enc_intr_vec_base = base; 1935 encp->enc_intr_limit = nvec; 1936 1937 /* 1938 * Get the current privilege mask. Note that this may be modified 1939 * dynamically, so this value is informational only. DO NOT use 1940 * the privilege mask to check for sufficient privileges, as that 1941 * can result in time-of-check/time-of-use bugs. 
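 * For example, do not test for a privilege here and only later issue the
 * privileged MCDI command; the mask may have been revoked in between.
 * Attempt the operation and handle the EACCES returned by the MCDI
 * request itself.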
1942 */ 1943 if ((rc = ef10_get_privilege_mask(enp, &mask)) != 0) 1944 goto fail11; 1945 encp->enc_privilege_mask = mask; 1946 1947 return (0); 1948 1949 fail11: 1950 EFSYS_PROBE(fail11); 1951 fail10: 1952 EFSYS_PROBE(fail10); 1953 fail9: 1954 EFSYS_PROBE(fail9); 1955 fail8: 1956 EFSYS_PROBE(fail8); 1957 fail7: 1958 EFSYS_PROBE(fail7); 1959 fail6: 1960 EFSYS_PROBE(fail6); 1961 fail5: 1962 EFSYS_PROBE(fail5); 1963 fail4: 1964 EFSYS_PROBE(fail4); 1965 fail3: 1966 EFSYS_PROBE(fail3); 1967 fail2: 1968 EFSYS_PROBE(fail2); 1969 fail1: 1970 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1971 1972 return (rc); 1973 } 1974 1975 __checkReturn efx_rc_t 1976 efx_mcdi_entity_reset( 1977 __in efx_nic_t *enp) 1978 { 1979 efx_mcdi_req_t req; 1980 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_ENTITY_RESET_IN_LEN, 1981 MC_CMD_ENTITY_RESET_OUT_LEN); 1982 efx_rc_t rc; 1983 1984 req.emr_cmd = MC_CMD_ENTITY_RESET; 1985 req.emr_in_buf = payload; 1986 req.emr_in_length = MC_CMD_ENTITY_RESET_IN_LEN; 1987 req.emr_out_buf = payload; 1988 req.emr_out_length = MC_CMD_ENTITY_RESET_OUT_LEN; 1989 1990 MCDI_IN_POPULATE_DWORD_1(req, ENTITY_RESET_IN_FLAG, 1991 ENTITY_RESET_IN_FUNCTION_RESOURCE_RESET, 1); 1992 1993 efx_mcdi_execute(enp, &req); 1994 1995 if (req.emr_rc != 0) { 1996 rc = req.emr_rc; 1997 goto fail1; 1998 } 1999 2000 return (0); 2001 2002 fail1: 2003 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2004 2005 return (rc); 2006 } 2007 2008 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */ 2009 2010 #if EFX_OPTS_EF10() 2011 2012 static __checkReturn efx_rc_t 2013 ef10_set_workaround_bug26807( 2014 __in efx_nic_t *enp) 2015 { 2016 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 2017 uint32_t flags; 2018 efx_rc_t rc; 2019 2020 /* 2021 * If the bug26807 workaround is enabled, then firmware has enabled 2022 * support for chained multicast filters. Firmware will reset (FLR) 2023 * functions which have filters in the hardware filter table when the 2024 * workaround is enabled/disabled. 2025 * 2026 * We must recheck if the workaround is enabled after inserting the 2027 * first hardware filter, in case it has been changed since this check. 2028 */ 2029 rc = efx_mcdi_set_workaround(enp, MC_CMD_WORKAROUND_BUG26807, 2030 B_TRUE, &flags); 2031 if (rc == 0) { 2032 encp->enc_bug26807_workaround = B_TRUE; 2033 if (flags & (1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN)) { 2034 /* 2035 * Other functions had installed filters before the 2036 * workaround was enabled, and they have been reset 2037 * by firmware. 2038 */ 2039 EFSYS_PROBE(bug26807_workaround_flr_done); 2040 /* FIXME: bump MC warm boot count ? */ 2041 } 2042 } else if (rc == EACCES) { 2043 /* 2044 * Unprivileged functions cannot enable the workaround in older 2045 * firmware. 2046 */ 2047 encp->enc_bug26807_workaround = B_FALSE; 2048 } else if ((rc == ENOTSUP) || (rc == ENOENT)) { 2049 encp->enc_bug26807_workaround = B_FALSE; 2050 } else { 2051 goto fail1; 2052 } 2053 2054 return (0); 2055 2056 fail1: 2057 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2058 2059 return (rc); 2060 } 2061 2062 static __checkReturn efx_rc_t 2063 ef10_nic_board_cfg( 2064 __in efx_nic_t *enp) 2065 { 2066 const efx_nic_ops_t *enop = enp->en_enop; 2067 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 2068 efx_rc_t rc; 2069 2070 if ((rc = efx_mcdi_nic_board_cfg(enp)) != 0) 2071 goto fail1; 2072 2073 /* 2074 * Huntington RXDP firmware inserts a 0 or 14 byte prefix. 2075 * We only support the 14 byte prefix here. 
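 * The prefix length comes from the RX_PREFIX_LEN_14 capability bit read
 * in ef10_get_datapath_caps(); firmware variants that do not insert the
 * prefix leave enc_rx_prefix_size at 0, so the check below fails probe
 * with ENOTSUP.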

static	__checkReturn	efx_rc_t
ef10_nic_board_cfg(
	__in		efx_nic_t *enp)
{
	const efx_nic_ops_t *enop = enp->en_enop;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_rc_t rc;

	if ((rc = efx_mcdi_nic_board_cfg(enp)) != 0)
		goto fail1;

	/*
	 * Huntington RXDP firmware inserts a 0 or 14 byte prefix.
	 * We only support the 14 byte prefix here.
	 */
	if (encp->enc_rx_prefix_size != 14) {
		rc = ENOTSUP;
		goto fail2;
	}

	encp->enc_clk_mult = 1; /* not used for EF10 */

	/* Alignment for WPTR updates */
	encp->enc_rx_push_align = EF10_RX_WPTR_ALIGN;

	encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_DZ_RX_KER_BYTE_CNT);
	/* No boundary crossing limits */
	encp->enc_tx_dma_desc_boundary = 0;

	/*
	 * Maximum number of bytes into the frame the TCP header can start for
	 * firmware assisted TSO to work.
	 */
	encp->enc_tx_tso_tcp_header_offset_limit = EF10_TCP_HEADER_OFFSET_LIMIT;

	/* EF10 TSO engine demands that packet header be contiguous. */
	encp->enc_tx_tso_max_header_ndescs = 1;

	/* The overall TSO header length is not limited. */
	encp->enc_tx_tso_max_header_length = UINT32_MAX;

	/*
	 * There are no specific limitations on the number of
	 * TSO payload descriptors.
	 */
	encp->enc_tx_tso_max_payload_ndescs = UINT32_MAX;

	/* TSO superframe payload length is not limited. */
	encp->enc_tx_tso_max_payload_length = UINT32_MAX;

	/*
	 * Limitation on the maximum number of outgoing packets per
	 * TSO transaction described in SF-108452-SW.
	 */
	encp->enc_tx_tso_max_nframes = 32767;

	/*
	 * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
	 * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
	 * resources (allocated to this PCIe function), which is zero until
	 * after we have allocated VIs.
	 */
	encp->enc_evq_limit = 1024;
	encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
	encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;

	encp->enc_buftbl_limit = UINT32_MAX;

	if ((rc = ef10_set_workaround_bug26807(enp)) != 0)
		goto fail3;

	/* Get remaining controller-specific board config */
	if ((rc = enop->eno_board_cfg(enp)) != 0)
		if (rc != EACCES)
			goto fail4;

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_nic_probe(
	__in		efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));

	/* Read and clear any assertion state */
	if ((rc = efx_mcdi_read_assertion(enp)) != 0)
		goto fail1;

	/* Exit the assertion handler */
	if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
		if (rc != EACCES)
			goto fail2;

	if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
		goto fail3;

	if ((rc = ef10_nic_board_cfg(enp)) != 0)
		goto fail4;

	/*
	 * Set default driver config limits (based on board config).
	 *
	 * FIXME: For now allocate a fixed number of VIs which is likely to be
	 * sufficient and small enough to allow multiple functions on the same
	 * port.
	 */
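	/*
	 * Illustrative arithmetic for the expression below:
	 * MIN(128, MAX(enc_rxq_limit, enc_txq_limit)) clamps the fixed
	 * allocation to 128 VIs whenever either queue limit target is at
	 * least 128; otherwise it falls back to the larger of the two
	 * targets.
	 */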
	edcp->edc_min_vi_count = edcp->edc_max_vi_count =
	    MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));

	/* The client driver must configure and enable PIO buffer support */
	edcp->edc_max_piobuf_count = 0;
	edcp->edc_pio_alloc_size = 0;

#if EFSYS_OPT_MAC_STATS
	/* Wipe the MAC statistics */
	if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
		goto fail5;
#endif

#if EFSYS_OPT_LOOPBACK
	if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
		goto fail6;
#endif

#if EFSYS_OPT_MON_STATS
	if ((rc = mcdi_mon_cfg_build(enp)) != 0) {
		/* Unprivileged functions do not have access to sensors */
		if (rc != EACCES)
			goto fail7;
	}
#endif

	return (0);

#if EFSYS_OPT_MON_STATS
fail7:
	EFSYS_PROBE(fail7);
#endif
#if EFSYS_OPT_LOOPBACK
fail6:
	EFSYS_PROBE(fail6);
#endif
#if EFSYS_OPT_MAC_STATS
fail5:
	EFSYS_PROBE(fail5);
#endif
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_nic_set_drv_limits(
	__inout		efx_nic_t *enp,
	__in		efx_drv_limits_t *edlp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
	uint32_t min_evq_count, max_evq_count;
	uint32_t min_rxq_count, max_rxq_count;
	uint32_t min_txq_count, max_txq_count;
	efx_rc_t rc;

	if (edlp == NULL) {
		rc = EINVAL;
		goto fail1;
	}

	/* Get minimum required and maximum usable VI limits */
	min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
	min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
	min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);

	edcp->edc_min_vi_count =
	    MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));

	max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
	max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
	max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);

	edcp->edc_max_vi_count =
	    MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));

	/*
	 * Check limits for sub-allocated piobuf blocks.
	 * PIO is optional, so don't fail if the limits are incorrect.
	 */
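	/*
	 * Illustrative example with hypothetical values: if enc_piobuf_size
	 * is 2048, enc_piobuf_min_alloc_size is 64 and the driver requests
	 * edl_min_pio_alloc_size of 256, then blk_size = 256 and
	 * blks_per_piobuf = 8. With enc_piobuf_limit = 16 this gives
	 * blk_count = 128 blocks, so edc_max_piobuf_count rounds up to
	 * 128 / 8 = 16 piobufs.
	 */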
	if ((encp->enc_piobuf_size == 0) ||
	    (encp->enc_piobuf_limit == 0) ||
	    (edlp->edl_min_pio_alloc_size == 0) ||
	    (edlp->edl_min_pio_alloc_size > encp->enc_piobuf_size)) {
		/* Disable PIO */
		edcp->edc_max_piobuf_count = 0;
		edcp->edc_pio_alloc_size = 0;
	} else {
		uint32_t blk_size, blk_count, blks_per_piobuf;

		blk_size =
		    MAX(edlp->edl_min_pio_alloc_size,
			encp->enc_piobuf_min_alloc_size);

		blks_per_piobuf = encp->enc_piobuf_size / blk_size;
		EFSYS_ASSERT3U(blks_per_piobuf, <=, 32);

		blk_count = (encp->enc_piobuf_limit * blks_per_piobuf);

		/* A zero max pio alloc count means unlimited */
		if ((edlp->edl_max_pio_alloc_count > 0) &&
		    (edlp->edl_max_pio_alloc_count < blk_count)) {
			blk_count = edlp->edl_max_pio_alloc_count;
		}

		edcp->edc_pio_alloc_size = blk_size;
		edcp->edc_max_piobuf_count =
		    (blk_count + (blks_per_piobuf - 1)) / blks_per_piobuf;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


	__checkReturn	efx_rc_t
ef10_nic_reset(
	__in		efx_nic_t *enp)
{
	efx_rc_t rc;

	/* ef10_nic_reset() is called to recover from BADASSERT failures. */
	if ((rc = efx_mcdi_read_assertion(enp)) != 0)
		goto fail1;
	if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
		goto fail2;

	if ((rc = efx_mcdi_entity_reset(enp)) != 0)
		goto fail3;

	/* Clear RX/TX DMA queue errors */
	enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

#endif /* EFX_OPTS_EF10() */

#if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()

	__checkReturn	efx_rc_t
ef10_upstream_port_vadaptor_alloc(
	__in		efx_nic_t *enp)
{
	uint32_t retry;
	uint32_t delay_us;
	efx_rc_t rc;

	/*
	 * On a VF, this may fail with MC_CMD_ERR_NO_EVB_PORT (ENOENT) if the
	 * PF driver has yet to bring up the EVB port. See bug 56147. In this
	 * case, retry the request several times after waiting a while. The
	 * wait time between retries starts small (10ms) and exponentially
	 * increases. Total wait time is a little over two seconds. Retry
	 * logic in the client driver may mean this whole loop is repeated if
	 * it continues to fail.
	 */
	retry = 0;
	delay_us = 10000;
	while ((rc = efx_mcdi_vadaptor_alloc(enp, EVB_PORT_ID_ASSIGNED)) != 0) {
		if (EFX_PCI_FUNCTION_IS_PF(&enp->en_nic_cfg) ||
		    (rc != ENOENT)) {
			/*
			 * Do not retry alloc for PF, or for other errors on
			 * a VF.
			 */
			goto fail1;
		}

		/* VF startup before PF is ready. Retry allocation. */
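		/*
		 * Backoff arithmetic: delay_us quadruples from 10ms until the
		 * shift stops once the delay is at or above 500ms, giving
		 * sleeps of roughly 10, 40, 160, 640, 640 and 640
		 * milliseconds across the (up to) six retries, about 2.1
		 * seconds in total.
		 */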
		if (retry > 5) {
			/* Too many attempts */
			rc = EINVAL;
			goto fail2;
		}
		EFSYS_PROBE1(mcdi_no_evb_port_retry, int, retry);
		EFSYS_SLEEP(delay_us);
		retry++;
		if (delay_us < 500000)
			delay_us <<= 2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

#endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */

#if EFX_OPTS_EF10()

	__checkReturn	efx_rc_t
ef10_nic_init(
	__in		efx_nic_t *enp)
{
	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
	uint32_t min_vi_count, max_vi_count;
	uint32_t vi_count, vi_base, vi_shift;
	uint32_t i;
	uint32_t vi_window_size;
	efx_rc_t rc;
	boolean_t alloc_vadaptor = B_TRUE;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));

	/* Enable reporting of some events (e.g. link change) */
	if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
		goto fail1;

	/* Allocate (optional) on-chip PIO buffers */
	ef10_nic_alloc_piobufs(enp, edcp->edc_max_piobuf_count);

	/*
	 * For best performance, PIO writes should use a write-combined
	 * (WC) memory mapping. Using a separate WC mapping for the PIO
	 * aperture of each VI would be a burden to drivers (and not
	 * possible if the host page size is >4Kbyte).
	 *
	 * To avoid this we use a single uncached (UC) mapping for VI
	 * register access, and a single WC mapping for extra VIs used
	 * for PIO writes.
	 *
	 * Each piobuf must be linked to a VI in the WC mapping, and to
	 * each VI that is using a sub-allocated block from the piobuf.
	 */
	min_vi_count = edcp->edc_min_vi_count;
	max_vi_count =
	    edcp->edc_max_vi_count + enp->en_arch.ef10.ena_piobuf_count;

	/* Ensure that the previously attached driver's VIs are freed */
	if ((rc = efx_mcdi_free_vis(enp)) != 0)
		goto fail2;

	/*
	 * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
	 * fails then retrying the request for fewer VI resources may succeed.
	 */
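	/*
	 * (Illustrative note: in that case a client driver would typically
	 * lower the maxima it passes to efx_nic_set_drv_limits() and retry
	 * initialisation; this function itself does not retry.)
	 */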
	vi_count = 0;
	if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
	    &vi_base, &vi_count, &vi_shift)) != 0)
		goto fail3;

	EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);

	if (vi_count < min_vi_count) {
		rc = ENOMEM;
		goto fail4;
	}

	enp->en_arch.ef10.ena_vi_base = vi_base;
	enp->en_arch.ef10.ena_vi_count = vi_count;
	enp->en_arch.ef10.ena_vi_shift = vi_shift;

	if (vi_count < min_vi_count + enp->en_arch.ef10.ena_piobuf_count) {
		/* Not enough extra VIs to map piobufs */
		ef10_nic_free_piobufs(enp);
	}

	enp->en_arch.ef10.ena_pio_write_vi_base =
	    vi_count - enp->en_arch.ef10.ena_piobuf_count;

	EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
	    EFX_VI_WINDOW_SHIFT_INVALID);
	EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
	    EFX_VI_WINDOW_SHIFT_64K);
	vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;

	/* Save UC memory mapping details */
	enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
	if (enp->en_arch.ef10.ena_piobuf_count > 0) {
		enp->en_arch.ef10.ena_uc_mem_map_size =
		    (vi_window_size *
		    enp->en_arch.ef10.ena_pio_write_vi_base);
	} else {
		enp->en_arch.ef10.ena_uc_mem_map_size =
		    (vi_window_size *
		    enp->en_arch.ef10.ena_vi_count);
	}

	/* Save WC memory mapping details */
	enp->en_arch.ef10.ena_wc_mem_map_offset =
	    enp->en_arch.ef10.ena_uc_mem_map_offset +
	    enp->en_arch.ef10.ena_uc_mem_map_size;

	enp->en_arch.ef10.ena_wc_mem_map_size =
	    (vi_window_size *
	    enp->en_arch.ef10.ena_piobuf_count);

	/* Link piobufs to extra VIs in WC mapping */
	if (enp->en_arch.ef10.ena_piobuf_count > 0) {
		for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) {
			rc = efx_mcdi_link_piobuf(enp,
			    enp->en_arch.ef10.ena_pio_write_vi_base + i,
			    enp->en_arch.ef10.ena_piobuf_handle[i]);
			if (rc != 0)
				break;
		}
	}

	/*
	 * For SR-IOV use case, vAdaptor is allocated for PF and associated VFs
	 * during NIC initialization when vSwitch is created and vports are
	 * allocated. Hence, skip vAdaptor allocation for EVB and update vport
	 * id in NIC structure with the one allocated for PF.
	 */

	enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
#if EFSYS_OPT_EVB
	if ((enp->en_vswitchp != NULL) && (enp->en_vswitchp->ev_evcp != NULL)) {
		/* For EVB use vport allocated on vswitch */
		enp->en_vport_id = enp->en_vswitchp->ev_evcp->evc_vport_id;
		alloc_vadaptor = B_FALSE;
	}
#endif
	if (alloc_vadaptor != B_FALSE) {
		/* Allocate a vAdaptor attached to our upstream vPort/pPort */
		if ((rc = ef10_upstream_port_vadaptor_alloc(enp)) != 0)
			goto fail5;
	}
	enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;

	return (0);

fail5:
	EFSYS_PROBE(fail5);
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);

	ef10_nic_free_piobufs(enp);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
ef10_nic_get_vi_pool(
	__in		efx_nic_t *enp,
	__out		uint32_t *vi_countp)
{
	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));

	/*
	 * Report VIs that the client driver can use.
	 * Do not include VIs used for PIO buffer writes.
	 */
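	/*
	 * Illustrative arithmetic (hypothetical values): if 64 VIs were
	 * allocated by ef10_nic_init() and 16 piobufs are in use,
	 * ena_pio_write_vi_base is 48, so the UC mapping covers VIs 0-47,
	 * the WC mapping covers VIs 48-63, and this function reports 48
	 * usable VIs to the client driver.
	 */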
	*vi_countp = enp->en_arch.ef10.ena_pio_write_vi_base;

	return (0);
}

	__checkReturn	efx_rc_t
ef10_nic_get_bar_region(
	__in		efx_nic_t *enp,
	__in		efx_nic_region_t region,
	__out		uint32_t *offsetp,
	__out		size_t *sizep)
{
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));

	/*
	 * TODO: Specify host memory mapping alignment and granularity
	 * in efx_drv_limits_t so that they can be taken into account
	 * when allocating extra VIs for PIO writes.
	 */
	switch (region) {
	case EFX_REGION_VI:
		/* UC mapped memory BAR region for VI registers */
		*offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
		*sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
		break;

	case EFX_REGION_PIO_WRITE_VI:
		/* WC mapped memory BAR region for piobuf writes */
		*offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
		*sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
		break;

	default:
		rc = EINVAL;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	boolean_t
ef10_nic_hw_unavailable(
	__in		efx_nic_t *enp)
{
	efx_dword_t dword;

	if (enp->en_reset_flags & EFX_RESET_HW_UNAVAIL)
		return (B_TRUE);

	EFX_BAR_READD(enp, ER_DZ_BIU_MC_SFT_STATUS_REG, &dword, B_FALSE);
	if (EFX_DWORD_FIELD(dword, EFX_DWORD_0) == 0xffffffff)
		goto unavail;

	return (B_FALSE);

unavail:
	ef10_nic_set_hw_unavailable(enp);

	return (B_TRUE);
}

			void
ef10_nic_set_hw_unavailable(
	__in		efx_nic_t *enp)
{
	EFSYS_PROBE(hw_unavail);
	enp->en_reset_flags |= EFX_RESET_HW_UNAVAIL;
}
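
/*
 * Illustrative usage sketch (hypothetical caller, not part of this file):
 * a client driver's periodic health check might call the generic wrapper,
 * which is assumed here to dispatch to ef10_nic_hw_unavailable(), and
 * schedule recovery when the hardware has gone away, e.g.
 *
 *	if (efx_nic_hw_unavailable(enp))
 *		trigger driver-level reset / detach handling;
 *
 * A read of all-ones from ER_DZ_BIU_MC_SFT_STATUS_REG is treated as
 * "hardware gone", matching the usual PCIe behaviour for reads from a
 * reset or surprise-removed function.
 */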
2651 */ 2652 do_vadaptor_free = B_FALSE; 2653 } 2654 #endif 2655 if (do_vadaptor_free != B_FALSE) { 2656 (void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id); 2657 enp->en_vport_id = EVB_PORT_ID_NULL; 2658 } 2659 2660 /* Unlink piobufs from extra VIs in WC mapping */ 2661 if (enp->en_arch.ef10.ena_piobuf_count > 0) { 2662 for (i = 0; i < enp->en_arch.ef10.ena_piobuf_count; i++) { 2663 rc = efx_mcdi_unlink_piobuf(enp, 2664 enp->en_arch.ef10.ena_pio_write_vi_base + i); 2665 if (rc != 0) 2666 break; 2667 } 2668 } 2669 2670 ef10_nic_free_piobufs(enp); 2671 2672 (void) efx_mcdi_free_vis(enp); 2673 enp->en_arch.ef10.ena_vi_count = 0; 2674 } 2675 2676 void 2677 ef10_nic_unprobe( 2678 __in efx_nic_t *enp) 2679 { 2680 #if EFSYS_OPT_MON_STATS 2681 mcdi_mon_cfg_free(enp); 2682 #endif /* EFSYS_OPT_MON_STATS */ 2683 (void) efx_mcdi_drv_attach(enp, B_FALSE); 2684 } 2685 2686 #if EFSYS_OPT_DIAG 2687 2688 __checkReturn efx_rc_t 2689 ef10_nic_register_test( 2690 __in efx_nic_t *enp) 2691 { 2692 efx_rc_t rc; 2693 2694 /* FIXME */ 2695 _NOTE(ARGUNUSED(enp)) 2696 _NOTE(CONSTANTCONDITION) 2697 if (B_FALSE) { 2698 rc = ENOTSUP; 2699 goto fail1; 2700 } 2701 /* FIXME */ 2702 2703 return (0); 2704 2705 fail1: 2706 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2707 2708 return (rc); 2709 } 2710 2711 #endif /* EFSYS_OPT_DIAG */ 2712 2713 #if EFSYS_OPT_FW_SUBVARIANT_AWARE 2714 2715 __checkReturn efx_rc_t 2716 efx_mcdi_get_nic_global( 2717 __in efx_nic_t *enp, 2718 __in uint32_t key, 2719 __out uint32_t *valuep) 2720 { 2721 efx_mcdi_req_t req; 2722 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_NIC_GLOBAL_IN_LEN, 2723 MC_CMD_GET_NIC_GLOBAL_OUT_LEN); 2724 efx_rc_t rc; 2725 2726 req.emr_cmd = MC_CMD_GET_NIC_GLOBAL; 2727 req.emr_in_buf = payload; 2728 req.emr_in_length = MC_CMD_GET_NIC_GLOBAL_IN_LEN; 2729 req.emr_out_buf = payload; 2730 req.emr_out_length = MC_CMD_GET_NIC_GLOBAL_OUT_LEN; 2731 2732 MCDI_IN_SET_DWORD(req, GET_NIC_GLOBAL_IN_KEY, key); 2733 2734 efx_mcdi_execute(enp, &req); 2735 2736 if (req.emr_rc != 0) { 2737 rc = req.emr_rc; 2738 goto fail1; 2739 } 2740 2741 if (req.emr_out_length_used != MC_CMD_GET_NIC_GLOBAL_OUT_LEN) { 2742 rc = EMSGSIZE; 2743 goto fail2; 2744 } 2745 2746 *valuep = MCDI_OUT_DWORD(req, GET_NIC_GLOBAL_OUT_VALUE); 2747 2748 return (0); 2749 2750 fail2: 2751 EFSYS_PROBE(fail2); 2752 fail1: 2753 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2754 2755 return (rc); 2756 } 2757 2758 __checkReturn efx_rc_t 2759 efx_mcdi_set_nic_global( 2760 __in efx_nic_t *enp, 2761 __in uint32_t key, 2762 __in uint32_t value) 2763 { 2764 efx_mcdi_req_t req; 2765 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_NIC_GLOBAL_IN_LEN, 0); 2766 efx_rc_t rc; 2767 2768 req.emr_cmd = MC_CMD_SET_NIC_GLOBAL; 2769 req.emr_in_buf = payload; 2770 req.emr_in_length = MC_CMD_SET_NIC_GLOBAL_IN_LEN; 2771 req.emr_out_buf = NULL; 2772 req.emr_out_length = 0; 2773 2774 MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_KEY, key); 2775 MCDI_IN_SET_DWORD(req, SET_NIC_GLOBAL_IN_VALUE, value); 2776 2777 efx_mcdi_execute(enp, &req); 2778 2779 if (req.emr_rc != 0) { 2780 rc = req.emr_rc; 2781 goto fail1; 2782 } 2783 2784 return (0); 2785 2786 fail1: 2787 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2788 2789 return (rc); 2790 } 2791 2792 #endif /* EFSYS_OPT_FW_SUBVARIANT_AWARE */ 2793 2794 #endif /* EFX_OPTS_EF10() */ 2795