1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright(c) 2019-2021 Xilinx, Inc. 4 * Copyright(c) 2008-2019 Solarflare Communications Inc. 5 */ 6 7 #include "efx.h" 8 #include "efx_impl.h" 9 10 #if EFSYS_OPT_MCDI 11 12 /* 13 * There are three versions of the MCDI interface: 14 * - MCDIv0: Siena BootROM. Transport uses MCDIv1 headers. 15 * - MCDIv1: Siena firmware and Huntington BootROM. 16 * - MCDIv2: EF10 firmware (Huntington/Medford) and Medford BootROM. 17 * Transport uses MCDIv2 headers. 18 * 19 * MCDIv2 Header NOT_EPOCH flag 20 * ---------------------------- 21 * A new epoch begins at initial startup or after an MC reboot, and defines when 22 * the MC should reject stale MCDI requests. 23 * 24 * The first MCDI request sent by the host should contain NOT_EPOCH=0, and all 25 * subsequent requests (until the next MC reboot) should contain NOT_EPOCH=1. 26 * 27 * After rebooting the MC will fail all requests with NOT_EPOCH=1 by writing a 28 * response with ERROR=1 and DATALEN=0 until a request is seen with NOT_EPOCH=0. 29 */ 30 31 32 33 #if EFSYS_OPT_SIENA 34 35 static const efx_mcdi_ops_t __efx_mcdi_siena_ops = { 36 siena_mcdi_init, /* emco_init */ 37 siena_mcdi_send_request, /* emco_send_request */ 38 siena_mcdi_poll_reboot, /* emco_poll_reboot */ 39 siena_mcdi_poll_response, /* emco_poll_response */ 40 siena_mcdi_read_response, /* emco_read_response */ 41 siena_mcdi_fini, /* emco_fini */ 42 siena_mcdi_feature_supported, /* emco_feature_supported */ 43 siena_mcdi_get_timeout, /* emco_get_timeout */ 44 }; 45 46 #endif /* EFSYS_OPT_SIENA */ 47 48 #if EFX_OPTS_EF10() 49 50 static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = { 51 ef10_mcdi_init, /* emco_init */ 52 ef10_mcdi_send_request, /* emco_send_request */ 53 ef10_mcdi_poll_reboot, /* emco_poll_reboot */ 54 ef10_mcdi_poll_response, /* emco_poll_response */ 55 ef10_mcdi_read_response, /* emco_read_response */ 56 ef10_mcdi_fini, /* emco_fini */ 57 ef10_mcdi_feature_supported, /* emco_feature_supported */ 58 ef10_mcdi_get_timeout, /* emco_get_timeout */ 59 }; 60 61 #endif /* EFX_OPTS_EF10() */ 62 63 #if EFSYS_OPT_RIVERHEAD 64 65 static const efx_mcdi_ops_t __efx_mcdi_rhead_ops = { 66 ef10_mcdi_init, /* emco_init */ 67 ef10_mcdi_send_request, /* emco_send_request */ 68 ef10_mcdi_poll_reboot, /* emco_poll_reboot */ 69 ef10_mcdi_poll_response, /* emco_poll_response */ 70 ef10_mcdi_read_response, /* emco_read_response */ 71 ef10_mcdi_fini, /* emco_fini */ 72 ef10_mcdi_feature_supported, /* emco_feature_supported */ 73 ef10_mcdi_get_timeout, /* emco_get_timeout */ 74 }; 75 76 #endif /* EFSYS_OPT_RIVERHEAD */ 77 78 79 80 __checkReturn efx_rc_t 81 efx_mcdi_init( 82 __in efx_nic_t *enp, 83 __in const efx_mcdi_transport_t *emtp) 84 { 85 const efx_mcdi_ops_t *emcop; 86 efx_rc_t rc; 87 88 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 89 EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0); 90 91 switch (enp->en_family) { 92 #if EFSYS_OPT_SIENA 93 case EFX_FAMILY_SIENA: 94 emcop = &__efx_mcdi_siena_ops; 95 break; 96 #endif /* EFSYS_OPT_SIENA */ 97 98 #if EFSYS_OPT_HUNTINGTON 99 case EFX_FAMILY_HUNTINGTON: 100 emcop = &__efx_mcdi_ef10_ops; 101 break; 102 #endif /* EFSYS_OPT_HUNTINGTON */ 103 104 #if EFSYS_OPT_MEDFORD 105 case EFX_FAMILY_MEDFORD: 106 emcop = &__efx_mcdi_ef10_ops; 107 break; 108 #endif /* EFSYS_OPT_MEDFORD */ 109 110 #if EFSYS_OPT_MEDFORD2 111 case EFX_FAMILY_MEDFORD2: 112 emcop = &__efx_mcdi_ef10_ops; 113 break; 114 #endif /* EFSYS_OPT_MEDFORD2 */ 115 116 #if EFSYS_OPT_RIVERHEAD 117 case EFX_FAMILY_RIVERHEAD: 118 emcop = &__efx_mcdi_rhead_ops; 119 break; 120 #endif /* EFSYS_OPT_RIVERHEAD */ 121 122 default: 123 EFSYS_ASSERT(0); 124 rc = ENOTSUP; 125 goto fail1; 126 } 127 128 if (enp->en_features & EFX_FEATURE_MCDI_DMA) { 129 /* MCDI requires a DMA buffer in host memory */ 130 if ((emtp == NULL) || (emtp->emt_dma_mem) == NULL) { 131 rc = EINVAL; 132 goto fail2; 133 } 134 } 135 enp->en_mcdi.em_emtp = emtp; 136 137 if (emcop != NULL && emcop->emco_init != NULL) { 138 if ((rc = emcop->emco_init(enp, emtp)) != 0) 139 goto fail3; 140 } 141 142 enp->en_mcdi.em_emcop = emcop; 143 enp->en_mod_flags |= EFX_MOD_MCDI; 144 145 return (0); 146 147 fail3: 148 EFSYS_PROBE(fail3); 149 fail2: 150 EFSYS_PROBE(fail2); 151 fail1: 152 EFSYS_PROBE1(fail1, efx_rc_t, rc); 153 154 enp->en_mcdi.em_emcop = NULL; 155 enp->en_mcdi.em_emtp = NULL; 156 enp->en_mod_flags &= ~EFX_MOD_MCDI; 157 158 return (rc); 159 } 160 161 void 162 efx_mcdi_fini( 163 __in efx_nic_t *enp) 164 { 165 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 166 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 167 168 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 169 EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI); 170 171 if (emcop != NULL && emcop->emco_fini != NULL) 172 emcop->emco_fini(enp); 173 174 emip->emi_port = 0; 175 emip->emi_aborted = 0; 176 177 enp->en_mcdi.em_emcop = NULL; 178 enp->en_mod_flags &= ~EFX_MOD_MCDI; 179 } 180 181 void 182 efx_mcdi_new_epoch( 183 __in efx_nic_t *enp) 184 { 185 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 186 efsys_lock_state_t state; 187 188 /* Start a new epoch (allow fresh MCDI requests to succeed) */ 189 EFSYS_LOCK(enp->en_eslp, state); 190 emip->emi_new_epoch = B_TRUE; 191 EFSYS_UNLOCK(enp->en_eslp, state); 192 } 193 194 static void 195 efx_mcdi_send_request( 196 __in efx_nic_t *enp, 197 __in void *hdrp, 198 __in size_t hdr_len, 199 __in void *sdup, 200 __in size_t sdu_len) 201 { 202 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 203 204 emcop->emco_send_request(enp, hdrp, hdr_len, sdup, sdu_len); 205 } 206 207 static efx_rc_t 208 efx_mcdi_poll_reboot( 209 __in efx_nic_t *enp) 210 { 211 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 212 efx_rc_t rc; 213 214 rc = emcop->emco_poll_reboot(enp); 215 return (rc); 216 } 217 218 static boolean_t 219 efx_mcdi_poll_response( 220 __in efx_nic_t *enp) 221 { 222 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 223 boolean_t available; 224 225 available = emcop->emco_poll_response(enp); 226 return (available); 227 } 228 229 static void 230 efx_mcdi_read_response( 231 __in efx_nic_t *enp, 232 __out void *bufferp, 233 __in size_t offset, 234 __in size_t length) 235 { 236 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 237 238 emcop->emco_read_response(enp, bufferp, offset, length); 239 } 240 241 void 242 efx_mcdi_request_start( 243 __in efx_nic_t *enp, 244 __in efx_mcdi_req_t *emrp, 245 __in boolean_t ev_cpl) 246 { 247 #if EFSYS_OPT_MCDI_LOGGING 248 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 249 #endif 250 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 251 efx_dword_t hdr[2]; 252 size_t hdr_len; 253 unsigned int max_version; 254 unsigned int seq; 255 unsigned int xflags; 256 boolean_t new_epoch; 257 efsys_lock_state_t state; 258 259 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 260 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); 261 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 262 263 /* 264 * efx_mcdi_request_start() is naturally serialised against both 265 * efx_mcdi_request_poll() and efx_mcdi_ev_cpl()/efx_mcdi_ev_death(), 266 * by virtue of there only being one outstanding MCDI request. 267 * Unfortunately, upper layers may also call efx_mcdi_request_abort() 268 * at any time, to timeout a pending mcdi request, That request may 269 * then subsequently complete, meaning efx_mcdi_ev_cpl() or 270 * efx_mcdi_ev_death() may end up running in parallel with 271 * efx_mcdi_request_start(). This race is handled by ensuring that 272 * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the 273 * en_eslp lock. 274 */ 275 EFSYS_LOCK(enp->en_eslp, state); 276 EFSYS_ASSERT(emip->emi_pending_req == NULL); 277 emip->emi_pending_req = emrp; 278 emip->emi_ev_cpl = ev_cpl; 279 emip->emi_poll_cnt = 0; 280 seq = emip->emi_seq++ & EFX_MASK32(MCDI_HEADER_SEQ); 281 new_epoch = emip->emi_new_epoch; 282 max_version = emip->emi_max_version; 283 EFSYS_UNLOCK(enp->en_eslp, state); 284 285 xflags = 0; 286 if (ev_cpl) 287 xflags |= MCDI_HEADER_XFLAGS_EVREQ; 288 289 /* 290 * Huntington firmware supports MCDIv2, but the Huntington BootROM only 291 * supports MCDIv1. Use MCDIv1 headers for MCDIv1 commands where 292 * possible to support this. 293 */ 294 if ((max_version >= 2) && 295 ((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) || 296 (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1) || 297 (emrp->emr_out_length > MCDI_CTL_SDU_LEN_MAX_V1))) { 298 /* Construct MCDI v2 header */ 299 hdr_len = sizeof (hdr); 300 EFX_POPULATE_DWORD_8(hdr[0], 301 MCDI_HEADER_CODE, MC_CMD_V2_EXTN, 302 MCDI_HEADER_RESYNC, 1, 303 MCDI_HEADER_DATALEN, 0, 304 MCDI_HEADER_SEQ, seq, 305 MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1, 306 MCDI_HEADER_ERROR, 0, 307 MCDI_HEADER_RESPONSE, 0, 308 MCDI_HEADER_XFLAGS, xflags); 309 310 EFX_POPULATE_DWORD_2(hdr[1], 311 MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd, 312 MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length); 313 } else { 314 /* Construct MCDI v1 header */ 315 hdr_len = sizeof (hdr[0]); 316 EFX_POPULATE_DWORD_8(hdr[0], 317 MCDI_HEADER_CODE, emrp->emr_cmd, 318 MCDI_HEADER_RESYNC, 1, 319 MCDI_HEADER_DATALEN, emrp->emr_in_length, 320 MCDI_HEADER_SEQ, seq, 321 MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1, 322 MCDI_HEADER_ERROR, 0, 323 MCDI_HEADER_RESPONSE, 0, 324 MCDI_HEADER_XFLAGS, xflags); 325 } 326 327 #if EFSYS_OPT_MCDI_LOGGING 328 if (emtp->emt_logger != NULL) { 329 emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_REQUEST, 330 &hdr[0], hdr_len, 331 emrp->emr_in_buf, emrp->emr_in_length); 332 } 333 #endif /* EFSYS_OPT_MCDI_LOGGING */ 334 335 efx_mcdi_send_request(enp, &hdr[0], hdr_len, 336 emrp->emr_in_buf, emrp->emr_in_length); 337 } 338 339 340 static void 341 efx_mcdi_read_response_header( 342 __in efx_nic_t *enp, 343 __inout efx_mcdi_req_t *emrp) 344 { 345 #if EFSYS_OPT_MCDI_LOGGING 346 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 347 #endif /* EFSYS_OPT_MCDI_LOGGING */ 348 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 349 efx_dword_t hdr[2]; 350 unsigned int hdr_len; 351 unsigned int data_len; 352 unsigned int seq; 353 unsigned int cmd; 354 unsigned int error; 355 efx_rc_t rc; 356 357 EFSYS_ASSERT(emrp != NULL); 358 359 efx_mcdi_read_response(enp, &hdr[0], 0, sizeof (hdr[0])); 360 hdr_len = sizeof (hdr[0]); 361 362 cmd = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE); 363 seq = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_SEQ); 364 error = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_ERROR); 365 366 if (cmd != MC_CMD_V2_EXTN) { 367 data_len = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_DATALEN); 368 } else { 369 efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1])); 370 hdr_len += sizeof (hdr[1]); 371 372 cmd = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD); 373 data_len = 374 EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN); 375 } 376 377 if (error && (data_len == 0)) { 378 /* The MC has rebooted since the request was sent. */ 379 EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US); 380 efx_mcdi_poll_reboot(enp); 381 rc = EIO; 382 goto fail1; 383 } 384 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 385 if (((cmd != emrp->emr_cmd) && (emrp->emr_cmd != MC_CMD_PROXY_CMD)) || 386 #else 387 if ((cmd != emrp->emr_cmd) || 388 #endif 389 (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) { 390 /* Response is for a different request */ 391 rc = EIO; 392 goto fail2; 393 } 394 if (error) { 395 efx_dword_t err[2]; 396 unsigned int err_len = MIN(data_len, sizeof (err)); 397 int err_code = MC_CMD_ERR_EPROTO; 398 int err_arg = 0; 399 400 /* Read error code (and arg num for MCDI v2 commands) */ 401 efx_mcdi_read_response(enp, &err, hdr_len, err_len); 402 403 if (err_len >= (MC_CMD_ERR_CODE_OFST + sizeof (efx_dword_t))) 404 err_code = EFX_DWORD_FIELD(err[0], EFX_DWORD_0); 405 #ifdef WITH_MCDI_V2 406 if (err_len >= (MC_CMD_ERR_ARG_OFST + sizeof (efx_dword_t))) 407 err_arg = EFX_DWORD_FIELD(err[1], EFX_DWORD_0); 408 #endif 409 emrp->emr_err_code = err_code; 410 emrp->emr_err_arg = err_arg; 411 412 #if EFSYS_OPT_MCDI_PROXY_AUTH 413 if ((err_code == MC_CMD_ERR_PROXY_PENDING) && 414 (err_len == sizeof (err))) { 415 /* 416 * The MCDI request would normally fail with EPERM, but 417 * firmware has forwarded it to an authorization agent 418 * attached to a privileged PF. 419 * 420 * Save the authorization request handle. The client 421 * must wait for a PROXY_RESPONSE event, or timeout. 422 */ 423 emrp->emr_proxy_handle = err_arg; 424 } 425 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ 426 427 #if EFSYS_OPT_MCDI_LOGGING 428 if (emtp->emt_logger != NULL) { 429 emtp->emt_logger(emtp->emt_context, 430 EFX_LOG_MCDI_RESPONSE, 431 &hdr[0], hdr_len, 432 &err[0], err_len); 433 } 434 #endif /* EFSYS_OPT_MCDI_LOGGING */ 435 436 if (!emrp->emr_quiet) { 437 EFSYS_PROBE3(mcdi_err_arg, int, emrp->emr_cmd, 438 int, err_code, int, err_arg); 439 } 440 441 rc = efx_mcdi_request_errcode(err_code); 442 goto fail3; 443 } 444 445 emrp->emr_rc = 0; 446 emrp->emr_out_length_used = data_len; 447 #if EFSYS_OPT_MCDI_PROXY_AUTH 448 emrp->emr_proxy_handle = 0; 449 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ 450 return; 451 452 fail3: 453 fail2: 454 fail1: 455 emrp->emr_rc = rc; 456 emrp->emr_out_length_used = 0; 457 } 458 459 static void 460 efx_mcdi_finish_response( 461 __in efx_nic_t *enp, 462 __in efx_mcdi_req_t *emrp) 463 { 464 #if EFSYS_OPT_MCDI_LOGGING 465 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 466 #endif /* EFSYS_OPT_MCDI_LOGGING */ 467 efx_dword_t hdr[2]; 468 unsigned int hdr_len; 469 size_t bytes; 470 unsigned int resp_off; 471 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 472 unsigned int resp_cmd; 473 boolean_t proxied_cmd_resp = B_FALSE; 474 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ 475 476 if (emrp->emr_out_buf == NULL) 477 return; 478 479 /* Read the command header to detect MCDI response format */ 480 hdr_len = sizeof (hdr[0]); 481 efx_mcdi_read_response(enp, &hdr[0], 0, hdr_len); 482 if (EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) { 483 /* 484 * Read the actual payload length. The length given in the event 485 * is only correct for responses with the V1 format. 486 */ 487 efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1])); 488 hdr_len += sizeof (hdr[1]); 489 resp_off = hdr_len; 490 491 emrp->emr_out_length_used = EFX_DWORD_FIELD(hdr[1], 492 MC_CMD_V2_EXTN_IN_ACTUAL_LEN); 493 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 494 /* 495 * A proxy MCDI command is executed by PF on behalf of 496 * one of its VFs. The command to be proxied follows 497 * immediately afterward in the host buffer. 498 * PROXY_CMD inner call complete response should be copied to 499 * output buffer so that it can be returned to the requesting 500 * function in MC_CMD_PROXY_COMPLETE payload. 501 */ 502 resp_cmd = 503 EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD); 504 proxied_cmd_resp = ((emrp->emr_cmd == MC_CMD_PROXY_CMD) && 505 (resp_cmd != MC_CMD_PROXY_CMD)); 506 if (proxied_cmd_resp) { 507 resp_off = 0; 508 emrp->emr_out_length_used += hdr_len; 509 } 510 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ 511 } else { 512 resp_off = hdr_len; 513 } 514 515 /* Copy payload out into caller supplied buffer */ 516 bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length); 517 efx_mcdi_read_response(enp, emrp->emr_out_buf, resp_off, bytes); 518 519 /* Report bytes copied to caller (response message may be larger) */ 520 emrp->emr_out_length_used = bytes; 521 522 #if EFSYS_OPT_MCDI_LOGGING 523 if (emtp->emt_logger != NULL) { 524 emtp->emt_logger(emtp->emt_context, 525 EFX_LOG_MCDI_RESPONSE, 526 &hdr[0], hdr_len, 527 emrp->emr_out_buf, bytes); 528 } 529 #endif /* EFSYS_OPT_MCDI_LOGGING */ 530 } 531 532 533 __checkReturn boolean_t 534 efx_mcdi_request_poll( 535 __in efx_nic_t *enp) 536 { 537 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 538 efx_mcdi_req_t *emrp; 539 efsys_lock_state_t state; 540 efx_rc_t rc; 541 542 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 543 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); 544 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 545 546 /* Serialise against post-watchdog efx_mcdi_ev* */ 547 EFSYS_LOCK(enp->en_eslp, state); 548 549 EFSYS_ASSERT(emip->emi_pending_req != NULL); 550 EFSYS_ASSERT(!emip->emi_ev_cpl); 551 emrp = emip->emi_pending_req; 552 553 /* Check if hardware is unavailable */ 554 if (efx_nic_hw_unavailable(enp)) { 555 EFSYS_UNLOCK(enp->en_eslp, state); 556 return (B_FALSE); 557 } 558 559 /* Check for reboot atomically w.r.t efx_mcdi_request_start */ 560 if (emip->emi_poll_cnt++ == 0) { 561 if ((rc = efx_mcdi_poll_reboot(enp)) != 0) { 562 emip->emi_pending_req = NULL; 563 EFSYS_UNLOCK(enp->en_eslp, state); 564 565 /* Reboot/Assertion */ 566 if (rc == EIO || rc == EINTR) 567 efx_mcdi_raise_exception(enp, emrp, rc); 568 569 goto fail1; 570 } 571 } 572 573 /* Check if a response is available */ 574 if (efx_mcdi_poll_response(enp) == B_FALSE) { 575 EFSYS_UNLOCK(enp->en_eslp, state); 576 return (B_FALSE); 577 } 578 579 /* Read the response header */ 580 efx_mcdi_read_response_header(enp, emrp); 581 582 /* Request complete */ 583 emip->emi_pending_req = NULL; 584 585 /* Ensure stale MCDI requests fail after an MC reboot. */ 586 emip->emi_new_epoch = B_FALSE; 587 588 EFSYS_UNLOCK(enp->en_eslp, state); 589 590 if ((rc = emrp->emr_rc) != 0) 591 goto fail2; 592 593 efx_mcdi_finish_response(enp, emrp); 594 return (B_TRUE); 595 596 fail2: 597 if (!emrp->emr_quiet) 598 EFSYS_PROBE(fail2); 599 fail1: 600 if (!emrp->emr_quiet) 601 EFSYS_PROBE1(fail1, efx_rc_t, rc); 602 603 return (B_TRUE); 604 } 605 606 __checkReturn boolean_t 607 efx_mcdi_request_abort( 608 __in efx_nic_t *enp) 609 { 610 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 611 efx_mcdi_req_t *emrp; 612 boolean_t aborted; 613 efsys_lock_state_t state; 614 615 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 616 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); 617 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 618 619 /* 620 * efx_mcdi_ev_* may have already completed this event, and be 621 * spinning/blocked on the upper layer lock. So it *is* legitimate 622 * to for emi_pending_req to be NULL. If there is a pending event 623 * completed request, then provide a "credit" to allow 624 * efx_mcdi_ev_cpl() to accept a single spurious completion. 625 */ 626 EFSYS_LOCK(enp->en_eslp, state); 627 emrp = emip->emi_pending_req; 628 aborted = (emrp != NULL); 629 if (aborted) { 630 emip->emi_pending_req = NULL; 631 632 /* Error the request */ 633 emrp->emr_out_length_used = 0; 634 emrp->emr_rc = ETIMEDOUT; 635 636 /* Provide a credit for seqno/emr_pending_req mismatches */ 637 if (emip->emi_ev_cpl) 638 ++emip->emi_aborted; 639 640 /* 641 * The upper layer has called us, so we don't 642 * need to complete the request. 643 */ 644 } 645 EFSYS_UNLOCK(enp->en_eslp, state); 646 647 return (aborted); 648 } 649 650 __checkReturn efx_rc_t 651 efx_mcdi_get_client_handle( 652 __in efx_nic_t *enp, 653 __in efx_pcie_interface_t intf, 654 __in uint16_t pf, 655 __in uint16_t vf, 656 __out uint32_t *handle) 657 { 658 efx_mcdi_req_t req; 659 EFX_MCDI_DECLARE_BUF(payload, 660 MC_CMD_GET_CLIENT_HANDLE_IN_LEN, 661 MC_CMD_GET_CLIENT_HANDLE_OUT_LEN); 662 uint32_t pcie_intf; 663 efx_rc_t rc; 664 665 if (handle == NULL) { 666 rc = EINVAL; 667 goto fail1; 668 } 669 670 rc = efx_mcdi_intf_to_pcie(intf, &pcie_intf); 671 if (rc != 0) 672 goto fail2; 673 674 req.emr_cmd = MC_CMD_GET_CLIENT_HANDLE; 675 req.emr_in_buf = payload; 676 req.emr_in_length = MC_CMD_GET_CLIENT_HANDLE_IN_LEN; 677 req.emr_out_buf = payload; 678 req.emr_out_length = MC_CMD_GET_CLIENT_HANDLE_OUT_LEN; 679 680 MCDI_IN_SET_DWORD(req, GET_CLIENT_HANDLE_IN_TYPE, 681 MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_FUNC); 682 MCDI_IN_SET_WORD(req, GET_CLIENT_HANDLE_IN_FUNC_PF, pf); 683 MCDI_IN_SET_WORD(req, GET_CLIENT_HANDLE_IN_FUNC_VF, vf); 684 MCDI_IN_SET_DWORD(req, GET_CLIENT_HANDLE_IN_FUNC_INTF, pcie_intf); 685 686 efx_mcdi_execute(enp, &req); 687 688 if (req.emr_rc != 0) { 689 rc = req.emr_rc; 690 goto fail3; 691 } 692 693 if (req.emr_out_length_used < MC_CMD_GET_CLIENT_HANDLE_OUT_LEN) { 694 rc = EMSGSIZE; 695 goto fail4; 696 } 697 698 *handle = MCDI_OUT_DWORD(req, GET_CLIENT_HANDLE_OUT_HANDLE); 699 700 return 0; 701 fail4: 702 EFSYS_PROBE(fail4); 703 fail3: 704 EFSYS_PROBE(fail3); 705 fail2: 706 EFSYS_PROBE(fail2); 707 fail1: 708 EFSYS_PROBE1(fail1, efx_rc_t, rc); 709 return (rc); 710 } 711 712 __checkReturn efx_rc_t 713 efx_mcdi_get_own_client_handle( 714 __in efx_nic_t *enp, 715 __out uint32_t *handle) 716 { 717 efx_rc_t rc; 718 719 rc = efx_mcdi_get_client_handle(enp, EFX_PCIE_INTERFACE_CALLER, 720 PCIE_FUNCTION_PF_NULL, PCIE_FUNCTION_VF_NULL, handle); 721 if (rc != 0) 722 goto fail1; 723 724 return (0); 725 fail1: 726 EFSYS_PROBE1(fail1, efx_rc_t, rc); 727 return (rc); 728 } 729 730 void 731 efx_mcdi_get_timeout( 732 __in efx_nic_t *enp, 733 __in efx_mcdi_req_t *emrp, 734 __out uint32_t *timeoutp) 735 { 736 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 737 738 emcop->emco_get_timeout(enp, emrp, timeoutp); 739 } 740 741 __checkReturn efx_rc_t 742 efx_mcdi_request_errcode( 743 __in unsigned int err) 744 { 745 746 switch (err) { 747 /* MCDI v1 */ 748 case MC_CMD_ERR_EPERM: 749 return (EACCES); 750 case MC_CMD_ERR_ENOENT: 751 return (ENOENT); 752 case MC_CMD_ERR_EINTR: 753 return (EINTR); 754 case MC_CMD_ERR_EACCES: 755 return (EACCES); 756 case MC_CMD_ERR_EBUSY: 757 return (EBUSY); 758 case MC_CMD_ERR_EINVAL: 759 return (EINVAL); 760 case MC_CMD_ERR_EDEADLK: 761 return (EDEADLK); 762 case MC_CMD_ERR_ENOSYS: 763 return (ENOTSUP); 764 case MC_CMD_ERR_ETIME: 765 return (ETIMEDOUT); 766 case MC_CMD_ERR_ENOTSUP: 767 return (ENOTSUP); 768 case MC_CMD_ERR_EALREADY: 769 return (EALREADY); 770 771 /* MCDI v2 */ 772 case MC_CMD_ERR_EEXIST: 773 return (EEXIST); 774 #ifdef MC_CMD_ERR_EAGAIN 775 case MC_CMD_ERR_EAGAIN: 776 return (EAGAIN); 777 #endif 778 #ifdef MC_CMD_ERR_ENOSPC 779 case MC_CMD_ERR_ENOSPC: 780 return (ENOSPC); 781 #endif 782 case MC_CMD_ERR_ERANGE: 783 return (ERANGE); 784 785 case MC_CMD_ERR_ALLOC_FAIL: 786 return (ENOMEM); 787 case MC_CMD_ERR_NO_VADAPTOR: 788 return (ENOENT); 789 case MC_CMD_ERR_NO_EVB_PORT: 790 return (ENOENT); 791 case MC_CMD_ERR_NO_VSWITCH: 792 return (ENODEV); 793 case MC_CMD_ERR_VLAN_LIMIT: 794 return (EINVAL); 795 case MC_CMD_ERR_BAD_PCI_FUNC: 796 return (ENODEV); 797 case MC_CMD_ERR_BAD_VLAN_MODE: 798 return (EINVAL); 799 case MC_CMD_ERR_BAD_VSWITCH_TYPE: 800 return (EINVAL); 801 case MC_CMD_ERR_BAD_VPORT_TYPE: 802 return (EINVAL); 803 case MC_CMD_ERR_MAC_EXIST: 804 return (EEXIST); 805 806 case MC_CMD_ERR_PROXY_PENDING: 807 return (EAGAIN); 808 809 default: 810 EFSYS_PROBE1(mc_pcol_error, int, err); 811 return (EIO); 812 } 813 } 814 815 void 816 efx_mcdi_raise_exception( 817 __in efx_nic_t *enp, 818 __in_opt efx_mcdi_req_t *emrp, 819 __in int rc) 820 { 821 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 822 efx_mcdi_exception_t exception; 823 824 /* Reboot or Assertion failure only */ 825 EFSYS_ASSERT(rc == EIO || rc == EINTR); 826 827 /* 828 * If MC_CMD_REBOOT causes a reboot (dependent on parameters), 829 * then the EIO is not worthy of an exception. 830 */ 831 if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO) 832 return; 833 834 exception = (rc == EIO) 835 ? EFX_MCDI_EXCEPTION_MC_REBOOT 836 : EFX_MCDI_EXCEPTION_MC_BADASSERT; 837 838 emtp->emt_exception(emtp->emt_context, exception); 839 } 840 841 void 842 efx_mcdi_execute( 843 __in efx_nic_t *enp, 844 __inout efx_mcdi_req_t *emrp) 845 { 846 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 847 848 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); 849 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 850 851 emrp->emr_quiet = B_FALSE; 852 emtp->emt_execute(emtp->emt_context, emrp); 853 } 854 855 void 856 efx_mcdi_execute_quiet( 857 __in efx_nic_t *enp, 858 __inout efx_mcdi_req_t *emrp) 859 { 860 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 861 862 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); 863 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 864 865 emrp->emr_quiet = B_TRUE; 866 emtp->emt_execute(emtp->emt_context, emrp); 867 } 868 869 void 870 efx_mcdi_ev_cpl( 871 __in efx_nic_t *enp, 872 __in unsigned int seq, 873 __in unsigned int outlen, 874 __in int errcode) 875 { 876 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 877 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 878 efx_mcdi_req_t *emrp; 879 efsys_lock_state_t state; 880 881 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); 882 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 883 884 /* 885 * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start() 886 * when we're completing an aborted request. 887 */ 888 EFSYS_LOCK(enp->en_eslp, state); 889 if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl || 890 (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) { 891 EFSYS_ASSERT(emip->emi_aborted > 0); 892 if (emip->emi_aborted > 0) 893 --emip->emi_aborted; 894 EFSYS_UNLOCK(enp->en_eslp, state); 895 return; 896 } 897 898 emrp = emip->emi_pending_req; 899 emip->emi_pending_req = NULL; 900 EFSYS_UNLOCK(enp->en_eslp, state); 901 902 if (emip->emi_max_version >= 2) { 903 /* MCDIv2 response details do not fit into an event. */ 904 efx_mcdi_read_response_header(enp, emrp); 905 } else { 906 if (errcode != 0) { 907 if (!emrp->emr_quiet) { 908 EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd, 909 int, errcode); 910 } 911 emrp->emr_out_length_used = 0; 912 emrp->emr_rc = efx_mcdi_request_errcode(errcode); 913 } else { 914 emrp->emr_out_length_used = outlen; 915 emrp->emr_rc = 0; 916 } 917 } 918 if (emrp->emr_rc == 0) 919 efx_mcdi_finish_response(enp, emrp); 920 921 emtp->emt_ev_cpl(emtp->emt_context); 922 } 923 924 #if EFSYS_OPT_MCDI_PROXY_AUTH 925 926 __checkReturn efx_rc_t 927 efx_mcdi_get_proxy_handle( 928 __in efx_nic_t *enp, 929 __in efx_mcdi_req_t *emrp, 930 __out uint32_t *handlep) 931 { 932 efx_rc_t rc; 933 934 _NOTE(ARGUNUSED(enp)) 935 936 /* 937 * Return proxy handle from MCDI request that returned with error 938 * MC_MCD_ERR_PROXY_PENDING. This handle is used to wait for a matching 939 * PROXY_RESPONSE event. 940 */ 941 if ((emrp == NULL) || (handlep == NULL)) { 942 rc = EINVAL; 943 goto fail1; 944 } 945 if ((emrp->emr_rc != 0) && 946 (emrp->emr_err_code == MC_CMD_ERR_PROXY_PENDING)) { 947 *handlep = emrp->emr_proxy_handle; 948 rc = 0; 949 } else { 950 *handlep = 0; 951 rc = ENOENT; 952 } 953 return (rc); 954 955 fail1: 956 EFSYS_PROBE1(fail1, efx_rc_t, rc); 957 return (rc); 958 } 959 960 void 961 efx_mcdi_ev_proxy_response( 962 __in efx_nic_t *enp, 963 __in unsigned int handle, 964 __in unsigned int status) 965 { 966 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 967 efx_rc_t rc; 968 969 /* 970 * Handle results of an authorization request for a privileged MCDI 971 * command. If authorization was granted then we must re-issue the 972 * original MCDI request. If authorization failed or timed out, 973 * then the original MCDI request should be completed with the 974 * result code from this event. 975 */ 976 rc = (status == 0) ? 0 : efx_mcdi_request_errcode(status); 977 978 emtp->emt_ev_proxy_response(emtp->emt_context, handle, rc); 979 } 980 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ 981 982 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 983 void 984 efx_mcdi_ev_proxy_request( 985 __in efx_nic_t *enp, 986 __in unsigned int index) 987 { 988 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 989 990 if (emtp->emt_ev_proxy_request != NULL) 991 emtp->emt_ev_proxy_request(emtp->emt_context, index); 992 } 993 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ 994 void 995 efx_mcdi_ev_death( 996 __in efx_nic_t *enp, 997 __in int rc) 998 { 999 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 1000 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 1001 efx_mcdi_req_t *emrp = NULL; 1002 boolean_t ev_cpl; 1003 efsys_lock_state_t state; 1004 1005 /* 1006 * The MCDI request (if there is one) has been terminated, either 1007 * by a BADASSERT or REBOOT event. 1008 * 1009 * If there is an outstanding event-completed MCDI operation, then we 1010 * will never receive the completion event (because both MCDI 1011 * completions and BADASSERT events are sent to the same evq). So 1012 * complete this MCDI op. 1013 * 1014 * This function might run in parallel with efx_mcdi_request_poll() 1015 * for poll completed mcdi requests, and also with 1016 * efx_mcdi_request_start() for post-watchdog completions. 1017 */ 1018 EFSYS_LOCK(enp->en_eslp, state); 1019 emrp = emip->emi_pending_req; 1020 ev_cpl = emip->emi_ev_cpl; 1021 if (emrp != NULL && emip->emi_ev_cpl) { 1022 emip->emi_pending_req = NULL; 1023 1024 emrp->emr_out_length_used = 0; 1025 emrp->emr_rc = rc; 1026 ++emip->emi_aborted; 1027 } 1028 1029 /* 1030 * Since we're running in parallel with a request, consume the 1031 * status word before dropping the lock. 1032 */ 1033 if (rc == EIO || rc == EINTR) { 1034 EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US); 1035 (void) efx_mcdi_poll_reboot(enp); 1036 emip->emi_new_epoch = B_TRUE; 1037 } 1038 1039 EFSYS_UNLOCK(enp->en_eslp, state); 1040 1041 efx_mcdi_raise_exception(enp, emrp, rc); 1042 1043 if (emrp != NULL && ev_cpl) 1044 emtp->emt_ev_cpl(emtp->emt_context); 1045 } 1046 1047 __checkReturn efx_rc_t 1048 efx_mcdi_get_version( 1049 __in efx_nic_t *enp, 1050 __in uint32_t flags, 1051 __out efx_mcdi_version_t *verp) 1052 { 1053 efx_nic_board_info_t *board_infop = &verp->emv_board_info; 1054 EFX_MCDI_DECLARE_BUF(payload, 1055 MC_CMD_GET_VERSION_EXT_IN_LEN, 1056 MC_CMD_GET_VERSION_V2_OUT_LEN); 1057 efx_word_t *ver_words; 1058 uint16_t version[4]; 1059 efx_mcdi_req_t req; 1060 uint32_t firmware; 1061 efx_rc_t rc; 1062 1063 EFX_STATIC_ASSERT(sizeof (verp->emv_version) == 1064 MC_CMD_GET_VERSION_OUT_VERSION_LEN); 1065 EFX_STATIC_ASSERT(sizeof (verp->emv_firmware) == 1066 MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN); 1067 1068 EFX_STATIC_ASSERT(EFX_MCDI_VERSION_BOARD_INFO == 1069 (1U << MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN)); 1070 1071 EFX_STATIC_ASSERT(sizeof (board_infop->enbi_serial) == 1072 MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN); 1073 EFX_STATIC_ASSERT(sizeof (board_infop->enbi_name) == 1074 MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_LEN); 1075 EFX_STATIC_ASSERT(sizeof (board_infop->enbi_revision) == 1076 MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_LEN); 1077 1078 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 1079 1080 req.emr_cmd = MC_CMD_GET_VERSION; 1081 req.emr_in_buf = payload; 1082 req.emr_out_buf = payload; 1083 1084 if ((flags & EFX_MCDI_VERSION_BOARD_INFO) != 0) { 1085 /* Request basic + extended version information. */ 1086 req.emr_in_length = MC_CMD_GET_VERSION_EXT_IN_LEN; 1087 req.emr_out_length = MC_CMD_GET_VERSION_V2_OUT_LEN; 1088 } else { 1089 /* Request only basic version information. */ 1090 req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN; 1091 req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN; 1092 } 1093 1094 efx_mcdi_execute(enp, &req); 1095 1096 if (req.emr_rc != 0) { 1097 rc = req.emr_rc; 1098 goto fail1; 1099 } 1100 1101 /* bootrom support */ 1102 if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) { 1103 version[0] = version[1] = version[2] = version[3] = 0; 1104 firmware = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE); 1105 goto out; 1106 } 1107 1108 if (req.emr_out_length_used < req.emr_out_length) { 1109 rc = EMSGSIZE; 1110 goto fail2; 1111 } 1112 1113 ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION); 1114 version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0); 1115 version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0); 1116 version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0); 1117 version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0); 1118 firmware = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE); 1119 1120 out: 1121 memset(verp, 0, sizeof (*verp)); 1122 1123 verp->emv_version[0] = version[0]; 1124 verp->emv_version[1] = version[1]; 1125 verp->emv_version[2] = version[2]; 1126 verp->emv_version[3] = version[3]; 1127 verp->emv_firmware = firmware; 1128 1129 verp->emv_flags = MCDI_OUT_DWORD(req, GET_VERSION_V2_OUT_FLAGS); 1130 verp->emv_flags &= flags; 1131 1132 if ((verp->emv_flags & EFX_MCDI_VERSION_BOARD_INFO) != 0) { 1133 memcpy(board_infop->enbi_serial, 1134 MCDI_OUT2(req, char, GET_VERSION_V2_OUT_BOARD_SERIAL), 1135 sizeof (board_infop->enbi_serial)); 1136 memcpy(board_infop->enbi_name, 1137 MCDI_OUT2(req, char, GET_VERSION_V2_OUT_BOARD_NAME), 1138 sizeof (board_infop->enbi_name)); 1139 board_infop->enbi_revision = 1140 MCDI_OUT_DWORD(req, GET_VERSION_V2_OUT_BOARD_REVISION); 1141 } 1142 1143 return (0); 1144 1145 fail2: 1146 EFSYS_PROBE(fail2); 1147 fail1: 1148 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1149 1150 return (rc); 1151 } 1152 1153 static __checkReturn efx_rc_t 1154 efx_mcdi_get_boot_status( 1155 __in efx_nic_t *enp, 1156 __out efx_mcdi_boot_t *statusp) 1157 { 1158 EFX_MCDI_DECLARE_BUF(payload, 1159 MC_CMD_GET_BOOT_STATUS_IN_LEN, 1160 MC_CMD_GET_BOOT_STATUS_OUT_LEN); 1161 efx_mcdi_req_t req; 1162 efx_rc_t rc; 1163 1164 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 1165 1166 req.emr_cmd = MC_CMD_GET_BOOT_STATUS; 1167 req.emr_in_buf = payload; 1168 req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN; 1169 req.emr_out_buf = payload; 1170 req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN; 1171 1172 efx_mcdi_execute_quiet(enp, &req); 1173 1174 /* 1175 * NOTE: Unprivileged functions cannot access boot status, 1176 * so the MCDI request will return EACCES. This is 1177 * also checked in efx_mcdi_version. 1178 */ 1179 1180 if (req.emr_rc != 0) { 1181 rc = req.emr_rc; 1182 goto fail1; 1183 } 1184 1185 if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) { 1186 rc = EMSGSIZE; 1187 goto fail2; 1188 } 1189 1190 if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS, 1191 GET_BOOT_STATUS_OUT_FLAGS_PRIMARY)) 1192 *statusp = EFX_MCDI_BOOT_PRIMARY; 1193 else 1194 *statusp = EFX_MCDI_BOOT_SECONDARY; 1195 1196 return (0); 1197 1198 fail2: 1199 EFSYS_PROBE(fail2); 1200 fail1: 1201 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1202 1203 return (rc); 1204 } 1205 1206 __checkReturn efx_rc_t 1207 efx_mcdi_version( 1208 __in efx_nic_t *enp, 1209 __out_ecount_opt(4) uint16_t versionp[4], 1210 __out_opt uint32_t *buildp, 1211 __out_opt efx_mcdi_boot_t *statusp) 1212 { 1213 efx_mcdi_version_t ver; 1214 efx_mcdi_boot_t status; 1215 efx_rc_t rc; 1216 1217 rc = efx_mcdi_get_version(enp, 0, &ver); 1218 if (rc != 0) 1219 goto fail1; 1220 1221 /* The bootrom doesn't understand BOOT_STATUS */ 1222 if (MC_FW_VERSION_IS_BOOTLOADER(ver.emv_firmware)) { 1223 status = EFX_MCDI_BOOT_ROM; 1224 goto out; 1225 } 1226 1227 rc = efx_mcdi_get_boot_status(enp, &status); 1228 if (rc == EACCES) { 1229 /* Unprivileged functions cannot access BOOT_STATUS */ 1230 status = EFX_MCDI_BOOT_PRIMARY; 1231 memset(ver.emv_version, 0, sizeof (ver.emv_version)); 1232 ver.emv_firmware = 0; 1233 } else if (rc != 0) { 1234 goto fail2; 1235 } 1236 1237 out: 1238 if (versionp != NULL) 1239 memcpy(versionp, ver.emv_version, sizeof (ver.emv_version)); 1240 if (buildp != NULL) 1241 *buildp = ver.emv_firmware; 1242 if (statusp != NULL) 1243 *statusp = status; 1244 1245 return (0); 1246 1247 fail2: 1248 EFSYS_PROBE(fail2); 1249 fail1: 1250 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1251 1252 return (rc); 1253 } 1254 1255 __checkReturn efx_rc_t 1256 efx_mcdi_get_capabilities( 1257 __in efx_nic_t *enp, 1258 __out_opt uint32_t *flagsp, 1259 __out_opt uint16_t *rx_dpcpu_fw_idp, 1260 __out_opt uint16_t *tx_dpcpu_fw_idp, 1261 __out_opt uint32_t *flags2p, 1262 __out_opt uint32_t *tso2ncp) 1263 { 1264 efx_mcdi_req_t req; 1265 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN, 1266 MC_CMD_GET_CAPABILITIES_V2_OUT_LEN); 1267 boolean_t v2_capable; 1268 efx_rc_t rc; 1269 1270 req.emr_cmd = MC_CMD_GET_CAPABILITIES; 1271 req.emr_in_buf = payload; 1272 req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN; 1273 req.emr_out_buf = payload; 1274 req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN; 1275 1276 efx_mcdi_execute_quiet(enp, &req); 1277 1278 if (req.emr_rc != 0) { 1279 rc = req.emr_rc; 1280 goto fail1; 1281 } 1282 1283 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) { 1284 rc = EMSGSIZE; 1285 goto fail2; 1286 } 1287 1288 if (flagsp != NULL) 1289 *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1); 1290 1291 if (rx_dpcpu_fw_idp != NULL) 1292 *rx_dpcpu_fw_idp = MCDI_OUT_WORD(req, 1293 GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID); 1294 1295 if (tx_dpcpu_fw_idp != NULL) 1296 *tx_dpcpu_fw_idp = MCDI_OUT_WORD(req, 1297 GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); 1298 1299 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) 1300 v2_capable = B_FALSE; 1301 else 1302 v2_capable = B_TRUE; 1303 1304 if (flags2p != NULL) { 1305 *flags2p = (v2_capable) ? 1306 MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2) : 1307 0; 1308 } 1309 1310 if (tso2ncp != NULL) { 1311 *tso2ncp = (v2_capable) ? 1312 MCDI_OUT_WORD(req, 1313 GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS) : 1314 0; 1315 } 1316 1317 return (0); 1318 1319 fail2: 1320 EFSYS_PROBE(fail2); 1321 fail1: 1322 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1323 1324 return (rc); 1325 } 1326 1327 static __checkReturn efx_rc_t 1328 efx_mcdi_do_reboot( 1329 __in efx_nic_t *enp, 1330 __in boolean_t after_assertion) 1331 { 1332 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_REBOOT_IN_LEN, 1333 MC_CMD_REBOOT_OUT_LEN); 1334 efx_mcdi_req_t req; 1335 efx_rc_t rc; 1336 1337 /* 1338 * We could require the caller to have caused en_mod_flags=0 to 1339 * call this function. This doesn't help the other port though, 1340 * who's about to get the MC ripped out from underneath them. 1341 * Since they have to cope with the subsequent fallout of MCDI 1342 * failures, we should as well. 1343 */ 1344 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 1345 1346 req.emr_cmd = MC_CMD_REBOOT; 1347 req.emr_in_buf = payload; 1348 req.emr_in_length = MC_CMD_REBOOT_IN_LEN; 1349 req.emr_out_buf = payload; 1350 req.emr_out_length = MC_CMD_REBOOT_OUT_LEN; 1351 1352 MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS, 1353 (after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0)); 1354 1355 efx_mcdi_execute_quiet(enp, &req); 1356 1357 if (req.emr_rc == EACCES) { 1358 /* Unprivileged functions cannot reboot the MC. */ 1359 goto out; 1360 } 1361 1362 /* A successful reboot request returns EIO. */ 1363 if (req.emr_rc != 0 && req.emr_rc != EIO) { 1364 rc = req.emr_rc; 1365 goto fail1; 1366 } 1367 1368 out: 1369 return (0); 1370 1371 fail1: 1372 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1373 1374 return (rc); 1375 } 1376 1377 __checkReturn efx_rc_t 1378 efx_mcdi_reboot( 1379 __in efx_nic_t *enp) 1380 { 1381 return (efx_mcdi_do_reboot(enp, B_FALSE)); 1382 } 1383 1384 __checkReturn efx_rc_t 1385 efx_mcdi_exit_assertion_handler( 1386 __in efx_nic_t *enp) 1387 { 1388 return (efx_mcdi_do_reboot(enp, B_TRUE)); 1389 } 1390 1391 __checkReturn efx_rc_t 1392 efx_mcdi_read_assertion( 1393 __in efx_nic_t *enp) 1394 { 1395 efx_mcdi_req_t req; 1396 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_ASSERTS_IN_LEN, 1397 MC_CMD_GET_ASSERTS_OUT_LEN); 1398 const char *reason; 1399 unsigned int flags; 1400 unsigned int index; 1401 unsigned int ofst; 1402 int retry; 1403 efx_rc_t rc; 1404 1405 /* 1406 * Before we attempt to chat to the MC, we should verify that the MC 1407 * isn't in it's assertion handler, either due to a previous reboot, 1408 * or because we're reinitializing due to an eec_exception(). 1409 * 1410 * Use GET_ASSERTS to read any assertion state that may be present. 1411 * Retry this command twice. Once because a boot-time assertion failure 1412 * might cause the 1st MCDI request to fail. And once again because 1413 * we might race with efx_mcdi_exit_assertion_handler() running on 1414 * partner port(s) on the same NIC. 1415 */ 1416 retry = 2; 1417 do { 1418 (void) memset(payload, 0, sizeof (payload)); 1419 req.emr_cmd = MC_CMD_GET_ASSERTS; 1420 req.emr_in_buf = payload; 1421 req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN; 1422 req.emr_out_buf = payload; 1423 req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN; 1424 1425 MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1); 1426 efx_mcdi_execute_quiet(enp, &req); 1427 1428 } while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0); 1429 1430 if (req.emr_rc != 0) { 1431 if (req.emr_rc == EACCES) { 1432 /* Unprivileged functions cannot clear assertions. */ 1433 goto out; 1434 } 1435 rc = req.emr_rc; 1436 goto fail1; 1437 } 1438 1439 if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) { 1440 rc = EMSGSIZE; 1441 goto fail2; 1442 } 1443 1444 /* Print out any assertion state recorded */ 1445 flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS); 1446 if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS) 1447 return (0); 1448 1449 reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL) 1450 ? "system-level assertion" 1451 : (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL) 1452 ? "thread-level assertion" 1453 : (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED) 1454 ? "watchdog reset" 1455 : (flags == MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP) 1456 ? "illegal address trap" 1457 : "unknown assertion"; 1458 EFSYS_PROBE3(mcpu_assertion, 1459 const char *, reason, unsigned int, 1460 MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS), 1461 unsigned int, 1462 MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS)); 1463 1464 /* Print out the registers (r1 ... r31) */ 1465 ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST; 1466 for (index = 1; 1467 index < 1 + MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM; 1468 index++) { 1469 EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int, 1470 EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst), 1471 EFX_DWORD_0)); 1472 ofst += sizeof (efx_dword_t); 1473 } 1474 EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN); 1475 1476 out: 1477 return (0); 1478 1479 fail2: 1480 EFSYS_PROBE(fail2); 1481 fail1: 1482 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1483 1484 return (rc); 1485 } 1486 1487 1488 /* 1489 * Internal routines for for specific MCDI requests. 1490 */ 1491 1492 __checkReturn efx_rc_t 1493 efx_mcdi_drv_attach( 1494 __in efx_nic_t *enp, 1495 __in boolean_t attach) 1496 { 1497 efx_mcdi_req_t req; 1498 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRV_ATTACH_IN_V2_LEN, 1499 MC_CMD_DRV_ATTACH_EXT_OUT_LEN); 1500 efx_rc_t rc; 1501 1502 req.emr_cmd = MC_CMD_DRV_ATTACH; 1503 req.emr_in_buf = payload; 1504 if (enp->en_drv_version[0] == '\0') { 1505 req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN; 1506 } else { 1507 req.emr_in_length = MC_CMD_DRV_ATTACH_IN_V2_LEN; 1508 } 1509 req.emr_out_buf = payload; 1510 req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN; 1511 1512 /* 1513 * Typically, client drivers use DONT_CARE for the datapath firmware 1514 * type to ensure that the driver can attach to an unprivileged 1515 * function. The datapath firmware type to use is controlled by the 1516 * 'sfboot' utility. 1517 * If a client driver wishes to attach with a specific datapath firmware 1518 * type, that can be passed in second argument of efx_nic_probe API. One 1519 * such example is the ESXi native driver that attempts attaching with 1520 * FULL_FEATURED datapath firmware type first and fall backs to 1521 * DONT_CARE datapath firmware type if MC_CMD_DRV_ATTACH fails. 1522 */ 1523 MCDI_IN_POPULATE_DWORD_2(req, DRV_ATTACH_IN_NEW_STATE, 1524 DRV_ATTACH_IN_ATTACH, attach ? 1 : 0, 1525 DRV_ATTACH_IN_SUBVARIANT_AWARE, EFSYS_OPT_FW_SUBVARIANT_AWARE); 1526 MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1); 1527 MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, enp->efv); 1528 1529 if (req.emr_in_length >= MC_CMD_DRV_ATTACH_IN_V2_LEN) { 1530 EFX_STATIC_ASSERT(sizeof (enp->en_drv_version) == 1531 MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN); 1532 memcpy(MCDI_IN2(req, char, DRV_ATTACH_IN_V2_DRIVER_VERSION), 1533 enp->en_drv_version, 1534 MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN); 1535 } 1536 1537 efx_mcdi_execute(enp, &req); 1538 1539 if (req.emr_rc != 0) { 1540 rc = req.emr_rc; 1541 goto fail1; 1542 } 1543 1544 if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) { 1545 rc = EMSGSIZE; 1546 goto fail2; 1547 } 1548 1549 return (0); 1550 1551 fail2: 1552 EFSYS_PROBE(fail2); 1553 fail1: 1554 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1555 1556 return (rc); 1557 } 1558 1559 __checkReturn efx_rc_t 1560 efx_mcdi_get_board_cfg( 1561 __in efx_nic_t *enp, 1562 __out_opt uint32_t *board_typep, 1563 __out_opt efx_dword_t *capabilitiesp, 1564 __out_ecount_opt(6) uint8_t mac_addrp[6]) 1565 { 1566 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 1567 efx_mcdi_req_t req; 1568 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_BOARD_CFG_IN_LEN, 1569 MC_CMD_GET_BOARD_CFG_OUT_LENMIN); 1570 efx_rc_t rc; 1571 1572 req.emr_cmd = MC_CMD_GET_BOARD_CFG; 1573 req.emr_in_buf = payload; 1574 req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN; 1575 req.emr_out_buf = payload; 1576 req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMIN; 1577 1578 efx_mcdi_execute(enp, &req); 1579 1580 if (req.emr_rc != 0) { 1581 rc = req.emr_rc; 1582 goto fail1; 1583 } 1584 1585 if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { 1586 rc = EMSGSIZE; 1587 goto fail2; 1588 } 1589 1590 if (mac_addrp != NULL) { 1591 uint8_t *addrp; 1592 1593 if (emip->emi_port == 1) { 1594 addrp = MCDI_OUT2(req, uint8_t, 1595 GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0); 1596 } else if (emip->emi_port == 2) { 1597 addrp = MCDI_OUT2(req, uint8_t, 1598 GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1); 1599 } else { 1600 rc = EINVAL; 1601 goto fail3; 1602 } 1603 1604 EFX_MAC_ADDR_COPY(mac_addrp, addrp); 1605 } 1606 1607 if (capabilitiesp != NULL) { 1608 if (emip->emi_port == 1) { 1609 *capabilitiesp = *MCDI_OUT2(req, efx_dword_t, 1610 GET_BOARD_CFG_OUT_CAPABILITIES_PORT0); 1611 } else if (emip->emi_port == 2) { 1612 *capabilitiesp = *MCDI_OUT2(req, efx_dword_t, 1613 GET_BOARD_CFG_OUT_CAPABILITIES_PORT1); 1614 } else { 1615 rc = EINVAL; 1616 goto fail4; 1617 } 1618 } 1619 1620 if (board_typep != NULL) { 1621 *board_typep = MCDI_OUT_DWORD(req, 1622 GET_BOARD_CFG_OUT_BOARD_TYPE); 1623 } 1624 1625 return (0); 1626 1627 fail4: 1628 EFSYS_PROBE(fail4); 1629 fail3: 1630 EFSYS_PROBE(fail3); 1631 fail2: 1632 EFSYS_PROBE(fail2); 1633 fail1: 1634 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1635 1636 return (rc); 1637 } 1638 1639 __checkReturn efx_rc_t 1640 efx_mcdi_get_resource_limits( 1641 __in efx_nic_t *enp, 1642 __out_opt uint32_t *nevqp, 1643 __out_opt uint32_t *nrxqp, 1644 __out_opt uint32_t *ntxqp) 1645 { 1646 efx_mcdi_req_t req; 1647 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RESOURCE_LIMITS_IN_LEN, 1648 MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN); 1649 efx_rc_t rc; 1650 1651 req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS; 1652 req.emr_in_buf = payload; 1653 req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN; 1654 req.emr_out_buf = payload; 1655 req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN; 1656 1657 efx_mcdi_execute(enp, &req); 1658 1659 if (req.emr_rc != 0) { 1660 rc = req.emr_rc; 1661 goto fail1; 1662 } 1663 1664 if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) { 1665 rc = EMSGSIZE; 1666 goto fail2; 1667 } 1668 1669 if (nevqp != NULL) 1670 *nevqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_EVQ); 1671 if (nrxqp != NULL) 1672 *nrxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ); 1673 if (ntxqp != NULL) 1674 *ntxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ); 1675 1676 return (0); 1677 1678 fail2: 1679 EFSYS_PROBE(fail2); 1680 fail1: 1681 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1682 1683 return (rc); 1684 } 1685 1686 __checkReturn efx_rc_t 1687 efx_mcdi_get_phy_cfg( 1688 __in efx_nic_t *enp) 1689 { 1690 efx_port_t *epp = &(enp->en_port); 1691 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 1692 efx_mcdi_req_t req; 1693 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_CFG_IN_LEN, 1694 MC_CMD_GET_PHY_CFG_OUT_LEN); 1695 #if EFSYS_OPT_NAMES 1696 const char *namep; 1697 size_t namelen; 1698 #endif 1699 uint32_t phy_media_type; 1700 efx_rc_t rc; 1701 1702 req.emr_cmd = MC_CMD_GET_PHY_CFG; 1703 req.emr_in_buf = payload; 1704 req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN; 1705 req.emr_out_buf = payload; 1706 req.emr_out_length = MC_CMD_GET_PHY_CFG_OUT_LEN; 1707 1708 efx_mcdi_execute(enp, &req); 1709 1710 if (req.emr_rc != 0) { 1711 rc = req.emr_rc; 1712 goto fail1; 1713 } 1714 1715 if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) { 1716 rc = EMSGSIZE; 1717 goto fail2; 1718 } 1719 1720 encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE); 1721 #if EFSYS_OPT_NAMES 1722 namep = MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME); 1723 namelen = MIN(sizeof (encp->enc_phy_name) - 1, 1724 strnlen(namep, MC_CMD_GET_PHY_CFG_OUT_NAME_LEN)); 1725 (void) memset(encp->enc_phy_name, 0, 1726 sizeof (encp->enc_phy_name)); 1727 memcpy(encp->enc_phy_name, namep, namelen); 1728 #endif /* EFSYS_OPT_NAMES */ 1729 (void) memset(encp->enc_phy_revision, 0, 1730 sizeof (encp->enc_phy_revision)); 1731 memcpy(encp->enc_phy_revision, 1732 MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION), 1733 MIN(sizeof (encp->enc_phy_revision) - 1, 1734 MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN)); 1735 #if EFSYS_OPT_PHY_LED_CONTROL 1736 encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) | 1737 (1 << EFX_PHY_LED_OFF) | 1738 (1 << EFX_PHY_LED_ON)); 1739 #endif /* EFSYS_OPT_PHY_LED_CONTROL */ 1740 1741 /* Get the media type of the fixed port, if recognised. */ 1742 EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI); 1743 EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4); 1744 EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4); 1745 EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP); 1746 EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS); 1747 EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T); 1748 EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS); 1749 phy_media_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE); 1750 epp->ep_fixed_port_type = (efx_phy_media_type_t)phy_media_type; 1751 if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES) 1752 epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID; 1753 1754 epp->ep_phy_cap_mask = 1755 MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP); 1756 #if EFSYS_OPT_PHY_FLAGS 1757 encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS); 1758 #endif /* EFSYS_OPT_PHY_FLAGS */ 1759 1760 encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT); 1761 1762 /* Populate internal state */ 1763 encp->enc_mcdi_mdio_channel = 1764 (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL); 1765 1766 #if EFSYS_OPT_PHY_STATS 1767 encp->enc_mcdi_phy_stat_mask = 1768 MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK); 1769 #endif /* EFSYS_OPT_PHY_STATS */ 1770 1771 #if EFSYS_OPT_BIST 1772 encp->enc_bist_mask = 0; 1773 if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, 1774 GET_PHY_CFG_OUT_BIST_CABLE_SHORT)) 1775 encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT); 1776 if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, 1777 GET_PHY_CFG_OUT_BIST_CABLE_LONG)) 1778 encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_LONG); 1779 if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, 1780 GET_PHY_CFG_OUT_BIST)) 1781 encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_NORMAL); 1782 #endif /* EFSYS_OPT_BIST */ 1783 1784 return (0); 1785 1786 fail2: 1787 EFSYS_PROBE(fail2); 1788 fail1: 1789 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1790 1791 return (rc); 1792 } 1793 1794 __checkReturn efx_rc_t 1795 efx_mcdi_firmware_update_supported( 1796 __in efx_nic_t *enp, 1797 __out boolean_t *supportedp) 1798 { 1799 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 1800 efx_rc_t rc; 1801 1802 if (emcop != NULL) { 1803 if ((rc = emcop->emco_feature_supported(enp, 1804 EFX_MCDI_FEATURE_FW_UPDATE, supportedp)) != 0) 1805 goto fail1; 1806 } else { 1807 /* Earlier devices always supported updates */ 1808 *supportedp = B_TRUE; 1809 } 1810 1811 return (0); 1812 1813 fail1: 1814 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1815 1816 return (rc); 1817 } 1818 1819 __checkReturn efx_rc_t 1820 efx_mcdi_macaddr_change_supported( 1821 __in efx_nic_t *enp, 1822 __out boolean_t *supportedp) 1823 { 1824 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 1825 efx_rc_t rc; 1826 1827 if (emcop != NULL) { 1828 if ((rc = emcop->emco_feature_supported(enp, 1829 EFX_MCDI_FEATURE_MACADDR_CHANGE, supportedp)) != 0) 1830 goto fail1; 1831 } else { 1832 /* Earlier devices always supported MAC changes */ 1833 *supportedp = B_TRUE; 1834 } 1835 1836 return (0); 1837 1838 fail1: 1839 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1840 1841 return (rc); 1842 } 1843 1844 __checkReturn efx_rc_t 1845 efx_mcdi_link_control_supported( 1846 __in efx_nic_t *enp, 1847 __out boolean_t *supportedp) 1848 { 1849 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 1850 efx_rc_t rc; 1851 1852 if (emcop != NULL) { 1853 if ((rc = emcop->emco_feature_supported(enp, 1854 EFX_MCDI_FEATURE_LINK_CONTROL, supportedp)) != 0) 1855 goto fail1; 1856 } else { 1857 /* Earlier devices always supported link control */ 1858 *supportedp = B_TRUE; 1859 } 1860 1861 return (0); 1862 1863 fail1: 1864 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1865 1866 return (rc); 1867 } 1868 1869 __checkReturn efx_rc_t 1870 efx_mcdi_mac_spoofing_supported( 1871 __in efx_nic_t *enp, 1872 __out boolean_t *supportedp) 1873 { 1874 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 1875 efx_rc_t rc; 1876 1877 if (emcop != NULL) { 1878 if ((rc = emcop->emco_feature_supported(enp, 1879 EFX_MCDI_FEATURE_MAC_SPOOFING, supportedp)) != 0) 1880 goto fail1; 1881 } else { 1882 /* Earlier devices always supported MAC spoofing */ 1883 *supportedp = B_TRUE; 1884 } 1885 1886 return (0); 1887 1888 fail1: 1889 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1890 1891 return (rc); 1892 } 1893 1894 #if EFSYS_OPT_BIST 1895 1896 #if EFX_OPTS_EF10() 1897 /* 1898 * Enter bist offline mode. This is a fw mode which puts the NIC into a state 1899 * where memory BIST tests can be run and not much else can interfere or happen. 1900 * A reboot is required to exit this mode. 1901 */ 1902 __checkReturn efx_rc_t 1903 efx_mcdi_bist_enable_offline( 1904 __in efx_nic_t *enp) 1905 { 1906 efx_mcdi_req_t req; 1907 efx_rc_t rc; 1908 1909 EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN == 0); 1910 EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN == 0); 1911 1912 req.emr_cmd = MC_CMD_ENABLE_OFFLINE_BIST; 1913 req.emr_in_buf = NULL; 1914 req.emr_in_length = 0; 1915 req.emr_out_buf = NULL; 1916 req.emr_out_length = 0; 1917 1918 efx_mcdi_execute(enp, &req); 1919 1920 if (req.emr_rc != 0) { 1921 rc = req.emr_rc; 1922 goto fail1; 1923 } 1924 1925 return (0); 1926 1927 fail1: 1928 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1929 1930 return (rc); 1931 } 1932 #endif /* EFX_OPTS_EF10() */ 1933 1934 __checkReturn efx_rc_t 1935 efx_mcdi_bist_start( 1936 __in efx_nic_t *enp, 1937 __in efx_bist_type_t type) 1938 { 1939 efx_mcdi_req_t req; 1940 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_START_BIST_IN_LEN, 1941 MC_CMD_START_BIST_OUT_LEN); 1942 efx_rc_t rc; 1943 1944 req.emr_cmd = MC_CMD_START_BIST; 1945 req.emr_in_buf = payload; 1946 req.emr_in_length = MC_CMD_START_BIST_IN_LEN; 1947 req.emr_out_buf = payload; 1948 req.emr_out_length = MC_CMD_START_BIST_OUT_LEN; 1949 1950 switch (type) { 1951 case EFX_BIST_TYPE_PHY_NORMAL: 1952 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST); 1953 break; 1954 case EFX_BIST_TYPE_PHY_CABLE_SHORT: 1955 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, 1956 MC_CMD_PHY_BIST_CABLE_SHORT); 1957 break; 1958 case EFX_BIST_TYPE_PHY_CABLE_LONG: 1959 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, 1960 MC_CMD_PHY_BIST_CABLE_LONG); 1961 break; 1962 case EFX_BIST_TYPE_MC_MEM: 1963 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, 1964 MC_CMD_MC_MEM_BIST); 1965 break; 1966 case EFX_BIST_TYPE_SAT_MEM: 1967 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, 1968 MC_CMD_PORT_MEM_BIST); 1969 break; 1970 case EFX_BIST_TYPE_REG: 1971 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, 1972 MC_CMD_REG_BIST); 1973 break; 1974 default: 1975 EFSYS_ASSERT(0); 1976 } 1977 1978 efx_mcdi_execute(enp, &req); 1979 1980 if (req.emr_rc != 0) { 1981 rc = req.emr_rc; 1982 goto fail1; 1983 } 1984 1985 return (0); 1986 1987 fail1: 1988 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1989 1990 return (rc); 1991 } 1992 1993 #endif /* EFSYS_OPT_BIST */ 1994 1995 1996 /* Enable logging of some events (e.g. link state changes) */ 1997 __checkReturn efx_rc_t 1998 efx_mcdi_log_ctrl( 1999 __in efx_nic_t *enp) 2000 { 2001 efx_mcdi_req_t req; 2002 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LOG_CTRL_IN_LEN, 2003 MC_CMD_LOG_CTRL_OUT_LEN); 2004 efx_rc_t rc; 2005 2006 req.emr_cmd = MC_CMD_LOG_CTRL; 2007 req.emr_in_buf = payload; 2008 req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN; 2009 req.emr_out_buf = payload; 2010 req.emr_out_length = MC_CMD_LOG_CTRL_OUT_LEN; 2011 2012 MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST, 2013 MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ); 2014 MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0); 2015 2016 efx_mcdi_execute(enp, &req); 2017 2018 if (req.emr_rc != 0) { 2019 rc = req.emr_rc; 2020 goto fail1; 2021 } 2022 2023 return (0); 2024 2025 fail1: 2026 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2027 2028 return (rc); 2029 } 2030 2031 2032 #if EFSYS_OPT_MAC_STATS 2033 2034 __checkReturn efx_rc_t 2035 efx_mcdi_mac_stats( 2036 __in efx_nic_t *enp, 2037 __in uint32_t vport_id, 2038 __in_opt efsys_mem_t *esmp, 2039 __in efx_stats_action_t action, 2040 __in uint16_t period_ms) 2041 { 2042 efx_mcdi_req_t req; 2043 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_MAC_STATS_IN_LEN, 2044 MC_CMD_MAC_STATS_V2_OUT_DMA_LEN); 2045 int clear = (action == EFX_STATS_CLEAR); 2046 int upload = (action == EFX_STATS_UPLOAD); 2047 int enable = (action == EFX_STATS_ENABLE_NOEVENTS); 2048 int events = (action == EFX_STATS_ENABLE_EVENTS); 2049 int disable = (action == EFX_STATS_DISABLE); 2050 efx_rc_t rc; 2051 2052 req.emr_cmd = MC_CMD_MAC_STATS; 2053 req.emr_in_buf = payload; 2054 req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN; 2055 req.emr_out_buf = payload; 2056 req.emr_out_length = MC_CMD_MAC_STATS_V2_OUT_DMA_LEN; 2057 2058 MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD, 2059 MAC_STATS_IN_DMA, upload, 2060 MAC_STATS_IN_CLEAR, clear, 2061 MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable, 2062 MAC_STATS_IN_PERIODIC_ENABLE, enable | events, 2063 MAC_STATS_IN_PERIODIC_NOEVENT, !events, 2064 MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0); 2065 2066 if (enable || events || upload) { 2067 const efx_nic_cfg_t *encp = &enp->en_nic_cfg; 2068 uint32_t bytes; 2069 2070 /* Periodic stats or stats upload require a DMA buffer */ 2071 if (esmp == NULL) { 2072 rc = EINVAL; 2073 goto fail1; 2074 } 2075 2076 if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) { 2077 /* MAC stats count too small for legacy MAC stats */ 2078 rc = ENOSPC; 2079 goto fail2; 2080 } 2081 2082 bytes = encp->enc_mac_stats_nstats * sizeof (efx_qword_t); 2083 2084 if (EFSYS_MEM_SIZE(esmp) < bytes) { 2085 /* DMA buffer too small */ 2086 rc = ENOSPC; 2087 goto fail3; 2088 } 2089 2090 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO, 2091 EFSYS_MEM_ADDR(esmp) & 0xffffffff); 2092 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI, 2093 EFSYS_MEM_ADDR(esmp) >> 32); 2094 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes); 2095 } 2096 2097 /* 2098 * NOTE: Do not use EVB_PORT_ID_ASSIGNED when disabling periodic stats, 2099 * as this may fail (and leave periodic DMA enabled) if the 2100 * vadapter has already been deleted. 2101 */ 2102 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_PORT_ID, 2103 (disable ? EVB_PORT_ID_NULL : vport_id)); 2104 2105 efx_mcdi_execute(enp, &req); 2106 2107 if (req.emr_rc != 0) { 2108 /* EF10: Expect ENOENT if no DMA queues are initialised */ 2109 if ((req.emr_rc != ENOENT) || 2110 (enp->en_rx_qcount + enp->en_tx_qcount != 0)) { 2111 rc = req.emr_rc; 2112 goto fail4; 2113 } 2114 } 2115 2116 return (0); 2117 2118 fail4: 2119 EFSYS_PROBE(fail4); 2120 fail3: 2121 EFSYS_PROBE(fail3); 2122 fail2: 2123 EFSYS_PROBE(fail2); 2124 fail1: 2125 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2126 2127 return (rc); 2128 } 2129 2130 __checkReturn efx_rc_t 2131 efx_mcdi_mac_stats_clear( 2132 __in efx_nic_t *enp) 2133 { 2134 efx_rc_t rc; 2135 2136 if ((rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, NULL, 2137 EFX_STATS_CLEAR, 0)) != 0) 2138 goto fail1; 2139 2140 return (0); 2141 2142 fail1: 2143 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2144 2145 return (rc); 2146 } 2147 2148 __checkReturn efx_rc_t 2149 efx_mcdi_mac_stats_upload( 2150 __in efx_nic_t *enp, 2151 __in efsys_mem_t *esmp) 2152 { 2153 efx_rc_t rc; 2154 2155 /* 2156 * The MC DMAs aggregate statistics for our convenience, so we can 2157 * avoid having to pull the statistics buffer into the cache to 2158 * maintain cumulative statistics. 2159 */ 2160 if ((rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp, 2161 EFX_STATS_UPLOAD, 0)) != 0) 2162 goto fail1; 2163 2164 return (0); 2165 2166 fail1: 2167 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2168 2169 return (rc); 2170 } 2171 2172 __checkReturn efx_rc_t 2173 efx_mcdi_mac_stats_periodic( 2174 __in efx_nic_t *enp, 2175 __in efsys_mem_t *esmp, 2176 __in uint16_t period_ms, 2177 __in boolean_t events) 2178 { 2179 efx_rc_t rc; 2180 2181 /* 2182 * The MC DMAs aggregate statistics for our convenience, so we can 2183 * avoid having to pull the statistics buffer into the cache to 2184 * maintain cumulative statistics. 2185 * Huntington uses a fixed 1sec period. 2186 * Medford uses a fixed 1sec period before v6.2.1.1033 firmware. 2187 */ 2188 if (period_ms == 0) 2189 rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, NULL, 2190 EFX_STATS_DISABLE, 0); 2191 else if (events) 2192 rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp, 2193 EFX_STATS_ENABLE_EVENTS, period_ms); 2194 else 2195 rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp, 2196 EFX_STATS_ENABLE_NOEVENTS, period_ms); 2197 2198 if (rc != 0) 2199 goto fail1; 2200 2201 return (0); 2202 2203 fail1: 2204 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2205 2206 return (rc); 2207 } 2208 2209 #endif /* EFSYS_OPT_MAC_STATS */ 2210 2211 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() 2212 2213 __checkReturn efx_rc_t 2214 efx_mcdi_intf_from_pcie( 2215 __in uint32_t pcie_intf, 2216 __out efx_pcie_interface_t *efx_intf) 2217 { 2218 efx_rc_t rc; 2219 2220 switch (pcie_intf) { 2221 case PCIE_INTERFACE_CALLER: 2222 *efx_intf = EFX_PCIE_INTERFACE_CALLER; 2223 break; 2224 case PCIE_INTERFACE_HOST_PRIMARY: 2225 *efx_intf = EFX_PCIE_INTERFACE_HOST_PRIMARY; 2226 break; 2227 case PCIE_INTERFACE_NIC_EMBEDDED: 2228 *efx_intf = EFX_PCIE_INTERFACE_NIC_EMBEDDED; 2229 break; 2230 default: 2231 rc = EINVAL; 2232 goto fail1; 2233 } 2234 2235 return (0); 2236 2237 fail1: 2238 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2239 2240 return (rc); 2241 } 2242 2243 __checkReturn efx_rc_t 2244 efx_mcdi_intf_to_pcie( 2245 __in efx_pcie_interface_t efx_intf, 2246 __out uint32_t *pcie_intf) 2247 { 2248 efx_rc_t rc; 2249 2250 switch (efx_intf) { 2251 case EFX_PCIE_INTERFACE_CALLER: 2252 *pcie_intf = PCIE_INTERFACE_CALLER; 2253 break; 2254 case EFX_PCIE_INTERFACE_HOST_PRIMARY: 2255 *pcie_intf = PCIE_INTERFACE_HOST_PRIMARY; 2256 break; 2257 case EFX_PCIE_INTERFACE_NIC_EMBEDDED: 2258 *pcie_intf = PCIE_INTERFACE_NIC_EMBEDDED; 2259 break; 2260 default: 2261 rc = EINVAL; 2262 goto fail1; 2263 } 2264 2265 return (0); 2266 2267 fail1: 2268 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2269 return (rc); 2270 } 2271 2272 /* 2273 * This function returns the pf and vf number of a function. If it is a pf the 2274 * vf number is 0xffff. The vf number is the index of the vf on that 2275 * function. So if you have 3 vfs on pf 0 the 3 vfs will return (pf=0,vf=0), 2276 * (pf=0,vf=1), (pf=0,vf=2) aand the pf will return (pf=0, vf=0xffff). 2277 */ 2278 __checkReturn efx_rc_t 2279 efx_mcdi_get_function_info( 2280 __in efx_nic_t *enp, 2281 __out uint32_t *pfp, 2282 __out_opt uint32_t *vfp, 2283 __out_opt efx_pcie_interface_t *intfp) 2284 { 2285 efx_pcie_interface_t intf; 2286 efx_mcdi_req_t req; 2287 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_FUNCTION_INFO_IN_LEN, 2288 MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN); 2289 uint32_t pcie_intf; 2290 efx_rc_t rc; 2291 2292 req.emr_cmd = MC_CMD_GET_FUNCTION_INFO; 2293 req.emr_in_buf = payload; 2294 req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN; 2295 req.emr_out_buf = payload; 2296 req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN; 2297 2298 efx_mcdi_execute(enp, &req); 2299 2300 if (req.emr_rc != 0) { 2301 rc = req.emr_rc; 2302 goto fail1; 2303 } 2304 2305 if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) { 2306 rc = EMSGSIZE; 2307 goto fail2; 2308 } 2309 2310 *pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF); 2311 if (vfp != NULL) 2312 *vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF); 2313 2314 if (req.emr_out_length < MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN) { 2315 intf = EFX_PCIE_INTERFACE_HOST_PRIMARY; 2316 } else { 2317 pcie_intf = MCDI_OUT_DWORD(req, 2318 GET_FUNCTION_INFO_OUT_V2_INTF); 2319 2320 rc = efx_mcdi_intf_from_pcie(pcie_intf, &intf); 2321 if (rc != 0) 2322 goto fail3; 2323 } 2324 2325 if (intfp != NULL) 2326 *intfp = intf; 2327 2328 return (0); 2329 2330 fail3: 2331 EFSYS_PROBE(fail3); 2332 fail2: 2333 EFSYS_PROBE(fail2); 2334 fail1: 2335 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2336 2337 return (rc); 2338 } 2339 2340 __checkReturn efx_rc_t 2341 efx_mcdi_privilege_mask( 2342 __in efx_nic_t *enp, 2343 __in uint32_t pf, 2344 __in uint32_t vf, 2345 __out uint32_t *maskp) 2346 { 2347 efx_mcdi_req_t req; 2348 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PRIVILEGE_MASK_IN_LEN, 2349 MC_CMD_PRIVILEGE_MASK_OUT_LEN); 2350 efx_rc_t rc; 2351 2352 req.emr_cmd = MC_CMD_PRIVILEGE_MASK; 2353 req.emr_in_buf = payload; 2354 req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN; 2355 req.emr_out_buf = payload; 2356 req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN; 2357 2358 MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION, 2359 PRIVILEGE_MASK_IN_FUNCTION_PF, pf, 2360 PRIVILEGE_MASK_IN_FUNCTION_VF, vf); 2361 2362 efx_mcdi_execute(enp, &req); 2363 2364 if (req.emr_rc != 0) { 2365 rc = req.emr_rc; 2366 goto fail1; 2367 } 2368 2369 if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) { 2370 rc = EMSGSIZE; 2371 goto fail2; 2372 } 2373 2374 *maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK); 2375 2376 return (0); 2377 2378 fail2: 2379 EFSYS_PROBE(fail2); 2380 fail1: 2381 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2382 2383 return (rc); 2384 } 2385 2386 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */ 2387 2388 __checkReturn efx_rc_t 2389 efx_mcdi_set_workaround( 2390 __in efx_nic_t *enp, 2391 __in uint32_t type, 2392 __in boolean_t enabled, 2393 __out_opt uint32_t *flagsp) 2394 { 2395 efx_mcdi_req_t req; 2396 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_WORKAROUND_IN_LEN, 2397 MC_CMD_WORKAROUND_EXT_OUT_LEN); 2398 efx_rc_t rc; 2399 2400 req.emr_cmd = MC_CMD_WORKAROUND; 2401 req.emr_in_buf = payload; 2402 req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN; 2403 req.emr_out_buf = payload; 2404 req.emr_out_length = MC_CMD_WORKAROUND_OUT_LEN; 2405 2406 MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, type); 2407 MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, enabled ? 1 : 0); 2408 2409 efx_mcdi_execute_quiet(enp, &req); 2410 2411 if (req.emr_rc != 0) { 2412 rc = req.emr_rc; 2413 goto fail1; 2414 } 2415 2416 if (flagsp != NULL) { 2417 if (req.emr_out_length_used >= MC_CMD_WORKAROUND_EXT_OUT_LEN) 2418 *flagsp = MCDI_OUT_DWORD(req, WORKAROUND_EXT_OUT_FLAGS); 2419 else 2420 *flagsp = 0; 2421 } 2422 2423 return (0); 2424 2425 fail1: 2426 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2427 2428 return (rc); 2429 } 2430 2431 2432 __checkReturn efx_rc_t 2433 efx_mcdi_get_workarounds( 2434 __in efx_nic_t *enp, 2435 __out_opt uint32_t *implementedp, 2436 __out_opt uint32_t *enabledp) 2437 { 2438 efx_mcdi_req_t req; 2439 EFX_MCDI_DECLARE_BUF(payload, 0, MC_CMD_GET_WORKAROUNDS_OUT_LEN); 2440 efx_rc_t rc; 2441 2442 req.emr_cmd = MC_CMD_GET_WORKAROUNDS; 2443 req.emr_in_buf = NULL; 2444 req.emr_in_length = 0; 2445 req.emr_out_buf = payload; 2446 req.emr_out_length = MC_CMD_GET_WORKAROUNDS_OUT_LEN; 2447 2448 efx_mcdi_execute(enp, &req); 2449 2450 if (req.emr_rc != 0) { 2451 rc = req.emr_rc; 2452 goto fail1; 2453 } 2454 2455 if (req.emr_out_length_used < MC_CMD_GET_WORKAROUNDS_OUT_LEN) { 2456 rc = EMSGSIZE; 2457 goto fail2; 2458 } 2459 2460 if (implementedp != NULL) { 2461 *implementedp = 2462 MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_IMPLEMENTED); 2463 } 2464 2465 if (enabledp != NULL) { 2466 *enabledp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_ENABLED); 2467 } 2468 2469 return (0); 2470 2471 fail2: 2472 EFSYS_PROBE(fail2); 2473 fail1: 2474 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2475 2476 return (rc); 2477 } 2478 2479 /* 2480 * Size of media information page in accordance with SFF-8472 and SFF-8436. 2481 * It is used in MCDI interface as well. 2482 */ 2483 #define EFX_PHY_MEDIA_INFO_PAGE_SIZE 0x80 2484 2485 /* 2486 * Transceiver identifiers from SFF-8024 Table 4-1. 2487 */ 2488 #define EFX_SFF_TRANSCEIVER_ID_SFP 0x03 /* SFP/SFP+/SFP28 */ 2489 #define EFX_SFF_TRANSCEIVER_ID_QSFP 0x0c /* QSFP */ 2490 #define EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS 0x0d /* QSFP+ or later */ 2491 #define EFX_SFF_TRANSCEIVER_ID_QSFP28 0x11 /* QSFP28 or later */ 2492 2493 static __checkReturn efx_rc_t 2494 efx_mcdi_get_phy_media_info( 2495 __in efx_nic_t *enp, 2496 __in uint32_t mcdi_page, 2497 __in uint8_t offset, 2498 __in uint8_t len, 2499 __out_bcount(len) uint8_t *data) 2500 { 2501 efx_mcdi_req_t req; 2502 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN, 2503 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN( 2504 EFX_PHY_MEDIA_INFO_PAGE_SIZE)); 2505 efx_rc_t rc; 2506 2507 EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE); 2508 2509 req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO; 2510 req.emr_in_buf = payload; 2511 req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN; 2512 req.emr_out_buf = payload; 2513 req.emr_out_length = 2514 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE); 2515 2516 MCDI_IN_SET_DWORD(req, GET_PHY_MEDIA_INFO_IN_PAGE, mcdi_page); 2517 2518 efx_mcdi_execute(enp, &req); 2519 2520 if (req.emr_rc != 0) { 2521 rc = req.emr_rc; 2522 goto fail1; 2523 } 2524 2525 if (req.emr_out_length_used != 2526 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE)) { 2527 rc = EMSGSIZE; 2528 goto fail2; 2529 } 2530 2531 if (MCDI_OUT_DWORD(req, GET_PHY_MEDIA_INFO_OUT_DATALEN) != 2532 EFX_PHY_MEDIA_INFO_PAGE_SIZE) { 2533 rc = EIO; 2534 goto fail3; 2535 } 2536 2537 memcpy(data, 2538 MCDI_OUT2(req, uint8_t, GET_PHY_MEDIA_INFO_OUT_DATA) + offset, 2539 len); 2540 2541 return (0); 2542 2543 fail3: 2544 EFSYS_PROBE(fail3); 2545 fail2: 2546 EFSYS_PROBE(fail2); 2547 fail1: 2548 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2549 2550 return (rc); 2551 } 2552 2553 __checkReturn efx_rc_t 2554 efx_mcdi_phy_module_get_info( 2555 __in efx_nic_t *enp, 2556 __in uint8_t dev_addr, 2557 __in size_t offset, 2558 __in size_t len, 2559 __out_bcount(len) uint8_t *data) 2560 { 2561 efx_port_t *epp = &(enp->en_port); 2562 efx_rc_t rc; 2563 uint32_t mcdi_lower_page; 2564 uint32_t mcdi_upper_page; 2565 uint8_t id; 2566 2567 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); 2568 2569 /* 2570 * Map device address to MC_CMD_GET_PHY_MEDIA_INFO pages. 2571 * Offset plus length interface allows to access page 0 only. 2572 * I.e. non-zero upper pages are not accessible. 2573 * See SFF-8472 section 4 Memory Organization and SFF-8436 section 7.6 2574 * QSFP+ Memory Map for details on how information is structured 2575 * and accessible. 2576 */ 2577 switch (epp->ep_fixed_port_type) { 2578 case EFX_PHY_MEDIA_SFP_PLUS: 2579 case EFX_PHY_MEDIA_QSFP_PLUS: 2580 /* Port type supports modules */ 2581 break; 2582 default: 2583 rc = ENOTSUP; 2584 goto fail1; 2585 } 2586 2587 /* 2588 * For all supported port types, MCDI page 0 offset 0 holds the 2589 * transceiver identifier. Probe to determine the data layout. 2590 * Definitions from SFF-8024 Table 4-1. 2591 */ 2592 rc = efx_mcdi_get_phy_media_info(enp, 2593 0, 0, sizeof(id), &id); 2594 if (rc != 0) 2595 goto fail2; 2596 2597 switch (id) { 2598 case EFX_SFF_TRANSCEIVER_ID_SFP: 2599 /* 2600 * In accordance with SFF-8472 Diagnostic Monitoring 2601 * Interface for Optical Transceivers section 4 Memory 2602 * Organization two 2-wire addresses are defined. 2603 */ 2604 switch (dev_addr) { 2605 /* Base information */ 2606 case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE: 2607 /* 2608 * MCDI page 0 should be used to access lower 2609 * page 0 (0x00 - 0x7f) at the device address 0xA0. 2610 */ 2611 mcdi_lower_page = 0; 2612 /* 2613 * MCDI page 1 should be used to access upper 2614 * page 0 (0x80 - 0xff) at the device address 0xA0. 2615 */ 2616 mcdi_upper_page = 1; 2617 break; 2618 /* Diagnostics */ 2619 case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM: 2620 /* 2621 * MCDI page 2 should be used to access lower 2622 * page 0 (0x00 - 0x7f) at the device address 0xA2. 2623 */ 2624 mcdi_lower_page = 2; 2625 /* 2626 * MCDI page 3 should be used to access upper 2627 * page 0 (0x80 - 0xff) at the device address 0xA2. 2628 */ 2629 mcdi_upper_page = 3; 2630 break; 2631 default: 2632 rc = ENOTSUP; 2633 goto fail3; 2634 } 2635 break; 2636 case EFX_SFF_TRANSCEIVER_ID_QSFP: 2637 case EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS: 2638 case EFX_SFF_TRANSCEIVER_ID_QSFP28: 2639 switch (dev_addr) { 2640 case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP: 2641 /* 2642 * MCDI page -1 should be used to access lower page 0 2643 * (0x00 - 0x7f). 2644 */ 2645 mcdi_lower_page = (uint32_t)-1; 2646 /* 2647 * MCDI page 0 should be used to access upper page 0 2648 * (0x80h - 0xff). 2649 */ 2650 mcdi_upper_page = 0; 2651 break; 2652 default: 2653 rc = ENOTSUP; 2654 goto fail3; 2655 } 2656 break; 2657 default: 2658 rc = ENOTSUP; 2659 goto fail3; 2660 } 2661 2662 EFX_STATIC_ASSERT(EFX_PHY_MEDIA_INFO_PAGE_SIZE <= 0xFF); 2663 2664 if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) { 2665 size_t read_len = 2666 MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset); 2667 2668 rc = efx_mcdi_get_phy_media_info(enp, 2669 mcdi_lower_page, (uint8_t)offset, (uint8_t)read_len, data); 2670 if (rc != 0) 2671 goto fail4; 2672 2673 data += read_len; 2674 len -= read_len; 2675 2676 offset = 0; 2677 } else { 2678 offset -= EFX_PHY_MEDIA_INFO_PAGE_SIZE; 2679 } 2680 2681 if (len > 0) { 2682 EFSYS_ASSERT3U(len, <=, EFX_PHY_MEDIA_INFO_PAGE_SIZE); 2683 EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE); 2684 2685 rc = efx_mcdi_get_phy_media_info(enp, 2686 mcdi_upper_page, (uint8_t)offset, (uint8_t)len, data); 2687 if (rc != 0) 2688 goto fail5; 2689 } 2690 2691 return (0); 2692 2693 fail5: 2694 EFSYS_PROBE(fail5); 2695 fail4: 2696 EFSYS_PROBE(fail4); 2697 fail3: 2698 EFSYS_PROBE(fail3); 2699 fail2: 2700 EFSYS_PROBE(fail2); 2701 fail1: 2702 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2703 2704 return (rc); 2705 } 2706 2707 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() 2708 2709 #define INIT_EVQ_MAXNBUFS MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 2710 2711 #if EFX_OPTS_EF10() 2712 # if (INIT_EVQ_MAXNBUFS < EF10_EVQ_MAXNBUFS) 2713 # error "INIT_EVQ_MAXNBUFS too small" 2714 # endif 2715 #endif /* EFX_OPTS_EF10 */ 2716 #if EFSYS_OPT_RIVERHEAD 2717 # if (INIT_EVQ_MAXNBUFS < RHEAD_EVQ_MAXNBUFS) 2718 # error "INIT_EVQ_MAXNBUFS too small" 2719 # endif 2720 #endif /* EFSYS_OPT_RIVERHEAD */ 2721 2722 __checkReturn efx_rc_t 2723 efx_mcdi_init_evq( 2724 __in efx_nic_t *enp, 2725 __in unsigned int instance, 2726 __in efsys_mem_t *esmp, 2727 __in size_t nevs, 2728 __in uint32_t irq, 2729 __in uint32_t target_evq, 2730 __in uint32_t us, 2731 __in uint32_t flags, 2732 __in boolean_t low_latency) 2733 { 2734 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp); 2735 efx_mcdi_req_t req; 2736 EFX_MCDI_DECLARE_BUF(payload, 2737 MC_CMD_INIT_EVQ_V2_IN_LEN(INIT_EVQ_MAXNBUFS), 2738 MC_CMD_INIT_EVQ_V2_OUT_LEN); 2739 boolean_t interrupting; 2740 int ev_extended_width; 2741 int ev_cut_through; 2742 int ev_merge; 2743 unsigned int evq_type; 2744 efx_qword_t *dma_addr; 2745 uint64_t addr; 2746 int npages; 2747 int i; 2748 efx_rc_t rc; 2749 2750 npages = efx_evq_nbufs(enp, nevs, flags); 2751 if (npages > INIT_EVQ_MAXNBUFS) { 2752 rc = EINVAL; 2753 goto fail1; 2754 } 2755 2756 req.emr_cmd = MC_CMD_INIT_EVQ; 2757 req.emr_in_buf = payload; 2758 req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages); 2759 req.emr_out_buf = payload; 2760 req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN; 2761 2762 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs); 2763 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance); 2764 2765 interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == 2766 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT); 2767 2768 if (interrupting) 2769 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq); 2770 else 2771 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TARGET_EVQ, target_evq); 2772 2773 if (encp->enc_init_evq_v2_supported) { 2774 /* 2775 * On Medford the low latency license is required to enable RX 2776 * and event cut through and to disable RX batching. If event 2777 * queue type in flags is auto, we let the firmware decide the 2778 * settings to use. If the adapter has a low latency license, 2779 * it will choose the best settings for low latency, otherwise 2780 * it will choose the best settings for throughput. 2781 */ 2782 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { 2783 case EFX_EVQ_FLAGS_TYPE_AUTO: 2784 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO; 2785 break; 2786 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: 2787 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT; 2788 break; 2789 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: 2790 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY; 2791 break; 2792 default: 2793 rc = EINVAL; 2794 goto fail2; 2795 } 2796 /* EvQ type controls merging, no manual settings */ 2797 ev_merge = 0; 2798 ev_cut_through = 0; 2799 } else { 2800 /* EvQ types other than manual are not supported */ 2801 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL; 2802 /* 2803 * On Huntington RX and TX event batching can only be requested 2804 * together (even if the datapath firmware doesn't actually 2805 * support RX batching). If event cut through is enabled no RX 2806 * batching will occur. 2807 * 2808 * So always enable RX and TX event batching, and enable event 2809 * cut through if we want low latency operation. 2810 */ 2811 ev_merge = 1; 2812 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { 2813 case EFX_EVQ_FLAGS_TYPE_AUTO: 2814 ev_cut_through = low_latency ? 1 : 0; 2815 break; 2816 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: 2817 ev_cut_through = 0; 2818 break; 2819 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: 2820 ev_cut_through = 1; 2821 break; 2822 default: 2823 rc = EINVAL; 2824 goto fail2; 2825 } 2826 } 2827 2828 /* 2829 * On EF100, extended width event queues have a different event 2830 * descriptor layout and are used to support descriptor proxy queues. 2831 */ 2832 ev_extended_width = 0; 2833 #if EFSYS_OPT_EV_EXTENDED_WIDTH 2834 if (encp->enc_init_evq_extended_width_supported) { 2835 if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) 2836 ev_extended_width = 1; 2837 } 2838 #endif 2839 2840 MCDI_IN_POPULATE_DWORD_8(req, INIT_EVQ_V2_IN_FLAGS, 2841 INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting, 2842 INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0, 2843 INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0, 2844 INIT_EVQ_V2_IN_FLAG_CUT_THRU, ev_cut_through, 2845 INIT_EVQ_V2_IN_FLAG_RX_MERGE, ev_merge, 2846 INIT_EVQ_V2_IN_FLAG_TX_MERGE, ev_merge, 2847 INIT_EVQ_V2_IN_FLAG_TYPE, evq_type, 2848 INIT_EVQ_V2_IN_FLAG_EXT_WIDTH, ev_extended_width); 2849 2850 /* If the value is zero then disable the timer */ 2851 if (us == 0) { 2852 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, 2853 MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS); 2854 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0); 2855 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0); 2856 } else { 2857 unsigned int ticks; 2858 2859 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) 2860 goto fail3; 2861 2862 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, 2863 MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF); 2864 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks); 2865 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks); 2866 } 2867 2868 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE, 2869 MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS); 2870 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0); 2871 2872 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR); 2873 addr = EFSYS_MEM_ADDR(esmp); 2874 2875 for (i = 0; i < npages; i++) { 2876 EFX_POPULATE_QWORD_2(*dma_addr, 2877 EFX_DWORD_1, (uint32_t)(addr >> 32), 2878 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); 2879 2880 dma_addr++; 2881 addr += EFX_BUF_SIZE; 2882 } 2883 2884 efx_mcdi_execute(enp, &req); 2885 2886 if (req.emr_rc != 0) { 2887 rc = req.emr_rc; 2888 goto fail4; 2889 } 2890 2891 if (encp->enc_init_evq_v2_supported) { 2892 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) { 2893 rc = EMSGSIZE; 2894 goto fail5; 2895 } 2896 EFSYS_PROBE1(mcdi_evq_flags, uint32_t, 2897 MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS)); 2898 } else { 2899 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) { 2900 rc = EMSGSIZE; 2901 goto fail6; 2902 } 2903 } 2904 2905 /* NOTE: ignore the returned IRQ param as firmware does not set it. */ 2906 2907 return (0); 2908 2909 fail6: 2910 EFSYS_PROBE(fail6); 2911 fail5: 2912 EFSYS_PROBE(fail5); 2913 fail4: 2914 EFSYS_PROBE(fail4); 2915 fail3: 2916 EFSYS_PROBE(fail3); 2917 fail2: 2918 EFSYS_PROBE(fail2); 2919 fail1: 2920 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2921 2922 return (rc); 2923 } 2924 2925 __checkReturn efx_rc_t 2926 efx_mcdi_fini_evq( 2927 __in efx_nic_t *enp, 2928 __in uint32_t instance) 2929 { 2930 efx_mcdi_req_t req; 2931 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN, 2932 MC_CMD_FINI_EVQ_OUT_LEN); 2933 efx_rc_t rc; 2934 2935 req.emr_cmd = MC_CMD_FINI_EVQ; 2936 req.emr_in_buf = payload; 2937 req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN; 2938 req.emr_out_buf = payload; 2939 req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN; 2940 2941 MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance); 2942 2943 efx_mcdi_execute_quiet(enp, &req); 2944 2945 if (req.emr_rc != 0) { 2946 rc = req.emr_rc; 2947 goto fail1; 2948 } 2949 2950 return (0); 2951 2952 fail1: 2953 /* 2954 * EALREADY is not an error, but indicates that the MC has rebooted and 2955 * that the EVQ has already been destroyed. 2956 */ 2957 if (rc != EALREADY) 2958 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2959 2960 return (rc); 2961 } 2962 2963 __checkReturn efx_rc_t 2964 efx_mcdi_init_rxq( 2965 __in efx_nic_t *enp, 2966 __in uint32_t ndescs, 2967 __in efx_evq_t *eep, 2968 __in uint32_t label, 2969 __in uint32_t instance, 2970 __in efsys_mem_t *esmp, 2971 __in const efx_mcdi_init_rxq_params_t *params) 2972 { 2973 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 2974 efx_mcdi_req_t req; 2975 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_RXQ_V5_IN_LEN, 2976 MC_CMD_INIT_RXQ_V5_OUT_LEN); 2977 int npages = efx_rxq_nbufs(enp, ndescs); 2978 int i; 2979 efx_qword_t *dma_addr; 2980 uint64_t addr; 2981 efx_rc_t rc; 2982 uint32_t dma_mode; 2983 boolean_t want_outer_classes; 2984 boolean_t no_cont_ev; 2985 2986 EFSYS_ASSERT3U(ndescs, <=, encp->enc_rxq_max_ndescs); 2987 2988 if ((esmp == NULL) || 2989 (EFSYS_MEM_SIZE(esmp) < efx_rxq_size(enp, ndescs))) { 2990 rc = EINVAL; 2991 goto fail1; 2992 } 2993 2994 no_cont_ev = (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV); 2995 if ((no_cont_ev == B_TRUE) && (params->disable_scatter == B_FALSE)) { 2996 /* TODO: Support scatter in NO_CONT_EV mode */ 2997 rc = EINVAL; 2998 goto fail2; 2999 } 3000 3001 if (params->ps_buf_size > 0) 3002 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM; 3003 else if (params->es_bufs_per_desc > 0) 3004 dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER; 3005 else 3006 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET; 3007 3008 if (encp->enc_tunnel_encapsulations_supported != 0 && 3009 !params->want_inner_classes) { 3010 /* 3011 * WANT_OUTER_CLASSES can only be specified on hardware which 3012 * supports tunnel encapsulation offloads, even though it is 3013 * effectively the behaviour the hardware gives. 3014 * 3015 * Also, on hardware which does support such offloads, older 3016 * firmware rejects the flag if the offloads are not supported 3017 * by the current firmware variant, which means this may fail if 3018 * the capabilities are not updated when the firmware variant 3019 * changes. This is not an issue on newer firmware, as it was 3020 * changed in bug 69842 (v6.4.2.1007) to permit this flag to be 3021 * specified on all firmware variants. 3022 */ 3023 want_outer_classes = B_TRUE; 3024 } else { 3025 want_outer_classes = B_FALSE; 3026 } 3027 3028 req.emr_cmd = MC_CMD_INIT_RXQ; 3029 req.emr_in_buf = payload; 3030 req.emr_in_length = MC_CMD_INIT_RXQ_V5_IN_LEN; 3031 req.emr_out_buf = payload; 3032 req.emr_out_length = MC_CMD_INIT_RXQ_V5_OUT_LEN; 3033 3034 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs); 3035 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, eep->ee_index); 3036 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label); 3037 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance); 3038 MCDI_IN_POPULATE_DWORD_10(req, INIT_RXQ_EXT_IN_FLAGS, 3039 INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0, 3040 INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0, 3041 INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0, 3042 INIT_RXQ_EXT_IN_CRC_MODE, 0, 3043 INIT_RXQ_EXT_IN_FLAG_PREFIX, 1, 3044 INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, params->disable_scatter, 3045 INIT_RXQ_EXT_IN_DMA_MODE, 3046 dma_mode, 3047 INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, params->ps_buf_size, 3048 INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes, 3049 INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV, no_cont_ev); 3050 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0); 3051 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, enp->en_vport_id); 3052 3053 if (params->es_bufs_per_desc > 0) { 3054 MCDI_IN_SET_DWORD(req, 3055 INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET, 3056 params->es_bufs_per_desc); 3057 MCDI_IN_SET_DWORD(req, 3058 INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, params->es_max_dma_len); 3059 MCDI_IN_SET_DWORD(req, 3060 INIT_RXQ_V3_IN_ES_PACKET_STRIDE, params->es_buf_stride); 3061 MCDI_IN_SET_DWORD(req, 3062 INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT, 3063 params->hol_block_timeout); 3064 } 3065 3066 if (encp->enc_init_rxq_with_buffer_size) 3067 MCDI_IN_SET_DWORD(req, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, 3068 params->buf_size); 3069 3070 MCDI_IN_SET_DWORD(req, INIT_RXQ_V5_IN_RX_PREFIX_ID, params->prefix_id); 3071 3072 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR); 3073 addr = EFSYS_MEM_ADDR(esmp); 3074 3075 for (i = 0; i < npages; i++) { 3076 EFX_POPULATE_QWORD_2(*dma_addr, 3077 EFX_DWORD_1, (uint32_t)(addr >> 32), 3078 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); 3079 3080 dma_addr++; 3081 addr += EFX_BUF_SIZE; 3082 } 3083 3084 efx_mcdi_execute(enp, &req); 3085 3086 if (req.emr_rc != 0) { 3087 rc = req.emr_rc; 3088 goto fail3; 3089 } 3090 3091 return (0); 3092 3093 fail3: 3094 EFSYS_PROBE(fail3); 3095 fail2: 3096 EFSYS_PROBE(fail2); 3097 fail1: 3098 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3099 3100 return (rc); 3101 } 3102 3103 __checkReturn efx_rc_t 3104 efx_mcdi_fini_rxq( 3105 __in efx_nic_t *enp, 3106 __in uint32_t instance) 3107 { 3108 efx_mcdi_req_t req; 3109 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_RXQ_IN_LEN, 3110 MC_CMD_FINI_RXQ_OUT_LEN); 3111 efx_rc_t rc; 3112 3113 req.emr_cmd = MC_CMD_FINI_RXQ; 3114 req.emr_in_buf = payload; 3115 req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN; 3116 req.emr_out_buf = payload; 3117 req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN; 3118 3119 MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance); 3120 3121 efx_mcdi_execute_quiet(enp, &req); 3122 3123 if (req.emr_rc != 0) { 3124 rc = req.emr_rc; 3125 goto fail1; 3126 } 3127 3128 return (0); 3129 3130 fail1: 3131 /* 3132 * EALREADY is not an error, but indicates that the MC has rebooted and 3133 * that the RXQ has already been destroyed. 3134 */ 3135 if (rc != EALREADY) 3136 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3137 3138 return (rc); 3139 } 3140 3141 __checkReturn efx_rc_t 3142 efx_mcdi_init_txq( 3143 __in efx_nic_t *enp, 3144 __in uint32_t ndescs, 3145 __in uint32_t target_evq, 3146 __in uint32_t label, 3147 __in uint32_t instance, 3148 __in uint16_t flags, 3149 __in efsys_mem_t *esmp) 3150 { 3151 efx_mcdi_req_t req; 3152 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_TXQ_EXT_IN_LEN, 3153 MC_CMD_INIT_TXQ_OUT_LEN); 3154 efx_qword_t *dma_addr; 3155 uint64_t addr; 3156 int npages; 3157 int i; 3158 efx_rc_t rc; 3159 3160 EFSYS_ASSERT(MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM >= 3161 efx_txq_nbufs(enp, enp->en_nic_cfg.enc_txq_max_ndescs)); 3162 3163 if ((esmp == NULL) || 3164 (EFSYS_MEM_SIZE(esmp) < efx_txq_size(enp, ndescs))) { 3165 rc = EINVAL; 3166 goto fail1; 3167 } 3168 3169 npages = efx_txq_nbufs(enp, ndescs); 3170 if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) { 3171 rc = EINVAL; 3172 goto fail2; 3173 } 3174 3175 req.emr_cmd = MC_CMD_INIT_TXQ; 3176 req.emr_in_buf = payload; 3177 req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages); 3178 req.emr_out_buf = payload; 3179 req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN; 3180 3181 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs); 3182 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq); 3183 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label); 3184 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance); 3185 3186 MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS, 3187 INIT_TXQ_IN_FLAG_BUFF_MODE, 0, 3188 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, 3189 (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1, 3190 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, 3191 (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1, 3192 INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN, 3193 (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0, 3194 INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN, 3195 (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, 3196 INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 1 : 0, 3197 INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0, 3198 INIT_TXQ_IN_CRC_MODE, 0, 3199 INIT_TXQ_IN_FLAG_TIMESTAMP, 0); 3200 3201 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0); 3202 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, enp->en_vport_id); 3203 3204 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR); 3205 addr = EFSYS_MEM_ADDR(esmp); 3206 3207 for (i = 0; i < npages; i++) { 3208 EFX_POPULATE_QWORD_2(*dma_addr, 3209 EFX_DWORD_1, (uint32_t)(addr >> 32), 3210 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); 3211 3212 dma_addr++; 3213 addr += EFX_BUF_SIZE; 3214 } 3215 3216 efx_mcdi_execute(enp, &req); 3217 3218 if (req.emr_rc != 0) { 3219 rc = req.emr_rc; 3220 goto fail3; 3221 } 3222 3223 return (0); 3224 3225 fail3: 3226 EFSYS_PROBE(fail3); 3227 fail2: 3228 EFSYS_PROBE(fail2); 3229 fail1: 3230 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3231 3232 return (rc); 3233 } 3234 3235 __checkReturn efx_rc_t 3236 efx_mcdi_fini_txq( 3237 __in efx_nic_t *enp, 3238 __in uint32_t instance) 3239 { 3240 efx_mcdi_req_t req; 3241 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_TXQ_IN_LEN, 3242 MC_CMD_FINI_TXQ_OUT_LEN); 3243 efx_rc_t rc; 3244 3245 req.emr_cmd = MC_CMD_FINI_TXQ; 3246 req.emr_in_buf = payload; 3247 req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN; 3248 req.emr_out_buf = payload; 3249 req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN; 3250 3251 MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance); 3252 3253 efx_mcdi_execute_quiet(enp, &req); 3254 3255 if (req.emr_rc != 0) { 3256 rc = req.emr_rc; 3257 goto fail1; 3258 } 3259 3260 return (0); 3261 3262 fail1: 3263 /* 3264 * EALREADY is not an error, but indicates that the MC has rebooted and 3265 * that the TXQ has already been destroyed. 3266 */ 3267 if (rc != EALREADY) 3268 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3269 3270 return (rc); 3271 } 3272 3273 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */ 3274 3275 __checkReturn efx_rc_t 3276 efx_mcdi_get_nic_addr_info( 3277 __in efx_nic_t *enp, 3278 __out uint32_t *mapping_typep) 3279 { 3280 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_DESC_ADDR_INFO_IN_LEN, 3281 MC_CMD_GET_DESC_ADDR_INFO_OUT_LEN); 3282 efx_mcdi_req_t req; 3283 efx_rc_t rc; 3284 3285 req.emr_cmd = MC_CMD_GET_DESC_ADDR_INFO; 3286 req.emr_in_buf = payload; 3287 req.emr_in_length = MC_CMD_GET_DESC_ADDR_INFO_IN_LEN; 3288 req.emr_out_buf = payload; 3289 req.emr_out_length = MC_CMD_GET_DESC_ADDR_INFO_OUT_LEN; 3290 3291 efx_mcdi_execute_quiet(enp, &req); 3292 3293 if (req.emr_rc != 0) { 3294 rc = req.emr_rc; 3295 goto fail1; 3296 } 3297 3298 if (req.emr_out_length_used < MC_CMD_GET_DESC_ADDR_INFO_OUT_LEN) { 3299 rc = EMSGSIZE; 3300 goto fail2; 3301 } 3302 3303 *mapping_typep = 3304 MCDI_OUT_DWORD(req, GET_DESC_ADDR_INFO_OUT_MAPPING_TYPE); 3305 3306 return (0); 3307 3308 fail2: 3309 EFSYS_PROBE(fail2); 3310 fail1: 3311 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3312 3313 return (rc); 3314 } 3315 3316 __checkReturn efx_rc_t 3317 efx_mcdi_get_nic_addr_regions( 3318 __in efx_nic_t *enp, 3319 __out efx_nic_dma_region_info_t *endrip) 3320 { 3321 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_DESC_ADDR_REGIONS_IN_LEN, 3322 MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMAX_MCDI2); 3323 efx_xword_t *regions; 3324 efx_mcdi_req_t req; 3325 efx_rc_t rc; 3326 size_t alloc_size; 3327 unsigned int nregions; 3328 unsigned int i; 3329 3330 req.emr_cmd = MC_CMD_GET_DESC_ADDR_REGIONS; 3331 req.emr_in_buf = payload; 3332 req.emr_in_length = MC_CMD_GET_DESC_ADDR_REGIONS_IN_LEN; 3333 req.emr_out_buf = payload; 3334 req.emr_out_length = MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMAX_MCDI2; 3335 3336 efx_mcdi_execute_quiet(enp, &req); 3337 3338 if (req.emr_rc != 0) { 3339 rc = req.emr_rc; 3340 goto fail1; 3341 } 3342 3343 if (req.emr_out_length_used < 3344 MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMIN) { 3345 rc = EMSGSIZE; 3346 goto fail2; 3347 } 3348 3349 nregions = MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_NUM( 3350 req.emr_out_length_used); 3351 3352 EFX_STATIC_ASSERT(sizeof (*regions) == DESC_ADDR_REGION_LEN); 3353 regions = MCDI_OUT2(req, efx_xword_t, 3354 GET_DESC_ADDR_REGIONS_OUT_REGIONS); 3355 3356 alloc_size = nregions * sizeof(endrip->endri_regions[0]); 3357 if (alloc_size / sizeof (endrip->endri_regions[0]) != nregions) { 3358 rc = ENOMEM; 3359 goto fail3; 3360 } 3361 3362 EFSYS_KMEM_ALLOC(enp->en_esip, 3363 alloc_size, 3364 endrip->endri_regions); 3365 if (endrip->endri_regions == NULL) { 3366 rc = ENOMEM; 3367 goto fail4; 3368 } 3369 3370 endrip->endri_count = nregions; 3371 for (i = 0; i < nregions; ++i) { 3372 efx_nic_dma_region_t *region_info; 3373 3374 region_info = &endrip->endri_regions[i]; 3375 3376 region_info->endr_inuse = B_FALSE; 3377 3378 region_info->endr_nic_base = 3379 MCDI_OUT_INDEXED_MEMBER_QWORD(req, 3380 GET_DESC_ADDR_REGIONS_OUT_REGIONS, i, 3381 DESC_ADDR_REGION_DESC_ADDR_BASE); 3382 3383 region_info->endr_trgt_base = 3384 MCDI_OUT_INDEXED_MEMBER_QWORD(req, 3385 GET_DESC_ADDR_REGIONS_OUT_REGIONS, i, 3386 DESC_ADDR_REGION_TRGT_ADDR_BASE); 3387 3388 region_info->endr_window_log2 = 3389 MCDI_OUT_INDEXED_MEMBER_DWORD(req, 3390 GET_DESC_ADDR_REGIONS_OUT_REGIONS, i, 3391 DESC_ADDR_REGION_WINDOW_SIZE_LOG2); 3392 3393 region_info->endr_align_log2 = 3394 MCDI_OUT_INDEXED_MEMBER_DWORD(req, 3395 GET_DESC_ADDR_REGIONS_OUT_REGIONS, i, 3396 DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2); 3397 } 3398 3399 return (0); 3400 3401 fail4: 3402 EFSYS_PROBE(fail4); 3403 fail3: 3404 EFSYS_PROBE(fail3); 3405 fail2: 3406 EFSYS_PROBE(fail2); 3407 fail1: 3408 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3409 3410 return (rc); 3411 } 3412 3413 __checkReturn efx_rc_t 3414 efx_mcdi_set_nic_addr_regions( 3415 __in efx_nic_t *enp, 3416 __in const efx_nic_dma_region_info_t *endrip) 3417 { 3418 EFX_MCDI_DECLARE_BUF(payload, 3419 MC_CMD_SET_DESC_ADDR_REGIONS_IN_LENMAX_MCDI2, 3420 MC_CMD_SET_DESC_ADDR_REGIONS_OUT_LEN); 3421 efx_qword_t *trgt_addr_base; 3422 efx_mcdi_req_t req; 3423 unsigned int i; 3424 efx_rc_t rc; 3425 3426 if (endrip->endri_count > 3427 MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MAXNUM) { 3428 rc = EINVAL; 3429 goto fail1; 3430 } 3431 3432 req.emr_cmd = MC_CMD_SET_DESC_ADDR_REGIONS; 3433 req.emr_in_buf = payload; 3434 req.emr_in_length = 3435 MC_CMD_SET_DESC_ADDR_REGIONS_IN_LEN(endrip->endri_count); 3436 req.emr_out_buf = payload; 3437 req.emr_out_length = MC_CMD_SET_DESC_ADDR_REGIONS_OUT_LEN; 3438 3439 EFX_STATIC_ASSERT(sizeof (*trgt_addr_base) == 3440 MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LEN); 3441 trgt_addr_base = MCDI_OUT2(req, efx_qword_t, 3442 SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE); 3443 3444 for (i = 0; i < endrip->endri_count; ++i) { 3445 const efx_nic_dma_region_t *region_info; 3446 3447 region_info = &endrip->endri_regions[i]; 3448 3449 if (region_info->endr_inuse != B_TRUE) 3450 continue; 3451 3452 EFX_STATIC_ASSERT(sizeof (1U) * 8 >= 3453 MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MAXNUM); 3454 MCDI_IN_SET_DWORD(req, 3455 SET_DESC_ADDR_REGIONS_IN_SET_REGION_MASK, 1U << i); 3456 3457 MCDI_IN_SET_INDEXED_QWORD(req, 3458 SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE, i, 3459 region_info->endr_trgt_base); 3460 } 3461 3462 efx_mcdi_execute_quiet(enp, &req); 3463 3464 if (req.emr_rc != 0) { 3465 rc = req.emr_rc; 3466 goto fail2; 3467 } 3468 3469 return (0); 3470 3471 fail2: 3472 EFSYS_PROBE(fail2); 3473 fail1: 3474 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3475 3476 return (rc); 3477 } 3478 3479 #endif /* EFSYS_OPT_MCDI */ 3480