1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright(c) 2019-2021 Xilinx, Inc. 4 * Copyright(c) 2008-2019 Solarflare Communications Inc. 5 */ 6 7 #include "efx.h" 8 #include "efx_impl.h" 9 10 #if EFSYS_OPT_MCDI 11 12 /* 13 * There are three versions of the MCDI interface: 14 * - MCDIv0: Siena BootROM. Transport uses MCDIv1 headers. 15 * - MCDIv1: Siena firmware and Huntington BootROM. 16 * - MCDIv2: EF10 firmware (Huntington/Medford) and Medford BootROM. 17 * Transport uses MCDIv2 headers. 18 * 19 * MCDIv2 Header NOT_EPOCH flag 20 * ---------------------------- 21 * A new epoch begins at initial startup or after an MC reboot, and defines when 22 * the MC should reject stale MCDI requests. 23 * 24 * The first MCDI request sent by the host should contain NOT_EPOCH=0, and all 25 * subsequent requests (until the next MC reboot) should contain NOT_EPOCH=1. 26 * 27 * After rebooting the MC will fail all requests with NOT_EPOCH=1 by writing a 28 * response with ERROR=1 and DATALEN=0 until a request is seen with NOT_EPOCH=0. 29 */ 30 31 32 33 #if EFSYS_OPT_SIENA 34 35 static const efx_mcdi_ops_t __efx_mcdi_siena_ops = { 36 siena_mcdi_init, /* emco_init */ 37 siena_mcdi_send_request, /* emco_send_request */ 38 siena_mcdi_poll_reboot, /* emco_poll_reboot */ 39 siena_mcdi_poll_response, /* emco_poll_response */ 40 siena_mcdi_read_response, /* emco_read_response */ 41 siena_mcdi_fini, /* emco_fini */ 42 siena_mcdi_feature_supported, /* emco_feature_supported */ 43 siena_mcdi_get_timeout, /* emco_get_timeout */ 44 }; 45 46 #endif /* EFSYS_OPT_SIENA */ 47 48 #if EFX_OPTS_EF10() 49 50 static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = { 51 ef10_mcdi_init, /* emco_init */ 52 ef10_mcdi_send_request, /* emco_send_request */ 53 ef10_mcdi_poll_reboot, /* emco_poll_reboot */ 54 ef10_mcdi_poll_response, /* emco_poll_response */ 55 ef10_mcdi_read_response, /* emco_read_response */ 56 ef10_mcdi_fini, /* emco_fini */ 57 ef10_mcdi_feature_supported, /* emco_feature_supported */ 58 ef10_mcdi_get_timeout, /* emco_get_timeout */ 59 }; 60 61 #endif /* EFX_OPTS_EF10() */ 62 63 #if EFSYS_OPT_RIVERHEAD 64 65 static const efx_mcdi_ops_t __efx_mcdi_rhead_ops = { 66 ef10_mcdi_init, /* emco_init */ 67 ef10_mcdi_send_request, /* emco_send_request */ 68 ef10_mcdi_poll_reboot, /* emco_poll_reboot */ 69 ef10_mcdi_poll_response, /* emco_poll_response */ 70 ef10_mcdi_read_response, /* emco_read_response */ 71 ef10_mcdi_fini, /* emco_fini */ 72 ef10_mcdi_feature_supported, /* emco_feature_supported */ 73 ef10_mcdi_get_timeout, /* emco_get_timeout */ 74 }; 75 76 #endif /* EFSYS_OPT_RIVERHEAD */ 77 78 79 80 __checkReturn efx_rc_t 81 efx_mcdi_init( 82 __in efx_nic_t *enp, 83 __in const efx_mcdi_transport_t *emtp) 84 { 85 const efx_mcdi_ops_t *emcop; 86 efx_rc_t rc; 87 88 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 89 EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0); 90 91 switch (enp->en_family) { 92 #if EFSYS_OPT_SIENA 93 case EFX_FAMILY_SIENA: 94 emcop = &__efx_mcdi_siena_ops; 95 break; 96 #endif /* EFSYS_OPT_SIENA */ 97 98 #if EFSYS_OPT_HUNTINGTON 99 case EFX_FAMILY_HUNTINGTON: 100 emcop = &__efx_mcdi_ef10_ops; 101 break; 102 #endif /* EFSYS_OPT_HUNTINGTON */ 103 104 #if EFSYS_OPT_MEDFORD 105 case EFX_FAMILY_MEDFORD: 106 emcop = &__efx_mcdi_ef10_ops; 107 break; 108 #endif /* EFSYS_OPT_MEDFORD */ 109 110 #if EFSYS_OPT_MEDFORD2 111 case EFX_FAMILY_MEDFORD2: 112 emcop = &__efx_mcdi_ef10_ops; 113 break; 114 #endif /* EFSYS_OPT_MEDFORD2 */ 115 116 #if EFSYS_OPT_RIVERHEAD 117 case EFX_FAMILY_RIVERHEAD: 118 emcop = 
&__efx_mcdi_rhead_ops; 119 break; 120 #endif /* EFSYS_OPT_RIVERHEAD */ 121 122 default: 123 EFSYS_ASSERT(0); 124 rc = ENOTSUP; 125 goto fail1; 126 } 127 128 if (enp->en_features & EFX_FEATURE_MCDI_DMA) { 129 /* MCDI requires a DMA buffer in host memory */ 130 if ((emtp == NULL) || (emtp->emt_dma_mem) == NULL) { 131 rc = EINVAL; 132 goto fail2; 133 } 134 } 135 enp->en_mcdi.em_emtp = emtp; 136 137 if (emcop != NULL && emcop->emco_init != NULL) { 138 if ((rc = emcop->emco_init(enp, emtp)) != 0) 139 goto fail3; 140 } 141 142 enp->en_mcdi.em_emcop = emcop; 143 enp->en_mod_flags |= EFX_MOD_MCDI; 144 145 return (0); 146 147 fail3: 148 EFSYS_PROBE(fail3); 149 fail2: 150 EFSYS_PROBE(fail2); 151 fail1: 152 EFSYS_PROBE1(fail1, efx_rc_t, rc); 153 154 enp->en_mcdi.em_emcop = NULL; 155 enp->en_mcdi.em_emtp = NULL; 156 enp->en_mod_flags &= ~EFX_MOD_MCDI; 157 158 return (rc); 159 } 160 161 void 162 efx_mcdi_fini( 163 __in efx_nic_t *enp) 164 { 165 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 166 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 167 168 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 169 EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI); 170 171 if (emcop != NULL && emcop->emco_fini != NULL) 172 emcop->emco_fini(enp); 173 174 emip->emi_port = 0; 175 emip->emi_aborted = 0; 176 177 enp->en_mcdi.em_emcop = NULL; 178 enp->en_mod_flags &= ~EFX_MOD_MCDI; 179 } 180 181 void 182 efx_mcdi_new_epoch( 183 __in efx_nic_t *enp) 184 { 185 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 186 efsys_lock_state_t state; 187 188 /* Start a new epoch (allow fresh MCDI requests to succeed) */ 189 EFSYS_LOCK(enp->en_eslp, state); 190 emip->emi_new_epoch = B_TRUE; 191 EFSYS_UNLOCK(enp->en_eslp, state); 192 } 193 194 static void 195 efx_mcdi_send_request( 196 __in efx_nic_t *enp, 197 __in void *hdrp, 198 __in size_t hdr_len, 199 __in void *sdup, 200 __in size_t sdu_len) 201 { 202 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 203 204 emcop->emco_send_request(enp, hdrp, hdr_len, sdup, sdu_len); 205 } 206 207 static efx_rc_t 208 efx_mcdi_poll_reboot( 209 __in efx_nic_t *enp) 210 { 211 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 212 efx_rc_t rc; 213 214 rc = emcop->emco_poll_reboot(enp); 215 return (rc); 216 } 217 218 static boolean_t 219 efx_mcdi_poll_response( 220 __in efx_nic_t *enp) 221 { 222 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 223 boolean_t available; 224 225 available = emcop->emco_poll_response(enp); 226 return (available); 227 } 228 229 static void 230 efx_mcdi_read_response( 231 __in efx_nic_t *enp, 232 __out void *bufferp, 233 __in size_t offset, 234 __in size_t length) 235 { 236 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 237 238 emcop->emco_read_response(enp, bufferp, offset, length); 239 } 240 241 void 242 efx_mcdi_request_start( 243 __in efx_nic_t *enp, 244 __in efx_mcdi_req_t *emrp, 245 __in boolean_t ev_cpl) 246 { 247 #if EFSYS_OPT_MCDI_LOGGING 248 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 249 #endif 250 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 251 efx_dword_t hdr[2]; 252 size_t hdr_len; 253 unsigned int max_version; 254 unsigned int seq; 255 unsigned int xflags; 256 boolean_t new_epoch; 257 efsys_lock_state_t state; 258 259 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 260 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); 261 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 262 263 /* 264 * efx_mcdi_request_start() is naturally serialised against both 265 * efx_mcdi_request_poll() and 
	 * efx_mcdi_ev_cpl()/efx_mcdi_ev_death(),
	 * by virtue of there only being one outstanding MCDI request.
	 * Unfortunately, upper layers may also call efx_mcdi_request_abort()
	 * at any time, to time out a pending MCDI request. That request may
	 * then subsequently complete, meaning efx_mcdi_ev_cpl() or
	 * efx_mcdi_ev_death() may end up running in parallel with
	 * efx_mcdi_request_start(). This race is handled by ensuring that
	 * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the
	 * en_eslp lock.
	 */
	EFSYS_LOCK(enp->en_eslp, state);
	EFSYS_ASSERT(emip->emi_pending_req == NULL);
	emip->emi_pending_req = emrp;
	emip->emi_ev_cpl = ev_cpl;
	emip->emi_poll_cnt = 0;
	seq = emip->emi_seq++ & EFX_MASK32(MCDI_HEADER_SEQ);
	new_epoch = emip->emi_new_epoch;
	max_version = emip->emi_max_version;
	EFSYS_UNLOCK(enp->en_eslp, state);

	xflags = 0;
	if (ev_cpl)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	/*
	 * Huntington firmware supports MCDIv2, but the Huntington BootROM only
	 * supports MCDIv1. Use MCDIv1 headers for MCDIv1 commands where
	 * possible to support this.
	 */
	if ((max_version >= 2) &&
	    ((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) ||
	    (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1) ||
	    (emrp->emr_out_length > MCDI_CTL_SDU_LEN_MAX_V1))) {
		/* Construct MCDI v2 header */
		hdr_len = sizeof (hdr);
		EFX_POPULATE_DWORD_8(hdr[0],
		    MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
		    MCDI_HEADER_RESYNC, 1,
		    MCDI_HEADER_DATALEN, 0,
		    MCDI_HEADER_SEQ, seq,
		    MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
		    MCDI_HEADER_ERROR, 0,
		    MCDI_HEADER_RESPONSE, 0,
		    MCDI_HEADER_XFLAGS, xflags);

		EFX_POPULATE_DWORD_2(hdr[1],
		    MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd,
		    MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length);
	} else {
		/* Construct MCDI v1 header */
		hdr_len = sizeof (hdr[0]);
		EFX_POPULATE_DWORD_8(hdr[0],
		    MCDI_HEADER_CODE, emrp->emr_cmd,
		    MCDI_HEADER_RESYNC, 1,
		    MCDI_HEADER_DATALEN, emrp->emr_in_length,
		    MCDI_HEADER_SEQ, seq,
		    MCDI_HEADER_NOT_EPOCH, new_epoch ?
0 : 1, 322 MCDI_HEADER_ERROR, 0, 323 MCDI_HEADER_RESPONSE, 0, 324 MCDI_HEADER_XFLAGS, xflags); 325 } 326 327 #if EFSYS_OPT_MCDI_LOGGING 328 if (emtp->emt_logger != NULL) { 329 emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_REQUEST, 330 &hdr[0], hdr_len, 331 emrp->emr_in_buf, emrp->emr_in_length); 332 } 333 #endif /* EFSYS_OPT_MCDI_LOGGING */ 334 335 efx_mcdi_send_request(enp, &hdr[0], hdr_len, 336 emrp->emr_in_buf, emrp->emr_in_length); 337 } 338 339 340 static void 341 efx_mcdi_read_response_header( 342 __in efx_nic_t *enp, 343 __inout efx_mcdi_req_t *emrp) 344 { 345 #if EFSYS_OPT_MCDI_LOGGING 346 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 347 #endif /* EFSYS_OPT_MCDI_LOGGING */ 348 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 349 efx_dword_t hdr[2]; 350 unsigned int hdr_len; 351 unsigned int data_len; 352 unsigned int seq; 353 unsigned int cmd; 354 unsigned int error; 355 efx_rc_t rc; 356 357 EFSYS_ASSERT(emrp != NULL); 358 359 efx_mcdi_read_response(enp, &hdr[0], 0, sizeof (hdr[0])); 360 hdr_len = sizeof (hdr[0]); 361 362 cmd = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE); 363 seq = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_SEQ); 364 error = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_ERROR); 365 366 if (cmd != MC_CMD_V2_EXTN) { 367 data_len = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_DATALEN); 368 } else { 369 efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1])); 370 hdr_len += sizeof (hdr[1]); 371 372 cmd = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD); 373 data_len = 374 EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN); 375 } 376 377 if (error && (data_len == 0)) { 378 /* The MC has rebooted since the request was sent. */ 379 EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US); 380 efx_mcdi_poll_reboot(enp); 381 rc = EIO; 382 goto fail1; 383 } 384 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 385 if (((cmd != emrp->emr_cmd) && (emrp->emr_cmd != MC_CMD_PROXY_CMD)) || 386 #else 387 if ((cmd != emrp->emr_cmd) || 388 #endif 389 (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) { 390 /* Response is for a different request */ 391 rc = EIO; 392 goto fail2; 393 } 394 if (error) { 395 efx_dword_t err[2]; 396 unsigned int err_len = MIN(data_len, sizeof (err)); 397 int err_code = MC_CMD_ERR_EPROTO; 398 int err_arg = 0; 399 400 /* Read error code (and arg num for MCDI v2 commands) */ 401 efx_mcdi_read_response(enp, &err, hdr_len, err_len); 402 403 if (err_len >= (MC_CMD_ERR_CODE_OFST + sizeof (efx_dword_t))) 404 err_code = EFX_DWORD_FIELD(err[0], EFX_DWORD_0); 405 #ifdef WITH_MCDI_V2 406 if (err_len >= (MC_CMD_ERR_ARG_OFST + sizeof (efx_dword_t))) 407 err_arg = EFX_DWORD_FIELD(err[1], EFX_DWORD_0); 408 #endif 409 emrp->emr_err_code = err_code; 410 emrp->emr_err_arg = err_arg; 411 412 #if EFSYS_OPT_MCDI_PROXY_AUTH 413 if ((err_code == MC_CMD_ERR_PROXY_PENDING) && 414 (err_len == sizeof (err))) { 415 /* 416 * The MCDI request would normally fail with EPERM, but 417 * firmware has forwarded it to an authorization agent 418 * attached to a privileged PF. 419 * 420 * Save the authorization request handle. The client 421 * must wait for a PROXY_RESPONSE event, or timeout. 
422 */ 423 emrp->emr_proxy_handle = err_arg; 424 } 425 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ 426 427 #if EFSYS_OPT_MCDI_LOGGING 428 if (emtp->emt_logger != NULL) { 429 emtp->emt_logger(emtp->emt_context, 430 EFX_LOG_MCDI_RESPONSE, 431 &hdr[0], hdr_len, 432 &err[0], err_len); 433 } 434 #endif /* EFSYS_OPT_MCDI_LOGGING */ 435 436 if (!emrp->emr_quiet) { 437 EFSYS_PROBE3(mcdi_err_arg, int, emrp->emr_cmd, 438 int, err_code, int, err_arg); 439 } 440 441 rc = efx_mcdi_request_errcode(err_code); 442 goto fail3; 443 } 444 445 emrp->emr_rc = 0; 446 emrp->emr_out_length_used = data_len; 447 #if EFSYS_OPT_MCDI_PROXY_AUTH 448 emrp->emr_proxy_handle = 0; 449 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ 450 return; 451 452 fail3: 453 fail2: 454 fail1: 455 emrp->emr_rc = rc; 456 emrp->emr_out_length_used = 0; 457 } 458 459 static void 460 efx_mcdi_finish_response( 461 __in efx_nic_t *enp, 462 __in efx_mcdi_req_t *emrp) 463 { 464 #if EFSYS_OPT_MCDI_LOGGING 465 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 466 #endif /* EFSYS_OPT_MCDI_LOGGING */ 467 efx_dword_t hdr[2]; 468 unsigned int hdr_len; 469 size_t bytes; 470 unsigned int resp_off; 471 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 472 unsigned int resp_cmd; 473 boolean_t proxied_cmd_resp = B_FALSE; 474 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ 475 476 if (emrp->emr_out_buf == NULL) 477 return; 478 479 /* Read the command header to detect MCDI response format */ 480 hdr_len = sizeof (hdr[0]); 481 efx_mcdi_read_response(enp, &hdr[0], 0, hdr_len); 482 if (EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) { 483 /* 484 * Read the actual payload length. The length given in the event 485 * is only correct for responses with the V1 format. 486 */ 487 efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1])); 488 hdr_len += sizeof (hdr[1]); 489 resp_off = hdr_len; 490 491 emrp->emr_out_length_used = EFX_DWORD_FIELD(hdr[1], 492 MC_CMD_V2_EXTN_IN_ACTUAL_LEN); 493 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 494 /* 495 * A proxy MCDI command is executed by PF on behalf of 496 * one of its VFs. The command to be proxied follows 497 * immediately afterward in the host buffer. 498 * PROXY_CMD inner call complete response should be copied to 499 * output buffer so that it can be returned to the requesting 500 * function in MC_CMD_PROXY_COMPLETE payload. 
		 */
		resp_cmd =
		    EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD);
		proxied_cmd_resp = ((emrp->emr_cmd == MC_CMD_PROXY_CMD) &&
		    (resp_cmd != MC_CMD_PROXY_CMD));
		if (proxied_cmd_resp) {
			resp_off = 0;
			emrp->emr_out_length_used += hdr_len;
		}
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */
	} else {
		resp_off = hdr_len;
	}

	/* Copy payload out into caller supplied buffer */
	bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length);
	efx_mcdi_read_response(enp, emrp->emr_out_buf, resp_off, bytes);

	/* Report bytes copied to caller (response message may be larger) */
	emrp->emr_out_length_used = bytes;

#if EFSYS_OPT_MCDI_LOGGING
	if (emtp->emt_logger != NULL) {
		emtp->emt_logger(emtp->emt_context,
		    EFX_LOG_MCDI_RESPONSE,
		    &hdr[0], hdr_len,
		    emrp->emr_out_buf, bytes);
	}
#endif /* EFSYS_OPT_MCDI_LOGGING */
}


	__checkReturn	boolean_t
efx_mcdi_request_poll(
	__in		efx_nic_t *enp)
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	efx_mcdi_req_t *emrp;
	efsys_lock_state_t state;
	efx_rc_t rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

	/* Serialise against post-watchdog efx_mcdi_ev* */
	EFSYS_LOCK(enp->en_eslp, state);

	EFSYS_ASSERT(emip->emi_pending_req != NULL);
	EFSYS_ASSERT(!emip->emi_ev_cpl);
	emrp = emip->emi_pending_req;

	/* Check if hardware is unavailable */
	if (efx_nic_hw_unavailable(enp)) {
		EFSYS_UNLOCK(enp->en_eslp, state);
		return (B_FALSE);
	}

	/* Check for reboot atomically w.r.t efx_mcdi_request_start */
	if (emip->emi_poll_cnt++ == 0) {
		if ((rc = efx_mcdi_poll_reboot(enp)) != 0) {
			emip->emi_pending_req = NULL;
			EFSYS_UNLOCK(enp->en_eslp, state);

			/* Reboot/Assertion */
			if (rc == EIO || rc == EINTR)
				efx_mcdi_raise_exception(enp, emrp, rc);

			goto fail1;
		}
	}

	/* Check if a response is available */
	if (efx_mcdi_poll_response(enp) == B_FALSE) {
		EFSYS_UNLOCK(enp->en_eslp, state);
		return (B_FALSE);
	}

	/* Read the response header */
	efx_mcdi_read_response_header(enp, emrp);

	/* Request complete */
	emip->emi_pending_req = NULL;

	/* Ensure stale MCDI requests fail after an MC reboot. */
	emip->emi_new_epoch = B_FALSE;

	EFSYS_UNLOCK(enp->en_eslp, state);

	if ((rc = emrp->emr_rc) != 0)
		goto fail2;

	efx_mcdi_finish_response(enp, emrp);
	return (B_TRUE);

fail2:
	if (!emrp->emr_quiet)
		EFSYS_PROBE(fail2);
fail1:
	if (!emrp->emr_quiet)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (B_TRUE);
}

	__checkReturn	boolean_t
efx_mcdi_request_abort(
	__in		efx_nic_t *enp)
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	efx_mcdi_req_t *emrp;
	boolean_t aborted;
	efsys_lock_state_t state;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

	/*
	 * efx_mcdi_ev_* may have already completed this event, and be
	 * spinning/blocked on the upper layer lock. So it *is* legitimate
	 * for emi_pending_req to be NULL. If there is a pending event
	 * completed request, then provide a "credit" to allow
	 * efx_mcdi_ev_cpl() to accept a single spurious completion.
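	 *
	 * As a caller-side illustration only (not part of this function), a
	 * polled transport built on these entry points typically drives a
	 * request as in the sketch below; timeout_us, elapsed_us and
	 * poll_interval_us are hypothetical local variables:
	 *
	 *	efx_mcdi_request_start(enp, emrp, B_FALSE);
	 *	efx_mcdi_get_timeout(enp, emrp, &timeout_us);
	 *	while (!efx_mcdi_request_poll(enp)) {
	 *		if (elapsed_us >= timeout_us) {
	 *			(void) efx_mcdi_request_abort(enp);
	 *			break;
	 *		}
	 *		EFSYS_SPIN(poll_interval_us);
	 *		elapsed_us += poll_interval_us;
	 *	}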
625 */ 626 EFSYS_LOCK(enp->en_eslp, state); 627 emrp = emip->emi_pending_req; 628 aborted = (emrp != NULL); 629 if (aborted) { 630 emip->emi_pending_req = NULL; 631 632 /* Error the request */ 633 emrp->emr_out_length_used = 0; 634 emrp->emr_rc = ETIMEDOUT; 635 636 /* Provide a credit for seqno/emr_pending_req mismatches */ 637 if (emip->emi_ev_cpl) 638 ++emip->emi_aborted; 639 640 /* 641 * The upper layer has called us, so we don't 642 * need to complete the request. 643 */ 644 } 645 EFSYS_UNLOCK(enp->en_eslp, state); 646 647 return (aborted); 648 } 649 650 __checkReturn efx_rc_t 651 efx_mcdi_get_client_handle( 652 __in efx_nic_t *enp, 653 __in efx_pcie_interface_t intf, 654 __in uint16_t pf, 655 __in uint16_t vf, 656 __out uint32_t *handle) 657 { 658 efx_mcdi_req_t req; 659 EFX_MCDI_DECLARE_BUF(payload, 660 MC_CMD_GET_CLIENT_HANDLE_IN_LEN, 661 MC_CMD_GET_CLIENT_HANDLE_OUT_LEN); 662 uint32_t pcie_intf; 663 efx_rc_t rc; 664 665 if (handle == NULL) { 666 rc = EINVAL; 667 goto fail1; 668 } 669 670 rc = efx_mcdi_intf_to_pcie(intf, &pcie_intf); 671 if (rc != 0) 672 goto fail2; 673 674 req.emr_cmd = MC_CMD_GET_CLIENT_HANDLE; 675 req.emr_in_buf = payload; 676 req.emr_in_length = MC_CMD_GET_CLIENT_HANDLE_IN_LEN; 677 req.emr_out_buf = payload; 678 req.emr_out_length = MC_CMD_GET_CLIENT_HANDLE_OUT_LEN; 679 680 MCDI_IN_SET_DWORD(req, GET_CLIENT_HANDLE_IN_TYPE, 681 MC_CMD_GET_CLIENT_HANDLE_IN_TYPE_FUNC); 682 MCDI_IN_SET_WORD(req, GET_CLIENT_HANDLE_IN_FUNC_PF, pf); 683 MCDI_IN_SET_WORD(req, GET_CLIENT_HANDLE_IN_FUNC_VF, vf); 684 MCDI_IN_SET_DWORD(req, GET_CLIENT_HANDLE_IN_FUNC_INTF, pcie_intf); 685 686 efx_mcdi_execute(enp, &req); 687 688 if (req.emr_rc != 0) { 689 rc = req.emr_rc; 690 goto fail3; 691 } 692 693 if (req.emr_out_length_used < MC_CMD_GET_CLIENT_HANDLE_OUT_LEN) { 694 rc = EMSGSIZE; 695 goto fail4; 696 } 697 698 *handle = MCDI_OUT_DWORD(req, GET_CLIENT_HANDLE_OUT_HANDLE); 699 700 return 0; 701 fail4: 702 EFSYS_PROBE(fail4); 703 fail3: 704 EFSYS_PROBE(fail3); 705 fail2: 706 EFSYS_PROBE(fail2); 707 fail1: 708 EFSYS_PROBE1(fail1, efx_rc_t, rc); 709 return (rc); 710 } 711 712 __checkReturn efx_rc_t 713 efx_mcdi_get_own_client_handle( 714 __in efx_nic_t *enp, 715 __out uint32_t *handle) 716 { 717 efx_rc_t rc; 718 719 rc = efx_mcdi_get_client_handle(enp, EFX_PCIE_INTERFACE_CALLER, 720 PCIE_FUNCTION_PF_NULL, PCIE_FUNCTION_VF_NULL, handle); 721 if (rc != 0) 722 goto fail1; 723 724 return (0); 725 fail1: 726 EFSYS_PROBE1(fail1, efx_rc_t, rc); 727 return (rc); 728 } 729 730 __checkReturn efx_rc_t 731 efx_mcdi_client_mac_addr_get( 732 __in efx_nic_t *enp, 733 __in uint32_t client_handle, 734 __out uint8_t addr_bytes[EFX_MAC_ADDR_LEN]) 735 { 736 efx_mcdi_req_t req; 737 EFX_MCDI_DECLARE_BUF(payload, 738 MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_LEN, 739 MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LEN(1)); 740 efx_rc_t rc; 741 742 req.emr_cmd = MC_CMD_GET_CLIENT_MAC_ADDRESSES; 743 req.emr_in_buf = payload; 744 req.emr_in_length = MC_CMD_GET_CLIENT_MAC_ADDRESSES_IN_LEN; 745 req.emr_out_buf = payload; 746 req.emr_out_length = MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LEN(1); 747 748 MCDI_IN_SET_DWORD(req, GET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE, 749 client_handle); 750 751 efx_mcdi_execute(enp, &req); 752 753 if (req.emr_rc != 0) { 754 rc = req.emr_rc; 755 goto fail1; 756 } 757 758 if (req.emr_out_length_used < 759 MC_CMD_GET_CLIENT_MAC_ADDRESSES_OUT_LEN(1)) { 760 rc = EMSGSIZE; 761 goto fail2; 762 } 763 764 memcpy(addr_bytes, 765 MCDI_OUT2(req, uint8_t, GET_CLIENT_MAC_ADDRESSES_OUT_MAC_ADDRS), 766 EFX_MAC_ADDR_LEN); 767 
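	/*
	 * A minimal caller-side sketch (illustrative only) of how this helper
	 * pairs with efx_mcdi_get_own_client_handle() above; error handling
	 * is elided:
	 *
	 *	uint32_t handle;
	 *	uint8_t mac[EFX_MAC_ADDR_LEN];
	 *
	 *	rc = efx_mcdi_get_own_client_handle(enp, &handle);
	 *	if (rc == 0)
	 *		rc = efx_mcdi_client_mac_addr_get(enp, handle, mac);
	 */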
768 return (0); 769 770 fail2: 771 EFSYS_PROBE(fail2); 772 fail1: 773 EFSYS_PROBE1(fail1, efx_rc_t, rc); 774 return (rc); 775 } 776 777 __checkReturn efx_rc_t 778 efx_mcdi_client_mac_addr_set( 779 __in efx_nic_t *enp, 780 __in uint32_t client_handle, 781 __in const uint8_t addr_bytes[EFX_MAC_ADDR_LEN]) 782 { 783 efx_mcdi_req_t req; 784 EFX_MCDI_DECLARE_BUF(payload, 785 MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LEN(1), 786 MC_CMD_SET_CLIENT_MAC_ADDRESSES_OUT_LEN); 787 uint32_t oui; 788 efx_rc_t rc; 789 790 if (EFX_MAC_ADDR_IS_MULTICAST(addr_bytes)) { 791 rc = EINVAL; 792 goto fail1; 793 } 794 795 oui = addr_bytes[0] << 16 | addr_bytes[1] << 8 | addr_bytes[2]; 796 if (oui == 0x000000) { 797 rc = EINVAL; 798 goto fail2; 799 } 800 801 req.emr_cmd = MC_CMD_SET_CLIENT_MAC_ADDRESSES; 802 req.emr_in_buf = payload; 803 req.emr_in_length = MC_CMD_SET_CLIENT_MAC_ADDRESSES_IN_LEN(1); 804 req.emr_out_buf = payload; 805 req.emr_out_length = MC_CMD_SET_CLIENT_MAC_ADDRESSES_OUT_LEN; 806 807 MCDI_IN_SET_DWORD(req, SET_CLIENT_MAC_ADDRESSES_IN_CLIENT_HANDLE, 808 client_handle); 809 810 memcpy(MCDI_IN2(req, uint8_t, SET_CLIENT_MAC_ADDRESSES_IN_MAC_ADDRS), 811 addr_bytes, EFX_MAC_ADDR_LEN); 812 813 efx_mcdi_execute(enp, &req); 814 815 if (req.emr_rc != 0) { 816 rc = req.emr_rc; 817 goto fail3; 818 } 819 820 return (0); 821 822 fail3: 823 EFSYS_PROBE(fail3); 824 fail2: 825 EFSYS_PROBE(fail2); 826 fail1: 827 EFSYS_PROBE1(fail1, efx_rc_t, rc); 828 return (rc); 829 } 830 831 void 832 efx_mcdi_get_timeout( 833 __in efx_nic_t *enp, 834 __in efx_mcdi_req_t *emrp, 835 __out uint32_t *timeoutp) 836 { 837 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 838 839 emcop->emco_get_timeout(enp, emrp, timeoutp); 840 } 841 842 __checkReturn efx_rc_t 843 efx_mcdi_request_errcode( 844 __in unsigned int err) 845 { 846 847 switch (err) { 848 /* MCDI v1 */ 849 case MC_CMD_ERR_EPERM: 850 return (EACCES); 851 case MC_CMD_ERR_ENOENT: 852 return (ENOENT); 853 case MC_CMD_ERR_EINTR: 854 return (EINTR); 855 case MC_CMD_ERR_EACCES: 856 return (EACCES); 857 case MC_CMD_ERR_EBUSY: 858 return (EBUSY); 859 case MC_CMD_ERR_EINVAL: 860 return (EINVAL); 861 case MC_CMD_ERR_EDEADLK: 862 return (EDEADLK); 863 case MC_CMD_ERR_ENOSYS: 864 return (ENOTSUP); 865 case MC_CMD_ERR_ETIME: 866 return (ETIMEDOUT); 867 case MC_CMD_ERR_ENOTSUP: 868 return (ENOTSUP); 869 case MC_CMD_ERR_EALREADY: 870 return (EALREADY); 871 872 /* MCDI v2 */ 873 case MC_CMD_ERR_EEXIST: 874 return (EEXIST); 875 #ifdef MC_CMD_ERR_EAGAIN 876 case MC_CMD_ERR_EAGAIN: 877 return (EAGAIN); 878 #endif 879 #ifdef MC_CMD_ERR_ENOSPC 880 case MC_CMD_ERR_ENOSPC: 881 return (ENOSPC); 882 #endif 883 case MC_CMD_ERR_ERANGE: 884 return (ERANGE); 885 886 case MC_CMD_ERR_ALLOC_FAIL: 887 return (ENOMEM); 888 case MC_CMD_ERR_NO_VADAPTOR: 889 return (ENOENT); 890 case MC_CMD_ERR_NO_EVB_PORT: 891 return (ENOENT); 892 case MC_CMD_ERR_NO_VSWITCH: 893 return (ENODEV); 894 case MC_CMD_ERR_VLAN_LIMIT: 895 return (EINVAL); 896 case MC_CMD_ERR_BAD_PCI_FUNC: 897 return (ENODEV); 898 case MC_CMD_ERR_BAD_VLAN_MODE: 899 return (EINVAL); 900 case MC_CMD_ERR_BAD_VSWITCH_TYPE: 901 return (EINVAL); 902 case MC_CMD_ERR_BAD_VPORT_TYPE: 903 return (EINVAL); 904 case MC_CMD_ERR_MAC_EXIST: 905 return (EEXIST); 906 907 case MC_CMD_ERR_PROXY_PENDING: 908 return (EAGAIN); 909 910 default: 911 EFSYS_PROBE1(mc_pcol_error, int, err); 912 return (EIO); 913 } 914 } 915 916 void 917 efx_mcdi_raise_exception( 918 __in efx_nic_t *enp, 919 __in_opt efx_mcdi_req_t *emrp, 920 __in int rc) 921 { 922 const efx_mcdi_transport_t *emtp = 
enp->en_mcdi.em_emtp; 923 efx_mcdi_exception_t exception; 924 925 /* Reboot or Assertion failure only */ 926 EFSYS_ASSERT(rc == EIO || rc == EINTR); 927 928 /* 929 * If MC_CMD_REBOOT causes a reboot (dependent on parameters), 930 * then the EIO is not worthy of an exception. 931 */ 932 if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO) 933 return; 934 935 exception = (rc == EIO) 936 ? EFX_MCDI_EXCEPTION_MC_REBOOT 937 : EFX_MCDI_EXCEPTION_MC_BADASSERT; 938 939 emtp->emt_exception(emtp->emt_context, exception); 940 } 941 942 void 943 efx_mcdi_execute( 944 __in efx_nic_t *enp, 945 __inout efx_mcdi_req_t *emrp) 946 { 947 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 948 949 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); 950 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 951 952 emrp->emr_quiet = B_FALSE; 953 emtp->emt_execute(emtp->emt_context, emrp); 954 } 955 956 void 957 efx_mcdi_execute_quiet( 958 __in efx_nic_t *enp, 959 __inout efx_mcdi_req_t *emrp) 960 { 961 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 962 963 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); 964 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 965 966 emrp->emr_quiet = B_TRUE; 967 emtp->emt_execute(emtp->emt_context, emrp); 968 } 969 970 void 971 efx_mcdi_ev_cpl( 972 __in efx_nic_t *enp, 973 __in unsigned int seq, 974 __in unsigned int outlen, 975 __in int errcode) 976 { 977 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 978 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 979 efx_mcdi_req_t *emrp; 980 efsys_lock_state_t state; 981 982 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); 983 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 984 985 /* 986 * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start() 987 * when we're completing an aborted request. 988 */ 989 EFSYS_LOCK(enp->en_eslp, state); 990 if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl || 991 (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) { 992 EFSYS_ASSERT(emip->emi_aborted > 0); 993 if (emip->emi_aborted > 0) 994 --emip->emi_aborted; 995 EFSYS_UNLOCK(enp->en_eslp, state); 996 return; 997 } 998 999 emrp = emip->emi_pending_req; 1000 emip->emi_pending_req = NULL; 1001 EFSYS_UNLOCK(enp->en_eslp, state); 1002 1003 if (emip->emi_max_version >= 2) { 1004 /* MCDIv2 response details do not fit into an event. */ 1005 efx_mcdi_read_response_header(enp, emrp); 1006 } else { 1007 if (errcode != 0) { 1008 if (!emrp->emr_quiet) { 1009 EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd, 1010 int, errcode); 1011 } 1012 emrp->emr_out_length_used = 0; 1013 emrp->emr_rc = efx_mcdi_request_errcode(errcode); 1014 } else { 1015 emrp->emr_out_length_used = outlen; 1016 emrp->emr_rc = 0; 1017 } 1018 } 1019 if (emrp->emr_rc == 0) 1020 efx_mcdi_finish_response(enp, emrp); 1021 1022 emtp->emt_ev_cpl(emtp->emt_context); 1023 } 1024 1025 #if EFSYS_OPT_MCDI_PROXY_AUTH 1026 1027 __checkReturn efx_rc_t 1028 efx_mcdi_get_proxy_handle( 1029 __in efx_nic_t *enp, 1030 __in efx_mcdi_req_t *emrp, 1031 __out uint32_t *handlep) 1032 { 1033 efx_rc_t rc; 1034 1035 _NOTE(ARGUNUSED(enp)) 1036 1037 /* 1038 * Return proxy handle from MCDI request that returned with error 1039 * MC_MCD_ERR_PROXY_PENDING. This handle is used to wait for a matching 1040 * PROXY_RESPONSE event. 
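	 *
	 * A minimal caller-side sketch (illustrative only; how the caller
	 * waits for the PROXY_RESPONSE event is transport specific, and
	 * wait_for_proxy_response() is a hypothetical placeholder):
	 *
	 *	efx_mcdi_execute(enp, &req);
	 *	if ((req.emr_rc != 0) &&
	 *	    (efx_mcdi_get_proxy_handle(enp, &req, &handle) == 0)) {
	 *		wait_for_proxy_response(handle);
	 *		efx_mcdi_execute(enp, &req);
	 *	}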
1041 */ 1042 if ((emrp == NULL) || (handlep == NULL)) { 1043 rc = EINVAL; 1044 goto fail1; 1045 } 1046 if ((emrp->emr_rc != 0) && 1047 (emrp->emr_err_code == MC_CMD_ERR_PROXY_PENDING)) { 1048 *handlep = emrp->emr_proxy_handle; 1049 rc = 0; 1050 } else { 1051 *handlep = 0; 1052 rc = ENOENT; 1053 } 1054 return (rc); 1055 1056 fail1: 1057 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1058 return (rc); 1059 } 1060 1061 void 1062 efx_mcdi_ev_proxy_response( 1063 __in efx_nic_t *enp, 1064 __in unsigned int handle, 1065 __in unsigned int status) 1066 { 1067 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 1068 efx_rc_t rc; 1069 1070 /* 1071 * Handle results of an authorization request for a privileged MCDI 1072 * command. If authorization was granted then we must re-issue the 1073 * original MCDI request. If authorization failed or timed out, 1074 * then the original MCDI request should be completed with the 1075 * result code from this event. 1076 */ 1077 rc = (status == 0) ? 0 : efx_mcdi_request_errcode(status); 1078 1079 emtp->emt_ev_proxy_response(emtp->emt_context, handle, rc); 1080 } 1081 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ 1082 1083 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 1084 void 1085 efx_mcdi_ev_proxy_request( 1086 __in efx_nic_t *enp, 1087 __in unsigned int index) 1088 { 1089 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 1090 1091 if (emtp->emt_ev_proxy_request != NULL) 1092 emtp->emt_ev_proxy_request(emtp->emt_context, index); 1093 } 1094 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ 1095 void 1096 efx_mcdi_ev_death( 1097 __in efx_nic_t *enp, 1098 __in int rc) 1099 { 1100 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 1101 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 1102 efx_mcdi_req_t *emrp = NULL; 1103 boolean_t ev_cpl; 1104 efsys_lock_state_t state; 1105 1106 /* 1107 * The MCDI request (if there is one) has been terminated, either 1108 * by a BADASSERT or REBOOT event. 1109 * 1110 * If there is an outstanding event-completed MCDI operation, then we 1111 * will never receive the completion event (because both MCDI 1112 * completions and BADASSERT events are sent to the same evq). So 1113 * complete this MCDI op. 1114 * 1115 * This function might run in parallel with efx_mcdi_request_poll() 1116 * for poll completed mcdi requests, and also with 1117 * efx_mcdi_request_start() for post-watchdog completions. 1118 */ 1119 EFSYS_LOCK(enp->en_eslp, state); 1120 emrp = emip->emi_pending_req; 1121 ev_cpl = emip->emi_ev_cpl; 1122 if (emrp != NULL && emip->emi_ev_cpl) { 1123 emip->emi_pending_req = NULL; 1124 1125 emrp->emr_out_length_used = 0; 1126 emrp->emr_rc = rc; 1127 ++emip->emi_aborted; 1128 } 1129 1130 /* 1131 * Since we're running in parallel with a request, consume the 1132 * status word before dropping the lock. 
1133 */ 1134 if (rc == EIO || rc == EINTR) { 1135 EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US); 1136 (void) efx_mcdi_poll_reboot(enp); 1137 emip->emi_new_epoch = B_TRUE; 1138 } 1139 1140 EFSYS_UNLOCK(enp->en_eslp, state); 1141 1142 efx_mcdi_raise_exception(enp, emrp, rc); 1143 1144 if (emrp != NULL && ev_cpl) 1145 emtp->emt_ev_cpl(emtp->emt_context); 1146 } 1147 1148 __checkReturn efx_rc_t 1149 efx_mcdi_get_version( 1150 __in efx_nic_t *enp, 1151 __in uint32_t flags, 1152 __out efx_mcdi_version_t *verp) 1153 { 1154 efx_nic_board_info_t *board_infop = &verp->emv_board_info; 1155 EFX_MCDI_DECLARE_BUF(payload, 1156 MC_CMD_GET_VERSION_EXT_IN_LEN, 1157 MC_CMD_GET_VERSION_V2_OUT_LEN); 1158 efx_word_t *ver_words; 1159 uint16_t version[4]; 1160 efx_mcdi_req_t req; 1161 uint32_t firmware; 1162 efx_rc_t rc; 1163 1164 EFX_STATIC_ASSERT(sizeof (verp->emv_version) == 1165 MC_CMD_GET_VERSION_OUT_VERSION_LEN); 1166 EFX_STATIC_ASSERT(sizeof (verp->emv_firmware) == 1167 MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN); 1168 1169 EFX_STATIC_ASSERT(EFX_MCDI_VERSION_BOARD_INFO == 1170 (1U << MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN)); 1171 1172 EFX_STATIC_ASSERT(sizeof (board_infop->enbi_serial) == 1173 MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN); 1174 EFX_STATIC_ASSERT(sizeof (board_infop->enbi_name) == 1175 MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_LEN); 1176 EFX_STATIC_ASSERT(sizeof (board_infop->enbi_revision) == 1177 MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_LEN); 1178 1179 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 1180 1181 req.emr_cmd = MC_CMD_GET_VERSION; 1182 req.emr_in_buf = payload; 1183 req.emr_out_buf = payload; 1184 1185 if ((flags & EFX_MCDI_VERSION_BOARD_INFO) != 0) { 1186 /* Request basic + extended version information. */ 1187 req.emr_in_length = MC_CMD_GET_VERSION_EXT_IN_LEN; 1188 req.emr_out_length = MC_CMD_GET_VERSION_V2_OUT_LEN; 1189 } else { 1190 /* Request only basic version information. 
*/ 1191 req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN; 1192 req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN; 1193 } 1194 1195 efx_mcdi_execute(enp, &req); 1196 1197 if (req.emr_rc != 0) { 1198 rc = req.emr_rc; 1199 goto fail1; 1200 } 1201 1202 /* bootrom support */ 1203 if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) { 1204 version[0] = version[1] = version[2] = version[3] = 0; 1205 firmware = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE); 1206 goto out; 1207 } 1208 1209 if (req.emr_out_length_used < req.emr_out_length) { 1210 rc = EMSGSIZE; 1211 goto fail2; 1212 } 1213 1214 ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION); 1215 version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0); 1216 version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0); 1217 version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0); 1218 version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0); 1219 firmware = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE); 1220 1221 out: 1222 memset(verp, 0, sizeof (*verp)); 1223 1224 verp->emv_version[0] = version[0]; 1225 verp->emv_version[1] = version[1]; 1226 verp->emv_version[2] = version[2]; 1227 verp->emv_version[3] = version[3]; 1228 verp->emv_firmware = firmware; 1229 1230 verp->emv_flags = MCDI_OUT_DWORD(req, GET_VERSION_V2_OUT_FLAGS); 1231 verp->emv_flags &= flags; 1232 1233 if ((verp->emv_flags & EFX_MCDI_VERSION_BOARD_INFO) != 0) { 1234 memcpy(board_infop->enbi_serial, 1235 MCDI_OUT2(req, char, GET_VERSION_V2_OUT_BOARD_SERIAL), 1236 sizeof (board_infop->enbi_serial)); 1237 memcpy(board_infop->enbi_name, 1238 MCDI_OUT2(req, char, GET_VERSION_V2_OUT_BOARD_NAME), 1239 sizeof (board_infop->enbi_name)); 1240 board_infop->enbi_revision = 1241 MCDI_OUT_DWORD(req, GET_VERSION_V2_OUT_BOARD_REVISION); 1242 } 1243 1244 return (0); 1245 1246 fail2: 1247 EFSYS_PROBE(fail2); 1248 fail1: 1249 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1250 1251 return (rc); 1252 } 1253 1254 static __checkReturn efx_rc_t 1255 efx_mcdi_get_boot_status( 1256 __in efx_nic_t *enp, 1257 __out efx_mcdi_boot_t *statusp) 1258 { 1259 EFX_MCDI_DECLARE_BUF(payload, 1260 MC_CMD_GET_BOOT_STATUS_IN_LEN, 1261 MC_CMD_GET_BOOT_STATUS_OUT_LEN); 1262 efx_mcdi_req_t req; 1263 efx_rc_t rc; 1264 1265 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 1266 1267 req.emr_cmd = MC_CMD_GET_BOOT_STATUS; 1268 req.emr_in_buf = payload; 1269 req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN; 1270 req.emr_out_buf = payload; 1271 req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN; 1272 1273 efx_mcdi_execute_quiet(enp, &req); 1274 1275 /* 1276 * NOTE: Unprivileged functions cannot access boot status, 1277 * so the MCDI request will return EACCES. This is 1278 * also checked in efx_mcdi_version. 
1279 */ 1280 1281 if (req.emr_rc != 0) { 1282 rc = req.emr_rc; 1283 goto fail1; 1284 } 1285 1286 if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) { 1287 rc = EMSGSIZE; 1288 goto fail2; 1289 } 1290 1291 if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS, 1292 GET_BOOT_STATUS_OUT_FLAGS_PRIMARY)) 1293 *statusp = EFX_MCDI_BOOT_PRIMARY; 1294 else 1295 *statusp = EFX_MCDI_BOOT_SECONDARY; 1296 1297 return (0); 1298 1299 fail2: 1300 EFSYS_PROBE(fail2); 1301 fail1: 1302 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1303 1304 return (rc); 1305 } 1306 1307 __checkReturn efx_rc_t 1308 efx_mcdi_version( 1309 __in efx_nic_t *enp, 1310 __out_ecount_opt(4) uint16_t versionp[4], 1311 __out_opt uint32_t *buildp, 1312 __out_opt efx_mcdi_boot_t *statusp) 1313 { 1314 efx_mcdi_version_t ver; 1315 efx_mcdi_boot_t status; 1316 efx_rc_t rc; 1317 1318 rc = efx_mcdi_get_version(enp, 0, &ver); 1319 if (rc != 0) 1320 goto fail1; 1321 1322 /* The bootrom doesn't understand BOOT_STATUS */ 1323 if (MC_FW_VERSION_IS_BOOTLOADER(ver.emv_firmware)) { 1324 status = EFX_MCDI_BOOT_ROM; 1325 goto out; 1326 } 1327 1328 rc = efx_mcdi_get_boot_status(enp, &status); 1329 if (rc == EACCES) { 1330 /* Unprivileged functions cannot access BOOT_STATUS */ 1331 status = EFX_MCDI_BOOT_PRIMARY; 1332 memset(ver.emv_version, 0, sizeof (ver.emv_version)); 1333 ver.emv_firmware = 0; 1334 } else if (rc != 0) { 1335 goto fail2; 1336 } 1337 1338 out: 1339 if (versionp != NULL) 1340 memcpy(versionp, ver.emv_version, sizeof (ver.emv_version)); 1341 if (buildp != NULL) 1342 *buildp = ver.emv_firmware; 1343 if (statusp != NULL) 1344 *statusp = status; 1345 1346 return (0); 1347 1348 fail2: 1349 EFSYS_PROBE(fail2); 1350 fail1: 1351 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1352 1353 return (rc); 1354 } 1355 1356 __checkReturn efx_rc_t 1357 efx_mcdi_get_capabilities( 1358 __in efx_nic_t *enp, 1359 __out_opt uint32_t *flagsp, 1360 __out_opt uint16_t *rx_dpcpu_fw_idp, 1361 __out_opt uint16_t *tx_dpcpu_fw_idp, 1362 __out_opt uint32_t *flags2p, 1363 __out_opt uint32_t *tso2ncp) 1364 { 1365 efx_mcdi_req_t req; 1366 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN, 1367 MC_CMD_GET_CAPABILITIES_V2_OUT_LEN); 1368 boolean_t v2_capable; 1369 efx_rc_t rc; 1370 1371 req.emr_cmd = MC_CMD_GET_CAPABILITIES; 1372 req.emr_in_buf = payload; 1373 req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN; 1374 req.emr_out_buf = payload; 1375 req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN; 1376 1377 efx_mcdi_execute_quiet(enp, &req); 1378 1379 if (req.emr_rc != 0) { 1380 rc = req.emr_rc; 1381 goto fail1; 1382 } 1383 1384 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) { 1385 rc = EMSGSIZE; 1386 goto fail2; 1387 } 1388 1389 if (flagsp != NULL) 1390 *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1); 1391 1392 if (rx_dpcpu_fw_idp != NULL) 1393 *rx_dpcpu_fw_idp = MCDI_OUT_WORD(req, 1394 GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID); 1395 1396 if (tx_dpcpu_fw_idp != NULL) 1397 *tx_dpcpu_fw_idp = MCDI_OUT_WORD(req, 1398 GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); 1399 1400 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) 1401 v2_capable = B_FALSE; 1402 else 1403 v2_capable = B_TRUE; 1404 1405 if (flags2p != NULL) { 1406 *flags2p = (v2_capable) ? 1407 MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2) : 1408 0; 1409 } 1410 1411 if (tso2ncp != NULL) { 1412 *tso2ncp = (v2_capable) ? 
		    MCDI_OUT_WORD(req,
			GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS) :
		    0;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_do_reboot(
	__in		efx_nic_t *enp,
	__in		boolean_t after_assertion)
{
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_REBOOT_IN_LEN,
		MC_CMD_REBOOT_OUT_LEN);
	efx_mcdi_req_t req;
	efx_rc_t rc;

	/*
	 * We could require the caller to have caused en_mod_flags=0 to
	 * call this function. This doesn't help the other port though,
	 * which is about to get the MC ripped out from underneath it.
	 * Since it has to cope with the subsequent fallout of MCDI
	 * failures, we should as well.
	 */
	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);

	req.emr_cmd = MC_CMD_REBOOT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_REBOOT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS,
	    (after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0));

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc == EACCES) {
		/* Unprivileged functions cannot reboot the MC. */
		goto out;
	}

	/* A successful reboot request returns EIO. */
	if (req.emr_rc != 0 && req.emr_rc != EIO) {
		rc = req.emr_rc;
		goto fail1;
	}

out:
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_reboot(
	__in		efx_nic_t *enp)
{
	return (efx_mcdi_do_reboot(enp, B_FALSE));
}

	__checkReturn	efx_rc_t
efx_mcdi_exit_assertion_handler(
	__in		efx_nic_t *enp)
{
	return (efx_mcdi_do_reboot(enp, B_TRUE));
}

	__checkReturn	efx_rc_t
efx_mcdi_read_assertion(
	__in		efx_nic_t *enp)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_ASSERTS_IN_LEN,
		MC_CMD_GET_ASSERTS_OUT_LEN);
	const char *reason;
	unsigned int flags;
	unsigned int index;
	unsigned int ofst;
	int retry;
	efx_rc_t rc;

	/*
	 * Before we attempt to chat to the MC, we should verify that the MC
	 * isn't in its assertion handler, either due to a previous reboot,
	 * or because we're reinitializing due to an eec_exception().
	 *
	 * Use GET_ASSERTS to read any assertion state that may be present.
	 * Retry this command twice: once because a boot-time assertion failure
	 * might cause the first MCDI request to fail, and again because we
	 * might race with efx_mcdi_exit_assertion_handler() running on
	 * partner port(s) on the same NIC.
	 */
	retry = 2;
	do {
		(void) memset(payload, 0, sizeof (payload));
		req.emr_cmd = MC_CMD_GET_ASSERTS;
		req.emr_in_buf = payload;
		req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN;
		req.emr_out_buf = payload;
		req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN;

		MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1);
		efx_mcdi_execute_quiet(enp, &req);

	} while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0);

	if (req.emr_rc != 0) {
		if (req.emr_rc == EACCES) {
			/* Unprivileged functions cannot clear assertions. */
			goto out;
		}
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	/* Print out any assertion state recorded */
	flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return (0);

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP)
		? "illegal address trap"
		: "unknown assertion";
	EFSYS_PROBE3(mcpu_assertion,
	    const char *, reason, unsigned int,
	    MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS),
	    unsigned int,
	    MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers (r1 ... r31) */
	ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
	for (index = 1;
	    index < 1 + MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	    index++) {
		EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int,
		    EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst),
			EFX_DWORD_0));
		ofst += sizeof (efx_dword_t);
	}
	EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN);

out:
	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


/*
 * Internal routines for specific MCDI requests.
 */

	__checkReturn	efx_rc_t
efx_mcdi_drv_attach(
	__in		efx_nic_t *enp,
	__in		boolean_t attach)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRV_ATTACH_IN_V2_LEN,
		MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_DRV_ATTACH;
	req.emr_in_buf = payload;
	if (enp->en_drv_version[0] == '\0') {
		req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN;
	} else {
		req.emr_in_length = MC_CMD_DRV_ATTACH_IN_V2_LEN;
	}
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN;

	/*
	 * Typically, client drivers use DONT_CARE for the datapath firmware
	 * type to ensure that the driver can attach to an unprivileged
	 * function. The datapath firmware type to use is controlled by the
	 * 'sfboot' utility.
	 * If a client driver wishes to attach with a specific datapath
	 * firmware type, it can be passed as the second argument of the
	 * efx_nic_probe API. One such example is the ESXi native driver,
	 * which attempts attaching with the FULL_FEATURED datapath firmware
	 * type first and falls back to the DONT_CARE datapath firmware type
	 * if MC_CMD_DRV_ATTACH fails (see the sketch just below).
	 */
	MCDI_IN_POPULATE_DWORD_2(req, DRV_ATTACH_IN_NEW_STATE,
	    DRV_ATTACH_IN_ATTACH, attach ?
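	/*
	 * A minimal sketch of the fallback pattern described above
	 * (illustrative only; the efx_fw_variant_t constants shown are
	 * assumed from the efx_nic_probe() interface rather than defined in
	 * this file):
	 *
	 *	rc = efx_nic_probe(enp, EFX_FW_VARIANT_FULL_FEATURED);
	 *	if (rc != 0)
	 *		rc = efx_nic_probe(enp, EFX_FW_VARIANT_DONT_CARE);
	 */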
1 : 0, 1626 DRV_ATTACH_IN_SUBVARIANT_AWARE, EFSYS_OPT_FW_SUBVARIANT_AWARE); 1627 MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1); 1628 MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, enp->efv); 1629 1630 if (req.emr_in_length >= MC_CMD_DRV_ATTACH_IN_V2_LEN) { 1631 EFX_STATIC_ASSERT(sizeof (enp->en_drv_version) == 1632 MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN); 1633 memcpy(MCDI_IN2(req, char, DRV_ATTACH_IN_V2_DRIVER_VERSION), 1634 enp->en_drv_version, 1635 MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN); 1636 } 1637 1638 efx_mcdi_execute(enp, &req); 1639 1640 if (req.emr_rc != 0) { 1641 rc = req.emr_rc; 1642 goto fail1; 1643 } 1644 1645 if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) { 1646 rc = EMSGSIZE; 1647 goto fail2; 1648 } 1649 1650 return (0); 1651 1652 fail2: 1653 EFSYS_PROBE(fail2); 1654 fail1: 1655 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1656 1657 return (rc); 1658 } 1659 1660 __checkReturn efx_rc_t 1661 efx_mcdi_get_board_cfg( 1662 __in efx_nic_t *enp, 1663 __out_opt uint32_t *board_typep, 1664 __out_opt efx_dword_t *capabilitiesp, 1665 __out_ecount_opt(6) uint8_t mac_addrp[6]) 1666 { 1667 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 1668 efx_mcdi_req_t req; 1669 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_BOARD_CFG_IN_LEN, 1670 MC_CMD_GET_BOARD_CFG_OUT_LENMIN); 1671 efx_rc_t rc; 1672 1673 req.emr_cmd = MC_CMD_GET_BOARD_CFG; 1674 req.emr_in_buf = payload; 1675 req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN; 1676 req.emr_out_buf = payload; 1677 req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMIN; 1678 1679 efx_mcdi_execute(enp, &req); 1680 1681 if (req.emr_rc != 0) { 1682 rc = req.emr_rc; 1683 goto fail1; 1684 } 1685 1686 if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { 1687 rc = EMSGSIZE; 1688 goto fail2; 1689 } 1690 1691 if (mac_addrp != NULL) { 1692 uint8_t *addrp; 1693 1694 if (emip->emi_port == 1) { 1695 addrp = MCDI_OUT2(req, uint8_t, 1696 GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0); 1697 } else if (emip->emi_port == 2) { 1698 addrp = MCDI_OUT2(req, uint8_t, 1699 GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1); 1700 } else { 1701 rc = EINVAL; 1702 goto fail3; 1703 } 1704 1705 EFX_MAC_ADDR_COPY(mac_addrp, addrp); 1706 } 1707 1708 if (capabilitiesp != NULL) { 1709 if (emip->emi_port == 1) { 1710 *capabilitiesp = *MCDI_OUT2(req, efx_dword_t, 1711 GET_BOARD_CFG_OUT_CAPABILITIES_PORT0); 1712 } else if (emip->emi_port == 2) { 1713 *capabilitiesp = *MCDI_OUT2(req, efx_dword_t, 1714 GET_BOARD_CFG_OUT_CAPABILITIES_PORT1); 1715 } else { 1716 rc = EINVAL; 1717 goto fail4; 1718 } 1719 } 1720 1721 if (board_typep != NULL) { 1722 *board_typep = MCDI_OUT_DWORD(req, 1723 GET_BOARD_CFG_OUT_BOARD_TYPE); 1724 } 1725 1726 return (0); 1727 1728 fail4: 1729 EFSYS_PROBE(fail4); 1730 fail3: 1731 EFSYS_PROBE(fail3); 1732 fail2: 1733 EFSYS_PROBE(fail2); 1734 fail1: 1735 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1736 1737 return (rc); 1738 } 1739 1740 __checkReturn efx_rc_t 1741 efx_mcdi_get_resource_limits( 1742 __in efx_nic_t *enp, 1743 __out_opt uint32_t *nevqp, 1744 __out_opt uint32_t *nrxqp, 1745 __out_opt uint32_t *ntxqp) 1746 { 1747 efx_mcdi_req_t req; 1748 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RESOURCE_LIMITS_IN_LEN, 1749 MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN); 1750 efx_rc_t rc; 1751 1752 req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS; 1753 req.emr_in_buf = payload; 1754 req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN; 1755 req.emr_out_buf = payload; 1756 req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN; 1757 1758 efx_mcdi_execute(enp, &req); 1759 1760 if (req.emr_rc != 0) { 1761 rc 
= req.emr_rc; 1762 goto fail1; 1763 } 1764 1765 if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) { 1766 rc = EMSGSIZE; 1767 goto fail2; 1768 } 1769 1770 if (nevqp != NULL) 1771 *nevqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_EVQ); 1772 if (nrxqp != NULL) 1773 *nrxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ); 1774 if (ntxqp != NULL) 1775 *ntxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ); 1776 1777 return (0); 1778 1779 fail2: 1780 EFSYS_PROBE(fail2); 1781 fail1: 1782 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1783 1784 return (rc); 1785 } 1786 1787 __checkReturn efx_rc_t 1788 efx_mcdi_get_phy_cfg( 1789 __in efx_nic_t *enp) 1790 { 1791 efx_port_t *epp = &(enp->en_port); 1792 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 1793 efx_mcdi_req_t req; 1794 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_CFG_IN_LEN, 1795 MC_CMD_GET_PHY_CFG_OUT_LEN); 1796 #if EFSYS_OPT_NAMES 1797 const char *namep; 1798 size_t namelen; 1799 #endif 1800 uint32_t phy_media_type; 1801 efx_rc_t rc; 1802 1803 req.emr_cmd = MC_CMD_GET_PHY_CFG; 1804 req.emr_in_buf = payload; 1805 req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN; 1806 req.emr_out_buf = payload; 1807 req.emr_out_length = MC_CMD_GET_PHY_CFG_OUT_LEN; 1808 1809 efx_mcdi_execute(enp, &req); 1810 1811 if (req.emr_rc != 0) { 1812 rc = req.emr_rc; 1813 goto fail1; 1814 } 1815 1816 if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) { 1817 rc = EMSGSIZE; 1818 goto fail2; 1819 } 1820 1821 encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE); 1822 #if EFSYS_OPT_NAMES 1823 namep = MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME); 1824 namelen = MIN(sizeof (encp->enc_phy_name) - 1, 1825 strnlen(namep, MC_CMD_GET_PHY_CFG_OUT_NAME_LEN)); 1826 (void) memset(encp->enc_phy_name, 0, 1827 sizeof (encp->enc_phy_name)); 1828 memcpy(encp->enc_phy_name, namep, namelen); 1829 #endif /* EFSYS_OPT_NAMES */ 1830 (void) memset(encp->enc_phy_revision, 0, 1831 sizeof (encp->enc_phy_revision)); 1832 memcpy(encp->enc_phy_revision, 1833 MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION), 1834 MIN(sizeof (encp->enc_phy_revision) - 1, 1835 MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN)); 1836 #if EFSYS_OPT_PHY_LED_CONTROL 1837 encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) | 1838 (1 << EFX_PHY_LED_OFF) | 1839 (1 << EFX_PHY_LED_ON)); 1840 #endif /* EFSYS_OPT_PHY_LED_CONTROL */ 1841 1842 /* Get the media type of the fixed port, if recognised. 
*/ 1843 EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI); 1844 EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4); 1845 EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4); 1846 EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP); 1847 EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS); 1848 EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T); 1849 EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS); 1850 phy_media_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE); 1851 epp->ep_fixed_port_type = (efx_phy_media_type_t)phy_media_type; 1852 if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES) 1853 epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID; 1854 1855 epp->ep_phy_cap_mask = 1856 MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP); 1857 #if EFSYS_OPT_PHY_FLAGS 1858 encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS); 1859 #endif /* EFSYS_OPT_PHY_FLAGS */ 1860 1861 encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT); 1862 1863 /* Populate internal state */ 1864 encp->enc_mcdi_mdio_channel = 1865 (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL); 1866 1867 #if EFSYS_OPT_PHY_STATS 1868 encp->enc_mcdi_phy_stat_mask = 1869 MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK); 1870 #endif /* EFSYS_OPT_PHY_STATS */ 1871 1872 #if EFSYS_OPT_BIST 1873 encp->enc_bist_mask = 0; 1874 if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, 1875 GET_PHY_CFG_OUT_BIST_CABLE_SHORT)) 1876 encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT); 1877 if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, 1878 GET_PHY_CFG_OUT_BIST_CABLE_LONG)) 1879 encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_LONG); 1880 if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, 1881 GET_PHY_CFG_OUT_BIST)) 1882 encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_NORMAL); 1883 #endif /* EFSYS_OPT_BIST */ 1884 1885 return (0); 1886 1887 fail2: 1888 EFSYS_PROBE(fail2); 1889 fail1: 1890 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1891 1892 return (rc); 1893 } 1894 1895 __checkReturn efx_rc_t 1896 efx_mcdi_firmware_update_supported( 1897 __in efx_nic_t *enp, 1898 __out boolean_t *supportedp) 1899 { 1900 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 1901 efx_rc_t rc; 1902 1903 if (emcop != NULL) { 1904 if ((rc = emcop->emco_feature_supported(enp, 1905 EFX_MCDI_FEATURE_FW_UPDATE, supportedp)) != 0) 1906 goto fail1; 1907 } else { 1908 /* Earlier devices always supported updates */ 1909 *supportedp = B_TRUE; 1910 } 1911 1912 return (0); 1913 1914 fail1: 1915 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1916 1917 return (rc); 1918 } 1919 1920 __checkReturn efx_rc_t 1921 efx_mcdi_macaddr_change_supported( 1922 __in efx_nic_t *enp, 1923 __out boolean_t *supportedp) 1924 { 1925 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 1926 efx_rc_t rc; 1927 1928 if (emcop != NULL) { 1929 if ((rc = emcop->emco_feature_supported(enp, 1930 EFX_MCDI_FEATURE_MACADDR_CHANGE, supportedp)) != 0) 1931 goto fail1; 1932 } else { 1933 /* Earlier devices always supported MAC changes */ 1934 *supportedp = B_TRUE; 1935 } 1936 1937 return (0); 1938 1939 fail1: 1940 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1941 1942 return (rc); 1943 } 1944 1945 __checkReturn efx_rc_t 1946 efx_mcdi_link_control_supported( 1947 __in efx_nic_t *enp, 1948 __out boolean_t *supportedp) 1949 { 1950 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 1951 efx_rc_t rc; 1952 1953 if (emcop != NULL) { 1954 if ((rc = emcop->emco_feature_supported(enp, 1955 EFX_MCDI_FEATURE_LINK_CONTROL, supportedp)) != 0) 
1956 goto fail1; 1957 } else { 1958 /* Earlier devices always supported link control */ 1959 *supportedp = B_TRUE; 1960 } 1961 1962 return (0); 1963 1964 fail1: 1965 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1966 1967 return (rc); 1968 } 1969 1970 __checkReturn efx_rc_t 1971 efx_mcdi_mac_spoofing_supported( 1972 __in efx_nic_t *enp, 1973 __out boolean_t *supportedp) 1974 { 1975 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 1976 efx_rc_t rc; 1977 1978 if (emcop != NULL) { 1979 if ((rc = emcop->emco_feature_supported(enp, 1980 EFX_MCDI_FEATURE_MAC_SPOOFING, supportedp)) != 0) 1981 goto fail1; 1982 } else { 1983 /* Earlier devices always supported MAC spoofing */ 1984 *supportedp = B_TRUE; 1985 } 1986 1987 return (0); 1988 1989 fail1: 1990 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1991 1992 return (rc); 1993 } 1994 1995 #if EFSYS_OPT_BIST 1996 1997 #if EFX_OPTS_EF10() 1998 /* 1999 * Enter bist offline mode. This is a fw mode which puts the NIC into a state 2000 * where memory BIST tests can be run and not much else can interfere or happen. 2001 * A reboot is required to exit this mode. 2002 */ 2003 __checkReturn efx_rc_t 2004 efx_mcdi_bist_enable_offline( 2005 __in efx_nic_t *enp) 2006 { 2007 efx_mcdi_req_t req; 2008 efx_rc_t rc; 2009 2010 EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN == 0); 2011 EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN == 0); 2012 2013 req.emr_cmd = MC_CMD_ENABLE_OFFLINE_BIST; 2014 req.emr_in_buf = NULL; 2015 req.emr_in_length = 0; 2016 req.emr_out_buf = NULL; 2017 req.emr_out_length = 0; 2018 2019 efx_mcdi_execute(enp, &req); 2020 2021 if (req.emr_rc != 0) { 2022 rc = req.emr_rc; 2023 goto fail1; 2024 } 2025 2026 return (0); 2027 2028 fail1: 2029 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2030 2031 return (rc); 2032 } 2033 #endif /* EFX_OPTS_EF10() */ 2034 2035 __checkReturn efx_rc_t 2036 efx_mcdi_bist_start( 2037 __in efx_nic_t *enp, 2038 __in efx_bist_type_t type) 2039 { 2040 efx_mcdi_req_t req; 2041 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_START_BIST_IN_LEN, 2042 MC_CMD_START_BIST_OUT_LEN); 2043 efx_rc_t rc; 2044 2045 req.emr_cmd = MC_CMD_START_BIST; 2046 req.emr_in_buf = payload; 2047 req.emr_in_length = MC_CMD_START_BIST_IN_LEN; 2048 req.emr_out_buf = payload; 2049 req.emr_out_length = MC_CMD_START_BIST_OUT_LEN; 2050 2051 switch (type) { 2052 case EFX_BIST_TYPE_PHY_NORMAL: 2053 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST); 2054 break; 2055 case EFX_BIST_TYPE_PHY_CABLE_SHORT: 2056 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, 2057 MC_CMD_PHY_BIST_CABLE_SHORT); 2058 break; 2059 case EFX_BIST_TYPE_PHY_CABLE_LONG: 2060 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, 2061 MC_CMD_PHY_BIST_CABLE_LONG); 2062 break; 2063 case EFX_BIST_TYPE_MC_MEM: 2064 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, 2065 MC_CMD_MC_MEM_BIST); 2066 break; 2067 case EFX_BIST_TYPE_SAT_MEM: 2068 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, 2069 MC_CMD_PORT_MEM_BIST); 2070 break; 2071 case EFX_BIST_TYPE_REG: 2072 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, 2073 MC_CMD_REG_BIST); 2074 break; 2075 default: 2076 EFSYS_ASSERT(0); 2077 } 2078 2079 efx_mcdi_execute(enp, &req); 2080 2081 if (req.emr_rc != 0) { 2082 rc = req.emr_rc; 2083 goto fail1; 2084 } 2085 2086 return (0); 2087 2088 fail1: 2089 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2090 2091 return (rc); 2092 } 2093 2094 #endif /* EFSYS_OPT_BIST */ 2095 2096 2097 /* Enable logging of some events (e.g. 
link state changes) */ 2098 __checkReturn efx_rc_t 2099 efx_mcdi_log_ctrl( 2100 __in efx_nic_t *enp) 2101 { 2102 efx_mcdi_req_t req; 2103 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LOG_CTRL_IN_LEN, 2104 MC_CMD_LOG_CTRL_OUT_LEN); 2105 efx_rc_t rc; 2106 2107 req.emr_cmd = MC_CMD_LOG_CTRL; 2108 req.emr_in_buf = payload; 2109 req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN; 2110 req.emr_out_buf = payload; 2111 req.emr_out_length = MC_CMD_LOG_CTRL_OUT_LEN; 2112 2113 MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST, 2114 MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ); 2115 MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0); 2116 2117 efx_mcdi_execute(enp, &req); 2118 2119 if (req.emr_rc != 0) { 2120 rc = req.emr_rc; 2121 goto fail1; 2122 } 2123 2124 return (0); 2125 2126 fail1: 2127 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2128 2129 return (rc); 2130 } 2131 2132 2133 #if EFSYS_OPT_MAC_STATS 2134 2135 __checkReturn efx_rc_t 2136 efx_mcdi_mac_stats( 2137 __in efx_nic_t *enp, 2138 __in uint32_t vport_id, 2139 __in_opt efsys_mem_t *esmp, 2140 __in efx_stats_action_t action, 2141 __in uint16_t period_ms) 2142 { 2143 efx_mcdi_req_t req; 2144 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_MAC_STATS_IN_LEN, 2145 MC_CMD_MAC_STATS_V2_OUT_DMA_LEN); 2146 int clear = (action == EFX_STATS_CLEAR); 2147 int upload = (action == EFX_STATS_UPLOAD); 2148 int enable = (action == EFX_STATS_ENABLE_NOEVENTS); 2149 int events = (action == EFX_STATS_ENABLE_EVENTS); 2150 int disable = (action == EFX_STATS_DISABLE); 2151 efx_rc_t rc; 2152 2153 req.emr_cmd = MC_CMD_MAC_STATS; 2154 req.emr_in_buf = payload; 2155 req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN; 2156 req.emr_out_buf = payload; 2157 req.emr_out_length = MC_CMD_MAC_STATS_V2_OUT_DMA_LEN; 2158 2159 MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD, 2160 MAC_STATS_IN_DMA, upload, 2161 MAC_STATS_IN_CLEAR, clear, 2162 MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable, 2163 MAC_STATS_IN_PERIODIC_ENABLE, enable | events, 2164 MAC_STATS_IN_PERIODIC_NOEVENT, !events, 2165 MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0); 2166 2167 if (enable || events || upload) { 2168 const efx_nic_cfg_t *encp = &enp->en_nic_cfg; 2169 uint32_t bytes; 2170 2171 /* Periodic stats or stats upload require a DMA buffer */ 2172 if (esmp == NULL) { 2173 rc = EINVAL; 2174 goto fail1; 2175 } 2176 2177 if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) { 2178 /* MAC stats count too small for legacy MAC stats */ 2179 rc = ENOSPC; 2180 goto fail2; 2181 } 2182 2183 bytes = encp->enc_mac_stats_nstats * sizeof (efx_qword_t); 2184 2185 if (EFSYS_MEM_SIZE(esmp) < bytes) { 2186 /* DMA buffer too small */ 2187 rc = ENOSPC; 2188 goto fail3; 2189 } 2190 2191 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO, 2192 EFSYS_MEM_ADDR(esmp) & 0xffffffff); 2193 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI, 2194 EFSYS_MEM_ADDR(esmp) >> 32); 2195 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes); 2196 } 2197 2198 /* 2199 * NOTE: Do not use EVB_PORT_ID_ASSIGNED when disabling periodic stats, 2200 * as this may fail (and leave periodic DMA enabled) if the 2201 * vadapter has already been deleted. 2202 */ 2203 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_PORT_ID, 2204 (disable ? 
EVB_PORT_ID_NULL : vport_id)); 2205 2206 efx_mcdi_execute(enp, &req); 2207 2208 if (req.emr_rc != 0) { 2209 /* EF10: Expect ENOENT if no DMA queues are initialised */ 2210 if ((req.emr_rc != ENOENT) || 2211 (enp->en_rx_qcount + enp->en_tx_qcount != 0)) { 2212 rc = req.emr_rc; 2213 goto fail4; 2214 } 2215 } 2216 2217 return (0); 2218 2219 fail4: 2220 EFSYS_PROBE(fail4); 2221 fail3: 2222 EFSYS_PROBE(fail3); 2223 fail2: 2224 EFSYS_PROBE(fail2); 2225 fail1: 2226 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2227 2228 return (rc); 2229 } 2230 2231 __checkReturn efx_rc_t 2232 efx_mcdi_mac_stats_clear( 2233 __in efx_nic_t *enp) 2234 { 2235 efx_rc_t rc; 2236 2237 if ((rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, NULL, 2238 EFX_STATS_CLEAR, 0)) != 0) 2239 goto fail1; 2240 2241 return (0); 2242 2243 fail1: 2244 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2245 2246 return (rc); 2247 } 2248 2249 __checkReturn efx_rc_t 2250 efx_mcdi_mac_stats_upload( 2251 __in efx_nic_t *enp, 2252 __in efsys_mem_t *esmp) 2253 { 2254 efx_rc_t rc; 2255 2256 /* 2257 * The MC DMAs aggregate statistics for our convenience, so we can 2258 * avoid having to pull the statistics buffer into the cache to 2259 * maintain cumulative statistics. 2260 */ 2261 if ((rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp, 2262 EFX_STATS_UPLOAD, 0)) != 0) 2263 goto fail1; 2264 2265 return (0); 2266 2267 fail1: 2268 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2269 2270 return (rc); 2271 } 2272 2273 __checkReturn efx_rc_t 2274 efx_mcdi_mac_stats_periodic( 2275 __in efx_nic_t *enp, 2276 __in efsys_mem_t *esmp, 2277 __in uint16_t period_ms, 2278 __in boolean_t events) 2279 { 2280 efx_rc_t rc; 2281 2282 /* 2283 * The MC DMAs aggregate statistics for our convenience, so we can 2284 * avoid having to pull the statistics buffer into the cache to 2285 * maintain cumulative statistics. 2286 * Huntington uses a fixed 1sec period. 2287 * Medford uses a fixed 1sec period before v6.2.1.1033 firmware. 
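 *
 * Illustrative examples (editorial sketch, not from an existing caller;
 * esmp is assumed to be a DMA buffer sized for enc_mac_stats_nstats
 * counters of sizeof (efx_qword_t) each):
 *	efx_mcdi_mac_stats_periodic(enp, esmp, 1000, B_FALSE);
 *		requests a 1000ms DMA period with no completion events;
 *	efx_mcdi_mac_stats_periodic(enp, esmp, 0, B_FALSE);
 *		disables periodic statistics DMA again.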
2288 */ 2289 if (period_ms == 0) 2290 rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, NULL, 2291 EFX_STATS_DISABLE, 0); 2292 else if (events) 2293 rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp, 2294 EFX_STATS_ENABLE_EVENTS, period_ms); 2295 else 2296 rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp, 2297 EFX_STATS_ENABLE_NOEVENTS, period_ms); 2298 2299 if (rc != 0) 2300 goto fail1; 2301 2302 return (0); 2303 2304 fail1: 2305 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2306 2307 return (rc); 2308 } 2309 2310 #endif /* EFSYS_OPT_MAC_STATS */ 2311 2312 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() 2313 2314 __checkReturn efx_rc_t 2315 efx_mcdi_intf_from_pcie( 2316 __in uint32_t pcie_intf, 2317 __out efx_pcie_interface_t *efx_intf) 2318 { 2319 efx_rc_t rc; 2320 2321 switch (pcie_intf) { 2322 case PCIE_INTERFACE_CALLER: 2323 *efx_intf = EFX_PCIE_INTERFACE_CALLER; 2324 break; 2325 case PCIE_INTERFACE_HOST_PRIMARY: 2326 *efx_intf = EFX_PCIE_INTERFACE_HOST_PRIMARY; 2327 break; 2328 case PCIE_INTERFACE_NIC_EMBEDDED: 2329 *efx_intf = EFX_PCIE_INTERFACE_NIC_EMBEDDED; 2330 break; 2331 default: 2332 rc = EINVAL; 2333 goto fail1; 2334 } 2335 2336 return (0); 2337 2338 fail1: 2339 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2340 2341 return (rc); 2342 } 2343 2344 __checkReturn efx_rc_t 2345 efx_mcdi_intf_to_pcie( 2346 __in efx_pcie_interface_t efx_intf, 2347 __out uint32_t *pcie_intf) 2348 { 2349 efx_rc_t rc; 2350 2351 switch (efx_intf) { 2352 case EFX_PCIE_INTERFACE_CALLER: 2353 *pcie_intf = PCIE_INTERFACE_CALLER; 2354 break; 2355 case EFX_PCIE_INTERFACE_HOST_PRIMARY: 2356 *pcie_intf = PCIE_INTERFACE_HOST_PRIMARY; 2357 break; 2358 case EFX_PCIE_INTERFACE_NIC_EMBEDDED: 2359 *pcie_intf = PCIE_INTERFACE_NIC_EMBEDDED; 2360 break; 2361 default: 2362 rc = EINVAL; 2363 goto fail1; 2364 } 2365 2366 return (0); 2367 2368 fail1: 2369 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2370 return (rc); 2371 } 2372 2373 /* 2374 * This function returns the pf and vf number of a function. If it is a pf the 2375 * vf number is 0xffff. The vf number is the index of the vf on that 2376 * function. So if you have 3 vfs on pf 0 the 3 vfs will return (pf=0,vf=0), 2377 * (pf=0,vf=1), (pf=0,vf=2) and the pf will return (pf=0, vf=0xffff).
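 *
 * Illustrative use (editorial sketch, error handling trimmed):
 *	uint32_t pf, vf;
 *	efx_pcie_interface_t intf;
 *	rc = efx_mcdi_get_function_info(enp, &pf, &vf, &intf);
 * On success, pf/vf identify the calling PCIe function as described above,
 * and intf reports the PCIe interface it resides on (see
 * efx_mcdi_intf_from_pcie() above for the mapping).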
2378 */ 2379 __checkReturn efx_rc_t 2380 efx_mcdi_get_function_info( 2381 __in efx_nic_t *enp, 2382 __out uint32_t *pfp, 2383 __out_opt uint32_t *vfp, 2384 __out_opt efx_pcie_interface_t *intfp) 2385 { 2386 efx_pcie_interface_t intf; 2387 efx_mcdi_req_t req; 2388 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_FUNCTION_INFO_IN_LEN, 2389 MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN); 2390 uint32_t pcie_intf; 2391 efx_rc_t rc; 2392 2393 req.emr_cmd = MC_CMD_GET_FUNCTION_INFO; 2394 req.emr_in_buf = payload; 2395 req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN; 2396 req.emr_out_buf = payload; 2397 req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN; 2398 2399 efx_mcdi_execute(enp, &req); 2400 2401 if (req.emr_rc != 0) { 2402 rc = req.emr_rc; 2403 goto fail1; 2404 } 2405 2406 if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) { 2407 rc = EMSGSIZE; 2408 goto fail2; 2409 } 2410 2411 *pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF); 2412 if (vfp != NULL) 2413 *vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF); 2414 2415 if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_V2_LEN) { 2416 intf = EFX_PCIE_INTERFACE_HOST_PRIMARY; 2417 } else { 2418 pcie_intf = MCDI_OUT_DWORD(req, 2419 GET_FUNCTION_INFO_OUT_V2_INTF); 2420 2421 rc = efx_mcdi_intf_from_pcie(pcie_intf, &intf); 2422 if (rc != 0) 2423 goto fail3; 2424 } 2425 2426 if (intfp != NULL) 2427 *intfp = intf; 2428 2429 return (0); 2430 2431 fail3: 2432 EFSYS_PROBE(fail3); 2433 fail2: 2434 EFSYS_PROBE(fail2); 2435 fail1: 2436 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2437 2438 return (rc); 2439 } 2440 2441 __checkReturn efx_rc_t 2442 efx_mcdi_privilege_mask( 2443 __in efx_nic_t *enp, 2444 __in uint32_t pf, 2445 __in uint32_t vf, 2446 __out uint32_t *maskp) 2447 { 2448 efx_mcdi_req_t req; 2449 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PRIVILEGE_MASK_IN_LEN, 2450 MC_CMD_PRIVILEGE_MASK_OUT_LEN); 2451 efx_rc_t rc; 2452 2453 req.emr_cmd = MC_CMD_PRIVILEGE_MASK; 2454 req.emr_in_buf = payload; 2455 req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN; 2456 req.emr_out_buf = payload; 2457 req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN; 2458 2459 MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION, 2460 PRIVILEGE_MASK_IN_FUNCTION_PF, pf, 2461 PRIVILEGE_MASK_IN_FUNCTION_VF, vf); 2462 2463 efx_mcdi_execute(enp, &req); 2464 2465 if (req.emr_rc != 0) { 2466 rc = req.emr_rc; 2467 goto fail1; 2468 } 2469 2470 if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) { 2471 rc = EMSGSIZE; 2472 goto fail2; 2473 } 2474 2475 *maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK); 2476 2477 return (0); 2478 2479 fail2: 2480 EFSYS_PROBE(fail2); 2481 fail1: 2482 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2483 2484 return (rc); 2485 } 2486 2487 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */ 2488 2489 __checkReturn efx_rc_t 2490 efx_mcdi_set_workaround( 2491 __in efx_nic_t *enp, 2492 __in uint32_t type, 2493 __in boolean_t enabled, 2494 __out_opt uint32_t *flagsp) 2495 { 2496 efx_mcdi_req_t req; 2497 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_WORKAROUND_IN_LEN, 2498 MC_CMD_WORKAROUND_EXT_OUT_LEN); 2499 efx_rc_t rc; 2500 2501 req.emr_cmd = MC_CMD_WORKAROUND; 2502 req.emr_in_buf = payload; 2503 req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN; 2504 req.emr_out_buf = payload; 2505 req.emr_out_length = MC_CMD_WORKAROUND_EXT_OUT_LEN; 2506 2507 MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, type); 2508 MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, enabled ?
1 : 0); 2509 2510 efx_mcdi_execute_quiet(enp, &req); 2511 2512 if (req.emr_rc != 0) { 2513 rc = req.emr_rc; 2514 goto fail1; 2515 } 2516 2517 if (flagsp != NULL) { 2518 if (req.emr_out_length_used >= MC_CMD_WORKAROUND_EXT_OUT_LEN) 2519 *flagsp = MCDI_OUT_DWORD(req, WORKAROUND_EXT_OUT_FLAGS); 2520 else 2521 *flagsp = 0; 2522 } 2523 2524 return (0); 2525 2526 fail1: 2527 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2528 2529 return (rc); 2530 } 2531 2532 2533 __checkReturn efx_rc_t 2534 efx_mcdi_get_workarounds( 2535 __in efx_nic_t *enp, 2536 __out_opt uint32_t *implementedp, 2537 __out_opt uint32_t *enabledp) 2538 { 2539 efx_mcdi_req_t req; 2540 EFX_MCDI_DECLARE_BUF(payload, 0, MC_CMD_GET_WORKAROUNDS_OUT_LEN); 2541 efx_rc_t rc; 2542 2543 req.emr_cmd = MC_CMD_GET_WORKAROUNDS; 2544 req.emr_in_buf = NULL; 2545 req.emr_in_length = 0; 2546 req.emr_out_buf = payload; 2547 req.emr_out_length = MC_CMD_GET_WORKAROUNDS_OUT_LEN; 2548 2549 efx_mcdi_execute(enp, &req); 2550 2551 if (req.emr_rc != 0) { 2552 rc = req.emr_rc; 2553 goto fail1; 2554 } 2555 2556 if (req.emr_out_length_used < MC_CMD_GET_WORKAROUNDS_OUT_LEN) { 2557 rc = EMSGSIZE; 2558 goto fail2; 2559 } 2560 2561 if (implementedp != NULL) { 2562 *implementedp = 2563 MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_IMPLEMENTED); 2564 } 2565 2566 if (enabledp != NULL) { 2567 *enabledp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_ENABLED); 2568 } 2569 2570 return (0); 2571 2572 fail2: 2573 EFSYS_PROBE(fail2); 2574 fail1: 2575 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2576 2577 return (rc); 2578 } 2579 2580 /* 2581 * Size of media information page in accordance with SFF-8472 and SFF-8436. 2582 * It is used in MCDI interface as well. 2583 */ 2584 #define EFX_PHY_MEDIA_INFO_PAGE_SIZE 0x80 2585 2586 /* 2587 * Transceiver identifiers from SFF-8024 Table 4-1. 
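 * The identifier is byte 0 of the module's lower page 0; it is read below
 * via efx_mcdi_get_phy_media_info() (MCDI page 0, offset 0) to select the
 * SFF-8472 (SFP) or SFF-8436 (QSFP) register layout.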
2588 */ 2589 #define EFX_SFF_TRANSCEIVER_ID_SFP 0x03 /* SFP/SFP+/SFP28 */ 2590 #define EFX_SFF_TRANSCEIVER_ID_QSFP 0x0c /* QSFP */ 2591 #define EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS 0x0d /* QSFP+ or later */ 2592 #define EFX_SFF_TRANSCEIVER_ID_QSFP28 0x11 /* QSFP28 or later */ 2593 2594 static __checkReturn efx_rc_t 2595 efx_mcdi_get_phy_media_info( 2596 __in efx_nic_t *enp, 2597 __in uint32_t mcdi_page, 2598 __in uint8_t offset, 2599 __in uint8_t len, 2600 __out_bcount(len) uint8_t *data) 2601 { 2602 efx_mcdi_req_t req; 2603 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN, 2604 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN( 2605 EFX_PHY_MEDIA_INFO_PAGE_SIZE)); 2606 efx_rc_t rc; 2607 2608 EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE); 2609 2610 req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO; 2611 req.emr_in_buf = payload; 2612 req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN; 2613 req.emr_out_buf = payload; 2614 req.emr_out_length = 2615 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE); 2616 2617 MCDI_IN_SET_DWORD(req, GET_PHY_MEDIA_INFO_IN_PAGE, mcdi_page); 2618 2619 efx_mcdi_execute(enp, &req); 2620 2621 if (req.emr_rc != 0) { 2622 rc = req.emr_rc; 2623 goto fail1; 2624 } 2625 2626 if (req.emr_out_length_used != 2627 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE)) { 2628 rc = EMSGSIZE; 2629 goto fail2; 2630 } 2631 2632 if (MCDI_OUT_DWORD(req, GET_PHY_MEDIA_INFO_OUT_DATALEN) != 2633 EFX_PHY_MEDIA_INFO_PAGE_SIZE) { 2634 rc = EIO; 2635 goto fail3; 2636 } 2637 2638 memcpy(data, 2639 MCDI_OUT2(req, uint8_t, GET_PHY_MEDIA_INFO_OUT_DATA) + offset, 2640 len); 2641 2642 return (0); 2643 2644 fail3: 2645 EFSYS_PROBE(fail3); 2646 fail2: 2647 EFSYS_PROBE(fail2); 2648 fail1: 2649 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2650 2651 return (rc); 2652 } 2653 2654 __checkReturn efx_rc_t 2655 efx_mcdi_phy_module_get_info( 2656 __in efx_nic_t *enp, 2657 __in uint8_t dev_addr, 2658 __in size_t offset, 2659 __in size_t len, 2660 __out_bcount(len) uint8_t *data) 2661 { 2662 efx_port_t *epp = &(enp->en_port); 2663 efx_rc_t rc; 2664 uint32_t mcdi_lower_page; 2665 uint32_t mcdi_upper_page; 2666 uint8_t id; 2667 2668 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); 2669 2670 /* 2671 * Map device address to MC_CMD_GET_PHY_MEDIA_INFO pages. 2672 * Offset plus length interface allows to access page 0 only. 2673 * I.e. non-zero upper pages are not accessible. 2674 * See SFF-8472 section 4 Memory Organization and SFF-8436 section 7.6 2675 * QSFP+ Memory Map for details on how information is structured 2676 * and accessible. 2677 */ 2678 switch (epp->ep_fixed_port_type) { 2679 case EFX_PHY_MEDIA_SFP_PLUS: 2680 case EFX_PHY_MEDIA_QSFP_PLUS: 2681 /* Port type supports modules */ 2682 break; 2683 default: 2684 rc = ENOTSUP; 2685 goto fail1; 2686 } 2687 2688 /* 2689 * For all supported port types, MCDI page 0 offset 0 holds the 2690 * transceiver identifier. Probe to determine the data layout. 2691 * Definitions from SFF-8024 Table 4-1. 2692 */ 2693 rc = efx_mcdi_get_phy_media_info(enp, 2694 0, 0, sizeof(id), &id); 2695 if (rc != 0) 2696 goto fail2; 2697 2698 switch (id) { 2699 case EFX_SFF_TRANSCEIVER_ID_SFP: 2700 /* 2701 * In accordance with SFF-8472 Diagnostic Monitoring 2702 * Interface for Optical Transceivers section 4 Memory 2703 * Organization two 2-wire addresses are defined. 
2704 */ 2705 switch (dev_addr) { 2706 /* Base information */ 2707 case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE: 2708 /* 2709 * MCDI page 0 should be used to access lower 2710 * page 0 (0x00 - 0x7f) at the device address 0xA0. 2711 */ 2712 mcdi_lower_page = 0; 2713 /* 2714 * MCDI page 1 should be used to access upper 2715 * page 0 (0x80 - 0xff) at the device address 0xA0. 2716 */ 2717 mcdi_upper_page = 1; 2718 break; 2719 /* Diagnostics */ 2720 case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM: 2721 /* 2722 * MCDI page 2 should be used to access lower 2723 * page 0 (0x00 - 0x7f) at the device address 0xA2. 2724 */ 2725 mcdi_lower_page = 2; 2726 /* 2727 * MCDI page 3 should be used to access upper 2728 * page 0 (0x80 - 0xff) at the device address 0xA2. 2729 */ 2730 mcdi_upper_page = 3; 2731 break; 2732 default: 2733 rc = ENOTSUP; 2734 goto fail3; 2735 } 2736 break; 2737 case EFX_SFF_TRANSCEIVER_ID_QSFP: 2738 case EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS: 2739 case EFX_SFF_TRANSCEIVER_ID_QSFP28: 2740 switch (dev_addr) { 2741 case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP: 2742 /* 2743 * MCDI page -1 should be used to access lower page 0 2744 * (0x00 - 0x7f). 2745 */ 2746 mcdi_lower_page = (uint32_t)-1; 2747 /* 2748 * MCDI page 0 should be used to access upper page 0 2749 * (0x80 - 0xff). 2750 */ 2751 mcdi_upper_page = 0; 2752 break; 2753 default: 2754 rc = ENOTSUP; 2755 goto fail3; 2756 } 2757 break; 2758 default: 2759 rc = ENOTSUP; 2760 goto fail3; 2761 } 2762 2763 EFX_STATIC_ASSERT(EFX_PHY_MEDIA_INFO_PAGE_SIZE <= 0xFF); 2764 2765 if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) { 2766 size_t read_len = 2767 MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset); 2768 2769 rc = efx_mcdi_get_phy_media_info(enp, 2770 mcdi_lower_page, (uint8_t)offset, (uint8_t)read_len, data); 2771 if (rc != 0) 2772 goto fail4; 2773 2774 data += read_len; 2775 len -= read_len; 2776 2777 offset = 0; 2778 } else { 2779 offset -= EFX_PHY_MEDIA_INFO_PAGE_SIZE; 2780 } 2781 2782 if (len > 0) { 2783 EFSYS_ASSERT3U(len, <=, EFX_PHY_MEDIA_INFO_PAGE_SIZE); 2784 EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE); 2785 2786 rc = efx_mcdi_get_phy_media_info(enp, 2787 mcdi_upper_page, (uint8_t)offset, (uint8_t)len, data); 2788 if (rc != 0) 2789 goto fail5; 2790 } 2791 2792 return (0); 2793 2794 fail5: 2795 EFSYS_PROBE(fail5); 2796 fail4: 2797 EFSYS_PROBE(fail4); 2798 fail3: 2799 EFSYS_PROBE(fail3); 2800 fail2: 2801 EFSYS_PROBE(fail2); 2802 fail1: 2803 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2804 2805 return (rc); 2806 } 2807 2808 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() 2809 2810 #define INIT_EVQ_MAXNBUFS MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 2811 2812 #if EFX_OPTS_EF10() 2813 # if (INIT_EVQ_MAXNBUFS < EF10_EVQ_MAXNBUFS) 2814 # error "INIT_EVQ_MAXNBUFS too small" 2815 # endif 2816 #endif /* EFX_OPTS_EF10 */ 2817 #if EFSYS_OPT_RIVERHEAD 2818 # if (INIT_EVQ_MAXNBUFS < RHEAD_EVQ_MAXNBUFS) 2819 # error "INIT_EVQ_MAXNBUFS too small" 2820 # endif 2821 #endif /* EFSYS_OPT_RIVERHEAD */ 2822 2823 __checkReturn efx_rc_t 2824 efx_mcdi_init_evq( 2825 __in efx_nic_t *enp, 2826 __in unsigned int instance, 2827 __in efsys_mem_t *esmp, 2828 __in size_t nevs, 2829 __in uint32_t irq, 2830 __in uint32_t target_evq, 2831 __in uint32_t us, 2832 __in uint32_t flags, 2833 __in boolean_t low_latency) 2834 { 2835 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp); 2836 efx_mcdi_req_t req; 2837 EFX_MCDI_DECLARE_BUF(payload, 2838 MC_CMD_INIT_EVQ_V2_IN_LEN(INIT_EVQ_MAXNBUFS), 2839 MC_CMD_INIT_EVQ_V2_OUT_LEN); 2840 boolean_t interrupting; 2841 int ev_extended_width; 2842 int ev_cut_through;
2843 int ev_merge; 2844 unsigned int evq_type; 2845 efx_qword_t *dma_addr; 2846 uint64_t addr; 2847 int npages; 2848 int i; 2849 efx_rc_t rc; 2850 2851 npages = efx_evq_nbufs(enp, nevs, flags); 2852 if (npages > INIT_EVQ_MAXNBUFS) { 2853 rc = EINVAL; 2854 goto fail1; 2855 } 2856 2857 req.emr_cmd = MC_CMD_INIT_EVQ; 2858 req.emr_in_buf = payload; 2859 req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages); 2860 req.emr_out_buf = payload; 2861 req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN; 2862 2863 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs); 2864 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance); 2865 2866 interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == 2867 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT); 2868 2869 if (interrupting) 2870 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq); 2871 else 2872 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TARGET_EVQ, target_evq); 2873 2874 if (encp->enc_init_evq_v2_supported) { 2875 /* 2876 * On Medford the low latency license is required to enable RX 2877 * and event cut through and to disable RX batching. If event 2878 * queue type in flags is auto, we let the firmware decide the 2879 * settings to use. If the adapter has a low latency license, 2880 * it will choose the best settings for low latency, otherwise 2881 * it will choose the best settings for throughput. 2882 */ 2883 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { 2884 case EFX_EVQ_FLAGS_TYPE_AUTO: 2885 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO; 2886 break; 2887 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: 2888 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT; 2889 break; 2890 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: 2891 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY; 2892 break; 2893 default: 2894 rc = EINVAL; 2895 goto fail2; 2896 } 2897 /* EvQ type controls merging, no manual settings */ 2898 ev_merge = 0; 2899 ev_cut_through = 0; 2900 } else { 2901 /* EvQ types other than manual are not supported */ 2902 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL; 2903 /* 2904 * On Huntington RX and TX event batching can only be requested 2905 * together (even if the datapath firmware doesn't actually 2906 * support RX batching). If event cut through is enabled no RX 2907 * batching will occur. 2908 * 2909 * So always enable RX and TX event batching, and enable event 2910 * cut through if we want low latency operation. 2911 */ 2912 ev_merge = 1; 2913 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { 2914 case EFX_EVQ_FLAGS_TYPE_AUTO: 2915 ev_cut_through = low_latency ? 1 : 0; 2916 break; 2917 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: 2918 ev_cut_through = 0; 2919 break; 2920 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: 2921 ev_cut_through = 1; 2922 break; 2923 default: 2924 rc = EINVAL; 2925 goto fail2; 2926 } 2927 } 2928 2929 /* 2930 * On EF100, extended width event queues have a different event 2931 * descriptor layout and are used to support descriptor proxy queues. 
2932 */ 2933 ev_extended_width = 0; 2934 #if EFSYS_OPT_EV_EXTENDED_WIDTH 2935 if (encp->enc_init_evq_extended_width_supported) { 2936 if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) 2937 ev_extended_width = 1; 2938 } 2939 #endif 2940 2941 MCDI_IN_POPULATE_DWORD_8(req, INIT_EVQ_V2_IN_FLAGS, 2942 INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting, 2943 INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0, 2944 INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0, 2945 INIT_EVQ_V2_IN_FLAG_CUT_THRU, ev_cut_through, 2946 INIT_EVQ_V2_IN_FLAG_RX_MERGE, ev_merge, 2947 INIT_EVQ_V2_IN_FLAG_TX_MERGE, ev_merge, 2948 INIT_EVQ_V2_IN_FLAG_TYPE, evq_type, 2949 INIT_EVQ_V2_IN_FLAG_EXT_WIDTH, ev_extended_width); 2950 2951 /* If the value is zero then disable the timer */ 2952 if (us == 0) { 2953 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, 2954 MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS); 2955 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0); 2956 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0); 2957 } else { 2958 unsigned int ticks; 2959 2960 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) 2961 goto fail3; 2962 2963 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, 2964 MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF); 2965 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks); 2966 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks); 2967 } 2968 2969 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE, 2970 MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS); 2971 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0); 2972 2973 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR); 2974 addr = EFSYS_MEM_ADDR(esmp); 2975 2976 for (i = 0; i < npages; i++) { 2977 EFX_POPULATE_QWORD_2(*dma_addr, 2978 EFX_DWORD_1, (uint32_t)(addr >> 32), 2979 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); 2980 2981 dma_addr++; 2982 addr += EFX_BUF_SIZE; 2983 } 2984 2985 efx_mcdi_execute(enp, &req); 2986 2987 if (req.emr_rc != 0) { 2988 rc = req.emr_rc; 2989 goto fail4; 2990 } 2991 2992 if (encp->enc_init_evq_v2_supported) { 2993 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) { 2994 rc = EMSGSIZE; 2995 goto fail5; 2996 } 2997 EFSYS_PROBE1(mcdi_evq_flags, uint32_t, 2998 MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS)); 2999 } else { 3000 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) { 3001 rc = EMSGSIZE; 3002 goto fail6; 3003 } 3004 } 3005 3006 /* NOTE: ignore the returned IRQ param as firmware does not set it. */ 3007 3008 return (0); 3009 3010 fail6: 3011 EFSYS_PROBE(fail6); 3012 fail5: 3013 EFSYS_PROBE(fail5); 3014 fail4: 3015 EFSYS_PROBE(fail4); 3016 fail3: 3017 EFSYS_PROBE(fail3); 3018 fail2: 3019 EFSYS_PROBE(fail2); 3020 fail1: 3021 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3022 3023 return (rc); 3024 } 3025 3026 __checkReturn efx_rc_t 3027 efx_mcdi_fini_evq( 3028 __in efx_nic_t *enp, 3029 __in uint32_t instance) 3030 { 3031 efx_mcdi_req_t req; 3032 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN, 3033 MC_CMD_FINI_EVQ_OUT_LEN); 3034 efx_rc_t rc; 3035 3036 req.emr_cmd = MC_CMD_FINI_EVQ; 3037 req.emr_in_buf = payload; 3038 req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN; 3039 req.emr_out_buf = payload; 3040 req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN; 3041 3042 MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance); 3043 3044 efx_mcdi_execute_quiet(enp, &req); 3045 3046 if (req.emr_rc != 0) { 3047 rc = req.emr_rc; 3048 goto fail1; 3049 } 3050 3051 return (0); 3052 3053 fail1: 3054 /* 3055 * EALREADY is not an error, but indicates that the MC has rebooted and 3056 * that the EVQ has already been destroyed. 
3057 */ 3058 if (rc != EALREADY) 3059 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3060 3061 return (rc); 3062 } 3063 3064 __checkReturn efx_rc_t 3065 efx_mcdi_init_rxq( 3066 __in efx_nic_t *enp, 3067 __in uint32_t ndescs, 3068 __in efx_evq_t *eep, 3069 __in uint32_t label, 3070 __in uint32_t instance, 3071 __in efsys_mem_t *esmp, 3072 __in const efx_mcdi_init_rxq_params_t *params) 3073 { 3074 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 3075 efx_mcdi_req_t req; 3076 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_RXQ_V5_IN_LEN, 3077 MC_CMD_INIT_RXQ_V5_OUT_LEN); 3078 int npages = efx_rxq_nbufs(enp, ndescs); 3079 int i; 3080 efx_qword_t *dma_addr; 3081 uint64_t addr; 3082 efx_rc_t rc; 3083 uint32_t dma_mode; 3084 boolean_t want_outer_classes; 3085 boolean_t no_cont_ev; 3086 3087 EFSYS_ASSERT3U(ndescs, <=, encp->enc_rxq_max_ndescs); 3088 3089 if ((esmp == NULL) || 3090 (EFSYS_MEM_SIZE(esmp) < efx_rxq_size(enp, ndescs))) { 3091 rc = EINVAL; 3092 goto fail1; 3093 } 3094 3095 no_cont_ev = (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV); 3096 if ((no_cont_ev == B_TRUE) && (params->disable_scatter == B_FALSE)) { 3097 /* TODO: Support scatter in NO_CONT_EV mode */ 3098 rc = EINVAL; 3099 goto fail2; 3100 } 3101 3102 if (params->ps_buf_size > 0) 3103 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM; 3104 else if (params->es_bufs_per_desc > 0) 3105 dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER; 3106 else 3107 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET; 3108 3109 if (encp->enc_tunnel_encapsulations_supported != 0 && 3110 !params->want_inner_classes) { 3111 /* 3112 * WANT_OUTER_CLASSES can only be specified on hardware which 3113 * supports tunnel encapsulation offloads, even though it is 3114 * effectively the behaviour the hardware gives. 3115 * 3116 * Also, on hardware which does support such offloads, older 3117 * firmware rejects the flag if the offloads are not supported 3118 * by the current firmware variant, which means this may fail if 3119 * the capabilities are not updated when the firmware variant 3120 * changes. This is not an issue on newer firmware, as it was 3121 * changed in bug 69842 (v6.4.2.1007) to permit this flag to be 3122 * specified on all firmware variants. 
3123 */ 3124 want_outer_classes = B_TRUE; 3125 } else { 3126 want_outer_classes = B_FALSE; 3127 } 3128 3129 req.emr_cmd = MC_CMD_INIT_RXQ; 3130 req.emr_in_buf = payload; 3131 req.emr_in_length = MC_CMD_INIT_RXQ_V5_IN_LEN; 3132 req.emr_out_buf = payload; 3133 req.emr_out_length = MC_CMD_INIT_RXQ_V5_OUT_LEN; 3134 3135 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs); 3136 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, eep->ee_index); 3137 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label); 3138 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance); 3139 MCDI_IN_POPULATE_DWORD_10(req, INIT_RXQ_EXT_IN_FLAGS, 3140 INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0, 3141 INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0, 3142 INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0, 3143 INIT_RXQ_EXT_IN_CRC_MODE, 0, 3144 INIT_RXQ_EXT_IN_FLAG_PREFIX, 1, 3145 INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, params->disable_scatter, 3146 INIT_RXQ_EXT_IN_DMA_MODE, 3147 dma_mode, 3148 INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, params->ps_buf_size, 3149 INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes, 3150 INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV, no_cont_ev); 3151 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0); 3152 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, enp->en_vport_id); 3153 3154 if (params->es_bufs_per_desc > 0) { 3155 MCDI_IN_SET_DWORD(req, 3156 INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET, 3157 params->es_bufs_per_desc); 3158 MCDI_IN_SET_DWORD(req, 3159 INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, params->es_max_dma_len); 3160 MCDI_IN_SET_DWORD(req, 3161 INIT_RXQ_V3_IN_ES_PACKET_STRIDE, params->es_buf_stride); 3162 MCDI_IN_SET_DWORD(req, 3163 INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT, 3164 params->hol_block_timeout); 3165 } 3166 3167 if (encp->enc_init_rxq_with_buffer_size) 3168 MCDI_IN_SET_DWORD(req, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, 3169 params->buf_size); 3170 3171 MCDI_IN_SET_DWORD(req, INIT_RXQ_V5_IN_RX_PREFIX_ID, params->prefix_id); 3172 3173 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR); 3174 addr = EFSYS_MEM_ADDR(esmp); 3175 3176 for (i = 0; i < npages; i++) { 3177 EFX_POPULATE_QWORD_2(*dma_addr, 3178 EFX_DWORD_1, (uint32_t)(addr >> 32), 3179 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); 3180 3181 dma_addr++; 3182 addr += EFX_BUF_SIZE; 3183 } 3184 3185 efx_mcdi_execute(enp, &req); 3186 3187 if (req.emr_rc != 0) { 3188 rc = req.emr_rc; 3189 goto fail3; 3190 } 3191 3192 return (0); 3193 3194 fail3: 3195 EFSYS_PROBE(fail3); 3196 fail2: 3197 EFSYS_PROBE(fail2); 3198 fail1: 3199 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3200 3201 return (rc); 3202 } 3203 3204 __checkReturn efx_rc_t 3205 efx_mcdi_fini_rxq( 3206 __in efx_nic_t *enp, 3207 __in uint32_t instance) 3208 { 3209 efx_mcdi_req_t req; 3210 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_RXQ_IN_LEN, 3211 MC_CMD_FINI_RXQ_OUT_LEN); 3212 efx_rc_t rc; 3213 3214 req.emr_cmd = MC_CMD_FINI_RXQ; 3215 req.emr_in_buf = payload; 3216 req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN; 3217 req.emr_out_buf = payload; 3218 req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN; 3219 3220 MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance); 3221 3222 efx_mcdi_execute_quiet(enp, &req); 3223 3224 if (req.emr_rc != 0) { 3225 rc = req.emr_rc; 3226 goto fail1; 3227 } 3228 3229 return (0); 3230 3231 fail1: 3232 /* 3233 * EALREADY is not an error, but indicates that the MC has rebooted and 3234 * that the RXQ has already been destroyed. 
3235 */ 3236 if (rc != EALREADY) 3237 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3238 3239 return (rc); 3240 } 3241 3242 __checkReturn efx_rc_t 3243 efx_mcdi_init_txq( 3244 __in efx_nic_t *enp, 3245 __in uint32_t ndescs, 3246 __in uint32_t target_evq, 3247 __in uint32_t label, 3248 __in uint32_t instance, 3249 __in uint16_t flags, 3250 __in efsys_mem_t *esmp) 3251 { 3252 efx_mcdi_req_t req; 3253 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_TXQ_EXT_IN_LEN, 3254 MC_CMD_INIT_TXQ_OUT_LEN); 3255 efx_qword_t *dma_addr; 3256 uint64_t addr; 3257 int npages; 3258 int i; 3259 efx_rc_t rc; 3260 3261 EFSYS_ASSERT(MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM >= 3262 efx_txq_nbufs(enp, enp->en_nic_cfg.enc_txq_max_ndescs)); 3263 3264 if ((esmp == NULL) || 3265 (EFSYS_MEM_SIZE(esmp) < efx_txq_size(enp, ndescs))) { 3266 rc = EINVAL; 3267 goto fail1; 3268 } 3269 3270 npages = efx_txq_nbufs(enp, ndescs); 3271 if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) { 3272 rc = EINVAL; 3273 goto fail2; 3274 } 3275 3276 req.emr_cmd = MC_CMD_INIT_TXQ; 3277 req.emr_in_buf = payload; 3278 req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages); 3279 req.emr_out_buf = payload; 3280 req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN; 3281 3282 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs); 3283 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq); 3284 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label); 3285 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance); 3286 3287 MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS, 3288 INIT_TXQ_IN_FLAG_BUFF_MODE, 0, 3289 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, 3290 (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1, 3291 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, 3292 (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1, 3293 INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN, 3294 (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0, 3295 INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN, 3296 (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, 3297 INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 
1 : 0, 3298 INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0, 3299 INIT_TXQ_IN_CRC_MODE, 0, 3300 INIT_TXQ_IN_FLAG_TIMESTAMP, 0); 3301 3302 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0); 3303 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, enp->en_vport_id); 3304 3305 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR); 3306 addr = EFSYS_MEM_ADDR(esmp); 3307 3308 for (i = 0; i < npages; i++) { 3309 EFX_POPULATE_QWORD_2(*dma_addr, 3310 EFX_DWORD_1, (uint32_t)(addr >> 32), 3311 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); 3312 3313 dma_addr++; 3314 addr += EFX_BUF_SIZE; 3315 } 3316 3317 efx_mcdi_execute(enp, &req); 3318 3319 if (req.emr_rc != 0) { 3320 rc = req.emr_rc; 3321 goto fail3; 3322 } 3323 3324 return (0); 3325 3326 fail3: 3327 EFSYS_PROBE(fail3); 3328 fail2: 3329 EFSYS_PROBE(fail2); 3330 fail1: 3331 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3332 3333 return (rc); 3334 } 3335 3336 __checkReturn efx_rc_t 3337 efx_mcdi_fini_txq( 3338 __in efx_nic_t *enp, 3339 __in uint32_t instance) 3340 { 3341 efx_mcdi_req_t req; 3342 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_TXQ_IN_LEN, 3343 MC_CMD_FINI_TXQ_OUT_LEN); 3344 efx_rc_t rc; 3345 3346 req.emr_cmd = MC_CMD_FINI_TXQ; 3347 req.emr_in_buf = payload; 3348 req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN; 3349 req.emr_out_buf = payload; 3350 req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN; 3351 3352 MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance); 3353 3354 efx_mcdi_execute_quiet(enp, &req); 3355 3356 if (req.emr_rc != 0) { 3357 rc = req.emr_rc; 3358 goto fail1; 3359 } 3360 3361 return (0); 3362 3363 fail1: 3364 /* 3365 * EALREADY is not an error, but indicates that the MC has rebooted and 3366 * that the TXQ has already been destroyed. 3367 */ 3368 if (rc != EALREADY) 3369 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3370 3371 return (rc); 3372 } 3373 3374 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */ 3375 3376 __checkReturn efx_rc_t 3377 efx_mcdi_get_nic_addr_info( 3378 __in efx_nic_t *enp, 3379 __out uint32_t *mapping_typep) 3380 { 3381 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_DESC_ADDR_INFO_IN_LEN, 3382 MC_CMD_GET_DESC_ADDR_INFO_OUT_LEN); 3383 efx_mcdi_req_t req; 3384 efx_rc_t rc; 3385 3386 req.emr_cmd = MC_CMD_GET_DESC_ADDR_INFO; 3387 req.emr_in_buf = payload; 3388 req.emr_in_length = MC_CMD_GET_DESC_ADDR_INFO_IN_LEN; 3389 req.emr_out_buf = payload; 3390 req.emr_out_length = MC_CMD_GET_DESC_ADDR_INFO_OUT_LEN; 3391 3392 efx_mcdi_execute_quiet(enp, &req); 3393 3394 if (req.emr_rc != 0) { 3395 rc = req.emr_rc; 3396 goto fail1; 3397 } 3398 3399 if (req.emr_out_length_used < MC_CMD_GET_DESC_ADDR_INFO_OUT_LEN) { 3400 rc = EMSGSIZE; 3401 goto fail2; 3402 } 3403 3404 *mapping_typep = 3405 MCDI_OUT_DWORD(req, GET_DESC_ADDR_INFO_OUT_MAPPING_TYPE); 3406 3407 return (0); 3408 3409 fail2: 3410 EFSYS_PROBE(fail2); 3411 fail1: 3412 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3413 3414 return (rc); 3415 } 3416 3417 __checkReturn efx_rc_t 3418 efx_mcdi_get_nic_addr_regions( 3419 __in efx_nic_t *enp, 3420 __out efx_nic_dma_region_info_t *endrip) 3421 { 3422 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_DESC_ADDR_REGIONS_IN_LEN, 3423 MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMAX_MCDI2); 3424 efx_xword_t *regions; 3425 efx_mcdi_req_t req; 3426 efx_rc_t rc; 3427 size_t alloc_size; 3428 unsigned int nregions; 3429 unsigned int i; 3430 3431 req.emr_cmd = MC_CMD_GET_DESC_ADDR_REGIONS; 3432 req.emr_in_buf = payload; 3433 req.emr_in_length = MC_CMD_GET_DESC_ADDR_REGIONS_IN_LEN; 3434 req.emr_out_buf = payload; 3435 req.emr_out_length = MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMAX_MCDI2; 3436 3437 
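	/*
	 * The response is variable-length; the number of DMA address regions
	 * is derived from emr_out_length_used after the request completes.
	 */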
efx_mcdi_execute_quiet(enp, &req); 3438 3439 if (req.emr_rc != 0) { 3440 rc = req.emr_rc; 3441 goto fail1; 3442 } 3443 3444 if (req.emr_out_length_used < 3445 MC_CMD_GET_DESC_ADDR_REGIONS_OUT_LENMIN) { 3446 rc = EMSGSIZE; 3447 goto fail2; 3448 } 3449 3450 nregions = MC_CMD_GET_DESC_ADDR_REGIONS_OUT_REGIONS_NUM( 3451 req.emr_out_length_used); 3452 3453 EFX_STATIC_ASSERT(sizeof (*regions) == DESC_ADDR_REGION_LEN); 3454 regions = MCDI_OUT2(req, efx_xword_t, 3455 GET_DESC_ADDR_REGIONS_OUT_REGIONS); 3456 3457 alloc_size = nregions * sizeof(endrip->endri_regions[0]); 3458 if (alloc_size / sizeof (endrip->endri_regions[0]) != nregions) { 3459 rc = ENOMEM; 3460 goto fail3; 3461 } 3462 3463 EFSYS_KMEM_ALLOC(enp->en_esip, 3464 alloc_size, 3465 endrip->endri_regions); 3466 if (endrip->endri_regions == NULL) { 3467 rc = ENOMEM; 3468 goto fail4; 3469 } 3470 3471 endrip->endri_count = nregions; 3472 for (i = 0; i < nregions; ++i) { 3473 efx_nic_dma_region_t *region_info; 3474 3475 region_info = &endrip->endri_regions[i]; 3476 3477 region_info->endr_inuse = B_FALSE; 3478 3479 region_info->endr_nic_base = 3480 MCDI_OUT_INDEXED_MEMBER_QWORD(req, 3481 GET_DESC_ADDR_REGIONS_OUT_REGIONS, i, 3482 DESC_ADDR_REGION_DESC_ADDR_BASE); 3483 3484 region_info->endr_trgt_base = 3485 MCDI_OUT_INDEXED_MEMBER_QWORD(req, 3486 GET_DESC_ADDR_REGIONS_OUT_REGIONS, i, 3487 DESC_ADDR_REGION_TRGT_ADDR_BASE); 3488 3489 region_info->endr_window_log2 = 3490 MCDI_OUT_INDEXED_MEMBER_DWORD(req, 3491 GET_DESC_ADDR_REGIONS_OUT_REGIONS, i, 3492 DESC_ADDR_REGION_WINDOW_SIZE_LOG2); 3493 3494 region_info->endr_align_log2 = 3495 MCDI_OUT_INDEXED_MEMBER_DWORD(req, 3496 GET_DESC_ADDR_REGIONS_OUT_REGIONS, i, 3497 DESC_ADDR_REGION_TRGT_ADDR_ALIGN_LOG2); 3498 } 3499 3500 return (0); 3501 3502 fail4: 3503 EFSYS_PROBE(fail4); 3504 fail3: 3505 EFSYS_PROBE(fail3); 3506 fail2: 3507 EFSYS_PROBE(fail2); 3508 fail1: 3509 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3510 3511 return (rc); 3512 } 3513 3514 __checkReturn efx_rc_t 3515 efx_mcdi_set_nic_addr_regions( 3516 __in efx_nic_t *enp, 3517 __in const efx_nic_dma_region_info_t *endrip) 3518 { 3519 EFX_MCDI_DECLARE_BUF(payload, 3520 MC_CMD_SET_DESC_ADDR_REGIONS_IN_LENMAX_MCDI2, 3521 MC_CMD_SET_DESC_ADDR_REGIONS_OUT_LEN); 3522 efx_qword_t *trgt_addr_base; 3523 efx_mcdi_req_t req; 3524 unsigned int i; 3525 efx_rc_t rc; 3526 3527 if (endrip->endri_count > 3528 MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MAXNUM) { 3529 rc = EINVAL; 3530 goto fail1; 3531 } 3532 3533 req.emr_cmd = MC_CMD_SET_DESC_ADDR_REGIONS; 3534 req.emr_in_buf = payload; 3535 req.emr_in_length = 3536 MC_CMD_SET_DESC_ADDR_REGIONS_IN_LEN(endrip->endri_count); 3537 req.emr_out_buf = payload; 3538 req.emr_out_length = MC_CMD_SET_DESC_ADDR_REGIONS_OUT_LEN; 3539 3540 EFX_STATIC_ASSERT(sizeof (*trgt_addr_base) == 3541 MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_LEN); 3542 trgt_addr_base = MCDI_OUT2(req, efx_qword_t, 3543 SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE); 3544 3545 for (i = 0; i < endrip->endri_count; ++i) { 3546 const efx_nic_dma_region_t *region_info; 3547 3548 region_info = &endrip->endri_regions[i]; 3549 3550 if (region_info->endr_inuse != B_TRUE) 3551 continue; 3552 3553 EFX_STATIC_ASSERT(sizeof (1U) * 8 >= 3554 MC_CMD_SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE_MAXNUM); 3555 MCDI_IN_SET_DWORD(req, 3556 SET_DESC_ADDR_REGIONS_IN_SET_REGION_MASK, 1U << i); 3557 3558 MCDI_IN_SET_INDEXED_QWORD(req, 3559 SET_DESC_ADDR_REGIONS_IN_TRGT_ADDR_BASE, i, 3560 region_info->endr_trgt_base); 3561 } 3562 3563 efx_mcdi_execute_quiet(enp, &req); 3564 3565 if 
(req.emr_rc != 0) { 3566 rc = req.emr_rc; 3567 goto fail2; 3568 } 3569 3570 return (0); 3571 3572 fail2: 3573 EFSYS_PROBE(fail2); 3574 fail1: 3575 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3576 3577 return (rc); 3578 } 3579 3580 #endif /* EFSYS_OPT_MCDI */ 3581
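/*
 * Editorial usage sketch (not part of the driver): reading the SFF-8472
 * base ID page of an SFP+ module through efx_mcdi_phy_module_get_info()
 * above. The enp handle and error handling are the caller's responsibility;
 * EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE is the 0xA0 device address defined
 * by this library.
 *
 *	uint8_t page[EFX_PHY_MEDIA_INFO_PAGE_SIZE];
 *	efx_rc_t rc;
 *
 *	rc = efx_mcdi_phy_module_get_info(enp,
 *	    EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE, 0, sizeof (page), page);
 *
 * On success, page[0] holds the SFF-8024 transceiver identifier, e.g.
 * EFX_SFF_TRANSCEIVER_ID_SFP (0x03), and the rest of the page follows the
 * SFF-8472 base ID layout.
 */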