/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2008-2019 Solarflare Communications Inc.
 */

#include "efx.h"
#include "efx_impl.h"

#if EFSYS_OPT_MCDI

/*
 * There are three versions of the MCDI interface:
 *  - MCDIv0: Siena BootROM. Transport uses MCDIv1 headers.
 *  - MCDIv1: Siena firmware and Huntington BootROM.
 *  - MCDIv2: EF10 firmware (Huntington/Medford) and Medford BootROM.
 *            Transport uses MCDIv2 headers.
 *
 * MCDIv2 Header NOT_EPOCH flag
 * ----------------------------
 * A new epoch begins at initial startup or after an MC reboot, and defines
 * when the MC should reject stale MCDI requests.
 *
 * The first MCDI request sent by the host should contain NOT_EPOCH=0, and all
 * subsequent requests (until the next MC reboot) should contain NOT_EPOCH=1.
 *
 * After rebooting the MC will fail all requests with NOT_EPOCH=1 by writing a
 * response with ERROR=1 and DATALEN=0 until a request is seen with
 * NOT_EPOCH=0.
 */



#if EFSYS_OPT_SIENA

static const efx_mcdi_ops_t	__efx_mcdi_siena_ops = {
	siena_mcdi_init,		/* emco_init */
	siena_mcdi_send_request,	/* emco_send_request */
	siena_mcdi_poll_reboot,		/* emco_poll_reboot */
	siena_mcdi_poll_response,	/* emco_poll_response */
	siena_mcdi_read_response,	/* emco_read_response */
	siena_mcdi_fini,		/* emco_fini */
	siena_mcdi_feature_supported,	/* emco_feature_supported */
	siena_mcdi_get_timeout,		/* emco_get_timeout */
};

#endif	/* EFSYS_OPT_SIENA */

#if EFX_OPTS_EF10()

static const efx_mcdi_ops_t	__efx_mcdi_ef10_ops = {
	ef10_mcdi_init,			/* emco_init */
	ef10_mcdi_send_request,		/* emco_send_request */
	ef10_mcdi_poll_reboot,		/* emco_poll_reboot */
	ef10_mcdi_poll_response,	/* emco_poll_response */
	ef10_mcdi_read_response,	/* emco_read_response */
	ef10_mcdi_fini,			/* emco_fini */
	ef10_mcdi_feature_supported,	/* emco_feature_supported */
	ef10_mcdi_get_timeout,		/* emco_get_timeout */
};

#endif	/* EFX_OPTS_EF10() */

#if EFSYS_OPT_RIVERHEAD

static const efx_mcdi_ops_t	__efx_mcdi_rhead_ops = {
	ef10_mcdi_init,			/* emco_init */
	ef10_mcdi_send_request,		/* emco_send_request */
	ef10_mcdi_poll_reboot,		/* emco_poll_reboot */
	ef10_mcdi_poll_response,	/* emco_poll_response */
	ef10_mcdi_read_response,	/* emco_read_response */
	ef10_mcdi_fini,			/* emco_fini */
	ef10_mcdi_feature_supported,	/* emco_feature_supported */
	ef10_mcdi_get_timeout,		/* emco_get_timeout */
};

#endif	/* EFSYS_OPT_RIVERHEAD */


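/*
 * Descriptive note (added): efx_mcdi_init() below binds the family-specific
 * MCDI method table and the caller-supplied transport to the NIC. Note that
 * the Riverhead entry above reuses the EF10 method implementations. If the
 * selected family advertises EFX_FEATURE_MCDI_DMA, the transport must supply
 * a DMA buffer for the request/response payloads.
 */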
	__checkReturn	efx_rc_t
efx_mcdi_init(
	__in		efx_nic_t *enp,
	__in		const efx_mcdi_transport_t *emtp)
{
	const efx_mcdi_ops_t *emcop;
	efx_rc_t rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0);

	switch (enp->en_family) {
#if EFSYS_OPT_SIENA
	case EFX_FAMILY_SIENA:
		emcop = &__efx_mcdi_siena_ops;
		break;
#endif	/* EFSYS_OPT_SIENA */

#if EFSYS_OPT_HUNTINGTON
	case EFX_FAMILY_HUNTINGTON:
		emcop = &__efx_mcdi_ef10_ops;
		break;
#endif	/* EFSYS_OPT_HUNTINGTON */

#if EFSYS_OPT_MEDFORD
	case EFX_FAMILY_MEDFORD:
		emcop = &__efx_mcdi_ef10_ops;
		break;
#endif	/* EFSYS_OPT_MEDFORD */

#if EFSYS_OPT_MEDFORD2
	case EFX_FAMILY_MEDFORD2:
		emcop = &__efx_mcdi_ef10_ops;
		break;
#endif	/* EFSYS_OPT_MEDFORD2 */

#if EFSYS_OPT_RIVERHEAD
	case EFX_FAMILY_RIVERHEAD:
		emcop = &__efx_mcdi_rhead_ops;
		break;
#endif	/* EFSYS_OPT_RIVERHEAD */

	default:
		EFSYS_ASSERT(0);
		rc = ENOTSUP;
		goto fail1;
	}

	if (enp->en_features & EFX_FEATURE_MCDI_DMA) {
		/* MCDI requires a DMA buffer in host memory */
		if ((emtp == NULL) || (emtp->emt_dma_mem) == NULL) {
			rc = EINVAL;
			goto fail2;
		}
	}
	enp->en_mcdi.em_emtp = emtp;

	if (emcop != NULL && emcop->emco_init != NULL) {
		if ((rc = emcop->emco_init(enp, emtp)) != 0)
			goto fail3;
	}

	enp->en_mcdi.em_emcop = emcop;
	enp->en_mod_flags |= EFX_MOD_MCDI;

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	enp->en_mcdi.em_emcop = NULL;
	enp->en_mcdi.em_emtp = NULL;
	enp->en_mod_flags &= ~EFX_MOD_MCDI;

	return (rc);
}

			void
efx_mcdi_fini(
	__in		efx_nic_t *enp)
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI);

	if (emcop != NULL && emcop->emco_fini != NULL)
		emcop->emco_fini(enp);

	emip->emi_port = 0;
	emip->emi_aborted = 0;

	enp->en_mcdi.em_emcop = NULL;
	enp->en_mod_flags &= ~EFX_MOD_MCDI;
}

			void
efx_mcdi_new_epoch(
	__in		efx_nic_t *enp)
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	efsys_lock_state_t state;

	/* Start a new epoch (allow fresh MCDI requests to succeed) */
	EFSYS_LOCK(enp->en_eslp, state);
	emip->emi_new_epoch = B_TRUE;
	EFSYS_UNLOCK(enp->en_eslp, state);
}

static			void
efx_mcdi_send_request(
	__in		efx_nic_t *enp,
	__in		void *hdrp,
	__in		size_t hdr_len,
	__in		void *sdup,
	__in		size_t sdu_len)
{
	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;

	emcop->emco_send_request(enp, hdrp, hdr_len, sdup, sdu_len);
}

static			efx_rc_t
efx_mcdi_poll_reboot(
	__in		efx_nic_t *enp)
{
	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
	efx_rc_t rc;

	rc = emcop->emco_poll_reboot(enp);
	return (rc);
}

static			boolean_t
efx_mcdi_poll_response(
	__in		efx_nic_t *enp)
{
	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
	boolean_t available;

	available = emcop->emco_poll_response(enp);
	return (available);
}

static			void
efx_mcdi_read_response(
	__in		efx_nic_t *enp,
	__out		void *bufferp,
	__in		size_t offset,
	__in		size_t length)
{
	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;

	emcop->emco_read_response(enp, bufferp, offset, length);
}

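/*
 * Descriptive note (added): efx_mcdi_request_start() posts a request to the
 * MC. Completion is then reported either by polling with
 * efx_mcdi_request_poll() or, when ev_cpl is requested, by an MCDI completion
 * event handled via efx_mcdi_ev_cpl(). Illustrative polling usage from a
 * transport (the caller's local names here are hypothetical):
 *
 *	efx_mcdi_request_start(enp, &req, B_FALSE);
 *	while (!efx_mcdi_request_poll(enp))
 *		EFSYS_SPIN(delay_us);
 *	if (req.emr_rc != 0)
 *		handle the failure
 */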
			void
efx_mcdi_request_start(
	__in		efx_nic_t *enp,
	__in		efx_mcdi_req_t *emrp,
	__in		boolean_t ev_cpl)
{
#if EFSYS_OPT_MCDI_LOGGING
	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
#endif
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	efx_dword_t hdr[2];
	size_t hdr_len;
	unsigned int max_version;
	unsigned int seq;
	unsigned int xflags;
	boolean_t new_epoch;
	efsys_lock_state_t state;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

	/*
	 * efx_mcdi_request_start() is naturally serialised against both
	 * efx_mcdi_request_poll() and efx_mcdi_ev_cpl()/efx_mcdi_ev_death(),
	 * by virtue of there only being one outstanding MCDI request.
	 * Unfortunately, upper layers may also call efx_mcdi_request_abort()
	 * at any time, to time out a pending MCDI request. That request may
	 * then subsequently complete, meaning efx_mcdi_ev_cpl() or
	 * efx_mcdi_ev_death() may end up running in parallel with
	 * efx_mcdi_request_start(). This race is handled by ensuring that
	 * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the
	 * en_eslp lock.
	 */
	EFSYS_LOCK(enp->en_eslp, state);
	EFSYS_ASSERT(emip->emi_pending_req == NULL);
	emip->emi_pending_req = emrp;
	emip->emi_ev_cpl = ev_cpl;
	emip->emi_poll_cnt = 0;
	seq = emip->emi_seq++ & EFX_MASK32(MCDI_HEADER_SEQ);
	new_epoch = emip->emi_new_epoch;
	max_version = emip->emi_max_version;
	EFSYS_UNLOCK(enp->en_eslp, state);

	xflags = 0;
	if (ev_cpl)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	/*
	 * Huntington firmware supports MCDIv2, but the Huntington BootROM only
	 * supports MCDIv1. Use MCDIv1 headers for MCDIv1 commands where
	 * possible to support this.
	 */
	if ((max_version >= 2) &&
	    ((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) ||
	    (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1) ||
	    (emrp->emr_out_length > MCDI_CTL_SDU_LEN_MAX_V1))) {
		/* Construct MCDI v2 header */
		hdr_len = sizeof (hdr);
		EFX_POPULATE_DWORD_8(hdr[0],
		    MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
		    MCDI_HEADER_RESYNC, 1,
		    MCDI_HEADER_DATALEN, 0,
		    MCDI_HEADER_SEQ, seq,
		    MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
		    MCDI_HEADER_ERROR, 0,
		    MCDI_HEADER_RESPONSE, 0,
		    MCDI_HEADER_XFLAGS, xflags);

		EFX_POPULATE_DWORD_2(hdr[1],
		    MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd,
		    MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length);
	} else {
		/* Construct MCDI v1 header */
		hdr_len = sizeof (hdr[0]);
		EFX_POPULATE_DWORD_8(hdr[0],
		    MCDI_HEADER_CODE, emrp->emr_cmd,
		    MCDI_HEADER_RESYNC, 1,
		    MCDI_HEADER_DATALEN, emrp->emr_in_length,
		    MCDI_HEADER_SEQ, seq,
		    MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
		    MCDI_HEADER_ERROR, 0,
		    MCDI_HEADER_RESPONSE, 0,
		    MCDI_HEADER_XFLAGS, xflags);
	}

#if EFSYS_OPT_MCDI_LOGGING
	if (emtp->emt_logger != NULL) {
		emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_REQUEST,
		    &hdr[0], hdr_len,
		    emrp->emr_in_buf, emrp->emr_in_length);
	}
#endif /* EFSYS_OPT_MCDI_LOGGING */

	efx_mcdi_send_request(enp, &hdr[0], hdr_len,
	    emrp->emr_in_buf, emrp->emr_in_length);
}

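/*
 * Descriptive note (added): efx_mcdi_read_response_header() parses the MCDI
 * response header (v1, or the two-dword v2 extension), checks that it matches
 * the pending request's command and sequence number, and extracts the MCDI
 * error code and argument on failure. Results are reported through
 * emrp->emr_rc and emrp->emr_out_length_used.
 */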
static			void
efx_mcdi_read_response_header(
	__in		efx_nic_t *enp,
	__inout		efx_mcdi_req_t *emrp)
{
#if EFSYS_OPT_MCDI_LOGGING
	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
#endif /* EFSYS_OPT_MCDI_LOGGING */
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	efx_dword_t hdr[2];
	unsigned int hdr_len;
	unsigned int data_len;
	unsigned int seq;
	unsigned int cmd;
	unsigned int error;
	efx_rc_t rc;

	EFSYS_ASSERT(emrp != NULL);

	efx_mcdi_read_response(enp, &hdr[0], 0, sizeof (hdr[0]));
	hdr_len = sizeof (hdr[0]);

	cmd = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE);
	seq = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_SEQ);
	error = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_ERROR);

	if (cmd != MC_CMD_V2_EXTN) {
		data_len = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_DATALEN);
	} else {
		efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
		hdr_len += sizeof (hdr[1]);

		cmd = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD);
		data_len =
		    EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
	}

	if (error && (data_len == 0)) {
		/* The MC has rebooted since the request was sent. */
		EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
		efx_mcdi_poll_reboot(enp);
		rc = EIO;
		goto fail1;
	}
#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
	if (((cmd != emrp->emr_cmd) && (emrp->emr_cmd != MC_CMD_PROXY_CMD)) ||
#else
	if ((cmd != emrp->emr_cmd) ||
#endif
	    (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
		/* Response is for a different request */
		rc = EIO;
		goto fail2;
	}
	if (error) {
		efx_dword_t err[2];
		unsigned int err_len = MIN(data_len, sizeof (err));
		int err_code = MC_CMD_ERR_EPROTO;
		int err_arg = 0;

		/* Read error code (and arg num for MCDI v2 commands) */
		efx_mcdi_read_response(enp, &err, hdr_len, err_len);

		if (err_len >= (MC_CMD_ERR_CODE_OFST + sizeof (efx_dword_t)))
			err_code = EFX_DWORD_FIELD(err[0], EFX_DWORD_0);
#ifdef WITH_MCDI_V2
		if (err_len >= (MC_CMD_ERR_ARG_OFST + sizeof (efx_dword_t)))
			err_arg = EFX_DWORD_FIELD(err[1], EFX_DWORD_0);
#endif
		emrp->emr_err_code = err_code;
		emrp->emr_err_arg = err_arg;

#if EFSYS_OPT_MCDI_PROXY_AUTH
		if ((err_code == MC_CMD_ERR_PROXY_PENDING) &&
		    (err_len == sizeof (err))) {
			/*
			 * The MCDI request would normally fail with EPERM, but
			 * firmware has forwarded it to an authorization agent
			 * attached to a privileged PF.
			 *
			 * Save the authorization request handle. The client
			 * must wait for a PROXY_RESPONSE event, or timeout.
			 */
			emrp->emr_proxy_handle = err_arg;
		}
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

#if EFSYS_OPT_MCDI_LOGGING
		if (emtp->emt_logger != NULL) {
			emtp->emt_logger(emtp->emt_context,
			    EFX_LOG_MCDI_RESPONSE,
			    &hdr[0], hdr_len,
			    &err[0], err_len);
		}
#endif /* EFSYS_OPT_MCDI_LOGGING */

		if (!emrp->emr_quiet) {
			EFSYS_PROBE3(mcdi_err_arg, int, emrp->emr_cmd,
			    int, err_code, int, err_arg);
		}

		rc = efx_mcdi_request_errcode(err_code);
		goto fail3;
	}

	emrp->emr_rc = 0;
	emrp->emr_out_length_used = data_len;
#if EFSYS_OPT_MCDI_PROXY_AUTH
	emrp->emr_proxy_handle = 0;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
	return;

fail3:
fail2:
fail1:
	emrp->emr_rc = rc;
	emrp->emr_out_length_used = 0;
}

static			void
efx_mcdi_finish_response(
	__in		efx_nic_t *enp,
	__in		efx_mcdi_req_t *emrp)
{
#if EFSYS_OPT_MCDI_LOGGING
	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
#endif /* EFSYS_OPT_MCDI_LOGGING */
	efx_dword_t hdr[2];
	unsigned int hdr_len;
	size_t bytes;
	unsigned int resp_off;
#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
	unsigned int resp_cmd;
	boolean_t proxied_cmd_resp = B_FALSE;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */

	if (emrp->emr_out_buf == NULL)
		return;

	/* Read the command header to detect MCDI response format */
	hdr_len = sizeof (hdr[0]);
	efx_mcdi_read_response(enp, &hdr[0], 0, hdr_len);
	if (EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) {
		/*
		 * Read the actual payload length. The length given in the
		 * event is only correct for responses with the V1 format.
		 */
		efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1]));
		hdr_len += sizeof (hdr[1]);
		resp_off = hdr_len;

		emrp->emr_out_length_used = EFX_DWORD_FIELD(hdr[1],
		    MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
		/*
		 * A proxy MCDI command is executed by the PF on behalf of
		 * one of its VFs. The command to be proxied follows
		 * immediately afterward in the host buffer.
		 * The complete response of the inner PROXY_CMD call should be
		 * copied to the output buffer so that it can be returned to
		 * the requesting function in the MC_CMD_PROXY_COMPLETE
		 * payload.
		 */
		resp_cmd =
		    EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD);
		proxied_cmd_resp = ((emrp->emr_cmd == MC_CMD_PROXY_CMD) &&
		    (resp_cmd != MC_CMD_PROXY_CMD));
		if (proxied_cmd_resp) {
			resp_off = 0;
			emrp->emr_out_length_used += hdr_len;
		}
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */
	} else {
		resp_off = hdr_len;
	}

	/* Copy payload out into caller supplied buffer */
	bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length);
	efx_mcdi_read_response(enp, emrp->emr_out_buf, resp_off, bytes);

	/* Report bytes copied to caller (response message may be larger) */
	emrp->emr_out_length_used = bytes;

#if EFSYS_OPT_MCDI_LOGGING
	if (emtp->emt_logger != NULL) {
		emtp->emt_logger(emtp->emt_context,
		    EFX_LOG_MCDI_RESPONSE,
		    &hdr[0], hdr_len,
		    emrp->emr_out_buf, bytes);
	}
#endif /* EFSYS_OPT_MCDI_LOGGING */
}

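/*
 * Descriptive note (added): efx_mcdi_request_poll() returns B_FALSE while the
 * pending request is still outstanding (or the hardware is unavailable), and
 * B_TRUE once the request has completed; on completion the caller must check
 * emr_rc for the result.
 */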
	__checkReturn	boolean_t
efx_mcdi_request_poll(
	__in		efx_nic_t *enp)
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	efx_mcdi_req_t *emrp;
	efsys_lock_state_t state;
	efx_rc_t rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

	/* Serialise against post-watchdog efx_mcdi_ev* */
	EFSYS_LOCK(enp->en_eslp, state);

	EFSYS_ASSERT(emip->emi_pending_req != NULL);
	EFSYS_ASSERT(!emip->emi_ev_cpl);
	emrp = emip->emi_pending_req;

	/* Check if hardware is unavailable */
	if (efx_nic_hw_unavailable(enp)) {
		EFSYS_UNLOCK(enp->en_eslp, state);
		return (B_FALSE);
	}

	/* Check for reboot atomically w.r.t efx_mcdi_request_start */
	if (emip->emi_poll_cnt++ == 0) {
		if ((rc = efx_mcdi_poll_reboot(enp)) != 0) {
			emip->emi_pending_req = NULL;
			EFSYS_UNLOCK(enp->en_eslp, state);

			/* Reboot/Assertion */
			if (rc == EIO || rc == EINTR)
				efx_mcdi_raise_exception(enp, emrp, rc);

			goto fail1;
		}
	}

	/* Check if a response is available */
	if (efx_mcdi_poll_response(enp) == B_FALSE) {
		EFSYS_UNLOCK(enp->en_eslp, state);
		return (B_FALSE);
	}

	/* Read the response header */
	efx_mcdi_read_response_header(enp, emrp);

	/* Request complete */
	emip->emi_pending_req = NULL;

	/* Ensure stale MCDI requests fail after an MC reboot. */
	emip->emi_new_epoch = B_FALSE;

	EFSYS_UNLOCK(enp->en_eslp, state);

	if ((rc = emrp->emr_rc) != 0)
		goto fail2;

	efx_mcdi_finish_response(enp, emrp);
	return (B_TRUE);

fail2:
	if (!emrp->emr_quiet)
		EFSYS_PROBE(fail2);
fail1:
	if (!emrp->emr_quiet)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (B_TRUE);
}

	__checkReturn	boolean_t
efx_mcdi_request_abort(
	__in		efx_nic_t *enp)
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	efx_mcdi_req_t *emrp;
	boolean_t aborted;
	efsys_lock_state_t state;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

	/*
	 * efx_mcdi_ev_* may have already completed this event, and be
	 * spinning/blocked on the upper layer lock. So it *is* legitimate
	 * for emi_pending_req to be NULL. If there is a pending event
	 * completed request, then provide a "credit" to allow
	 * efx_mcdi_ev_cpl() to accept a single spurious completion.
	 */
	EFSYS_LOCK(enp->en_eslp, state);
	emrp = emip->emi_pending_req;
	aborted = (emrp != NULL);
	if (aborted) {
		emip->emi_pending_req = NULL;

		/* Error the request */
		emrp->emr_out_length_used = 0;
		emrp->emr_rc = ETIMEDOUT;

		/* Provide a credit for seqno/emr_pending_req mismatches */
		if (emip->emi_ev_cpl)
			++emip->emi_aborted;

		/*
		 * The upper layer has called us, so we don't
		 * need to complete the request.
		 */
	}
	EFSYS_UNLOCK(enp->en_eslp, state);

	return (aborted);
}

			void
efx_mcdi_get_timeout(
	__in		efx_nic_t *enp,
	__in		efx_mcdi_req_t *emrp,
	__out		uint32_t *timeoutp)
{
	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;

	emcop->emco_get_timeout(enp, emrp, timeoutp);
}

	__checkReturn	efx_rc_t
efx_mcdi_request_errcode(
	__in		unsigned int err)
{

	switch (err) {
		/* MCDI v1 */
	case MC_CMD_ERR_EPERM:
		return (EACCES);
	case MC_CMD_ERR_ENOENT:
		return (ENOENT);
	case MC_CMD_ERR_EINTR:
		return (EINTR);
	case MC_CMD_ERR_EACCES:
		return (EACCES);
	case MC_CMD_ERR_EBUSY:
		return (EBUSY);
	case MC_CMD_ERR_EINVAL:
		return (EINVAL);
	case MC_CMD_ERR_EDEADLK:
		return (EDEADLK);
	case MC_CMD_ERR_ENOSYS:
		return (ENOTSUP);
	case MC_CMD_ERR_ETIME:
		return (ETIMEDOUT);
	case MC_CMD_ERR_ENOTSUP:
		return (ENOTSUP);
	case MC_CMD_ERR_EALREADY:
		return (EALREADY);

		/* MCDI v2 */
	case MC_CMD_ERR_EEXIST:
		return (EEXIST);
#ifdef MC_CMD_ERR_EAGAIN
	case MC_CMD_ERR_EAGAIN:
		return (EAGAIN);
#endif
#ifdef MC_CMD_ERR_ENOSPC
	case MC_CMD_ERR_ENOSPC:
		return (ENOSPC);
#endif
	case MC_CMD_ERR_ERANGE:
		return (ERANGE);

	case MC_CMD_ERR_ALLOC_FAIL:
		return (ENOMEM);
	case MC_CMD_ERR_NO_VADAPTOR:
		return (ENOENT);
	case MC_CMD_ERR_NO_EVB_PORT:
		return (ENOENT);
	case MC_CMD_ERR_NO_VSWITCH:
		return (ENODEV);
	case MC_CMD_ERR_VLAN_LIMIT:
		return (EINVAL);
	case MC_CMD_ERR_BAD_PCI_FUNC:
		return (ENODEV);
	case MC_CMD_ERR_BAD_VLAN_MODE:
		return (EINVAL);
	case MC_CMD_ERR_BAD_VSWITCH_TYPE:
		return (EINVAL);
	case MC_CMD_ERR_BAD_VPORT_TYPE:
		return (EINVAL);
	case MC_CMD_ERR_MAC_EXIST:
		return (EEXIST);

	case MC_CMD_ERR_PROXY_PENDING:
		return (EAGAIN);

	default:
		EFSYS_PROBE1(mc_pcol_error, int, err);
		return (EIO);
	}
}

			void
efx_mcdi_raise_exception(
	__in		efx_nic_t *enp,
	__in_opt	efx_mcdi_req_t *emrp,
	__in		int rc)
{
	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
	efx_mcdi_exception_t exception;

	/* Reboot or Assertion failure only */
	EFSYS_ASSERT(rc == EIO || rc == EINTR);

	/*
	 * If MC_CMD_REBOOT causes a reboot (dependent on parameters),
	 * then the EIO is not worthy of an exception.
	 */
	if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO)
		return;

	exception = (rc == EIO)
	    ? EFX_MCDI_EXCEPTION_MC_REBOOT
	    : EFX_MCDI_EXCEPTION_MC_BADASSERT;

	emtp->emt_exception(emtp->emt_context, exception);
}

			void
efx_mcdi_execute(
	__in		efx_nic_t *enp,
	__inout		efx_mcdi_req_t *emrp)
{
	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;

	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

	emrp->emr_quiet = B_FALSE;
	emtp->emt_execute(emtp->emt_context, emrp);
}

			void
efx_mcdi_execute_quiet(
	__in		efx_nic_t *enp,
	__inout		efx_mcdi_req_t *emrp)
{
	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;

	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

	emrp->emr_quiet = B_TRUE;
	emtp->emt_execute(emtp->emt_context, emrp);
}

			void
efx_mcdi_ev_cpl(
	__in		efx_nic_t *enp,
	__in		unsigned int seq,
	__in		unsigned int outlen,
	__in		int errcode)
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
	efx_mcdi_req_t *emrp;
	efsys_lock_state_t state;

	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

	/*
	 * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start()
	 * when we're completing an aborted request.
	 */
	EFSYS_LOCK(enp->en_eslp, state);
	if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl ||
	    (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) {
		EFSYS_ASSERT(emip->emi_aborted > 0);
		if (emip->emi_aborted > 0)
			--emip->emi_aborted;
		EFSYS_UNLOCK(enp->en_eslp, state);
		return;
	}

	emrp = emip->emi_pending_req;
	emip->emi_pending_req = NULL;
	EFSYS_UNLOCK(enp->en_eslp, state);

	if (emip->emi_max_version >= 2) {
		/* MCDIv2 response details do not fit into an event. */
		efx_mcdi_read_response_header(enp, emrp);
	} else {
		if (errcode != 0) {
			if (!emrp->emr_quiet) {
				EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd,
				    int, errcode);
			}
			emrp->emr_out_length_used = 0;
			emrp->emr_rc = efx_mcdi_request_errcode(errcode);
		} else {
			emrp->emr_out_length_used = outlen;
			emrp->emr_rc = 0;
		}
	}
	if (emrp->emr_rc == 0)
		efx_mcdi_finish_response(enp, emrp);

	emtp->emt_ev_cpl(emtp->emt_context);
}

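/*
 * Descriptive note (added): MCDI proxy authorization support. When an
 * unprivileged function issues a privileged command, firmware may forward it
 * to an authorizing entity and fail the original request with
 * MC_CMD_ERR_PROXY_PENDING (see efx_mcdi_read_response_header() above); the
 * helpers below let the client retrieve the proxy handle and handle the
 * subsequent PROXY_RESPONSE event.
 */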
#if EFSYS_OPT_MCDI_PROXY_AUTH

	__checkReturn	efx_rc_t
efx_mcdi_get_proxy_handle(
	__in		efx_nic_t *enp,
	__in		efx_mcdi_req_t *emrp,
	__out		uint32_t *handlep)
{
	efx_rc_t rc;

	_NOTE(ARGUNUSED(enp))

	/*
	 * Return the proxy handle from an MCDI request that returned with the
	 * error MC_CMD_ERR_PROXY_PENDING. This handle is used to wait for a
	 * matching PROXY_RESPONSE event.
	 */
	if ((emrp == NULL) || (handlep == NULL)) {
		rc = EINVAL;
		goto fail1;
	}
	if ((emrp->emr_rc != 0) &&
	    (emrp->emr_err_code == MC_CMD_ERR_PROXY_PENDING)) {
		*handlep = emrp->emr_proxy_handle;
		rc = 0;
	} else {
		*handlep = 0;
		rc = ENOENT;
	}
	return (rc);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);
	return (rc);
}

			void
efx_mcdi_ev_proxy_response(
	__in		efx_nic_t *enp,
	__in		unsigned int handle,
	__in		unsigned int status)
{
	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
	efx_rc_t rc;

	/*
	 * Handle results of an authorization request for a privileged MCDI
	 * command. If authorization was granted then we must re-issue the
	 * original MCDI request. If authorization failed or timed out,
	 * then the original MCDI request should be completed with the
	 * result code from this event.
	 */
	rc = (status == 0) ? 0 : efx_mcdi_request_errcode(status);

	emtp->emt_ev_proxy_response(emtp->emt_context, handle, rc);
}
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
			void
efx_mcdi_ev_proxy_request(
	__in		efx_nic_t *enp,
	__in		unsigned int index)
{
	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;

	if (emtp->emt_ev_proxy_request != NULL)
		emtp->emt_ev_proxy_request(emtp->emt_context, index);
}
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */
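/*
 * Descriptive note (added): efx_mcdi_ev_death() handles an MC reboot or
 * assertion-failure event. Any outstanding request is failed, an exception is
 * raised to the transport, and a new epoch is started so that subsequent
 * requests are accepted by the rebooted MC.
 */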
			void
efx_mcdi_ev_death(
	__in		efx_nic_t *enp,
	__in		int rc)
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp;
	efx_mcdi_req_t *emrp = NULL;
	boolean_t ev_cpl;
	efsys_lock_state_t state;

	/*
	 * The MCDI request (if there is one) has been terminated, either
	 * by a BADASSERT or REBOOT event.
	 *
	 * If there is an outstanding event-completed MCDI operation, then we
	 * will never receive the completion event (because both MCDI
	 * completions and BADASSERT events are sent to the same evq). So
	 * complete this MCDI op.
	 *
	 * This function might run in parallel with efx_mcdi_request_poll()
	 * for poll completed mcdi requests, and also with
	 * efx_mcdi_request_start() for post-watchdog completions.
	 */
	EFSYS_LOCK(enp->en_eslp, state);
	emrp = emip->emi_pending_req;
	ev_cpl = emip->emi_ev_cpl;
	if (emrp != NULL && emip->emi_ev_cpl) {
		emip->emi_pending_req = NULL;

		emrp->emr_out_length_used = 0;
		emrp->emr_rc = rc;
		++emip->emi_aborted;
	}

	/*
	 * Since we're running in parallel with a request, consume the
	 * status word before dropping the lock.
	 */
	if (rc == EIO || rc == EINTR) {
		EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US);
		(void) efx_mcdi_poll_reboot(enp);
		emip->emi_new_epoch = B_TRUE;
	}

	EFSYS_UNLOCK(enp->en_eslp, state);

	efx_mcdi_raise_exception(enp, emrp, rc);

	if (emrp != NULL && ev_cpl)
		emtp->emt_ev_cpl(emtp->emt_context);
}

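/*
 * Descriptive note (added): efx_mcdi_get_version() reads the MC firmware
 * version. If EFX_MCDI_VERSION_BOARD_INFO is set in flags, extended board
 * information (serial number, name, revision) is also requested and, when
 * reported by firmware, returned in emv_board_info.
 */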
	__checkReturn	efx_rc_t
efx_mcdi_get_version(
	__in		efx_nic_t *enp,
	__in		uint32_t flags,
	__out		efx_mcdi_version_t *verp)
{
	efx_nic_board_info_t *board_infop = &verp->emv_board_info;
	EFX_MCDI_DECLARE_BUF(payload,
	    MC_CMD_GET_VERSION_EXT_IN_LEN,
	    MC_CMD_GET_VERSION_V2_OUT_LEN);
	efx_word_t *ver_words;
	uint16_t version[4];
	efx_mcdi_req_t req;
	uint32_t firmware;
	efx_rc_t rc;

	EFX_STATIC_ASSERT(sizeof (verp->emv_version) ==
	    MC_CMD_GET_VERSION_OUT_VERSION_LEN);
	EFX_STATIC_ASSERT(sizeof (verp->emv_firmware) ==
	    MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN);

	EFX_STATIC_ASSERT(EFX_MCDI_VERSION_BOARD_INFO ==
	    (1U << MC_CMD_GET_VERSION_V2_OUT_BOARD_EXT_INFO_PRESENT_LBN));

	EFX_STATIC_ASSERT(sizeof (board_infop->enbi_serial) ==
	    MC_CMD_GET_VERSION_V2_OUT_BOARD_SERIAL_LEN);
	EFX_STATIC_ASSERT(sizeof (board_infop->enbi_name) ==
	    MC_CMD_GET_VERSION_V2_OUT_BOARD_NAME_LEN);
	EFX_STATIC_ASSERT(sizeof (board_infop->enbi_revision) ==
	    MC_CMD_GET_VERSION_V2_OUT_BOARD_REVISION_LEN);

	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

	req.emr_cmd = MC_CMD_GET_VERSION;
	req.emr_in_buf = payload;
	req.emr_out_buf = payload;

	if ((flags & EFX_MCDI_VERSION_BOARD_INFO) != 0) {
		/* Request basic + extended version information. */
		req.emr_in_length = MC_CMD_GET_VERSION_EXT_IN_LEN;
		req.emr_out_length = MC_CMD_GET_VERSION_V2_OUT_LEN;
	} else {
		/* Request only basic version information. */
		req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN;
		req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN;
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	/* bootrom support */
	if (req.emr_out_length_used == MC_CMD_GET_VERSION_V0_OUT_LEN) {
		version[0] = version[1] = version[2] = version[3] = 0;
		firmware = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);
		goto out;
	}

	if (req.emr_out_length_used < req.emr_out_length) {
		rc = EMSGSIZE;
		goto fail2;
	}

	ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION);
	version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0);
	version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0);
	version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0);
	version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0);
	firmware = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE);

out:
	memset(verp, 0, sizeof (*verp));

	verp->emv_version[0] = version[0];
	verp->emv_version[1] = version[1];
	verp->emv_version[2] = version[2];
	verp->emv_version[3] = version[3];
	verp->emv_firmware = firmware;

	verp->emv_flags = MCDI_OUT_DWORD(req, GET_VERSION_V2_OUT_FLAGS);
	verp->emv_flags &= flags;

	if ((verp->emv_flags & EFX_MCDI_VERSION_BOARD_INFO) != 0) {
		memcpy(board_infop->enbi_serial,
		    MCDI_OUT2(req, char, GET_VERSION_V2_OUT_BOARD_SERIAL),
		    sizeof (board_infop->enbi_serial));
		memcpy(board_infop->enbi_name,
		    MCDI_OUT2(req, char, GET_VERSION_V2_OUT_BOARD_NAME),
		    sizeof (board_infop->enbi_name));
		board_infop->enbi_revision =
		    MCDI_OUT_DWORD(req, GET_VERSION_V2_OUT_BOARD_REVISION);
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

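/*
 * Descriptive note (added): efx_mcdi_get_boot_status() queries which firmware
 * partition the MC booted from (primary or secondary). Unprivileged functions
 * are not permitted to read this and get EACCES; efx_mcdi_version() below
 * handles that case.
 */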
static	__checkReturn	efx_rc_t
efx_mcdi_get_boot_status(
	__in		efx_nic_t *enp,
	__out		efx_mcdi_boot_t *statusp)
{
	EFX_MCDI_DECLARE_BUF(payload,
	    MC_CMD_GET_BOOT_STATUS_IN_LEN,
	    MC_CMD_GET_BOOT_STATUS_OUT_LEN);
	efx_mcdi_req_t req;
	efx_rc_t rc;

	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

	req.emr_cmd = MC_CMD_GET_BOOT_STATUS;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN;

	efx_mcdi_execute_quiet(enp, &req);

	/*
	 * NOTE: Unprivileged functions cannot access boot status,
	 *       so the MCDI request will return EACCES. This is
	 *       also checked in efx_mcdi_version.
	 */

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS,
	    GET_BOOT_STATUS_OUT_FLAGS_PRIMARY))
		*statusp = EFX_MCDI_BOOT_PRIMARY;
	else
		*statusp = EFX_MCDI_BOOT_SECONDARY;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn		efx_rc_t
efx_mcdi_version(
	__in			efx_nic_t *enp,
	__out_ecount_opt(4)	uint16_t versionp[4],
	__out_opt		uint32_t *buildp,
	__out_opt		efx_mcdi_boot_t *statusp)
{
	efx_mcdi_version_t ver;
	efx_mcdi_boot_t status;
	efx_rc_t rc;

	rc = efx_mcdi_get_version(enp, 0, &ver);
	if (rc != 0)
		goto fail1;

	/* The bootrom doesn't understand BOOT_STATUS */
	if (MC_FW_VERSION_IS_BOOTLOADER(ver.emv_firmware)) {
		status = EFX_MCDI_BOOT_ROM;
		goto out;
	}

	rc = efx_mcdi_get_boot_status(enp, &status);
	if (rc == EACCES) {
		/* Unprivileged functions cannot access BOOT_STATUS */
		status = EFX_MCDI_BOOT_PRIMARY;
		memset(ver.emv_version, 0, sizeof (ver.emv_version));
		ver.emv_firmware = 0;
	} else if (rc != 0) {
		goto fail2;
	}

out:
	if (versionp != NULL)
		memcpy(versionp, ver.emv_version, sizeof (ver.emv_version));
	if (buildp != NULL)
		*buildp = ver.emv_firmware;
	if (statusp != NULL)
		*statusp = status;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_get_capabilities(
	__in		efx_nic_t *enp,
	__out_opt	uint32_t *flagsp,
	__out_opt	uint16_t *rx_dpcpu_fw_idp,
	__out_opt	uint16_t *tx_dpcpu_fw_idp,
	__out_opt	uint32_t *flags2p,
	__out_opt	uint32_t *tso2ncp)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN,
	    MC_CMD_GET_CAPABILITIES_V2_OUT_LEN);
	boolean_t v2_capable;
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_GET_CAPABILITIES;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN;

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	if (flagsp != NULL)
		*flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1);

	if (rx_dpcpu_fw_idp != NULL)
		*rx_dpcpu_fw_idp = MCDI_OUT_WORD(req,
		    GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);

	if (tx_dpcpu_fw_idp != NULL)
		*tx_dpcpu_fw_idp = MCDI_OUT_WORD(req,
		    GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);

	if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
		v2_capable = B_FALSE;
	else
		v2_capable = B_TRUE;

	if (flags2p != NULL) {
		*flags2p = (v2_capable) ?
		    MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2) :
		    0;
	}

	if (tso2ncp != NULL) {
		*tso2ncp = (v2_capable) ?
		    MCDI_OUT_WORD(req,
			GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS) :
		    0;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

static	__checkReturn	efx_rc_t
efx_mcdi_do_reboot(
	__in		efx_nic_t *enp,
	__in		boolean_t after_assertion)
{
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_REBOOT_IN_LEN,
	    MC_CMD_REBOOT_OUT_LEN);
	efx_mcdi_req_t req;
	efx_rc_t rc;

	/*
	 * We could require the caller to have caused en_mod_flags=0 to
	 * call this function. This doesn't help the other port though,
	 * who's about to get the MC ripped out from underneath them.
	 * Since they have to cope with the subsequent fallout of MCDI
	 * failures, we should as well.
	 */
	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);

	req.emr_cmd = MC_CMD_REBOOT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_REBOOT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS,
	    (after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0));

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc == EACCES) {
		/* Unprivileged functions cannot reboot the MC. */
		goto out;
	}

	/* A successful reboot request returns EIO. */
	if (req.emr_rc != 0 && req.emr_rc != EIO) {
		rc = req.emr_rc;
		goto fail1;
	}

out:
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_reboot(
	__in		efx_nic_t *enp)
{
	return (efx_mcdi_do_reboot(enp, B_FALSE));
}

	__checkReturn	efx_rc_t
efx_mcdi_exit_assertion_handler(
	__in		efx_nic_t *enp)
{
	return (efx_mcdi_do_reboot(enp, B_TRUE));
}

	__checkReturn	efx_rc_t
efx_mcdi_read_assertion(
	__in		efx_nic_t *enp)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_ASSERTS_IN_LEN,
	    MC_CMD_GET_ASSERTS_OUT_LEN);
	const char *reason;
	unsigned int flags;
	unsigned int index;
	unsigned int ofst;
	int retry;
	efx_rc_t rc;

	/*
	 * Before we attempt to chat to the MC, we should verify that the MC
	 * isn't in its assertion handler, either due to a previous reboot,
	 * or because we're reinitializing due to an eec_exception().
	 *
	 * Use GET_ASSERTS to read any assertion state that may be present.
	 * Retry this command twice. Once because a boot-time assertion failure
	 * might cause the 1st MCDI request to fail. And once again because
	 * we might race with efx_mcdi_exit_assertion_handler() running on
	 * partner port(s) on the same NIC.
	 */
	retry = 2;
	do {
		(void) memset(payload, 0, sizeof (payload));
		req.emr_cmd = MC_CMD_GET_ASSERTS;
		req.emr_in_buf = payload;
		req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN;
		req.emr_out_buf = payload;
		req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN;

		MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1);
		efx_mcdi_execute_quiet(enp, &req);

	} while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0);

	if (req.emr_rc != 0) {
		if (req.emr_rc == EACCES) {
			/* Unprivileged functions cannot clear assertions. */
			goto out;
		}
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	/* Print out any assertion state recorded */
	flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return (0);

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP)
		? "illegal address trap"
		: "unknown assertion";
	EFSYS_PROBE3(mcpu_assertion,
	    const char *, reason, unsigned int,
	    MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS),
	    unsigned int,
	    MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers (r1 ... r31) */
	ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
	for (index = 1;
	    index < 1 + MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	    index++) {
		EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int,
		    EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst),
			EFX_DWORD_0));
		ofst += sizeof (efx_dword_t);
	}
	EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN);

out:
	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


/*
 * Internal routines for specific MCDI requests.
 */

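/*
 * Descriptive note (added): efx_mcdi_drv_attach() attaches to (or detaches
 * from) the NIC via MC_CMD_DRV_ATTACH. When a driver version string has been
 * set in enp->en_drv_version, the V2 request format is used so that the
 * version is reported to the MC.
 */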
	__checkReturn	efx_rc_t
efx_mcdi_drv_attach(
	__in		efx_nic_t *enp,
	__in		boolean_t attach)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRV_ATTACH_IN_V2_LEN,
	    MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_DRV_ATTACH;
	req.emr_in_buf = payload;
	if (enp->en_drv_version[0] == '\0') {
		req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN;
	} else {
		req.emr_in_length = MC_CMD_DRV_ATTACH_IN_V2_LEN;
	}
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN;

	/*
	 * Typically, client drivers use DONT_CARE for the datapath firmware
	 * type to ensure that the driver can attach to an unprivileged
	 * function. The datapath firmware type to use is controlled by the
	 * 'sfboot' utility.
	 * If a client driver wishes to attach with a specific datapath
	 * firmware type, it can be passed in the second argument of the
	 * efx_nic_probe API. One such example is the ESXi native driver,
	 * which first attempts to attach with the FULL_FEATURED datapath
	 * firmware type and falls back to the DONT_CARE datapath firmware
	 * type if MC_CMD_DRV_ATTACH fails.
	 */
	MCDI_IN_POPULATE_DWORD_2(req, DRV_ATTACH_IN_NEW_STATE,
	    DRV_ATTACH_IN_ATTACH, attach ? 1 : 0,
	    DRV_ATTACH_IN_SUBVARIANT_AWARE, EFSYS_OPT_FW_SUBVARIANT_AWARE);
	MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1);
	MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, enp->efv);

	if (req.emr_in_length >= MC_CMD_DRV_ATTACH_IN_V2_LEN) {
		EFX_STATIC_ASSERT(sizeof (enp->en_drv_version) ==
		    MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN);
		memcpy(MCDI_IN2(req, char, DRV_ATTACH_IN_V2_DRIVER_VERSION),
		    enp->en_drv_version,
		    MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN);
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_get_board_cfg(
	__in			efx_nic_t *enp,
	__out_opt		uint32_t *board_typep,
	__out_opt		efx_dword_t *capabilitiesp,
	__out_ecount_opt(6)	uint8_t mac_addrp[6])
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_BOARD_CFG_IN_LEN,
	    MC_CMD_GET_BOARD_CFG_OUT_LENMIN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_GET_BOARD_CFG;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMIN;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	if (mac_addrp != NULL) {
		uint8_t *addrp;

		if (emip->emi_port == 1) {
			addrp = MCDI_OUT2(req, uint8_t,
			    GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0);
		} else if (emip->emi_port == 2) {
			addrp = MCDI_OUT2(req, uint8_t,
			    GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1);
		} else {
			rc = EINVAL;
			goto fail3;
		}

		EFX_MAC_ADDR_COPY(mac_addrp, addrp);
	}

	if (capabilitiesp != NULL) {
		if (emip->emi_port == 1) {
			*capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
			    GET_BOARD_CFG_OUT_CAPABILITIES_PORT0);
		} else if (emip->emi_port == 2) {
			*capabilitiesp = *MCDI_OUT2(req, efx_dword_t,
			    GET_BOARD_CFG_OUT_CAPABILITIES_PORT1);
		} else {
			rc = EINVAL;
			goto fail4;
		}
	}

	if (board_typep != NULL) {
		*board_typep = MCDI_OUT_DWORD(req,
		    GET_BOARD_CFG_OUT_BOARD_TYPE);
	}

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_get_resource_limits(
	__in		efx_nic_t *enp,
	__out_opt	uint32_t *nevqp,
	__out_opt	uint32_t *nrxqp,
	__out_opt	uint32_t *ntxqp)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RESOURCE_LIMITS_IN_LEN,
	    MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	if (nevqp != NULL)
		*nevqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_EVQ);
	if (nrxqp != NULL)
		*nrxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ);
	if (ntxqp != NULL)
		*ntxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ);

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

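/*
 * Descriptive note (added): efx_mcdi_get_phy_cfg() reads the static PHY
 * configuration and caches it in the port and NIC configuration state: PHY
 * type, name and revision, fixed media type, supported capability mask, and
 * (where enabled) LED, flag, statistics and BIST support.
 */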
	__checkReturn	efx_rc_t
efx_mcdi_get_phy_cfg(
	__in		efx_nic_t *enp)
{
	efx_port_t *epp = &(enp->en_port);
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_CFG_IN_LEN,
	    MC_CMD_GET_PHY_CFG_OUT_LEN);
#if EFSYS_OPT_NAMES
	const char *namep;
	size_t namelen;
#endif
	uint32_t phy_media_type;
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_GET_PHY_CFG;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_GET_PHY_CFG_OUT_LEN;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE);
#if EFSYS_OPT_NAMES
	namep = MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME);
	namelen = MIN(sizeof (encp->enc_phy_name) - 1,
	    strnlen(namep, MC_CMD_GET_PHY_CFG_OUT_NAME_LEN));
	(void) memset(encp->enc_phy_name, 0,
	    sizeof (encp->enc_phy_name));
	memcpy(encp->enc_phy_name, namep, namelen);
#endif	/* EFSYS_OPT_NAMES */
	(void) memset(encp->enc_phy_revision, 0,
	    sizeof (encp->enc_phy_revision));
	memcpy(encp->enc_phy_revision,
	    MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION),
	    MIN(sizeof (encp->enc_phy_revision) - 1,
		MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN));
#if EFSYS_OPT_PHY_LED_CONTROL
	encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) |
	    (1 << EFX_PHY_LED_OFF) |
	    (1 << EFX_PHY_LED_ON));
#endif	/* EFSYS_OPT_PHY_LED_CONTROL */

	/* Get the media type of the fixed port, if recognised. */
	EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI);
	EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4);
	EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4);
	EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP);
	EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS);
	EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T);
	EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS);
	phy_media_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE);
	epp->ep_fixed_port_type = (efx_phy_media_type_t)phy_media_type;
	if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES)
		epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID;

	epp->ep_phy_cap_mask =
	    MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP);
#if EFSYS_OPT_PHY_FLAGS
	encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS);
#endif	/* EFSYS_OPT_PHY_FLAGS */

	encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT);

	/* Populate internal state */
	encp->enc_mcdi_mdio_channel =
	    (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL);

#if EFSYS_OPT_PHY_STATS
	encp->enc_mcdi_phy_stat_mask =
	    MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK);
#endif	/* EFSYS_OPT_PHY_STATS */

#if EFSYS_OPT_BIST
	encp->enc_bist_mask = 0;
	if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
	    GET_PHY_CFG_OUT_BIST_CABLE_SHORT))
		encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT);
	if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
	    GET_PHY_CFG_OUT_BIST_CABLE_LONG))
		encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_LONG);
	if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS,
	    GET_PHY_CFG_OUT_BIST))
		encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_NORMAL);
#endif	/* EFSYS_OPT_BIST */

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_firmware_update_supported(
	__in		efx_nic_t *enp,
	__out		boolean_t *supportedp)
{
	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
	efx_rc_t rc;

	if (emcop != NULL) {
		if ((rc = emcop->emco_feature_supported(enp,
		    EFX_MCDI_FEATURE_FW_UPDATE, supportedp)) != 0)
			goto fail1;
	} else {
		/* Earlier devices always supported updates */
		*supportedp = B_TRUE;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_macaddr_change_supported(
	__in		efx_nic_t *enp,
	__out		boolean_t *supportedp)
{
	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
	efx_rc_t rc;

	if (emcop != NULL) {
		if ((rc = emcop->emco_feature_supported(enp,
		    EFX_MCDI_FEATURE_MACADDR_CHANGE, supportedp)) != 0)
			goto fail1;
	} else {
		/* Earlier devices always supported MAC changes */
		*supportedp = B_TRUE;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_link_control_supported(
	__in		efx_nic_t *enp,
	__out		boolean_t *supportedp)
{
	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
	efx_rc_t rc;

	if (emcop != NULL) {
		if ((rc = emcop->emco_feature_supported(enp,
		    EFX_MCDI_FEATURE_LINK_CONTROL, supportedp)) != 0)
			goto fail1;
	} else {
		/* Earlier devices always supported link control */
		*supportedp = B_TRUE;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_mac_spoofing_supported(
	__in		efx_nic_t *enp,
	__out		boolean_t *supportedp)
{
	const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop;
	efx_rc_t rc;

	if (emcop != NULL) {
		if ((rc = emcop->emco_feature_supported(enp,
		    EFX_MCDI_FEATURE_MAC_SPOOFING, supportedp)) != 0)
			goto fail1;
	} else {
		/* Earlier devices always supported MAC spoofing */
		*supportedp = B_TRUE;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

#if EFSYS_OPT_BIST

#if EFX_OPTS_EF10()
/*
 * Enter bist offline mode. This is a fw mode which puts the NIC into a state
 * where memory BIST tests can be run and not much else can interfere or
 * happen. A reboot is required to exit this mode.
 */
	__checkReturn	efx_rc_t
efx_mcdi_bist_enable_offline(
	__in		efx_nic_t *enp)
{
	efx_mcdi_req_t req;
	efx_rc_t rc;

	EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN == 0);
	EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN == 0);

	req.emr_cmd = MC_CMD_ENABLE_OFFLINE_BIST;
	req.emr_in_buf = NULL;
	req.emr_in_length = 0;
	req.emr_out_buf = NULL;
	req.emr_out_length = 0;

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
#endif /* EFX_OPTS_EF10() */

	__checkReturn	efx_rc_t
efx_mcdi_bist_start(
	__in		efx_nic_t *enp,
	__in		efx_bist_type_t type)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_START_BIST_IN_LEN,
	    MC_CMD_START_BIST_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_START_BIST;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_START_BIST_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_START_BIST_OUT_LEN;

	switch (type) {
	case EFX_BIST_TYPE_PHY_NORMAL:
		MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST);
		break;
	case EFX_BIST_TYPE_PHY_CABLE_SHORT:
		MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
		    MC_CMD_PHY_BIST_CABLE_SHORT);
		break;
	case EFX_BIST_TYPE_PHY_CABLE_LONG:
		MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
		    MC_CMD_PHY_BIST_CABLE_LONG);
		break;
	case EFX_BIST_TYPE_MC_MEM:
		MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
		    MC_CMD_MC_MEM_BIST);
		break;
	case EFX_BIST_TYPE_SAT_MEM:
		MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
		    MC_CMD_PORT_MEM_BIST);
		break;
	case EFX_BIST_TYPE_REG:
		MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE,
		    MC_CMD_REG_BIST);
		break;
	default:
		EFSYS_ASSERT(0);
	}

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

#endif /* EFSYS_OPT_BIST */

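/*
 * Illustrative BIST flow (added note; completion polling is not handled in
 * this file):
 *
 *	(void) efx_mcdi_bist_enable_offline(enp);	(EF10 only)
 *	(void) efx_mcdi_bist_start(enp, EFX_BIST_TYPE_MC_MEM);
 *	... poll for completion, then reboot the MC to leave offline mode ...
 */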
/* Enable logging of some events (e.g. link state changes) */
	__checkReturn	efx_rc_t
efx_mcdi_log_ctrl(
	__in		efx_nic_t *enp)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LOG_CTRL_IN_LEN,
	    MC_CMD_LOG_CTRL_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_LOG_CTRL;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_LOG_CTRL_OUT_LEN;

	MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST,
	    MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ);
	MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


#if EFSYS_OPT_MAC_STATS

	__checkReturn	efx_rc_t
efx_mcdi_mac_stats(
	__in		efx_nic_t *enp,
	__in		uint32_t vport_id,
	__in_opt	efsys_mem_t *esmp,
	__in		efx_stats_action_t action,
	__in		uint16_t period_ms)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_MAC_STATS_IN_LEN,
	    MC_CMD_MAC_STATS_V2_OUT_DMA_LEN);
	int clear = (action == EFX_STATS_CLEAR);
	int upload = (action == EFX_STATS_UPLOAD);
	int enable = (action == EFX_STATS_ENABLE_NOEVENTS);
	int events = (action == EFX_STATS_ENABLE_EVENTS);
	int disable = (action == EFX_STATS_DISABLE);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_MAC_STATS;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_MAC_STATS_V2_OUT_DMA_LEN;

	MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD,
	    MAC_STATS_IN_DMA, upload,
	    MAC_STATS_IN_CLEAR, clear,
	    MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable,
	    MAC_STATS_IN_PERIODIC_ENABLE, enable | events,
	    MAC_STATS_IN_PERIODIC_NOEVENT, !events,
	    MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0);

	if (enable || events || upload) {
		const efx_nic_cfg_t *encp = &enp->en_nic_cfg;
		uint32_t bytes;

		/* Periodic stats or stats upload require a DMA buffer */
		if (esmp == NULL) {
			rc = EINVAL;
			goto fail1;
		}

		if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) {
			/* MAC stats count too small for legacy MAC stats */
			rc = ENOSPC;
			goto fail2;
		}

		bytes = encp->enc_mac_stats_nstats * sizeof (efx_qword_t);

		if (EFSYS_MEM_SIZE(esmp) < bytes) {
			/* DMA buffer too small */
			rc = ENOSPC;
			goto fail3;
		}

		MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO,
		    EFSYS_MEM_ADDR(esmp) & 0xffffffff);
		MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI,
		    EFSYS_MEM_ADDR(esmp) >> 32);
		MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes);
	}

	/*
	 * NOTE: Do not use EVB_PORT_ID_ASSIGNED when disabling periodic stats,
	 *	 as this may fail (and leave periodic DMA enabled) if the
	 *	 vadapter has already been deleted.
	 */
	MCDI_IN_SET_DWORD(req, MAC_STATS_IN_PORT_ID,
	    (disable ? EVB_PORT_ID_NULL : vport_id));
EVB_PORT_ID_NULL : vport_id));
2024
2025 efx_mcdi_execute(enp, &req);
2026
2027 if (req.emr_rc != 0) {
2028 /* EF10: Expect ENOENT if no DMA queues are initialised */
2029 if ((req.emr_rc != ENOENT) ||
2030 (enp->en_rx_qcount + enp->en_tx_qcount != 0)) {
2031 rc = req.emr_rc;
2032 goto fail4;
2033 }
2034 }
2035
2036 return (0);
2037
2038 fail4:
2039 EFSYS_PROBE(fail4);
2040 fail3:
2041 EFSYS_PROBE(fail3);
2042 fail2:
2043 EFSYS_PROBE(fail2);
2044 fail1:
2045 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2046
2047 return (rc);
2048 }
2049
2050 __checkReturn efx_rc_t
2051 efx_mcdi_mac_stats_clear(
2052 __in efx_nic_t *enp)
2053 {
2054 efx_rc_t rc;
2055
2056 if ((rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, NULL,
2057 EFX_STATS_CLEAR, 0)) != 0)
2058 goto fail1;
2059
2060 return (0);
2061
2062 fail1:
2063 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2064
2065 return (rc);
2066 }
2067
2068 __checkReturn efx_rc_t
2069 efx_mcdi_mac_stats_upload(
2070 __in efx_nic_t *enp,
2071 __in efsys_mem_t *esmp)
2072 {
2073 efx_rc_t rc;
2074
2075 /*
2076 * The MC DMAs aggregate statistics for our convenience, so we can
2077 * avoid having to pull the statistics buffer into the cache to
2078 * maintain cumulative statistics.
2079 */
2080 if ((rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp,
2081 EFX_STATS_UPLOAD, 0)) != 0)
2082 goto fail1;
2083
2084 return (0);
2085
2086 fail1:
2087 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2088
2089 return (rc);
2090 }
2091
2092 __checkReturn efx_rc_t
2093 efx_mcdi_mac_stats_periodic(
2094 __in efx_nic_t *enp,
2095 __in efsys_mem_t *esmp,
2096 __in uint16_t period_ms,
2097 __in boolean_t events)
2098 {
2099 efx_rc_t rc;
2100
2101 /*
2102 * The MC DMAs aggregate statistics for our convenience, so we can
2103 * avoid having to pull the statistics buffer into the cache to
2104 * maintain cumulative statistics.
2105 * Huntington uses a fixed 1sec period.
2106 * Medford uses a fixed 1sec period before v6.2.1.1033 firmware.
2107 */
2108 if (period_ms == 0)
2109 rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, NULL,
2110 EFX_STATS_DISABLE, 0);
2111 else if (events)
2112 rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp,
2113 EFX_STATS_ENABLE_EVENTS, period_ms);
2114 else
2115 rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp,
2116 EFX_STATS_ENABLE_NOEVENTS, period_ms);
2117
2118 if (rc != 0)
2119 goto fail1;
2120
2121 return (0);
2122
2123 fail1:
2124 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2125
2126 return (rc);
2127 }
2128
2129 #endif /* EFSYS_OPT_MAC_STATS */
2130
2131 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
2132
2133 /*
2134 * This function returns the pf and vf numbers of a function. If it is a pf, the
2135 * vf number is 0xffff. The vf number is the index of the vf on that
2136 * function. So if you have 3 vfs on pf 0 the 3 vfs will return (pf=0,vf=0),
2137 * (pf=0,vf=1), (pf=0,vf=2) and the pf will return (pf=0, vf=0xffff).
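 *
 * Minimal usage sketch (hypothetical caller, not part of this file;
 * assumes "enp" is a probed efx_nic_t with MCDI initialised):
 *
 *	uint32_t pf, vf;
 *
 *	if (efx_mcdi_get_function_info(enp, &pf, &vf) == 0 &&
 *	    vf == 0xffff) {
 *		... this function is a PF; pf holds its index ...
 *	}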
2138 */
2139 __checkReturn efx_rc_t
2140 efx_mcdi_get_function_info(
2141 __in efx_nic_t *enp,
2142 __out uint32_t *pfp,
2143 __out_opt uint32_t *vfp)
2144 {
2145 efx_mcdi_req_t req;
2146 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_FUNCTION_INFO_IN_LEN,
2147 MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
2148 efx_rc_t rc;
2149
2150 req.emr_cmd = MC_CMD_GET_FUNCTION_INFO;
2151 req.emr_in_buf = payload;
2152 req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN;
2153 req.emr_out_buf = payload;
2154 req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_LEN;
2155
2156 efx_mcdi_execute(enp, &req);
2157
2158 if (req.emr_rc != 0) {
2159 rc = req.emr_rc;
2160 goto fail1;
2161 }
2162
2163 if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) {
2164 rc = EMSGSIZE;
2165 goto fail2;
2166 }
2167
2168 *pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF);
2169 if (vfp != NULL)
2170 *vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF);
2171
2172 return (0);
2173
2174 fail2:
2175 EFSYS_PROBE(fail2);
2176 fail1:
2177 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2178
2179 return (rc);
2180 }
2181
2182 __checkReturn efx_rc_t
2183 efx_mcdi_privilege_mask(
2184 __in efx_nic_t *enp,
2185 __in uint32_t pf,
2186 __in uint32_t vf,
2187 __out uint32_t *maskp)
2188 {
2189 efx_mcdi_req_t req;
2190 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PRIVILEGE_MASK_IN_LEN,
2191 MC_CMD_PRIVILEGE_MASK_OUT_LEN);
2192 efx_rc_t rc;
2193
2194 req.emr_cmd = MC_CMD_PRIVILEGE_MASK;
2195 req.emr_in_buf = payload;
2196 req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN;
2197 req.emr_out_buf = payload;
2198 req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN;
2199
2200 MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION,
2201 PRIVILEGE_MASK_IN_FUNCTION_PF, pf,
2202 PRIVILEGE_MASK_IN_FUNCTION_VF, vf);
2203
2204 efx_mcdi_execute(enp, &req);
2205
2206 if (req.emr_rc != 0) {
2207 rc = req.emr_rc;
2208 goto fail1;
2209 }
2210
2211 if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) {
2212 rc = EMSGSIZE;
2213 goto fail2;
2214 }
2215
2216 *maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK);
2217
2218 return (0);
2219
2220 fail2:
2221 EFSYS_PROBE(fail2);
2222 fail1:
2223 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2224
2225 return (rc);
2226 }
2227
2228 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
2229
2230 __checkReturn efx_rc_t
2231 efx_mcdi_set_workaround(
2232 __in efx_nic_t *enp,
2233 __in uint32_t type,
2234 __in boolean_t enabled,
2235 __out_opt uint32_t *flagsp)
2236 {
2237 efx_mcdi_req_t req;
2238 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_WORKAROUND_IN_LEN,
2239 MC_CMD_WORKAROUND_EXT_OUT_LEN);
2240 efx_rc_t rc;
2241
2242 req.emr_cmd = MC_CMD_WORKAROUND;
2243 req.emr_in_buf = payload;
2244 req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN;
2245 req.emr_out_buf = payload;
2246 req.emr_out_length = MC_CMD_WORKAROUND_EXT_OUT_LEN;
2247
2248 MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, type);
2249 MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, enabled ?
1 : 0); 2250 2251 efx_mcdi_execute_quiet(enp, &req); 2252 2253 if (req.emr_rc != 0) { 2254 rc = req.emr_rc; 2255 goto fail1; 2256 } 2257 2258 if (flagsp != NULL) { 2259 if (req.emr_out_length_used >= MC_CMD_WORKAROUND_EXT_OUT_LEN) 2260 *flagsp = MCDI_OUT_DWORD(req, WORKAROUND_EXT_OUT_FLAGS); 2261 else 2262 *flagsp = 0; 2263 } 2264 2265 return (0); 2266 2267 fail1: 2268 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2269 2270 return (rc); 2271 } 2272 2273 2274 __checkReturn efx_rc_t 2275 efx_mcdi_get_workarounds( 2276 __in efx_nic_t *enp, 2277 __out_opt uint32_t *implementedp, 2278 __out_opt uint32_t *enabledp) 2279 { 2280 efx_mcdi_req_t req; 2281 EFX_MCDI_DECLARE_BUF(payload, 0, MC_CMD_GET_WORKAROUNDS_OUT_LEN); 2282 efx_rc_t rc; 2283 2284 req.emr_cmd = MC_CMD_GET_WORKAROUNDS; 2285 req.emr_in_buf = NULL; 2286 req.emr_in_length = 0; 2287 req.emr_out_buf = payload; 2288 req.emr_out_length = MC_CMD_GET_WORKAROUNDS_OUT_LEN; 2289 2290 efx_mcdi_execute(enp, &req); 2291 2292 if (req.emr_rc != 0) { 2293 rc = req.emr_rc; 2294 goto fail1; 2295 } 2296 2297 if (req.emr_out_length_used < MC_CMD_GET_WORKAROUNDS_OUT_LEN) { 2298 rc = EMSGSIZE; 2299 goto fail2; 2300 } 2301 2302 if (implementedp != NULL) { 2303 *implementedp = 2304 MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_IMPLEMENTED); 2305 } 2306 2307 if (enabledp != NULL) { 2308 *enabledp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_ENABLED); 2309 } 2310 2311 return (0); 2312 2313 fail2: 2314 EFSYS_PROBE(fail2); 2315 fail1: 2316 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2317 2318 return (rc); 2319 } 2320 2321 /* 2322 * Size of media information page in accordance with SFF-8472 and SFF-8436. 2323 * It is used in MCDI interface as well. 2324 */ 2325 #define EFX_PHY_MEDIA_INFO_PAGE_SIZE 0x80 2326 2327 /* 2328 * Transceiver identifiers from SFF-8024 Table 4-1. 
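 * The identifier is read from byte 0 of lower page 0 (see the probe in
 * efx_mcdi_phy_module_get_info() below) and selects which memory layout
 * the module uses.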
2329 */
2330 #define EFX_SFF_TRANSCEIVER_ID_SFP 0x03 /* SFP/SFP+/SFP28 */
2331 #define EFX_SFF_TRANSCEIVER_ID_QSFP 0x0c /* QSFP */
2332 #define EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS 0x0d /* QSFP+ or later */
2333 #define EFX_SFF_TRANSCEIVER_ID_QSFP28 0x11 /* QSFP28 or later */
2334
2335 static __checkReturn efx_rc_t
2336 efx_mcdi_get_phy_media_info(
2337 __in efx_nic_t *enp,
2338 __in uint32_t mcdi_page,
2339 __in uint8_t offset,
2340 __in uint8_t len,
2341 __out_bcount(len) uint8_t *data)
2342 {
2343 efx_mcdi_req_t req;
2344 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN,
2345 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(
2346 EFX_PHY_MEDIA_INFO_PAGE_SIZE));
2347 efx_rc_t rc;
2348
2349 EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE);
2350
2351 req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO;
2352 req.emr_in_buf = payload;
2353 req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN;
2354 req.emr_out_buf = payload;
2355 req.emr_out_length =
2356 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE);
2357
2358 MCDI_IN_SET_DWORD(req, GET_PHY_MEDIA_INFO_IN_PAGE, mcdi_page);
2359
2360 efx_mcdi_execute(enp, &req);
2361
2362 if (req.emr_rc != 0) {
2363 rc = req.emr_rc;
2364 goto fail1;
2365 }
2366
2367 if (req.emr_out_length_used !=
2368 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE)) {
2369 rc = EMSGSIZE;
2370 goto fail2;
2371 }
2372
2373 if (MCDI_OUT_DWORD(req, GET_PHY_MEDIA_INFO_OUT_DATALEN) !=
2374 EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
2375 rc = EIO;
2376 goto fail3;
2377 }
2378
2379 memcpy(data,
2380 MCDI_OUT2(req, uint8_t, GET_PHY_MEDIA_INFO_OUT_DATA) + offset,
2381 len);
2382
2383 return (0);
2384
2385 fail3:
2386 EFSYS_PROBE(fail3);
2387 fail2:
2388 EFSYS_PROBE(fail2);
2389 fail1:
2390 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2391
2392 return (rc);
2393 }
2394
2395 __checkReturn efx_rc_t
2396 efx_mcdi_phy_module_get_info(
2397 __in efx_nic_t *enp,
2398 __in uint8_t dev_addr,
2399 __in size_t offset,
2400 __in size_t len,
2401 __out_bcount(len) uint8_t *data)
2402 {
2403 efx_port_t *epp = &(enp->en_port);
2404 efx_rc_t rc;
2405 uint32_t mcdi_lower_page;
2406 uint32_t mcdi_upper_page;
2407 uint8_t id;
2408
2409 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE);
2410
2411 /*
2412 * Map device address to MC_CMD_GET_PHY_MEDIA_INFO pages.
2413 * The offset plus length interface allows access to page 0 only,
2414 * i.e. non-zero upper pages are not accessible.
2415 * See SFF-8472 section 4 Memory Organization and SFF-8436 section 7.6
2416 * QSFP+ Memory Map for details on how information is structured
2417 * and accessible.
2418 */
2419 switch (epp->ep_fixed_port_type) {
2420 case EFX_PHY_MEDIA_SFP_PLUS:
2421 case EFX_PHY_MEDIA_QSFP_PLUS:
2422 /* Port type supports modules */
2423 break;
2424 default:
2425 rc = ENOTSUP;
2426 goto fail1;
2427 }
2428
2429 /*
2430 * For all supported port types, MCDI page 0 offset 0 holds the
2431 * transceiver identifier. Probe to determine the data layout.
2432 * Definitions from SFF-8024 Table 4-1.
2433 */
2434 rc = efx_mcdi_get_phy_media_info(enp,
2435 0, 0, sizeof (id), &id);
2436 if (rc != 0)
2437 goto fail2;
2438
2439 switch (id) {
2440 case EFX_SFF_TRANSCEIVER_ID_SFP:
2441 /*
2442 * In accordance with SFF-8472 Diagnostic Monitoring
2443 * Interface for Optical Transceivers section 4 (Memory
2444 * Organization), two 2-wire addresses are defined.
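 * Conventionally these are 0xA0 for the base ID fields and 0xA2 for
 * the diagnostics area; each is mapped to a pair of MCDI pages below.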
2445 */
2446 switch (dev_addr) {
2447 /* Base information */
2448 case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE:
2449 /*
2450 * MCDI page 0 should be used to access lower
2451 * page 0 (0x00 - 0x7f) at the device address 0xA0.
2452 */
2453 mcdi_lower_page = 0;
2454 /*
2455 * MCDI page 1 should be used to access upper
2456 * page 0 (0x80 - 0xff) at the device address 0xA0.
2457 */
2458 mcdi_upper_page = 1;
2459 break;
2460 /* Diagnostics */
2461 case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM:
2462 /*
2463 * MCDI page 2 should be used to access lower
2464 * page 0 (0x00 - 0x7f) at the device address 0xA2.
2465 */
2466 mcdi_lower_page = 2;
2467 /*
2468 * MCDI page 3 should be used to access upper
2469 * page 0 (0x80 - 0xff) at the device address 0xA2.
2470 */
2471 mcdi_upper_page = 3;
2472 break;
2473 default:
2474 rc = ENOTSUP;
2475 goto fail3;
2476 }
2477 break;
2478 case EFX_SFF_TRANSCEIVER_ID_QSFP:
2479 case EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS:
2480 case EFX_SFF_TRANSCEIVER_ID_QSFP28:
2481 switch (dev_addr) {
2482 case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP:
2483 /*
2484 * MCDI page -1 should be used to access lower page 0
2485 * (0x00 - 0x7f).
2486 */
2487 mcdi_lower_page = (uint32_t)-1;
2488 /*
2489 * MCDI page 0 should be used to access upper page 0
2490 * (0x80 - 0xff).
2491 */
2492 mcdi_upper_page = 0;
2493 break;
2494 default:
2495 rc = ENOTSUP;
2496 goto fail3;
2497 }
2498 break;
2499 default:
2500 rc = ENOTSUP;
2501 goto fail3;
2502 }
2503
2504 EFX_STATIC_ASSERT(EFX_PHY_MEDIA_INFO_PAGE_SIZE <= 0xFF);
2505
2506 if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) {
2507 size_t read_len =
2508 MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset);
2509
2510 rc = efx_mcdi_get_phy_media_info(enp,
2511 mcdi_lower_page, (uint8_t)offset, (uint8_t)read_len, data);
2512 if (rc != 0)
2513 goto fail4;
2514
2515 data += read_len;
2516 len -= read_len;
2517
2518 offset = 0;
2519 } else {
2520 offset -= EFX_PHY_MEDIA_INFO_PAGE_SIZE;
2521 }
2522
2523 if (len > 0) {
2524 EFSYS_ASSERT3U(len, <=, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
2525 EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE);
2526
2527 rc = efx_mcdi_get_phy_media_info(enp,
2528 mcdi_upper_page, (uint8_t)offset, (uint8_t)len, data);
2529 if (rc != 0)
2530 goto fail5;
2531 }
2532
2533 return (0);
2534
2535 fail5:
2536 EFSYS_PROBE(fail5);
2537 fail4:
2538 EFSYS_PROBE(fail4);
2539 fail3:
2540 EFSYS_PROBE(fail3);
2541 fail2:
2542 EFSYS_PROBE(fail2);
2543 fail1:
2544 EFSYS_PROBE1(fail1, efx_rc_t, rc);
2545
2546 return (rc);
2547 }
2548
2549 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
2550
2551 #define INIT_EVQ_MAXNBUFS MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM
2552
2553 #if EFX_OPTS_EF10()
2554 # if (INIT_EVQ_MAXNBUFS < EF10_EVQ_MAXNBUFS)
2555 # error "INIT_EVQ_MAXNBUFS too small"
2556 # endif
2557 #endif /* EFX_OPTS_EF10 */
2558 #if EFSYS_OPT_RIVERHEAD
2559 # if (INIT_EVQ_MAXNBUFS < RHEAD_EVQ_MAXNBUFS)
2560 # error "INIT_EVQ_MAXNBUFS too small"
2561 # endif
2562 #endif /* EFSYS_OPT_RIVERHEAD */
2563
2564 __checkReturn efx_rc_t
2565 efx_mcdi_init_evq(
2566 __in efx_nic_t *enp,
2567 __in unsigned int instance,
2568 __in efsys_mem_t *esmp,
2569 __in size_t nevs,
2570 __in uint32_t irq,
2571 __in uint32_t us,
2572 __in uint32_t flags,
2573 __in boolean_t low_latency)
2574 {
2575 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
2576 efx_mcdi_req_t req;
2577 EFX_MCDI_DECLARE_BUF(payload,
2578 MC_CMD_INIT_EVQ_V2_IN_LEN(INIT_EVQ_MAXNBUFS),
2579 MC_CMD_INIT_EVQ_V2_OUT_LEN);
2580 boolean_t interrupting;
2581 int ev_extended_width;
2582 int ev_cut_through;
2583 int ev_merge;
2584 unsigned
int evq_type; 2585 efx_qword_t *dma_addr; 2586 uint64_t addr; 2587 int npages; 2588 int i; 2589 efx_rc_t rc; 2590 2591 npages = efx_evq_nbufs(enp, nevs, flags); 2592 if (npages > INIT_EVQ_MAXNBUFS) { 2593 rc = EINVAL; 2594 goto fail1; 2595 } 2596 2597 req.emr_cmd = MC_CMD_INIT_EVQ; 2598 req.emr_in_buf = payload; 2599 req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages); 2600 req.emr_out_buf = payload; 2601 req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN; 2602 2603 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs); 2604 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance); 2605 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq); 2606 2607 interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == 2608 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT); 2609 2610 if (encp->enc_init_evq_v2_supported) { 2611 /* 2612 * On Medford the low latency license is required to enable RX 2613 * and event cut through and to disable RX batching. If event 2614 * queue type in flags is auto, we let the firmware decide the 2615 * settings to use. If the adapter has a low latency license, 2616 * it will choose the best settings for low latency, otherwise 2617 * it will choose the best settings for throughput. 2618 */ 2619 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { 2620 case EFX_EVQ_FLAGS_TYPE_AUTO: 2621 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO; 2622 break; 2623 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: 2624 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT; 2625 break; 2626 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: 2627 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY; 2628 break; 2629 default: 2630 rc = EINVAL; 2631 goto fail2; 2632 } 2633 /* EvQ type controls merging, no manual settings */ 2634 ev_merge = 0; 2635 ev_cut_through = 0; 2636 } else { 2637 /* EvQ types other than manual are not supported */ 2638 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL; 2639 /* 2640 * On Huntington RX and TX event batching can only be requested 2641 * together (even if the datapath firmware doesn't actually 2642 * support RX batching). If event cut through is enabled no RX 2643 * batching will occur. 2644 * 2645 * So always enable RX and TX event batching, and enable event 2646 * cut through if we want low latency operation. 2647 */ 2648 ev_merge = 1; 2649 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { 2650 case EFX_EVQ_FLAGS_TYPE_AUTO: 2651 ev_cut_through = low_latency ? 1 : 0; 2652 break; 2653 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: 2654 ev_cut_through = 0; 2655 break; 2656 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: 2657 ev_cut_through = 1; 2658 break; 2659 default: 2660 rc = EINVAL; 2661 goto fail2; 2662 } 2663 } 2664 2665 /* 2666 * On EF100, extended width event queues have a different event 2667 * descriptor layout and are used to support descriptor proxy queues. 
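 * They are only requested below when the library is built with
 * EFSYS_OPT_EV_EXTENDED_WIDTH, the caller passes
 * EFX_EVQ_FLAGS_EXTENDED_WIDTH and the firmware reports support via
 * enc_init_evq_extended_width_supported.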
2668 */ 2669 ev_extended_width = 0; 2670 #if EFSYS_OPT_EV_EXTENDED_WIDTH 2671 if (encp->enc_init_evq_extended_width_supported) { 2672 if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) 2673 ev_extended_width = 1; 2674 } 2675 #endif 2676 2677 MCDI_IN_POPULATE_DWORD_8(req, INIT_EVQ_V2_IN_FLAGS, 2678 INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting, 2679 INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0, 2680 INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0, 2681 INIT_EVQ_V2_IN_FLAG_CUT_THRU, ev_cut_through, 2682 INIT_EVQ_V2_IN_FLAG_RX_MERGE, ev_merge, 2683 INIT_EVQ_V2_IN_FLAG_TX_MERGE, ev_merge, 2684 INIT_EVQ_V2_IN_FLAG_TYPE, evq_type, 2685 INIT_EVQ_V2_IN_FLAG_EXT_WIDTH, ev_extended_width); 2686 2687 /* If the value is zero then disable the timer */ 2688 if (us == 0) { 2689 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, 2690 MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS); 2691 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0); 2692 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0); 2693 } else { 2694 unsigned int ticks; 2695 2696 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) 2697 goto fail3; 2698 2699 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, 2700 MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF); 2701 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks); 2702 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks); 2703 } 2704 2705 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE, 2706 MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS); 2707 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0); 2708 2709 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR); 2710 addr = EFSYS_MEM_ADDR(esmp); 2711 2712 for (i = 0; i < npages; i++) { 2713 EFX_POPULATE_QWORD_2(*dma_addr, 2714 EFX_DWORD_1, (uint32_t)(addr >> 32), 2715 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); 2716 2717 dma_addr++; 2718 addr += EFX_BUF_SIZE; 2719 } 2720 2721 efx_mcdi_execute(enp, &req); 2722 2723 if (req.emr_rc != 0) { 2724 rc = req.emr_rc; 2725 goto fail4; 2726 } 2727 2728 if (encp->enc_init_evq_v2_supported) { 2729 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) { 2730 rc = EMSGSIZE; 2731 goto fail5; 2732 } 2733 EFSYS_PROBE1(mcdi_evq_flags, uint32_t, 2734 MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS)); 2735 } else { 2736 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) { 2737 rc = EMSGSIZE; 2738 goto fail6; 2739 } 2740 } 2741 2742 /* NOTE: ignore the returned IRQ param as firmware does not set it. */ 2743 2744 return (0); 2745 2746 fail6: 2747 EFSYS_PROBE(fail6); 2748 fail5: 2749 EFSYS_PROBE(fail5); 2750 fail4: 2751 EFSYS_PROBE(fail4); 2752 fail3: 2753 EFSYS_PROBE(fail3); 2754 fail2: 2755 EFSYS_PROBE(fail2); 2756 fail1: 2757 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2758 2759 return (rc); 2760 } 2761 2762 __checkReturn efx_rc_t 2763 efx_mcdi_fini_evq( 2764 __in efx_nic_t *enp, 2765 __in uint32_t instance) 2766 { 2767 efx_mcdi_req_t req; 2768 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN, 2769 MC_CMD_FINI_EVQ_OUT_LEN); 2770 efx_rc_t rc; 2771 2772 req.emr_cmd = MC_CMD_FINI_EVQ; 2773 req.emr_in_buf = payload; 2774 req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN; 2775 req.emr_out_buf = payload; 2776 req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN; 2777 2778 MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance); 2779 2780 efx_mcdi_execute_quiet(enp, &req); 2781 2782 if (req.emr_rc != 0) { 2783 rc = req.emr_rc; 2784 goto fail1; 2785 } 2786 2787 return (0); 2788 2789 fail1: 2790 /* 2791 * EALREADY is not an error, but indicates that the MC has rebooted and 2792 * that the EVQ has already been destroyed. 
2793 */ 2794 if (rc != EALREADY) 2795 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2796 2797 return (rc); 2798 } 2799 2800 __checkReturn efx_rc_t 2801 efx_mcdi_init_rxq( 2802 __in efx_nic_t *enp, 2803 __in uint32_t ndescs, 2804 __in efx_evq_t *eep, 2805 __in uint32_t label, 2806 __in uint32_t instance, 2807 __in efsys_mem_t *esmp, 2808 __in const efx_mcdi_init_rxq_params_t *params) 2809 { 2810 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 2811 efx_mcdi_req_t req; 2812 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_RXQ_V5_IN_LEN, 2813 MC_CMD_INIT_RXQ_V5_OUT_LEN); 2814 int npages = efx_rxq_nbufs(enp, ndescs); 2815 int i; 2816 efx_qword_t *dma_addr; 2817 uint64_t addr; 2818 efx_rc_t rc; 2819 uint32_t dma_mode; 2820 boolean_t want_outer_classes; 2821 boolean_t no_cont_ev; 2822 2823 EFSYS_ASSERT3U(ndescs, <=, encp->enc_rxq_max_ndescs); 2824 2825 if ((esmp == NULL) || 2826 (EFSYS_MEM_SIZE(esmp) < efx_rxq_size(enp, ndescs))) { 2827 rc = EINVAL; 2828 goto fail1; 2829 } 2830 2831 no_cont_ev = (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV); 2832 if ((no_cont_ev == B_TRUE) && (params->disable_scatter == B_FALSE)) { 2833 /* TODO: Support scatter in NO_CONT_EV mode */ 2834 rc = EINVAL; 2835 goto fail2; 2836 } 2837 2838 if (params->ps_buf_size > 0) 2839 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM; 2840 else if (params->es_bufs_per_desc > 0) 2841 dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER; 2842 else 2843 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET; 2844 2845 if (encp->enc_tunnel_encapsulations_supported != 0 && 2846 !params->want_inner_classes) { 2847 /* 2848 * WANT_OUTER_CLASSES can only be specified on hardware which 2849 * supports tunnel encapsulation offloads, even though it is 2850 * effectively the behaviour the hardware gives. 2851 * 2852 * Also, on hardware which does support such offloads, older 2853 * firmware rejects the flag if the offloads are not supported 2854 * by the current firmware variant, which means this may fail if 2855 * the capabilities are not updated when the firmware variant 2856 * changes. This is not an issue on newer firmware, as it was 2857 * changed in bug 69842 (v6.4.2.1007) to permit this flag to be 2858 * specified on all firmware variants. 
2859 */ 2860 want_outer_classes = B_TRUE; 2861 } else { 2862 want_outer_classes = B_FALSE; 2863 } 2864 2865 req.emr_cmd = MC_CMD_INIT_RXQ; 2866 req.emr_in_buf = payload; 2867 req.emr_in_length = MC_CMD_INIT_RXQ_V5_IN_LEN; 2868 req.emr_out_buf = payload; 2869 req.emr_out_length = MC_CMD_INIT_RXQ_V5_OUT_LEN; 2870 2871 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs); 2872 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, eep->ee_index); 2873 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label); 2874 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance); 2875 MCDI_IN_POPULATE_DWORD_10(req, INIT_RXQ_EXT_IN_FLAGS, 2876 INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0, 2877 INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0, 2878 INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0, 2879 INIT_RXQ_EXT_IN_CRC_MODE, 0, 2880 INIT_RXQ_EXT_IN_FLAG_PREFIX, 1, 2881 INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, params->disable_scatter, 2882 INIT_RXQ_EXT_IN_DMA_MODE, 2883 dma_mode, 2884 INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, params->ps_buf_size, 2885 INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes, 2886 INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV, no_cont_ev); 2887 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0); 2888 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, enp->en_vport_id); 2889 2890 if (params->es_bufs_per_desc > 0) { 2891 MCDI_IN_SET_DWORD(req, 2892 INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET, 2893 params->es_bufs_per_desc); 2894 MCDI_IN_SET_DWORD(req, 2895 INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, params->es_max_dma_len); 2896 MCDI_IN_SET_DWORD(req, 2897 INIT_RXQ_V3_IN_ES_PACKET_STRIDE, params->es_buf_stride); 2898 MCDI_IN_SET_DWORD(req, 2899 INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT, 2900 params->hol_block_timeout); 2901 } 2902 2903 if (encp->enc_init_rxq_with_buffer_size) 2904 MCDI_IN_SET_DWORD(req, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, 2905 params->buf_size); 2906 2907 MCDI_IN_SET_DWORD(req, INIT_RXQ_V5_IN_RX_PREFIX_ID, params->prefix_id); 2908 2909 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR); 2910 addr = EFSYS_MEM_ADDR(esmp); 2911 2912 for (i = 0; i < npages; i++) { 2913 EFX_POPULATE_QWORD_2(*dma_addr, 2914 EFX_DWORD_1, (uint32_t)(addr >> 32), 2915 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); 2916 2917 dma_addr++; 2918 addr += EFX_BUF_SIZE; 2919 } 2920 2921 efx_mcdi_execute(enp, &req); 2922 2923 if (req.emr_rc != 0) { 2924 rc = req.emr_rc; 2925 goto fail3; 2926 } 2927 2928 return (0); 2929 2930 fail3: 2931 EFSYS_PROBE(fail3); 2932 fail2: 2933 EFSYS_PROBE(fail2); 2934 fail1: 2935 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2936 2937 return (rc); 2938 } 2939 2940 __checkReturn efx_rc_t 2941 efx_mcdi_fini_rxq( 2942 __in efx_nic_t *enp, 2943 __in uint32_t instance) 2944 { 2945 efx_mcdi_req_t req; 2946 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_RXQ_IN_LEN, 2947 MC_CMD_FINI_RXQ_OUT_LEN); 2948 efx_rc_t rc; 2949 2950 req.emr_cmd = MC_CMD_FINI_RXQ; 2951 req.emr_in_buf = payload; 2952 req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN; 2953 req.emr_out_buf = payload; 2954 req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN; 2955 2956 MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance); 2957 2958 efx_mcdi_execute_quiet(enp, &req); 2959 2960 if (req.emr_rc != 0) { 2961 rc = req.emr_rc; 2962 goto fail1; 2963 } 2964 2965 return (0); 2966 2967 fail1: 2968 /* 2969 * EALREADY is not an error, but indicates that the MC has rebooted and 2970 * that the RXQ has already been destroyed. 
2971 */ 2972 if (rc != EALREADY) 2973 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2974 2975 return (rc); 2976 } 2977 2978 __checkReturn efx_rc_t 2979 efx_mcdi_init_txq( 2980 __in efx_nic_t *enp, 2981 __in uint32_t ndescs, 2982 __in uint32_t target_evq, 2983 __in uint32_t label, 2984 __in uint32_t instance, 2985 __in uint16_t flags, 2986 __in efsys_mem_t *esmp) 2987 { 2988 efx_mcdi_req_t req; 2989 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_TXQ_EXT_IN_LEN, 2990 MC_CMD_INIT_TXQ_OUT_LEN); 2991 efx_qword_t *dma_addr; 2992 uint64_t addr; 2993 int npages; 2994 int i; 2995 efx_rc_t rc; 2996 2997 EFSYS_ASSERT(MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM >= 2998 efx_txq_nbufs(enp, enp->en_nic_cfg.enc_txq_max_ndescs)); 2999 3000 if ((esmp == NULL) || 3001 (EFSYS_MEM_SIZE(esmp) < efx_txq_size(enp, ndescs))) { 3002 rc = EINVAL; 3003 goto fail1; 3004 } 3005 3006 npages = efx_txq_nbufs(enp, ndescs); 3007 if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) { 3008 rc = EINVAL; 3009 goto fail2; 3010 } 3011 3012 req.emr_cmd = MC_CMD_INIT_TXQ; 3013 req.emr_in_buf = payload; 3014 req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages); 3015 req.emr_out_buf = payload; 3016 req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN; 3017 3018 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs); 3019 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq); 3020 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label); 3021 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance); 3022 3023 MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS, 3024 INIT_TXQ_IN_FLAG_BUFF_MODE, 0, 3025 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, 3026 (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1, 3027 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, 3028 (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1, 3029 INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN, 3030 (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0, 3031 INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN, 3032 (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, 3033 INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 
1 : 0, 3034 INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0, 3035 INIT_TXQ_IN_CRC_MODE, 0, 3036 INIT_TXQ_IN_FLAG_TIMESTAMP, 0); 3037 3038 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0); 3039 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, enp->en_vport_id); 3040 3041 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR); 3042 addr = EFSYS_MEM_ADDR(esmp); 3043 3044 for (i = 0; i < npages; i++) { 3045 EFX_POPULATE_QWORD_2(*dma_addr, 3046 EFX_DWORD_1, (uint32_t)(addr >> 32), 3047 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); 3048 3049 dma_addr++; 3050 addr += EFX_BUF_SIZE; 3051 } 3052 3053 efx_mcdi_execute(enp, &req); 3054 3055 if (req.emr_rc != 0) { 3056 rc = req.emr_rc; 3057 goto fail3; 3058 } 3059 3060 return (0); 3061 3062 fail3: 3063 EFSYS_PROBE(fail3); 3064 fail2: 3065 EFSYS_PROBE(fail2); 3066 fail1: 3067 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3068 3069 return (rc); 3070 } 3071 3072 __checkReturn efx_rc_t 3073 efx_mcdi_fini_txq( 3074 __in efx_nic_t *enp, 3075 __in uint32_t instance) 3076 { 3077 efx_mcdi_req_t req; 3078 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_TXQ_IN_LEN, 3079 MC_CMD_FINI_TXQ_OUT_LEN); 3080 efx_rc_t rc; 3081 3082 req.emr_cmd = MC_CMD_FINI_TXQ; 3083 req.emr_in_buf = payload; 3084 req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN; 3085 req.emr_out_buf = payload; 3086 req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN; 3087 3088 MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance); 3089 3090 efx_mcdi_execute_quiet(enp, &req); 3091 3092 if (req.emr_rc != 0) { 3093 rc = req.emr_rc; 3094 goto fail1; 3095 } 3096 3097 return (0); 3098 3099 fail1: 3100 /* 3101 * EALREADY is not an error, but indicates that the MC has rebooted and 3102 * that the TXQ has already been destroyed. 3103 */ 3104 if (rc != EALREADY) 3105 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3106 3107 return (rc); 3108 } 3109 3110 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */ 3111 3112 #endif /* EFSYS_OPT_MCDI */ 3113
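
/*
 * Illustrative call sequence for the queue helpers above. This is a sketch
 * only: the instance and flag values are hypothetical, and in practice these
 * helpers are reached via the efx_ev_qcreate()/efx_rx_qcreate()/
 * efx_tx_qcreate() paths rather than being called directly.
 *
 *	if (efx_mcdi_init_evq(enp, evq_index, evq_esmp, nevs, irq, us,
 *	    flags, B_FALSE) != 0)
 *		... handle failure ...
 *
 *	... create RXQs/TXQs targeting the EVQ with efx_mcdi_init_rxq()
 *	... and efx_mcdi_init_txq(), run traffic, then tear down:
 *
 *	(void) efx_mcdi_fini_txq(enp, txq_instance);
 *	(void) efx_mcdi_fini_rxq(enp, rxq_instance);
 *	(void) efx_mcdi_fini_evq(enp, evq_index);
 */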