1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright(c) 2019-2020 Xilinx, Inc. 4 * Copyright(c) 2008-2019 Solarflare Communications Inc. 5 */ 6 7 #include "efx.h" 8 #include "efx_impl.h" 9 10 #if EFSYS_OPT_MCDI 11 12 /* 13 * There are three versions of the MCDI interface: 14 * - MCDIv0: Siena BootROM. Transport uses MCDIv1 headers. 15 * - MCDIv1: Siena firmware and Huntington BootROM. 16 * - MCDIv2: EF10 firmware (Huntington/Medford) and Medford BootROM. 17 * Transport uses MCDIv2 headers. 18 * 19 * MCDIv2 Header NOT_EPOCH flag 20 * ---------------------------- 21 * A new epoch begins at initial startup or after an MC reboot, and defines when 22 * the MC should reject stale MCDI requests. 23 * 24 * The first MCDI request sent by the host should contain NOT_EPOCH=0, and all 25 * subsequent requests (until the next MC reboot) should contain NOT_EPOCH=1. 26 * 27 * After rebooting the MC will fail all requests with NOT_EPOCH=1 by writing a 28 * response with ERROR=1 and DATALEN=0 until a request is seen with NOT_EPOCH=0. 29 */ 30 31 32 33 #if EFSYS_OPT_SIENA 34 35 static const efx_mcdi_ops_t __efx_mcdi_siena_ops = { 36 siena_mcdi_init, /* emco_init */ 37 siena_mcdi_send_request, /* emco_send_request */ 38 siena_mcdi_poll_reboot, /* emco_poll_reboot */ 39 siena_mcdi_poll_response, /* emco_poll_response */ 40 siena_mcdi_read_response, /* emco_read_response */ 41 siena_mcdi_fini, /* emco_fini */ 42 siena_mcdi_feature_supported, /* emco_feature_supported */ 43 siena_mcdi_get_timeout, /* emco_get_timeout */ 44 }; 45 46 #endif /* EFSYS_OPT_SIENA */ 47 48 #if EFX_OPTS_EF10() 49 50 static const efx_mcdi_ops_t __efx_mcdi_ef10_ops = { 51 ef10_mcdi_init, /* emco_init */ 52 ef10_mcdi_send_request, /* emco_send_request */ 53 ef10_mcdi_poll_reboot, /* emco_poll_reboot */ 54 ef10_mcdi_poll_response, /* emco_poll_response */ 55 ef10_mcdi_read_response, /* emco_read_response */ 56 ef10_mcdi_fini, /* emco_fini */ 57 ef10_mcdi_feature_supported, /* emco_feature_supported */ 58 ef10_mcdi_get_timeout, /* emco_get_timeout */ 59 }; 60 61 #endif /* EFX_OPTS_EF10() */ 62 63 #if EFSYS_OPT_RIVERHEAD 64 65 static const efx_mcdi_ops_t __efx_mcdi_rhead_ops = { 66 ef10_mcdi_init, /* emco_init */ 67 ef10_mcdi_send_request, /* emco_send_request */ 68 ef10_mcdi_poll_reboot, /* emco_poll_reboot */ 69 ef10_mcdi_poll_response, /* emco_poll_response */ 70 ef10_mcdi_read_response, /* emco_read_response */ 71 ef10_mcdi_fini, /* emco_fini */ 72 ef10_mcdi_feature_supported, /* emco_feature_supported */ 73 ef10_mcdi_get_timeout, /* emco_get_timeout */ 74 }; 75 76 #endif /* EFSYS_OPT_RIVERHEAD */ 77 78 79 80 __checkReturn efx_rc_t 81 efx_mcdi_init( 82 __in efx_nic_t *enp, 83 __in const efx_mcdi_transport_t *emtp) 84 { 85 const efx_mcdi_ops_t *emcop; 86 efx_rc_t rc; 87 88 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 89 EFSYS_ASSERT3U(enp->en_mod_flags, ==, 0); 90 91 switch (enp->en_family) { 92 #if EFSYS_OPT_SIENA 93 case EFX_FAMILY_SIENA: 94 emcop = &__efx_mcdi_siena_ops; 95 break; 96 #endif /* EFSYS_OPT_SIENA */ 97 98 #if EFSYS_OPT_HUNTINGTON 99 case EFX_FAMILY_HUNTINGTON: 100 emcop = &__efx_mcdi_ef10_ops; 101 break; 102 #endif /* EFSYS_OPT_HUNTINGTON */ 103 104 #if EFSYS_OPT_MEDFORD 105 case EFX_FAMILY_MEDFORD: 106 emcop = &__efx_mcdi_ef10_ops; 107 break; 108 #endif /* EFSYS_OPT_MEDFORD */ 109 110 #if EFSYS_OPT_MEDFORD2 111 case EFX_FAMILY_MEDFORD2: 112 emcop = &__efx_mcdi_ef10_ops; 113 break; 114 #endif /* EFSYS_OPT_MEDFORD2 */ 115 116 #if EFSYS_OPT_RIVERHEAD 117 case EFX_FAMILY_RIVERHEAD: 118 emcop = 
&__efx_mcdi_rhead_ops; 119 break; 120 #endif /* EFSYS_OPT_RIVERHEAD */ 121 122 default: 123 EFSYS_ASSERT(0); 124 rc = ENOTSUP; 125 goto fail1; 126 } 127 128 if (enp->en_features & EFX_FEATURE_MCDI_DMA) { 129 /* MCDI requires a DMA buffer in host memory */ 130 if ((emtp == NULL) || (emtp->emt_dma_mem) == NULL) { 131 rc = EINVAL; 132 goto fail2; 133 } 134 } 135 enp->en_mcdi.em_emtp = emtp; 136 137 if (emcop != NULL && emcop->emco_init != NULL) { 138 if ((rc = emcop->emco_init(enp, emtp)) != 0) 139 goto fail3; 140 } 141 142 enp->en_mcdi.em_emcop = emcop; 143 enp->en_mod_flags |= EFX_MOD_MCDI; 144 145 return (0); 146 147 fail3: 148 EFSYS_PROBE(fail3); 149 fail2: 150 EFSYS_PROBE(fail2); 151 fail1: 152 EFSYS_PROBE1(fail1, efx_rc_t, rc); 153 154 enp->en_mcdi.em_emcop = NULL; 155 enp->en_mcdi.em_emtp = NULL; 156 enp->en_mod_flags &= ~EFX_MOD_MCDI; 157 158 return (rc); 159 } 160 161 void 162 efx_mcdi_fini( 163 __in efx_nic_t *enp) 164 { 165 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 166 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 167 168 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 169 EFSYS_ASSERT3U(enp->en_mod_flags, ==, EFX_MOD_MCDI); 170 171 if (emcop != NULL && emcop->emco_fini != NULL) 172 emcop->emco_fini(enp); 173 174 emip->emi_port = 0; 175 emip->emi_aborted = 0; 176 177 enp->en_mcdi.em_emcop = NULL; 178 enp->en_mod_flags &= ~EFX_MOD_MCDI; 179 } 180 181 void 182 efx_mcdi_new_epoch( 183 __in efx_nic_t *enp) 184 { 185 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 186 efsys_lock_state_t state; 187 188 /* Start a new epoch (allow fresh MCDI requests to succeed) */ 189 EFSYS_LOCK(enp->en_eslp, state); 190 emip->emi_new_epoch = B_TRUE; 191 EFSYS_UNLOCK(enp->en_eslp, state); 192 } 193 194 static void 195 efx_mcdi_send_request( 196 __in efx_nic_t *enp, 197 __in void *hdrp, 198 __in size_t hdr_len, 199 __in void *sdup, 200 __in size_t sdu_len) 201 { 202 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 203 204 emcop->emco_send_request(enp, hdrp, hdr_len, sdup, sdu_len); 205 } 206 207 static efx_rc_t 208 efx_mcdi_poll_reboot( 209 __in efx_nic_t *enp) 210 { 211 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 212 efx_rc_t rc; 213 214 rc = emcop->emco_poll_reboot(enp); 215 return (rc); 216 } 217 218 static boolean_t 219 efx_mcdi_poll_response( 220 __in efx_nic_t *enp) 221 { 222 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 223 boolean_t available; 224 225 available = emcop->emco_poll_response(enp); 226 return (available); 227 } 228 229 static void 230 efx_mcdi_read_response( 231 __in efx_nic_t *enp, 232 __out void *bufferp, 233 __in size_t offset, 234 __in size_t length) 235 { 236 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 237 238 emcop->emco_read_response(enp, bufferp, offset, length); 239 } 240 241 void 242 efx_mcdi_request_start( 243 __in efx_nic_t *enp, 244 __in efx_mcdi_req_t *emrp, 245 __in boolean_t ev_cpl) 246 { 247 #if EFSYS_OPT_MCDI_LOGGING 248 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 249 #endif 250 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 251 efx_dword_t hdr[2]; 252 size_t hdr_len; 253 unsigned int max_version; 254 unsigned int seq; 255 unsigned int xflags; 256 boolean_t new_epoch; 257 efsys_lock_state_t state; 258 259 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 260 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); 261 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 262 263 /* 264 * efx_mcdi_request_start() is naturally serialised against both 265 * efx_mcdi_request_poll() and 
efx_mcdi_ev_cpl()/efx_mcdi_ev_death(),
	 * by virtue of there only being one outstanding MCDI request.
	 * Unfortunately, upper layers may also call efx_mcdi_request_abort()
	 * at any time to time out a pending MCDI request. That request may
	 * then subsequently complete, meaning efx_mcdi_ev_cpl() or
	 * efx_mcdi_ev_death() may end up running in parallel with
	 * efx_mcdi_request_start(). This race is handled by ensuring that
	 * %emi_pending_req, %emi_ev_cpl and %emi_seq are protected by the
	 * en_eslp lock.
	 */
	EFSYS_LOCK(enp->en_eslp, state);
	EFSYS_ASSERT(emip->emi_pending_req == NULL);
	emip->emi_pending_req = emrp;
	emip->emi_ev_cpl = ev_cpl;
	emip->emi_poll_cnt = 0;
	seq = emip->emi_seq++ & EFX_MASK32(MCDI_HEADER_SEQ);
	new_epoch = emip->emi_new_epoch;
	max_version = emip->emi_max_version;
	EFSYS_UNLOCK(enp->en_eslp, state);

	xflags = 0;
	if (ev_cpl)
		xflags |= MCDI_HEADER_XFLAGS_EVREQ;

	/*
	 * Huntington firmware supports MCDIv2, but the Huntington BootROM only
	 * supports MCDIv1. Use MCDIv1 headers for MCDIv1 commands where
	 * possible to support this.
	 */
	if ((max_version >= 2) &&
	    ((emrp->emr_cmd > MC_CMD_CMD_SPACE_ESCAPE_7) ||
	    (emrp->emr_in_length > MCDI_CTL_SDU_LEN_MAX_V1) ||
	    (emrp->emr_out_length > MCDI_CTL_SDU_LEN_MAX_V1))) {
		/* Construct MCDI v2 header */
		hdr_len = sizeof (hdr);
		EFX_POPULATE_DWORD_8(hdr[0],
		    MCDI_HEADER_CODE, MC_CMD_V2_EXTN,
		    MCDI_HEADER_RESYNC, 1,
		    MCDI_HEADER_DATALEN, 0,
		    MCDI_HEADER_SEQ, seq,
		    MCDI_HEADER_NOT_EPOCH, new_epoch ? 0 : 1,
		    MCDI_HEADER_ERROR, 0,
		    MCDI_HEADER_RESPONSE, 0,
		    MCDI_HEADER_XFLAGS, xflags);

		EFX_POPULATE_DWORD_2(hdr[1],
		    MC_CMD_V2_EXTN_IN_EXTENDED_CMD, emrp->emr_cmd,
		    MC_CMD_V2_EXTN_IN_ACTUAL_LEN, emrp->emr_in_length);
	} else {
		/* Construct MCDI v1 header */
		hdr_len = sizeof (hdr[0]);
		EFX_POPULATE_DWORD_8(hdr[0],
		    MCDI_HEADER_CODE, emrp->emr_cmd,
		    MCDI_HEADER_RESYNC, 1,
		    MCDI_HEADER_DATALEN, emrp->emr_in_length,
		    MCDI_HEADER_SEQ, seq,
		    MCDI_HEADER_NOT_EPOCH, new_epoch ?
0 : 1, 322 MCDI_HEADER_ERROR, 0, 323 MCDI_HEADER_RESPONSE, 0, 324 MCDI_HEADER_XFLAGS, xflags); 325 } 326 327 #if EFSYS_OPT_MCDI_LOGGING 328 if (emtp->emt_logger != NULL) { 329 emtp->emt_logger(emtp->emt_context, EFX_LOG_MCDI_REQUEST, 330 &hdr[0], hdr_len, 331 emrp->emr_in_buf, emrp->emr_in_length); 332 } 333 #endif /* EFSYS_OPT_MCDI_LOGGING */ 334 335 efx_mcdi_send_request(enp, &hdr[0], hdr_len, 336 emrp->emr_in_buf, emrp->emr_in_length); 337 } 338 339 340 static void 341 efx_mcdi_read_response_header( 342 __in efx_nic_t *enp, 343 __inout efx_mcdi_req_t *emrp) 344 { 345 #if EFSYS_OPT_MCDI_LOGGING 346 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 347 #endif /* EFSYS_OPT_MCDI_LOGGING */ 348 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 349 efx_dword_t hdr[2]; 350 unsigned int hdr_len; 351 unsigned int data_len; 352 unsigned int seq; 353 unsigned int cmd; 354 unsigned int error; 355 efx_rc_t rc; 356 357 EFSYS_ASSERT(emrp != NULL); 358 359 efx_mcdi_read_response(enp, &hdr[0], 0, sizeof (hdr[0])); 360 hdr_len = sizeof (hdr[0]); 361 362 cmd = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE); 363 seq = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_SEQ); 364 error = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_ERROR); 365 366 if (cmd != MC_CMD_V2_EXTN) { 367 data_len = EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_DATALEN); 368 } else { 369 efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1])); 370 hdr_len += sizeof (hdr[1]); 371 372 cmd = EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD); 373 data_len = 374 EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_ACTUAL_LEN); 375 } 376 377 if (error && (data_len == 0)) { 378 /* The MC has rebooted since the request was sent. */ 379 EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US); 380 efx_mcdi_poll_reboot(enp); 381 rc = EIO; 382 goto fail1; 383 } 384 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 385 if (((cmd != emrp->emr_cmd) && (emrp->emr_cmd != MC_CMD_PROXY_CMD)) || 386 #else 387 if ((cmd != emrp->emr_cmd) || 388 #endif 389 (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) { 390 /* Response is for a different request */ 391 rc = EIO; 392 goto fail2; 393 } 394 if (error) { 395 efx_dword_t err[2]; 396 unsigned int err_len = MIN(data_len, sizeof (err)); 397 int err_code = MC_CMD_ERR_EPROTO; 398 int err_arg = 0; 399 400 /* Read error code (and arg num for MCDI v2 commands) */ 401 efx_mcdi_read_response(enp, &err, hdr_len, err_len); 402 403 if (err_len >= (MC_CMD_ERR_CODE_OFST + sizeof (efx_dword_t))) 404 err_code = EFX_DWORD_FIELD(err[0], EFX_DWORD_0); 405 #ifdef WITH_MCDI_V2 406 if (err_len >= (MC_CMD_ERR_ARG_OFST + sizeof (efx_dword_t))) 407 err_arg = EFX_DWORD_FIELD(err[1], EFX_DWORD_0); 408 #endif 409 emrp->emr_err_code = err_code; 410 emrp->emr_err_arg = err_arg; 411 412 #if EFSYS_OPT_MCDI_PROXY_AUTH 413 if ((err_code == MC_CMD_ERR_PROXY_PENDING) && 414 (err_len == sizeof (err))) { 415 /* 416 * The MCDI request would normally fail with EPERM, but 417 * firmware has forwarded it to an authorization agent 418 * attached to a privileged PF. 419 * 420 * Save the authorization request handle. The client 421 * must wait for a PROXY_RESPONSE event, or timeout. 
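			 *
			 * The handle saved here can later be fetched with
			 * efx_mcdi_get_proxy_handle() and matched against the
			 * handle carried by the PROXY_RESPONSE event (see
			 * efx_mcdi_ev_proxy_response() below).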
422 */ 423 emrp->emr_proxy_handle = err_arg; 424 } 425 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ 426 427 #if EFSYS_OPT_MCDI_LOGGING 428 if (emtp->emt_logger != NULL) { 429 emtp->emt_logger(emtp->emt_context, 430 EFX_LOG_MCDI_RESPONSE, 431 &hdr[0], hdr_len, 432 &err[0], err_len); 433 } 434 #endif /* EFSYS_OPT_MCDI_LOGGING */ 435 436 if (!emrp->emr_quiet) { 437 EFSYS_PROBE3(mcdi_err_arg, int, emrp->emr_cmd, 438 int, err_code, int, err_arg); 439 } 440 441 rc = efx_mcdi_request_errcode(err_code); 442 goto fail3; 443 } 444 445 emrp->emr_rc = 0; 446 emrp->emr_out_length_used = data_len; 447 #if EFSYS_OPT_MCDI_PROXY_AUTH 448 emrp->emr_proxy_handle = 0; 449 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ 450 return; 451 452 fail3: 453 fail2: 454 fail1: 455 emrp->emr_rc = rc; 456 emrp->emr_out_length_used = 0; 457 } 458 459 static void 460 efx_mcdi_finish_response( 461 __in efx_nic_t *enp, 462 __in efx_mcdi_req_t *emrp) 463 { 464 #if EFSYS_OPT_MCDI_LOGGING 465 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 466 #endif /* EFSYS_OPT_MCDI_LOGGING */ 467 efx_dword_t hdr[2]; 468 unsigned int hdr_len; 469 size_t bytes; 470 unsigned int resp_off; 471 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 472 unsigned int resp_cmd; 473 boolean_t proxied_cmd_resp = B_FALSE; 474 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ 475 476 if (emrp->emr_out_buf == NULL) 477 return; 478 479 /* Read the command header to detect MCDI response format */ 480 hdr_len = sizeof (hdr[0]); 481 efx_mcdi_read_response(enp, &hdr[0], 0, hdr_len); 482 if (EFX_DWORD_FIELD(hdr[0], MCDI_HEADER_CODE) == MC_CMD_V2_EXTN) { 483 /* 484 * Read the actual payload length. The length given in the event 485 * is only correct for responses with the V1 format. 486 */ 487 efx_mcdi_read_response(enp, &hdr[1], hdr_len, sizeof (hdr[1])); 488 hdr_len += sizeof (hdr[1]); 489 resp_off = hdr_len; 490 491 emrp->emr_out_length_used = EFX_DWORD_FIELD(hdr[1], 492 MC_CMD_V2_EXTN_IN_ACTUAL_LEN); 493 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 494 /* 495 * A proxy MCDI command is executed by PF on behalf of 496 * one of its VFs. The command to be proxied follows 497 * immediately afterward in the host buffer. 498 * PROXY_CMD inner call complete response should be copied to 499 * output buffer so that it can be returned to the requesting 500 * function in MC_CMD_PROXY_COMPLETE payload. 
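		 *
		 * In practice this means that when the response header at
		 * offset 0 carries the inner command code rather than
		 * PROXY_CMD itself, the copy below starts at offset 0 and
		 * emr_out_length_used is grown by hdr_len, so the caller
		 * receives the inner command's complete response (header
		 * plus payload).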
		 */
		resp_cmd =
		    EFX_DWORD_FIELD(hdr[1], MC_CMD_V2_EXTN_IN_EXTENDED_CMD);
		proxied_cmd_resp = ((emrp->emr_cmd == MC_CMD_PROXY_CMD) &&
		    (resp_cmd != MC_CMD_PROXY_CMD));
		if (proxied_cmd_resp) {
			resp_off = 0;
			emrp->emr_out_length_used += hdr_len;
		}
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */
	} else {
		resp_off = hdr_len;
	}

	/* Copy payload out into caller supplied buffer */
	bytes = MIN(emrp->emr_out_length_used, emrp->emr_out_length);
	efx_mcdi_read_response(enp, emrp->emr_out_buf, resp_off, bytes);

#if EFSYS_OPT_MCDI_LOGGING
	if (emtp->emt_logger != NULL) {
		emtp->emt_logger(emtp->emt_context,
		    EFX_LOG_MCDI_RESPONSE,
		    &hdr[0], hdr_len,
		    emrp->emr_out_buf, bytes);
	}
#endif /* EFSYS_OPT_MCDI_LOGGING */
}


	__checkReturn	boolean_t
efx_mcdi_request_poll(
	__in		efx_nic_t *enp)
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	efx_mcdi_req_t *emrp;
	efsys_lock_state_t state;
	efx_rc_t rc;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

	/* Serialise against post-watchdog efx_mcdi_ev* */
	EFSYS_LOCK(enp->en_eslp, state);

	EFSYS_ASSERT(emip->emi_pending_req != NULL);
	EFSYS_ASSERT(!emip->emi_ev_cpl);
	emrp = emip->emi_pending_req;

	/* Check if hardware is unavailable */
	if (efx_nic_hw_unavailable(enp)) {
		EFSYS_UNLOCK(enp->en_eslp, state);
		return (B_FALSE);
	}

	/* Check for reboot atomically w.r.t. efx_mcdi_request_start */
	if (emip->emi_poll_cnt++ == 0) {
		if ((rc = efx_mcdi_poll_reboot(enp)) != 0) {
			emip->emi_pending_req = NULL;
			EFSYS_UNLOCK(enp->en_eslp, state);

			/* Reboot/Assertion */
			if (rc == EIO || rc == EINTR)
				efx_mcdi_raise_exception(enp, emrp, rc);

			goto fail1;
		}
	}

	/* Check if a response is available */
	if (efx_mcdi_poll_response(enp) == B_FALSE) {
		EFSYS_UNLOCK(enp->en_eslp, state);
		return (B_FALSE);
	}

	/* Read the response header */
	efx_mcdi_read_response_header(enp, emrp);

	/* Request complete */
	emip->emi_pending_req = NULL;

	/* Ensure stale MCDI requests fail after an MC reboot. */
	emip->emi_new_epoch = B_FALSE;

	EFSYS_UNLOCK(enp->en_eslp, state);

	if ((rc = emrp->emr_rc) != 0)
		goto fail2;

	efx_mcdi_finish_response(enp, emrp);
	return (B_TRUE);

fail2:
	if (!emrp->emr_quiet)
		EFSYS_PROBE(fail2);
fail1:
	if (!emrp->emr_quiet)
		EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (B_TRUE);
}

	__checkReturn	boolean_t
efx_mcdi_request_abort(
	__in		efx_nic_t *enp)
{
	efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip);
	efx_mcdi_req_t *emrp;
	boolean_t aborted;
	efsys_lock_state_t state;

	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
	EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI);
	EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI);

	/*
	 * efx_mcdi_ev_* may have already completed this event, and be
	 * spinning/blocked on the upper layer lock. So it *is* legitimate
	 * for emi_pending_req to be NULL. If there is a pending
	 * event-completed request, then provide a "credit" to allow
	 * efx_mcdi_ev_cpl() to accept a single spurious completion.
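	 *
	 * Illustrative sequence (transport behaviour varies): the transport
	 * times out a request and calls efx_mcdi_request_abort(); if the
	 * completion event for the aborted request arrives later,
	 * efx_mcdi_ev_cpl() consumes it against emi_aborted instead of
	 * treating it as the completion of a new request.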
622 */ 623 EFSYS_LOCK(enp->en_eslp, state); 624 emrp = emip->emi_pending_req; 625 aborted = (emrp != NULL); 626 if (aborted) { 627 emip->emi_pending_req = NULL; 628 629 /* Error the request */ 630 emrp->emr_out_length_used = 0; 631 emrp->emr_rc = ETIMEDOUT; 632 633 /* Provide a credit for seqno/emr_pending_req mismatches */ 634 if (emip->emi_ev_cpl) 635 ++emip->emi_aborted; 636 637 /* 638 * The upper layer has called us, so we don't 639 * need to complete the request. 640 */ 641 } 642 EFSYS_UNLOCK(enp->en_eslp, state); 643 644 return (aborted); 645 } 646 647 void 648 efx_mcdi_get_timeout( 649 __in efx_nic_t *enp, 650 __in efx_mcdi_req_t *emrp, 651 __out uint32_t *timeoutp) 652 { 653 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 654 655 emcop->emco_get_timeout(enp, emrp, timeoutp); 656 } 657 658 __checkReturn efx_rc_t 659 efx_mcdi_request_errcode( 660 __in unsigned int err) 661 { 662 663 switch (err) { 664 /* MCDI v1 */ 665 case MC_CMD_ERR_EPERM: 666 return (EACCES); 667 case MC_CMD_ERR_ENOENT: 668 return (ENOENT); 669 case MC_CMD_ERR_EINTR: 670 return (EINTR); 671 case MC_CMD_ERR_EACCES: 672 return (EACCES); 673 case MC_CMD_ERR_EBUSY: 674 return (EBUSY); 675 case MC_CMD_ERR_EINVAL: 676 return (EINVAL); 677 case MC_CMD_ERR_EDEADLK: 678 return (EDEADLK); 679 case MC_CMD_ERR_ENOSYS: 680 return (ENOTSUP); 681 case MC_CMD_ERR_ETIME: 682 return (ETIMEDOUT); 683 case MC_CMD_ERR_ENOTSUP: 684 return (ENOTSUP); 685 case MC_CMD_ERR_EALREADY: 686 return (EALREADY); 687 688 /* MCDI v2 */ 689 case MC_CMD_ERR_EEXIST: 690 return (EEXIST); 691 #ifdef MC_CMD_ERR_EAGAIN 692 case MC_CMD_ERR_EAGAIN: 693 return (EAGAIN); 694 #endif 695 #ifdef MC_CMD_ERR_ENOSPC 696 case MC_CMD_ERR_ENOSPC: 697 return (ENOSPC); 698 #endif 699 case MC_CMD_ERR_ERANGE: 700 return (ERANGE); 701 702 case MC_CMD_ERR_ALLOC_FAIL: 703 return (ENOMEM); 704 case MC_CMD_ERR_NO_VADAPTOR: 705 return (ENOENT); 706 case MC_CMD_ERR_NO_EVB_PORT: 707 return (ENOENT); 708 case MC_CMD_ERR_NO_VSWITCH: 709 return (ENODEV); 710 case MC_CMD_ERR_VLAN_LIMIT: 711 return (EINVAL); 712 case MC_CMD_ERR_BAD_PCI_FUNC: 713 return (ENODEV); 714 case MC_CMD_ERR_BAD_VLAN_MODE: 715 return (EINVAL); 716 case MC_CMD_ERR_BAD_VSWITCH_TYPE: 717 return (EINVAL); 718 case MC_CMD_ERR_BAD_VPORT_TYPE: 719 return (EINVAL); 720 case MC_CMD_ERR_MAC_EXIST: 721 return (EEXIST); 722 723 case MC_CMD_ERR_PROXY_PENDING: 724 return (EAGAIN); 725 726 default: 727 EFSYS_PROBE1(mc_pcol_error, int, err); 728 return (EIO); 729 } 730 } 731 732 void 733 efx_mcdi_raise_exception( 734 __in efx_nic_t *enp, 735 __in_opt efx_mcdi_req_t *emrp, 736 __in int rc) 737 { 738 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 739 efx_mcdi_exception_t exception; 740 741 /* Reboot or Assertion failure only */ 742 EFSYS_ASSERT(rc == EIO || rc == EINTR); 743 744 /* 745 * If MC_CMD_REBOOT causes a reboot (dependent on parameters), 746 * then the EIO is not worthy of an exception. 747 */ 748 if (emrp != NULL && emrp->emr_cmd == MC_CMD_REBOOT && rc == EIO) 749 return; 750 751 exception = (rc == EIO) 752 ? 
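	 *
	 * efx_mcdi_poll_reboot() reads and acknowledges the MC status word
	 * here, under en_eslp, so that a concurrent efx_mcdi_request_poll()
	 * does not report the same reboot again; setting emi_new_epoch means
	 * the next request will be sent with NOT_EPOCH=0 (see the epoch notes
	 * at the top of this file).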
EFX_MCDI_EXCEPTION_MC_REBOOT 753 : EFX_MCDI_EXCEPTION_MC_BADASSERT; 754 755 emtp->emt_exception(emtp->emt_context, exception); 756 } 757 758 void 759 efx_mcdi_execute( 760 __in efx_nic_t *enp, 761 __inout efx_mcdi_req_t *emrp) 762 { 763 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 764 765 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); 766 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 767 768 emrp->emr_quiet = B_FALSE; 769 emtp->emt_execute(emtp->emt_context, emrp); 770 } 771 772 void 773 efx_mcdi_execute_quiet( 774 __in efx_nic_t *enp, 775 __inout efx_mcdi_req_t *emrp) 776 { 777 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 778 779 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); 780 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 781 782 emrp->emr_quiet = B_TRUE; 783 emtp->emt_execute(emtp->emt_context, emrp); 784 } 785 786 void 787 efx_mcdi_ev_cpl( 788 __in efx_nic_t *enp, 789 __in unsigned int seq, 790 __in unsigned int outlen, 791 __in int errcode) 792 { 793 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 794 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 795 efx_mcdi_req_t *emrp; 796 efsys_lock_state_t state; 797 798 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_MCDI); 799 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 800 801 /* 802 * Serialise against efx_mcdi_request_poll()/efx_mcdi_request_start() 803 * when we're completing an aborted request. 804 */ 805 EFSYS_LOCK(enp->en_eslp, state); 806 if (emip->emi_pending_req == NULL || !emip->emi_ev_cpl || 807 (seq != ((emip->emi_seq - 1) & EFX_MASK32(MCDI_HEADER_SEQ)))) { 808 EFSYS_ASSERT(emip->emi_aborted > 0); 809 if (emip->emi_aborted > 0) 810 --emip->emi_aborted; 811 EFSYS_UNLOCK(enp->en_eslp, state); 812 return; 813 } 814 815 emrp = emip->emi_pending_req; 816 emip->emi_pending_req = NULL; 817 EFSYS_UNLOCK(enp->en_eslp, state); 818 819 if (emip->emi_max_version >= 2) { 820 /* MCDIv2 response details do not fit into an event. */ 821 efx_mcdi_read_response_header(enp, emrp); 822 } else { 823 if (errcode != 0) { 824 if (!emrp->emr_quiet) { 825 EFSYS_PROBE2(mcdi_err, int, emrp->emr_cmd, 826 int, errcode); 827 } 828 emrp->emr_out_length_used = 0; 829 emrp->emr_rc = efx_mcdi_request_errcode(errcode); 830 } else { 831 emrp->emr_out_length_used = outlen; 832 emrp->emr_rc = 0; 833 } 834 } 835 if (emrp->emr_rc == 0) 836 efx_mcdi_finish_response(enp, emrp); 837 838 emtp->emt_ev_cpl(emtp->emt_context); 839 } 840 841 #if EFSYS_OPT_MCDI_PROXY_AUTH 842 843 __checkReturn efx_rc_t 844 efx_mcdi_get_proxy_handle( 845 __in efx_nic_t *enp, 846 __in efx_mcdi_req_t *emrp, 847 __out uint32_t *handlep) 848 { 849 efx_rc_t rc; 850 851 _NOTE(ARGUNUSED(enp)) 852 853 /* 854 * Return proxy handle from MCDI request that returned with error 855 * MC_MCD_ERR_PROXY_PENDING. This handle is used to wait for a matching 856 * PROXY_RESPONSE event. 
857 */ 858 if ((emrp == NULL) || (handlep == NULL)) { 859 rc = EINVAL; 860 goto fail1; 861 } 862 if ((emrp->emr_rc != 0) && 863 (emrp->emr_err_code == MC_CMD_ERR_PROXY_PENDING)) { 864 *handlep = emrp->emr_proxy_handle; 865 rc = 0; 866 } else { 867 *handlep = 0; 868 rc = ENOENT; 869 } 870 return (rc); 871 872 fail1: 873 EFSYS_PROBE1(fail1, efx_rc_t, rc); 874 return (rc); 875 } 876 877 void 878 efx_mcdi_ev_proxy_response( 879 __in efx_nic_t *enp, 880 __in unsigned int handle, 881 __in unsigned int status) 882 { 883 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 884 efx_rc_t rc; 885 886 /* 887 * Handle results of an authorization request for a privileged MCDI 888 * command. If authorization was granted then we must re-issue the 889 * original MCDI request. If authorization failed or timed out, 890 * then the original MCDI request should be completed with the 891 * result code from this event. 892 */ 893 rc = (status == 0) ? 0 : efx_mcdi_request_errcode(status); 894 895 emtp->emt_ev_proxy_response(emtp->emt_context, handle, rc); 896 } 897 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */ 898 899 #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER 900 void 901 efx_mcdi_ev_proxy_request( 902 __in efx_nic_t *enp, 903 __in unsigned int index) 904 { 905 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 906 907 if (emtp->emt_ev_proxy_request != NULL) 908 emtp->emt_ev_proxy_request(emtp->emt_context, index); 909 } 910 #endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */ 911 void 912 efx_mcdi_ev_death( 913 __in efx_nic_t *enp, 914 __in int rc) 915 { 916 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 917 const efx_mcdi_transport_t *emtp = enp->en_mcdi.em_emtp; 918 efx_mcdi_req_t *emrp = NULL; 919 boolean_t ev_cpl; 920 efsys_lock_state_t state; 921 922 /* 923 * The MCDI request (if there is one) has been terminated, either 924 * by a BADASSERT or REBOOT event. 925 * 926 * If there is an outstanding event-completed MCDI operation, then we 927 * will never receive the completion event (because both MCDI 928 * completions and BADASSERT events are sent to the same evq). So 929 * complete this MCDI op. 930 * 931 * This function might run in parallel with efx_mcdi_request_poll() 932 * for poll completed mcdi requests, and also with 933 * efx_mcdi_request_start() for post-watchdog completions. 934 */ 935 EFSYS_LOCK(enp->en_eslp, state); 936 emrp = emip->emi_pending_req; 937 ev_cpl = emip->emi_ev_cpl; 938 if (emrp != NULL && emip->emi_ev_cpl) { 939 emip->emi_pending_req = NULL; 940 941 emrp->emr_out_length_used = 0; 942 emrp->emr_rc = rc; 943 ++emip->emi_aborted; 944 } 945 946 /* 947 * Since we're running in parallel with a request, consume the 948 * status word before dropping the lock. 
949 */ 950 if (rc == EIO || rc == EINTR) { 951 EFSYS_SPIN(EFX_MCDI_STATUS_SLEEP_US); 952 (void) efx_mcdi_poll_reboot(enp); 953 emip->emi_new_epoch = B_TRUE; 954 } 955 956 EFSYS_UNLOCK(enp->en_eslp, state); 957 958 efx_mcdi_raise_exception(enp, emrp, rc); 959 960 if (emrp != NULL && ev_cpl) 961 emtp->emt_ev_cpl(emtp->emt_context); 962 } 963 964 __checkReturn efx_rc_t 965 efx_mcdi_get_version( 966 __in efx_nic_t *enp, 967 __out efx_mcdi_version_t *verp) 968 { 969 EFX_MCDI_DECLARE_BUF(payload, 970 MC_CMD_GET_VERSION_IN_LEN, 971 MC_CMD_GET_VERSION_OUT_LEN); 972 size_t min_resp_len_required; 973 efx_mcdi_req_t req; 974 efx_rc_t rc; 975 976 EFX_STATIC_ASSERT(sizeof (verp->emv_version) == 977 MC_CMD_GET_VERSION_OUT_VERSION_LEN); 978 EFX_STATIC_ASSERT(sizeof (verp->emv_firmware) == 979 MC_CMD_GET_VERSION_OUT_FIRMWARE_LEN); 980 981 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 982 983 req.emr_cmd = MC_CMD_GET_VERSION; 984 req.emr_in_buf = payload; 985 req.emr_out_buf = payload; 986 req.emr_in_length = MC_CMD_GET_VERSION_IN_LEN; 987 req.emr_out_length = MC_CMD_GET_VERSION_OUT_LEN; 988 989 min_resp_len_required = MC_CMD_GET_VERSION_V0_OUT_LEN; 990 991 efx_mcdi_execute(enp, &req); 992 993 if (req.emr_rc != 0) { 994 rc = req.emr_rc; 995 goto fail1; 996 } 997 998 if (req.emr_out_length_used < min_resp_len_required) { 999 rc = EMSGSIZE; 1000 goto fail2; 1001 } 1002 1003 memset(verp, 0, sizeof (*verp)); 1004 1005 if (req.emr_out_length_used > min_resp_len_required) { 1006 efx_word_t *ver_words; 1007 1008 if (req.emr_out_length_used < MC_CMD_GET_VERSION_OUT_LEN) { 1009 rc = EMSGSIZE; 1010 goto fail3; 1011 } 1012 1013 ver_words = MCDI_OUT2(req, efx_word_t, GET_VERSION_OUT_VERSION); 1014 1015 verp->emv_version[0] = EFX_WORD_FIELD(ver_words[0], EFX_WORD_0); 1016 verp->emv_version[1] = EFX_WORD_FIELD(ver_words[1], EFX_WORD_0); 1017 verp->emv_version[2] = EFX_WORD_FIELD(ver_words[2], EFX_WORD_0); 1018 verp->emv_version[3] = EFX_WORD_FIELD(ver_words[3], EFX_WORD_0); 1019 } 1020 1021 verp->emv_firmware = MCDI_OUT_DWORD(req, GET_VERSION_OUT_FIRMWARE); 1022 1023 return (0); 1024 1025 fail3: 1026 EFSYS_PROBE(fail3); 1027 fail2: 1028 EFSYS_PROBE(fail2); 1029 fail1: 1030 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1031 1032 return (rc); 1033 } 1034 1035 static __checkReturn efx_rc_t 1036 efx_mcdi_get_boot_status( 1037 __in efx_nic_t *enp, 1038 __out efx_mcdi_boot_t *statusp) 1039 { 1040 EFX_MCDI_DECLARE_BUF(payload, 1041 MC_CMD_GET_BOOT_STATUS_IN_LEN, 1042 MC_CMD_GET_BOOT_STATUS_OUT_LEN); 1043 efx_mcdi_req_t req; 1044 efx_rc_t rc; 1045 1046 EFSYS_ASSERT3U(enp->en_features, &, EFX_FEATURE_MCDI); 1047 1048 req.emr_cmd = MC_CMD_GET_BOOT_STATUS; 1049 req.emr_in_buf = payload; 1050 req.emr_in_length = MC_CMD_GET_BOOT_STATUS_IN_LEN; 1051 req.emr_out_buf = payload; 1052 req.emr_out_length = MC_CMD_GET_BOOT_STATUS_OUT_LEN; 1053 1054 efx_mcdi_execute_quiet(enp, &req); 1055 1056 if (req.emr_rc != 0) { 1057 rc = req.emr_rc; 1058 goto fail1; 1059 } 1060 1061 if (req.emr_out_length_used < MC_CMD_GET_BOOT_STATUS_OUT_LEN) { 1062 rc = EMSGSIZE; 1063 goto fail2; 1064 } 1065 1066 if (MCDI_OUT_DWORD_FIELD(req, GET_BOOT_STATUS_OUT_FLAGS, 1067 GET_BOOT_STATUS_OUT_FLAGS_PRIMARY)) 1068 *statusp = EFX_MCDI_BOOT_PRIMARY; 1069 else 1070 *statusp = EFX_MCDI_BOOT_SECONDARY; 1071 1072 return (0); 1073 1074 fail2: 1075 EFSYS_PROBE(fail2); 1076 fail1: 1077 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1078 1079 return (rc); 1080 } 1081 1082 __checkReturn efx_rc_t 1083 efx_mcdi_version( 1084 __in efx_nic_t *enp, 1085 __out_ecount_opt(4) uint16_t versionp[4], 
1086 __out_opt uint32_t *buildp, 1087 __out_opt efx_mcdi_boot_t *statusp) 1088 { 1089 efx_mcdi_version_t ver; 1090 efx_mcdi_boot_t status; 1091 efx_rc_t rc; 1092 1093 rc = efx_mcdi_get_version(enp, &ver); 1094 if (rc != 0) 1095 goto fail1; 1096 1097 /* The bootrom doesn't understand BOOT_STATUS */ 1098 if (MC_FW_VERSION_IS_BOOTLOADER(ver.emv_firmware)) { 1099 status = EFX_MCDI_BOOT_ROM; 1100 goto out; 1101 } 1102 1103 rc = efx_mcdi_get_boot_status(enp, &status); 1104 if (rc == EACCES) { 1105 /* Unprivileged functions cannot access BOOT_STATUS */ 1106 status = EFX_MCDI_BOOT_PRIMARY; 1107 memset(ver.emv_version, 0, sizeof (ver.emv_version)); 1108 ver.emv_firmware = 0; 1109 } else if (rc != 0) { 1110 goto fail2; 1111 } 1112 1113 out: 1114 if (versionp != NULL) 1115 memcpy(versionp, ver.emv_version, sizeof (ver.emv_version)); 1116 if (buildp != NULL) 1117 *buildp = ver.emv_firmware; 1118 if (statusp != NULL) 1119 *statusp = status; 1120 1121 return (0); 1122 1123 fail2: 1124 EFSYS_PROBE(fail2); 1125 fail1: 1126 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1127 1128 return (rc); 1129 } 1130 1131 __checkReturn efx_rc_t 1132 efx_mcdi_get_capabilities( 1133 __in efx_nic_t *enp, 1134 __out_opt uint32_t *flagsp, 1135 __out_opt uint16_t *rx_dpcpu_fw_idp, 1136 __out_opt uint16_t *tx_dpcpu_fw_idp, 1137 __out_opt uint32_t *flags2p, 1138 __out_opt uint32_t *tso2ncp) 1139 { 1140 efx_mcdi_req_t req; 1141 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_CAPABILITIES_IN_LEN, 1142 MC_CMD_GET_CAPABILITIES_V2_OUT_LEN); 1143 boolean_t v2_capable; 1144 efx_rc_t rc; 1145 1146 req.emr_cmd = MC_CMD_GET_CAPABILITIES; 1147 req.emr_in_buf = payload; 1148 req.emr_in_length = MC_CMD_GET_CAPABILITIES_IN_LEN; 1149 req.emr_out_buf = payload; 1150 req.emr_out_length = MC_CMD_GET_CAPABILITIES_V2_OUT_LEN; 1151 1152 efx_mcdi_execute_quiet(enp, &req); 1153 1154 if (req.emr_rc != 0) { 1155 rc = req.emr_rc; 1156 goto fail1; 1157 } 1158 1159 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_OUT_LEN) { 1160 rc = EMSGSIZE; 1161 goto fail2; 1162 } 1163 1164 if (flagsp != NULL) 1165 *flagsp = MCDI_OUT_DWORD(req, GET_CAPABILITIES_OUT_FLAGS1); 1166 1167 if (rx_dpcpu_fw_idp != NULL) 1168 *rx_dpcpu_fw_idp = MCDI_OUT_WORD(req, 1169 GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID); 1170 1171 if (tx_dpcpu_fw_idp != NULL) 1172 *tx_dpcpu_fw_idp = MCDI_OUT_WORD(req, 1173 GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID); 1174 1175 if (req.emr_out_length_used < MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) 1176 v2_capable = B_FALSE; 1177 else 1178 v2_capable = B_TRUE; 1179 1180 if (flags2p != NULL) { 1181 *flags2p = (v2_capable) ? 1182 MCDI_OUT_DWORD(req, GET_CAPABILITIES_V2_OUT_FLAGS2) : 1183 0; 1184 } 1185 1186 if (tso2ncp != NULL) { 1187 *tso2ncp = (v2_capable) ? 1188 MCDI_OUT_WORD(req, 1189 GET_CAPABILITIES_V2_OUT_TX_TSO_V2_N_CONTEXTS) : 1190 0; 1191 } 1192 1193 return (0); 1194 1195 fail2: 1196 EFSYS_PROBE(fail2); 1197 fail1: 1198 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1199 1200 return (rc); 1201 } 1202 1203 static __checkReturn efx_rc_t 1204 efx_mcdi_do_reboot( 1205 __in efx_nic_t *enp, 1206 __in boolean_t after_assertion) 1207 { 1208 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_REBOOT_IN_LEN, 1209 MC_CMD_REBOOT_OUT_LEN); 1210 efx_mcdi_req_t req; 1211 efx_rc_t rc; 1212 1213 /* 1214 * We could require the caller to have caused en_mod_flags=0 to 1215 * call this function. This doesn't help the other port though, 1216 * who's about to get the MC ripped out from underneath them. 1217 * Since they have to cope with the subsequent fallout of MCDI 1218 * failures, we should as well. 
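	 *
	 * Note that a reboot request which actually takes effect never
	 * completes normally (the MC goes away while the command is in
	 * flight), which is why EIO from the request is treated as success
	 * below. After the reboot a new epoch begins and stale requests are
	 * rejected, as described at the top of this file.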
	 */
	EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);

	req.emr_cmd = MC_CMD_REBOOT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_REBOOT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_REBOOT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, REBOOT_IN_FLAGS,
	    (after_assertion ? MC_CMD_REBOOT_FLAGS_AFTER_ASSERTION : 0));

	efx_mcdi_execute_quiet(enp, &req);

	if (req.emr_rc == EACCES) {
		/* Unprivileged functions cannot reboot the MC. */
		goto out;
	}

	/* A successful reboot request returns EIO. */
	if (req.emr_rc != 0 && req.emr_rc != EIO) {
		rc = req.emr_rc;
		goto fail1;
	}

out:
	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_reboot(
	__in		efx_nic_t *enp)
{
	return (efx_mcdi_do_reboot(enp, B_FALSE));
}

	__checkReturn	efx_rc_t
efx_mcdi_exit_assertion_handler(
	__in		efx_nic_t *enp)
{
	return (efx_mcdi_do_reboot(enp, B_TRUE));
}

	__checkReturn	efx_rc_t
efx_mcdi_read_assertion(
	__in		efx_nic_t *enp)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_ASSERTS_IN_LEN,
	    MC_CMD_GET_ASSERTS_OUT_LEN);
	const char *reason;
	unsigned int flags;
	unsigned int index;
	unsigned int ofst;
	int retry;
	efx_rc_t rc;

	/*
	 * Before we attempt to chat to the MC, we should verify that the MC
	 * isn't in its assertion handler, either due to a previous reboot,
	 * or because we're reinitializing due to an eec_exception().
	 *
	 * Use GET_ASSERTS to read any assertion state that may be present.
	 * Retry this command twice: once because a boot-time assertion
	 * failure might cause the first MCDI request to fail, and once again
	 * because we might race with efx_mcdi_exit_assertion_handler()
	 * running on partner port(s) on the same NIC.
	 */
	retry = 2;
	do {
		(void) memset(payload, 0, sizeof (payload));
		req.emr_cmd = MC_CMD_GET_ASSERTS;
		req.emr_in_buf = payload;
		req.emr_in_length = MC_CMD_GET_ASSERTS_IN_LEN;
		req.emr_out_buf = payload;
		req.emr_out_length = MC_CMD_GET_ASSERTS_OUT_LEN;

		MCDI_IN_SET_DWORD(req, GET_ASSERTS_IN_CLEAR, 1);
		efx_mcdi_execute_quiet(enp, &req);

	} while ((req.emr_rc == EINTR || req.emr_rc == EIO) && retry-- > 0);

	if (req.emr_rc != 0) {
		if (req.emr_rc == EACCES) {
			/* Unprivileged functions cannot clear assertions. */
			goto out;
		}
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_GET_ASSERTS_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	/* Print out any assertion state recorded */
	flags = MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_GLOBAL_FLAGS);
	if (flags == MC_CMD_GET_ASSERTS_FLAGS_NO_FAILS)
		return (0);

	reason = (flags == MC_CMD_GET_ASSERTS_FLAGS_SYS_FAIL)
		? "system-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_THR_FAIL)
		? "thread-level assertion"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_WDOG_FIRED)
		? "watchdog reset"
		: (flags == MC_CMD_GET_ASSERTS_FLAGS_ADDR_TRAP)
		? "illegal address trap"
		: "unknown assertion";
	EFSYS_PROBE3(mcpu_assertion,
	    const char *, reason, unsigned int,
	    MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_SAVED_PC_OFFS),
	    unsigned int,
	    MCDI_OUT_DWORD(req, GET_ASSERTS_OUT_THREAD_OFFS));

	/* Print out the registers (r1 ... r31) */
	ofst = MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_OFST;
	for (index = 1;
	    index < 1 + MC_CMD_GET_ASSERTS_OUT_GP_REGS_OFFS_NUM;
	    index++) {
		EFSYS_PROBE2(mcpu_register, unsigned int, index, unsigned int,
		    EFX_DWORD_FIELD(*MCDI_OUT(req, efx_dword_t, ofst),
			EFX_DWORD_0));
		ofst += sizeof (efx_dword_t);
	}
	EFSYS_ASSERT(ofst <= MC_CMD_GET_ASSERTS_OUT_LEN);

out:
	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


/*
 * Internal routines for specific MCDI requests.
 */

	__checkReturn	efx_rc_t
efx_mcdi_drv_attach(
	__in		efx_nic_t *enp,
	__in		boolean_t attach)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRV_ATTACH_IN_V2_LEN,
	    MC_CMD_DRV_ATTACH_EXT_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_DRV_ATTACH;
	req.emr_in_buf = payload;
	if (enp->en_drv_version[0] == '\0') {
		req.emr_in_length = MC_CMD_DRV_ATTACH_IN_LEN;
	} else {
		req.emr_in_length = MC_CMD_DRV_ATTACH_IN_V2_LEN;
	}
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRV_ATTACH_EXT_OUT_LEN;

	/*
	 * Typically, client drivers use DONT_CARE for the datapath firmware
	 * type to ensure that the driver can attach to an unprivileged
	 * function. The datapath firmware type to use is controlled by the
	 * 'sfboot' utility.
	 * If a client driver wishes to attach with a specific datapath
	 * firmware type, that type can be passed in the second argument of
	 * the efx_nic_probe API. One such example is the ESXi native driver,
	 * which attempts to attach with the FULL_FEATURED datapath firmware
	 * type first and falls back to the DONT_CARE datapath firmware type
	 * if MC_CMD_DRV_ATTACH fails.
	 */
	MCDI_IN_POPULATE_DWORD_2(req, DRV_ATTACH_IN_NEW_STATE,
	    DRV_ATTACH_IN_ATTACH, attach ?
1 : 0, 1401 DRV_ATTACH_IN_SUBVARIANT_AWARE, EFSYS_OPT_FW_SUBVARIANT_AWARE); 1402 MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_UPDATE, 1); 1403 MCDI_IN_SET_DWORD(req, DRV_ATTACH_IN_FIRMWARE_ID, enp->efv); 1404 1405 if (req.emr_in_length >= MC_CMD_DRV_ATTACH_IN_V2_LEN) { 1406 EFX_STATIC_ASSERT(sizeof (enp->en_drv_version) == 1407 MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN); 1408 memcpy(MCDI_IN2(req, char, DRV_ATTACH_IN_V2_DRIVER_VERSION), 1409 enp->en_drv_version, 1410 MC_CMD_DRV_ATTACH_IN_V2_DRIVER_VERSION_LEN); 1411 } 1412 1413 efx_mcdi_execute(enp, &req); 1414 1415 if (req.emr_rc != 0) { 1416 rc = req.emr_rc; 1417 goto fail1; 1418 } 1419 1420 if (req.emr_out_length_used < MC_CMD_DRV_ATTACH_OUT_LEN) { 1421 rc = EMSGSIZE; 1422 goto fail2; 1423 } 1424 1425 return (0); 1426 1427 fail2: 1428 EFSYS_PROBE(fail2); 1429 fail1: 1430 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1431 1432 return (rc); 1433 } 1434 1435 __checkReturn efx_rc_t 1436 efx_mcdi_get_board_cfg( 1437 __in efx_nic_t *enp, 1438 __out_opt uint32_t *board_typep, 1439 __out_opt efx_dword_t *capabilitiesp, 1440 __out_ecount_opt(6) uint8_t mac_addrp[6]) 1441 { 1442 efx_mcdi_iface_t *emip = &(enp->en_mcdi.em_emip); 1443 efx_mcdi_req_t req; 1444 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_BOARD_CFG_IN_LEN, 1445 MC_CMD_GET_BOARD_CFG_OUT_LENMIN); 1446 efx_rc_t rc; 1447 1448 req.emr_cmd = MC_CMD_GET_BOARD_CFG; 1449 req.emr_in_buf = payload; 1450 req.emr_in_length = MC_CMD_GET_BOARD_CFG_IN_LEN; 1451 req.emr_out_buf = payload; 1452 req.emr_out_length = MC_CMD_GET_BOARD_CFG_OUT_LENMIN; 1453 1454 efx_mcdi_execute(enp, &req); 1455 1456 if (req.emr_rc != 0) { 1457 rc = req.emr_rc; 1458 goto fail1; 1459 } 1460 1461 if (req.emr_out_length_used < MC_CMD_GET_BOARD_CFG_OUT_LENMIN) { 1462 rc = EMSGSIZE; 1463 goto fail2; 1464 } 1465 1466 if (mac_addrp != NULL) { 1467 uint8_t *addrp; 1468 1469 if (emip->emi_port == 1) { 1470 addrp = MCDI_OUT2(req, uint8_t, 1471 GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT0); 1472 } else if (emip->emi_port == 2) { 1473 addrp = MCDI_OUT2(req, uint8_t, 1474 GET_BOARD_CFG_OUT_MAC_ADDR_BASE_PORT1); 1475 } else { 1476 rc = EINVAL; 1477 goto fail3; 1478 } 1479 1480 EFX_MAC_ADDR_COPY(mac_addrp, addrp); 1481 } 1482 1483 if (capabilitiesp != NULL) { 1484 if (emip->emi_port == 1) { 1485 *capabilitiesp = *MCDI_OUT2(req, efx_dword_t, 1486 GET_BOARD_CFG_OUT_CAPABILITIES_PORT0); 1487 } else if (emip->emi_port == 2) { 1488 *capabilitiesp = *MCDI_OUT2(req, efx_dword_t, 1489 GET_BOARD_CFG_OUT_CAPABILITIES_PORT1); 1490 } else { 1491 rc = EINVAL; 1492 goto fail4; 1493 } 1494 } 1495 1496 if (board_typep != NULL) { 1497 *board_typep = MCDI_OUT_DWORD(req, 1498 GET_BOARD_CFG_OUT_BOARD_TYPE); 1499 } 1500 1501 return (0); 1502 1503 fail4: 1504 EFSYS_PROBE(fail4); 1505 fail3: 1506 EFSYS_PROBE(fail3); 1507 fail2: 1508 EFSYS_PROBE(fail2); 1509 fail1: 1510 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1511 1512 return (rc); 1513 } 1514 1515 __checkReturn efx_rc_t 1516 efx_mcdi_get_resource_limits( 1517 __in efx_nic_t *enp, 1518 __out_opt uint32_t *nevqp, 1519 __out_opt uint32_t *nrxqp, 1520 __out_opt uint32_t *ntxqp) 1521 { 1522 efx_mcdi_req_t req; 1523 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_RESOURCE_LIMITS_IN_LEN, 1524 MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN); 1525 efx_rc_t rc; 1526 1527 req.emr_cmd = MC_CMD_GET_RESOURCE_LIMITS; 1528 req.emr_in_buf = payload; 1529 req.emr_in_length = MC_CMD_GET_RESOURCE_LIMITS_IN_LEN; 1530 req.emr_out_buf = payload; 1531 req.emr_out_length = MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN; 1532 1533 efx_mcdi_execute(enp, &req); 1534 1535 if (req.emr_rc != 0) { 1536 rc 
= req.emr_rc; 1537 goto fail1; 1538 } 1539 1540 if (req.emr_out_length_used < MC_CMD_GET_RESOURCE_LIMITS_OUT_LEN) { 1541 rc = EMSGSIZE; 1542 goto fail2; 1543 } 1544 1545 if (nevqp != NULL) 1546 *nevqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_EVQ); 1547 if (nrxqp != NULL) 1548 *nrxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_RXQ); 1549 if (ntxqp != NULL) 1550 *ntxqp = MCDI_OUT_DWORD(req, GET_RESOURCE_LIMITS_OUT_TXQ); 1551 1552 return (0); 1553 1554 fail2: 1555 EFSYS_PROBE(fail2); 1556 fail1: 1557 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1558 1559 return (rc); 1560 } 1561 1562 __checkReturn efx_rc_t 1563 efx_mcdi_get_phy_cfg( 1564 __in efx_nic_t *enp) 1565 { 1566 efx_port_t *epp = &(enp->en_port); 1567 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 1568 efx_mcdi_req_t req; 1569 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_CFG_IN_LEN, 1570 MC_CMD_GET_PHY_CFG_OUT_LEN); 1571 #if EFSYS_OPT_NAMES 1572 const char *namep; 1573 size_t namelen; 1574 #endif 1575 uint32_t phy_media_type; 1576 efx_rc_t rc; 1577 1578 req.emr_cmd = MC_CMD_GET_PHY_CFG; 1579 req.emr_in_buf = payload; 1580 req.emr_in_length = MC_CMD_GET_PHY_CFG_IN_LEN; 1581 req.emr_out_buf = payload; 1582 req.emr_out_length = MC_CMD_GET_PHY_CFG_OUT_LEN; 1583 1584 efx_mcdi_execute(enp, &req); 1585 1586 if (req.emr_rc != 0) { 1587 rc = req.emr_rc; 1588 goto fail1; 1589 } 1590 1591 if (req.emr_out_length_used < MC_CMD_GET_PHY_CFG_OUT_LEN) { 1592 rc = EMSGSIZE; 1593 goto fail2; 1594 } 1595 1596 encp->enc_phy_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_TYPE); 1597 #if EFSYS_OPT_NAMES 1598 namep = MCDI_OUT2(req, char, GET_PHY_CFG_OUT_NAME); 1599 namelen = MIN(sizeof (encp->enc_phy_name) - 1, 1600 strnlen(namep, MC_CMD_GET_PHY_CFG_OUT_NAME_LEN)); 1601 (void) memset(encp->enc_phy_name, 0, 1602 sizeof (encp->enc_phy_name)); 1603 memcpy(encp->enc_phy_name, namep, namelen); 1604 #endif /* EFSYS_OPT_NAMES */ 1605 (void) memset(encp->enc_phy_revision, 0, 1606 sizeof (encp->enc_phy_revision)); 1607 memcpy(encp->enc_phy_revision, 1608 MCDI_OUT2(req, char, GET_PHY_CFG_OUT_REVISION), 1609 MIN(sizeof (encp->enc_phy_revision) - 1, 1610 MC_CMD_GET_PHY_CFG_OUT_REVISION_LEN)); 1611 #if EFSYS_OPT_PHY_LED_CONTROL 1612 encp->enc_led_mask = ((1 << EFX_PHY_LED_DEFAULT) | 1613 (1 << EFX_PHY_LED_OFF) | 1614 (1 << EFX_PHY_LED_ON)); 1615 #endif /* EFSYS_OPT_PHY_LED_CONTROL */ 1616 1617 /* Get the media type of the fixed port, if recognised. 
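	 *
	 * The EFX_STATIC_ASSERTs below pin the MC_CMD_MEDIA_* values to the
	 * corresponding efx_phy_media_type_t values, so the raw MCDI value
	 * can be used directly after the range check that follows.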
*/ 1618 EFX_STATIC_ASSERT(MC_CMD_MEDIA_XAUI == EFX_PHY_MEDIA_XAUI); 1619 EFX_STATIC_ASSERT(MC_CMD_MEDIA_CX4 == EFX_PHY_MEDIA_CX4); 1620 EFX_STATIC_ASSERT(MC_CMD_MEDIA_KX4 == EFX_PHY_MEDIA_KX4); 1621 EFX_STATIC_ASSERT(MC_CMD_MEDIA_XFP == EFX_PHY_MEDIA_XFP); 1622 EFX_STATIC_ASSERT(MC_CMD_MEDIA_SFP_PLUS == EFX_PHY_MEDIA_SFP_PLUS); 1623 EFX_STATIC_ASSERT(MC_CMD_MEDIA_BASE_T == EFX_PHY_MEDIA_BASE_T); 1624 EFX_STATIC_ASSERT(MC_CMD_MEDIA_QSFP_PLUS == EFX_PHY_MEDIA_QSFP_PLUS); 1625 phy_media_type = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_MEDIA_TYPE); 1626 epp->ep_fixed_port_type = (efx_phy_media_type_t)phy_media_type; 1627 if (epp->ep_fixed_port_type >= EFX_PHY_MEDIA_NTYPES) 1628 epp->ep_fixed_port_type = EFX_PHY_MEDIA_INVALID; 1629 1630 epp->ep_phy_cap_mask = 1631 MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_SUPPORTED_CAP); 1632 #if EFSYS_OPT_PHY_FLAGS 1633 encp->enc_phy_flags_mask = MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_FLAGS); 1634 #endif /* EFSYS_OPT_PHY_FLAGS */ 1635 1636 encp->enc_port = (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_PRT); 1637 1638 /* Populate internal state */ 1639 encp->enc_mcdi_mdio_channel = 1640 (uint8_t)MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_CHANNEL); 1641 1642 #if EFSYS_OPT_PHY_STATS 1643 encp->enc_mcdi_phy_stat_mask = 1644 MCDI_OUT_DWORD(req, GET_PHY_CFG_OUT_STATS_MASK); 1645 #endif /* EFSYS_OPT_PHY_STATS */ 1646 1647 #if EFSYS_OPT_BIST 1648 encp->enc_bist_mask = 0; 1649 if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, 1650 GET_PHY_CFG_OUT_BIST_CABLE_SHORT)) 1651 encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_SHORT); 1652 if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, 1653 GET_PHY_CFG_OUT_BIST_CABLE_LONG)) 1654 encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_CABLE_LONG); 1655 if (MCDI_OUT_DWORD_FIELD(req, GET_PHY_CFG_OUT_FLAGS, 1656 GET_PHY_CFG_OUT_BIST)) 1657 encp->enc_bist_mask |= (1 << EFX_BIST_TYPE_PHY_NORMAL); 1658 #endif /* EFSYS_OPT_BIST */ 1659 1660 return (0); 1661 1662 fail2: 1663 EFSYS_PROBE(fail2); 1664 fail1: 1665 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1666 1667 return (rc); 1668 } 1669 1670 __checkReturn efx_rc_t 1671 efx_mcdi_firmware_update_supported( 1672 __in efx_nic_t *enp, 1673 __out boolean_t *supportedp) 1674 { 1675 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 1676 efx_rc_t rc; 1677 1678 if (emcop != NULL) { 1679 if ((rc = emcop->emco_feature_supported(enp, 1680 EFX_MCDI_FEATURE_FW_UPDATE, supportedp)) != 0) 1681 goto fail1; 1682 } else { 1683 /* Earlier devices always supported updates */ 1684 *supportedp = B_TRUE; 1685 } 1686 1687 return (0); 1688 1689 fail1: 1690 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1691 1692 return (rc); 1693 } 1694 1695 __checkReturn efx_rc_t 1696 efx_mcdi_macaddr_change_supported( 1697 __in efx_nic_t *enp, 1698 __out boolean_t *supportedp) 1699 { 1700 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 1701 efx_rc_t rc; 1702 1703 if (emcop != NULL) { 1704 if ((rc = emcop->emco_feature_supported(enp, 1705 EFX_MCDI_FEATURE_MACADDR_CHANGE, supportedp)) != 0) 1706 goto fail1; 1707 } else { 1708 /* Earlier devices always supported MAC changes */ 1709 *supportedp = B_TRUE; 1710 } 1711 1712 return (0); 1713 1714 fail1: 1715 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1716 1717 return (rc); 1718 } 1719 1720 __checkReturn efx_rc_t 1721 efx_mcdi_link_control_supported( 1722 __in efx_nic_t *enp, 1723 __out boolean_t *supportedp) 1724 { 1725 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 1726 efx_rc_t rc; 1727 1728 if (emcop != NULL) { 1729 if ((rc = emcop->emco_feature_supported(enp, 1730 EFX_MCDI_FEATURE_LINK_CONTROL, supportedp)) != 0) 
1731 goto fail1; 1732 } else { 1733 /* Earlier devices always supported link control */ 1734 *supportedp = B_TRUE; 1735 } 1736 1737 return (0); 1738 1739 fail1: 1740 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1741 1742 return (rc); 1743 } 1744 1745 __checkReturn efx_rc_t 1746 efx_mcdi_mac_spoofing_supported( 1747 __in efx_nic_t *enp, 1748 __out boolean_t *supportedp) 1749 { 1750 const efx_mcdi_ops_t *emcop = enp->en_mcdi.em_emcop; 1751 efx_rc_t rc; 1752 1753 if (emcop != NULL) { 1754 if ((rc = emcop->emco_feature_supported(enp, 1755 EFX_MCDI_FEATURE_MAC_SPOOFING, supportedp)) != 0) 1756 goto fail1; 1757 } else { 1758 /* Earlier devices always supported MAC spoofing */ 1759 *supportedp = B_TRUE; 1760 } 1761 1762 return (0); 1763 1764 fail1: 1765 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1766 1767 return (rc); 1768 } 1769 1770 #if EFSYS_OPT_BIST 1771 1772 #if EFX_OPTS_EF10() 1773 /* 1774 * Enter bist offline mode. This is a fw mode which puts the NIC into a state 1775 * where memory BIST tests can be run and not much else can interfere or happen. 1776 * A reboot is required to exit this mode. 1777 */ 1778 __checkReturn efx_rc_t 1779 efx_mcdi_bist_enable_offline( 1780 __in efx_nic_t *enp) 1781 { 1782 efx_mcdi_req_t req; 1783 efx_rc_t rc; 1784 1785 EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_IN_LEN == 0); 1786 EFX_STATIC_ASSERT(MC_CMD_ENABLE_OFFLINE_BIST_OUT_LEN == 0); 1787 1788 req.emr_cmd = MC_CMD_ENABLE_OFFLINE_BIST; 1789 req.emr_in_buf = NULL; 1790 req.emr_in_length = 0; 1791 req.emr_out_buf = NULL; 1792 req.emr_out_length = 0; 1793 1794 efx_mcdi_execute(enp, &req); 1795 1796 if (req.emr_rc != 0) { 1797 rc = req.emr_rc; 1798 goto fail1; 1799 } 1800 1801 return (0); 1802 1803 fail1: 1804 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1805 1806 return (rc); 1807 } 1808 #endif /* EFX_OPTS_EF10() */ 1809 1810 __checkReturn efx_rc_t 1811 efx_mcdi_bist_start( 1812 __in efx_nic_t *enp, 1813 __in efx_bist_type_t type) 1814 { 1815 efx_mcdi_req_t req; 1816 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_START_BIST_IN_LEN, 1817 MC_CMD_START_BIST_OUT_LEN); 1818 efx_rc_t rc; 1819 1820 req.emr_cmd = MC_CMD_START_BIST; 1821 req.emr_in_buf = payload; 1822 req.emr_in_length = MC_CMD_START_BIST_IN_LEN; 1823 req.emr_out_buf = payload; 1824 req.emr_out_length = MC_CMD_START_BIST_OUT_LEN; 1825 1826 switch (type) { 1827 case EFX_BIST_TYPE_PHY_NORMAL: 1828 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, MC_CMD_PHY_BIST); 1829 break; 1830 case EFX_BIST_TYPE_PHY_CABLE_SHORT: 1831 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, 1832 MC_CMD_PHY_BIST_CABLE_SHORT); 1833 break; 1834 case EFX_BIST_TYPE_PHY_CABLE_LONG: 1835 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, 1836 MC_CMD_PHY_BIST_CABLE_LONG); 1837 break; 1838 case EFX_BIST_TYPE_MC_MEM: 1839 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, 1840 MC_CMD_MC_MEM_BIST); 1841 break; 1842 case EFX_BIST_TYPE_SAT_MEM: 1843 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, 1844 MC_CMD_PORT_MEM_BIST); 1845 break; 1846 case EFX_BIST_TYPE_REG: 1847 MCDI_IN_SET_DWORD(req, START_BIST_IN_TYPE, 1848 MC_CMD_REG_BIST); 1849 break; 1850 default: 1851 EFSYS_ASSERT(0); 1852 } 1853 1854 efx_mcdi_execute(enp, &req); 1855 1856 if (req.emr_rc != 0) { 1857 rc = req.emr_rc; 1858 goto fail1; 1859 } 1860 1861 return (0); 1862 1863 fail1: 1864 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1865 1866 return (rc); 1867 } 1868 1869 #endif /* EFSYS_OPT_BIST */ 1870 1871 1872 /* Enable logging of some events (e.g. 
link state changes) */ 1873 __checkReturn efx_rc_t 1874 efx_mcdi_log_ctrl( 1875 __in efx_nic_t *enp) 1876 { 1877 efx_mcdi_req_t req; 1878 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_LOG_CTRL_IN_LEN, 1879 MC_CMD_LOG_CTRL_OUT_LEN); 1880 efx_rc_t rc; 1881 1882 req.emr_cmd = MC_CMD_LOG_CTRL; 1883 req.emr_in_buf = payload; 1884 req.emr_in_length = MC_CMD_LOG_CTRL_IN_LEN; 1885 req.emr_out_buf = payload; 1886 req.emr_out_length = MC_CMD_LOG_CTRL_OUT_LEN; 1887 1888 MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST, 1889 MC_CMD_LOG_CTRL_IN_LOG_DEST_EVQ); 1890 MCDI_IN_SET_DWORD(req, LOG_CTRL_IN_LOG_DEST_EVQ, 0); 1891 1892 efx_mcdi_execute(enp, &req); 1893 1894 if (req.emr_rc != 0) { 1895 rc = req.emr_rc; 1896 goto fail1; 1897 } 1898 1899 return (0); 1900 1901 fail1: 1902 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1903 1904 return (rc); 1905 } 1906 1907 1908 #if EFSYS_OPT_MAC_STATS 1909 1910 __checkReturn efx_rc_t 1911 efx_mcdi_mac_stats( 1912 __in efx_nic_t *enp, 1913 __in uint32_t vport_id, 1914 __in_opt efsys_mem_t *esmp, 1915 __in efx_stats_action_t action, 1916 __in uint16_t period_ms) 1917 { 1918 efx_mcdi_req_t req; 1919 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_MAC_STATS_IN_LEN, 1920 MC_CMD_MAC_STATS_V2_OUT_DMA_LEN); 1921 int clear = (action == EFX_STATS_CLEAR); 1922 int upload = (action == EFX_STATS_UPLOAD); 1923 int enable = (action == EFX_STATS_ENABLE_NOEVENTS); 1924 int events = (action == EFX_STATS_ENABLE_EVENTS); 1925 int disable = (action == EFX_STATS_DISABLE); 1926 efx_rc_t rc; 1927 1928 req.emr_cmd = MC_CMD_MAC_STATS; 1929 req.emr_in_buf = payload; 1930 req.emr_in_length = MC_CMD_MAC_STATS_IN_LEN; 1931 req.emr_out_buf = payload; 1932 req.emr_out_length = MC_CMD_MAC_STATS_V2_OUT_DMA_LEN; 1933 1934 MCDI_IN_POPULATE_DWORD_6(req, MAC_STATS_IN_CMD, 1935 MAC_STATS_IN_DMA, upload, 1936 MAC_STATS_IN_CLEAR, clear, 1937 MAC_STATS_IN_PERIODIC_CHANGE, enable | events | disable, 1938 MAC_STATS_IN_PERIODIC_ENABLE, enable | events, 1939 MAC_STATS_IN_PERIODIC_NOEVENT, !events, 1940 MAC_STATS_IN_PERIOD_MS, (enable | events) ? period_ms : 0); 1941 1942 if (enable || events || upload) { 1943 const efx_nic_cfg_t *encp = &enp->en_nic_cfg; 1944 uint32_t bytes; 1945 1946 /* Periodic stats or stats upload require a DMA buffer */ 1947 if (esmp == NULL) { 1948 rc = EINVAL; 1949 goto fail1; 1950 } 1951 1952 if (encp->enc_mac_stats_nstats < MC_CMD_MAC_NSTATS) { 1953 /* MAC stats count too small for legacy MAC stats */ 1954 rc = ENOSPC; 1955 goto fail2; 1956 } 1957 1958 bytes = encp->enc_mac_stats_nstats * sizeof (efx_qword_t); 1959 1960 if (EFSYS_MEM_SIZE(esmp) < bytes) { 1961 /* DMA buffer too small */ 1962 rc = ENOSPC; 1963 goto fail3; 1964 } 1965 1966 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_LO, 1967 EFSYS_MEM_ADDR(esmp) & 0xffffffff); 1968 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_ADDR_HI, 1969 EFSYS_MEM_ADDR(esmp) >> 32); 1970 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_DMA_LEN, bytes); 1971 } 1972 1973 /* 1974 * NOTE: Do not use EVB_PORT_ID_ASSIGNED when disabling periodic stats, 1975 * as this may fail (and leave periodic DMA enabled) if the 1976 * vadapter has already been deleted. 1977 */ 1978 MCDI_IN_SET_DWORD(req, MAC_STATS_IN_PORT_ID, 1979 (disable ? 
EVB_PORT_ID_NULL : vport_id));

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		/* EF10: Expect ENOENT if no DMA queues are initialised */
		if ((req.emr_rc != ENOENT) ||
		    (enp->en_rx_qcount + enp->en_tx_qcount != 0)) {
			rc = req.emr_rc;
			goto fail4;
		}
	}

	return (0);

fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_mac_stats_clear(
	__in		efx_nic_t *enp)
{
	efx_rc_t rc;

	if ((rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, NULL,
	    EFX_STATS_CLEAR, 0)) != 0)
		goto fail1;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_mac_stats_upload(
	__in		efx_nic_t *enp,
	__in		efsys_mem_t *esmp)
{
	efx_rc_t rc;

	/*
	 * The MC DMAs aggregate statistics for our convenience, so we can
	 * avoid having to pull the statistics buffer into the cache to
	 * maintain cumulative statistics.
	 */
	if ((rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp,
	    EFX_STATS_UPLOAD, 0)) != 0)
		goto fail1;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
efx_mcdi_mac_stats_periodic(
	__in		efx_nic_t *enp,
	__in		efsys_mem_t *esmp,
	__in		uint16_t period_ms,
	__in		boolean_t events)
{
	efx_rc_t rc;

	/*
	 * The MC DMAs aggregate statistics for our convenience, so we can
	 * avoid having to pull the statistics buffer into the cache to
	 * maintain cumulative statistics.
	 * Huntington uses a fixed 1sec period.
	 * Medford uses a fixed 1sec period before v6.2.1.1033 firmware.
	 */
	if (period_ms == 0)
		rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, NULL,
		    EFX_STATS_DISABLE, 0);
	else if (events)
		rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp,
		    EFX_STATS_ENABLE_EVENTS, period_ms);
	else
		rc = efx_mcdi_mac_stats(enp, enp->en_vport_id, esmp,
		    EFX_STATS_ENABLE_NOEVENTS, period_ms);

	if (rc != 0)
		goto fail1;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

#endif	/* EFSYS_OPT_MAC_STATS */

#if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()

/*
 * This function returns the PF and VF numbers of a function. If it is a PF,
 * the VF number is 0xffff. The VF number is the index of the VF on that
 * function. So if you have 3 VFs on PF 0, the 3 VFs will return (pf=0, vf=0),
 * (pf=0, vf=1), (pf=0, vf=2) and the PF will return (pf=0, vf=0xffff).
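 *
 * Illustrative use (error handling omitted):
 *
 *	uint32_t pf;
 *	uint32_t vf;
 *	efx_rc_t rc;
 *
 *	rc = efx_mcdi_get_function_info(enp, &pf, &vf);
 *	if (rc == 0 && vf == 0xffff)
 *		...this function is a PF...
 *	else if (rc == 0)
 *		...this function is VF 'vf' of PF 'pf'...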
2094 */ 2095 __checkReturn efx_rc_t 2096 efx_mcdi_get_function_info( 2097 __in efx_nic_t *enp, 2098 __out uint32_t *pfp, 2099 __out_opt uint32_t *vfp) 2100 { 2101 efx_mcdi_req_t req; 2102 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_FUNCTION_INFO_IN_LEN, 2103 MC_CMD_GET_FUNCTION_INFO_OUT_LEN); 2104 efx_rc_t rc; 2105 2106 req.emr_cmd = MC_CMD_GET_FUNCTION_INFO; 2107 req.emr_in_buf = payload; 2108 req.emr_in_length = MC_CMD_GET_FUNCTION_INFO_IN_LEN; 2109 req.emr_out_buf = payload; 2110 req.emr_out_length = MC_CMD_GET_FUNCTION_INFO_OUT_LEN; 2111 2112 efx_mcdi_execute(enp, &req); 2113 2114 if (req.emr_rc != 0) { 2115 rc = req.emr_rc; 2116 goto fail1; 2117 } 2118 2119 if (req.emr_out_length_used < MC_CMD_GET_FUNCTION_INFO_OUT_LEN) { 2120 rc = EMSGSIZE; 2121 goto fail2; 2122 } 2123 2124 *pfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_PF); 2125 if (vfp != NULL) 2126 *vfp = MCDI_OUT_DWORD(req, GET_FUNCTION_INFO_OUT_VF); 2127 2128 return (0); 2129 2130 fail2: 2131 EFSYS_PROBE(fail2); 2132 fail1: 2133 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2134 2135 return (rc); 2136 } 2137 2138 __checkReturn efx_rc_t 2139 efx_mcdi_privilege_mask( 2140 __in efx_nic_t *enp, 2141 __in uint32_t pf, 2142 __in uint32_t vf, 2143 __out uint32_t *maskp) 2144 { 2145 efx_mcdi_req_t req; 2146 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_PRIVILEGE_MASK_IN_LEN, 2147 MC_CMD_PRIVILEGE_MASK_OUT_LEN); 2148 efx_rc_t rc; 2149 2150 req.emr_cmd = MC_CMD_PRIVILEGE_MASK; 2151 req.emr_in_buf = payload; 2152 req.emr_in_length = MC_CMD_PRIVILEGE_MASK_IN_LEN; 2153 req.emr_out_buf = payload; 2154 req.emr_out_length = MC_CMD_PRIVILEGE_MASK_OUT_LEN; 2155 2156 MCDI_IN_POPULATE_DWORD_2(req, PRIVILEGE_MASK_IN_FUNCTION, 2157 PRIVILEGE_MASK_IN_FUNCTION_PF, pf, 2158 PRIVILEGE_MASK_IN_FUNCTION_VF, vf); 2159 2160 efx_mcdi_execute(enp, &req); 2161 2162 if (req.emr_rc != 0) { 2163 rc = req.emr_rc; 2164 goto fail1; 2165 } 2166 2167 if (req.emr_out_length_used < MC_CMD_PRIVILEGE_MASK_OUT_LEN) { 2168 rc = EMSGSIZE; 2169 goto fail2; 2170 } 2171 2172 *maskp = MCDI_OUT_DWORD(req, PRIVILEGE_MASK_OUT_OLD_MASK); 2173 2174 return (0); 2175 2176 fail2: 2177 EFSYS_PROBE(fail2); 2178 fail1: 2179 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2180 2181 return (rc); 2182 } 2183 2184 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */ 2185 2186 __checkReturn efx_rc_t 2187 efx_mcdi_set_workaround( 2188 __in efx_nic_t *enp, 2189 __in uint32_t type, 2190 __in boolean_t enabled, 2191 __out_opt uint32_t *flagsp) 2192 { 2193 efx_mcdi_req_t req; 2194 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_WORKAROUND_IN_LEN, 2195 MC_CMD_WORKAROUND_EXT_OUT_LEN); 2196 efx_rc_t rc; 2197 2198 req.emr_cmd = MC_CMD_WORKAROUND; 2199 req.emr_in_buf = payload; 2200 req.emr_in_length = MC_CMD_WORKAROUND_IN_LEN; 2201 req.emr_out_buf = payload; 2202 req.emr_out_length = MC_CMD_WORKAROUND_OUT_LEN; 2203 2204 MCDI_IN_SET_DWORD(req, WORKAROUND_IN_TYPE, type); 2205 MCDI_IN_SET_DWORD(req, WORKAROUND_IN_ENABLED, enabled ? 
1 : 0); 2206 2207 efx_mcdi_execute_quiet(enp, &req); 2208 2209 if (req.emr_rc != 0) { 2210 rc = req.emr_rc; 2211 goto fail1; 2212 } 2213 2214 if (flagsp != NULL) { 2215 if (req.emr_out_length_used >= MC_CMD_WORKAROUND_EXT_OUT_LEN) 2216 *flagsp = MCDI_OUT_DWORD(req, WORKAROUND_EXT_OUT_FLAGS); 2217 else 2218 *flagsp = 0; 2219 } 2220 2221 return (0); 2222 2223 fail1: 2224 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2225 2226 return (rc); 2227 } 2228 2229 2230 __checkReturn efx_rc_t 2231 efx_mcdi_get_workarounds( 2232 __in efx_nic_t *enp, 2233 __out_opt uint32_t *implementedp, 2234 __out_opt uint32_t *enabledp) 2235 { 2236 efx_mcdi_req_t req; 2237 EFX_MCDI_DECLARE_BUF(payload, 0, MC_CMD_GET_WORKAROUNDS_OUT_LEN); 2238 efx_rc_t rc; 2239 2240 req.emr_cmd = MC_CMD_GET_WORKAROUNDS; 2241 req.emr_in_buf = NULL; 2242 req.emr_in_length = 0; 2243 req.emr_out_buf = payload; 2244 req.emr_out_length = MC_CMD_GET_WORKAROUNDS_OUT_LEN; 2245 2246 efx_mcdi_execute(enp, &req); 2247 2248 if (req.emr_rc != 0) { 2249 rc = req.emr_rc; 2250 goto fail1; 2251 } 2252 2253 if (implementedp != NULL) { 2254 *implementedp = 2255 MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_IMPLEMENTED); 2256 } 2257 2258 if (enabledp != NULL) { 2259 *enabledp = MCDI_OUT_DWORD(req, GET_WORKAROUNDS_OUT_ENABLED); 2260 } 2261 2262 return (0); 2263 2264 fail1: 2265 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2266 2267 return (rc); 2268 } 2269 2270 /* 2271 * Size of media information page in accordance with SFF-8472 and SFF-8436. 2272 * It is used in MCDI interface as well. 2273 */ 2274 #define EFX_PHY_MEDIA_INFO_PAGE_SIZE 0x80 2275 2276 /* 2277 * Transceiver identifiers from SFF-8024 Table 4-1. 2278 */ 2279 #define EFX_SFF_TRANSCEIVER_ID_SFP 0x03 /* SFP/SFP+/SFP28 */ 2280 #define EFX_SFF_TRANSCEIVER_ID_QSFP 0x0c /* QSFP */ 2281 #define EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS 0x0d /* QSFP+ or later */ 2282 #define EFX_SFF_TRANSCEIVER_ID_QSFP28 0x11 /* QSFP28 or later */ 2283 2284 static __checkReturn efx_rc_t 2285 efx_mcdi_get_phy_media_info( 2286 __in efx_nic_t *enp, 2287 __in uint32_t mcdi_page, 2288 __in uint8_t offset, 2289 __in uint8_t len, 2290 __out_bcount(len) uint8_t *data) 2291 { 2292 efx_mcdi_req_t req; 2293 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN, 2294 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN( 2295 EFX_PHY_MEDIA_INFO_PAGE_SIZE)); 2296 efx_rc_t rc; 2297 2298 EFSYS_ASSERT((uint32_t)offset + len <= EFX_PHY_MEDIA_INFO_PAGE_SIZE); 2299 2300 req.emr_cmd = MC_CMD_GET_PHY_MEDIA_INFO; 2301 req.emr_in_buf = payload; 2302 req.emr_in_length = MC_CMD_GET_PHY_MEDIA_INFO_IN_LEN; 2303 req.emr_out_buf = payload; 2304 req.emr_out_length = 2305 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE); 2306 2307 MCDI_IN_SET_DWORD(req, GET_PHY_MEDIA_INFO_IN_PAGE, mcdi_page); 2308 2309 efx_mcdi_execute(enp, &req); 2310 2311 if (req.emr_rc != 0) { 2312 rc = req.emr_rc; 2313 goto fail1; 2314 } 2315 2316 if (req.emr_out_length_used != 2317 MC_CMD_GET_PHY_MEDIA_INFO_OUT_LEN(EFX_PHY_MEDIA_INFO_PAGE_SIZE)) { 2318 rc = EMSGSIZE; 2319 goto fail2; 2320 } 2321 2322 if (MCDI_OUT_DWORD(req, GET_PHY_MEDIA_INFO_OUT_DATALEN) != 2323 EFX_PHY_MEDIA_INFO_PAGE_SIZE) { 2324 rc = EIO; 2325 goto fail3; 2326 } 2327 2328 memcpy(data, 2329 MCDI_OUT2(req, uint8_t, GET_PHY_MEDIA_INFO_OUT_DATA) + offset, 2330 len); 2331 2332 return (0); 2333 2334 fail3: 2335 EFSYS_PROBE(fail3); 2336 fail2: 2337 EFSYS_PROBE(fail2); 2338 fail1: 2339 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2340 2341 return (rc); 2342 } 2343 2344 __checkReturn efx_rc_t 2345 efx_mcdi_phy_module_get_info( 2346 __in efx_nic_t *enp, 2347 
__in uint8_t dev_addr, 2348 __in size_t offset, 2349 __in size_t len, 2350 __out_bcount(len) uint8_t *data) 2351 { 2352 efx_port_t *epp = &(enp->en_port); 2353 efx_rc_t rc; 2354 uint32_t mcdi_lower_page; 2355 uint32_t mcdi_upper_page; 2356 uint8_t id; 2357 2358 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_PROBE); 2359 2360 /* 2361 * Map device address to MC_CMD_GET_PHY_MEDIA_INFO pages. 2362 * The offset plus length interface allows access to page 0 only, 2363 * i.e. non-zero upper pages are not accessible. 2364 * See SFF-8472 section 4 Memory Organization and SFF-8436 section 7.6 2365 * QSFP+ Memory Map for details on how information is structured 2366 * and accessible. 2367 */ 2368 switch (epp->ep_fixed_port_type) { 2369 case EFX_PHY_MEDIA_SFP_PLUS: 2370 case EFX_PHY_MEDIA_QSFP_PLUS: 2371 /* Port type supports modules */ 2372 break; 2373 default: 2374 rc = ENOTSUP; 2375 goto fail1; 2376 } 2377 2378 /* 2379 * For all supported port types, MCDI page 0 offset 0 holds the 2380 * transceiver identifier. Probe to determine the data layout. 2381 * Definitions from SFF-8024 Table 4-1. 2382 */ 2383 rc = efx_mcdi_get_phy_media_info(enp, 2384 0, 0, sizeof(id), &id); 2385 if (rc != 0) 2386 goto fail2; 2387 2388 switch (id) { 2389 case EFX_SFF_TRANSCEIVER_ID_SFP: 2390 /* 2391 * In accordance with SFF-8472 Diagnostic Monitoring 2392 * Interface for Optical Transceivers section 4 Memory 2393 * Organization two 2-wire addresses are defined. 2394 */ 2395 switch (dev_addr) { 2396 /* Base information */ 2397 case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_BASE: 2398 /* 2399 * MCDI page 0 should be used to access lower 2400 * page 0 (0x00 - 0x7f) at the device address 0xA0. 2401 */ 2402 mcdi_lower_page = 0; 2403 /* 2404 * MCDI page 1 should be used to access upper 2405 * page 0 (0x80 - 0xff) at the device address 0xA0. 2406 */ 2407 mcdi_upper_page = 1; 2408 break; 2409 /* Diagnostics */ 2410 case EFX_PHY_MEDIA_INFO_DEV_ADDR_SFP_DDM: 2411 /* 2412 * MCDI page 2 should be used to access lower 2413 * page 0 (0x00 - 0x7f) at the device address 0xA2. 2414 */ 2415 mcdi_lower_page = 2; 2416 /* 2417 * MCDI page 3 should be used to access upper 2418 * page 0 (0x80 - 0xff) at the device address 0xA2. 2419 */ 2420 mcdi_upper_page = 3; 2421 break; 2422 default: 2423 rc = ENOTSUP; 2424 goto fail3; 2425 } 2426 break; 2427 case EFX_SFF_TRANSCEIVER_ID_QSFP: 2428 case EFX_SFF_TRANSCEIVER_ID_QSFP_PLUS: 2429 case EFX_SFF_TRANSCEIVER_ID_QSFP28: 2430 switch (dev_addr) { 2431 case EFX_PHY_MEDIA_INFO_DEV_ADDR_QSFP: 2432 /* 2433 * MCDI page -1 should be used to access lower page 0 2434 * (0x00 - 0x7f). 2435 */ 2436 mcdi_lower_page = (uint32_t)-1; 2437 /* 2438 * MCDI page 0 should be used to access upper page 0 2439 * (0x80 - 0xff).
2440 */ 2441 mcdi_upper_page = 0; 2442 break; 2443 default: 2444 rc = ENOTSUP; 2445 goto fail3; 2446 } 2447 break; 2448 default: 2449 rc = ENOTSUP; 2450 goto fail3; 2451 } 2452 2453 EFX_STATIC_ASSERT(EFX_PHY_MEDIA_INFO_PAGE_SIZE <= 0xFF); 2454 2455 if (offset < EFX_PHY_MEDIA_INFO_PAGE_SIZE) { 2456 size_t read_len = 2457 MIN(len, EFX_PHY_MEDIA_INFO_PAGE_SIZE - offset); 2458 2459 rc = efx_mcdi_get_phy_media_info(enp, 2460 mcdi_lower_page, (uint8_t)offset, (uint8_t)read_len, data); 2461 if (rc != 0) 2462 goto fail4; 2463 2464 data += read_len; 2465 len -= read_len; 2466 2467 offset = 0; 2468 } else { 2469 offset -= EFX_PHY_MEDIA_INFO_PAGE_SIZE; 2470 } 2471 2472 if (len > 0) { 2473 EFSYS_ASSERT3U(len, <=, EFX_PHY_MEDIA_INFO_PAGE_SIZE); 2474 EFSYS_ASSERT3U(offset, <, EFX_PHY_MEDIA_INFO_PAGE_SIZE); 2475 2476 rc = efx_mcdi_get_phy_media_info(enp, 2477 mcdi_upper_page, (uint8_t)offset, (uint8_t)len, data); 2478 if (rc != 0) 2479 goto fail5; 2480 } 2481 2482 return (0); 2483 2484 fail5: 2485 EFSYS_PROBE(fail5); 2486 fail4: 2487 EFSYS_PROBE(fail4); 2488 fail3: 2489 EFSYS_PROBE(fail3); 2490 fail2: 2491 EFSYS_PROBE(fail2); 2492 fail1: 2493 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2494 2495 return (rc); 2496 } 2497 2498 #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() 2499 2500 #define INIT_EVQ_MAXNBUFS MC_CMD_INIT_EVQ_V2_IN_DMA_ADDR_MAXNUM 2501 2502 #if EFX_OPTS_EF10() 2503 # if (INIT_EVQ_MAXNBUFS < EF10_EVQ_MAXNBUFS) 2504 # error "INIT_EVQ_MAXNBUFS too small" 2505 # endif 2506 #endif /* EFX_OPTS_EF10 */ 2507 #if EFSYS_OPT_RIVERHEAD 2508 # if (INIT_EVQ_MAXNBUFS < RHEAD_EVQ_MAXNBUFS) 2509 # error "INIT_EVQ_MAXNBUFS too small" 2510 # endif 2511 #endif /* EFSYS_OPT_RIVERHEAD */ 2512 2513 __checkReturn efx_rc_t 2514 efx_mcdi_init_evq( 2515 __in efx_nic_t *enp, 2516 __in unsigned int instance, 2517 __in efsys_mem_t *esmp, 2518 __in size_t nevs, 2519 __in uint32_t irq, 2520 __in uint32_t us, 2521 __in uint32_t flags, 2522 __in boolean_t low_latency) 2523 { 2524 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp); 2525 efx_mcdi_req_t req; 2526 EFX_MCDI_DECLARE_BUF(payload, 2527 MC_CMD_INIT_EVQ_V2_IN_LEN(INIT_EVQ_MAXNBUFS), 2528 MC_CMD_INIT_EVQ_V2_OUT_LEN); 2529 boolean_t interrupting; 2530 int ev_extended_width; 2531 int ev_cut_through; 2532 int ev_merge; 2533 unsigned int evq_type; 2534 efx_qword_t *dma_addr; 2535 uint64_t addr; 2536 int npages; 2537 int i; 2538 efx_rc_t rc; 2539 2540 npages = efx_evq_nbufs(enp, nevs, flags); 2541 if (npages > INIT_EVQ_MAXNBUFS) { 2542 rc = EINVAL; 2543 goto fail1; 2544 } 2545 2546 req.emr_cmd = MC_CMD_INIT_EVQ; 2547 req.emr_in_buf = payload; 2548 req.emr_in_length = MC_CMD_INIT_EVQ_V2_IN_LEN(npages); 2549 req.emr_out_buf = payload; 2550 req.emr_out_length = MC_CMD_INIT_EVQ_V2_OUT_LEN; 2551 2552 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_SIZE, nevs); 2553 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_INSTANCE, instance); 2554 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_IRQ_NUM, irq); 2555 2556 interrupting = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == 2557 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT); 2558 2559 if (encp->enc_init_evq_v2_supported) { 2560 /* 2561 * On Medford the low latency license is required to enable RX 2562 * and event cut through and to disable RX batching. If event 2563 * queue type in flags is auto, we let the firmware decide the 2564 * settings to use. If the adapter has a low latency license, 2565 * it will choose the best settings for low latency, otherwise 2566 * it will choose the best settings for throughput. 
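 *
 * The switch below maps the EFX_EVQ_FLAGS_TYPE_* values in flags directly
 * onto the MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_* values passed to the MC:
 *   AUTO -> TYPE_AUTO, THROUGHPUT -> TYPE_THROUGHPUT,
 *   LOW_LATENCY -> TYPE_LOW_LATENCY; any other value is rejected with EINVAL.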
2567 */ 2568 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { 2569 case EFX_EVQ_FLAGS_TYPE_AUTO: 2570 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO; 2571 break; 2572 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: 2573 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_THROUGHPUT; 2574 break; 2575 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: 2576 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_LOW_LATENCY; 2577 break; 2578 default: 2579 rc = EINVAL; 2580 goto fail2; 2581 } 2582 /* EvQ type controls merging, no manual settings */ 2583 ev_merge = 0; 2584 ev_cut_through = 0; 2585 } else { 2586 /* EvQ types other than manual are not supported */ 2587 evq_type = MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_MANUAL; 2588 /* 2589 * On Huntington RX and TX event batching can only be requested 2590 * together (even if the datapath firmware doesn't actually 2591 * support RX batching). If event cut through is enabled no RX 2592 * batching will occur. 2593 * 2594 * So always enable RX and TX event batching, and enable event 2595 * cut through if we want low latency operation. 2596 */ 2597 ev_merge = 1; 2598 switch (flags & EFX_EVQ_FLAGS_TYPE_MASK) { 2599 case EFX_EVQ_FLAGS_TYPE_AUTO: 2600 ev_cut_through = low_latency ? 1 : 0; 2601 break; 2602 case EFX_EVQ_FLAGS_TYPE_THROUGHPUT: 2603 ev_cut_through = 0; 2604 break; 2605 case EFX_EVQ_FLAGS_TYPE_LOW_LATENCY: 2606 ev_cut_through = 1; 2607 break; 2608 default: 2609 rc = EINVAL; 2610 goto fail2; 2611 } 2612 } 2613 2614 /* 2615 * On EF100, extended width event queues have a different event 2616 * descriptor layout and are used to support descriptor proxy queues. 2617 */ 2618 ev_extended_width = 0; 2619 #if EFSYS_OPT_EV_EXTENDED_WIDTH 2620 if (encp->enc_init_evq_extended_width_supported) { 2621 if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) 2622 ev_extended_width = 1; 2623 } 2624 #endif 2625 2626 MCDI_IN_POPULATE_DWORD_8(req, INIT_EVQ_V2_IN_FLAGS, 2627 INIT_EVQ_V2_IN_FLAG_INTERRUPTING, interrupting, 2628 INIT_EVQ_V2_IN_FLAG_RPTR_DOS, 0, 2629 INIT_EVQ_V2_IN_FLAG_INT_ARMD, 0, 2630 INIT_EVQ_V2_IN_FLAG_CUT_THRU, ev_cut_through, 2631 INIT_EVQ_V2_IN_FLAG_RX_MERGE, ev_merge, 2632 INIT_EVQ_V2_IN_FLAG_TX_MERGE, ev_merge, 2633 INIT_EVQ_V2_IN_FLAG_TYPE, evq_type, 2634 INIT_EVQ_V2_IN_FLAG_EXT_WIDTH, ev_extended_width); 2635 2636 /* If the value is zero then disable the timer */ 2637 if (us == 0) { 2638 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, 2639 MC_CMD_INIT_EVQ_V2_IN_TMR_MODE_DIS); 2640 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, 0); 2641 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, 0); 2642 } else { 2643 unsigned int ticks; 2644 2645 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) 2646 goto fail3; 2647 2648 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_MODE, 2649 MC_CMD_INIT_EVQ_V2_IN_TMR_INT_HLDOFF); 2650 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_LOAD, ticks); 2651 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_TMR_RELOAD, ticks); 2652 } 2653 2654 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_MODE, 2655 MC_CMD_INIT_EVQ_V2_IN_COUNT_MODE_DIS); 2656 MCDI_IN_SET_DWORD(req, INIT_EVQ_V2_IN_COUNT_THRSHLD, 0); 2657 2658 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_EVQ_V2_IN_DMA_ADDR); 2659 addr = EFSYS_MEM_ADDR(esmp); 2660 2661 for (i = 0; i < npages; i++) { 2662 EFX_POPULATE_QWORD_2(*dma_addr, 2663 EFX_DWORD_1, (uint32_t)(addr >> 32), 2664 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); 2665 2666 dma_addr++; 2667 addr += EFX_BUF_SIZE; 2668 } 2669 2670 efx_mcdi_execute(enp, &req); 2671 2672 if (req.emr_rc != 0) { 2673 rc = req.emr_rc; 2674 goto fail4; 2675 } 2676 2677 if (encp->enc_init_evq_v2_supported) { 2678 if 
(req.emr_out_length_used < MC_CMD_INIT_EVQ_V2_OUT_LEN) { 2679 rc = EMSGSIZE; 2680 goto fail5; 2681 } 2682 EFSYS_PROBE1(mcdi_evq_flags, uint32_t, 2683 MCDI_OUT_DWORD(req, INIT_EVQ_V2_OUT_FLAGS)); 2684 } else { 2685 if (req.emr_out_length_used < MC_CMD_INIT_EVQ_OUT_LEN) { 2686 rc = EMSGSIZE; 2687 goto fail6; 2688 } 2689 } 2690 2691 /* NOTE: ignore the returned IRQ param as firmware does not set it. */ 2692 2693 return (0); 2694 2695 fail6: 2696 EFSYS_PROBE(fail6); 2697 fail5: 2698 EFSYS_PROBE(fail5); 2699 fail4: 2700 EFSYS_PROBE(fail4); 2701 fail3: 2702 EFSYS_PROBE(fail3); 2703 fail2: 2704 EFSYS_PROBE(fail2); 2705 fail1: 2706 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2707 2708 return (rc); 2709 } 2710 2711 __checkReturn efx_rc_t 2712 efx_mcdi_fini_evq( 2713 __in efx_nic_t *enp, 2714 __in uint32_t instance) 2715 { 2716 efx_mcdi_req_t req; 2717 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_EVQ_IN_LEN, 2718 MC_CMD_FINI_EVQ_OUT_LEN); 2719 efx_rc_t rc; 2720 2721 req.emr_cmd = MC_CMD_FINI_EVQ; 2722 req.emr_in_buf = payload; 2723 req.emr_in_length = MC_CMD_FINI_EVQ_IN_LEN; 2724 req.emr_out_buf = payload; 2725 req.emr_out_length = MC_CMD_FINI_EVQ_OUT_LEN; 2726 2727 MCDI_IN_SET_DWORD(req, FINI_EVQ_IN_INSTANCE, instance); 2728 2729 efx_mcdi_execute_quiet(enp, &req); 2730 2731 if (req.emr_rc != 0) { 2732 rc = req.emr_rc; 2733 goto fail1; 2734 } 2735 2736 return (0); 2737 2738 fail1: 2739 /* 2740 * EALREADY is not an error, but indicates that the MC has rebooted and 2741 * that the EVQ has already been destroyed. 2742 */ 2743 if (rc != EALREADY) 2744 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2745 2746 return (rc); 2747 } 2748 2749 __checkReturn efx_rc_t 2750 efx_mcdi_init_rxq( 2751 __in efx_nic_t *enp, 2752 __in uint32_t ndescs, 2753 __in efx_evq_t *eep, 2754 __in uint32_t label, 2755 __in uint32_t instance, 2756 __in efsys_mem_t *esmp, 2757 __in const efx_mcdi_init_rxq_params_t *params) 2758 { 2759 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 2760 efx_mcdi_req_t req; 2761 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_RXQ_V5_IN_LEN, 2762 MC_CMD_INIT_RXQ_V5_OUT_LEN); 2763 int npages = efx_rxq_nbufs(enp, ndescs); 2764 int i; 2765 efx_qword_t *dma_addr; 2766 uint64_t addr; 2767 efx_rc_t rc; 2768 uint32_t dma_mode; 2769 boolean_t want_outer_classes; 2770 boolean_t no_cont_ev; 2771 2772 EFSYS_ASSERT3U(ndescs, <=, encp->enc_rxq_max_ndescs); 2773 2774 if ((esmp == NULL) || 2775 (EFSYS_MEM_SIZE(esmp) < efx_rxq_size(enp, ndescs))) { 2776 rc = EINVAL; 2777 goto fail1; 2778 } 2779 2780 no_cont_ev = (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV); 2781 if ((no_cont_ev == B_TRUE) && (params->disable_scatter == B_FALSE)) { 2782 /* TODO: Support scatter in NO_CONT_EV mode */ 2783 rc = EINVAL; 2784 goto fail2; 2785 } 2786 2787 if (params->ps_buf_size > 0) 2788 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_PACKED_STREAM; 2789 else if (params->es_bufs_per_desc > 0) 2790 dma_mode = MC_CMD_INIT_RXQ_V3_IN_EQUAL_STRIDE_SUPER_BUFFER; 2791 else 2792 dma_mode = MC_CMD_INIT_RXQ_EXT_IN_SINGLE_PACKET; 2793 2794 if (encp->enc_tunnel_encapsulations_supported != 0 && 2795 !params->want_inner_classes) { 2796 /* 2797 * WANT_OUTER_CLASSES can only be specified on hardware which 2798 * supports tunnel encapsulation offloads, even though it is 2799 * effectively the behaviour the hardware gives. 2800 * 2801 * Also, on hardware which does support such offloads, older 2802 * firmware rejects the flag if the offloads are not supported 2803 * by the current firmware variant, which means this may fail if 2804 * the capabilities are not updated when the firmware variant 2805 * changes. 
This is not an issue on newer firmware, as it was 2806 * changed in bug 69842 (v6.4.2.1007) to permit this flag to be 2807 * specified on all firmware variants. 2808 */ 2809 want_outer_classes = B_TRUE; 2810 } else { 2811 want_outer_classes = B_FALSE; 2812 } 2813 2814 req.emr_cmd = MC_CMD_INIT_RXQ; 2815 req.emr_in_buf = payload; 2816 req.emr_in_length = MC_CMD_INIT_RXQ_V5_IN_LEN; 2817 req.emr_out_buf = payload; 2818 req.emr_out_length = MC_CMD_INIT_RXQ_V5_OUT_LEN; 2819 2820 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_SIZE, ndescs); 2821 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_TARGET_EVQ, eep->ee_index); 2822 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_LABEL, label); 2823 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_INSTANCE, instance); 2824 MCDI_IN_POPULATE_DWORD_10(req, INIT_RXQ_EXT_IN_FLAGS, 2825 INIT_RXQ_EXT_IN_FLAG_BUFF_MODE, 0, 2826 INIT_RXQ_EXT_IN_FLAG_HDR_SPLIT, 0, 2827 INIT_RXQ_EXT_IN_FLAG_TIMESTAMP, 0, 2828 INIT_RXQ_EXT_IN_CRC_MODE, 0, 2829 INIT_RXQ_EXT_IN_FLAG_PREFIX, 1, 2830 INIT_RXQ_EXT_IN_FLAG_DISABLE_SCATTER, params->disable_scatter, 2831 INIT_RXQ_EXT_IN_DMA_MODE, 2832 dma_mode, 2833 INIT_RXQ_EXT_IN_PACKED_STREAM_BUFF_SIZE, params->ps_buf_size, 2834 INIT_RXQ_EXT_IN_FLAG_WANT_OUTER_CLASSES, want_outer_classes, 2835 INIT_RXQ_EXT_IN_FLAG_NO_CONT_EV, no_cont_ev); 2836 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_OWNER_ID, 0); 2837 MCDI_IN_SET_DWORD(req, INIT_RXQ_EXT_IN_PORT_ID, enp->en_vport_id); 2838 2839 if (params->es_bufs_per_desc > 0) { 2840 MCDI_IN_SET_DWORD(req, 2841 INIT_RXQ_V3_IN_ES_PACKET_BUFFERS_PER_BUCKET, 2842 params->es_bufs_per_desc); 2843 MCDI_IN_SET_DWORD(req, 2844 INIT_RXQ_V3_IN_ES_MAX_DMA_LEN, params->es_max_dma_len); 2845 MCDI_IN_SET_DWORD(req, 2846 INIT_RXQ_V3_IN_ES_PACKET_STRIDE, params->es_buf_stride); 2847 MCDI_IN_SET_DWORD(req, 2848 INIT_RXQ_V3_IN_ES_HEAD_OF_LINE_BLOCK_TIMEOUT, 2849 params->hol_block_timeout); 2850 } 2851 2852 if (encp->enc_init_rxq_with_buffer_size) 2853 MCDI_IN_SET_DWORD(req, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, 2854 params->buf_size); 2855 2856 MCDI_IN_SET_DWORD(req, INIT_RXQ_V5_IN_RX_PREFIX_ID, params->prefix_id); 2857 2858 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_RXQ_IN_DMA_ADDR); 2859 addr = EFSYS_MEM_ADDR(esmp); 2860 2861 for (i = 0; i < npages; i++) { 2862 EFX_POPULATE_QWORD_2(*dma_addr, 2863 EFX_DWORD_1, (uint32_t)(addr >> 32), 2864 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); 2865 2866 dma_addr++; 2867 addr += EFX_BUF_SIZE; 2868 } 2869 2870 efx_mcdi_execute(enp, &req); 2871 2872 if (req.emr_rc != 0) { 2873 rc = req.emr_rc; 2874 goto fail3; 2875 } 2876 2877 return (0); 2878 2879 fail3: 2880 EFSYS_PROBE(fail3); 2881 fail2: 2882 EFSYS_PROBE(fail2); 2883 fail1: 2884 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2885 2886 return (rc); 2887 } 2888 2889 __checkReturn efx_rc_t 2890 efx_mcdi_fini_rxq( 2891 __in efx_nic_t *enp, 2892 __in uint32_t instance) 2893 { 2894 efx_mcdi_req_t req; 2895 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_RXQ_IN_LEN, 2896 MC_CMD_FINI_RXQ_OUT_LEN); 2897 efx_rc_t rc; 2898 2899 req.emr_cmd = MC_CMD_FINI_RXQ; 2900 req.emr_in_buf = payload; 2901 req.emr_in_length = MC_CMD_FINI_RXQ_IN_LEN; 2902 req.emr_out_buf = payload; 2903 req.emr_out_length = MC_CMD_FINI_RXQ_OUT_LEN; 2904 2905 MCDI_IN_SET_DWORD(req, FINI_RXQ_IN_INSTANCE, instance); 2906 2907 efx_mcdi_execute_quiet(enp, &req); 2908 2909 if (req.emr_rc != 0) { 2910 rc = req.emr_rc; 2911 goto fail1; 2912 } 2913 2914 return (0); 2915 2916 fail1: 2917 /* 2918 * EALREADY is not an error, but indicates that the MC has rebooted and 2919 * that the RXQ has already been destroyed. 
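 *
 * Illustrative caller sketch (assumed usage, not part of this file):
 * teardown paths that must tolerate an MC reboot typically accept EALREADY,
 * e.g.
 *
 *	rc = efx_mcdi_fini_rxq(enp, rxq_instance);
 *	if ((rc != 0) && (rc != EALREADY))
 *		goto fail;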
2920 */ 2921 if (rc != EALREADY) 2922 EFSYS_PROBE1(fail1, efx_rc_t, rc); 2923 2924 return (rc); 2925 } 2926 2927 __checkReturn efx_rc_t 2928 efx_mcdi_init_txq( 2929 __in efx_nic_t *enp, 2930 __in uint32_t ndescs, 2931 __in uint32_t target_evq, 2932 __in uint32_t label, 2933 __in uint32_t instance, 2934 __in uint16_t flags, 2935 __in efsys_mem_t *esmp) 2936 { 2937 efx_mcdi_req_t req; 2938 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_INIT_TXQ_EXT_IN_LEN, 2939 MC_CMD_INIT_TXQ_OUT_LEN); 2940 efx_qword_t *dma_addr; 2941 uint64_t addr; 2942 int npages; 2943 int i; 2944 efx_rc_t rc; 2945 2946 EFSYS_ASSERT(MC_CMD_INIT_TXQ_EXT_IN_DMA_ADDR_MAXNUM >= 2947 efx_txq_nbufs(enp, enp->en_nic_cfg.enc_txq_max_ndescs)); 2948 2949 if ((esmp == NULL) || 2950 (EFSYS_MEM_SIZE(esmp) < efx_txq_size(enp, ndescs))) { 2951 rc = EINVAL; 2952 goto fail1; 2953 } 2954 2955 npages = efx_txq_nbufs(enp, ndescs); 2956 if (MC_CMD_INIT_TXQ_IN_LEN(npages) > sizeof (payload)) { 2957 rc = EINVAL; 2958 goto fail2; 2959 } 2960 2961 req.emr_cmd = MC_CMD_INIT_TXQ; 2962 req.emr_in_buf = payload; 2963 req.emr_in_length = MC_CMD_INIT_TXQ_IN_LEN(npages); 2964 req.emr_out_buf = payload; 2965 req.emr_out_length = MC_CMD_INIT_TXQ_OUT_LEN; 2966 2967 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_SIZE, ndescs); 2968 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_TARGET_EVQ, target_evq); 2969 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_LABEL, label); 2970 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_INSTANCE, instance); 2971 2972 MCDI_IN_POPULATE_DWORD_9(req, INIT_TXQ_IN_FLAGS, 2973 INIT_TXQ_IN_FLAG_BUFF_MODE, 0, 2974 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, 2975 (flags & EFX_TXQ_CKSUM_IPV4) ? 0 : 1, 2976 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, 2977 (flags & EFX_TXQ_CKSUM_TCPUDP) ? 0 : 1, 2978 INIT_TXQ_EXT_IN_FLAG_INNER_IP_CSUM_EN, 2979 (flags & EFX_TXQ_CKSUM_INNER_IPV4) ? 1 : 0, 2980 INIT_TXQ_EXT_IN_FLAG_INNER_TCP_CSUM_EN, 2981 (flags & EFX_TXQ_CKSUM_INNER_TCPUDP) ? 1 : 0, 2982 INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, (flags & EFX_TXQ_FATSOV2) ? 
1 : 0, 2983 INIT_TXQ_IN_FLAG_TCP_UDP_ONLY, 0, 2984 INIT_TXQ_IN_CRC_MODE, 0, 2985 INIT_TXQ_IN_FLAG_TIMESTAMP, 0); 2986 2987 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_OWNER_ID, 0); 2988 MCDI_IN_SET_DWORD(req, INIT_TXQ_IN_PORT_ID, enp->en_vport_id); 2989 2990 dma_addr = MCDI_IN2(req, efx_qword_t, INIT_TXQ_IN_DMA_ADDR); 2991 addr = EFSYS_MEM_ADDR(esmp); 2992 2993 for (i = 0; i < npages; i++) { 2994 EFX_POPULATE_QWORD_2(*dma_addr, 2995 EFX_DWORD_1, (uint32_t)(addr >> 32), 2996 EFX_DWORD_0, (uint32_t)(addr & 0xffffffff)); 2997 2998 dma_addr++; 2999 addr += EFX_BUF_SIZE; 3000 } 3001 3002 efx_mcdi_execute(enp, &req); 3003 3004 if (req.emr_rc != 0) { 3005 rc = req.emr_rc; 3006 goto fail3; 3007 } 3008 3009 return (0); 3010 3011 fail3: 3012 EFSYS_PROBE(fail3); 3013 fail2: 3014 EFSYS_PROBE(fail2); 3015 fail1: 3016 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3017 3018 return (rc); 3019 } 3020 3021 __checkReturn efx_rc_t 3022 efx_mcdi_fini_txq( 3023 __in efx_nic_t *enp, 3024 __in uint32_t instance) 3025 { 3026 efx_mcdi_req_t req; 3027 EFX_MCDI_DECLARE_BUF(payload, MC_CMD_FINI_TXQ_IN_LEN, 3028 MC_CMD_FINI_TXQ_OUT_LEN); 3029 efx_rc_t rc; 3030 3031 req.emr_cmd = MC_CMD_FINI_TXQ; 3032 req.emr_in_buf = payload; 3033 req.emr_in_length = MC_CMD_FINI_TXQ_IN_LEN; 3034 req.emr_out_buf = payload; 3035 req.emr_out_length = MC_CMD_FINI_TXQ_OUT_LEN; 3036 3037 MCDI_IN_SET_DWORD(req, FINI_TXQ_IN_INSTANCE, instance); 3038 3039 efx_mcdi_execute_quiet(enp, &req); 3040 3041 if (req.emr_rc != 0) { 3042 rc = req.emr_rc; 3043 goto fail1; 3044 } 3045 3046 return (0); 3047 3048 fail1: 3049 /* 3050 * EALREADY is not an error, but indicates that the MC has rebooted and 3051 * that the TXQ has already been destroyed. 3052 */ 3053 if (rc != EALREADY) 3054 EFSYS_PROBE1(fail1, efx_rc_t, rc); 3055 3056 return (rc); 3057 } 3058 3059 #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */ 3060 3061 #endif /* EFSYS_OPT_MCDI */ 3062
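
/*
 * Illustrative setup/teardown ordering (assumed caller code, not part of this
 * file) for the EF10/Riverhead queue helpers above: the event queue is
 * created before any queues that target it and destroyed after them, e.g.
 *
 *	rc = efx_mcdi_init_evq(enp, evq_index, evq_mem, nevs, irq, us, flags,
 *	    B_FALSE);
 *	if (rc == 0)
 *		rc = efx_mcdi_init_rxq(enp, ndescs, eep, label, rxq_index,
 *		    rxq_mem, &rxq_params);
 *	...
 *	(void) efx_mcdi_fini_rxq(enp, rxq_index);
 *	(void) efx_mcdi_fini_evq(enp, evq_index);
 */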