/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2018-2019 Solarflare Communications Inc.
 */

#include "efx.h"
#include "efx_impl.h"


#if EFSYS_OPT_RIVERHEAD

	__checkReturn	efx_rc_t
rhead_board_cfg(
	__in		efx_nic_t *enp)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t end_padding;
	uint32_t bandwidth;
	efx_rc_t rc;

	if ((rc = efx_mcdi_nic_board_cfg(enp)) != 0)
		goto fail1;

	/*
	 * The tunnel encapsulation initialization happens unconditionally
	 * for now.
	 */
	encp->enc_tunnel_encapsulations_supported =
	    (1u << EFX_TUNNEL_PROTOCOL_VXLAN) |
	    (1u << EFX_TUNNEL_PROTOCOL_GENEVE) |
	    (1u << EFX_TUNNEL_PROTOCOL_NVGRE);

	/*
	 * Software limitation inherited from EF10. This limit is not
	 * increased since the hardware does not report it; the limit is
	 * handled internally, resulting in a tunnel add error when there is
	 * no space for more UDP tunnels.
	 */
	encp->enc_tunnel_config_udp_entries_max = EFX_TUNNEL_MAXNENTRIES;

	encp->enc_clk_mult = 1; /* not used for Riverhead */

	/*
	 * FIXME: There are TxSend and TxSeg descriptors on Riverhead.
	 * TxSeg is bigger than TxSend.
	 */
	encp->enc_tx_dma_desc_size_max = EFX_MASK32(ESF_GZ_TX_SEND_LEN);
	/* No boundary crossing limits */
	encp->enc_tx_dma_desc_boundary = 0;

	/*
	 * Initialise design parameters to either a runtime value read from
	 * the design parameters area or the well-known default value
	 * (see SF-119689-TC section 4.4 for details).
	 * FIXME: Read design parameters area values.
	 */
	encp->enc_tx_tso_max_header_ndescs =
	    ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT;
	encp->enc_tx_tso_max_header_length =
	    ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT;
	encp->enc_tx_tso_max_payload_ndescs =
	    ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT;
	encp->enc_tx_tso_max_payload_length =
	    ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT;
	encp->enc_tx_tso_max_nframes =
	    ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT;

	/*
	 * Riverhead does not put any restrictions on the TCP header offset.
	 */
	encp->enc_tx_tso_tcp_header_offset_limit = UINT32_MAX;

	/*
	 * Set resource limits for MC_CMD_ALLOC_VIS. Note that we cannot use
	 * MC_CMD_GET_RESOURCE_LIMITS here as that reports the available
	 * resources (allocated to this PCIe function), which is zero until
	 * after we have allocated VIs.
	 */
	encp->enc_evq_limit = 1024;
	encp->enc_rxq_limit = EFX_RXQ_LIMIT_TARGET;
	encp->enc_txq_limit = EFX_TXQ_LIMIT_TARGET;

	encp->enc_buftbl_limit = UINT32_MAX;

	/*
	 * Riverhead event queue creation completes
	 * immediately (no initial event).
	 */
	encp->enc_evq_init_done_ev_supported = B_FALSE;

	/*
	 * Enable firmware workarounds for hardware errata.
	 * Expected responses are:
	 *  - 0 (zero):
	 *	Success: workaround enabled or disabled as requested.
	 *  - MC_CMD_ERR_ENOSYS (reported as ENOTSUP):
	 *	Firmware does not support the MC_CMD_WORKAROUND request
	 *	(assume that the workaround is not supported).
	 *  - MC_CMD_ERR_ENOENT (reported as ENOENT):
	 *	Firmware does not support the requested workaround.
	 *  - MC_CMD_ERR_EPERM (reported as EACCES):
	 *	Unprivileged function cannot enable/disable workarounds.
	 *
	 * See efx_mcdi_request_errcode() for MCDI error translations.
	 */

	/*
	 * The replay engine on Riverhead should suppress duplicate packets
	 * delivered to the same RxQ (e.g. when both an exact multicast
	 * filter and the all-multicast filter match).
	 */
	encp->enc_bug26807_workaround = B_FALSE;

	/*
	 * Checksums for TSO sends should always be correct on Riverhead.
	 * FIXME: revisit when TSO support is implemented.
	 */
	encp->enc_bug61297_workaround = B_FALSE;

	encp->enc_evq_max_nevs = RHEAD_EVQ_MAXNEVS;
	encp->enc_evq_min_nevs = RHEAD_EVQ_MINNEVS;
	encp->enc_rxq_max_ndescs = RHEAD_RXQ_MAXNDESCS;
	encp->enc_rxq_min_ndescs = RHEAD_RXQ_MINNDESCS;
	encp->enc_txq_max_ndescs = RHEAD_TXQ_MAXNDESCS;
	encp->enc_txq_min_ndescs = RHEAD_TXQ_MINNDESCS;

	/* Riverhead FW does not support event queue timers yet. */
	encp->enc_evq_timer_quantum_ns = 0;
	encp->enc_evq_timer_max_us = 0;

#if EFSYS_OPT_EV_EXTENDED_WIDTH
	encp->enc_ev_ew_desc_size = RHEAD_EVQ_EW_DESC_SIZE;
#else
	encp->enc_ev_ew_desc_size = 0;
#endif

	encp->enc_ev_desc_size = RHEAD_EVQ_DESC_SIZE;
	encp->enc_rx_desc_size = RHEAD_RXQ_DESC_SIZE;
	encp->enc_tx_desc_size = RHEAD_TXQ_DESC_SIZE;

	/* No required alignment for WPTR updates */
	encp->enc_rx_push_align = 1;

	/* Riverhead supports a single Rx prefix size. */
	encp->enc_rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN;

	/* Alignment for receive packet DMA buffers. */
	encp->enc_rx_buf_align_start = 1;

	/* Get the RX DMA end padding alignment configuration. */
	if ((rc = efx_mcdi_get_rxdp_config(enp, &end_padding)) != 0) {
		if (rc != EACCES)
			goto fail2;

		/* Assume largest tail padding size supported by hardware. */
		end_padding = 128;
	}
	encp->enc_rx_buf_align_end = end_padding;

	/*
	 * Riverhead stores a single global copy of VPD, not per-PF as on
	 * Huntington.
	 */
	encp->enc_vpd_is_global = B_TRUE;

	rc = ef10_nic_get_port_mode_bandwidth(enp, &bandwidth);
	if (rc != 0)
		goto fail3;
	encp->enc_required_pcie_bandwidth_mbps = bandwidth;
	encp->enc_max_pcie_link_gen = EFX_PCIE_LINK_SPEED_GEN3;

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
rhead_nic_probe(
	__in		efx_nic_t *enp)
{
	const efx_nic_ops_t *enop = enp->en_enop;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp));

	/* Read and clear any assertion state */
	if ((rc = efx_mcdi_read_assertion(enp)) != 0)
		goto fail1;

	/* Exit the assertion handler */
	if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
		if (rc != EACCES)
			goto fail2;

	if ((rc = efx_mcdi_drv_attach(enp, B_TRUE)) != 0)
		goto fail3;

	/* Get remaining controller-specific board config */
	if ((rc = enop->eno_board_cfg(enp)) != 0)
		goto fail4;

	/*
	 * Set default driver config limits (based on board config).
	 *
	 * FIXME: For now allocate a fixed number of VIs which is likely to be
	 * sufficient and small enough to allow multiple functions on the same
	 * port.
	 */
	edcp->edc_min_vi_count = edcp->edc_max_vi_count =
	    MIN(128, MAX(encp->enc_rxq_limit, encp->enc_txq_limit));

	/*
	 * The client driver must configure and enable PIO buffer support,
	 * but there is no PIO support on Riverhead anyway.
	 */
	edcp->edc_max_piobuf_count = 0;
	edcp->edc_pio_alloc_size = 0;

#if EFSYS_OPT_MAC_STATS
	/* Wipe the MAC statistics */
	if ((rc = efx_mcdi_mac_stats_clear(enp)) != 0)
		goto fail5;
#endif

#if EFSYS_OPT_LOOPBACK
	if ((rc = efx_mcdi_get_loopback_modes(enp)) != 0)
		goto fail6;
#endif

	return (0);

#if EFSYS_OPT_LOOPBACK
fail6:
	EFSYS_PROBE(fail6);
#endif
#if EFSYS_OPT_MAC_STATS
fail5:
	EFSYS_PROBE(fail5);
#endif
fail4:
	EFSYS_PROBE(fail4);
fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
rhead_nic_set_drv_limits(
	__inout		efx_nic_t *enp,
	__in		efx_drv_limits_t *edlp)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
	efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
	uint32_t min_evq_count, max_evq_count;
	uint32_t min_rxq_count, max_rxq_count;
	uint32_t min_txq_count, max_txq_count;
	efx_rc_t rc;

	if (edlp == NULL) {
		rc = EINVAL;
		goto fail1;
	}

	/* Get minimum required and maximum usable VI limits */
	min_evq_count = MIN(edlp->edl_min_evq_count, encp->enc_evq_limit);
	min_rxq_count = MIN(edlp->edl_min_rxq_count, encp->enc_rxq_limit);
	min_txq_count = MIN(edlp->edl_min_txq_count, encp->enc_txq_limit);

	edcp->edc_min_vi_count =
	    MAX(min_evq_count, MAX(min_rxq_count, min_txq_count));

	max_evq_count = MIN(edlp->edl_max_evq_count, encp->enc_evq_limit);
	max_rxq_count = MIN(edlp->edl_max_rxq_count, encp->enc_rxq_limit);
	max_txq_count = MIN(edlp->edl_max_txq_count, encp->enc_txq_limit);

	edcp->edc_max_vi_count =
	    MAX(max_evq_count, MAX(max_rxq_count, max_txq_count));

	/* There is no PIO support on Riverhead */
	edcp->edc_max_piobuf_count = 0;
	edcp->edc_pio_alloc_size = 0;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
rhead_nic_reset(
	__in		efx_nic_t *enp)
{
	efx_rc_t rc;

	/* rhead_nic_reset() is called to recover from BADASSERT failures. */
	if ((rc = efx_mcdi_read_assertion(enp)) != 0)
		goto fail1;
	if ((rc = efx_mcdi_exit_assertion_handler(enp)) != 0)
		goto fail2;

	if ((rc = efx_mcdi_entity_reset(enp)) != 0)
		goto fail3;

	/* Clear RX/TX DMA queue errors */
	enp->en_reset_flags &= ~(EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR);

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
rhead_nic_init(
	__in		efx_nic_t *enp)
{
	const efx_drv_cfg_t *edcp = &(enp->en_drv_cfg);
	uint32_t min_vi_count, max_vi_count;
	uint32_t vi_count, vi_base, vi_shift;
	uint32_t vi_window_size;
	efx_rc_t rc;
	boolean_t alloc_vadaptor = B_TRUE;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp));
	EFSYS_ASSERT3U(edcp->edc_max_piobuf_count, ==, 0);

	/* Enable reporting of some events (e.g. link change) */
	if ((rc = efx_mcdi_log_ctrl(enp)) != 0)
		goto fail1;

	min_vi_count = edcp->edc_min_vi_count;
	max_vi_count = edcp->edc_max_vi_count;

	/* Ensure that the previously attached driver's VIs are freed */
	if ((rc = efx_mcdi_free_vis(enp)) != 0)
		goto fail2;

	/*
	 * Reserve VI resources (EVQ+RXQ+TXQ) for this PCIe function. If this
	 * fails then retrying the request for fewer VI resources may succeed.
	 */
	vi_count = 0;
	if ((rc = efx_mcdi_alloc_vis(enp, min_vi_count, max_vi_count,
	    &vi_base, &vi_count, &vi_shift)) != 0)
		goto fail3;

	EFSYS_PROBE2(vi_alloc, uint32_t, vi_base, uint32_t, vi_count);

	if (vi_count < min_vi_count) {
		rc = ENOMEM;
		goto fail4;
	}

	enp->en_arch.ef10.ena_vi_base = vi_base;
	enp->en_arch.ef10.ena_vi_count = vi_count;
	enp->en_arch.ef10.ena_vi_shift = vi_shift;

	EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, !=,
	    EFX_VI_WINDOW_SHIFT_INVALID);
	EFSYS_ASSERT3U(enp->en_nic_cfg.enc_vi_window_shift, <=,
	    EFX_VI_WINDOW_SHIFT_64K);
	vi_window_size = 1U << enp->en_nic_cfg.enc_vi_window_shift;

	/* Save UC memory mapping details */
	enp->en_arch.ef10.ena_uc_mem_map_offset = 0;
	enp->en_arch.ef10.ena_uc_mem_map_size =
	    vi_window_size * enp->en_arch.ef10.ena_vi_count;

	/* No WC memory mapping since PIO is not supported */
	enp->en_arch.ef10.ena_pio_write_vi_base = 0;
	enp->en_arch.ef10.ena_wc_mem_map_offset = 0;
	enp->en_arch.ef10.ena_wc_mem_map_size = 0;

	enp->en_nic_cfg.enc_mcdi_max_payload_length = MCDI_CTL_SDU_LEN_MAX_V2;

	/*
	 * For the SR-IOV use case, a vAdaptor is allocated for the PF and the
	 * associated VFs during NIC initialization, when the vSwitch is
	 * created and vPorts are allocated. Hence, skip vAdaptor allocation
	 * for EVB and update the vPort ID in the NIC structure with the one
	 * allocated for the PF.
	 */

	enp->en_vport_id = EVB_PORT_ID_ASSIGNED;
#if EFSYS_OPT_EVB
	if ((enp->en_vswitchp != NULL) && (enp->en_vswitchp->ev_evcp != NULL)) {
		/* For EVB use vPort allocated on vSwitch */
		enp->en_vport_id = enp->en_vswitchp->ev_evcp->evc_vport_id;
		alloc_vadaptor = B_FALSE;
	}
#endif
	if (alloc_vadaptor != B_FALSE) {
		/* Allocate a vAdaptor attached to our upstream vPort/pPort */
		if ((rc = ef10_upstream_port_vadaptor_alloc(enp)) != 0)
			goto fail5;
	}

	return (0);

fail5:
	EFSYS_PROBE(fail5);

fail4:
	EFSYS_PROBE(fail4);

	(void) efx_mcdi_free_vis(enp);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	efx_rc_t
rhead_nic_get_vi_pool(
	__in		efx_nic_t *enp,
	__out		uint32_t *vi_countp)
{
	/*
	 * Report VIs that the client driver can use.
	 * Do not include VIs used for PIO buffer writes.
	 */
	*vi_countp = enp->en_arch.ef10.ena_vi_count;

	return (0);
}

	__checkReturn	efx_rc_t
rhead_nic_get_bar_region(
	__in		efx_nic_t *enp,
	__in		efx_nic_region_t region,
	__out		uint32_t *offsetp,
	__out		size_t *sizep)
{
	efx_rc_t rc;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF100(enp));

	/*
	 * TODO: Specify host memory mapping alignment and granularity
	 * in efx_drv_limits_t so that they can be taken into account
	 * when allocating extra VIs for PIO writes.
	 */
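	/*
	 * Both regions are reported from the mapping details saved by
	 * rhead_nic_init(); the WC region is always empty since PIO is
	 * not supported on Riverhead.
	 */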
	switch (region) {
	case EFX_REGION_VI:
		/* UC mapped memory BAR region for VI registers */
		*offsetp = enp->en_arch.ef10.ena_uc_mem_map_offset;
		*sizep = enp->en_arch.ef10.ena_uc_mem_map_size;
		break;

	case EFX_REGION_PIO_WRITE_VI:
		/* WC mapped memory BAR region for piobuf writes */
		*offsetp = enp->en_arch.ef10.ena_wc_mem_map_offset;
		*sizep = enp->en_arch.ef10.ena_wc_mem_map_size;
		break;

	default:
		rc = EINVAL;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

	__checkReturn	boolean_t
rhead_nic_hw_unavailable(
	__in		efx_nic_t *enp)
{
	efx_dword_t dword;

	if (enp->en_reset_flags & EFX_RESET_HW_UNAVAIL)
		return (B_TRUE);

	EFX_BAR_FCW_READD(enp, ER_GZ_MC_SFT_STATUS, &dword);
	if (EFX_DWORD_FIELD(dword, EFX_DWORD_0) == 0xffffffff)
		goto unavail;

	return (B_FALSE);

unavail:
	rhead_nic_set_hw_unavailable(enp);

	return (B_TRUE);
}

			void
rhead_nic_set_hw_unavailable(
	__in		efx_nic_t *enp)
{
	EFSYS_PROBE(hw_unavail);
	enp->en_reset_flags |= EFX_RESET_HW_UNAVAIL;
}

			void
rhead_nic_fini(
	__in		efx_nic_t *enp)
{
	boolean_t do_vadaptor_free = B_TRUE;

#if EFSYS_OPT_EVB
	if (enp->en_vswitchp != NULL) {
		/*
		 * For SR-IOV the vAdaptor is freed with the vSwitch,
		 * so do not free it here.
		 */
		do_vadaptor_free = B_FALSE;
	}
#endif
	if (do_vadaptor_free != B_FALSE) {
		(void) efx_mcdi_vadaptor_free(enp, enp->en_vport_id);
		enp->en_vport_id = EVB_PORT_ID_NULL;
	}

	(void) efx_mcdi_free_vis(enp);
	enp->en_arch.ef10.ena_vi_count = 0;
}

			void
rhead_nic_unprobe(
	__in		efx_nic_t *enp)
{
	(void) efx_mcdi_drv_attach(enp, B_FALSE);
}

#if EFSYS_OPT_DIAG

	__checkReturn	efx_rc_t
rhead_nic_register_test(
	__in		efx_nic_t *enp)
{
	efx_rc_t rc;

	/* FIXME */
	_NOTE(ARGUNUSED(enp))
	_NOTE(CONSTANTCONDITION)
	if (B_FALSE) {
		rc = ENOTSUP;
		goto fail1;
	}
	/* FIXME */

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

#endif	/* EFSYS_OPT_DIAG */

	__checkReturn	efx_rc_t
rhead_nic_xilinx_cap_tbl_read_ef100_locator(
	__in		efsys_bar_t *esbp,
	__in		efsys_dma_addr_t offset,
	__out		efx_bar_region_t *ebrp)
{
	efx_oword_t entry;
	uint32_t rev;
	uint32_t len;
	efx_rc_t rc;

	/*
	 * Xilinx Capabilities Table requires 32bit aligned reads.
	 * See SF-119689-TC section 4.2.2 "Discovery Steps".
	 */
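	/*
	 * Read the entry header first: the format/revision and size fields
	 * are validated before the EF100 locator payload is fetched.
	 */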
	EFSYS_BAR_READD(esbp, offset +
	    (EFX_LOW_BIT(ESF_GZ_CFGBAR_ENTRY_FORMAT) / 8),
	    &entry.eo_dword[0], B_FALSE);
	EFSYS_BAR_READD(esbp, offset +
	    (EFX_LOW_BIT(ESF_GZ_CFGBAR_ENTRY_SIZE) / 8),
	    &entry.eo_dword[1], B_FALSE);

	rev = EFX_OWORD_FIELD32(entry, ESF_GZ_CFGBAR_ENTRY_REV);
	len = EFX_OWORD_FIELD32(entry, ESF_GZ_CFGBAR_ENTRY_SIZE);

	if (rev != ESE_GZ_CFGBAR_ENTRY_REV_EF100 ||
	    len < ESE_GZ_CFGBAR_ENTRY_SIZE_EF100) {
		rc = EINVAL;
		goto fail1;
	}

	EFSYS_BAR_READD(esbp, offset +
	    (EFX_LOW_BIT(ESF_GZ_CFGBAR_EF100_BAR) / 8),
	    &entry.eo_dword[2], B_FALSE);

	ebrp->ebr_index = EFX_OWORD_FIELD32(entry, ESF_GZ_CFGBAR_EF100_BAR);
	ebrp->ebr_offset = EFX_OWORD_FIELD32(entry,
	    ESF_GZ_CFGBAR_EF100_FUNC_CTL_WIN_OFF) <<
	    ESE_GZ_EF100_FUNC_CTL_WIN_OFF_SHIFT;
	ebrp->ebr_type = EFX_BAR_TYPE_MEM;
	ebrp->ebr_length = 0;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

#endif	/* EFSYS_OPT_RIVERHEAD */