/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_branch_prediction.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"


/* Initial delay when waiting for event queue init complete event */
#define SFC_EVQ_INIT_BACKOFF_START_US	(1)
/* Maximum delay between event queue polling attempts */
#define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
/* Approximate timeout for event queue initialization */
#define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)

/* Management event queue polling period in microseconds */
#define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)

static const char *
sfc_evq_type2str(enum sfc_evq_type type)
{
	switch (type) {
	case SFC_EVQ_TYPE_MGMT:
		return "mgmt-evq";
	case SFC_EVQ_TYPE_RX:
		return "rx-evq";
	case SFC_EVQ_TYPE_TX:
		return "tx-evq";
	default:
		SFC_ASSERT(B_FALSE);
		return NULL;
	}
}

static boolean_t
sfc_ev_initialized(void *arg)
{
	struct sfc_evq *evq = arg;

	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
		   evq->init_state == SFC_EVQ_STARTED);

	evq->init_state = SFC_EVQ_STARTED;

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa,
		"EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
		evq->evq_index, label, id, size, flags);
	return B_TRUE;
}

static boolean_t
sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_efx_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	if (unlikely(evq->exception))
		goto done;

	rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);

	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * An Rx event with no new descriptors done and zero length
		 * is used to abort a scattered packet when there is no room
		 * for the tail.
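		 * In that case the event's id equals pending - 1 modulo the
		 * ring size, so stop == pending_id and delta computes to 0.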
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart",
				evq->evq_index, rxq->dp.dpq.queue_id,
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart",
			evq->evq_index, rxq->dp.dpq.queue_id,
			id, delta, flags);

		goto done;
	}

	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}

static boolean_t
sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	     __rte_unused uint32_t size, __rte_unused uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	SFC_ASSERT(evq->sa->priv.dp_rx->qrx_ev != NULL);
	return evq->sa->priv.dp_rx->qrx_ev(dp_rxq, id);
}

static boolean_t
sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id,
		 uint32_t pkt_count, uint16_t flags)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa,
		"EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x",
		evq->evq_index, label, id, pkt_count, flags);
	return B_TRUE;
}

/* It is not actually used on the datapath, but required for RxQ flush */
static boolean_t
sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id,
		__rte_unused uint32_t pkt_count, __rte_unused uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	if (evq->sa->priv.dp_rx->qrx_ps_ev != NULL)
		return evq->sa->priv.dp_rx->qrx_ps_ev(dp_rxq, id);
	else
		return B_FALSE;
}

static boolean_t
sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
		evq->evq_index, label, id);
	return B_TRUE;
}

static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_efx_txq *txq;
	unsigned int stop;
	unsigned int delta;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_efx_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq->evq == evq);

	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
		goto done;

	stop = (id + 1) & txq->ptr_mask;
	id = txq->pending & txq->ptr_mask;

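	/*
	 * The ring indices may wrap around ptr_mask. For example, with
	 * ptr_mask = 0x3f (64 entries), pending at 60 and stop at 4 the
	 * completion covers 64 - 60 + 4 = 8 descriptors.
	 */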
	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);

	txq->pending += delta;

done:
	return B_FALSE;
}

static boolean_t
sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	SFC_ASSERT(evq->sa->priv.dp_tx->qtx_ev != NULL);
	return evq->sa->priv.dp_tx->qtx_ev(dp_txq, id);
}

static boolean_t
sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data)
{
	struct sfc_evq *evq = arg;

	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
		return B_FALSE;

	evq->exception = B_TRUE;
	sfc_warn(evq->sa,
		 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
		 " needs recovery",
		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
		 "UNKNOWN",
		 code, data, evq->evq_index);

	return B_TRUE;
}

static boolean_t
sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	RTE_SET_USED(rxq);

	sfc_rx_qflush_done(sfc_rxq_info_by_dp_rxq(dp_rxq));

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	RTE_SET_USED(rxq);

	sfc_rx_qflush_failed(sfc_rxq_info_by_dp_rxq(dp_rxq));

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
		evq->evq_index, txq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_txq *txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->hw_index == txq_hw_index);
	SFC_ASSERT(txq->evq == evq);
	RTE_SET_USED(txq);

	sfc_tx_qflush_done(sfc_txq_info_by_dp_txq(dp_txq));

	return B_FALSE;
}

static boolean_t
sfc_ev_software(void *arg, uint16_t magic)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
		evq->evq_index, magic);
	return B_TRUE;
}

static boolean_t
sfc_ev_sram(void *arg, uint32_t code)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
		evq->evq_index, code);
	return B_TRUE;
}

static boolean_t
sfc_ev_wake_up(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_timer(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected link change event",
		evq->evq_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;
	struct sfc_adapter *sa = evq->sa;
	struct rte_eth_link new_link;

	sfc_port_link_mode_to_info(link_mode, &new_link);
	if (rte_eth_linkstatus_set(sa->eth_dev, &new_link) == 0)
		evq->sa->port.lsc_seq++;

	return B_FALSE;
}

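/*
 * One callback table exists per EVQ role (management, EFX datapath Rx/Tx,
 * native datapath Rx/Tx), so event dispatch needs no per-event branching
 * on the queue type; event types a queue must never see are routed to the
 * nop handlers, which log an error and request that event processing stop.
 */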
static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_nop_rx,
	.eec_rx_ps = sfc_ev_nop_rx_ps,
	.eec_tx = sfc_ev_nop_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_efx_rx,
	.eec_rx_ps = sfc_ev_nop_rx_ps,
	.eec_tx = sfc_ev_nop_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_dp_rx,
	.eec_rx_ps = sfc_ev_dp_rx_ps,
	.eec_tx = sfc_ev_nop_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_nop_rx,
	.eec_rx_ps = sfc_ev_nop_rx_ps,
	.eec_tx = sfc_ev_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_nop_rx,
	.eec_rx_ps = sfc_ev_nop_rx_ps,
	.eec_tx = sfc_ev_dp_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};

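/*
 * Poll the EVQ and dispatch events via the installed callback table.
 * On an EVQ exception the affected Rx/Tx queue is restarted, but only
 * if the adapter lock can be taken without blocking; otherwise the
 * exception flag stays set and recovery is retried on the next poll.
 */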
void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronizing the DMA memory for reading is not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);

	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
		struct sfc_adapter *sa = evq->sa;
		int rc;

		if (evq->dp_rxq != NULL) {
			unsigned int rxq_sw_index;

			rxq_sw_index = evq->dp_rxq->dpq.queue_id;

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}

		if (evq->dp_txq != NULL) {
			unsigned int txq_sw_index;

			txq_sw_index = evq->dp_txq->dpq.queue_id;

			sfc_warn(sa,
				 "restart TxQ %u because of exception on its EvQ %u",
				 txq_sw_index, evq->evq_index);

			sfc_tx_qstop(sa, txq_sw_index);
			rc = sfc_tx_qstart(sa, txq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart TxQ %u",
					txq_sw_index);
		}

		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}

void
sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
		if (sa->mgmt_evq_running)
			sfc_ev_qpoll(sa->mgmt_evq);

		rte_spinlock_unlock(&sa->mgmt_evq_lock);
	}
}

int
sfc_ev_qprime(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
	return efx_ev_qprime(evq->common, evq->read_ptr);
}

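/*
 * An illustrative interrupt-driven usage sketch (not driver code): an
 * event queue interrupt handler drains the queue and then re-primes it
 * so that the next event raises a new interrupt:
 *
 *	sfc_ev_qpoll(evq);
 *	if (sfc_ev_qprime(evq) != 0)
 *		...fall back to periodic polling...
 */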
/* Event queue HW index allocation scheme is described in sfc_ev.h. */
int
sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
{
	struct sfc_adapter *sa = evq->sa;
	efsys_mem_t *esmp;
	uint32_t evq_flags = sa->evq_flags;
	unsigned int total_delay_us;
	unsigned int delay_us;
	int rc;

	sfc_log_init(sa, "hw_index=%u", hw_index);

	esmp = &evq->mem;

	evq->evq_index = hw_index;

	/* Clear all events */
	(void)memset((void *)esmp->esm_base, 0xff,
		     efx_evq_size(sa->nic, evq->entries, evq_flags));

	if ((sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index) ||
	    (sa->intr.rxq_intr && evq->dp_rxq != NULL))
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	else
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;

	evq->init_state = SFC_EVQ_STARTING;

	/* Create the common code event queue */
	rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
			    0 /* unused on EF10 */, 0, evq_flags,
			    &evq->common);
	if (rc != 0)
		goto fail_ev_qcreate;

	SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
	if (evq->dp_rxq != NULL) {
		if (strcmp(sa->priv.dp_rx->dp.name,
			   SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_rx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_rx;
	} else if (evq->dp_txq != NULL) {
		if (strcmp(sa->priv.dp_tx->dp.name,
			   SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_tx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_tx;
	} else {
		evq->callbacks = &sfc_ev_callbacks;
	}

	/*
	 * Poll once to ensure that eec_initialized callback is invoked in
	 * case the hardware does not support INIT_DONE events. If the
	 * hardware supports INIT_DONE events, this will do nothing, and the
	 * corresponding event will be processed by sfc_ev_qpoll() below.
	 */
	efx_ev_qcreate_check_init_done(evq->common, evq->callbacks, evq);

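	/*
	 * The wait below starts at SFC_EVQ_INIT_BACKOFF_START_US and
	 * doubles the delay on each iteration (1, 2, 4, ... us) up to the
	 * SFC_EVQ_INIT_BACKOFF_MAX_US cap of 10 ms, giving up after
	 * roughly SFC_EVQ_INIT_TIMEOUT_US (2 s) in total.
	 */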
	/* Wait for the initialization event */
	total_delay_us = 0;
	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
	do {
		sfc_ev_qpoll(evq);

		/*
		 * Check to see if the initialization complete indication
		 * has been posted by the hardware.
		 */
		if (evq->init_state == SFC_EVQ_STARTED)
			goto done;

		/* Give event queue some time to init */
		rte_delay_us(delay_us);

		total_delay_us += delay_us;

		/* Exponential backoff */
		delay_us *= 2;
		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

	rc = ETIMEDOUT;
	goto fail_timedout;

done:
	return 0;

fail_timedout:
	efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
	evq->init_state = SFC_EVQ_INITIALIZED;
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qstop(struct sfc_evq *evq)
{
	if (evq == NULL)
		return;

	sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);

	if (evq->init_state != SFC_EVQ_STARTED)
		return;

	evq->init_state = SFC_EVQ_INITIALIZED;
	evq->callbacks = NULL;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

	efx_ev_qdestroy(evq->common);

	evq->evq_index = 0;
}

static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;
	int rc;

	sfc_ev_mgmt_qpoll(sa);

	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
			       sfc_ev_mgmt_periodic_qpoll, sa);
	if (rc == -ENOTSUP) {
		sfc_warn(sa, "alarms are not supported");
		sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
	} else if (rc != 0) {
		sfc_err(sa,
			"cannot rearm management EVQ polling alarm (rc=%d)",
			rc);
	}
}

static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}

static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}

int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start management EVQ used for global events */

	/*
	 * Management event queue start polls the queue, but it cannot
	 * interfere with other polling contexts since mgmt_evq_running
	 * is still false.
	 */
	rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sa->mgmt_evq_running = true;
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	if (sa->intr.lsc_intr) {
		rc = sfc_ev_qprime(sa->mgmt_evq);
		if (rc != 0)
			goto fail_mgmt_evq_prime;
	}

	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), it is required to process link status changes and
	 * other device-level events to avoid an unrecoverable error due
	 * to event queue overflow.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when corresponding
	 * Rx/Tx queue is started/stopped.
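	 * (The bound EVQ is started from sfc_rx_qstart()/sfc_tx_qstart()
	 * and stopped from sfc_rx_qstop()/sfc_tx_qstop().)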
780 */ 781 782 return 0; 783 784 fail_mgmt_evq_prime: 785 sfc_ev_qstop(sa->mgmt_evq); 786 787 fail_mgmt_evq_start: 788 efx_ev_fini(sa->nic); 789 790 fail_ev_init: 791 sfc_log_init(sa, "failed %d", rc); 792 return rc; 793 } 794 795 void 796 sfc_ev_stop(struct sfc_adapter *sa) 797 { 798 sfc_log_init(sa, "entry"); 799 800 sfc_ev_mgmt_periodic_qpoll_stop(sa); 801 802 rte_spinlock_lock(&sa->mgmt_evq_lock); 803 sa->mgmt_evq_running = false; 804 rte_spinlock_unlock(&sa->mgmt_evq_lock); 805 806 sfc_ev_qstop(sa->mgmt_evq); 807 808 efx_ev_fini(sa->nic); 809 } 810 811 int 812 sfc_ev_qinit(struct sfc_adapter *sa, 813 enum sfc_evq_type type, unsigned int type_index, 814 unsigned int entries, int socket_id, struct sfc_evq **evqp) 815 { 816 struct sfc_evq *evq; 817 int rc; 818 819 sfc_log_init(sa, "type=%s type_index=%u", 820 sfc_evq_type2str(type), type_index); 821 822 SFC_ASSERT(rte_is_power_of_2(entries)); 823 824 rc = ENOMEM; 825 evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE, 826 socket_id); 827 if (evq == NULL) 828 goto fail_evq_alloc; 829 830 evq->sa = sa; 831 evq->type = type; 832 evq->entries = entries; 833 834 /* Allocate DMA space */ 835 rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index, 836 efx_evq_size(sa->nic, evq->entries, sa->evq_flags), 837 socket_id, &evq->mem); 838 if (rc != 0) 839 goto fail_dma_alloc; 840 841 evq->init_state = SFC_EVQ_INITIALIZED; 842 843 sa->evq_count++; 844 845 *evqp = evq; 846 847 return 0; 848 849 fail_dma_alloc: 850 rte_free(evq); 851 852 fail_evq_alloc: 853 854 sfc_log_init(sa, "failed %d", rc); 855 return rc; 856 } 857 858 void 859 sfc_ev_qfini(struct sfc_evq *evq) 860 { 861 struct sfc_adapter *sa = evq->sa; 862 863 SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED); 864 865 sfc_dma_free(sa, &evq->mem); 866 867 rte_free(evq); 868 869 SFC_ASSERT(sa->evq_count > 0); 870 sa->evq_count--; 871 } 872 873 static int 874 sfc_kvarg_perf_profile_handler(__rte_unused const char *key, 875 const char *value_str, void *opaque) 876 { 877 uint32_t *value = opaque; 878 879 if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0) 880 *value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT; 881 else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0) 882 *value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY; 883 else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0) 884 *value = EFX_EVQ_FLAGS_TYPE_AUTO; 885 else 886 return -EINVAL; 887 888 return 0; 889 } 890 891 int 892 sfc_ev_attach(struct sfc_adapter *sa) 893 { 894 int rc; 895 896 sfc_log_init(sa, "entry"); 897 898 sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT; 899 rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE, 900 sfc_kvarg_perf_profile_handler, 901 &sa->evq_flags); 902 if (rc != 0) { 903 sfc_err(sa, "invalid %s parameter value", 904 SFC_KVARG_PERF_PROFILE); 905 goto fail_kvarg_perf_profile; 906 } 907 908 sa->mgmt_evq_index = 0; 909 rte_spinlock_init(&sa->mgmt_evq_lock); 910 911 rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, sa->evq_min_entries, 912 sa->socket_id, &sa->mgmt_evq); 913 if (rc != 0) 914 goto fail_mgmt_evq_init; 915 916 /* 917 * Rx/Tx event queues are created/destroyed when corresponding 918 * Rx/Tx queue is created/destroyed. 
919 */ 920 921 return 0; 922 923 fail_mgmt_evq_init: 924 925 fail_kvarg_perf_profile: 926 sfc_log_init(sa, "failed %d", rc); 927 return rc; 928 } 929 930 void 931 sfc_ev_detach(struct sfc_adapter *sa) 932 { 933 sfc_log_init(sa, "entry"); 934 935 sfc_ev_qfini(sa->mgmt_evq); 936 937 if (sa->evq_count != 0) 938 sfc_err(sa, "%u EvQs are not destroyed before detach", 939 sa->evq_count); 940 } 941