/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2016-2018 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_branch_prediction.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"


/* Initial delay when waiting for event queue init complete event */
#define SFC_EVQ_INIT_BACKOFF_START_US	(1)
/* Maximum delay between event queue polling attempts */
#define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
/* Approximate timeout for event queue initialization */
#define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)

/* Management event queue polling period in microseconds */
#define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)

static const char *
sfc_evq_type2str(enum sfc_evq_type type)
{
	switch (type) {
	case SFC_EVQ_TYPE_MGMT:
		return "mgmt-evq";
	case SFC_EVQ_TYPE_RX:
		return "rx-evq";
	case SFC_EVQ_TYPE_TX:
		return "tx-evq";
	default:
		SFC_ASSERT(B_FALSE);
		return NULL;
	}
}

static boolean_t
sfc_ev_initialized(void *arg)
{
	struct sfc_evq *evq = arg;

	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
		   evq->init_state == SFC_EVQ_STARTED);

	evq->init_state = SFC_EVQ_STARTED;

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa,
		"EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
		evq->evq_index, label, id, size, flags);
	return B_TRUE;
}

static boolean_t
sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_efx_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	if (unlikely(evq->exception))
		goto done;

	rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);

	/* Number of descriptors completed by this event (ring-wrap aware) */
	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * An Rx event with no new descriptors done and zero length
		 * is used to abort a scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart",
				evq->evq_index, rxq->dp.dpq.queue_id,
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart",
			evq->evq_index, rxq->dp.dpq.queue_id,
			id, delta, flags);

		goto done;
	}

	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}

static boolean_t
sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	     __rte_unused uint32_t size, __rte_unused uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	SFC_ASSERT(evq->sa->dp_rx->qrx_ev != NULL);
	return evq->sa->dp_rx->qrx_ev(dp_rxq, id);
}

static boolean_t
sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id,
		 uint32_t pkt_count, uint16_t flags)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa,
		"EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x",
		evq->evq_index, label, id, pkt_count, flags);
	return B_TRUE;
}

/* Not used on the datapath itself, but required to handle RxQ flush */
static boolean_t
sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id,
		__rte_unused uint32_t pkt_count, __rte_unused uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	if (evq->sa->dp_rx->qrx_ps_ev != NULL)
		return evq->sa->dp_rx->qrx_ps_ev(dp_rxq, id);
	else
		return B_FALSE;
}

static boolean_t
sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
		evq->evq_index, label, id);
	return B_TRUE;
}

static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_efx_txq *txq;
	unsigned int stop;
	unsigned int delta;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_efx_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq->evq == evq);

	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
		goto done;

	/* "id" is reused below as the first pending descriptor index */
	stop = (id + 1) & txq->ptr_mask;
	id = txq->pending & txq->ptr_mask;

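	/*
	 * Worked example (hypothetical values): with ptr_mask = 0x3f,
	 * pending at 0x3e and a completion event for id = 0x01,
	 * stop = 0x02 and delta = 0x40 - 0x3e + 0x02 = 4, i.e. descriptors
	 * 0x3e, 0x3f, 0x00 and 0x01 completed across the ring wrap.
	 */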
	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);

	txq->pending += delta;

done:
	return B_FALSE;
}

static boolean_t
sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	SFC_ASSERT(evq->sa->dp_tx->qtx_ev != NULL);
	return evq->sa->dp_tx->qtx_ev(dp_txq, id);
}

static boolean_t
sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data)
{
	struct sfc_evq *evq = arg;

	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
		return B_FALSE;

	evq->exception = B_TRUE;
	sfc_warn(evq->sa,
		 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
		 " needs recovery",
		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
		 "UNKNOWN",
		 code, data, evq->evq_index);

	return B_TRUE;
}

static boolean_t
sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_done(rxq);

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_failed(rxq);

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
		evq->evq_index, txq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_txq *txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->hw_index == txq_hw_index);
	SFC_ASSERT(txq->evq == evq);
	sfc_tx_qflush_done(txq);

	return B_FALSE;
}

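/*
 * Software, SRAM, wake-up and timer events are never expected by this
 * driver: the handlers below just log an error and return B_TRUE to
 * request that event queue processing be aborted.
 */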
static boolean_t
sfc_ev_software(void *arg, uint16_t magic)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
		evq->evq_index, magic);
	return B_TRUE;
}

static boolean_t
sfc_ev_sram(void *arg, uint32_t code)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
		evq->evq_index, code);
	return B_TRUE;
}

static boolean_t
sfc_ev_wake_up(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_timer(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected link change event",
		evq->evq_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;
	struct sfc_adapter *sa = evq->sa;
	struct rte_eth_link new_link;

	sfc_port_link_mode_to_info(link_mode, &new_link);
	if (rte_eth_linkstatus_set(sa->eth_dev, &new_link))
		evq->sa->port.lsc_seq++;

	return B_FALSE;
}

/*
 * Per-EVQ callback tables: one set per event queue role (management,
 * EFX vs. native datapath Rx, EFX vs. native datapath Tx). The
 * appropriate table is selected in sfc_ev_qstart().
 */
static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_nop_rx,
	.eec_rx_ps = sfc_ev_nop_rx_ps,
	.eec_tx = sfc_ev_nop_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_efx_rx,
	.eec_rx_ps = sfc_ev_nop_rx_ps,
	.eec_tx = sfc_ev_nop_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_dp_rx,
	.eec_rx_ps = sfc_ev_dp_rx_ps,
	.eec_tx = sfc_ev_nop_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_nop_rx,
	.eec_rx_ps = sfc_ev_nop_rx_ps,
	.eec_tx = sfc_ev_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_nop_rx,
	.eec_rx_ps = sfc_ev_nop_rx_ps,
	.eec_tx = sfc_ev_dp_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};


void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronizing the DMA memory for reading is not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);

	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
		struct sfc_adapter *sa = evq->sa;
		int rc;

		if (evq->dp_rxq != NULL) {
			unsigned int rxq_sw_index;

			rxq_sw_index = evq->dp_rxq->dpq.queue_id;

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}

		if (evq->dp_txq != NULL) {
			unsigned int txq_sw_index;

			txq_sw_index = evq->dp_txq->dpq.queue_id;

			sfc_warn(sa,
				 "restart TxQ %u because of exception on its EvQ %u",
				 txq_sw_index, evq->evq_index);

			sfc_tx_qstop(sa, txq_sw_index);
			rc = sfc_tx_qstart(sa, txq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart TxQ %u",
					txq_sw_index);
		}

		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}

void
sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
		if (sa->mgmt_evq_running)
			sfc_ev_qpoll(sa->mgmt_evq);

		rte_spinlock_unlock(&sa->mgmt_evq_lock);
	}
}

int
sfc_ev_qprime(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
	return efx_ev_qprime(evq->common, evq->read_ptr);
}

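/*
 * Usage sketch (illustrative; the actual caller lives elsewhere in the
 * driver): when the link status change interrupt is used, the interrupt
 * handler is expected to poll the management EVQ and then re-prime it so
 * that the next event raises an interrupt again:
 *
 *	sfc_ev_qpoll(sa->mgmt_evq);
 *	(void)sfc_ev_qprime(sa->mgmt_evq);
 *
 * The poll-mode datapath never primes Rx/Tx EVQs (see sfc_ev_qpoll()).
 */
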
/* Event queue HW index allocation scheme is described in sfc_ev.h. */
int
sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
{
	struct sfc_adapter *sa = evq->sa;
	efsys_mem_t *esmp;
	uint32_t evq_flags = sa->evq_flags;
	unsigned int total_delay_us;
	unsigned int delay_us;
	int rc;

	sfc_log_init(sa, "hw_index=%u", hw_index);

	esmp = &evq->mem;

	evq->evq_index = hw_index;

	/* Clear all events (an unused event entry is all-ones) */
	(void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));

	if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	else
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;

	/* Create the common code event queue */
	rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
			    0 /* unused on EF10 */, 0, evq_flags,
			    &evq->common);
	if (rc != 0)
		goto fail_ev_qcreate;

	/* Select the callback table matching the event queue role */
	SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
	if (evq->dp_rxq != NULL) {
		if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_rx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_rx;
	} else if (evq->dp_txq != NULL) {
		if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_tx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_tx;
	} else {
		evq->callbacks = &sfc_ev_callbacks;
	}

	evq->init_state = SFC_EVQ_STARTING;

	/* Wait for the initialization event */
	total_delay_us = 0;
	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
	do {
		sfc_ev_qpoll(evq);

		/*
		 * Check to see if the initialization complete indication
		 * has been posted by the hardware.
		 */
		if (evq->init_state == SFC_EVQ_STARTED)
			goto done;

		/* Give the event queue some time to init */
		rte_delay_us(delay_us);

		total_delay_us += delay_us;

		/* Exponential backoff capped at the maximum poll interval */
		delay_us *= 2;
		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

	rc = ETIMEDOUT;
	goto fail_timedout;

done:
	return 0;

fail_timedout:
	evq->init_state = SFC_EVQ_INITIALIZED;
	efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qstop(struct sfc_evq *evq)
{
	if (evq == NULL)
		return;

	sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);

	if (evq->init_state != SFC_EVQ_STARTED)
		return;

	evq->init_state = SFC_EVQ_INITIALIZED;
	evq->callbacks = NULL;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

	efx_ev_qdestroy(evq->common);

	evq->evq_index = 0;
}

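/*
 * Management EVQ periodic polling below is self-rearming: each
 * sfc_ev_mgmt_periodic_qpoll() run re-sets the alarm, so a single
 * sfc_ev_mgmt_periodic_qpoll_start() call keeps the management event
 * queue polled every SFC_MGMT_EV_QPOLL_PERIOD_US until the alarm is
 * cancelled by sfc_ev_mgmt_periodic_qpoll_stop().
 */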
static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;
	int rc;

	sfc_ev_mgmt_qpoll(sa);

	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
			       sfc_ev_mgmt_periodic_qpoll, sa);
	if (rc == -ENOTSUP) {
		sfc_warn(sa, "alarms are not supported");
		sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
	} else if (rc != 0) {
		sfc_err(sa,
			"cannot rearm management EVQ polling alarm (rc=%d)",
			rc);
	}
}

static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}

static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}

int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start management EVQ used for global events */

	/*
	 * Starting the management event queue polls it, but this cannot
	 * interfere with other polling contexts since mgmt_evq_running
	 * is still false.
	 */
	rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sa->mgmt_evq_running = true;
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	if (sa->intr.lsc_intr) {
		rc = sfc_ev_qprime(sa->mgmt_evq);
		if (rc != 0)
			goto fail_mgmt_evq_prime;
	}

	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), polling is the only way to process link status
	 * changes and other device-level events, and thus to avoid an
	 * unrecoverable error due to event queue overflow.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when the corresponding
	 * Rx/Tx queue is started/stopped.
	 */

	return 0;

fail_mgmt_evq_prime:
	sfc_ev_qstop(sa->mgmt_evq);

fail_mgmt_evq_start:
	efx_ev_fini(sa->nic);

fail_ev_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_stop(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_mgmt_periodic_qpoll_stop(sa);

	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sa->mgmt_evq_running = false;
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	sfc_ev_qstop(sa->mgmt_evq);

	efx_ev_fini(sa->nic);
}

int
sfc_ev_qinit(struct sfc_adapter *sa,
	     enum sfc_evq_type type, unsigned int type_index,
	     unsigned int entries, int socket_id, struct sfc_evq **evqp)
{
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "type=%s type_index=%u",
		     sfc_evq_type2str(type), type_index);

	SFC_ASSERT(rte_is_power_of_2(entries));

	rc = ENOMEM;
	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (evq == NULL)
		goto fail_evq_alloc;

	evq->sa = sa;
	evq->type = type;
	evq->entries = entries;

	/* Allocate DMA space */
	rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
			   EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	evq->init_state = SFC_EVQ_INITIALIZED;

	sa->evq_count++;

	*evqp = evq;

	return 0;

fail_dma_alloc:
	rte_free(evq);

fail_evq_alloc:

	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qfini(struct sfc_evq *evq)
{
	struct sfc_adapter *sa = evq->sa;

	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);

	sfc_dma_free(sa, &evq->mem);

	rte_free(evq);

	SFC_ASSERT(sa->evq_count > 0);
	sa->evq_count--;
}

static int
sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
			       const char *value_str, void *opaque)
{
	uint32_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_AUTO;
	else
		return -EINVAL;

	return 0;
}

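/*
 * Illustrative usage (hypothetical command line, not part of this file):
 * the event queue performance profile is chosen via the
 * SFC_KVARG_PERF_PROFILE device argument handled above, e.g.
 * "perf_profile=low-latency" appended to the PCI device specification.
 * Unrecognized values make sfc_ev_attach() below fail.
 */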
int
sfc_ev_attach(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
				sfc_kvarg_perf_profile_handler,
				&sa->evq_flags);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value",
			SFC_KVARG_PERF_PROFILE);
		goto fail_kvarg_perf_profile;
	}

	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id, &sa->mgmt_evq);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when the corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:

fail_kvarg_perf_profile:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_qfini(sa->mgmt_evq);

	if (sa->evq_count != 0)
		sfc_err(sa, "%u EvQs are not destroyed before detach",
			sa->evq_count);
}