1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright (c) 2016-2018 Solarflare Communications Inc. 4 * All rights reserved. 5 * 6 * This software was jointly developed between OKTET Labs (under contract 7 * for Solarflare) and Solarflare Communications, Inc. 8 */ 9 10 #include <rte_debug.h> 11 #include <rte_cycles.h> 12 #include <rte_alarm.h> 13 #include <rte_branch_prediction.h> 14 15 #include "efx.h" 16 17 #include "sfc.h" 18 #include "sfc_debug.h" 19 #include "sfc_log.h" 20 #include "sfc_ev.h" 21 #include "sfc_rx.h" 22 #include "sfc_tx.h" 23 #include "sfc_kvargs.h" 24 25 26 /* Initial delay when waiting for event queue init complete event */ 27 #define SFC_EVQ_INIT_BACKOFF_START_US (1) 28 /* Maximum delay between event queue polling attempts */ 29 #define SFC_EVQ_INIT_BACKOFF_MAX_US (10 * 1000) 30 /* Event queue init approx timeout */ 31 #define SFC_EVQ_INIT_TIMEOUT_US (2 * US_PER_S) 32 33 /* Management event queue polling period in microseconds */ 34 #define SFC_MGMT_EV_QPOLL_PERIOD_US (US_PER_S) 35 36 static const char * 37 sfc_evq_type2str(enum sfc_evq_type type) 38 { 39 switch (type) { 40 case SFC_EVQ_TYPE_MGMT: 41 return "mgmt-evq"; 42 case SFC_EVQ_TYPE_RX: 43 return "rx-evq"; 44 case SFC_EVQ_TYPE_TX: 45 return "tx-evq"; 46 default: 47 SFC_ASSERT(B_FALSE); 48 return NULL; 49 } 50 } 51 52 static boolean_t 53 sfc_ev_initialized(void *arg) 54 { 55 struct sfc_evq *evq = arg; 56 57 /* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */ 58 SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING || 59 evq->init_state == SFC_EVQ_STARTED); 60 61 evq->init_state = SFC_EVQ_STARTED; 62 63 return B_FALSE; 64 } 65 66 static boolean_t 67 sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id, 68 uint32_t size, uint16_t flags) 69 { 70 struct sfc_evq *evq = arg; 71 72 sfc_err(evq->sa, 73 "EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x", 74 evq->evq_index, label, id, size, flags); 75 return B_TRUE; 76 } 77 78 static boolean_t 79 sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id, 80 uint32_t size, uint16_t flags) 81 { 82 struct sfc_evq *evq = arg; 83 struct sfc_efx_rxq *rxq; 84 unsigned int stop; 85 unsigned int pending_id; 86 unsigned int delta; 87 unsigned int i; 88 struct sfc_efx_rx_sw_desc *rxd; 89 90 if (unlikely(evq->exception)) 91 goto done; 92 93 rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq); 94 95 SFC_ASSERT(rxq != NULL); 96 SFC_ASSERT(rxq->evq == evq); 97 SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED); 98 99 stop = (id + 1) & rxq->ptr_mask; 100 pending_id = rxq->pending & rxq->ptr_mask; 101 delta = (stop >= pending_id) ? (stop - pending_id) : 102 (rxq->ptr_mask + 1 - pending_id + stop); 103 104 if (delta == 0) { 105 /* 106 * Rx event with no new descriptors done and zero length 107 * is used to abort scattered packet when there is no room 108 * for the tail. 109 */ 110 if (unlikely(size != 0)) { 111 evq->exception = B_TRUE; 112 sfc_err(evq->sa, 113 "EVQ %u RxQ %u invalid RX abort " 114 "(id=%#x size=%u flags=%#x); needs restart", 115 evq->evq_index, rxq->dp.dpq.queue_id, 116 id, size, flags); 117 goto done; 118 } 119 120 /* Add discard flag to the first fragment */ 121 rxq->sw_desc[pending_id].flags |= EFX_DISCARD; 122 /* Remove continue flag from the last fragment */ 123 rxq->sw_desc[id].flags &= ~EFX_PKT_CONT; 124 } else if (unlikely(delta > rxq->batch_max)) { 125 evq->exception = B_TRUE; 126 127 sfc_err(evq->sa, 128 "EVQ %u RxQ %u completion out of order " 129 "(id=%#x delta=%u flags=%#x); needs restart", 130 evq->evq_index, rxq->dp.dpq.queue_id, 131 id, delta, flags); 132 133 goto done; 134 } 135 136 for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) { 137 rxd = &rxq->sw_desc[i]; 138 139 rxd->flags = flags; 140 141 SFC_ASSERT(size < (1 << 16)); 142 rxd->size = (uint16_t)size; 143 } 144 145 rxq->pending += delta; 146 147 done: 148 return B_FALSE; 149 } 150 151 static boolean_t 152 sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id, 153 __rte_unused uint32_t size, __rte_unused uint16_t flags) 154 { 155 struct sfc_evq *evq = arg; 156 struct sfc_dp_rxq *dp_rxq; 157 158 dp_rxq = evq->dp_rxq; 159 SFC_ASSERT(dp_rxq != NULL); 160 161 SFC_ASSERT(evq->sa->dp_rx->qrx_ev != NULL); 162 return evq->sa->dp_rx->qrx_ev(dp_rxq, id); 163 } 164 165 static boolean_t 166 sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id) 167 { 168 struct sfc_evq *evq = arg; 169 170 sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x", 171 evq->evq_index, label, id); 172 return B_TRUE; 173 } 174 175 static boolean_t 176 sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id) 177 { 178 struct sfc_evq *evq = arg; 179 struct sfc_dp_txq *dp_txq; 180 struct sfc_efx_txq *txq; 181 unsigned int stop; 182 unsigned int delta; 183 184 dp_txq = evq->dp_txq; 185 SFC_ASSERT(dp_txq != NULL); 186 187 txq = sfc_efx_txq_by_dp_txq(dp_txq); 188 SFC_ASSERT(txq->evq == evq); 189 190 if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0)) 191 goto done; 192 193 stop = (id + 1) & txq->ptr_mask; 194 id = txq->pending & txq->ptr_mask; 195 196 delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop); 197 198 txq->pending += delta; 199 200 done: 201 return B_FALSE; 202 } 203 204 static boolean_t 205 sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id) 206 { 207 struct sfc_evq *evq = arg; 208 struct sfc_dp_txq *dp_txq; 209 210 dp_txq = evq->dp_txq; 211 SFC_ASSERT(dp_txq != NULL); 212 213 SFC_ASSERT(evq->sa->dp_tx->qtx_ev != NULL); 214 return evq->sa->dp_tx->qtx_ev(dp_txq, id); 215 } 216 217 static boolean_t 218 sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data) 219 { 220 struct sfc_evq *evq = arg; 221 222 if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT) 223 return B_FALSE; 224 225 evq->exception = B_TRUE; 226 sfc_warn(evq->sa, 227 "hardware exception %s (code=%u, data=%#x) on EVQ %u;" 228 " needs recovery", 229 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" : 230 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" : 231 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" : 232 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" : 233 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" : 234 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" : 235 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" : 236 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" : 237 "UNKNOWN", 238 code, data, evq->evq_index); 239 240 return B_TRUE; 241 } 242 243 static boolean_t 244 sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index) 245 { 246 struct sfc_evq *evq = arg; 247 248 sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done", 249 evq->evq_index, rxq_hw_index); 250 return B_TRUE; 251 } 252 253 static boolean_t 254 sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index) 255 { 256 struct sfc_evq *evq = arg; 257 struct sfc_dp_rxq *dp_rxq; 258 struct sfc_rxq *rxq; 259 260 dp_rxq = evq->dp_rxq; 261 SFC_ASSERT(dp_rxq != NULL); 262 263 rxq = sfc_rxq_by_dp_rxq(dp_rxq); 264 SFC_ASSERT(rxq != NULL); 265 SFC_ASSERT(rxq->hw_index == rxq_hw_index); 266 SFC_ASSERT(rxq->evq == evq); 267 sfc_rx_qflush_done(rxq); 268 269 return B_FALSE; 270 } 271 272 static boolean_t 273 sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index) 274 { 275 struct sfc_evq *evq = arg; 276 277 sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed", 278 evq->evq_index, rxq_hw_index); 279 return B_TRUE; 280 } 281 282 static boolean_t 283 sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index) 284 { 285 struct sfc_evq *evq = arg; 286 struct sfc_dp_rxq *dp_rxq; 287 struct sfc_rxq *rxq; 288 289 dp_rxq = evq->dp_rxq; 290 SFC_ASSERT(dp_rxq != NULL); 291 292 rxq = sfc_rxq_by_dp_rxq(dp_rxq); 293 SFC_ASSERT(rxq != NULL); 294 SFC_ASSERT(rxq->hw_index == rxq_hw_index); 295 SFC_ASSERT(rxq->evq == evq); 296 sfc_rx_qflush_failed(rxq); 297 298 return B_FALSE; 299 } 300 301 static boolean_t 302 sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index) 303 { 304 struct sfc_evq *evq = arg; 305 306 sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done", 307 evq->evq_index, txq_hw_index); 308 return B_TRUE; 309 } 310 311 static boolean_t 312 sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index) 313 { 314 struct sfc_evq *evq = arg; 315 struct sfc_dp_txq *dp_txq; 316 struct sfc_txq *txq; 317 318 dp_txq = evq->dp_txq; 319 SFC_ASSERT(dp_txq != NULL); 320 321 txq = sfc_txq_by_dp_txq(dp_txq); 322 SFC_ASSERT(txq != NULL); 323 SFC_ASSERT(txq->hw_index == txq_hw_index); 324 SFC_ASSERT(txq->evq == evq); 325 sfc_tx_qflush_done(txq); 326 327 return B_FALSE; 328 } 329 330 static boolean_t 331 sfc_ev_software(void *arg, uint16_t magic) 332 { 333 struct sfc_evq *evq = arg; 334 335 sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x", 336 evq->evq_index, magic); 337 return B_TRUE; 338 } 339 340 static boolean_t 341 sfc_ev_sram(void *arg, uint32_t code) 342 { 343 struct sfc_evq *evq = arg; 344 345 sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u", 346 evq->evq_index, code); 347 return B_TRUE; 348 } 349 350 static boolean_t 351 sfc_ev_wake_up(void *arg, uint32_t index) 352 { 353 struct sfc_evq *evq = arg; 354 355 sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u", 356 evq->evq_index, index); 357 return B_TRUE; 358 } 359 360 static boolean_t 361 sfc_ev_timer(void *arg, uint32_t index) 362 { 363 struct sfc_evq *evq = arg; 364 365 sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u", 366 evq->evq_index, index); 367 return B_TRUE; 368 } 369 370 static boolean_t 371 sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode) 372 { 373 struct sfc_evq *evq = arg; 374 375 sfc_err(evq->sa, "EVQ %u unexpected link change event", 376 evq->evq_index); 377 return B_TRUE; 378 } 379 380 static boolean_t 381 sfc_ev_link_change(void *arg, efx_link_mode_t link_mode) 382 { 383 struct sfc_evq *evq = arg; 384 struct sfc_adapter *sa = evq->sa; 385 struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link; 386 struct rte_eth_link new_link; 387 uint64_t new_link_u64; 388 uint64_t old_link_u64; 389 390 EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t)); 391 392 sfc_port_link_mode_to_info(link_mode, &new_link); 393 394 new_link_u64 = *(uint64_t *)&new_link; 395 do { 396 old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link); 397 if (old_link_u64 == new_link_u64) 398 break; 399 400 if (rte_atomic64_cmpset((volatile uint64_t *)dev_link, 401 old_link_u64, new_link_u64)) { 402 evq->sa->port.lsc_seq++; 403 break; 404 } 405 } while (B_TRUE); 406 407 return B_FALSE; 408 } 409 410 static const efx_ev_callbacks_t sfc_ev_callbacks = { 411 .eec_initialized = sfc_ev_initialized, 412 .eec_rx = sfc_ev_nop_rx, 413 .eec_tx = sfc_ev_nop_tx, 414 .eec_exception = sfc_ev_exception, 415 .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done, 416 .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed, 417 .eec_txq_flush_done = sfc_ev_nop_txq_flush_done, 418 .eec_software = sfc_ev_software, 419 .eec_sram = sfc_ev_sram, 420 .eec_wake_up = sfc_ev_wake_up, 421 .eec_timer = sfc_ev_timer, 422 .eec_link_change = sfc_ev_link_change, 423 }; 424 425 static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = { 426 .eec_initialized = sfc_ev_initialized, 427 .eec_rx = sfc_ev_efx_rx, 428 .eec_tx = sfc_ev_nop_tx, 429 .eec_exception = sfc_ev_exception, 430 .eec_rxq_flush_done = sfc_ev_rxq_flush_done, 431 .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed, 432 .eec_txq_flush_done = sfc_ev_nop_txq_flush_done, 433 .eec_software = sfc_ev_software, 434 .eec_sram = sfc_ev_sram, 435 .eec_wake_up = sfc_ev_wake_up, 436 .eec_timer = sfc_ev_timer, 437 .eec_link_change = sfc_ev_nop_link_change, 438 }; 439 440 static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = { 441 .eec_initialized = sfc_ev_initialized, 442 .eec_rx = sfc_ev_dp_rx, 443 .eec_tx = sfc_ev_nop_tx, 444 .eec_exception = sfc_ev_exception, 445 .eec_rxq_flush_done = sfc_ev_rxq_flush_done, 446 .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed, 447 .eec_txq_flush_done = sfc_ev_nop_txq_flush_done, 448 .eec_software = sfc_ev_software, 449 .eec_sram = sfc_ev_sram, 450 .eec_wake_up = sfc_ev_wake_up, 451 .eec_timer = sfc_ev_timer, 452 .eec_link_change = sfc_ev_nop_link_change, 453 }; 454 455 static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = { 456 .eec_initialized = sfc_ev_initialized, 457 .eec_rx = sfc_ev_nop_rx, 458 .eec_tx = sfc_ev_tx, 459 .eec_exception = sfc_ev_exception, 460 .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done, 461 .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed, 462 .eec_txq_flush_done = sfc_ev_txq_flush_done, 463 .eec_software = sfc_ev_software, 464 .eec_sram = sfc_ev_sram, 465 .eec_wake_up = sfc_ev_wake_up, 466 .eec_timer = sfc_ev_timer, 467 .eec_link_change = sfc_ev_nop_link_change, 468 }; 469 470 static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = { 471 .eec_initialized = sfc_ev_initialized, 472 .eec_rx = sfc_ev_nop_rx, 473 .eec_tx = sfc_ev_dp_tx, 474 .eec_exception = sfc_ev_exception, 475 .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done, 476 .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed, 477 .eec_txq_flush_done = sfc_ev_txq_flush_done, 478 .eec_software = sfc_ev_software, 479 .eec_sram = sfc_ev_sram, 480 .eec_wake_up = sfc_ev_wake_up, 481 .eec_timer = sfc_ev_timer, 482 .eec_link_change = sfc_ev_nop_link_change, 483 }; 484 485 486 void 487 sfc_ev_qpoll(struct sfc_evq *evq) 488 { 489 SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED || 490 evq->init_state == SFC_EVQ_STARTING); 491 492 /* Synchronize the DMA memory for reading not required */ 493 494 efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq); 495 496 if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) { 497 struct sfc_adapter *sa = evq->sa; 498 int rc; 499 500 if (evq->dp_rxq != NULL) { 501 unsigned int rxq_sw_index; 502 503 rxq_sw_index = evq->dp_rxq->dpq.queue_id; 504 505 sfc_warn(sa, 506 "restart RxQ %u because of exception on its EvQ %u", 507 rxq_sw_index, evq->evq_index); 508 509 sfc_rx_qstop(sa, rxq_sw_index); 510 rc = sfc_rx_qstart(sa, rxq_sw_index); 511 if (rc != 0) 512 sfc_err(sa, "cannot restart RxQ %u", 513 rxq_sw_index); 514 } 515 516 if (evq->dp_txq != NULL) { 517 unsigned int txq_sw_index; 518 519 txq_sw_index = evq->dp_txq->dpq.queue_id; 520 521 sfc_warn(sa, 522 "restart TxQ %u because of exception on its EvQ %u", 523 txq_sw_index, evq->evq_index); 524 525 sfc_tx_qstop(sa, txq_sw_index); 526 rc = sfc_tx_qstart(sa, txq_sw_index); 527 if (rc != 0) 528 sfc_err(sa, "cannot restart TxQ %u", 529 txq_sw_index); 530 } 531 532 if (evq->exception) 533 sfc_panic(sa, "unrecoverable exception on EvQ %u", 534 evq->evq_index); 535 536 sfc_adapter_unlock(sa); 537 } 538 539 /* Poll-mode driver does not re-prime the event queue for interrupts */ 540 } 541 542 void 543 sfc_ev_mgmt_qpoll(struct sfc_adapter *sa) 544 { 545 if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) { 546 if (sa->mgmt_evq_running) 547 sfc_ev_qpoll(sa->mgmt_evq); 548 549 rte_spinlock_unlock(&sa->mgmt_evq_lock); 550 } 551 } 552 553 int 554 sfc_ev_qprime(struct sfc_evq *evq) 555 { 556 SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED); 557 return efx_ev_qprime(evq->common, evq->read_ptr); 558 } 559 560 /* Event queue HW index allocation scheme is described in sfc_ev.h. */ 561 int 562 sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index) 563 { 564 struct sfc_adapter *sa = evq->sa; 565 efsys_mem_t *esmp; 566 uint32_t evq_flags = sa->evq_flags; 567 unsigned int total_delay_us; 568 unsigned int delay_us; 569 int rc; 570 571 sfc_log_init(sa, "hw_index=%u", hw_index); 572 573 esmp = &evq->mem; 574 575 evq->evq_index = hw_index; 576 577 /* Clear all events */ 578 (void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries)); 579 580 if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index) 581 evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT; 582 else 583 evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED; 584 585 /* Create the common code event queue */ 586 rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries, 587 0 /* unused on EF10 */, 0, evq_flags, 588 &evq->common); 589 if (rc != 0) 590 goto fail_ev_qcreate; 591 592 SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL); 593 if (evq->dp_rxq != 0) { 594 if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0) 595 evq->callbacks = &sfc_ev_callbacks_efx_rx; 596 else 597 evq->callbacks = &sfc_ev_callbacks_dp_rx; 598 } else if (evq->dp_txq != 0) { 599 if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0) 600 evq->callbacks = &sfc_ev_callbacks_efx_tx; 601 else 602 evq->callbacks = &sfc_ev_callbacks_dp_tx; 603 } else { 604 evq->callbacks = &sfc_ev_callbacks; 605 } 606 607 evq->init_state = SFC_EVQ_STARTING; 608 609 /* Wait for the initialization event */ 610 total_delay_us = 0; 611 delay_us = SFC_EVQ_INIT_BACKOFF_START_US; 612 do { 613 (void)sfc_ev_qpoll(evq); 614 615 /* Check to see if the initialization complete indication 616 * posted by the hardware. 617 */ 618 if (evq->init_state == SFC_EVQ_STARTED) 619 goto done; 620 621 /* Give event queue some time to init */ 622 rte_delay_us(delay_us); 623 624 total_delay_us += delay_us; 625 626 /* Exponential backoff */ 627 delay_us *= 2; 628 if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US) 629 delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US; 630 631 } while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US); 632 633 rc = ETIMEDOUT; 634 goto fail_timedout; 635 636 done: 637 return 0; 638 639 fail_timedout: 640 evq->init_state = SFC_EVQ_INITIALIZED; 641 efx_ev_qdestroy(evq->common); 642 643 fail_ev_qcreate: 644 sfc_log_init(sa, "failed %d", rc); 645 return rc; 646 } 647 648 void 649 sfc_ev_qstop(struct sfc_evq *evq) 650 { 651 if (evq == NULL) 652 return; 653 654 sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index); 655 656 if (evq->init_state != SFC_EVQ_STARTED) 657 return; 658 659 evq->init_state = SFC_EVQ_INITIALIZED; 660 evq->callbacks = NULL; 661 evq->read_ptr = 0; 662 evq->exception = B_FALSE; 663 664 efx_ev_qdestroy(evq->common); 665 666 evq->evq_index = 0; 667 } 668 669 static void 670 sfc_ev_mgmt_periodic_qpoll(void *arg) 671 { 672 struct sfc_adapter *sa = arg; 673 int rc; 674 675 sfc_ev_mgmt_qpoll(sa); 676 677 rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US, 678 sfc_ev_mgmt_periodic_qpoll, sa); 679 if (rc == -ENOTSUP) { 680 sfc_warn(sa, "alarms are not supported"); 681 sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update"); 682 } else if (rc != 0) { 683 sfc_err(sa, 684 "cannot rearm management EVQ polling alarm (rc=%d)", 685 rc); 686 } 687 } 688 689 static void 690 sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa) 691 { 692 sfc_ev_mgmt_periodic_qpoll(sa); 693 } 694 695 static void 696 sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa) 697 { 698 rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa); 699 } 700 701 int 702 sfc_ev_start(struct sfc_adapter *sa) 703 { 704 int rc; 705 706 sfc_log_init(sa, "entry"); 707 708 rc = efx_ev_init(sa->nic); 709 if (rc != 0) 710 goto fail_ev_init; 711 712 /* Start management EVQ used for global events */ 713 714 /* 715 * Management event queue start polls the queue, but it cannot 716 * interfere with other polling contexts since mgmt_evq_running 717 * is false yet. 718 */ 719 rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index); 720 if (rc != 0) 721 goto fail_mgmt_evq_start; 722 723 rte_spinlock_lock(&sa->mgmt_evq_lock); 724 sa->mgmt_evq_running = true; 725 rte_spinlock_unlock(&sa->mgmt_evq_lock); 726 727 if (sa->intr.lsc_intr) { 728 rc = sfc_ev_qprime(sa->mgmt_evq); 729 if (rc != 0) 730 goto fail_mgmt_evq_prime; 731 } 732 733 /* 734 * Start management EVQ polling. If interrupts are disabled 735 * (not used), it is required to process link status change 736 * and other device level events to avoid unrecoverable 737 * error because the event queue overflow. 738 */ 739 sfc_ev_mgmt_periodic_qpoll_start(sa); 740 741 /* 742 * Rx/Tx event queues are started/stopped when corresponding 743 * Rx/Tx queue is started/stopped. 744 */ 745 746 return 0; 747 748 fail_mgmt_evq_prime: 749 sfc_ev_qstop(sa->mgmt_evq); 750 751 fail_mgmt_evq_start: 752 efx_ev_fini(sa->nic); 753 754 fail_ev_init: 755 sfc_log_init(sa, "failed %d", rc); 756 return rc; 757 } 758 759 void 760 sfc_ev_stop(struct sfc_adapter *sa) 761 { 762 sfc_log_init(sa, "entry"); 763 764 sfc_ev_mgmt_periodic_qpoll_stop(sa); 765 766 rte_spinlock_lock(&sa->mgmt_evq_lock); 767 sa->mgmt_evq_running = false; 768 rte_spinlock_unlock(&sa->mgmt_evq_lock); 769 770 sfc_ev_qstop(sa->mgmt_evq); 771 772 efx_ev_fini(sa->nic); 773 } 774 775 int 776 sfc_ev_qinit(struct sfc_adapter *sa, 777 enum sfc_evq_type type, unsigned int type_index, 778 unsigned int entries, int socket_id, struct sfc_evq **evqp) 779 { 780 struct sfc_evq *evq; 781 int rc; 782 783 sfc_log_init(sa, "type=%s type_index=%u", 784 sfc_evq_type2str(type), type_index); 785 786 SFC_ASSERT(rte_is_power_of_2(entries)); 787 788 rc = ENOMEM; 789 evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE, 790 socket_id); 791 if (evq == NULL) 792 goto fail_evq_alloc; 793 794 evq->sa = sa; 795 evq->type = type; 796 evq->entries = entries; 797 798 /* Allocate DMA space */ 799 rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index, 800 EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem); 801 if (rc != 0) 802 goto fail_dma_alloc; 803 804 evq->init_state = SFC_EVQ_INITIALIZED; 805 806 sa->evq_count++; 807 808 *evqp = evq; 809 810 return 0; 811 812 fail_dma_alloc: 813 rte_free(evq); 814 815 fail_evq_alloc: 816 817 sfc_log_init(sa, "failed %d", rc); 818 return rc; 819 } 820 821 void 822 sfc_ev_qfini(struct sfc_evq *evq) 823 { 824 struct sfc_adapter *sa = evq->sa; 825 826 SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED); 827 828 sfc_dma_free(sa, &evq->mem); 829 830 rte_free(evq); 831 832 SFC_ASSERT(sa->evq_count > 0); 833 sa->evq_count--; 834 } 835 836 static int 837 sfc_kvarg_perf_profile_handler(__rte_unused const char *key, 838 const char *value_str, void *opaque) 839 { 840 uint64_t *value = opaque; 841 842 if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0) 843 *value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT; 844 else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0) 845 *value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY; 846 else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0) 847 *value = EFX_EVQ_FLAGS_TYPE_AUTO; 848 else 849 return -EINVAL; 850 851 return 0; 852 } 853 854 int 855 sfc_ev_attach(struct sfc_adapter *sa) 856 { 857 int rc; 858 859 sfc_log_init(sa, "entry"); 860 861 sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT; 862 rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE, 863 sfc_kvarg_perf_profile_handler, 864 &sa->evq_flags); 865 if (rc != 0) { 866 sfc_err(sa, "invalid %s parameter value", 867 SFC_KVARG_PERF_PROFILE); 868 goto fail_kvarg_perf_profile; 869 } 870 871 sa->mgmt_evq_index = 0; 872 rte_spinlock_init(&sa->mgmt_evq_lock); 873 874 rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES, 875 sa->socket_id, &sa->mgmt_evq); 876 if (rc != 0) 877 goto fail_mgmt_evq_init; 878 879 /* 880 * Rx/Tx event queues are created/destroyed when corresponding 881 * Rx/Tx queue is created/destroyed. 882 */ 883 884 return 0; 885 886 fail_mgmt_evq_init: 887 888 fail_kvarg_perf_profile: 889 sfc_log_init(sa, "failed %d", rc); 890 return rc; 891 } 892 893 void 894 sfc_ev_detach(struct sfc_adapter *sa) 895 { 896 sfc_log_init(sa, "entry"); 897 898 sfc_ev_qfini(sa->mgmt_evq); 899 900 if (sa->evq_count != 0) 901 sfc_err(sa, "%u EvQs are not destroyed before detach", 902 sa->evq_count); 903 } 904