1 /*- 2 * Copyright (c) 2016 Solarflare Communications Inc. 3 * All rights reserved. 4 * 5 * This software was jointly developed between OKTET Labs (under contract 6 * for Solarflare) and Solarflare Communications, Inc. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions are met: 10 * 11 * 1. Redistributions of source code must retain the above copyright notice, 12 * this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright notice, 14 * this list of conditions and the following disclaimer in the documentation 15 * and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" 18 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, 19 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 20 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR 21 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 22 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 23 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; 24 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, 25 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR 26 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, 27 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 #include <rte_debug.h> 31 #include <rte_cycles.h> 32 #include <rte_alarm.h> 33 #include <rte_branch_prediction.h> 34 35 #include "efx.h" 36 37 #include "sfc.h" 38 #include "sfc_debug.h" 39 #include "sfc_log.h" 40 #include "sfc_ev.h" 41 #include "sfc_rx.h" 42 #include "sfc_tx.h" 43 #include "sfc_kvargs.h" 44 45 46 /* Initial delay when waiting for event queue init complete event */ 47 #define SFC_EVQ_INIT_BACKOFF_START_US (1) 48 /* Maximum delay between event queue polling attempts */ 49 #define SFC_EVQ_INIT_BACKOFF_MAX_US (10 * 1000) 50 /* Event queue init approx timeout */ 51 #define SFC_EVQ_INIT_TIMEOUT_US (2 * US_PER_S) 52 53 /* Management event queue polling period in microseconds */ 54 #define SFC_MGMT_EV_QPOLL_PERIOD_US (US_PER_S) 55 56 57 static boolean_t 58 sfc_ev_initialized(void *arg) 59 { 60 struct sfc_evq *evq = arg; 61 62 /* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */ 63 SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING || 64 evq->init_state == SFC_EVQ_STARTED); 65 66 evq->init_state = SFC_EVQ_STARTED; 67 68 return B_FALSE; 69 } 70 71 static boolean_t 72 sfc_ev_rx(void *arg, __rte_unused uint32_t label, uint32_t id, 73 uint32_t size, uint16_t flags) 74 { 75 struct sfc_evq *evq = arg; 76 struct sfc_rxq *rxq; 77 unsigned int stop; 78 unsigned int pending_id; 79 unsigned int delta; 80 unsigned int i; 81 struct sfc_rx_sw_desc *rxd; 82 83 if (unlikely(evq->exception)) 84 goto done; 85 86 rxq = evq->rxq; 87 88 SFC_ASSERT(rxq != NULL); 89 SFC_ASSERT(rxq->evq == evq); 90 SFC_ASSERT(rxq->state & SFC_RXQ_STARTED); 91 92 stop = (id + 1) & rxq->ptr_mask; 93 pending_id = rxq->pending & rxq->ptr_mask; 94 delta = (stop >= pending_id) ? (stop - pending_id) : 95 (rxq->ptr_mask + 1 - pending_id + stop); 96 97 if (delta == 0) { 98 /* 99 * Rx event with no new descriptors done and zero length 100 * is used to abort scattered packet when there is no room 101 * for the tail. 102 */ 103 if (unlikely(size != 0)) { 104 evq->exception = B_TRUE; 105 sfc_err(evq->sa, 106 "EVQ %u RxQ %u invalid RX abort " 107 "(id=%#x size=%u flags=%#x); needs restart\n", 108 evq->evq_index, sfc_rxq_sw_index(rxq), 109 id, size, flags); 110 goto done; 111 } 112 113 /* Add discard flag to the first fragment */ 114 rxq->sw_desc[pending_id].flags |= EFX_DISCARD; 115 /* Remove continue flag from the last fragment */ 116 rxq->sw_desc[id].flags &= ~EFX_PKT_CONT; 117 } else if (unlikely(delta > rxq->batch_max)) { 118 evq->exception = B_TRUE; 119 120 sfc_err(evq->sa, 121 "EVQ %u RxQ %u completion out of order " 122 "(id=%#x delta=%u flags=%#x); needs restart\n", 123 evq->evq_index, sfc_rxq_sw_index(rxq), id, delta, 124 flags); 125 126 goto done; 127 } 128 129 for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) { 130 rxd = &rxq->sw_desc[i]; 131 132 rxd->flags = flags; 133 134 SFC_ASSERT(size < (1 << 16)); 135 rxd->size = (uint16_t)size; 136 } 137 138 rxq->pending += delta; 139 140 done: 141 return B_FALSE; 142 } 143 144 static boolean_t 145 sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id) 146 { 147 struct sfc_evq *evq = arg; 148 struct sfc_txq *txq; 149 unsigned int stop; 150 unsigned int delta; 151 152 txq = evq->txq; 153 154 SFC_ASSERT(txq != NULL); 155 SFC_ASSERT(txq->evq == evq); 156 157 if (unlikely((txq->state & SFC_TXQ_STARTED) == 0)) 158 goto done; 159 160 stop = (id + 1) & txq->ptr_mask; 161 id = txq->pending & txq->ptr_mask; 162 163 delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop); 164 165 txq->pending += delta; 166 167 done: 168 return B_FALSE; 169 } 170 171 static boolean_t 172 sfc_ev_exception(void *arg, __rte_unused uint32_t code, 173 __rte_unused uint32_t data) 174 { 175 struct sfc_evq *evq = arg; 176 177 if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT) 178 return B_FALSE; 179 180 evq->exception = B_TRUE; 181 sfc_warn(evq->sa, 182 "hardware exception %s (code=%u, data=%#x) on EVQ %u;" 183 " needs recovery", 184 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" : 185 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" : 186 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" : 187 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" : 188 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" : 189 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" : 190 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" : 191 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" : 192 "UNKNOWN", 193 code, data, evq->evq_index); 194 195 return B_TRUE; 196 } 197 198 static boolean_t 199 sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index) 200 { 201 struct sfc_evq *evq = arg; 202 struct sfc_rxq *rxq; 203 204 rxq = evq->rxq; 205 SFC_ASSERT(rxq != NULL); 206 SFC_ASSERT(rxq->hw_index == rxq_hw_index); 207 SFC_ASSERT(rxq->evq == evq); 208 sfc_rx_qflush_done(rxq); 209 210 return B_FALSE; 211 } 212 213 static boolean_t 214 sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index) 215 { 216 struct sfc_evq *evq = arg; 217 struct sfc_rxq *rxq; 218 219 rxq = evq->rxq; 220 SFC_ASSERT(rxq != NULL); 221 SFC_ASSERT(rxq->hw_index == rxq_hw_index); 222 SFC_ASSERT(rxq->evq == evq); 223 sfc_rx_qflush_failed(rxq); 224 225 return B_FALSE; 226 } 227 228 static boolean_t 229 sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index) 230 { 231 struct sfc_evq *evq = arg; 232 struct sfc_txq *txq; 233 234 txq = evq->txq; 235 SFC_ASSERT(txq != NULL); 236 SFC_ASSERT(txq->hw_index == txq_hw_index); 237 SFC_ASSERT(txq->evq == evq); 238 sfc_tx_qflush_done(txq); 239 240 return B_FALSE; 241 } 242 243 static boolean_t 244 sfc_ev_software(void *arg, uint16_t magic) 245 { 246 struct sfc_evq *evq = arg; 247 248 sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x", 249 evq->evq_index, magic); 250 return B_TRUE; 251 } 252 253 static boolean_t 254 sfc_ev_sram(void *arg, uint32_t code) 255 { 256 struct sfc_evq *evq = arg; 257 258 sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u", 259 evq->evq_index, code); 260 return B_TRUE; 261 } 262 263 static boolean_t 264 sfc_ev_wake_up(void *arg, uint32_t index) 265 { 266 struct sfc_evq *evq = arg; 267 268 sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u", 269 evq->evq_index, index); 270 return B_TRUE; 271 } 272 273 static boolean_t 274 sfc_ev_timer(void *arg, uint32_t index) 275 { 276 struct sfc_evq *evq = arg; 277 278 sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u", 279 evq->evq_index, index); 280 return B_TRUE; 281 } 282 283 static boolean_t 284 sfc_ev_link_change(void *arg, efx_link_mode_t link_mode) 285 { 286 struct sfc_evq *evq = arg; 287 struct sfc_adapter *sa = evq->sa; 288 struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link; 289 struct rte_eth_link new_link; 290 uint64_t new_link_u64; 291 uint64_t old_link_u64; 292 293 EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t)); 294 295 sfc_port_link_mode_to_info(link_mode, &new_link); 296 297 new_link_u64 = *(uint64_t *)&new_link; 298 do { 299 old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link); 300 if (old_link_u64 == new_link_u64) 301 break; 302 303 if (rte_atomic64_cmpset((volatile uint64_t *)dev_link, 304 old_link_u64, new_link_u64)) { 305 evq->sa->port.lsc_seq++; 306 break; 307 } 308 } while (B_TRUE); 309 310 return B_FALSE; 311 } 312 313 static const efx_ev_callbacks_t sfc_ev_callbacks = { 314 .eec_initialized = sfc_ev_initialized, 315 .eec_rx = sfc_ev_rx, 316 .eec_tx = sfc_ev_tx, 317 .eec_exception = sfc_ev_exception, 318 .eec_rxq_flush_done = sfc_ev_rxq_flush_done, 319 .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed, 320 .eec_txq_flush_done = sfc_ev_txq_flush_done, 321 .eec_software = sfc_ev_software, 322 .eec_sram = sfc_ev_sram, 323 .eec_wake_up = sfc_ev_wake_up, 324 .eec_timer = sfc_ev_timer, 325 .eec_link_change = sfc_ev_link_change, 326 }; 327 328 329 void 330 sfc_ev_qpoll(struct sfc_evq *evq) 331 { 332 SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED || 333 evq->init_state == SFC_EVQ_STARTING); 334 335 /* Synchronize the DMA memory for reading not required */ 336 337 efx_ev_qpoll(evq->common, &evq->read_ptr, &sfc_ev_callbacks, evq); 338 339 if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) { 340 struct sfc_adapter *sa = evq->sa; 341 int rc; 342 343 if ((evq->rxq != NULL) && (evq->rxq->state & SFC_RXQ_RUNNING)) { 344 unsigned int rxq_sw_index = sfc_rxq_sw_index(evq->rxq); 345 346 sfc_warn(sa, 347 "restart RxQ %u because of exception on its EvQ %u", 348 rxq_sw_index, evq->evq_index); 349 350 sfc_rx_qstop(sa, rxq_sw_index); 351 rc = sfc_rx_qstart(sa, rxq_sw_index); 352 if (rc != 0) 353 sfc_err(sa, "cannot restart RxQ %u", 354 rxq_sw_index); 355 } 356 357 if (evq->txq != NULL) { 358 unsigned int txq_sw_index = sfc_txq_sw_index(evq->txq); 359 360 sfc_warn(sa, 361 "restart TxQ %u because of exception on its EvQ %u", 362 txq_sw_index, evq->evq_index); 363 364 sfc_tx_qstop(sa, txq_sw_index); 365 rc = sfc_tx_qstart(sa, txq_sw_index); 366 if (rc != 0) 367 sfc_err(sa, "cannot restart TxQ %u", 368 txq_sw_index); 369 } 370 371 if (evq->exception) 372 sfc_panic(sa, "unrecoverable exception on EvQ %u", 373 evq->evq_index); 374 375 sfc_adapter_unlock(sa); 376 } 377 378 /* Poll-mode driver does not re-prime the event queue for interrupts */ 379 } 380 381 void 382 sfc_ev_mgmt_qpoll(struct sfc_adapter *sa) 383 { 384 if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) { 385 struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq; 386 387 if (mgmt_evq->init_state == SFC_EVQ_STARTED) 388 sfc_ev_qpoll(mgmt_evq); 389 390 rte_spinlock_unlock(&sa->mgmt_evq_lock); 391 } 392 } 393 394 int 395 sfc_ev_qprime(struct sfc_evq *evq) 396 { 397 SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED); 398 return efx_ev_qprime(evq->common, evq->read_ptr); 399 } 400 401 int 402 sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index) 403 { 404 const struct sfc_evq_info *evq_info; 405 struct sfc_evq *evq; 406 efsys_mem_t *esmp; 407 unsigned int total_delay_us; 408 unsigned int delay_us; 409 int rc; 410 411 sfc_log_init(sa, "sw_index=%u", sw_index); 412 413 evq_info = &sa->evq_info[sw_index]; 414 evq = evq_info->evq; 415 esmp = &evq->mem; 416 417 /* Clear all events */ 418 (void)memset((void *)esmp->esm_base, 0xff, 419 EFX_EVQ_SIZE(evq_info->entries)); 420 421 /* Create the common code event queue */ 422 rc = efx_ev_qcreate(sa->nic, sw_index, esmp, evq_info->entries, 423 0 /* unused on EF10 */, 0, evq_info->flags, 424 &evq->common); 425 if (rc != 0) 426 goto fail_ev_qcreate; 427 428 evq->init_state = SFC_EVQ_STARTING; 429 430 /* Wait for the initialization event */ 431 total_delay_us = 0; 432 delay_us = SFC_EVQ_INIT_BACKOFF_START_US; 433 do { 434 (void)sfc_ev_qpoll(evq); 435 436 /* Check to see if the initialization complete indication 437 * posted by the hardware. 438 */ 439 if (evq->init_state == SFC_EVQ_STARTED) 440 goto done; 441 442 /* Give event queue some time to init */ 443 rte_delay_us(delay_us); 444 445 total_delay_us += delay_us; 446 447 /* Exponential backoff */ 448 delay_us *= 2; 449 if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US) 450 delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US; 451 452 } while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US); 453 454 rc = ETIMEDOUT; 455 goto fail_timedout; 456 457 done: 458 return 0; 459 460 fail_timedout: 461 evq->init_state = SFC_EVQ_INITIALIZED; 462 efx_ev_qdestroy(evq->common); 463 464 fail_ev_qcreate: 465 sfc_log_init(sa, "failed %d", rc); 466 return rc; 467 } 468 469 void 470 sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index) 471 { 472 const struct sfc_evq_info *evq_info; 473 struct sfc_evq *evq; 474 475 sfc_log_init(sa, "sw_index=%u", sw_index); 476 477 SFC_ASSERT(sw_index < sa->evq_count); 478 479 evq_info = &sa->evq_info[sw_index]; 480 evq = evq_info->evq; 481 482 if (evq == NULL || evq->init_state != SFC_EVQ_STARTED) 483 return; 484 485 evq->init_state = SFC_EVQ_INITIALIZED; 486 evq->read_ptr = 0; 487 evq->exception = B_FALSE; 488 489 efx_ev_qdestroy(evq->common); 490 } 491 492 static void 493 sfc_ev_mgmt_periodic_qpoll(void *arg) 494 { 495 struct sfc_adapter *sa = arg; 496 int rc; 497 498 sfc_ev_mgmt_qpoll(sa); 499 500 rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US, 501 sfc_ev_mgmt_periodic_qpoll, sa); 502 if (rc != 0) 503 sfc_panic(sa, 504 "cannot rearm management EVQ polling alarm (rc=%d)", 505 rc); 506 } 507 508 static void 509 sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa) 510 { 511 sfc_ev_mgmt_periodic_qpoll(sa); 512 } 513 514 static void 515 sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa) 516 { 517 rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa); 518 } 519 520 int 521 sfc_ev_start(struct sfc_adapter *sa) 522 { 523 int rc; 524 525 sfc_log_init(sa, "entry"); 526 527 rc = efx_ev_init(sa->nic); 528 if (rc != 0) 529 goto fail_ev_init; 530 531 /* Start management EVQ used for global events */ 532 rte_spinlock_lock(&sa->mgmt_evq_lock); 533 534 rc = sfc_ev_qstart(sa, sa->mgmt_evq_index); 535 if (rc != 0) 536 goto fail_mgmt_evq_start; 537 538 if (sa->intr.lsc_intr) { 539 rc = sfc_ev_qprime(sa->evq_info[sa->mgmt_evq_index].evq); 540 if (rc != 0) 541 goto fail_evq0_prime; 542 } 543 544 rte_spinlock_unlock(&sa->mgmt_evq_lock); 545 546 /* 547 * Start management EVQ polling. If interrupts are disabled 548 * (not used), it is required to process link status change 549 * and other device level events to avoid unrecoverable 550 * error because the event queue overflow. 551 */ 552 sfc_ev_mgmt_periodic_qpoll_start(sa); 553 554 /* 555 * Rx/Tx event queues are started/stopped when corresponding 556 * Rx/Tx queue is started/stopped. 557 */ 558 559 return 0; 560 561 fail_evq0_prime: 562 sfc_ev_qstop(sa, 0); 563 564 fail_mgmt_evq_start: 565 rte_spinlock_unlock(&sa->mgmt_evq_lock); 566 efx_ev_fini(sa->nic); 567 568 fail_ev_init: 569 sfc_log_init(sa, "failed %d", rc); 570 return rc; 571 } 572 573 void 574 sfc_ev_stop(struct sfc_adapter *sa) 575 { 576 unsigned int sw_index; 577 578 sfc_log_init(sa, "entry"); 579 580 sfc_ev_mgmt_periodic_qpoll_stop(sa); 581 582 /* Make sure that all event queues are stopped */ 583 sw_index = sa->evq_count; 584 while (sw_index-- > 0) { 585 if (sw_index == sa->mgmt_evq_index) { 586 /* Locks are required for the management EVQ */ 587 rte_spinlock_lock(&sa->mgmt_evq_lock); 588 sfc_ev_qstop(sa, sa->mgmt_evq_index); 589 rte_spinlock_unlock(&sa->mgmt_evq_lock); 590 } else { 591 sfc_ev_qstop(sa, sw_index); 592 } 593 } 594 595 efx_ev_fini(sa->nic); 596 } 597 598 int 599 sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index, 600 unsigned int entries, int socket_id) 601 { 602 struct sfc_evq_info *evq_info; 603 struct sfc_evq *evq; 604 int rc; 605 606 sfc_log_init(sa, "sw_index=%u", sw_index); 607 608 evq_info = &sa->evq_info[sw_index]; 609 610 SFC_ASSERT(rte_is_power_of_2(entries)); 611 SFC_ASSERT(entries <= evq_info->max_entries); 612 evq_info->entries = entries; 613 614 evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE, 615 socket_id); 616 if (evq == NULL) 617 return ENOMEM; 618 619 evq->sa = sa; 620 evq->evq_index = sw_index; 621 622 /* Allocate DMA space */ 623 rc = sfc_dma_alloc(sa, "evq", sw_index, EFX_EVQ_SIZE(evq_info->entries), 624 socket_id, &evq->mem); 625 if (rc != 0) 626 return rc; 627 628 evq->init_state = SFC_EVQ_INITIALIZED; 629 630 evq_info->evq = evq; 631 632 return 0; 633 } 634 635 void 636 sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index) 637 { 638 struct sfc_evq *evq; 639 640 sfc_log_init(sa, "sw_index=%u", sw_index); 641 642 evq = sa->evq_info[sw_index].evq; 643 644 SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED); 645 646 sa->evq_info[sw_index].evq = NULL; 647 648 sfc_dma_free(sa, &evq->mem); 649 650 rte_free(evq); 651 } 652 653 static int 654 sfc_ev_qinit_info(struct sfc_adapter *sa, unsigned int sw_index) 655 { 656 struct sfc_evq_info *evq_info = &sa->evq_info[sw_index]; 657 unsigned int max_entries; 658 659 sfc_log_init(sa, "sw_index=%u", sw_index); 660 661 max_entries = sfc_evq_max_entries(sa, sw_index); 662 SFC_ASSERT(rte_is_power_of_2(max_entries)); 663 664 evq_info->max_entries = max_entries; 665 evq_info->flags = sa->evq_flags | 666 ((sa->intr.lsc_intr && sw_index == sa->mgmt_evq_index) ? 667 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT : 668 EFX_EVQ_FLAGS_NOTIFY_DISABLED); 669 670 return 0; 671 } 672 673 static int 674 sfc_kvarg_perf_profile_handler(__rte_unused const char *key, 675 const char *value_str, void *opaque) 676 { 677 uint64_t *value = opaque; 678 679 if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0) 680 *value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT; 681 else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0) 682 *value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY; 683 else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0) 684 *value = EFX_EVQ_FLAGS_TYPE_AUTO; 685 else 686 return -EINVAL; 687 688 return 0; 689 } 690 691 static void 692 sfc_ev_qfini_info(struct sfc_adapter *sa, unsigned int sw_index) 693 { 694 sfc_log_init(sa, "sw_index=%u", sw_index); 695 696 /* Nothing to cleanup */ 697 } 698 699 int 700 sfc_ev_init(struct sfc_adapter *sa) 701 { 702 int rc; 703 unsigned int sw_index; 704 705 sfc_log_init(sa, "entry"); 706 707 sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT; 708 rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE, 709 sfc_kvarg_perf_profile_handler, 710 &sa->evq_flags); 711 if (rc != 0) { 712 sfc_err(sa, "invalid %s parameter value", 713 SFC_KVARG_PERF_PROFILE); 714 goto fail_kvarg_perf_profile; 715 } 716 717 sa->evq_count = sfc_ev_qcount(sa); 718 sa->mgmt_evq_index = 0; 719 rte_spinlock_init(&sa->mgmt_evq_lock); 720 721 /* Allocate EVQ info array */ 722 rc = ENOMEM; 723 sa->evq_info = rte_calloc_socket("sfc-evqs", sa->evq_count, 724 sizeof(struct sfc_evq_info), 0, 725 sa->socket_id); 726 if (sa->evq_info == NULL) 727 goto fail_evqs_alloc; 728 729 for (sw_index = 0; sw_index < sa->evq_count; ++sw_index) { 730 rc = sfc_ev_qinit_info(sa, sw_index); 731 if (rc != 0) 732 goto fail_ev_qinit_info; 733 } 734 735 rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES, 736 sa->socket_id); 737 if (rc != 0) 738 goto fail_mgmt_evq_init; 739 740 /* 741 * Rx/Tx event queues are created/destroyed when corresponding 742 * Rx/Tx queue is created/destroyed. 743 */ 744 745 return 0; 746 747 fail_mgmt_evq_init: 748 fail_ev_qinit_info: 749 while (sw_index-- > 0) 750 sfc_ev_qfini_info(sa, sw_index); 751 752 rte_free(sa->evq_info); 753 sa->evq_info = NULL; 754 755 fail_evqs_alloc: 756 sa->evq_count = 0; 757 758 fail_kvarg_perf_profile: 759 sfc_log_init(sa, "failed %d", rc); 760 return rc; 761 } 762 763 void 764 sfc_ev_fini(struct sfc_adapter *sa) 765 { 766 int sw_index; 767 768 sfc_log_init(sa, "entry"); 769 770 /* Cleanup all event queues */ 771 sw_index = sa->evq_count; 772 while (--sw_index >= 0) { 773 if (sa->evq_info[sw_index].evq != NULL) 774 sfc_ev_qfini(sa, sw_index); 775 sfc_ev_qfini_info(sa, sw_index); 776 } 777 778 rte_free(sa->evq_info); 779 sa->evq_info = NULL; 780 sa->evq_count = 0; 781 } 782