/*-
 * BSD LICENSE
 *
 * Copyright (c) 2016-2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_branch_prediction.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"


/* Initial delay when waiting for the event queue init complete event */
#define SFC_EVQ_INIT_BACKOFF_START_US	(1)
/* Maximum delay between event queue polling attempts */
#define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
/* Approximate event queue init timeout */
#define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)

/* Management event queue polling period in microseconds */
#define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)

static const char *
sfc_evq_type2str(enum sfc_evq_type type)
{
	switch (type) {
	case SFC_EVQ_TYPE_MGMT:
		return "mgmt-evq";
	case SFC_EVQ_TYPE_RX:
		return "rx-evq";
	case SFC_EVQ_TYPE_TX:
		return "tx-evq";
	default:
		SFC_ASSERT(B_FALSE);
		return NULL;
	}
}

static boolean_t
sfc_ev_initialized(void *arg)
{
	struct sfc_evq *evq = arg;

	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
		   evq->init_state == SFC_EVQ_STARTED);

	evq->init_state = SFC_EVQ_STARTED;

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa,
		"EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
		evq->evq_index, label, id, size, flags);
	return B_TRUE;
}

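/*
 * Note (illustrative worked example, not extra logic): sfc_ev_efx_rx()
 * below and sfc_ev_tx() further down compute the number of newly completed
 * descriptors with modular arithmetic on the ring pointer mask. With
 * ptr_mask == 511 (a 512-entry ring), pending == 510 and a completion
 * event carrying id == 1:
 *   stop       = (1 + 1) & 511 = 2
 *   pending_id = 510 & 511     = 510
 *   delta      = 512 - 510 + 2 = 4   (wrap-around branch, 4 descriptors)
 */
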
static boolean_t
sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_efx_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	if (unlikely(evq->exception))
		goto done;

	rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);

	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * An Rx event with no new descriptors done and zero length
		 * is used to abort a scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart",
				evq->evq_index, rxq->dp.dpq.queue_id,
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart",
			evq->evq_index, rxq->dp.dpq.queue_id,
			id, delta, flags);

		goto done;
	}

	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}

static boolean_t
sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	     __rte_unused uint32_t size, __rte_unused uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	SFC_ASSERT(evq->sa->dp_rx->qrx_ev != NULL);
	return evq->sa->dp_rx->qrx_ev(dp_rxq, id);
}

static boolean_t
sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
		evq->evq_index, label, id);
	return B_TRUE;
}

static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_efx_txq *txq;
	unsigned int stop;
	unsigned int delta;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_efx_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq->evq == evq);

	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
		goto done;

	stop = (id + 1) & txq->ptr_mask;
	/* Reuse id as the current pending index within the ring */
	id = txq->pending & txq->ptr_mask;

	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);

	txq->pending += delta;

done:
	return B_FALSE;
}

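/*
 * The datapath-specific Tx handler below, like sfc_ev_dp_rx() above, only
 * forwards the completed descriptor id to the datapath implementation
 * selected at queue start; all ring bookkeeping happens there.
 */
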
static boolean_t
sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	SFC_ASSERT(evq->sa->dp_tx->qtx_ev != NULL);
	return evq->sa->dp_tx->qtx_ev(dp_txq, id);
}

static boolean_t
sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data)
{
	struct sfc_evq *evq = arg;

	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
		return B_FALSE;

	evq->exception = B_TRUE;
	sfc_warn(evq->sa,
		 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
		 " needs recovery",
		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
		 "UNKNOWN",
		 code, data, evq->evq_index);

	return B_TRUE;
}

static boolean_t
sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_done(rxq);

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_failed(rxq);

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
		evq->evq_index, txq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_txq *txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->hw_index == txq_hw_index);
	SFC_ASSERT(txq->evq == evq);
	sfc_tx_qflush_done(txq);

	return B_FALSE;
}

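/*
 * The handlers below cover event types this driver never requests
 * (software, SRAM, wake-up and timer events); receiving one indicates a
 * firmware or driver inconsistency, so each is logged and reported as
 * unexpected.
 */
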
static boolean_t
sfc_ev_software(void *arg, uint16_t magic)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
		evq->evq_index, magic);
	return B_TRUE;
}

static boolean_t
sfc_ev_sram(void *arg, uint32_t code)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
		evq->evq_index, code);
	return B_TRUE;
}

static boolean_t
sfc_ev_wake_up(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_timer(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected link change event",
		evq->evq_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;
	struct sfc_adapter *sa = evq->sa;
	struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
	struct rte_eth_link new_link;
	uint64_t new_link_u64;
	uint64_t old_link_u64;

	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));

	sfc_port_link_mode_to_info(link_mode, &new_link);

	/*
	 * Publish the new link status atomically: the 8-byte link structure
	 * is installed with a 64-bit compare-and-swap, retrying until either
	 * this context wins or another context has already stored the value.
	 */
	new_link_u64 = *(uint64_t *)&new_link;
	do {
		old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
		if (old_link_u64 == new_link_u64)
			break;

		if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					old_link_u64, new_link_u64)) {
			evq->sa->port.lsc_seq++;
			break;
		}
	} while (B_TRUE);

	return B_FALSE;
}

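/*
 * One callback table per EvQ role: generic/management (link change and
 * other global events), EFX versus native datapath Rx, and EFX versus
 * native datapath Tx. Events that a queue of the given role should never
 * see are routed to the "nop" handlers above, which log an error and
 * report the event as unexpected.
 */
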
static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_efx_rx,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_dp_rx,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_tx			= sfc_ev_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_tx			= sfc_ev_dp_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};


void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronizing the DMA memory for reading is not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);

	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
		struct sfc_adapter *sa = evq->sa;
		int rc;

		if (evq->dp_rxq != NULL) {
			unsigned int rxq_sw_index;

			rxq_sw_index = evq->dp_rxq->dpq.queue_id;

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}

		if (evq->dp_txq != NULL) {
			unsigned int txq_sw_index;

			txq_sw_index = evq->dp_txq->dpq.queue_id;

			sfc_warn(sa,
				 "restart TxQ %u because of exception on its EvQ %u",
				 txq_sw_index, evq->evq_index);

			sfc_tx_qstop(sa, txq_sw_index);
			rc = sfc_tx_qstart(sa, txq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart TxQ %u",
					txq_sw_index);
		}

		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}

void
sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
		if (sa->mgmt_evq_running)
			sfc_ev_qpoll(sa->mgmt_evq);

		rte_spinlock_unlock(&sa->mgmt_evq_lock);
	}
}

int
sfc_ev_qprime(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
	return efx_ev_qprime(evq->common, evq->read_ptr);
}

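/*
 * Sketch of the typical EvQ lifecycle as driven by the rest of the driver
 * (the actual Rx/Tx queue start paths live outside this file, so the call
 * sequence below is an illustration, not a verbatim call site):
 *
 *	sfc_ev_qinit(sa, SFC_EVQ_TYPE_RX, sw_index, entries, socket_id, &evq);
 *	sfc_ev_qstart(evq, hw_index);	// waits for the init done event
 *	...				// sfc_ev_qpoll(evq) while running
 *	sfc_ev_qstop(evq);
 *	sfc_ev_qfini(evq);
 */
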
/* Event queue HW index allocation scheme is described in sfc_ev.h. */
int
sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
{
	struct sfc_adapter *sa = evq->sa;
	efsys_mem_t *esmp;
	uint32_t evq_flags = sa->evq_flags;
	unsigned int total_delay_us;
	unsigned int delay_us;
	int rc;

	sfc_log_init(sa, "hw_index=%u", hw_index);

	esmp = &evq->mem;

	evq->evq_index = hw_index;

	/* Clear all events */
	(void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));

	if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	else
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;

	/* Create the common code event queue */
	rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
			    0 /* unused on EF10 */, 0, evq_flags,
			    &evq->common);
	if (rc != 0)
		goto fail_ev_qcreate;

	SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
	if (evq->dp_rxq != NULL) {
		if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_rx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_rx;
	} else if (evq->dp_txq != NULL) {
		if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_tx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_tx;
	} else {
		evq->callbacks = &sfc_ev_callbacks;
	}

	evq->init_state = SFC_EVQ_STARTING;

	/* Wait for the initialization event with exponential backoff */
	total_delay_us = 0;
	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
	do {
		sfc_ev_qpoll(evq);

		/*
		 * Check to see if the initialization complete indication
		 * has been posted by the hardware.
		 */
		if (evq->init_state == SFC_EVQ_STARTED)
			goto done;

		/* Give the event queue some time to init */
		rte_delay_us(delay_us);

		total_delay_us += delay_us;

		/* Exponential backoff: double the delay up to the maximum */
		delay_us *= 2;
		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

	rc = ETIMEDOUT;
	goto fail_timedout;

done:
	return 0;

fail_timedout:
	evq->init_state = SFC_EVQ_INITIALIZED;
	efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qstop(struct sfc_evq *evq)
{
	if (evq == NULL)
		return;

	sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);

	if (evq->init_state != SFC_EVQ_STARTED)
		return;

	evq->init_state = SFC_EVQ_INITIALIZED;
	evq->callbacks = NULL;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

	efx_ev_qdestroy(evq->common);

	evq->evq_index = 0;
}

static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;
	int rc;

	sfc_ev_mgmt_qpoll(sa);

	/* Re-arm the alarm to poll again after the period */
	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
			       sfc_ev_mgmt_periodic_qpoll, sa);
	if (rc == -ENOTSUP) {
		sfc_warn(sa, "alarms are not supported");
		sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
	} else if (rc != 0) {
		sfc_err(sa,
			"cannot rearm management EVQ polling alarm (rc=%d)",
			rc);
	}
}

static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}

static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}

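/*
 * Note on ordering (a summary of the code below, not an extra contract):
 * sfc_ev_start() publishes mgmt_evq_running under mgmt_evq_lock only after
 * the management EvQ has started, and sfc_ev_stop() clears the flag before
 * the queue is stopped, so sfc_ev_mgmt_qpoll() can never poll a stopped
 * queue.
 */
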
int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start the management EVQ used for global events */

	/*
	 * Starting the management event queue polls it, but this cannot
	 * interfere with other polling contexts since mgmt_evq_running
	 * is still false.
	 */
	rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sa->mgmt_evq_running = true;
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	if (sa->intr.lsc_intr) {
		rc = sfc_ev_qprime(sa->mgmt_evq);
		if (rc != 0)
			goto fail_mgmt_evq_prime;
	}

	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), periodic polling is required to process link status
	 * changes and other device-level events and to avoid an
	 * unrecoverable error caused by event queue overflow.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when the corresponding
	 * Rx/Tx queue is started/stopped.
	 */

	return 0;

fail_mgmt_evq_prime:
	sfc_ev_qstop(sa->mgmt_evq);

fail_mgmt_evq_start:
	efx_ev_fini(sa->nic);

fail_ev_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_stop(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_mgmt_periodic_qpoll_stop(sa);

	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sa->mgmt_evq_running = false;
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	sfc_ev_qstop(sa->mgmt_evq);

	efx_ev_fini(sa->nic);
}

int
sfc_ev_qinit(struct sfc_adapter *sa,
	     enum sfc_evq_type type, unsigned int type_index,
	     unsigned int entries, int socket_id, struct sfc_evq **evqp)
{
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "type=%s type_index=%u",
		     sfc_evq_type2str(type), type_index);

	SFC_ASSERT(rte_is_power_of_2(entries));

	rc = ENOMEM;
	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (evq == NULL)
		goto fail_evq_alloc;

	evq->sa = sa;
	evq->type = type;
	evq->entries = entries;

	/* Allocate DMA space */
	rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
			   EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	evq->init_state = SFC_EVQ_INITIALIZED;

	sa->evq_count++;

	*evqp = evq;

	return 0;

fail_dma_alloc:
	rte_free(evq);

fail_evq_alloc:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qfini(struct sfc_evq *evq)
{
	struct sfc_adapter *sa = evq->sa;

	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);

	sfc_dma_free(sa, &evq->mem);

	rte_free(evq);

	SFC_ASSERT(sa->evq_count > 0);
	sa->evq_count--;
}

static int
sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
			       const char *value_str, void *opaque)
{
	uint64_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_AUTO;
	else
		return -EINVAL;

	return 0;
}

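/*
 * Illustration (the exact kvarg strings are defined in sfc_kvargs.h, so
 * the literal value here is an assumption): the perf_profile device
 * argument processed below selects the EvQ flags, e.g. a devargs string
 * such as
 *   0000:01:00.0,perf_profile=low-latency
 * would map to EFX_EVQ_FLAGS_TYPE_LOW_LATENCY via the handler above.
 */
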
int
sfc_ev_attach(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
				sfc_kvarg_perf_profile_handler,
				&sa->evq_flags);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value",
			SFC_KVARG_PERF_PROFILE);
		goto fail_kvarg_perf_profile;
	}

	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id, &sa->mgmt_evq);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when the corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:

fail_kvarg_perf_profile:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_qfini(sa->mgmt_evq);

	if (sa->evq_count != 0)
		sfc_err(sa, "%u EvQs are not destroyed before detach",
			sa->evq_count);
}