/*-
 * BSD LICENSE
 *
 * Copyright (c) 2016-2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_branch_prediction.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"


/* Initial delay when waiting for event queue init complete event */
#define SFC_EVQ_INIT_BACKOFF_START_US	(1)
/* Maximum delay between event queue polling attempts */
#define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
/* Event queue init approx timeout */
#define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)

/* Management event queue polling period in microseconds */
#define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)

static const char *
sfc_evq_type2str(enum sfc_evq_type type)
{
	switch (type) {
	case SFC_EVQ_TYPE_MGMT:
		return "mgmt-evq";
	case SFC_EVQ_TYPE_RX:
		return "rx-evq";
	case SFC_EVQ_TYPE_TX:
		return "tx-evq";
	default:
		SFC_ASSERT(B_FALSE);
		return NULL;
	}
}

static boolean_t
sfc_ev_initialized(void *arg)
{
	struct sfc_evq *evq = arg;

	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
		   evq->init_state == SFC_EVQ_STARTED);

	evq->init_state = SFC_EVQ_STARTED;

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa,
		"EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
		evq->evq_index, label, id, size, flags);
	return B_TRUE;
}
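/*
 * Note on the index arithmetic in sfc_ev_efx_rx() below: an Rx completion
 * event carries the ring index of the last completed descriptor, and the
 * number of newly completed descriptors is recovered modulo the ring size
 * (ptr_mask + 1). A worked example with illustrative values (not from the
 * source): with 8 ring entries (ptr_mask == 7), pending_id == 6 and a
 * completion for id == 1, stop == 2 and the wrapped branch yields
 * delta == 7 + 1 - 6 + 2 == 4 completed descriptors (indices 6, 7, 0, 1).
 */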
static boolean_t
sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_efx_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	if (unlikely(evq->exception))
		goto done;

	rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);

	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * Rx event with no new descriptors done and zero length
		 * is used to abort scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart",
				evq->evq_index, rxq->dp.dpq.queue_id,
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart",
			evq->evq_index, rxq->dp.dpq.queue_id,
			id, delta, flags);

		goto done;
	}

	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}

static boolean_t
sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	     __rte_unused uint32_t size, __rte_unused uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	SFC_ASSERT(evq->sa->dp_rx->qrx_ev != NULL);
	return evq->sa->dp_rx->qrx_ev(dp_rxq, id);
}
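/*
 * Note: sfc_ev_dp_rx() above simply forwards the completion to the qrx_ev
 * handler of whichever native datapath implementation is bound to the
 * adapter; sfc_ev_qstart() below selects, per queue, between the libefx
 * handler (sfc_ev_efx_rx) and this one based on the chosen Rx datapath.
 * The Tx handlers that follow mirror the same split.
 */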
static boolean_t
sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
		evq->evq_index, label, id);
	return B_TRUE;
}

static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_efx_txq *txq;
	unsigned int stop;
	unsigned int delta;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_efx_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq->evq == evq);

	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
		goto done;

	stop = (id + 1) & txq->ptr_mask;
	id = txq->pending & txq->ptr_mask;

	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);

	txq->pending += delta;

done:
	return B_FALSE;
}

static boolean_t
sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	SFC_ASSERT(evq->sa->dp_tx->qtx_ev != NULL);
	return evq->sa->dp_tx->qtx_ev(dp_txq, id);
}

static boolean_t
sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data)
{
	struct sfc_evq *evq = arg;

	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
		return B_FALSE;

	evq->exception = B_TRUE;
	sfc_warn(evq->sa,
		 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
		 " needs recovery",
		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
		 "UNKNOWN",
		 code, data, evq->evq_index);

	return B_TRUE;
}

static boolean_t
sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_done(rxq);

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_failed(rxq);

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
		evq->evq_index, txq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_txq *txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->hw_index == txq_hw_index);
	SFC_ASSERT(txq->evq == evq);
	sfc_tx_qflush_done(txq);

	return B_FALSE;
}
static boolean_t
sfc_ev_software(void *arg, uint16_t magic)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
		evq->evq_index, magic);
	return B_TRUE;
}

static boolean_t
sfc_ev_sram(void *arg, uint32_t code)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
		evq->evq_index, code);
	return B_TRUE;
}

static boolean_t
sfc_ev_wake_up(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_timer(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected link change event",
		evq->evq_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;
	struct sfc_adapter *sa = evq->sa;
	struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
	struct rte_eth_link new_link;
	uint64_t new_link_u64;
	uint64_t old_link_u64;

	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));

	sfc_port_link_mode_to_info(link_mode, &new_link);

	new_link_u64 = *(uint64_t *)&new_link;
	do {
		old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
		if (old_link_u64 == new_link_u64)
			break;

		if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					old_link_u64, new_link_u64)) {
			evq->sa->port.lsc_seq++;
			break;
		}
	} while (B_TRUE);

	return B_FALSE;
}
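/*
 * One callback table per EVQ role follows. Every EVQ is bound to exactly
 * one of these vtables at start time (see sfc_ev_qstart()), so event
 * types that are not expected on that queue (e.g. Tx completions on an
 * Rx EVQ) hit a nop handler that logs the event and returns B_TRUE,
 * flagging the anomaly instead of silently dropping it.
 */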
static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_nop_rx,
	.eec_tx = sfc_ev_nop_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_efx_rx,
	.eec_tx = sfc_ev_nop_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_dp_rx,
	.eec_tx = sfc_ev_nop_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_nop_rx,
	.eec_tx = sfc_ev_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_nop_rx,
	.eec_tx = sfc_ev_dp_tx,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};


void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronizing the DMA memory for reading is not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);

	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
		struct sfc_adapter *sa = evq->sa;
		int rc;

		if (evq->dp_rxq != NULL) {
			unsigned int rxq_sw_index;

			rxq_sw_index = evq->dp_rxq->dpq.queue_id;

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}

		if (evq->dp_txq != NULL) {
			unsigned int txq_sw_index;

			txq_sw_index = evq->dp_txq->dpq.queue_id;

			sfc_warn(sa,
				 "restart TxQ %u because of exception on its EvQ %u",
				 txq_sw_index, evq->evq_index);

			sfc_tx_qstop(sa, txq_sw_index);
			rc = sfc_tx_qstart(sa, txq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart TxQ %u",
					txq_sw_index);
		}

		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}

void
sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
		struct sfc_evq *mgmt_evq = sa->mgmt_evq;

		if (mgmt_evq->init_state == SFC_EVQ_STARTED)
			sfc_ev_qpoll(mgmt_evq);

		rte_spinlock_unlock(&sa->mgmt_evq_lock);
	}
}

int
sfc_ev_qprime(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
	return efx_ev_qprime(evq->common, evq->read_ptr);
}

/* Event queue HW index allocation scheme is described in sfc_ev.h. */
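/*
 * A note on the init wait loop in sfc_ev_qstart() below (timings are the
 * nominal values from the defines at the top of this file): the poll
 * delay starts at SFC_EVQ_INIT_BACKOFF_START_US (1 us) and doubles on
 * each attempt (1, 2, 4, ... us) until it is clamped at
 * SFC_EVQ_INIT_BACKOFF_MAX_US (10 ms); polling gives up once the total
 * delay exceeds SFC_EVQ_INIT_TIMEOUT_US (~2 s) and the start fails with
 * ETIMEDOUT.
 */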
int
sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
{
	struct sfc_adapter *sa = evq->sa;
	efsys_mem_t *esmp;
	uint32_t evq_flags = sa->evq_flags;
	unsigned int total_delay_us;
	unsigned int delay_us;
	int rc;

	sfc_log_init(sa, "hw_index=%u", hw_index);

	esmp = &evq->mem;

	evq->evq_index = hw_index;

	/* Clear all events */
	(void)memset((void *)esmp->esm_base, 0xff, EFX_EVQ_SIZE(evq->entries));

	if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index)
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	else
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;

	/* Create the common code event queue */
	rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
			    0 /* unused on EF10 */, 0, evq_flags,
			    &evq->common);
	if (rc != 0)
		goto fail_ev_qcreate;

	SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
	if (evq->dp_rxq != NULL) {
		if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_rx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_rx;
	} else if (evq->dp_txq != NULL) {
		if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_tx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_tx;
	} else {
		evq->callbacks = &sfc_ev_callbacks;
	}

	evq->init_state = SFC_EVQ_STARTING;

	/* Wait for the initialization event */
	total_delay_us = 0;
	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
	do {
		sfc_ev_qpoll(evq);

		/*
		 * Check to see if the initialization complete indication
		 * has been posted by the hardware.
		 */
		if (evq->init_state == SFC_EVQ_STARTED)
			goto done;

		/* Give the event queue some time to init */
		rte_delay_us(delay_us);

		total_delay_us += delay_us;

		/* Exponential backoff */
		delay_us *= 2;
		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

	rc = ETIMEDOUT;
	goto fail_timedout;

done:
	return 0;

fail_timedout:
	evq->init_state = SFC_EVQ_INITIALIZED;
	efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qstop(struct sfc_evq *evq)
{
	if (evq == NULL)
		return;

	sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);

	if (evq->init_state != SFC_EVQ_STARTED)
		return;

	evq->init_state = SFC_EVQ_INITIALIZED;
	evq->callbacks = NULL;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

	efx_ev_qdestroy(evq->common);

	evq->evq_index = 0;
}

static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;
	int rc;

	sfc_ev_mgmt_qpoll(sa);

	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
			       sfc_ev_mgmt_periodic_qpoll, sa);
	if (rc == -ENOTSUP) {
		sfc_warn(sa, "alarms are not supported");
		sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
	} else if (rc != 0) {
		sfc_err(sa,
			"cannot rearm management EVQ polling alarm (rc=%d)",
			rc);
	}
}

static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}

static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}
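/*
 * Note on the polling helpers above: sfc_ev_mgmt_periodic_qpoll() is a
 * self-rearming rte_eal_alarm callback, so a single call from
 * sfc_ev_mgmt_periodic_qpoll_start() keeps the management EVQ polled
 * every SFC_MGMT_EV_QPOLL_PERIOD_US until rte_eal_alarm_cancel() in
 * sfc_ev_mgmt_periodic_qpoll_stop() removes the pending alarm.
 */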
int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start management EVQ used for global events */
	rte_spinlock_lock(&sa->mgmt_evq_lock);

	rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

	if (sa->intr.lsc_intr) {
		rc = sfc_ev_qprime(sa->mgmt_evq);
		if (rc != 0)
			goto fail_evq0_prime;
	}

	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), it is required to process link status changes and
	 * other device-level events to avoid an unrecoverable error
	 * due to event queue overflow.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when the corresponding
	 * Rx/Tx queue is started/stopped.
	 */

	return 0;

fail_evq0_prime:
	sfc_ev_qstop(sa->mgmt_evq);

fail_mgmt_evq_start:
	rte_spinlock_unlock(&sa->mgmt_evq_lock);
	efx_ev_fini(sa->nic);

fail_ev_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_stop(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_mgmt_periodic_qpoll_stop(sa);

	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sfc_ev_qstop(sa->mgmt_evq);
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	efx_ev_fini(sa->nic);
}

int
sfc_ev_qinit(struct sfc_adapter *sa,
	     enum sfc_evq_type type, unsigned int type_index,
	     unsigned int entries, int socket_id, struct sfc_evq **evqp)
{
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "type=%s type_index=%u",
		     sfc_evq_type2str(type), type_index);

	SFC_ASSERT(rte_is_power_of_2(entries));

	rc = ENOMEM;
	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (evq == NULL)
		goto fail_evq_alloc;

	evq->sa = sa;
	evq->type = type;
	evq->entries = entries;

	/* Allocate DMA space */
	rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
			   EFX_EVQ_SIZE(evq->entries), socket_id, &evq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	evq->init_state = SFC_EVQ_INITIALIZED;

	sa->evq_count++;

	*evqp = evq;

	return 0;

fail_dma_alloc:
	rte_free(evq);

fail_evq_alloc:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qfini(struct sfc_evq *evq)
{
	struct sfc_adapter *sa = evq->sa;

	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);

	sfc_dma_free(sa, &evq->mem);

	rte_free(evq);

	SFC_ASSERT(sa->evq_count > 0);
	sa->evq_count--;
}

static int
sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
			       const char *value_str, void *opaque)
{
	uint64_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_AUTO;
	else
		return -EINVAL;

	return 0;
}
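/*
 * Usage note (illustrative, not from this file): the handler above backs
 * the SFC_KVARG_PERF_PROFILE device argument, so the EVQ type can be
 * selected from the EAL command line with something like
 *   -w 0000:01:00.0,perf_profile=low-latency
 * where the accepted value strings are defined in sfc_kvargs.h.
 */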
int
sfc_ev_attach(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
				sfc_kvarg_perf_profile_handler,
				&sa->evq_flags);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value",
			SFC_KVARG_PERF_PROFILE);
		goto fail_kvarg_perf_profile;
	}

	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id, &sa->mgmt_evq);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when the corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:

fail_kvarg_perf_profile:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_qfini(sa->mgmt_evq);

	if (sa->evq_count != 0)
		sfc_err(sa, "%u EvQs are not destroyed before detach",
			sa->evq_count);
}