1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright(c) 2019-2021 Xilinx, Inc. 4 * Copyright(c) 2016-2019 Solarflare Communications Inc. 5 * 6 * This software was jointly developed between OKTET Labs (under contract 7 * for Solarflare) and Solarflare Communications, Inc. 8 */ 9 10 #include <rte_debug.h> 11 #include <rte_cycles.h> 12 #include <rte_alarm.h> 13 #include <rte_branch_prediction.h> 14 15 #include "efx.h" 16 17 #include "sfc.h" 18 #include "sfc_debug.h" 19 #include "sfc_log.h" 20 #include "sfc_ev.h" 21 #include "sfc_rx.h" 22 #include "sfc_tx.h" 23 #include "sfc_kvargs.h" 24 25 26 /* Initial delay when waiting for event queue init complete event */ 27 #define SFC_EVQ_INIT_BACKOFF_START_US (1) 28 /* Maximum delay between event queue polling attempts */ 29 #define SFC_EVQ_INIT_BACKOFF_MAX_US (10 * 1000) 30 /* Event queue init approx timeout */ 31 #define SFC_EVQ_INIT_TIMEOUT_US (2 * US_PER_S) 32 33 /* Management event queue polling period in microseconds */ 34 #define SFC_MGMT_EV_QPOLL_PERIOD_US (US_PER_S) 35 36 static const char * 37 sfc_evq_type2str(enum sfc_evq_type type) 38 { 39 switch (type) { 40 case SFC_EVQ_TYPE_MGMT: 41 return "mgmt-evq"; 42 case SFC_EVQ_TYPE_RX: 43 return "rx-evq"; 44 case SFC_EVQ_TYPE_TX: 45 return "tx-evq"; 46 default: 47 SFC_ASSERT(B_FALSE); 48 return NULL; 49 } 50 } 51 52 static boolean_t 53 sfc_ev_initialized(void *arg) 54 { 55 struct sfc_evq *evq = arg; 56 57 /* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */ 58 SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING || 59 evq->init_state == SFC_EVQ_STARTED); 60 61 evq->init_state = SFC_EVQ_STARTED; 62 63 return B_FALSE; 64 } 65 66 static boolean_t 67 sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id, 68 uint32_t size, uint16_t flags) 69 { 70 struct sfc_evq *evq = arg; 71 72 sfc_err(evq->sa, 73 "EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x", 74 evq->evq_index, label, id, size, flags); 75 return B_TRUE; 76 } 77 78 static boolean_t 79 sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id, 80 uint32_t size, uint16_t flags) 81 { 82 struct sfc_evq *evq = arg; 83 struct sfc_efx_rxq *rxq; 84 unsigned int stop; 85 unsigned int pending_id; 86 unsigned int delta; 87 unsigned int i; 88 struct sfc_efx_rx_sw_desc *rxd; 89 90 if (unlikely(evq->exception)) 91 goto done; 92 93 rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq); 94 95 SFC_ASSERT(rxq != NULL); 96 SFC_ASSERT(rxq->evq == evq); 97 SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED); 98 99 stop = (id + 1) & rxq->ptr_mask; 100 pending_id = rxq->pending & rxq->ptr_mask; 101 delta = (stop >= pending_id) ? (stop - pending_id) : 102 (rxq->ptr_mask + 1 - pending_id + stop); 103 104 if (delta == 0) { 105 /* 106 * Rx event with no new descriptors done and zero length 107 * is used to abort scattered packet when there is no room 108 * for the tail. 109 */ 110 if (unlikely(size != 0)) { 111 evq->exception = B_TRUE; 112 sfc_err(evq->sa, 113 "EVQ %u RxQ %u invalid RX abort " 114 "(id=%#x size=%u flags=%#x); needs restart", 115 evq->evq_index, rxq->dp.dpq.queue_id, 116 id, size, flags); 117 goto done; 118 } 119 120 /* Add discard flag to the first fragment */ 121 rxq->sw_desc[pending_id].flags |= EFX_DISCARD; 122 /* Remove continue flag from the last fragment */ 123 rxq->sw_desc[id].flags &= ~EFX_PKT_CONT; 124 } else if (unlikely(delta > rxq->batch_max)) { 125 evq->exception = B_TRUE; 126 127 sfc_err(evq->sa, 128 "EVQ %u RxQ %u completion out of order " 129 "(id=%#x delta=%u flags=%#x); needs restart", 130 evq->evq_index, rxq->dp.dpq.queue_id, 131 id, delta, flags); 132 133 goto done; 134 } 135 136 for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) { 137 rxd = &rxq->sw_desc[i]; 138 139 rxd->flags = flags; 140 141 SFC_ASSERT(size < (1 << 16)); 142 rxd->size = (uint16_t)size; 143 } 144 145 rxq->pending += delta; 146 147 done: 148 return B_FALSE; 149 } 150 151 static boolean_t 152 sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id, 153 __rte_unused uint32_t size, __rte_unused uint16_t flags) 154 { 155 struct sfc_evq *evq = arg; 156 struct sfc_dp_rxq *dp_rxq; 157 158 dp_rxq = evq->dp_rxq; 159 SFC_ASSERT(dp_rxq != NULL); 160 161 SFC_ASSERT(evq->sa->priv.dp_rx->qrx_ev != NULL); 162 return evq->sa->priv.dp_rx->qrx_ev(dp_rxq, id); 163 } 164 165 static boolean_t 166 sfc_ev_nop_rx_packets(void *arg, uint32_t label, unsigned int num_packets, 167 uint32_t flags) 168 { 169 struct sfc_evq *evq = arg; 170 171 sfc_err(evq->sa, 172 "EVQ %u unexpected Rx packets event label=%u num=%u flags=%#x", 173 evq->evq_index, label, num_packets, flags); 174 return B_TRUE; 175 } 176 177 static boolean_t 178 sfc_ev_dp_rx_packets(void *arg, __rte_unused uint32_t label, 179 unsigned int num_packets, __rte_unused uint32_t flags) 180 { 181 struct sfc_evq *evq = arg; 182 struct sfc_dp_rxq *dp_rxq; 183 184 dp_rxq = evq->dp_rxq; 185 SFC_ASSERT(dp_rxq != NULL); 186 187 SFC_ASSERT(evq->sa->priv.dp_rx->qrx_ev != NULL); 188 return evq->sa->priv.dp_rx->qrx_ev(dp_rxq, num_packets); 189 } 190 191 static boolean_t 192 sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id, 193 uint32_t pkt_count, uint16_t flags) 194 { 195 struct sfc_evq *evq = arg; 196 197 sfc_err(evq->sa, 198 "EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x", 199 evq->evq_index, label, id, pkt_count, flags); 200 return B_TRUE; 201 } 202 203 /* It is not actually used on datapath, but required on RxQ flush */ 204 static boolean_t 205 sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id, 206 __rte_unused uint32_t pkt_count, __rte_unused uint16_t flags) 207 { 208 struct sfc_evq *evq = arg; 209 struct sfc_dp_rxq *dp_rxq; 210 211 dp_rxq = evq->dp_rxq; 212 SFC_ASSERT(dp_rxq != NULL); 213 214 if (evq->sa->priv.dp_rx->qrx_ps_ev != NULL) 215 return evq->sa->priv.dp_rx->qrx_ps_ev(dp_rxq, id); 216 else 217 return B_FALSE; 218 } 219 220 static boolean_t 221 sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id) 222 { 223 struct sfc_evq *evq = arg; 224 225 sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x", 226 evq->evq_index, label, id); 227 return B_TRUE; 228 } 229 230 static boolean_t 231 sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id) 232 { 233 struct sfc_evq *evq = arg; 234 struct sfc_dp_txq *dp_txq; 235 struct sfc_efx_txq *txq; 236 unsigned int stop; 237 unsigned int delta; 238 239 dp_txq = evq->dp_txq; 240 SFC_ASSERT(dp_txq != NULL); 241 242 txq = sfc_efx_txq_by_dp_txq(dp_txq); 243 SFC_ASSERT(txq->evq == evq); 244 245 if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0)) 246 goto done; 247 248 stop = (id + 1) & txq->ptr_mask; 249 id = txq->pending & txq->ptr_mask; 250 251 delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop); 252 253 txq->pending += delta; 254 255 done: 256 return B_FALSE; 257 } 258 259 static boolean_t 260 sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id) 261 { 262 struct sfc_evq *evq = arg; 263 struct sfc_dp_txq *dp_txq; 264 265 dp_txq = evq->dp_txq; 266 SFC_ASSERT(dp_txq != NULL); 267 268 SFC_ASSERT(evq->sa->priv.dp_tx->qtx_ev != NULL); 269 return evq->sa->priv.dp_tx->qtx_ev(dp_txq, id); 270 } 271 272 static boolean_t 273 sfc_ev_nop_tx_ndescs(void *arg, uint32_t label, unsigned int ndescs) 274 { 275 struct sfc_evq *evq = arg; 276 277 sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u ndescs=%#x", 278 evq->evq_index, label, ndescs); 279 return B_TRUE; 280 } 281 282 static boolean_t 283 sfc_ev_dp_tx_ndescs(void *arg, __rte_unused uint32_t label, 284 unsigned int ndescs) 285 { 286 struct sfc_evq *evq = arg; 287 struct sfc_dp_txq *dp_txq; 288 289 dp_txq = evq->dp_txq; 290 SFC_ASSERT(dp_txq != NULL); 291 292 SFC_ASSERT(evq->sa->priv.dp_tx->qtx_ev != NULL); 293 return evq->sa->priv.dp_tx->qtx_ev(dp_txq, ndescs); 294 } 295 296 static boolean_t 297 sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data) 298 { 299 struct sfc_evq *evq = arg; 300 301 if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT) 302 return B_FALSE; 303 304 evq->exception = B_TRUE; 305 sfc_warn(evq->sa, 306 "hardware exception %s (code=%u, data=%#x) on EVQ %u;" 307 " needs recovery", 308 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" : 309 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" : 310 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" : 311 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" : 312 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" : 313 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" : 314 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" : 315 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" : 316 "UNKNOWN", 317 code, data, evq->evq_index); 318 319 return B_TRUE; 320 } 321 322 static boolean_t 323 sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index) 324 { 325 struct sfc_evq *evq = arg; 326 327 sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done", 328 evq->evq_index, rxq_hw_index); 329 return B_TRUE; 330 } 331 332 static boolean_t 333 sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index) 334 { 335 struct sfc_evq *evq = arg; 336 struct sfc_dp_rxq *dp_rxq; 337 struct sfc_rxq *rxq; 338 339 dp_rxq = evq->dp_rxq; 340 SFC_ASSERT(dp_rxq != NULL); 341 342 rxq = sfc_rxq_by_dp_rxq(dp_rxq); 343 SFC_ASSERT(rxq != NULL); 344 SFC_ASSERT(rxq->hw_index == rxq_hw_index); 345 SFC_ASSERT(rxq->evq == evq); 346 RTE_SET_USED(rxq); 347 348 sfc_rx_qflush_done(sfc_rxq_info_by_dp_rxq(dp_rxq)); 349 350 return B_FALSE; 351 } 352 353 static boolean_t 354 sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index) 355 { 356 struct sfc_evq *evq = arg; 357 358 sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed", 359 evq->evq_index, rxq_hw_index); 360 return B_TRUE; 361 } 362 363 static boolean_t 364 sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index) 365 { 366 struct sfc_evq *evq = arg; 367 struct sfc_dp_rxq *dp_rxq; 368 struct sfc_rxq *rxq; 369 370 dp_rxq = evq->dp_rxq; 371 SFC_ASSERT(dp_rxq != NULL); 372 373 rxq = sfc_rxq_by_dp_rxq(dp_rxq); 374 SFC_ASSERT(rxq != NULL); 375 SFC_ASSERT(rxq->hw_index == rxq_hw_index); 376 SFC_ASSERT(rxq->evq == evq); 377 RTE_SET_USED(rxq); 378 379 sfc_rx_qflush_failed(sfc_rxq_info_by_dp_rxq(dp_rxq)); 380 381 return B_FALSE; 382 } 383 384 static boolean_t 385 sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index) 386 { 387 struct sfc_evq *evq = arg; 388 389 sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done", 390 evq->evq_index, txq_hw_index); 391 return B_TRUE; 392 } 393 394 static boolean_t 395 sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index) 396 { 397 struct sfc_evq *evq = arg; 398 struct sfc_dp_txq *dp_txq; 399 struct sfc_txq *txq; 400 401 dp_txq = evq->dp_txq; 402 SFC_ASSERT(dp_txq != NULL); 403 404 txq = sfc_txq_by_dp_txq(dp_txq); 405 SFC_ASSERT(txq != NULL); 406 SFC_ASSERT(txq->hw_index == txq_hw_index); 407 SFC_ASSERT(txq->evq == evq); 408 RTE_SET_USED(txq); 409 410 sfc_tx_qflush_done(sfc_txq_info_by_dp_txq(dp_txq)); 411 412 return B_FALSE; 413 } 414 415 static boolean_t 416 sfc_ev_software(void *arg, uint16_t magic) 417 { 418 struct sfc_evq *evq = arg; 419 420 sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x", 421 evq->evq_index, magic); 422 return B_TRUE; 423 } 424 425 static boolean_t 426 sfc_ev_sram(void *arg, uint32_t code) 427 { 428 struct sfc_evq *evq = arg; 429 430 sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u", 431 evq->evq_index, code); 432 return B_TRUE; 433 } 434 435 static boolean_t 436 sfc_ev_wake_up(void *arg, uint32_t index) 437 { 438 struct sfc_evq *evq = arg; 439 440 sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u", 441 evq->evq_index, index); 442 return B_TRUE; 443 } 444 445 static boolean_t 446 sfc_ev_timer(void *arg, uint32_t index) 447 { 448 struct sfc_evq *evq = arg; 449 450 sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u", 451 evq->evq_index, index); 452 return B_TRUE; 453 } 454 455 static boolean_t 456 sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode) 457 { 458 struct sfc_evq *evq = arg; 459 460 sfc_err(evq->sa, "EVQ %u unexpected link change event", 461 evq->evq_index); 462 return B_TRUE; 463 } 464 465 static boolean_t 466 sfc_ev_link_change(void *arg, efx_link_mode_t link_mode) 467 { 468 struct sfc_evq *evq = arg; 469 struct sfc_adapter *sa = evq->sa; 470 struct rte_eth_link new_link; 471 472 sfc_port_link_mode_to_info(link_mode, &new_link); 473 if (rte_eth_linkstatus_set(sa->eth_dev, &new_link) == 0) 474 evq->sa->port.lsc_seq++; 475 476 return B_FALSE; 477 } 478 479 static const efx_ev_callbacks_t sfc_ev_callbacks = { 480 .eec_initialized = sfc_ev_initialized, 481 .eec_rx = sfc_ev_nop_rx, 482 .eec_rx_packets = sfc_ev_nop_rx_packets, 483 .eec_rx_ps = sfc_ev_nop_rx_ps, 484 .eec_tx = sfc_ev_nop_tx, 485 .eec_tx_ndescs = sfc_ev_nop_tx_ndescs, 486 .eec_exception = sfc_ev_exception, 487 .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done, 488 .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed, 489 .eec_txq_flush_done = sfc_ev_nop_txq_flush_done, 490 .eec_software = sfc_ev_software, 491 .eec_sram = sfc_ev_sram, 492 .eec_wake_up = sfc_ev_wake_up, 493 .eec_timer = sfc_ev_timer, 494 .eec_link_change = sfc_ev_link_change, 495 }; 496 497 static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = { 498 .eec_initialized = sfc_ev_initialized, 499 .eec_rx = sfc_ev_efx_rx, 500 .eec_rx_packets = sfc_ev_nop_rx_packets, 501 .eec_rx_ps = sfc_ev_nop_rx_ps, 502 .eec_tx = sfc_ev_nop_tx, 503 .eec_tx_ndescs = sfc_ev_nop_tx_ndescs, 504 .eec_exception = sfc_ev_exception, 505 .eec_rxq_flush_done = sfc_ev_rxq_flush_done, 506 .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed, 507 .eec_txq_flush_done = sfc_ev_nop_txq_flush_done, 508 .eec_software = sfc_ev_software, 509 .eec_sram = sfc_ev_sram, 510 .eec_wake_up = sfc_ev_wake_up, 511 .eec_timer = sfc_ev_timer, 512 .eec_link_change = sfc_ev_nop_link_change, 513 }; 514 515 static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = { 516 .eec_initialized = sfc_ev_initialized, 517 .eec_rx = sfc_ev_dp_rx, 518 .eec_rx_packets = sfc_ev_dp_rx_packets, 519 .eec_rx_ps = sfc_ev_dp_rx_ps, 520 .eec_tx = sfc_ev_nop_tx, 521 .eec_tx_ndescs = sfc_ev_nop_tx_ndescs, 522 .eec_exception = sfc_ev_exception, 523 .eec_rxq_flush_done = sfc_ev_rxq_flush_done, 524 .eec_rxq_flush_failed = sfc_ev_rxq_flush_failed, 525 .eec_txq_flush_done = sfc_ev_nop_txq_flush_done, 526 .eec_software = sfc_ev_software, 527 .eec_sram = sfc_ev_sram, 528 .eec_wake_up = sfc_ev_wake_up, 529 .eec_timer = sfc_ev_timer, 530 .eec_link_change = sfc_ev_nop_link_change, 531 }; 532 533 static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = { 534 .eec_initialized = sfc_ev_initialized, 535 .eec_rx = sfc_ev_nop_rx, 536 .eec_rx_packets = sfc_ev_nop_rx_packets, 537 .eec_rx_ps = sfc_ev_nop_rx_ps, 538 .eec_tx = sfc_ev_tx, 539 .eec_tx_ndescs = sfc_ev_nop_tx_ndescs, 540 .eec_exception = sfc_ev_exception, 541 .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done, 542 .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed, 543 .eec_txq_flush_done = sfc_ev_txq_flush_done, 544 .eec_software = sfc_ev_software, 545 .eec_sram = sfc_ev_sram, 546 .eec_wake_up = sfc_ev_wake_up, 547 .eec_timer = sfc_ev_timer, 548 .eec_link_change = sfc_ev_nop_link_change, 549 }; 550 551 static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = { 552 .eec_initialized = sfc_ev_initialized, 553 .eec_rx = sfc_ev_nop_rx, 554 .eec_rx_packets = sfc_ev_nop_rx_packets, 555 .eec_rx_ps = sfc_ev_nop_rx_ps, 556 .eec_tx = sfc_ev_dp_tx, 557 .eec_tx_ndescs = sfc_ev_dp_tx_ndescs, 558 .eec_exception = sfc_ev_exception, 559 .eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done, 560 .eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed, 561 .eec_txq_flush_done = sfc_ev_txq_flush_done, 562 .eec_software = sfc_ev_software, 563 .eec_sram = sfc_ev_sram, 564 .eec_wake_up = sfc_ev_wake_up, 565 .eec_timer = sfc_ev_timer, 566 .eec_link_change = sfc_ev_nop_link_change, 567 }; 568 569 570 void 571 sfc_ev_qpoll(struct sfc_evq *evq) 572 { 573 SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED || 574 evq->init_state == SFC_EVQ_STARTING); 575 576 /* Synchronize the DMA memory for reading not required */ 577 578 efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq); 579 580 if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) { 581 struct sfc_adapter *sa = evq->sa; 582 int rc; 583 584 if (evq->dp_rxq != NULL) { 585 sfc_sw_index_t rxq_sw_index; 586 587 rxq_sw_index = evq->dp_rxq->dpq.queue_id; 588 589 sfc_warn(sa, 590 "restart RxQ %u because of exception on its EvQ %u", 591 rxq_sw_index, evq->evq_index); 592 593 sfc_rx_qstop(sa, rxq_sw_index); 594 rc = sfc_rx_qstart(sa, rxq_sw_index); 595 if (rc != 0) 596 sfc_err(sa, "cannot restart RxQ %u", 597 rxq_sw_index); 598 } 599 600 if (evq->dp_txq != NULL) { 601 sfc_sw_index_t txq_sw_index; 602 603 txq_sw_index = evq->dp_txq->dpq.queue_id; 604 605 sfc_warn(sa, 606 "restart TxQ %u because of exception on its EvQ %u", 607 txq_sw_index, evq->evq_index); 608 609 sfc_tx_qstop(sa, txq_sw_index); 610 rc = sfc_tx_qstart(sa, txq_sw_index); 611 if (rc != 0) 612 sfc_err(sa, "cannot restart TxQ %u", 613 txq_sw_index); 614 } 615 616 if (evq->exception) 617 sfc_panic(sa, "unrecoverable exception on EvQ %u", 618 evq->evq_index); 619 620 sfc_adapter_unlock(sa); 621 } 622 623 /* Poll-mode driver does not re-prime the event queue for interrupts */ 624 } 625 626 void 627 sfc_ev_mgmt_qpoll(struct sfc_adapter *sa) 628 { 629 if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) { 630 if (sa->mgmt_evq_running) 631 sfc_ev_qpoll(sa->mgmt_evq); 632 633 rte_spinlock_unlock(&sa->mgmt_evq_lock); 634 } 635 } 636 637 int 638 sfc_ev_qprime(struct sfc_evq *evq) 639 { 640 SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED); 641 return efx_ev_qprime(evq->common, evq->read_ptr); 642 } 643 644 /* Event queue HW index allocation scheme is described in sfc_ev.h. */ 645 int 646 sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index) 647 { 648 struct sfc_adapter *sa = evq->sa; 649 efsys_mem_t *esmp; 650 uint32_t evq_flags = sa->evq_flags; 651 uint32_t irq = 0; 652 unsigned int total_delay_us; 653 unsigned int delay_us; 654 int rc; 655 656 sfc_log_init(sa, "hw_index=%u", hw_index); 657 658 esmp = &evq->mem; 659 660 evq->evq_index = hw_index; 661 662 /* Clear all events */ 663 (void)memset((void *)esmp->esm_base, 0xff, 664 efx_evq_size(sa->nic, evq->entries, evq_flags)); 665 666 if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index) { 667 evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT; 668 irq = 0; 669 } else if (sa->intr.rxq_intr && evq->dp_rxq != NULL) { 670 sfc_ethdev_qid_t ethdev_qid; 671 672 ethdev_qid = 673 sfc_ethdev_rx_qid_by_rxq_sw_index(sfc_sa2shared(sa), 674 evq->dp_rxq->dpq.queue_id); 675 if (ethdev_qid != SFC_ETHDEV_QID_INVALID) { 676 evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT; 677 /* 678 * The first interrupt is used for management EvQ 679 * (LSC etc). RxQ interrupts follow it. 680 */ 681 irq = 1 + ethdev_qid; 682 } else { 683 evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED; 684 } 685 } else { 686 evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED; 687 } 688 689 evq->init_state = SFC_EVQ_STARTING; 690 691 /* Create the common code event queue */ 692 rc = efx_ev_qcreate_irq(sa->nic, hw_index, esmp, evq->entries, 693 0 /* unused on EF10 */, 0, evq_flags, 694 irq, &evq->common); 695 if (rc != 0) 696 goto fail_ev_qcreate; 697 698 SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL); 699 if (evq->dp_rxq != 0) { 700 if (strcmp(sa->priv.dp_rx->dp.name, 701 SFC_KVARG_DATAPATH_EFX) == 0) 702 evq->callbacks = &sfc_ev_callbacks_efx_rx; 703 else 704 evq->callbacks = &sfc_ev_callbacks_dp_rx; 705 } else if (evq->dp_txq != 0) { 706 if (strcmp(sa->priv.dp_tx->dp.name, 707 SFC_KVARG_DATAPATH_EFX) == 0) 708 evq->callbacks = &sfc_ev_callbacks_efx_tx; 709 else 710 evq->callbacks = &sfc_ev_callbacks_dp_tx; 711 } else { 712 evq->callbacks = &sfc_ev_callbacks; 713 } 714 715 /* 716 * Poll once to ensure that eec_initialized callback is invoked in 717 * case if the hardware does not support INIT_DONE events. If the 718 * hardware supports INIT_DONE events, this will do nothing, and the 719 * corresponding event will be processed by sfc_ev_qpoll() below. 720 */ 721 efx_ev_qcreate_check_init_done(evq->common, evq->callbacks, evq); 722 723 /* Wait for the initialization event */ 724 total_delay_us = 0; 725 delay_us = SFC_EVQ_INIT_BACKOFF_START_US; 726 do { 727 (void)sfc_ev_qpoll(evq); 728 729 /* Check to see if the initialization complete indication 730 * posted by the hardware. 731 */ 732 if (evq->init_state == SFC_EVQ_STARTED) 733 goto done; 734 735 /* Give event queue some time to init */ 736 rte_delay_us(delay_us); 737 738 total_delay_us += delay_us; 739 740 /* Exponential backoff */ 741 delay_us *= 2; 742 if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US) 743 delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US; 744 745 } while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US); 746 747 rc = ETIMEDOUT; 748 goto fail_timedout; 749 750 done: 751 return 0; 752 753 fail_timedout: 754 efx_ev_qdestroy(evq->common); 755 756 fail_ev_qcreate: 757 evq->init_state = SFC_EVQ_INITIALIZED; 758 sfc_log_init(sa, "failed %d", rc); 759 return rc; 760 } 761 762 void 763 sfc_ev_qstop(struct sfc_evq *evq) 764 { 765 if (evq == NULL) 766 return; 767 768 sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index); 769 770 if (evq->init_state != SFC_EVQ_STARTED) 771 return; 772 773 evq->init_state = SFC_EVQ_INITIALIZED; 774 evq->callbacks = NULL; 775 evq->read_ptr = 0; 776 evq->exception = B_FALSE; 777 778 efx_ev_qdestroy(evq->common); 779 780 evq->evq_index = 0; 781 } 782 783 static void 784 sfc_ev_mgmt_periodic_qpoll(void *arg) 785 { 786 struct sfc_adapter *sa = arg; 787 int rc; 788 789 sfc_ev_mgmt_qpoll(sa); 790 791 rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US, 792 sfc_ev_mgmt_periodic_qpoll, sa); 793 if (rc == -ENOTSUP) { 794 sfc_warn(sa, "alarms are not supported"); 795 sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update"); 796 } else if (rc != 0) { 797 sfc_err(sa, 798 "cannot rearm management EVQ polling alarm (rc=%d)", 799 rc); 800 } 801 } 802 803 static void 804 sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa) 805 { 806 sfc_ev_mgmt_periodic_qpoll(sa); 807 } 808 809 static void 810 sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa) 811 { 812 rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa); 813 } 814 815 int 816 sfc_ev_start(struct sfc_adapter *sa) 817 { 818 int rc; 819 820 sfc_log_init(sa, "entry"); 821 822 rc = efx_ev_init(sa->nic); 823 if (rc != 0) 824 goto fail_ev_init; 825 826 /* Start management EVQ used for global events */ 827 828 /* 829 * Management event queue start polls the queue, but it cannot 830 * interfere with other polling contexts since mgmt_evq_running 831 * is false yet. 832 */ 833 rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index); 834 if (rc != 0) 835 goto fail_mgmt_evq_start; 836 837 rte_spinlock_lock(&sa->mgmt_evq_lock); 838 sa->mgmt_evq_running = true; 839 rte_spinlock_unlock(&sa->mgmt_evq_lock); 840 841 if (sa->intr.lsc_intr) { 842 rc = sfc_ev_qprime(sa->mgmt_evq); 843 if (rc != 0) 844 goto fail_mgmt_evq_prime; 845 } 846 847 /* 848 * Start management EVQ polling. If interrupts are disabled 849 * (not used), it is required to process link status change 850 * and other device level events to avoid unrecoverable 851 * error because the event queue overflow. 852 */ 853 sfc_ev_mgmt_periodic_qpoll_start(sa); 854 855 /* 856 * Rx/Tx event queues are started/stopped when corresponding 857 * Rx/Tx queue is started/stopped. 858 */ 859 860 return 0; 861 862 fail_mgmt_evq_prime: 863 sfc_ev_qstop(sa->mgmt_evq); 864 865 fail_mgmt_evq_start: 866 efx_ev_fini(sa->nic); 867 868 fail_ev_init: 869 sfc_log_init(sa, "failed %d", rc); 870 return rc; 871 } 872 873 void 874 sfc_ev_stop(struct sfc_adapter *sa) 875 { 876 sfc_log_init(sa, "entry"); 877 878 sfc_ev_mgmt_periodic_qpoll_stop(sa); 879 880 rte_spinlock_lock(&sa->mgmt_evq_lock); 881 sa->mgmt_evq_running = false; 882 rte_spinlock_unlock(&sa->mgmt_evq_lock); 883 884 sfc_ev_qstop(sa->mgmt_evq); 885 886 efx_ev_fini(sa->nic); 887 } 888 889 int 890 sfc_ev_qinit(struct sfc_adapter *sa, 891 enum sfc_evq_type type, unsigned int type_index, 892 unsigned int entries, int socket_id, struct sfc_evq **evqp) 893 { 894 struct sfc_evq *evq; 895 int rc; 896 897 sfc_log_init(sa, "type=%s type_index=%u", 898 sfc_evq_type2str(type), type_index); 899 900 SFC_ASSERT(rte_is_power_of_2(entries)); 901 902 rc = ENOMEM; 903 evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE, 904 socket_id); 905 if (evq == NULL) 906 goto fail_evq_alloc; 907 908 evq->sa = sa; 909 evq->type = type; 910 evq->entries = entries; 911 912 /* Allocate DMA space */ 913 rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index, 914 efx_evq_size(sa->nic, evq->entries, sa->evq_flags), 915 socket_id, &evq->mem); 916 if (rc != 0) 917 goto fail_dma_alloc; 918 919 evq->init_state = SFC_EVQ_INITIALIZED; 920 921 sa->evq_count++; 922 923 *evqp = evq; 924 925 return 0; 926 927 fail_dma_alloc: 928 rte_free(evq); 929 930 fail_evq_alloc: 931 932 sfc_log_init(sa, "failed %d", rc); 933 return rc; 934 } 935 936 void 937 sfc_ev_qfini(struct sfc_evq *evq) 938 { 939 struct sfc_adapter *sa = evq->sa; 940 941 SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED); 942 943 sfc_dma_free(sa, &evq->mem); 944 945 rte_free(evq); 946 947 SFC_ASSERT(sa->evq_count > 0); 948 sa->evq_count--; 949 } 950 951 static int 952 sfc_kvarg_perf_profile_handler(__rte_unused const char *key, 953 const char *value_str, void *opaque) 954 { 955 uint32_t *value = opaque; 956 957 if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0) 958 *value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT; 959 else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0) 960 *value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY; 961 else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0) 962 *value = EFX_EVQ_FLAGS_TYPE_AUTO; 963 else 964 return -EINVAL; 965 966 return 0; 967 } 968 969 int 970 sfc_ev_attach(struct sfc_adapter *sa) 971 { 972 int rc; 973 974 sfc_log_init(sa, "entry"); 975 976 sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT; 977 rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE, 978 sfc_kvarg_perf_profile_handler, 979 &sa->evq_flags); 980 if (rc != 0) { 981 sfc_err(sa, "invalid %s parameter value", 982 SFC_KVARG_PERF_PROFILE); 983 goto fail_kvarg_perf_profile; 984 } 985 986 sa->mgmt_evq_index = sfc_mgmt_evq_sw_index(sfc_sa2shared(sa)); 987 rte_spinlock_init(&sa->mgmt_evq_lock); 988 989 rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, sa->evq_min_entries, 990 sa->socket_id, &sa->mgmt_evq); 991 if (rc != 0) 992 goto fail_mgmt_evq_init; 993 994 /* 995 * Rx/Tx event queues are created/destroyed when corresponding 996 * Rx/Tx queue is created/destroyed. 997 */ 998 999 return 0; 1000 1001 fail_mgmt_evq_init: 1002 1003 fail_kvarg_perf_profile: 1004 sfc_log_init(sa, "failed %d", rc); 1005 return rc; 1006 } 1007 1008 void 1009 sfc_ev_detach(struct sfc_adapter *sa) 1010 { 1011 sfc_log_init(sa, "entry"); 1012 1013 sfc_ev_qfini(sa->mgmt_evq); 1014 1015 if (sa->evq_count != 0) 1016 sfc_err(sa, "%u EvQs are not destroyed before detach", 1017 sa->evq_count); 1018 } 1019