/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2007-2019 Solarflare Communications Inc.
 */

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_MCDI
#include "mcdi_mon.h"
#endif

#define EFX_EV_PRESENT(_qword) \
    (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \
    EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff)


#if EFSYS_OPT_SIENA

static __checkReturn efx_rc_t
siena_ev_init(
    __in efx_nic_t *enp);

static void
siena_ev_fini(
    __in efx_nic_t *enp);

static __checkReturn efx_rc_t
siena_ev_qcreate(
    __in efx_nic_t *enp,
    __in unsigned int index,
    __in efsys_mem_t *esmp,
    __in size_t ndescs,
    __in uint32_t id,
    __in uint32_t us,
    __in uint32_t flags,
    __in uint32_t irq,
    __in efx_evq_t *eep);

static void
siena_ev_qdestroy(
    __in efx_evq_t *eep);

static __checkReturn efx_rc_t
siena_ev_qprime(
    __in efx_evq_t *eep,
    __in unsigned int count);

static void
siena_ev_qpost(
    __in efx_evq_t *eep,
    __in uint16_t data);

static __checkReturn efx_rc_t
siena_ev_qmoderate(
    __in efx_evq_t *eep,
    __in unsigned int us);

#if EFSYS_OPT_QSTATS
static void
siena_ev_qstats_update(
    __in efx_evq_t *eep,
    __inout_ecount(EV_NQSTATS) efsys_stat_t *stat);

#endif

#endif /* EFSYS_OPT_SIENA */

#if EFX_OPTS_EF10() || EFSYS_OPT_SIENA

static void
siena_ef10_ev_qpoll(
    __in efx_evq_t *eep,
    __inout unsigned int *countp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg);

#endif /* EFX_OPTS_EF10() || EFSYS_OPT_SIENA */

#if EFSYS_OPT_SIENA
static const efx_ev_ops_t __efx_ev_siena_ops = {
    siena_ev_init,              /* eevo_init */
    siena_ev_fini,              /* eevo_fini */
    siena_ev_qcreate,           /* eevo_qcreate */
    siena_ev_qdestroy,          /* eevo_qdestroy */
    siena_ev_qprime,            /* eevo_qprime */
    siena_ev_qpost,             /* eevo_qpost */
    siena_ef10_ev_qpoll,        /* eevo_qpoll */
    siena_ev_qmoderate,         /* eevo_qmoderate */
#if EFSYS_OPT_QSTATS
    siena_ev_qstats_update,     /* eevo_qstats_update */
#endif
};
#endif /* EFSYS_OPT_SIENA */

#if EFX_OPTS_EF10()
static const efx_ev_ops_t __efx_ev_ef10_ops = {
    ef10_ev_init,               /* eevo_init */
    ef10_ev_fini,               /* eevo_fini */
    ef10_ev_qcreate,            /* eevo_qcreate */
    ef10_ev_qdestroy,           /* eevo_qdestroy */
    ef10_ev_qprime,             /* eevo_qprime */
    ef10_ev_qpost,              /* eevo_qpost */
    siena_ef10_ev_qpoll,        /* eevo_qpoll */
    ef10_ev_qmoderate,          /* eevo_qmoderate */
#if EFSYS_OPT_QSTATS
    ef10_ev_qstats_update,      /* eevo_qstats_update */
#endif
};
#endif /* EFX_OPTS_EF10() */

#if EFSYS_OPT_RIVERHEAD
static const efx_ev_ops_t __efx_ev_rhead_ops = {
    rhead_ev_init,              /* eevo_init */
    rhead_ev_fini,              /* eevo_fini */
    rhead_ev_qcreate,           /* eevo_qcreate */
    rhead_ev_qdestroy,          /* eevo_qdestroy */
    rhead_ev_qprime,            /* eevo_qprime */
    rhead_ev_qpost,             /* eevo_qpost */
    rhead_ev_qpoll,             /* eevo_qpoll */
    rhead_ev_qmoderate,         /* eevo_qmoderate */
#if EFSYS_OPT_QSTATS
    rhead_ev_qstats_update,     /* eevo_qstats_update */
#endif
};
#endif /* EFSYS_OPT_RIVERHEAD */


__checkReturn efx_rc_t
efx_ev_init(
    __in efx_nic_t *enp)
{
    const efx_ev_ops_t *eevop;
    efx_rc_t rc;

    EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
    EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);

    if (enp->en_mod_flags & EFX_MOD_EV) {
        rc = EINVAL;
        goto fail1;
    }

    switch (enp->en_family) {
#if EFSYS_OPT_SIENA
    case EFX_FAMILY_SIENA:
        eevop = &__efx_ev_siena_ops;
        break;
#endif /* EFSYS_OPT_SIENA */

#if EFSYS_OPT_HUNTINGTON
    case EFX_FAMILY_HUNTINGTON:
        eevop = &__efx_ev_ef10_ops;
        break;
#endif /* EFSYS_OPT_HUNTINGTON */

#if EFSYS_OPT_MEDFORD
    case EFX_FAMILY_MEDFORD:
        eevop = &__efx_ev_ef10_ops;
        break;
#endif /* EFSYS_OPT_MEDFORD */

#if EFSYS_OPT_MEDFORD2
    case EFX_FAMILY_MEDFORD2:
        eevop = &__efx_ev_ef10_ops;
        break;
#endif /* EFSYS_OPT_MEDFORD2 */

#if EFSYS_OPT_RIVERHEAD
    case EFX_FAMILY_RIVERHEAD:
        eevop = &__efx_ev_rhead_ops;
        break;
#endif /* EFSYS_OPT_RIVERHEAD */

    default:
        EFSYS_ASSERT(0);
        rc = ENOTSUP;
        goto fail1;
    }

    EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);

    if ((rc = eevop->eevo_init(enp)) != 0)
        goto fail2;

    enp->en_eevop = eevop;
    enp->en_mod_flags |= EFX_MOD_EV;
    return (0);

fail2:
    EFSYS_PROBE(fail2);

fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    enp->en_eevop = NULL;
    enp->en_mod_flags &= ~EFX_MOD_EV;
    return (rc);
}

__checkReturn size_t
efx_evq_size(
    __in const efx_nic_t *enp,
    __in unsigned int ndescs,
    __in uint32_t flags)
{
    const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
    size_t desc_size;

    desc_size = encp->enc_ev_desc_size;

#if EFSYS_OPT_EV_EXTENDED_WIDTH
    if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH)
        desc_size = encp->enc_ev_ew_desc_size;
#else
    EFSYS_ASSERT((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) == 0);
#endif

    return (ndescs * desc_size);
}

__checkReturn unsigned int
efx_evq_nbufs(
    __in const efx_nic_t *enp,
    __in unsigned int ndescs,
    __in uint32_t flags)
{
    size_t size;

    size = efx_evq_size(enp, ndescs, flags);

    return (EFX_DIV_ROUND_UP(size, EFX_BUF_SIZE));
}
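
/*
 * Illustrative sketch (not part of the library): a client would typically use
 * the helpers above to size the event queue DMA buffer before creating the
 * queue. The 'ndescs' and 'flags' values below are placeholders for whatever
 * the client later passes to efx_ev_qcreate(); allocating DMA-coherent memory
 * into an efsys_mem_t is platform code and is not shown here.
 *
 *    size_t bytes = efx_evq_size(enp, ndescs, flags);
 *    unsigned int nbufs = efx_evq_nbufs(enp, ndescs, flags);
 *    // allocate 'bytes' of DMA memory (covering 'nbufs' buffer-table
 *    // entries of EFX_BUF_SIZE each) into esmp, then create the queue:
 *    // efx_ev_qcreate(enp, index, esmp, ndescs, id, us, flags, &eep);
 */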

void
efx_ev_fini(
    __in efx_nic_t *enp)
{
    const efx_ev_ops_t *eevop = enp->en_eevop;

    EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
    EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR);
    EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);
    EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX));
    EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX));
    EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0);

    eevop->eevo_fini(enp);

    enp->en_eevop = NULL;
    enp->en_mod_flags &= ~EFX_MOD_EV;
}


__checkReturn efx_rc_t
efx_ev_qcreate_irq(
    __in efx_nic_t *enp,
    __in unsigned int index,
    __in efsys_mem_t *esmp,
    __in size_t ndescs,
    __in uint32_t id,
    __in uint32_t us,
    __in uint32_t flags,
    __in uint32_t irq,
    __deref_out efx_evq_t **eepp)
{
    const efx_ev_ops_t *eevop = enp->en_eevop;
    efx_evq_t *eep;
    const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
    efx_rc_t rc;

    EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC);
    EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV);

    EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <,
        enp->en_nic_cfg.enc_evq_limit);

    if (index >= encp->enc_evq_limit) {
        rc = EINVAL;
        goto fail1;
    }

    if (us > encp->enc_evq_timer_max_us) {
        rc = EINVAL;
        goto fail2;
    }

    switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) {
    case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT:
        break;
    case EFX_EVQ_FLAGS_NOTIFY_DISABLED:
        if (us != 0) {
            rc = EINVAL;
            goto fail3;
        }
        break;
    default:
        rc = EINVAL;
        goto fail4;
    }

    if ((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) &&
        (encp->enc_ev_ew_desc_size == 0)) {
        /* Extended width event descriptors are not supported. */
        rc = EINVAL;
        goto fail5;
    }

    EFSYS_ASSERT(ISP2(encp->enc_evq_max_nevs));
    EFSYS_ASSERT(ISP2(encp->enc_evq_min_nevs));

    if (!ISP2(ndescs) ||
        ndescs < encp->enc_evq_min_nevs ||
        ndescs > encp->enc_evq_max_nevs) {
        rc = EINVAL;
        goto fail6;
    }

    if (EFSYS_MEM_SIZE(esmp) < (ndescs * encp->enc_ev_desc_size)) {
        /* Buffer too small for event queue descriptors. */
        rc = EINVAL;
        goto fail7;
    }

    /* Allocate an EVQ object */
    EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep);
    if (eep == NULL) {
        rc = ENOMEM;
        goto fail8;
    }

    eep->ee_magic = EFX_EVQ_MAGIC;
    eep->ee_enp = enp;
    eep->ee_index = index;
    eep->ee_mask = ndescs - 1;
    eep->ee_flags = flags;
    eep->ee_esmp = esmp;

    /*
     * Set outputs before the queue is created because interrupts may be
     * raised for events immediately after the queue is created, before the
     * function call below returns. See bug58606.
     *
     * The eepp pointer passed in by the client must therefore point to data
     * shared with the client's event processing context.
     */
    enp->en_ev_qcount++;
    *eepp = eep;

    if ((rc = eevop->eevo_qcreate(enp, index, esmp, ndescs, id, us, flags,
        irq, eep)) != 0)
        goto fail9;

    return (0);

fail9:
    EFSYS_PROBE(fail9);

    *eepp = NULL;
    enp->en_ev_qcount--;
    EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
fail8:
    EFSYS_PROBE(fail8);
fail7:
    EFSYS_PROBE(fail7);
fail6:
    EFSYS_PROBE(fail6);
fail5:
    EFSYS_PROBE(fail5);
fail4:
    EFSYS_PROBE(fail4);
fail3:
    EFSYS_PROBE(fail3);
fail2:
    EFSYS_PROBE(fail2);
fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    return (rc);
}

__checkReturn efx_rc_t
efx_ev_qcreate(
    __in efx_nic_t *enp,
    __in unsigned int index,
    __in efsys_mem_t *esmp,
    __in size_t ndescs,
    __in uint32_t id,
    __in uint32_t us,
    __in uint32_t flags,
    __deref_out efx_evq_t **eepp)
{
    uint32_t irq = index;

    return (efx_ev_qcreate_irq(enp, index, esmp, ndescs, id, us, flags,
        irq, eepp));
}

void
efx_ev_qdestroy(
    __in efx_evq_t *eep)
{
    efx_nic_t *enp = eep->ee_enp;
    const efx_ev_ops_t *eevop = enp->en_eevop;

    EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);

    EFSYS_ASSERT(enp->en_ev_qcount != 0);
    --enp->en_ev_qcount;

    eevop->eevo_qdestroy(eep);

    /* Free the EVQ object */
    EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep);
}

__checkReturn efx_rc_t
efx_ev_qprime(
    __in efx_evq_t *eep,
    __in unsigned int count)
{
    efx_nic_t *enp = eep->ee_enp;
    const efx_ev_ops_t *eevop = enp->en_eevop;
    efx_rc_t rc;

    EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);

    if (!(enp->en_mod_flags & EFX_MOD_INTR)) {
        rc = EINVAL;
        goto fail1;
    }

    if ((rc = eevop->eevo_qprime(eep, count)) != 0)
        goto fail2;

    return (0);

fail2:
    EFSYS_PROBE(fail2);
fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    return (rc);
}

__checkReturn boolean_t
efx_ev_qpending(
    __in efx_evq_t *eep,
    __in unsigned int count)
{
    size_t offset;
    efx_qword_t qword;

    EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);

    offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
    EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword);

    return (EFX_EV_PRESENT(qword));
}
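
/*
 * Illustrative sketch (not part of the library): a typical client event loop
 * keeps a running 'count', polls the queue and then re-primes it so that the
 * next notification is generated. The callbacks structure, argument and loop
 * policy below are hypothetical; real drivers hold their EVQ lock around this
 * sequence.
 *
 *    unsigned int count = 0;
 *
 *    for (;;) {
 *        // block on an interrupt, or spin on efx_ev_qpending(eep, count)
 *        efx_ev_qpoll(eep, &count, &my_ev_callbacks, my_arg);
 *        (void) efx_ev_qprime(eep, count);
 *    }
 */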

#if EFSYS_OPT_EV_PREFETCH

void
efx_ev_qprefetch(
    __in efx_evq_t *eep,
    __in unsigned int count)
{
    unsigned int offset;

    EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);

    offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
    EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
}

#endif /* EFSYS_OPT_EV_PREFETCH */

/*
 * This function ensures that the eec_initialized callback is invoked after
 * queue creation. It invokes the callback itself on Riverhead boards, which
 * have no support for INIT_DONE events, and does nothing on other boards,
 * where the callback is invoked from event processing instead.
 *
 * Client drivers must call this function after calling efx_ev_qcreate().
 * The call must be made with the same locks held (if any) that are normally
 * acquired around efx_ev_qpoll() calls, so that the eec_initialized callback
 * is invoked within the same locking context.
 */
void
efx_ev_qcreate_check_init_done(
    __in efx_evq_t *eep,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    const efx_nic_cfg_t *encp;

    EFSYS_ASSERT(eep != NULL);
    EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
    EFSYS_ASSERT(eecp != NULL);
    EFSYS_ASSERT(eecp->eec_initialized != NULL);

    encp = efx_nic_cfg_get(eep->ee_enp);

    if (encp->enc_evq_init_done_ev_supported == B_FALSE)
        (void) eecp->eec_initialized(arg);
}

void
efx_ev_qpoll(
    __in efx_evq_t *eep,
    __inout unsigned int *countp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    efx_nic_t *enp = eep->ee_enp;
    const efx_ev_ops_t *eevop = enp->en_eevop;

    EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);

    EFSYS_ASSERT(eevop != NULL &&
        eevop->eevo_qpoll != NULL);

    eevop->eevo_qpoll(eep, countp, eecp, arg);
}

void
efx_ev_qpost(
    __in efx_evq_t *eep,
    __in uint16_t data)
{
    efx_nic_t *enp = eep->ee_enp;
    const efx_ev_ops_t *eevop = enp->en_eevop;

    EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);

    EFSYS_ASSERT(eevop != NULL &&
        eevop->eevo_qpost != NULL);

    eevop->eevo_qpost(eep, data);
}

__checkReturn efx_rc_t
efx_ev_usecs_to_ticks(
    __in efx_nic_t *enp,
    __in unsigned int us,
    __out unsigned int *ticksp)
{
    efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
    unsigned int ticks;
    efx_rc_t rc;

    if (encp->enc_evq_timer_quantum_ns == 0) {
        rc = ENOTSUP;
        goto fail1;
    }

    /* Convert microseconds to a timer tick count */
    if (us == 0)
        ticks = 0;
    else if (us * 1000 < encp->enc_evq_timer_quantum_ns)
        ticks = 1;  /* Never round down to zero */
    else
        ticks = us * 1000 / encp->enc_evq_timer_quantum_ns;

    *ticksp = ticks;
    return (0);

fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    return (rc);
}
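
/*
 * Worked example of the conversion above (the quantum value is illustrative
 * only; the real value comes from enc_evq_timer_quantum_ns in the NIC config):
 * with a 6144 ns timer quantum, us = 50 gives ticks = 50000 / 6144 = 8, while
 * us = 3 (3000 ns, less than one quantum) is rounded up to 1 tick rather than
 * down to zero.
 */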

__checkReturn efx_rc_t
efx_ev_qmoderate(
    __in efx_evq_t *eep,
    __in unsigned int us)
{
    efx_nic_t *enp = eep->ee_enp;
    const efx_ev_ops_t *eevop = enp->en_eevop;
    efx_rc_t rc;

    EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);

    if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
        EFX_EVQ_FLAGS_NOTIFY_DISABLED) {
        rc = EINVAL;
        goto fail1;
    }

    if ((rc = eevop->eevo_qmoderate(eep, us)) != 0)
        goto fail2;

    return (0);

fail2:
    EFSYS_PROBE(fail2);
fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);
    return (rc);
}

#if EFSYS_OPT_QSTATS
void
efx_ev_qstats_update(
    __in efx_evq_t *eep,
    __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
{
    efx_nic_t *enp = eep->ee_enp;
    const efx_ev_ops_t *eevop = enp->en_eevop;

    EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);

    eevop->eevo_qstats_update(eep, stat);
}

#endif /* EFSYS_OPT_QSTATS */

#if EFSYS_OPT_SIENA

static __checkReturn efx_rc_t
siena_ev_init(
    __in efx_nic_t *enp)
{
    efx_oword_t oword;

    /*
     * Program the event queue for receive and transmit queue
     * flush events.
     */
    EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword);
    EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0);
    EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword);

    return (0);
}

static __checkReturn boolean_t
siena_ev_rx_not_ok(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in uint32_t label,
    __in uint32_t id,
    __inout uint16_t *flagsp)
{
    boolean_t ignore = B_FALSE;

    if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) {
        EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC);
        EFSYS_PROBE(tobe_disc);
        /*
         * Assume this is a unicast address mismatch, unless below
         * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or
         * EV_RX_PAUSE_FRM_ERR is set.
         */
        (*flagsp) |= EFX_ADDR_MISMATCH;
    }

    if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) {
        EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id);
        EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
        (*flagsp) |= EFX_DISCARD;

#if EFSYS_OPT_RX_SCATTER
        /*
         * Look out for payload queue ran dry errors and ignore them.
         *
         * Sadly for the header/data split cases, the descriptor
         * pointer in this event refers to the header queue and
         * therefore cannot be easily detected as duplicate.
         * So we drop these and rely on the receive processing seeing
         * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard
         * the partially received packet.
         */
        if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) &&
            (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) &&
            (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0))
            ignore = B_TRUE;
#endif /* EFSYS_OPT_RX_SCATTER */
    }

    if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) {
        EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
        EFSYS_PROBE(crc_err);
        (*flagsp) &= ~EFX_ADDR_MISMATCH;
        (*flagsp) |= EFX_DISCARD;
    }

    if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) {
        EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR);
        EFSYS_PROBE(pause_frm_err);
        (*flagsp) &= ~EFX_ADDR_MISMATCH;
        (*flagsp) |= EFX_DISCARD;
    }

    if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) {
        EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR);
        EFSYS_PROBE(owner_id_err);
        (*flagsp) |= EFX_DISCARD;
    }

    if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) {
        EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
        EFSYS_PROBE(ipv4_err);
        (*flagsp) &= ~EFX_CKSUM_IPV4;
    }

    if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) {
        EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
        EFSYS_PROBE(udp_chk_err);
        (*flagsp) &= ~EFX_CKSUM_TCPUDP;
    }

    if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) {
        EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR);

        /*
         * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This
         * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error
         * condition.
         */
        (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP);
    }

    return (ignore);
}

static __checkReturn boolean_t
siena_ev_rx(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    uint32_t id;
    uint32_t size;
    uint32_t label;
    boolean_t ok;
#if EFSYS_OPT_RX_SCATTER
    boolean_t sop;
    boolean_t jumbo_cont;
#endif /* EFSYS_OPT_RX_SCATTER */
    uint32_t hdr_type;
    boolean_t is_v6;
    uint16_t flags;
    boolean_t ignore;
    boolean_t should_abort;

    EFX_EV_QSTAT_INCR(eep, EV_RX);

    /* Basic packet information */
    id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR);
    size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT);
    label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL);
    ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0);

#if EFSYS_OPT_RX_SCATTER
    sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0);
    jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0);
#endif /* EFSYS_OPT_RX_SCATTER */

    hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE);

    is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0);

    /*
     * If packet is marked as OK and packet type is TCP/IP or
     * UDP/IP or other IP, then we can rely on the hardware checksums.
     */
    switch (hdr_type) {
    case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
        flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP;
        if (is_v6) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
            flags |= EFX_PKT_IPV6;
        } else {
            EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
            flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
        }
        break;

    case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
        flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP;
        if (is_v6) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
            flags |= EFX_PKT_IPV6;
        } else {
            EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
            flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
        }
        break;

    case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
        if (is_v6) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
            flags = EFX_PKT_IPV6;
        } else {
            EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
            flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4;
        }
        break;

    case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
        EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
        flags = 0;
        break;

    default:
        EFSYS_ASSERT(B_FALSE);
        flags = 0;
        break;
    }

#if EFSYS_OPT_RX_SCATTER
    /* Report scatter and header/lookahead split buffer flags */
    if (sop)
        flags |= EFX_PKT_START;
    if (jumbo_cont)
        flags |= EFX_PKT_CONT;
#endif /* EFSYS_OPT_RX_SCATTER */

    /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */
    if (!ok) {
        ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags);
        if (ignore) {
            EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
                uint32_t, size, uint16_t, flags);

            return (B_FALSE);
        }
    }

    /* If we're not discarding the packet then it is ok */
    if (~flags & EFX_DISCARD)
        EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

    /* Detect multicast packets that didn't match the filter */
    if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) {
        EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT);

        if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) {
            EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH);
        } else {
            EFSYS_PROBE(mcast_mismatch);
            flags |= EFX_ADDR_MISMATCH;
        }
    } else {
        flags |= EFX_PKT_UNICAST;
    }

    /*
     * The packet parser in Siena can abort parsing packets under
     * certain error conditions, setting the PKT_NOT_PARSED bit
     * (which clears PKT_OK). If this is set, then don't trust
     * the PKT_TYPE field.
     */
    if (!ok) {
        uint32_t parse_err;

        parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED);
        if (parse_err != 0)
            flags |= EFX_CHECK_VLAN;
    }

    if (~flags & EFX_CHECK_VLAN) {
        uint32_t pkt_type;

        pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE);
        if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN)
            flags |= EFX_PKT_VLAN_TAGGED;
    }

    EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id,
        uint32_t, size, uint16_t, flags);

    EFSYS_ASSERT(eecp->eec_rx != NULL);
    should_abort = eecp->eec_rx(arg, label, id, size, flags);

    return (should_abort);
}

static __checkReturn boolean_t
siena_ev_tx(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    uint32_t id;
    uint32_t label;
    boolean_t should_abort;

    EFX_EV_QSTAT_INCR(eep, EV_TX);

    if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 &&
        EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 &&
        EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 &&
        EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) {

        id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR);
        label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL);

        EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);

        EFSYS_ASSERT(eecp->eec_tx != NULL);
        should_abort = eecp->eec_tx(arg, label, id);

        return (should_abort);
    }

    if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0)
        EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

    if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0)
        EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR);

    if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0)
        EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG);

    if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0)
        EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL);

    EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED);
    return (B_FALSE);
}

static __checkReturn boolean_t
siena_ev_global(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    _NOTE(ARGUNUSED(eqp, eecp, arg))

    EFX_EV_QSTAT_INCR(eep, EV_GLOBAL);

    return (B_FALSE);
}

static __checkReturn boolean_t
siena_ev_driver(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    boolean_t should_abort;

    EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
    should_abort = B_FALSE;

    switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) {
    case FSE_AZ_TX_DESCQ_FLS_DONE_EV: {
        uint32_t txq_index;

        EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

        txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);

        EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

        EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
        should_abort = eecp->eec_txq_flush_done(arg, txq_index);

        break;
    }
    case FSE_AZ_RX_DESCQ_FLS_DONE_EV: {
        uint32_t rxq_index;
        uint32_t failed;

        rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
        failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);

        EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
        EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL);

        if (failed) {
            EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED);

            EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index);

            should_abort = eecp->eec_rxq_flush_failed(arg,
                rxq_index);
        } else {
            EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

            EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

            should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
        }

        break;
    }
    case FSE_AZ_EVQ_INIT_DONE_EV:
        EFSYS_ASSERT(eecp->eec_initialized != NULL);
        should_abort = eecp->eec_initialized(arg);

        break;

    case FSE_AZ_EVQ_NOT_EN_EV:
        EFSYS_PROBE(evq_not_en);
        break;

    case FSE_AZ_SRM_UPD_DONE_EV: {
        uint32_t code;

        EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE);

        code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);

        EFSYS_ASSERT(eecp->eec_sram != NULL);
        should_abort = eecp->eec_sram(arg, code);

        break;
    }
    case FSE_AZ_WAKE_UP_EV: {
        uint32_t id;

        id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);

        EFSYS_ASSERT(eecp->eec_wake_up != NULL);
        should_abort = eecp->eec_wake_up(arg, id);

        break;
    }
    case FSE_AZ_TX_PKT_NON_TCP_UDP:
        EFSYS_PROBE(tx_pkt_non_tcp_udp);
        break;

    case FSE_AZ_TIMER_EV: {
        uint32_t id;

        id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA);

        EFSYS_ASSERT(eecp->eec_timer != NULL);
        should_abort = eecp->eec_timer(arg, id);

        break;
    }
    case FSE_AZ_RX_DSC_ERROR_EV:
        EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR);

        EFSYS_PROBE(rx_dsc_error);

        EFSYS_ASSERT(eecp->eec_exception != NULL);
        should_abort = eecp->eec_exception(arg,
            EFX_EXCEPTION_RX_DSC_ERROR, 0);

        break;

    case FSE_AZ_TX_DSC_ERROR_EV:
        EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR);

        EFSYS_PROBE(tx_dsc_error);

        EFSYS_ASSERT(eecp->eec_exception != NULL);
        should_abort = eecp->eec_exception(arg,
            EFX_EXCEPTION_TX_DSC_ERROR, 0);

        break;

    default:
        break;
    }

    return (should_abort);
}

static __checkReturn boolean_t
siena_ev_drv_gen(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    uint32_t data;
    boolean_t should_abort;

    EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);

    data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0);
    if (data >= ((uint32_t)1 << 16)) {
        EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
            uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
        return (B_TRUE);
    }

    EFSYS_ASSERT(eecp->eec_software != NULL);
    should_abort = eecp->eec_software(arg, (uint16_t)data);

    return (should_abort);
}
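
/*
 * Illustrative sketch (not part of the library): the handler above delivers
 * driver-generated events posted with efx_ev_qpost(). A client can use this
 * as a lightweight way to wake its own event loop; the 16-bit token below is
 * hypothetical and only has to be meaningful to the client's eec_software
 * callback.
 *
 *    #define MY_EVQ_WAKEUP_MAGIC 0x1ee7   // hypothetical client-chosen token
 *
 *    efx_ev_qpost(eep, MY_EVQ_WAKEUP_MAGIC);
 *    // ...later, eec_software(arg, MY_EVQ_WAKEUP_MAGIC) is invoked from
 *    // efx_ev_qpoll() on this queue.
 */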

#if EFSYS_OPT_MCDI

static __checkReturn boolean_t
siena_ev_mcdi(
    __in efx_evq_t *eep,
    __in efx_qword_t *eqp,
    __in const efx_ev_callbacks_t *eecp,
    __in_opt void *arg)
{
    efx_nic_t *enp = eep->ee_enp;
    unsigned int code;
    boolean_t should_abort = B_FALSE;

    EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA);

    if (enp->en_family != EFX_FAMILY_SIENA)
        goto out;

    EFSYS_ASSERT(eecp->eec_link_change != NULL);
    EFSYS_ASSERT(eecp->eec_exception != NULL);
#if EFSYS_OPT_MON_STATS
    EFSYS_ASSERT(eecp->eec_monitor != NULL);
#endif

    EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

    code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
    switch (code) {
    case MCDI_EVENT_CODE_BADSSERT:
        efx_mcdi_ev_death(enp, EINTR);
        break;

    case MCDI_EVENT_CODE_CMDDONE:
        efx_mcdi_ev_cpl(enp,
            MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
            MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
            MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
        break;

    case MCDI_EVENT_CODE_LINKCHANGE: {
        efx_link_mode_t link_mode;

        siena_phy_link_ev(enp, eqp, &link_mode);
        should_abort = eecp->eec_link_change(arg, link_mode);
        break;
    }
    case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
        efx_mon_stat_t id;
        efx_mon_stat_value_t value;
        efx_rc_t rc;

        if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0)
            should_abort = eecp->eec_monitor(arg, id, value);
        else if (rc == ENOTSUP) {
            should_abort = eecp->eec_exception(arg,
                EFX_EXCEPTION_UNKNOWN_SENSOREVT,
                MCDI_EV_FIELD(eqp, DATA));
        } else
            EFSYS_ASSERT(rc == ENODEV); /* Wrong port */
#else
        should_abort = B_FALSE;
#endif
        break;
    }
    case MCDI_EVENT_CODE_SCHEDERR:
        /* Informational only */
        break;

    case MCDI_EVENT_CODE_REBOOT:
        efx_mcdi_ev_death(enp, EIO);
        break;

    case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
        if (eecp->eec_mac_stats != NULL) {
            eecp->eec_mac_stats(arg,
                MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
        }
#endif
        break;

    case MCDI_EVENT_CODE_FWALERT: {
        uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

        if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
            should_abort = eecp->eec_exception(arg,
                EFX_EXCEPTION_FWALERT_SRAM,
                MCDI_EV_FIELD(eqp, FWALERT_DATA));
        else
            should_abort = eecp->eec_exception(arg,
                EFX_EXCEPTION_UNKNOWN_FWALERT,
                MCDI_EV_FIELD(eqp, DATA));
        break;
    }

    default:
        EFSYS_PROBE1(mc_pcol_error, int, code);
        break;
    }

out:
    return (should_abort);
}

#endif /* EFSYS_OPT_MCDI */

static __checkReturn efx_rc_t
siena_ev_qprime(
    __in efx_evq_t *eep,
    __in unsigned int count)
{
    efx_nic_t *enp = eep->ee_enp;
    uint32_t rptr;
    efx_dword_t dword;

    rptr = count & eep->ee_mask;

    EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr);

    EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index,
        &dword, B_FALSE);

    return (0);
}

static void
siena_ev_qpost(
    __in efx_evq_t *eep,
    __in uint16_t data)
{
    efx_nic_t *enp = eep->ee_enp;
    efx_qword_t ev;
    efx_oword_t oword;

    EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV,
        FSF_AZ_EV_DATA_DW0, (uint32_t)data);

    EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index,
        EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0),
        EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1));

    EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword);
}

static __checkReturn efx_rc_t
siena_ev_qmoderate(
    __in efx_evq_t *eep,
    __in unsigned int us)
{
    efx_nic_t *enp = eep->ee_enp;
    efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
    unsigned int locked;
    efx_dword_t dword;
    efx_rc_t rc;

    if (us > encp->enc_evq_timer_max_us) {
        rc = EINVAL;
        goto fail1;
    }

    /* If the value is zero then disable the timer */
    if (us == 0) {
        EFX_POPULATE_DWORD_2(dword,
            FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS,
            FRF_CZ_TC_TIMER_VAL, 0);
    } else {
        unsigned int ticks;

        if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
            goto fail2;

        EFSYS_ASSERT(ticks > 0);
        EFX_POPULATE_DWORD_2(dword,
            FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF,
            FRF_CZ_TC_TIMER_VAL, ticks - 1);
    }

    locked = (eep->ee_index == 0) ? 1 : 0;

    EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0,
        eep->ee_index, &dword, locked);

    return (0);

fail2:
    EFSYS_PROBE(fail2);
fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}

static __checkReturn efx_rc_t
siena_ev_qcreate(
    __in efx_nic_t *enp,
    __in unsigned int index,
    __in efsys_mem_t *esmp,
    __in size_t ndescs,
    __in uint32_t id,
    __in uint32_t us,
    __in uint32_t flags,
    __in uint32_t irq,
    __in efx_evq_t *eep)
{
    efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
    uint32_t size;
    efx_oword_t oword;
    efx_rc_t rc;
    boolean_t notify_mode;

    _NOTE(ARGUNUSED(esmp))

    EFSYS_ASSERT((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) == 0);

    if (irq != index) {
        rc = EINVAL;
        goto fail1;
    }

#if EFSYS_OPT_RX_SCALE
    if (enp->en_intr.ei_type == EFX_INTR_LINE &&
        index >= EFX_MAXRSS_LEGACY) {
        rc = EINVAL;
        goto fail2;
    }
#endif
    /* Find the hardware size encoding: enc_evq_min_nevs << size == ndescs */
    for (size = 0;
        (1U << size) <= encp->enc_evq_max_nevs / encp->enc_evq_min_nevs;
        size++)
        if ((1U << size) == (uint32_t)ndescs / encp->enc_evq_min_nevs)
            break;
    if (id + (1 << size) >= encp->enc_buftbl_limit) {
        rc = EINVAL;
        goto fail3;
    }

    /* Set up the handler table */
    eep->ee_rx = siena_ev_rx;
    eep->ee_tx = siena_ev_tx;
    eep->ee_driver = siena_ev_driver;
    eep->ee_global = siena_ev_global;
    eep->ee_drv_gen = siena_ev_drv_gen;
#if EFSYS_OPT_MCDI
    eep->ee_mcdi = siena_ev_mcdi;
#endif /* EFSYS_OPT_MCDI */

    notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) !=
        EFX_EVQ_FLAGS_NOTIFY_INTERRUPT);

    /* Set up the new event queue */
    EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1,
        FRF_CZ_HOST_NOTIFY_MODE, notify_mode,
        FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS);
    EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE);

    EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size,
        FRF_AZ_EVQ_BUF_BASE_ID, id);

    EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE);

    /* Set initial interrupt moderation */
    siena_ev_qmoderate(eep, us);

    return (0);

fail3:
    EFSYS_PROBE(fail3);
#if EFSYS_OPT_RX_SCALE
fail2:
    EFSYS_PROBE(fail2);
#endif
fail1:
    EFSYS_PROBE1(fail1, efx_rc_t, rc);

    return (rc);
}

#endif /* EFSYS_OPT_SIENA */

#if EFSYS_OPT_QSTATS
#if EFSYS_OPT_NAMES
/* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock ac223f7134058b4f */
static const char * const __efx_ev_qstat_name[] = {
    "all",
    "rx",
    "rx_ok",
    "rx_frm_trunc",
    "rx_tobe_disc",
    "rx_pause_frm_err",
    "rx_buf_owner_id_err",
    "rx_ipv4_hdr_chksum_err",
    "rx_tcp_udp_chksum_err",
    "rx_eth_crc_err",
    "rx_ip_frag_err",
    "rx_mcast_pkt",
    "rx_mcast_hash_match",
    "rx_tcp_ipv4",
    "rx_tcp_ipv6",
    "rx_udp_ipv4",
    "rx_udp_ipv6",
    "rx_other_ipv4",
    "rx_other_ipv6",
    "rx_non_ip",
    "rx_batch",
    "tx",
"tx_wq_ff_full", 1405 "tx_pkt_err", 1406 "tx_pkt_too_big", 1407 "tx_unexpected", 1408 "global", 1409 "global_mnt", 1410 "driver", 1411 "driver_srm_upd_done", 1412 "driver_tx_descq_fls_done", 1413 "driver_rx_descq_fls_done", 1414 "driver_rx_descq_fls_failed", 1415 "driver_rx_dsc_error", 1416 "driver_tx_dsc_error", 1417 "drv_gen", 1418 "mcdi_response", 1419 "rx_parse_incomplete", 1420 }; 1421 /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */ 1422 1423 const char * 1424 efx_ev_qstat_name( 1425 __in efx_nic_t *enp, 1426 __in unsigned int id) 1427 { 1428 _NOTE(ARGUNUSED(enp)) 1429 1430 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 1431 EFSYS_ASSERT3U(id, <, EV_NQSTATS); 1432 1433 return (__efx_ev_qstat_name[id]); 1434 } 1435 #endif /* EFSYS_OPT_NAMES */ 1436 #endif /* EFSYS_OPT_QSTATS */ 1437 1438 #if EFSYS_OPT_SIENA 1439 1440 #if EFSYS_OPT_QSTATS 1441 static void 1442 siena_ev_qstats_update( 1443 __in efx_evq_t *eep, 1444 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) 1445 { 1446 unsigned int id; 1447 1448 for (id = 0; id < EV_NQSTATS; id++) { 1449 efsys_stat_t *essp = &stat[id]; 1450 1451 EFSYS_STAT_INCR(essp, eep->ee_stat[id]); 1452 eep->ee_stat[id] = 0; 1453 } 1454 } 1455 #endif /* EFSYS_OPT_QSTATS */ 1456 1457 static void 1458 siena_ev_qdestroy( 1459 __in efx_evq_t *eep) 1460 { 1461 efx_nic_t *enp = eep->ee_enp; 1462 efx_oword_t oword; 1463 1464 /* Purge event queue */ 1465 EFX_ZERO_OWORD(oword); 1466 1467 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, 1468 eep->ee_index, &oword, B_TRUE); 1469 1470 EFX_ZERO_OWORD(oword); 1471 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE); 1472 } 1473 1474 static void 1475 siena_ev_fini( 1476 __in efx_nic_t *enp) 1477 { 1478 _NOTE(ARGUNUSED(enp)) 1479 } 1480 1481 #endif /* EFSYS_OPT_SIENA */ 1482 1483 #if EFX_OPTS_EF10() || EFSYS_OPT_SIENA 1484 1485 #define EFX_EV_BATCH 8 1486 1487 static void 1488 siena_ef10_ev_qpoll( 1489 __in efx_evq_t *eep, 1490 __inout unsigned int *countp, 1491 __in const efx_ev_callbacks_t *eecp, 1492 __in_opt void *arg) 1493 { 1494 efx_qword_t ev[EFX_EV_BATCH]; 1495 unsigned int batch; 1496 unsigned int total; 1497 unsigned int count; 1498 unsigned int index; 1499 size_t offset; 1500 1501 /* Ensure events codes match for EF10 (Huntington/Medford) and Siena */ 1502 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN); 1503 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH); 1504 1505 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV); 1506 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV); 1507 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV); 1508 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV == 1509 FSE_AZ_EV_CODE_DRV_GEN_EV); 1510 #if EFSYS_OPT_MCDI 1511 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV == 1512 FSE_AZ_EV_CODE_MCDI_EVRESPONSE); 1513 #endif 1514 1515 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); 1516 EFSYS_ASSERT(countp != NULL); 1517 EFSYS_ASSERT(eecp != NULL); 1518 1519 count = *countp; 1520 do { 1521 /* Read up until the end of the batch period */ 1522 batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1)); 1523 offset = (count & eep->ee_mask) * sizeof (efx_qword_t); 1524 for (total = 0; total < batch; ++total) { 1525 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total])); 1526 1527 if (!EFX_EV_PRESENT(ev[total])) 1528 break; 1529 1530 EFSYS_PROBE3(event, unsigned int, eep->ee_index, 1531 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1), 1532 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0)); 1533 1534 offset += sizeof 
        }

#if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1)
        /*
         * Prefetch the next batch when we get within PREFETCH_PERIOD
         * of a completed batch. If the batch is smaller, then prefetch
         * immediately.
         */
        if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD)
            EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
#endif /* EFSYS_OPT_EV_PREFETCH */

        /* Process the batch of events */
        for (index = 0; index < total; ++index) {
            boolean_t should_abort;
            uint32_t code;

#if EFSYS_OPT_EV_PREFETCH
            /* Prefetch if we've now reached the batch period */
            if (total == batch &&
                index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) {
                offset = (count + batch) & eep->ee_mask;
                offset *= sizeof (efx_qword_t);

                EFSYS_MEM_PREFETCH(eep->ee_esmp, offset);
            }
#endif /* EFSYS_OPT_EV_PREFETCH */

            EFX_EV_QSTAT_INCR(eep, EV_ALL);

            code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE);
            switch (code) {
            case FSE_AZ_EV_CODE_RX_EV:
                should_abort = eep->ee_rx(eep,
                    &(ev[index]), eecp, arg);
                break;
            case FSE_AZ_EV_CODE_TX_EV:
                should_abort = eep->ee_tx(eep,
                    &(ev[index]), eecp, arg);
                break;
            case FSE_AZ_EV_CODE_DRIVER_EV:
                should_abort = eep->ee_driver(eep,
                    &(ev[index]), eecp, arg);
                break;
            case FSE_AZ_EV_CODE_DRV_GEN_EV:
                should_abort = eep->ee_drv_gen(eep,
                    &(ev[index]), eecp, arg);
                break;
#if EFSYS_OPT_MCDI
            case FSE_AZ_EV_CODE_MCDI_EVRESPONSE:
                should_abort = eep->ee_mcdi(eep,
                    &(ev[index]), eecp, arg);
                break;
#endif
            case FSE_AZ_EV_CODE_GLOBAL_EV:
                if (eep->ee_global) {
                    should_abort = eep->ee_global(eep,
                        &(ev[index]), eecp, arg);
                    break;
                }
                /* else fallthrough */
            default:
                EFSYS_PROBE3(bad_event,
                    unsigned int, eep->ee_index,
                    uint32_t,
                    EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
                    uint32_t,
                    EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));

                EFSYS_ASSERT(eecp->eec_exception != NULL);
                (void) eecp->eec_exception(arg,
                    EFX_EXCEPTION_EV_ERROR, code);
                should_abort = B_TRUE;
            }
            if (should_abort) {
                /* Ignore subsequent events */
                total = index + 1;

                /*
                 * Poison batch to ensure the outer
                 * loop is broken out of.
                 */
                EFSYS_ASSERT(batch <= EFX_EV_BATCH);
                batch += (EFX_EV_BATCH << 1);
                EFSYS_ASSERT(total != batch);
                break;
            }
        }

        /*
         * Now that the hardware has most likely moved onto dma'ing
         * into the next cache line, clear the processed events. Take
         * care to only clear out events that we've processed
         */
        EFX_SET_QWORD(ev[0]);
        offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
        for (index = 0; index < total; ++index) {
            EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0]));
            offset += sizeof (efx_qword_t);
        }

        count += total;

    } while (total == batch);

    *countp = count;
}

#endif /* EFX_OPTS_EF10() || EFSYS_OPT_SIENA */