1 /* SPDX-License-Identifier: BSD-3-Clause 2 * 3 * Copyright(c) 2019-2020 Xilinx, Inc. 4 * Copyright(c) 2007-2019 Solarflare Communications Inc. 5 */ 6 7 #include "efx.h" 8 #include "efx_impl.h" 9 #if EFSYS_OPT_MON_MCDI 10 #include "mcdi_mon.h" 11 #endif 12 13 #define EFX_EV_PRESENT(_qword) \ 14 (EFX_QWORD_FIELD((_qword), EFX_DWORD_0) != 0xffffffff && \ 15 EFX_QWORD_FIELD((_qword), EFX_DWORD_1) != 0xffffffff) 16 17 18 19 #if EFSYS_OPT_SIENA 20 21 static __checkReturn efx_rc_t 22 siena_ev_init( 23 __in efx_nic_t *enp); 24 25 static void 26 siena_ev_fini( 27 __in efx_nic_t *enp); 28 29 static __checkReturn efx_rc_t 30 siena_ev_qcreate( 31 __in efx_nic_t *enp, 32 __in unsigned int index, 33 __in efsys_mem_t *esmp, 34 __in size_t ndescs, 35 __in uint32_t id, 36 __in uint32_t us, 37 __in uint32_t flags, 38 __in efx_evq_t *eep); 39 40 static void 41 siena_ev_qdestroy( 42 __in efx_evq_t *eep); 43 44 static __checkReturn efx_rc_t 45 siena_ev_qprime( 46 __in efx_evq_t *eep, 47 __in unsigned int count); 48 49 static void 50 siena_ev_qpost( 51 __in efx_evq_t *eep, 52 __in uint16_t data); 53 54 static __checkReturn efx_rc_t 55 siena_ev_qmoderate( 56 __in efx_evq_t *eep, 57 __in unsigned int us); 58 59 #if EFSYS_OPT_QSTATS 60 static void 61 siena_ev_qstats_update( 62 __in efx_evq_t *eep, 63 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat); 64 65 #endif 66 67 #endif /* EFSYS_OPT_SIENA */ 68 69 #if EFX_OPTS_EF10() || EFSYS_OPT_SIENA 70 71 static void 72 siena_ef10_ev_qpoll( 73 __in efx_evq_t *eep, 74 __inout unsigned int *countp, 75 __in const efx_ev_callbacks_t *eecp, 76 __in_opt void *arg); 77 78 #endif /* EFX_OPTS_EF10() || EFSYS_OPT_SIENA */ 79 80 #if EFSYS_OPT_SIENA 81 static const efx_ev_ops_t __efx_ev_siena_ops = { 82 siena_ev_init, /* eevo_init */ 83 siena_ev_fini, /* eevo_fini */ 84 siena_ev_qcreate, /* eevo_qcreate */ 85 siena_ev_qdestroy, /* eevo_qdestroy */ 86 siena_ev_qprime, /* eevo_qprime */ 87 siena_ev_qpost, /* eevo_qpost */ 88 siena_ef10_ev_qpoll, /* eevo_qpoll */ 89 siena_ev_qmoderate, /* eevo_qmoderate */ 90 #if EFSYS_OPT_QSTATS 91 siena_ev_qstats_update, /* eevo_qstats_update */ 92 #endif 93 }; 94 #endif /* EFSYS_OPT_SIENA */ 95 96 #if EFX_OPTS_EF10() 97 static const efx_ev_ops_t __efx_ev_ef10_ops = { 98 ef10_ev_init, /* eevo_init */ 99 ef10_ev_fini, /* eevo_fini */ 100 ef10_ev_qcreate, /* eevo_qcreate */ 101 ef10_ev_qdestroy, /* eevo_qdestroy */ 102 ef10_ev_qprime, /* eevo_qprime */ 103 ef10_ev_qpost, /* eevo_qpost */ 104 siena_ef10_ev_qpoll, /* eevo_qpoll */ 105 ef10_ev_qmoderate, /* eevo_qmoderate */ 106 #if EFSYS_OPT_QSTATS 107 ef10_ev_qstats_update, /* eevo_qstats_update */ 108 #endif 109 }; 110 #endif /* EFX_OPTS_EF10() */ 111 112 #if EFSYS_OPT_RIVERHEAD 113 static const efx_ev_ops_t __efx_ev_rhead_ops = { 114 rhead_ev_init, /* eevo_init */ 115 rhead_ev_fini, /* eevo_fini */ 116 rhead_ev_qcreate, /* eevo_qcreate */ 117 rhead_ev_qdestroy, /* eevo_qdestroy */ 118 rhead_ev_qprime, /* eevo_qprime */ 119 rhead_ev_qpost, /* eevo_qpost */ 120 rhead_ev_qpoll, /* eevo_qpoll */ 121 rhead_ev_qmoderate, /* eevo_qmoderate */ 122 #if EFSYS_OPT_QSTATS 123 rhead_ev_qstats_update, /* eevo_qstats_update */ 124 #endif 125 }; 126 #endif /* EFSYS_OPT_RIVERHEAD */ 127 128 129 __checkReturn efx_rc_t 130 efx_ev_init( 131 __in efx_nic_t *enp) 132 { 133 const efx_ev_ops_t *eevop; 134 efx_rc_t rc; 135 136 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 137 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); 138 139 if (enp->en_mod_flags & EFX_MOD_EV) { 140 rc = EINVAL; 141 goto fail1; 142 } 143 144 switch (enp->en_family) { 145 #if EFSYS_OPT_SIENA 146 case EFX_FAMILY_SIENA: 147 eevop = &__efx_ev_siena_ops; 148 break; 149 #endif /* EFSYS_OPT_SIENA */ 150 151 #if EFSYS_OPT_HUNTINGTON 152 case EFX_FAMILY_HUNTINGTON: 153 eevop = &__efx_ev_ef10_ops; 154 break; 155 #endif /* EFSYS_OPT_HUNTINGTON */ 156 157 #if EFSYS_OPT_MEDFORD 158 case EFX_FAMILY_MEDFORD: 159 eevop = &__efx_ev_ef10_ops; 160 break; 161 #endif /* EFSYS_OPT_MEDFORD */ 162 163 #if EFSYS_OPT_MEDFORD2 164 case EFX_FAMILY_MEDFORD2: 165 eevop = &__efx_ev_ef10_ops; 166 break; 167 #endif /* EFSYS_OPT_MEDFORD2 */ 168 169 #if EFSYS_OPT_RIVERHEAD 170 case EFX_FAMILY_RIVERHEAD: 171 eevop = &__efx_ev_rhead_ops; 172 break; 173 #endif /* EFSYS_OPT_RIVERHEAD */ 174 175 default: 176 EFSYS_ASSERT(0); 177 rc = ENOTSUP; 178 goto fail1; 179 } 180 181 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0); 182 183 if ((rc = eevop->eevo_init(enp)) != 0) 184 goto fail2; 185 186 enp->en_eevop = eevop; 187 enp->en_mod_flags |= EFX_MOD_EV; 188 return (0); 189 190 fail2: 191 EFSYS_PROBE(fail2); 192 193 fail1: 194 EFSYS_PROBE1(fail1, efx_rc_t, rc); 195 196 enp->en_eevop = NULL; 197 enp->en_mod_flags &= ~EFX_MOD_EV; 198 return (rc); 199 } 200 201 __checkReturn size_t 202 efx_evq_size( 203 __in const efx_nic_t *enp, 204 __in unsigned int ndescs, 205 __in uint32_t flags) 206 { 207 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp); 208 size_t desc_size; 209 210 desc_size = encp->enc_ev_desc_size; 211 212 #if EFSYS_OPT_EV_EXTENDED_WIDTH 213 if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) 214 desc_size = encp->enc_ev_ew_desc_size; 215 #else 216 EFSYS_ASSERT((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) == 0); 217 #endif 218 219 return (ndescs * desc_size); 220 } 221 222 __checkReturn unsigned int 223 efx_evq_nbufs( 224 __in const efx_nic_t *enp, 225 __in unsigned int ndescs, 226 __in uint32_t flags) 227 { 228 size_t size; 229 230 size = efx_evq_size(enp, ndescs, flags); 231 232 return (EFX_DIV_ROUND_UP(size, EFX_BUF_SIZE)); 233 } 234 235 void 236 efx_ev_fini( 237 __in efx_nic_t *enp) 238 { 239 const efx_ev_ops_t *eevop = enp->en_eevop; 240 241 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 242 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_INTR); 243 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV); 244 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_RX)); 245 EFSYS_ASSERT(!(enp->en_mod_flags & EFX_MOD_TX)); 246 EFSYS_ASSERT3U(enp->en_ev_qcount, ==, 0); 247 248 eevop->eevo_fini(enp); 249 250 enp->en_eevop = NULL; 251 enp->en_mod_flags &= ~EFX_MOD_EV; 252 } 253 254 255 __checkReturn efx_rc_t 256 efx_ev_qcreate( 257 __in efx_nic_t *enp, 258 __in unsigned int index, 259 __in efsys_mem_t *esmp, 260 __in size_t ndescs, 261 __in uint32_t id, 262 __in uint32_t us, 263 __in uint32_t flags, 264 __deref_out efx_evq_t **eepp) 265 { 266 const efx_ev_ops_t *eevop = enp->en_eevop; 267 efx_evq_t *eep; 268 const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp); 269 efx_rc_t rc; 270 271 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 272 EFSYS_ASSERT3U(enp->en_mod_flags, &, EFX_MOD_EV); 273 274 EFSYS_ASSERT3U(enp->en_ev_qcount + 1, <, 275 enp->en_nic_cfg.enc_evq_limit); 276 277 if (index >= encp->enc_evq_limit) { 278 rc = EINVAL; 279 goto fail1; 280 } 281 282 if (us > encp->enc_evq_timer_max_us) { 283 rc = EINVAL; 284 goto fail2; 285 } 286 287 switch (flags & EFX_EVQ_FLAGS_NOTIFY_MASK) { 288 case EFX_EVQ_FLAGS_NOTIFY_INTERRUPT: 289 break; 290 case EFX_EVQ_FLAGS_NOTIFY_DISABLED: 291 if (us != 0) { 292 rc = EINVAL; 293 goto fail3; 294 } 295 break; 296 default: 297 rc = EINVAL; 298 goto fail4; 299 } 300 301 if ((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) && 302 (encp->enc_ev_ew_desc_size == 0)) { 303 /* Extended width event descriptors are not supported. */ 304 rc = EINVAL; 305 goto fail5; 306 } 307 308 EFSYS_ASSERT(ISP2(encp->enc_evq_max_nevs)); 309 EFSYS_ASSERT(ISP2(encp->enc_evq_min_nevs)); 310 311 if (!ISP2(ndescs) || 312 ndescs < encp->enc_evq_min_nevs || 313 ndescs > encp->enc_evq_max_nevs) { 314 rc = EINVAL; 315 goto fail6; 316 } 317 318 if (EFSYS_MEM_SIZE(esmp) < (ndescs * encp->enc_ev_desc_size)) { 319 /* Buffer too small for event queue descriptors. */ 320 rc = EINVAL; 321 goto fail7; 322 } 323 324 /* Allocate an EVQ object */ 325 EFSYS_KMEM_ALLOC(enp->en_esip, sizeof (efx_evq_t), eep); 326 if (eep == NULL) { 327 rc = ENOMEM; 328 goto fail8; 329 } 330 331 eep->ee_magic = EFX_EVQ_MAGIC; 332 eep->ee_enp = enp; 333 eep->ee_index = index; 334 eep->ee_mask = ndescs - 1; 335 eep->ee_flags = flags; 336 eep->ee_esmp = esmp; 337 338 /* 339 * Set outputs before the queue is created because interrupts may be 340 * raised for events immediately after the queue is created, before the 341 * function call below returns. See bug58606. 342 * 343 * The eepp pointer passed in by the client must therefore point to data 344 * shared with the client's event processing context. 345 */ 346 enp->en_ev_qcount++; 347 *eepp = eep; 348 349 if ((rc = eevop->eevo_qcreate(enp, index, esmp, ndescs, id, us, flags, 350 eep)) != 0) 351 goto fail9; 352 353 return (0); 354 355 fail9: 356 EFSYS_PROBE(fail9); 357 358 *eepp = NULL; 359 enp->en_ev_qcount--; 360 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep); 361 fail8: 362 EFSYS_PROBE(fail8); 363 fail7: 364 EFSYS_PROBE(fail7); 365 fail6: 366 EFSYS_PROBE(fail6); 367 fail5: 368 EFSYS_PROBE(fail5); 369 fail4: 370 EFSYS_PROBE(fail4); 371 fail3: 372 EFSYS_PROBE(fail3); 373 fail2: 374 EFSYS_PROBE(fail2); 375 fail1: 376 EFSYS_PROBE1(fail1, efx_rc_t, rc); 377 return (rc); 378 } 379 380 void 381 efx_ev_qdestroy( 382 __in efx_evq_t *eep) 383 { 384 efx_nic_t *enp = eep->ee_enp; 385 const efx_ev_ops_t *eevop = enp->en_eevop; 386 387 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); 388 389 EFSYS_ASSERT(enp->en_ev_qcount != 0); 390 --enp->en_ev_qcount; 391 392 eevop->eevo_qdestroy(eep); 393 394 /* Free the EVQ object */ 395 EFSYS_KMEM_FREE(enp->en_esip, sizeof (efx_evq_t), eep); 396 } 397 398 __checkReturn efx_rc_t 399 efx_ev_qprime( 400 __in efx_evq_t *eep, 401 __in unsigned int count) 402 { 403 efx_nic_t *enp = eep->ee_enp; 404 const efx_ev_ops_t *eevop = enp->en_eevop; 405 efx_rc_t rc; 406 407 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); 408 409 if (!(enp->en_mod_flags & EFX_MOD_INTR)) { 410 rc = EINVAL; 411 goto fail1; 412 } 413 414 if ((rc = eevop->eevo_qprime(eep, count)) != 0) 415 goto fail2; 416 417 return (0); 418 419 fail2: 420 EFSYS_PROBE(fail2); 421 fail1: 422 EFSYS_PROBE1(fail1, efx_rc_t, rc); 423 return (rc); 424 } 425 426 __checkReturn boolean_t 427 efx_ev_qpending( 428 __in efx_evq_t *eep, 429 __in unsigned int count) 430 { 431 size_t offset; 432 efx_qword_t qword; 433 434 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); 435 436 offset = (count & eep->ee_mask) * sizeof (efx_qword_t); 437 EFSYS_MEM_READQ(eep->ee_esmp, offset, &qword); 438 439 return (EFX_EV_PRESENT(qword)); 440 } 441 442 #if EFSYS_OPT_EV_PREFETCH 443 444 void 445 efx_ev_qprefetch( 446 __in efx_evq_t *eep, 447 __in unsigned int count) 448 { 449 unsigned int offset; 450 451 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); 452 453 offset = (count & eep->ee_mask) * sizeof (efx_qword_t); 454 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); 455 } 456 457 #endif /* EFSYS_OPT_EV_PREFETCH */ 458 459 /* 460 * This method is needed to ensure that eec_initialized callback 461 * is invoked after queue creation. The callback will be invoked 462 * on Riverhead boards which have no support for INIT_DONE events 463 * and will do nothing on other boards. 464 * 465 * The client drivers must call this method after calling efx_ev_create(). 466 * The call must be done with the same locks being held (if any) which are 467 * normally acquired around efx_ev_qpoll() calls to ensure that 468 * eec_initialized callback is invoked within the same locking context. 469 */ 470 void 471 efx_ev_qcreate_check_init_done( 472 __in efx_evq_t *eep, 473 __in const efx_ev_callbacks_t *eecp, 474 __in_opt void *arg) 475 { 476 const efx_nic_cfg_t *encp; 477 478 EFSYS_ASSERT(eep != NULL); 479 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); 480 EFSYS_ASSERT(eecp != NULL); 481 EFSYS_ASSERT(eecp->eec_initialized != NULL); 482 483 encp = efx_nic_cfg_get(eep->ee_enp); 484 485 if (encp->enc_evq_init_done_ev_supported == B_FALSE) 486 (void) eecp->eec_initialized(arg); 487 } 488 489 void 490 efx_ev_qpoll( 491 __in efx_evq_t *eep, 492 __inout unsigned int *countp, 493 __in const efx_ev_callbacks_t *eecp, 494 __in_opt void *arg) 495 { 496 efx_nic_t *enp = eep->ee_enp; 497 const efx_ev_ops_t *eevop = enp->en_eevop; 498 499 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); 500 501 EFSYS_ASSERT(eevop != NULL && 502 eevop->eevo_qpoll != NULL); 503 504 eevop->eevo_qpoll(eep, countp, eecp, arg); 505 } 506 507 void 508 efx_ev_qpost( 509 __in efx_evq_t *eep, 510 __in uint16_t data) 511 { 512 efx_nic_t *enp = eep->ee_enp; 513 const efx_ev_ops_t *eevop = enp->en_eevop; 514 515 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); 516 517 EFSYS_ASSERT(eevop != NULL && 518 eevop->eevo_qpost != NULL); 519 520 eevop->eevo_qpost(eep, data); 521 } 522 523 __checkReturn efx_rc_t 524 efx_ev_usecs_to_ticks( 525 __in efx_nic_t *enp, 526 __in unsigned int us, 527 __out unsigned int *ticksp) 528 { 529 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 530 unsigned int ticks; 531 efx_rc_t rc; 532 533 if (encp->enc_evq_timer_quantum_ns == 0) { 534 rc = ENOTSUP; 535 goto fail1; 536 } 537 538 /* Convert microseconds to a timer tick count */ 539 if (us == 0) 540 ticks = 0; 541 else if (us * 1000 < encp->enc_evq_timer_quantum_ns) 542 ticks = 1; /* Never round down to zero */ 543 else 544 ticks = us * 1000 / encp->enc_evq_timer_quantum_ns; 545 546 *ticksp = ticks; 547 return (0); 548 549 fail1: 550 EFSYS_PROBE1(fail1, efx_rc_t, rc); 551 return (rc); 552 } 553 554 __checkReturn efx_rc_t 555 efx_ev_qmoderate( 556 __in efx_evq_t *eep, 557 __in unsigned int us) 558 { 559 efx_nic_t *enp = eep->ee_enp; 560 const efx_ev_ops_t *eevop = enp->en_eevop; 561 efx_rc_t rc; 562 563 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); 564 565 if ((eep->ee_flags & EFX_EVQ_FLAGS_NOTIFY_MASK) == 566 EFX_EVQ_FLAGS_NOTIFY_DISABLED) { 567 rc = EINVAL; 568 goto fail1; 569 } 570 571 if ((rc = eevop->eevo_qmoderate(eep, us)) != 0) 572 goto fail2; 573 574 return (0); 575 576 fail2: 577 EFSYS_PROBE(fail2); 578 fail1: 579 EFSYS_PROBE1(fail1, efx_rc_t, rc); 580 return (rc); 581 } 582 583 #if EFSYS_OPT_QSTATS 584 void 585 efx_ev_qstats_update( 586 __in efx_evq_t *eep, 587 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) 588 589 { efx_nic_t *enp = eep->ee_enp; 590 const efx_ev_ops_t *eevop = enp->en_eevop; 591 592 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); 593 594 eevop->eevo_qstats_update(eep, stat); 595 } 596 597 #endif /* EFSYS_OPT_QSTATS */ 598 599 #if EFSYS_OPT_SIENA 600 601 static __checkReturn efx_rc_t 602 siena_ev_init( 603 __in efx_nic_t *enp) 604 { 605 efx_oword_t oword; 606 607 /* 608 * Program the event queue for receive and transmit queue 609 * flush events. 610 */ 611 EFX_BAR_READO(enp, FR_AZ_DP_CTRL_REG, &oword); 612 EFX_SET_OWORD_FIELD(oword, FRF_AZ_FLS_EVQ_ID, 0); 613 EFX_BAR_WRITEO(enp, FR_AZ_DP_CTRL_REG, &oword); 614 615 return (0); 616 617 } 618 619 static __checkReturn boolean_t 620 siena_ev_rx_not_ok( 621 __in efx_evq_t *eep, 622 __in efx_qword_t *eqp, 623 __in uint32_t label, 624 __in uint32_t id, 625 __inout uint16_t *flagsp) 626 { 627 boolean_t ignore = B_FALSE; 628 629 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TOBE_DISC) != 0) { 630 EFX_EV_QSTAT_INCR(eep, EV_RX_TOBE_DISC); 631 EFSYS_PROBE(tobe_disc); 632 /* 633 * Assume this is a unicast address mismatch, unless below 634 * we find either FSF_AZ_RX_EV_ETH_CRC_ERR or 635 * EV_RX_PAUSE_FRM_ERR is set. 636 */ 637 (*flagsp) |= EFX_ADDR_MISMATCH; 638 } 639 640 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_FRM_TRUNC) != 0) { 641 EFSYS_PROBE2(frm_trunc, uint32_t, label, uint32_t, id); 642 EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC); 643 (*flagsp) |= EFX_DISCARD; 644 645 #if EFSYS_OPT_RX_SCATTER 646 /* 647 * Lookout for payload queue ran dry errors and ignore them. 648 * 649 * Sadly for the header/data split cases, the descriptor 650 * pointer in this event refers to the header queue and 651 * therefore cannot be easily detected as duplicate. 652 * So we drop these and rely on the receive processing seeing 653 * a subsequent packet with FSF_AZ_RX_EV_SOP set to discard 654 * the partially received packet. 655 */ 656 if ((EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) == 0) && 657 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) == 0) && 658 (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT) == 0)) 659 ignore = B_TRUE; 660 #endif /* EFSYS_OPT_RX_SCATTER */ 661 } 662 663 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_ETH_CRC_ERR) != 0) { 664 EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR); 665 EFSYS_PROBE(crc_err); 666 (*flagsp) &= ~EFX_ADDR_MISMATCH; 667 (*flagsp) |= EFX_DISCARD; 668 } 669 670 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PAUSE_FRM_ERR) != 0) { 671 EFX_EV_QSTAT_INCR(eep, EV_RX_PAUSE_FRM_ERR); 672 EFSYS_PROBE(pause_frm_err); 673 (*flagsp) &= ~EFX_ADDR_MISMATCH; 674 (*flagsp) |= EFX_DISCARD; 675 } 676 677 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BUF_OWNER_ID_ERR) != 0) { 678 EFX_EV_QSTAT_INCR(eep, EV_RX_BUF_OWNER_ID_ERR); 679 EFSYS_PROBE(owner_id_err); 680 (*flagsp) |= EFX_DISCARD; 681 } 682 683 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR) != 0) { 684 EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR); 685 EFSYS_PROBE(ipv4_err); 686 (*flagsp) &= ~EFX_CKSUM_IPV4; 687 } 688 689 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR) != 0) { 690 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR); 691 EFSYS_PROBE(udp_chk_err); 692 (*flagsp) &= ~EFX_CKSUM_TCPUDP; 693 } 694 695 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_IP_FRAG_ERR) != 0) { 696 EFX_EV_QSTAT_INCR(eep, EV_RX_IP_FRAG_ERR); 697 698 /* 699 * If IP is fragmented FSF_AZ_RX_EV_IP_FRAG_ERR is set. This 700 * causes FSF_AZ_RX_EV_PKT_OK to be clear. This is not an error 701 * condition. 702 */ 703 (*flagsp) &= ~(EFX_PKT_TCP | EFX_PKT_UDP | EFX_CKSUM_TCPUDP); 704 } 705 706 return (ignore); 707 } 708 709 static __checkReturn boolean_t 710 siena_ev_rx( 711 __in efx_evq_t *eep, 712 __in efx_qword_t *eqp, 713 __in const efx_ev_callbacks_t *eecp, 714 __in_opt void *arg) 715 { 716 uint32_t id; 717 uint32_t size; 718 uint32_t label; 719 boolean_t ok; 720 #if EFSYS_OPT_RX_SCATTER 721 boolean_t sop; 722 boolean_t jumbo_cont; 723 #endif /* EFSYS_OPT_RX_SCATTER */ 724 uint32_t hdr_type; 725 boolean_t is_v6; 726 uint16_t flags; 727 boolean_t ignore; 728 boolean_t should_abort; 729 730 EFX_EV_QSTAT_INCR(eep, EV_RX); 731 732 /* Basic packet information */ 733 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_DESC_PTR); 734 size = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_BYTE_CNT); 735 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_Q_LABEL); 736 ok = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_OK) != 0); 737 738 #if EFSYS_OPT_RX_SCATTER 739 sop = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_SOP) != 0); 740 jumbo_cont = (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_JUMBO_CONT) != 0); 741 #endif /* EFSYS_OPT_RX_SCATTER */ 742 743 hdr_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_HDR_TYPE); 744 745 is_v6 = (EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_IPV6_PKT) != 0); 746 747 /* 748 * If packet is marked as OK and packet type is TCP/IP or 749 * UDP/IP or other IP, then we can rely on the hardware checksums. 750 */ 751 switch (hdr_type) { 752 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_TCP: 753 flags = EFX_PKT_TCP | EFX_CKSUM_TCPUDP; 754 if (is_v6) { 755 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6); 756 flags |= EFX_PKT_IPV6; 757 } else { 758 EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4); 759 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4; 760 } 761 break; 762 763 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_UDP: 764 flags = EFX_PKT_UDP | EFX_CKSUM_TCPUDP; 765 if (is_v6) { 766 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6); 767 flags |= EFX_PKT_IPV6; 768 } else { 769 EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4); 770 flags |= EFX_PKT_IPV4 | EFX_CKSUM_IPV4; 771 } 772 break; 773 774 case FSE_AZ_RX_EV_HDR_TYPE_IPV4V6_OTHER: 775 if (is_v6) { 776 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6); 777 flags = EFX_PKT_IPV6; 778 } else { 779 EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4); 780 flags = EFX_PKT_IPV4 | EFX_CKSUM_IPV4; 781 } 782 break; 783 784 case FSE_AZ_RX_EV_HDR_TYPE_OTHER: 785 EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP); 786 flags = 0; 787 break; 788 789 default: 790 EFSYS_ASSERT(B_FALSE); 791 flags = 0; 792 break; 793 } 794 795 #if EFSYS_OPT_RX_SCATTER 796 /* Report scatter and header/lookahead split buffer flags */ 797 if (sop) 798 flags |= EFX_PKT_START; 799 if (jumbo_cont) 800 flags |= EFX_PKT_CONT; 801 #endif /* EFSYS_OPT_RX_SCATTER */ 802 803 /* Detect errors included in the FSF_AZ_RX_EV_PKT_OK indication */ 804 if (!ok) { 805 ignore = siena_ev_rx_not_ok(eep, eqp, label, id, &flags); 806 if (ignore) { 807 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id, 808 uint32_t, size, uint16_t, flags); 809 810 return (B_FALSE); 811 } 812 } 813 814 /* If we're not discarding the packet then it is ok */ 815 if (~flags & EFX_DISCARD) 816 EFX_EV_QSTAT_INCR(eep, EV_RX_OK); 817 818 /* Detect multicast packets that didn't match the filter */ 819 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_PKT) != 0) { 820 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_PKT); 821 822 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_MCAST_HASH_MATCH) != 0) { 823 EFX_EV_QSTAT_INCR(eep, EV_RX_MCAST_HASH_MATCH); 824 } else { 825 EFSYS_PROBE(mcast_mismatch); 826 flags |= EFX_ADDR_MISMATCH; 827 } 828 } else { 829 flags |= EFX_PKT_UNICAST; 830 } 831 832 /* 833 * The packet parser in Siena can abort parsing packets under 834 * certain error conditions, setting the PKT_NOT_PARSED bit 835 * (which clears PKT_OK). If this is set, then don't trust 836 * the PKT_TYPE field. 837 */ 838 if (!ok) { 839 uint32_t parse_err; 840 841 parse_err = EFX_QWORD_FIELD(*eqp, FSF_CZ_RX_EV_PKT_NOT_PARSED); 842 if (parse_err != 0) 843 flags |= EFX_CHECK_VLAN; 844 } 845 846 if (~flags & EFX_CHECK_VLAN) { 847 uint32_t pkt_type; 848 849 pkt_type = EFX_QWORD_FIELD(*eqp, FSF_AZ_RX_EV_PKT_TYPE); 850 if (pkt_type >= FSE_AZ_RX_EV_PKT_TYPE_VLAN) 851 flags |= EFX_PKT_VLAN_TAGGED; 852 } 853 854 EFSYS_PROBE4(rx_complete, uint32_t, label, uint32_t, id, 855 uint32_t, size, uint16_t, flags); 856 857 EFSYS_ASSERT(eecp->eec_rx != NULL); 858 should_abort = eecp->eec_rx(arg, label, id, size, flags); 859 860 return (should_abort); 861 } 862 863 static __checkReturn boolean_t 864 siena_ev_tx( 865 __in efx_evq_t *eep, 866 __in efx_qword_t *eqp, 867 __in const efx_ev_callbacks_t *eecp, 868 __in_opt void *arg) 869 { 870 uint32_t id; 871 uint32_t label; 872 boolean_t should_abort; 873 874 EFX_EV_QSTAT_INCR(eep, EV_TX); 875 876 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0 && 877 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) == 0 && 878 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) == 0 && 879 EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) == 0) { 880 881 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_DESC_PTR); 882 label = EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_Q_LABEL); 883 884 EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id); 885 886 EFSYS_ASSERT(eecp->eec_tx != NULL); 887 should_abort = eecp->eec_tx(arg, label, id); 888 889 return (should_abort); 890 } 891 892 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_COMP) != 0) 893 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, 894 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), 895 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); 896 897 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_ERR) != 0) 898 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_ERR); 899 900 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_PKT_TOO_BIG) != 0) 901 EFX_EV_QSTAT_INCR(eep, EV_TX_PKT_TOO_BIG); 902 903 if (EFX_QWORD_FIELD(*eqp, FSF_AZ_TX_EV_WQ_FF_FULL) != 0) 904 EFX_EV_QSTAT_INCR(eep, EV_TX_WQ_FF_FULL); 905 906 EFX_EV_QSTAT_INCR(eep, EV_TX_UNEXPECTED); 907 return (B_FALSE); 908 } 909 910 static __checkReturn boolean_t 911 siena_ev_global( 912 __in efx_evq_t *eep, 913 __in efx_qword_t *eqp, 914 __in const efx_ev_callbacks_t *eecp, 915 __in_opt void *arg) 916 { 917 _NOTE(ARGUNUSED(eqp, eecp, arg)) 918 919 EFX_EV_QSTAT_INCR(eep, EV_GLOBAL); 920 921 return (B_FALSE); 922 } 923 924 static __checkReturn boolean_t 925 siena_ev_driver( 926 __in efx_evq_t *eep, 927 __in efx_qword_t *eqp, 928 __in const efx_ev_callbacks_t *eecp, 929 __in_opt void *arg) 930 { 931 boolean_t should_abort; 932 933 EFX_EV_QSTAT_INCR(eep, EV_DRIVER); 934 should_abort = B_FALSE; 935 936 switch (EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBCODE)) { 937 case FSE_AZ_TX_DESCQ_FLS_DONE_EV: { 938 uint32_t txq_index; 939 940 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE); 941 942 txq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); 943 944 EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index); 945 946 EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL); 947 should_abort = eecp->eec_txq_flush_done(arg, txq_index); 948 949 break; 950 } 951 case FSE_AZ_RX_DESCQ_FLS_DONE_EV: { 952 uint32_t rxq_index; 953 uint32_t failed; 954 955 rxq_index = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_DESCQ_ID); 956 failed = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL); 957 958 EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL); 959 EFSYS_ASSERT(eecp->eec_rxq_flush_failed != NULL); 960 961 if (failed) { 962 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_FAILED); 963 964 EFSYS_PROBE1(rx_descq_fls_failed, uint32_t, rxq_index); 965 966 should_abort = eecp->eec_rxq_flush_failed(arg, 967 rxq_index); 968 } else { 969 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE); 970 971 EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index); 972 973 should_abort = eecp->eec_rxq_flush_done(arg, rxq_index); 974 } 975 976 break; 977 } 978 case FSE_AZ_EVQ_INIT_DONE_EV: 979 EFSYS_ASSERT(eecp->eec_initialized != NULL); 980 should_abort = eecp->eec_initialized(arg); 981 982 break; 983 984 case FSE_AZ_EVQ_NOT_EN_EV: 985 EFSYS_PROBE(evq_not_en); 986 break; 987 988 case FSE_AZ_SRM_UPD_DONE_EV: { 989 uint32_t code; 990 991 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_SRM_UPD_DONE); 992 993 code = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); 994 995 EFSYS_ASSERT(eecp->eec_sram != NULL); 996 should_abort = eecp->eec_sram(arg, code); 997 998 break; 999 } 1000 case FSE_AZ_WAKE_UP_EV: { 1001 uint32_t id; 1002 1003 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); 1004 1005 EFSYS_ASSERT(eecp->eec_wake_up != NULL); 1006 should_abort = eecp->eec_wake_up(arg, id); 1007 1008 break; 1009 } 1010 case FSE_AZ_TX_PKT_NON_TCP_UDP: 1011 EFSYS_PROBE(tx_pkt_non_tcp_udp); 1012 break; 1013 1014 case FSE_AZ_TIMER_EV: { 1015 uint32_t id; 1016 1017 id = EFX_QWORD_FIELD(*eqp, FSF_AZ_DRIVER_EV_SUBDATA); 1018 1019 EFSYS_ASSERT(eecp->eec_timer != NULL); 1020 should_abort = eecp->eec_timer(arg, id); 1021 1022 break; 1023 } 1024 case FSE_AZ_RX_DSC_ERROR_EV: 1025 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DSC_ERROR); 1026 1027 EFSYS_PROBE(rx_dsc_error); 1028 1029 EFSYS_ASSERT(eecp->eec_exception != NULL); 1030 should_abort = eecp->eec_exception(arg, 1031 EFX_EXCEPTION_RX_DSC_ERROR, 0); 1032 1033 break; 1034 1035 case FSE_AZ_TX_DSC_ERROR_EV: 1036 EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DSC_ERROR); 1037 1038 EFSYS_PROBE(tx_dsc_error); 1039 1040 EFSYS_ASSERT(eecp->eec_exception != NULL); 1041 should_abort = eecp->eec_exception(arg, 1042 EFX_EXCEPTION_TX_DSC_ERROR, 0); 1043 1044 break; 1045 1046 default: 1047 break; 1048 } 1049 1050 return (should_abort); 1051 } 1052 1053 static __checkReturn boolean_t 1054 siena_ev_drv_gen( 1055 __in efx_evq_t *eep, 1056 __in efx_qword_t *eqp, 1057 __in const efx_ev_callbacks_t *eecp, 1058 __in_opt void *arg) 1059 { 1060 uint32_t data; 1061 boolean_t should_abort; 1062 1063 EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN); 1064 1065 data = EFX_QWORD_FIELD(*eqp, FSF_AZ_EV_DATA_DW0); 1066 if (data >= ((uint32_t)1 << 16)) { 1067 EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index, 1068 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1), 1069 uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0)); 1070 return (B_TRUE); 1071 } 1072 1073 EFSYS_ASSERT(eecp->eec_software != NULL); 1074 should_abort = eecp->eec_software(arg, (uint16_t)data); 1075 1076 return (should_abort); 1077 } 1078 1079 #if EFSYS_OPT_MCDI 1080 1081 static __checkReturn boolean_t 1082 siena_ev_mcdi( 1083 __in efx_evq_t *eep, 1084 __in efx_qword_t *eqp, 1085 __in const efx_ev_callbacks_t *eecp, 1086 __in_opt void *arg) 1087 { 1088 efx_nic_t *enp = eep->ee_enp; 1089 unsigned int code; 1090 boolean_t should_abort = B_FALSE; 1091 1092 EFSYS_ASSERT3U(enp->en_family, ==, EFX_FAMILY_SIENA); 1093 1094 if (enp->en_family != EFX_FAMILY_SIENA) 1095 goto out; 1096 1097 EFSYS_ASSERT(eecp->eec_link_change != NULL); 1098 EFSYS_ASSERT(eecp->eec_exception != NULL); 1099 #if EFSYS_OPT_MON_STATS 1100 EFSYS_ASSERT(eecp->eec_monitor != NULL); 1101 #endif 1102 1103 EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE); 1104 1105 code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE); 1106 switch (code) { 1107 case MCDI_EVENT_CODE_BADSSERT: 1108 efx_mcdi_ev_death(enp, EINTR); 1109 break; 1110 1111 case MCDI_EVENT_CODE_CMDDONE: 1112 efx_mcdi_ev_cpl(enp, 1113 MCDI_EV_FIELD(eqp, CMDDONE_SEQ), 1114 MCDI_EV_FIELD(eqp, CMDDONE_DATALEN), 1115 MCDI_EV_FIELD(eqp, CMDDONE_ERRNO)); 1116 break; 1117 1118 case MCDI_EVENT_CODE_LINKCHANGE: { 1119 efx_link_mode_t link_mode; 1120 1121 siena_phy_link_ev(enp, eqp, &link_mode); 1122 should_abort = eecp->eec_link_change(arg, link_mode); 1123 break; 1124 } 1125 case MCDI_EVENT_CODE_SENSOREVT: { 1126 #if EFSYS_OPT_MON_STATS 1127 efx_mon_stat_t id; 1128 efx_mon_stat_value_t value; 1129 efx_rc_t rc; 1130 1131 if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) 1132 should_abort = eecp->eec_monitor(arg, id, value); 1133 else if (rc == ENOTSUP) { 1134 should_abort = eecp->eec_exception(arg, 1135 EFX_EXCEPTION_UNKNOWN_SENSOREVT, 1136 MCDI_EV_FIELD(eqp, DATA)); 1137 } else 1138 EFSYS_ASSERT(rc == ENODEV); /* Wrong port */ 1139 #else 1140 should_abort = B_FALSE; 1141 #endif 1142 break; 1143 } 1144 case MCDI_EVENT_CODE_SCHEDERR: 1145 /* Informational only */ 1146 break; 1147 1148 case MCDI_EVENT_CODE_REBOOT: 1149 efx_mcdi_ev_death(enp, EIO); 1150 break; 1151 1152 case MCDI_EVENT_CODE_MAC_STATS_DMA: 1153 #if EFSYS_OPT_MAC_STATS 1154 if (eecp->eec_mac_stats != NULL) { 1155 eecp->eec_mac_stats(arg, 1156 MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION)); 1157 } 1158 #endif 1159 break; 1160 1161 case MCDI_EVENT_CODE_FWALERT: { 1162 uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON); 1163 1164 if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS) 1165 should_abort = eecp->eec_exception(arg, 1166 EFX_EXCEPTION_FWALERT_SRAM, 1167 MCDI_EV_FIELD(eqp, FWALERT_DATA)); 1168 else 1169 should_abort = eecp->eec_exception(arg, 1170 EFX_EXCEPTION_UNKNOWN_FWALERT, 1171 MCDI_EV_FIELD(eqp, DATA)); 1172 break; 1173 } 1174 1175 default: 1176 EFSYS_PROBE1(mc_pcol_error, int, code); 1177 break; 1178 } 1179 1180 out: 1181 return (should_abort); 1182 } 1183 1184 #endif /* EFSYS_OPT_MCDI */ 1185 1186 static __checkReturn efx_rc_t 1187 siena_ev_qprime( 1188 __in efx_evq_t *eep, 1189 __in unsigned int count) 1190 { 1191 efx_nic_t *enp = eep->ee_enp; 1192 uint32_t rptr; 1193 efx_dword_t dword; 1194 1195 rptr = count & eep->ee_mask; 1196 1197 EFX_POPULATE_DWORD_1(dword, FRF_AZ_EVQ_RPTR, rptr); 1198 1199 EFX_BAR_TBL_WRITED(enp, FR_AZ_EVQ_RPTR_REG, eep->ee_index, 1200 &dword, B_FALSE); 1201 1202 return (0); 1203 } 1204 1205 static void 1206 siena_ev_qpost( 1207 __in efx_evq_t *eep, 1208 __in uint16_t data) 1209 { 1210 efx_nic_t *enp = eep->ee_enp; 1211 efx_qword_t ev; 1212 efx_oword_t oword; 1213 1214 EFX_POPULATE_QWORD_2(ev, FSF_AZ_EV_CODE, FSE_AZ_EV_CODE_DRV_GEN_EV, 1215 FSF_AZ_EV_DATA_DW0, (uint32_t)data); 1216 1217 EFX_POPULATE_OWORD_3(oword, FRF_AZ_DRV_EV_QID, eep->ee_index, 1218 EFX_DWORD_0, EFX_QWORD_FIELD(ev, EFX_DWORD_0), 1219 EFX_DWORD_1, EFX_QWORD_FIELD(ev, EFX_DWORD_1)); 1220 1221 EFX_BAR_WRITEO(enp, FR_AZ_DRV_EV_REG, &oword); 1222 } 1223 1224 static __checkReturn efx_rc_t 1225 siena_ev_qmoderate( 1226 __in efx_evq_t *eep, 1227 __in unsigned int us) 1228 { 1229 efx_nic_t *enp = eep->ee_enp; 1230 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 1231 unsigned int locked; 1232 efx_dword_t dword; 1233 efx_rc_t rc; 1234 1235 if (us > encp->enc_evq_timer_max_us) { 1236 rc = EINVAL; 1237 goto fail1; 1238 } 1239 1240 /* If the value is zero then disable the timer */ 1241 if (us == 0) { 1242 EFX_POPULATE_DWORD_2(dword, 1243 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS, 1244 FRF_CZ_TC_TIMER_VAL, 0); 1245 } else { 1246 unsigned int ticks; 1247 1248 if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0) 1249 goto fail2; 1250 1251 EFSYS_ASSERT(ticks > 0); 1252 EFX_POPULATE_DWORD_2(dword, 1253 FRF_CZ_TC_TIMER_MODE, FFE_CZ_TIMER_MODE_INT_HLDOFF, 1254 FRF_CZ_TC_TIMER_VAL, ticks - 1); 1255 } 1256 1257 locked = (eep->ee_index == 0) ? 1 : 0; 1258 1259 EFX_BAR_TBL_WRITED(enp, FR_BZ_TIMER_COMMAND_REGP0, 1260 eep->ee_index, &dword, locked); 1261 1262 return (0); 1263 1264 fail2: 1265 EFSYS_PROBE(fail2); 1266 fail1: 1267 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1268 1269 return (rc); 1270 } 1271 1272 static __checkReturn efx_rc_t 1273 siena_ev_qcreate( 1274 __in efx_nic_t *enp, 1275 __in unsigned int index, 1276 __in efsys_mem_t *esmp, 1277 __in size_t ndescs, 1278 __in uint32_t id, 1279 __in uint32_t us, 1280 __in uint32_t flags, 1281 __in efx_evq_t *eep) 1282 { 1283 efx_nic_cfg_t *encp = &(enp->en_nic_cfg); 1284 uint32_t size; 1285 efx_oword_t oword; 1286 efx_rc_t rc; 1287 boolean_t notify_mode; 1288 1289 _NOTE(ARGUNUSED(esmp)) 1290 1291 EFSYS_ASSERT((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) == 0); 1292 1293 #if EFSYS_OPT_RX_SCALE 1294 if (enp->en_intr.ei_type == EFX_INTR_LINE && 1295 index >= EFX_MAXRSS_LEGACY) { 1296 rc = EINVAL; 1297 goto fail1; 1298 } 1299 #endif 1300 for (size = 0; 1301 (1U << size) <= encp->enc_evq_max_nevs / encp->enc_evq_min_nevs; 1302 size++) 1303 if ((1U << size) == (uint32_t)ndescs / encp->enc_evq_min_nevs) 1304 break; 1305 if (id + (1 << size) >= encp->enc_buftbl_limit) { 1306 rc = EINVAL; 1307 goto fail2; 1308 } 1309 1310 /* Set up the handler table */ 1311 eep->ee_rx = siena_ev_rx; 1312 eep->ee_tx = siena_ev_tx; 1313 eep->ee_driver = siena_ev_driver; 1314 eep->ee_global = siena_ev_global; 1315 eep->ee_drv_gen = siena_ev_drv_gen; 1316 #if EFSYS_OPT_MCDI 1317 eep->ee_mcdi = siena_ev_mcdi; 1318 #endif /* EFSYS_OPT_MCDI */ 1319 1320 notify_mode = ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) != 1321 EFX_EVQ_FLAGS_NOTIFY_INTERRUPT); 1322 1323 /* Set up the new event queue */ 1324 EFX_POPULATE_OWORD_3(oword, FRF_CZ_TIMER_Q_EN, 1, 1325 FRF_CZ_HOST_NOTIFY_MODE, notify_mode, 1326 FRF_CZ_TIMER_MODE, FFE_CZ_TIMER_MODE_DIS); 1327 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, index, &oword, B_TRUE); 1328 1329 EFX_POPULATE_OWORD_3(oword, FRF_AZ_EVQ_EN, 1, FRF_AZ_EVQ_SIZE, size, 1330 FRF_AZ_EVQ_BUF_BASE_ID, id); 1331 1332 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, index, &oword, B_TRUE); 1333 1334 /* Set initial interrupt moderation */ 1335 siena_ev_qmoderate(eep, us); 1336 1337 return (0); 1338 1339 fail2: 1340 EFSYS_PROBE(fail2); 1341 #if EFSYS_OPT_RX_SCALE 1342 fail1: 1343 #endif 1344 EFSYS_PROBE1(fail1, efx_rc_t, rc); 1345 1346 return (rc); 1347 } 1348 1349 #endif /* EFSYS_OPT_SIENA */ 1350 1351 #if EFSYS_OPT_QSTATS 1352 #if EFSYS_OPT_NAMES 1353 /* START MKCONFIG GENERATED EfxEventQueueStatNamesBlock ac223f7134058b4f */ 1354 static const char * const __efx_ev_qstat_name[] = { 1355 "all", 1356 "rx", 1357 "rx_ok", 1358 "rx_frm_trunc", 1359 "rx_tobe_disc", 1360 "rx_pause_frm_err", 1361 "rx_buf_owner_id_err", 1362 "rx_ipv4_hdr_chksum_err", 1363 "rx_tcp_udp_chksum_err", 1364 "rx_eth_crc_err", 1365 "rx_ip_frag_err", 1366 "rx_mcast_pkt", 1367 "rx_mcast_hash_match", 1368 "rx_tcp_ipv4", 1369 "rx_tcp_ipv6", 1370 "rx_udp_ipv4", 1371 "rx_udp_ipv6", 1372 "rx_other_ipv4", 1373 "rx_other_ipv6", 1374 "rx_non_ip", 1375 "rx_batch", 1376 "tx", 1377 "tx_wq_ff_full", 1378 "tx_pkt_err", 1379 "tx_pkt_too_big", 1380 "tx_unexpected", 1381 "global", 1382 "global_mnt", 1383 "driver", 1384 "driver_srm_upd_done", 1385 "driver_tx_descq_fls_done", 1386 "driver_rx_descq_fls_done", 1387 "driver_rx_descq_fls_failed", 1388 "driver_rx_dsc_error", 1389 "driver_tx_dsc_error", 1390 "drv_gen", 1391 "mcdi_response", 1392 "rx_parse_incomplete", 1393 }; 1394 /* END MKCONFIG GENERATED EfxEventQueueStatNamesBlock */ 1395 1396 const char * 1397 efx_ev_qstat_name( 1398 __in efx_nic_t *enp, 1399 __in unsigned int id) 1400 { 1401 _NOTE(ARGUNUSED(enp)) 1402 1403 EFSYS_ASSERT3U(enp->en_magic, ==, EFX_NIC_MAGIC); 1404 EFSYS_ASSERT3U(id, <, EV_NQSTATS); 1405 1406 return (__efx_ev_qstat_name[id]); 1407 } 1408 #endif /* EFSYS_OPT_NAMES */ 1409 #endif /* EFSYS_OPT_QSTATS */ 1410 1411 #if EFSYS_OPT_SIENA 1412 1413 #if EFSYS_OPT_QSTATS 1414 static void 1415 siena_ev_qstats_update( 1416 __in efx_evq_t *eep, 1417 __inout_ecount(EV_NQSTATS) efsys_stat_t *stat) 1418 { 1419 unsigned int id; 1420 1421 for (id = 0; id < EV_NQSTATS; id++) { 1422 efsys_stat_t *essp = &stat[id]; 1423 1424 EFSYS_STAT_INCR(essp, eep->ee_stat[id]); 1425 eep->ee_stat[id] = 0; 1426 } 1427 } 1428 #endif /* EFSYS_OPT_QSTATS */ 1429 1430 static void 1431 siena_ev_qdestroy( 1432 __in efx_evq_t *eep) 1433 { 1434 efx_nic_t *enp = eep->ee_enp; 1435 efx_oword_t oword; 1436 1437 /* Purge event queue */ 1438 EFX_ZERO_OWORD(oword); 1439 1440 EFX_BAR_TBL_WRITEO(enp, FR_AZ_EVQ_PTR_TBL, 1441 eep->ee_index, &oword, B_TRUE); 1442 1443 EFX_ZERO_OWORD(oword); 1444 EFX_BAR_TBL_WRITEO(enp, FR_AZ_TIMER_TBL, eep->ee_index, &oword, B_TRUE); 1445 } 1446 1447 static void 1448 siena_ev_fini( 1449 __in efx_nic_t *enp) 1450 { 1451 _NOTE(ARGUNUSED(enp)) 1452 } 1453 1454 #endif /* EFSYS_OPT_SIENA */ 1455 1456 #if EFX_OPTS_EF10() || EFSYS_OPT_SIENA 1457 1458 #define EFX_EV_BATCH 8 1459 1460 static void 1461 siena_ef10_ev_qpoll( 1462 __in efx_evq_t *eep, 1463 __inout unsigned int *countp, 1464 __in const efx_ev_callbacks_t *eecp, 1465 __in_opt void *arg) 1466 { 1467 efx_qword_t ev[EFX_EV_BATCH]; 1468 unsigned int batch; 1469 unsigned int total; 1470 unsigned int count; 1471 unsigned int index; 1472 size_t offset; 1473 1474 /* Ensure events codes match for EF10 (Huntington/Medford) and Siena */ 1475 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_LBN == FSF_AZ_EV_CODE_LBN); 1476 EFX_STATIC_ASSERT(ESF_DZ_EV_CODE_WIDTH == FSF_AZ_EV_CODE_WIDTH); 1477 1478 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_RX_EV == FSE_AZ_EV_CODE_RX_EV); 1479 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_TX_EV == FSE_AZ_EV_CODE_TX_EV); 1480 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRIVER_EV == FSE_AZ_EV_CODE_DRIVER_EV); 1481 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_DRV_GEN_EV == 1482 FSE_AZ_EV_CODE_DRV_GEN_EV); 1483 #if EFSYS_OPT_MCDI 1484 EFX_STATIC_ASSERT(ESE_DZ_EV_CODE_MCDI_EV == 1485 FSE_AZ_EV_CODE_MCDI_EVRESPONSE); 1486 #endif 1487 1488 EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC); 1489 EFSYS_ASSERT(countp != NULL); 1490 EFSYS_ASSERT(eecp != NULL); 1491 1492 count = *countp; 1493 do { 1494 /* Read up until the end of the batch period */ 1495 batch = EFX_EV_BATCH - (count & (EFX_EV_BATCH - 1)); 1496 offset = (count & eep->ee_mask) * sizeof (efx_qword_t); 1497 for (total = 0; total < batch; ++total) { 1498 EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total])); 1499 1500 if (!EFX_EV_PRESENT(ev[total])) 1501 break; 1502 1503 EFSYS_PROBE3(event, unsigned int, eep->ee_index, 1504 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1), 1505 uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0)); 1506 1507 offset += sizeof (efx_qword_t); 1508 } 1509 1510 #if EFSYS_OPT_EV_PREFETCH && (EFSYS_OPT_EV_PREFETCH_PERIOD > 1) 1511 /* 1512 * Prefetch the next batch when we get within PREFETCH_PERIOD 1513 * of a completed batch. If the batch is smaller, then prefetch 1514 * immediately. 1515 */ 1516 if (total == batch && total < EFSYS_OPT_EV_PREFETCH_PERIOD) 1517 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); 1518 #endif /* EFSYS_OPT_EV_PREFETCH */ 1519 1520 /* Process the batch of events */ 1521 for (index = 0; index < total; ++index) { 1522 boolean_t should_abort; 1523 uint32_t code; 1524 1525 #if EFSYS_OPT_EV_PREFETCH 1526 /* Prefetch if we've now reached the batch period */ 1527 if (total == batch && 1528 index + EFSYS_OPT_EV_PREFETCH_PERIOD == total) { 1529 offset = (count + batch) & eep->ee_mask; 1530 offset *= sizeof (efx_qword_t); 1531 1532 EFSYS_MEM_PREFETCH(eep->ee_esmp, offset); 1533 } 1534 #endif /* EFSYS_OPT_EV_PREFETCH */ 1535 1536 EFX_EV_QSTAT_INCR(eep, EV_ALL); 1537 1538 code = EFX_QWORD_FIELD(ev[index], FSF_AZ_EV_CODE); 1539 switch (code) { 1540 case FSE_AZ_EV_CODE_RX_EV: 1541 should_abort = eep->ee_rx(eep, 1542 &(ev[index]), eecp, arg); 1543 break; 1544 case FSE_AZ_EV_CODE_TX_EV: 1545 should_abort = eep->ee_tx(eep, 1546 &(ev[index]), eecp, arg); 1547 break; 1548 case FSE_AZ_EV_CODE_DRIVER_EV: 1549 should_abort = eep->ee_driver(eep, 1550 &(ev[index]), eecp, arg); 1551 break; 1552 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1553 should_abort = eep->ee_drv_gen(eep, 1554 &(ev[index]), eecp, arg); 1555 break; 1556 #if EFSYS_OPT_MCDI 1557 case FSE_AZ_EV_CODE_MCDI_EVRESPONSE: 1558 should_abort = eep->ee_mcdi(eep, 1559 &(ev[index]), eecp, arg); 1560 break; 1561 #endif 1562 case FSE_AZ_EV_CODE_GLOBAL_EV: 1563 if (eep->ee_global) { 1564 should_abort = eep->ee_global(eep, 1565 &(ev[index]), eecp, arg); 1566 break; 1567 } 1568 /* else fallthrough */ 1569 default: 1570 EFSYS_PROBE3(bad_event, 1571 unsigned int, eep->ee_index, 1572 uint32_t, 1573 EFX_QWORD_FIELD(ev[index], EFX_DWORD_1), 1574 uint32_t, 1575 EFX_QWORD_FIELD(ev[index], EFX_DWORD_0)); 1576 1577 EFSYS_ASSERT(eecp->eec_exception != NULL); 1578 (void) eecp->eec_exception(arg, 1579 EFX_EXCEPTION_EV_ERROR, code); 1580 should_abort = B_TRUE; 1581 } 1582 if (should_abort) { 1583 /* Ignore subsequent events */ 1584 total = index + 1; 1585 1586 /* 1587 * Poison batch to ensure the outer 1588 * loop is broken out of. 1589 */ 1590 EFSYS_ASSERT(batch <= EFX_EV_BATCH); 1591 batch += (EFX_EV_BATCH << 1); 1592 EFSYS_ASSERT(total != batch); 1593 break; 1594 } 1595 } 1596 1597 /* 1598 * Now that the hardware has most likely moved onto dma'ing 1599 * into the next cache line, clear the processed events. Take 1600 * care to only clear out events that we've processed 1601 */ 1602 EFX_SET_QWORD(ev[0]); 1603 offset = (count & eep->ee_mask) * sizeof (efx_qword_t); 1604 for (index = 0; index < total; ++index) { 1605 EFSYS_MEM_WRITEQ(eep->ee_esmp, offset, &(ev[0])); 1606 offset += sizeof (efx_qword_t); 1607 } 1608 1609 count += total; 1610 1611 } while (total == batch); 1612 1613 *countp = count; 1614 } 1615 1616 #endif /* EFX_OPTS_EF10() || EFSYS_OPT_SIENA */ 1617