/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2012-2019 Solarflare Communications Inc.
 */

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_STATS
#include "mcdi_mon.h"
#endif

#if EFX_OPTS_EF10()

/*
 * A non-interrupting event queue requires an interrupting event queue to
 * refer to for wake-up events, even if wake-ups are never used.
 * It may even be a non-allocated event queue.
 */
#define	EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);


static	__checkReturn	efx_rc_t
efx_mcdi_set_evq_tmr(
	__in		efx_nic_t *enp,
	__in		uint32_t instance,
	__in		uint32_t mode,
	__in		uint32_t timer_ns)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN,
		MC_CMD_SET_EVQ_TMR_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

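/*
 * Illustrative note for efx_mcdi_set_evq_tmr() above (example values,
 * not taken from this file): for a moderation setting of 50 us,
 * ef10_ev_qmoderate() below calls this helper on firmware that needs
 * the bug61265 workaround, with timer_ns = 50 * 1000 and mode
 * MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF; the same value is used
 * for both the initial load and the reload of the timer.
 */
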
	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
ef10_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		uint32_t irq,
	__in		efx_evq_t *eep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t target_evq = 0;
	efx_rc_t rc;
	boolean_t low_latency;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */

	EFSYS_ASSERT((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) == 0);

	/*
	 * NO_CONT_EV mode is only requested from the firmware when creating
	 * receive queues, but here it needs to be specified at event queue
	 * creation, as the event handler needs to know which format is in use.
	 *
	 * If EFX_EVQ_FLAGS_NO_CONT_EV is specified, all receive queues for
	 * this event queue will be created in NO_CONT_EV mode.
	 *
	 * See SF-109306-TC 5.11 "Events for RXQs in NO_CONT_EV mode".
	 */
	if (flags & EFX_EVQ_FLAGS_NO_CONT_EV) {
		if (enp->en_nic_cfg.enc_no_cont_ev_mode_supported == B_FALSE) {
			rc = EINVAL;
			goto fail1;
		}
	}

	/* Set up the handler table */
	eep->ee_rx	= ef10_ev_rx;
	eep->ee_tx	= ef10_ev_tx;
	eep->ee_driver	= ef10_ev_driver;
	eep->ee_drv_gen	= ef10_ev_drv_gen;
	eep->ee_mcdi	= ef10_ev_mcdi;

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		/* IRQ number is specified by caller */
	} else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		/* Use the first interrupt for always interrupting EvQ */
		irq = 0;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		target_evq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */

	/*
	 * On Huntington we need to specify the settings to use.
	 * If the event queue type in flags is auto, we favour throughput
	 * if the adapter is running virtualization-supporting firmware
	 * (i.e. the full-featured firmware variant), and latency otherwise.
	 * The Ethernet Virtual Bridging capability is used to make this
	 * decision. (Note though that the low-latency firmware variant is
	 * also best for throughput, and the corresponding type should be
	 * specified to choose it.)
	 *
	 * If the FW supports EvQ types (e.g. on Medford and Medford2), the
	 * type specified in flags is passed to the FW to make the decision,
	 * and the low_latency hint is ignored.
	 */
	low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
	rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, target_evq, us,
	    flags, low_latency);
	if (rc != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));

	(void) efx_mcdi_fini_evq(enp, eep->ee_index);
}

	__checkReturn	efx_rc_t
ef10_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	rptr = count & eep->ee_mask;

	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		EFX_STATIC_ASSERT(EF10_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EF10_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}

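/*
 * Illustrative only: the ef10_ev_* methods above are normally reached
 * through the public libefx wrappers rather than called directly. A
 * minimal caller sequence, assuming the standard efx_ev_qcreate(),
 * efx_ev_qprime() and efx_ev_qdestroy() entry points and eliding error
 * handling, looks roughly like:
 *
 *	efx_evq_t *eep;
 *
 *	if (efx_ev_qcreate(enp, index, esmp, ndescs, 0, us,
 *	    EFX_EVQ_FLAGS_TYPE_AUTO, &eep) == 0) {
 *		(void) efx_ev_qprime(eep, 0);	(request a wake-up)
 *		...
 *		efx_ev_qdestroy(eep);
 *	}
 */
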
static	__checkReturn	efx_rc_t
efx_mcdi_driver_event(
	__in		efx_nic_t *enp,
	__in		uint32_t evq,
	__in		efx_qword_t data)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN,
		MC_CMD_DRIVER_EVENT_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_DRIVER_EVENT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
	    EFX_QWORD_FIELD(data, EFX_DWORD_1));

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_qword_t event;

	EFX_POPULATE_QWORD_3(event,
	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
	    ESF_DZ_DRV_SUB_CODE, 0,
	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}

	__checkReturn	efx_rc_t
ef10_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_dword_t dword;
	uint32_t mode;
	efx_rc_t rc;

	/* Check that hardware and MCDI use the same timer MODE values */
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail1;
	}

	/* If the value is zero then disable the timer */
	if (us == 0) {
		mode = FFE_CZ_TIMER_MODE_DIS;
	} else {
		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
	}

	if (encp->enc_bug61265_workaround) {
		uint32_t ns = us * 1000;

		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
		if (rc != 0)
			goto fail2;
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		if (encp->enc_bug35388_workaround) {
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DD_EVQ_IND_TIMER_FLAGS,
			    EFE_DD_EVQ_IND_TIMER_FLAGS,
			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
			EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT,
			    eep->ee_index, &dword, 0);
		} else {
			/*
			 * NOTE: The TMR_REL field introduced in Medford2 is
			 * ignored on earlier EF10 controllers. See bug66418
			 * comment 9 for details.
			 */
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DZ_TC_TIMER_MODE, mode,
			    ERF_DZ_TC_TIMER_VAL, ticks,
			    ERF_FZ_TC_TMR_REL_VAL, ticks);
			EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG,
			    eep->ee_index, &dword, 0);
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}


#if EFSYS_OPT_QSTATS
			void
ef10_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */

#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER

static	__checkReturn	boolean_t
ef10_ev_rx_packed_stream(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t label;
	uint32_t pkt_count_lbits;
	uint16_t flags;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int pkt_count;
	unsigned int current_id;
	boolean_t new_buffer;

	pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);

	flags = 0;

	eersp = &eep->ee_rxq_state[label];

	/*
	 * RX_DSC_PTR_LBITS has the least significant bits of the global
	 * (not per-buffer) packet counter. It is guaranteed that the
	 * maximum number of completed packets fits in the lbits mask.
	 * So, modulo lbits-mask arithmetic should be used to calculate
	 * the packet counter increment.
	 */
	pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_stream_npackets += pkt_count;

	if (new_buffer) {
		flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
#if EFSYS_OPT_RX_PACKED_STREAM
		/*
		 * If both packed stream and equal stride super-buffer
		 * modes are compiled in, in theory credits should be
		 * maintained for packed stream only, but right now
		 * these modes are not distinguished in the event queue
		 * Rx queue state and it is OK to increment the counter
		 * regardless (it might be even cheaper than branching
		 * since neighbouring structure members are updated as well).
		 */
		eersp->eers_rx_packed_stream_credits++;
#endif
		eersp->eers_rx_read_ptr++;
	}
	current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
		/* RX frame truncated */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE);
		flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
	should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
	    flags);

	return (should_abort);
}

#endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	/* Basic packet information */
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eersp = &eep->ee_rxq_state[label];

#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	/*
	 * Packed stream events are very different,
	 * so handle them separately
	 */
	if (eersp->eers_rx_packed_stream)
		return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
#endif

	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);

	/*
	 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
	 * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
	 * and values for all EF10 controllers.
	 */
	EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN);
	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN);

	l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}
	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is configured
		 * for 1500 MTU but connected to a port set to 9000 MTU
		 * (see bug56567).
		 * FIXME: There is not yet any driver that supports scatter on
		 * Huntington. Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/*
	 * Increment the count of descriptors read.
	 *
	 * In NO_CONT_EV mode, RX_DSC_PTR_LBITS is actually a packet count,
	 * but when scatter is disabled, there is only one descriptor per
	 * packet and so it can be treated the same.
	 *
	 * TODO: Support scatter in NO_CONT_EV mode.
	 */
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_read_ptr += desc_count;

	/* Calculate the index of the last descriptor consumed */
	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

	if (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV) {
		if (desc_count > 1)
			EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);

		/* Always read the length from the prefix in NO_CONT_EV mode */
		flags |= EFX_PKT_PREFIX_LEN;

		/*
		 * Check for an aborted scatter, signalled by the ABORT bit in
		 * NO_CONT_EV mode. The ABORT bit was not used before
		 * NO_CONT_EV mode was added as it was broken in Huntington
		 * silicon.
		 */
		if (EFX_QWORD_FIELD(*eqp, ESF_EZ_RX_ABORT) != 0) {
			flags |= EFX_DISCARD;
			goto deliver;
		}
	} else if (desc_count > 1) {
		/*
		 * FIXME: add error checking to make sure this is a batched
		 * event. This could also be an aborted scatter, see Bug36629.
		 */
		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
		flags |= EFX_PKT_PREFIX_LEN;
	}

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
		/* RX frame truncated */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		/*
		 * Hardware parse failed, due to malformed headers
		 * or headers that are too long for the parser.
		 * Headers and checksums must be validated by the host.
		 */
		EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE);
		goto deliver;
	}

	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
		flags |= EFX_PKT_VLAN_TAGGED;
	}

	switch (l3_class) {
	case ESE_DZ_L3_CLASS_IP4:
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		flags |= EFX_PKT_IPV4;
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_IPV4;
		}

		/*
		 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but
		 * is only 2 bits wide on Medford2. Check it is safe to use
		 * the Medford2 field and values for all EF10 controllers.
		 */
		EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
		    ESF_DE_RX_L4_CLASS_LBN);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
		    ESE_DE_L4_CLASS_UNKNOWN);

		if (l4_class == ESE_FZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
		}
		break;

	case ESE_DZ_L3_CLASS_IP6:
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		flags |= EFX_PKT_IPV6;

		/*
		 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but
		 * is only 2 bits wide on Medford2. Check it is safe to use
		 * the Medford2 field and values for all EF10 controllers.
		 */
		EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
		    ESF_DE_RX_L4_CLASS_LBN);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
		    ESE_DE_L4_CLASS_UNKNOWN);

		if (l4_class == ESE_FZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
		}
		break;

	default:
		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
		break;
	}

	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_TCPUDP;
		}
	}

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx != NULL);
	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

	return (should_abort);
}

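/*
 * Worked example (with an illustrative field width, not the real one)
 * of the modular RX_DSC_PTR_LBITS arithmetic used in ef10_ev_rx() above.
 * If the field were 4 bits wide, the software read pointer stood at 14
 * and an event reported lbits of 2, then
 *
 *	desc_count = (2 - 14) & 0xf = 4
 *
 * i.e. the wrap from 15 to 0 is absorbed by the masked subtraction, and
 * the read pointer advances to 18 (whose low 4 bits are indeed 2). The
 * real width is given by ESF_DZ_RX_DSC_PTR_LBITS.
 */
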
static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t id;
	uint32_t label;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_TX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}

	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);

	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);

	EFSYS_ASSERT(eecp->eec_tx != NULL);
	should_abort = eecp->eec_tx(arg, label, id);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	unsigned int code;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
	should_abort = B_FALSE;

	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
	switch (code) {
	case ESE_DZ_DRV_TIMER_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

		EFSYS_ASSERT(eecp->eec_timer != NULL);
		should_abort = eecp->eec_timer(arg, id);
		break;
	}

	case ESE_DZ_DRV_WAKE_UP_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
		should_abort = eecp->eec_wake_up(arg, id);
		break;
	}

	case ESE_DZ_DRV_START_UP_EV:
		EFSYS_ASSERT(eecp->eec_initialized != NULL);
		should_abort = eecp->eec_initialized(arg);
		break;

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t data;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
	should_abort = B_FALSE;

	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
	if (data >= ((uint32_t)1 << 16)) {
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		return (B_TRUE);
	}

	EFSYS_ASSERT(eecp->eec_software != NULL);
	should_abort = eecp->eec_software(arg, (uint16_t)data);

	return (should_abort);
}

#endif /* EFX_OPTS_EF10() */

#if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()

	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned int code;
	boolean_t should_abort = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization request
		 * has been processed. If the request was authorized then the
		 * function can now re-send the original MCDI request.
		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
	case MCDI_EVENT_CODE_PROXY_REQUEST:
		efx_mcdi_ev_proxy_request(enp,
		    MCDI_EV_FIELD(eqp, PROXY_REQUEST_BUFF_INDEX));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		ef10_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_UNKNOWN_SENSOREVT,
			    MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not be seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later. */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_FWALERT_SRAM,
			    MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
			    EFX_EXCEPTION_UNKNOWN_FWALERT,
			    MCDI_EV_FIELD(eqp, DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_ERR: {
		/*
		 * After a TXQ error is detected, firmware sends a TX_ERR event.
		 * This may be followed by TX completions (which we discard),
		 * and then finally by a TX_FLUSH event. Firmware destroys the
		 * TXQ automatically after sending the TX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

		EFSYS_PROBE2(tx_descq_err,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_FLUSH: {
		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

		/*
		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with TX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
		break;
	}

	case MCDI_EVENT_CODE_RX_ERR: {
		/*
		 * After an RXQ error is detected, firmware sends an RX_ERR
		 * event. This may be followed by RX events (which we discard),
		 * and then finally by an RX_FLUSH event. Firmware destroys the
		 * RXQ automatically after sending the RX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

		EFSYS_PROBE2(rx_descq_err,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_RX_FLUSH: {
		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

		/*
		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with RX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
		break;
	}

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

#endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */

#if EFX_OPTS_EF10()

			void
ef10_ev_rxlabel_init(
	__in		efx_evq_t *eep,
	__in		efx_rxq_t *erp,
	__in		unsigned int label,
	__in		efx_rxq_type_t type)
{
	efx_evq_rxq_state_t *eersp;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
	boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER);
#endif

	_NOTE(ARGUNUSED(type))
	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

#if EFSYS_OPT_RX_PACKED_STREAM
	/*
	 * For packed stream modes, the very first event will have the
	 * new-buffer flag set, so the read pointer will be incremented,
	 * yielding the correct pointer. That results in simpler code than
	 * trying to detect the start-of-the-world condition in the event
	 * handler.
	 */
	eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
#else
	eersp->eers_rx_read_ptr = 0;
#endif
	eersp->eers_rx_mask = erp->er_mask;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = packed_stream || es_super_buffer;
#endif
#if EFSYS_OPT_RX_PACKED_STREAM
	if (packed_stream) {
		eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
		    EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
			EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
		/*
		 * A single credit is allocated to the queue when it is
		 * started. It is immediately spent by the first packet,
		 * which has the NEW_BUFFER flag set. Still, it must be
		 * taken into account here so as not to accidentally wrap
		 * around the maximum number of credits.
		 */
		eersp->eers_rx_packed_stream_credits--;
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
		    EFX_RX_PACKED_STREAM_MAX_CREDITS);
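
		/*
		 * Illustrative arithmetic only (hypothetical values, not
		 * the real constants): with a 512-entry event queue
		 * (eep->ee_mask + 1 == 512) and a credit covering, say,
		 * up to 16 minimum-spaced packets, the computation above
		 * would yield 512 / 16 = 32 credits, and the decrement
		 * above would leave 31 available at start of day.
		 */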
	}
#endif
}

			void
ef10_ev_rxlabel_fini(
	__in		efx_evq_t *eep,
	__in		unsigned int label)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);

	eersp->eers_rx_read_ptr = 0;
	eersp->eers_rx_mask = 0;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = B_FALSE;
#endif
#if EFSYS_OPT_RX_PACKED_STREAM
	eersp->eers_rx_packed_stream_credits = 0;
#endif
}

#endif /* EFX_OPTS_EF10() */
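
/*
 * For reference, an illustrative sketch (not part of this file) of how
 * the handlers installed by ef10_ev_qcreate() are driven. The public
 * efx_ev_qpoll() walks the event ring and dispatches each event to the
 * matching eec_* callback; the callback member names are those used
 * throughout this file, everything else here is hypothetical.
 *
 *	static const efx_ev_callbacks_t my_callbacks = {
 *		.eec_rx = my_rx_done,
 *		.eec_tx = my_tx_done,
 *		.eec_exception = my_exception,
 *		.eec_link_change = my_link_change,
 *	};
 *
 *	unsigned int count = 0;
 *
 *	efx_ev_qpoll(eep, &count, &my_callbacks, my_ctx);
 *	(void) efx_ev_qprime(eep, count);	(re-arm for the next wake-up)
 */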