/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2018-2019 Solarflare Communications Inc.
 */

#include "efx.h"
#include "efx_impl.h"

#if EFSYS_OPT_RIVERHEAD

/*
 * Non-interrupting event queue requires interrupting event queue to
 * refer to for wake-up events even if wake ups are never used.
 * It could be even non-allocated event queue.
 */
#define	EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)

static	__checkReturn	boolean_t
rhead_ev_rx_packets(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
rhead_ev_tx_completion(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);


static	__checkReturn	boolean_t
rhead_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);


/*
 * Family-level event module initialisation.
 * Riverhead needs no per-NIC event state, so this is a no-op that
 * exists to satisfy the common efx_ev_ops_t contract.
 */
	__checkReturn	efx_rc_t
rhead_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))

	return (0);
}

/* Family-level event module teardown; no-op (see rhead_ev_init). */
			void
rhead_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

/*
 * Create an event queue: validate that the caller-supplied DMA buffer
 * is large enough for 'ndescs' descriptors, install the per-event-type
 * handler table on the EVQ, resolve the interrupt vector to use, and
 * issue MCDI INIT_EVQ to the firmware.
 *
 * Returns 0 on success, EINVAL if the buffer is too small, or the
 * MCDI failure code.
 */
	__checkReturn	efx_rc_t
rhead_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
	size_t desc_size;
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */

	/* Descriptor size comes from NIC config; extended-width EvQs differ */
	desc_size = encp->enc_ev_desc_size;
#if EFSYS_OPT_EV_EXTENDED_WIDTH
	if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH)
		desc_size = encp->enc_ev_ew_desc_size;
#endif
	EFSYS_ASSERT(desc_size != 0);

	if (EFSYS_MEM_SIZE(esmp) < (ndescs * desc_size)) {
		/* Buffer too small for event queue descriptors */
		rc = EINVAL;
		goto fail1;
	}

	/* Set up the handler table */
	eep->ee_rx	= rhead_ev_rx_packets;
	eep->ee_tx	= rhead_ev_tx_completion;
	eep->ee_driver	= NULL; /* FIXME */
	eep->ee_drv_gen	= NULL; /* FIXME */
	eep->ee_mcdi	= rhead_ev_mcdi;

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		irq = index;
	} else if (index == EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		/*
		 * Queue 0 is forced to interrupting mode so that
		 * non-interrupting queues always have a valid wake-up
		 * EVQ to refer to (see comment at the macro definition).
		 */
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		irq = EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */
	rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
	    B_FALSE);
	if (rc != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

/*
 * Destroy an event queue via MCDI FINI_EVQ.
 * The MCDI result is deliberately ignored: there is nothing useful the
 * caller could do on teardown failure.
 */
			void
rhead_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_RIVERHEAD);

	(void) efx_mcdi_fini_evq(enp, eep->ee_index);
}

/*
 * Re-arm (prime) the event queue: publish the host read pointer
 * (count masked to the ring size) to the EVQ_INT_PRIME register so the
 * NIC may raise further interrupts/wake-ups for this queue.
 */
	__checkReturn	efx_rc_t
rhead_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	rptr = count & eep->ee_mask;

	EFX_POPULATE_DWORD_2(dword, ERF_GZ_EVQ_ID, eep->ee_index,
	    ERF_GZ_IDX, rptr);
	/* EVQ_INT_PRIME lives function control window only on Riverhead */
	EFX_BAR_FCW_WRITED(enp, ER_GZ_EVQ_INT_PRIME, &dword);

	return (0);
}

/* Post a software (driver-generated) event. Unsupported on Riverhead. */
			void
rhead_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	_NOTE(ARGUNUSED(eep, data))

	/* Not implemented yet */
	EFSYS_ASSERT(B_FALSE);
}

/*
 * Poll event queue in batches. Size of the batch is equal to cache line
 * size divided by event size.
 *
 * Event queue is written by NIC and read by CPU. If CPU starts reading
 * of events on the cache line, read all remaining events in a tight
 * loop while event is present.
 */
#define	EF100_EV_BATCH	8

/*
 * Check if event is present.
 *
 * Riverhead EvQs use a phase bit to indicate the presence of valid events,
 * by flipping the phase bit on each wrap of the write index.
 */
#define	EF100_EV_PRESENT(_qword, _phase_bit)				\
	(EFX_QWORD_FIELD((_qword), ESF_GZ_EV_EVQ_PHASE) == _phase_bit)

/*
 * Poll the event queue starting at *countp, dispatching each event to
 * the handler installed for its type (Rx, Tx, MCDI) or to the
 * exception callback for unknown types. Stops when a handler requests
 * abort or no further events are present; *countp is advanced by the
 * number of events consumed so the caller can resume and prime later.
 */
			void
rhead_ev_qpoll(
	__in		efx_evq_t *eep,
	__inout		unsigned int *countp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_qword_t ev[EF100_EV_BATCH];
	unsigned int batch;
	unsigned int phase_bit;
	unsigned int total;
	unsigned int count;
	unsigned int index;
	size_t offset;

	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
	EFSYS_ASSERT(countp != NULL);
	EFSYS_ASSERT(eecp != NULL);

	count = *countp;
	do {
		/* Read up until the end of the batch period */
		batch = EF100_EV_BATCH - (count & (EF100_EV_BATCH - 1));
		/* Expected phase flips on every wrap of the ring */
		phase_bit = (count & (eep->ee_mask + 1)) != 0;
		offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
		for (total = 0; total < batch; ++total) {
			EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));

			if (!EF100_EV_PRESENT(ev[total], phase_bit))
				break;

			EFSYS_PROBE3(event, unsigned int, eep->ee_index,
			    uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));

			offset += sizeof (efx_qword_t);
		}

		/* Process the batch of events */
		for (index = 0; index < total; ++index) {
			boolean_t should_abort;
			uint32_t code;

			EFX_EV_QSTAT_INCR(eep, EV_ALL);

			code = EFX_QWORD_FIELD(ev[index], ESF_GZ_E_TYPE);
			switch (code) {
			case ESE_GZ_EF100_EV_RX_PKTS:
				should_abort = eep->ee_rx(eep,
				    &(ev[index]), eecp, arg);
				break;
			case ESE_GZ_EF100_EV_TX_COMPLETION:
				should_abort = eep->ee_tx(eep,
				    &(ev[index]), eecp, arg);
				break;
			case ESE_GZ_EF100_EV_MCDI:
				should_abort = eep->ee_mcdi(eep,
				    &(ev[index]), eecp, arg);
				break;
			default:
				EFSYS_PROBE3(bad_event,
				    unsigned int, eep->ee_index,
				    uint32_t,
				    EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
				    uint32_t,
				    EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));

				EFSYS_ASSERT(eecp->eec_exception != NULL);
				(void) eecp->eec_exception(arg,
					EFX_EXCEPTION_EV_ERROR, code);
				should_abort = B_TRUE;
			}
			if (should_abort) {
				/* Ignore subsequent events */
				total = index + 1;

				/*
				 * Poison batch to ensure the outer
				 * loop is broken out of.
				 */
				EFSYS_ASSERT(batch <= EF100_EV_BATCH);
				batch += (EF100_EV_BATCH << 1);
				EFSYS_ASSERT(total != batch);
				break;
			}
		}

		/*
		 * There is no necessity to clear processed events since
		 * phase bit which is flipping on each write index wrap
		 * is used for event presence indication.
		 */

		count += total;

	} while (total == batch);

	*countp = count;
}

/* Runtime interrupt moderation change is not supported on Riverhead. */
	__checkReturn	efx_rc_t
rhead_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	_NOTE(ARGUNUSED(eep, us))

	return (ENOTSUP);
}


#if EFSYS_OPT_QSTATS
/*
 * Fold the per-queue event statistics accumulated by
 * EFX_EV_QSTAT_INCR into the caller's array and reset the
 * per-queue counters.
 */
			void
rhead_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */

/*
 * Handle an EV_RX event: extract the queue label and the number of
 * received packets from the event and pass them to the client's
 * eec_rx_packets callback. Returns the callback's abort request.
 */
static	__checkReturn	boolean_t
rhead_ev_rx_packets(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t label;
	uint32_t num_packets;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_Q_LABEL);

	/*
	 * On EF100 the EV_RX event reports the number of received
	 * packets (unlike EF10 which reports a descriptor index).
	 * The client driver is responsible for maintaining the Rx
	 * descriptor index, and computing how many descriptors are
	 * occupied by each received packet (based on the Rx buffer size
	 * and the packet length from the Rx prefix).
	 */
	num_packets = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_NUM_PKT);

	/*
	 * The receive event may indicate more than one packet, and so
	 * does not contain the packet length. Read the packet length
	 * from the prefix when handling each packet.
	 */
	EFSYS_ASSERT(eecp->eec_rx_packets != NULL);
	should_abort = eecp->eec_rx_packets(arg, label, num_packets,
	    EFX_PKT_PREFIX_LEN);

	return (should_abort);
}

/*
 * Handle an EV_TX completion event: extract the queue label and the
 * number of completed Tx descriptors and pass them to the client's
 * eec_tx_ndescs callback. Returns the callback's abort request.
 */
static	__checkReturn	boolean_t
rhead_ev_tx_completion(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t num_descs;
	uint32_t label;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_TX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_Q_LABEL);

	/*
	 * On EF100 the EV_TX event reports the number of completed Tx
	 * descriptors (on EF10, the event reports the low bits of the
	 * index of the last completed descriptor).
	 * The client driver completion callback will compute the
	 * descriptor index, so that is not needed here.
	 */
	num_descs = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_NUM_DESC);

	EFSYS_PROBE2(tx_ndescs, uint32_t, label, unsigned int, num_descs);

	EFSYS_ASSERT(eecp->eec_tx_ndescs != NULL);
	should_abort = eecp->eec_tx_ndescs(arg, label, num_descs);

	return (should_abort);
}

/* Handle an MCDI event by delegating to the EF10 handler (same layout). */
static	__checkReturn	boolean_t
rhead_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	boolean_t ret;

	/*
	 * Event format was changed post Riverhead R1 and now
	 * MCDI event layout on EF100 is exactly the same as on EF10
	 * except added QDMA phase bit which is unused on EF10.
	 */
	ret = ef10_ev_mcdi(eep, eqp, eecp, arg);

	return (ret);
}

#endif /* EFSYS_OPT_RIVERHEAD */