/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2018-2019 Solarflare Communications Inc.
 */

#include "efx.h"
#include "efx_impl.h"

#if EFSYS_OPT_RIVERHEAD

/*
 * A non-interrupting event queue requires an interrupting event queue
 * to refer to for wake-up events, even if wake-ups are never used.
 * The referenced event queue may even be unallocated.
 */
#define	EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)


	__checkReturn	efx_rc_t
rhead_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))

	return (0);
}

			void
rhead_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
rhead_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */

	/* Set up the handler table */
	eep->ee_rx	= NULL;	/* FIXME */
	eep->ee_tx	= NULL;	/* FIXME */
	eep->ee_driver	= NULL;	/* FIXME */
	eep->ee_drv_gen	= NULL;	/* FIXME */
	eep->ee_mcdi	= NULL;	/* FIXME */

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		irq = index;
	} else if (index == EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		irq = EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

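	/*
	 * In summary: a queue created with EFX_EVQ_FLAGS_NOTIFY_INTERRUPT
	 * uses its own index as the interrupt vector; the queue at
	 * EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX is forced to interrupting
	 * mode so that a wake-up target always exists; any other
	 * non-interrupting queue borrows that queue's vector (for example,
	 * a non-interrupting queue at index 3 is given irq 0 here).
	 */
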
	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */
	rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
	    B_FALSE);
	if (rc != 0)
		goto fail1;

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
rhead_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_RIVERHEAD);

	(void) efx_mcdi_fini_evq(enp, eep->ee_index);
}

	__checkReturn	efx_rc_t
rhead_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	rptr = count & eep->ee_mask;

	EFX_POPULATE_DWORD_2(dword, ERF_GZ_EVQ_ID, eep->ee_index,
	    ERF_GZ_IDX, rptr);
	/* EVQ_INT_PRIME lives in the function control window only on Riverhead */
	EFX_BAR_WRITED(enp, ER_GZ_EVQ_INT_PRIME, &dword, B_FALSE);

	return (0);
}

			void
rhead_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	_NOTE(ARGUNUSED(eep, data))

	/* Not implemented yet */
	EFSYS_ASSERT(B_FALSE);
}

/*
 * Poll event queue in batches. The batch size is equal to the cache line
 * size divided by the event size.
 *
 * The event queue is written by the NIC and read by the CPU. When the CPU
 * starts reading events from a cache line, it reads all remaining events
 * on that line in a tight loop while events are present.
 */
#define	EF100_EV_BATCH	8
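
/*
 * Worked example (derived from the qpoll arithmetic below): with
 * EF100_EV_BATCH == 8 and a running count of 5, the first pass reads
 * batch = 8 - (5 & 7) = 3 events, which advances the read position to
 * the next cache line boundary; subsequent passes read full batches
 * of 8 while events keep arriving.
 */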

/*
 * Check if event is present.
 *
 * Riverhead EvQs use a phase bit to indicate the presence of valid events,
 * by flipping the phase bit on each wrap of the write index.
 */
#define	EF100_EV_PRESENT(_qword, _phase_bit)				\
	(EFX_QWORD_FIELD((_qword), ESF_GZ_EV_EVQ_PHASE) == _phase_bit)
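
/*
 * Worked example (assuming an 8-entry queue, so ee_mask == 7): the
 * expected phase computed in qpoll is (count & 8) != 0, i.e. 0 for
 * counts 0..7, 1 for counts 8..15, and so on, flipping on each wrap.
 * An entry left over from the previous lap still carries the old phase
 * value and so fails the EF100_EV_PRESENT comparison, which is why
 * processed events never need to be cleared.
 */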

			void
rhead_ev_qpoll(
	__in		efx_evq_t *eep,
	__inout		unsigned int *countp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_qword_t ev[EF100_EV_BATCH];
	unsigned int batch;
	unsigned int phase_bit;
	unsigned int total;
	unsigned int count;
	unsigned int index;
	size_t offset;

	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
	EFSYS_ASSERT(countp != NULL);
	EFSYS_ASSERT(eecp != NULL);

	count = *countp;
	do {
		/* Read up until the end of the batch period */
		batch = EF100_EV_BATCH - (count & (EF100_EV_BATCH - 1));
		phase_bit = (count & (eep->ee_mask + 1)) != 0;
		offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
		for (total = 0; total < batch; ++total) {
			EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));

			if (!EF100_EV_PRESENT(ev[total], phase_bit))
				break;

			EFSYS_PROBE3(event, unsigned int, eep->ee_index,
			    uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));

			offset += sizeof (efx_qword_t);
		}

		/* Process the batch of events */
		for (index = 0; index < total; ++index) {
			boolean_t should_abort;
			uint32_t code;

			EFX_EV_QSTAT_INCR(eep, EV_ALL);

			code = EFX_QWORD_FIELD(ev[index], ESF_GZ_E_TYPE);
			switch (code) {
			default:
				EFSYS_PROBE3(bad_event,
				    unsigned int, eep->ee_index,
				    uint32_t,
				    EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
				    uint32_t,
				    EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));

				EFSYS_ASSERT(eecp->eec_exception != NULL);
				(void) eecp->eec_exception(arg,
				    EFX_EXCEPTION_EV_ERROR, code);
				should_abort = B_TRUE;
			}
			if (should_abort) {
				/* Ignore subsequent events */
				total = index + 1;

				/*
				 * Poison batch to ensure the outer
				 * loop is broken out of.
				 */
				EFSYS_ASSERT(batch <= EF100_EV_BATCH);
				batch += (EF100_EV_BATCH << 1);
				EFSYS_ASSERT(total != batch);
				break;
			}
		}

		/*
		 * There is no need to clear processed events since the
		 * phase bit, which flips on each write index wrap, is
		 * used to indicate event presence.
		 */

		count += total;

	} while (total == batch);

	*countp = count;
}

	__checkReturn	efx_rc_t
rhead_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	_NOTE(ARGUNUSED(eep, us))

	return (ENOTSUP);
}


#if EFSYS_OPT_QSTATS
			void
rhead_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif	/* EFSYS_OPT_QSTATS */

#endif	/* EFSYS_OPT_RIVERHEAD */