xref: /dpdk/drivers/common/sfc_efx/base/rhead_ev.c (revision 1a4448be57aa7851a3d73b969ebe58fa9eb51631)
/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2018-2019 Solarflare Communications Inc.
 */

#include "efx.h"
#include "efx_impl.h"

#if EFSYS_OPT_RIVERHEAD

/*
 * A non-interrupting event queue requires an interrupting event queue
 * to refer to for wake-up events, even if wake-ups are never used.
 * It may even be a non-allocated event queue.
 */
#define	EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)

static	__checkReturn	boolean_t
rhead_ev_rx_packets(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
rhead_ev_tx_completion(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
rhead_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

#if EFSYS_OPT_EV_EXTENDED_WIDTH
static			boolean_t
rhead_ev_ew_dispatch(
	__in		efx_evq_t *eep,
	__in		efx_xword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static			void
rhead_ev_ew_qpoll(
	__in		efx_evq_t *eep,
	__inout		unsigned int *countp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);
#endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */


	__checkReturn	efx_rc_t
rhead_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))

	return (0);
}

			void
rhead_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
rhead_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
	size_t desc_size;
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */

	desc_size = encp->enc_ev_desc_size;
#if EFSYS_OPT_EV_EXTENDED_WIDTH
	if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH)
		desc_size = encp->enc_ev_ew_desc_size;
#endif
	EFSYS_ASSERT(desc_size != 0);

	if (EFSYS_MEM_SIZE(esmp) < (ndescs * desc_size)) {
		/* Buffer too small for event queue descriptors */
		rc = EINVAL;
		goto fail1;
	}

	/* Set up the handler table */
	eep->ee_rx	= rhead_ev_rx_packets;
	eep->ee_tx	= rhead_ev_tx_completion;
	eep->ee_driver	= NULL; /* FIXME */
	eep->ee_drv_gen	= NULL; /* FIXME */
	eep->ee_mcdi	= rhead_ev_mcdi;

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
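	/*
	 * Queues that request interrupt notification use the vector that
	 * matches their own index. All other queues refer to the vector of
	 * the always-interrupting queue, which itself is forced to
	 * interrupting mode so that a valid wake-up target always exists.
	 */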
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		irq = index;
	} else if (index == EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		irq = EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */
	rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
	    B_FALSE);
	if (rc != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
rhead_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_RIVERHEAD);

	(void) efx_mcdi_fini_evq(enp, eep->ee_index);
}

	__checkReturn	efx_rc_t
rhead_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

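	/*
	 * The prime register takes the count truncated to the ring size,
	 * i.e. the index of the next event the driver expects to process.
	 */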
	rptr = count & eep->ee_mask;

	EFX_POPULATE_DWORD_2(dword, ERF_GZ_EVQ_ID, eep->ee_index,
	    ERF_GZ_IDX, rptr);
	/* EVQ_INT_PRIME lives in the function control window only on Riverhead */
	EFX_BAR_FCW_WRITED(enp, ER_GZ_EVQ_INT_PRIME, &dword);

	return (0);
}

			void
rhead_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	_NOTE(ARGUNUSED(eep, data))

	/* Not implemented yet */
	EFSYS_ASSERT(B_FALSE);
}

/*
 * Poll the event queue in batches. The batch size is equal to the cache
 * line size divided by the event size.
 *
 * The event queue is written by the NIC and read by the CPU. Once the CPU
 * starts reading events from a cache line, it reads all remaining events
 * on that line in a tight loop while events are present.
 */
#define	EF100_EV_BATCH	8
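/*
 * Normal (non-extended-width) events are 8-byte quadwords, so a batch of
 * 8 events corresponds to a 64-byte cache line.
 */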

/*
 * Check if event is present.
 *
 * Riverhead EvQs use a phase bit to indicate the presence of valid events,
 * by flipping the phase bit on each wrap of the write index.
 */
#define	EF100_EV_PRESENT(_qword, _phase_bit)				\
	(EFX_QWORD_FIELD((_qword), ESF_GZ_EV_EVQ_PHASE) == _phase_bit)
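/*
 * The expected phase for a given read position is bit N of the total
 * event count, where the queue has 2^N entries (ee_mask == 2^N - 1),
 * i.e. the parity of the number of completed wraps of the read pointer.
 * An entry whose phase differs from this value has not yet been written
 * on the current pass.
 */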

			void
rhead_ev_qpoll(
	__in		efx_evq_t *eep,
	__inout		unsigned int *countp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_qword_t ev[EF100_EV_BATCH];
	unsigned int batch;
	unsigned int phase_bit;
	unsigned int total;
	unsigned int count;
	unsigned int index;
	size_t offset;

#if EFSYS_OPT_EV_EXTENDED_WIDTH
	if (eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) {
		rhead_ev_ew_qpoll(eep, countp, eecp, arg);
		return;
	}
#endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */

	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
	EFSYS_ASSERT(countp != NULL);
	EFSYS_ASSERT(eecp != NULL);

	count = *countp;
	do {
		/* Read up until the end of the batch period */
		batch = EF100_EV_BATCH - (count & (EF100_EV_BATCH - 1));
		phase_bit = (count & (eep->ee_mask + 1)) != 0;
		offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
		for (total = 0; total < batch; ++total) {
			EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));

			if (!EF100_EV_PRESENT(ev[total], phase_bit))
				break;

			EFSYS_PROBE3(event, unsigned int, eep->ee_index,
			    uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));

			offset += sizeof (efx_qword_t);
		}

		/* Process the batch of events */
		for (index = 0; index < total; ++index) {
			boolean_t should_abort;
			uint32_t code;

			EFX_EV_QSTAT_INCR(eep, EV_ALL);

			code = EFX_QWORD_FIELD(ev[index], ESF_GZ_E_TYPE);
			switch (code) {
			case ESE_GZ_EF100_EV_RX_PKTS:
				should_abort = eep->ee_rx(eep,
				    &(ev[index]), eecp, arg);
				break;
			case ESE_GZ_EF100_EV_TX_COMPLETION:
				should_abort = eep->ee_tx(eep,
				    &(ev[index]), eecp, arg);
				break;
			case ESE_GZ_EF100_EV_MCDI:
				should_abort = eep->ee_mcdi(eep,
				    &(ev[index]), eecp, arg);
				break;
			default:
				EFSYS_PROBE3(bad_event,
				    unsigned int, eep->ee_index,
				    uint32_t,
				    EFX_QWORD_FIELD(ev[index], EFX_DWORD_1),
				    uint32_t,
				    EFX_QWORD_FIELD(ev[index], EFX_DWORD_0));

				EFSYS_ASSERT(eecp->eec_exception != NULL);
				(void) eecp->eec_exception(arg,
					EFX_EXCEPTION_EV_ERROR, code);
				should_abort = B_TRUE;
			}
			if (should_abort) {
				/* Ignore subsequent events */
				total = index + 1;

				/*
				 * Poison batch to ensure the outer
				 * loop is broken out of.
				 */
				EFSYS_ASSERT(batch <= EF100_EV_BATCH);
				batch += (EF100_EV_BATCH << 1);
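				/*
				 * batch now exceeds EF100_EV_BATCH while
				 * total cannot, so the loop condition
				 * (total == batch) below must be false.
				 */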
				EFSYS_ASSERT(total != batch);
				break;
			}
		}

		/*
		 * There is no need to clear processed events since the
		 * phase bit, which flips on each write index wrap, is
		 * used to indicate event presence.
		 */

		count += total;

	} while (total == batch);

	*countp = count;
}

#if EFSYS_OPT_EV_EXTENDED_WIDTH
static			boolean_t
rhead_ev_ew_dispatch(
	__in		efx_evq_t *eep,
	__in		efx_xword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	boolean_t should_abort;
	uint32_t code;

	EFSYS_ASSERT((eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) != 0);

	code = EFX_XWORD_FIELD(*eventp, ESF_GZ_EV_256_EV32_TYPE);
	switch (code) {
	default:
		/* Omit currently unused reserved bits from the probe. */
		EFSYS_PROBE7(ew_bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_7),
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_4),
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_3),
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_2),
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_1),
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_0));

		EFSYS_ASSERT(eecp->eec_exception != NULL);
		(void) eecp->eec_exception(arg, EFX_EXCEPTION_EV_ERROR, code);
		should_abort = B_TRUE;
	}

	return (should_abort);
}

/*
 * Poll the extended width event queue in batches. The batch size is
 * equal to the cache line size divided by the event size.
 */
#define	EF100_EV_EW_BATCH	2
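/*
 * Extended width events are 32-byte (256-bit) xwords, so a batch of 2
 * events corresponds to a 64-byte cache line.
 */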

/*
 * Check if event is present.
 *
 * Riverhead EvQs use a phase bit to indicate the presence of valid events,
 * by flipping the phase bit on each wrap of the write index.
 */
#define	EF100_EV_EW_PRESENT(_xword, _phase_bit)				\
	(EFX_XWORD_FIELD((_xword), ESF_GZ_EV_256_EV32_PHASE) == (_phase_bit))

static			void
rhead_ev_ew_qpoll(
	__in		efx_evq_t *eep,
	__inout		unsigned int *countp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_xword_t ev[EF100_EV_EW_BATCH];
	unsigned int batch;
	unsigned int phase_bit;
	unsigned int total;
	unsigned int count;
	unsigned int index;
	size_t offset;

	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
	EFSYS_ASSERT((eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) != 0);
	EFSYS_ASSERT(countp != NULL);
	EFSYS_ASSERT(eecp != NULL);

	count = *countp;
	do {
		/* Read up until the end of the batch period */
		batch = EF100_EV_EW_BATCH - (count & (EF100_EV_EW_BATCH - 1));
		phase_bit = (count & (eep->ee_mask + 1)) != 0;
		offset = (count & eep->ee_mask) * sizeof (efx_xword_t);
		for (total = 0; total < batch; ++total) {
			EFSYS_MEM_READX(eep->ee_esmp, offset, &(ev[total]));

			if (!EF100_EV_EW_PRESENT(ev[total], phase_bit))
				break;

			/* Omit unused reserved bits from the probe. */
			EFSYS_PROBE7(ew_event, unsigned int, eep->ee_index,
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_7),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_4),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_3),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_2),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_1),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_0));

			offset += sizeof (efx_xword_t);
		}

		/* Process the batch of events */
		for (index = 0; index < total; ++index) {
			boolean_t should_abort;

			EFX_EV_QSTAT_INCR(eep, EV_ALL);

			should_abort =
			    rhead_ev_ew_dispatch(eep, &(ev[index]), eecp, arg);

			if (should_abort) {
				/* Ignore subsequent events */
				total = index + 1;

				/*
				 * Poison batch to ensure the outer
				 * loop is broken out of.
				 */
				EFSYS_ASSERT(batch <= EF100_EV_EW_BATCH);
				batch += (EF100_EV_EW_BATCH << 1);
				EFSYS_ASSERT(total != batch);
				break;
			}
		}

		/*
		 * There is no need to clear processed events since the
		 * phase bit, which flips on each write index wrap, is
		 * used to indicate event presence.
		 */

		count += total;

	} while (total == batch);

	*countp = count;
}
#endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */


	__checkReturn	efx_rc_t
rhead_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	_NOTE(ARGUNUSED(eep, us))

	return (ENOTSUP);
}


#if EFSYS_OPT_QSTATS
			void
rhead_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */

static	__checkReturn	boolean_t
rhead_ev_rx_packets(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t label;
	uint32_t num_packets;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_Q_LABEL);

	/*
	 * On EF100 the EV_RX event reports the number of received
	 * packets (unlike EF10 which reports a descriptor index).
	 * The client driver is responsible for maintaining the Rx
	 * descriptor index, and computing how many descriptors are
	 * occupied by each received packet (based on the Rx buffer size
	 * and the packet length from the Rx prefix).
	 */
	num_packets = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_NUM_PKT);

	/*
	 * The receive event may indicate more than one packet, and so
	 * does not contain the packet length. Read the packet length
	 * from the prefix when handling each packet.
	 */
	EFSYS_ASSERT(eecp->eec_rx_packets != NULL);
	should_abort = eecp->eec_rx_packets(arg, label, num_packets,
	    EFX_PKT_PREFIX_LEN);

	return (should_abort);
}

static	__checkReturn	boolean_t
rhead_ev_tx_completion(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t num_descs;
	uint32_t label;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_TX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_Q_LABEL);

	/*
	 * On EF100 the EV_TX event reports the number of completed Tx
	 * descriptors (on EF10, the event reports the low bits of the
	 * index of the last completed descriptor).
	 * The client driver completion callback will compute the
	 * descriptor index, so that is not needed here.
	 */
	num_descs = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_NUM_DESC);

	EFSYS_PROBE2(tx_ndescs, uint32_t, label, unsigned int, num_descs);

	EFSYS_ASSERT(eecp->eec_tx_ndescs != NULL);
	should_abort = eecp->eec_tx_ndescs(arg, label, num_descs);

	return (should_abort);
}

static	__checkReturn	boolean_t
rhead_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	boolean_t ret;

	/*
	 * The event format was changed after Riverhead R1, and the MCDI
	 * event layout on EF100 is now exactly the same as on EF10,
	 * except for an added QDMA phase bit which is unused on EF10.
	 */
	ret = ef10_ev_mcdi(eep, eqp, eecp, arg);

	return (ret);
}

#endif	/* EFSYS_OPT_RIVERHEAD */