/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2018-2019 Solarflare Communications Inc.
 */

#include "efx.h"
#include "efx_impl.h"

#if EFSYS_OPT_RIVERHEAD

/*
 * A non-interrupting event queue requires an interrupting event queue
 * to refer to for wake-up events, even if wake-ups are never used.
 * It may even be an unallocated event queue.
 */
#define	EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)
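
/*
 * For example, in rhead_ev_qcreate() below, any event queue created
 * without EFX_EVQ_FLAGS_NOTIFY_INTERRUPT passes this index to INIT_EVQ
 * as its wake-up interrupt vector.
 */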

static			boolean_t
rhead_ev_dispatch(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
rhead_ev_rx_packets(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
rhead_ev_tx_completion(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
rhead_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

#if EFSYS_OPT_EV_EXTENDED_WIDTH
static			boolean_t
rhead_ev_ew_dispatch(
	__in		efx_evq_t *eep,
	__in		efx_xword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static			void
rhead_ev_ew_qpoll(
	__in		efx_evq_t *eep,
	__inout		unsigned int *countp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);
#endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */


	__checkReturn	efx_rc_t
rhead_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))

	return (0);
}

			void
rhead_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
rhead_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
	size_t desc_size;
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */

	desc_size = encp->enc_ev_desc_size;
#if EFSYS_OPT_EV_EXTENDED_WIDTH
	if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH)
		desc_size = encp->enc_ev_ew_desc_size;
#endif
	EFSYS_ASSERT(desc_size != 0);

	if (EFSYS_MEM_SIZE(esmp) < (ndescs * desc_size)) {
		/* Buffer too small for event queue descriptors */
		rc = EINVAL;
		goto fail1;
	}

	/* Set up the handler table */
	eep->ee_rx	= rhead_ev_rx_packets;
	eep->ee_tx	= rhead_ev_tx_completion;
	eep->ee_driver	= NULL; /* FIXME */
	eep->ee_drv_gen	= NULL; /* FIXME */
	eep->ee_mcdi	= rhead_ev_mcdi;

	/* Set up the event queue */
	/* INIT_EVQ expects a function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		irq = index;
	} else if (index == EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		irq = EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */
	rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
	    B_FALSE);
	if (rc != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
rhead_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_RIVERHEAD);

	(void) efx_mcdi_fini_evq(enp, eep->ee_index);
}

	__checkReturn	efx_rc_t
rhead_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	rptr = count & eep->ee_mask;

	EFX_POPULATE_DWORD_2(dword, ERF_GZ_EVQ_ID, eep->ee_index,
	    ERF_GZ_IDX, rptr);
	/* EVQ_INT_PRIME lives in the function control window only on Riverhead */
	EFX_BAR_FCW_WRITED(enp, ER_GZ_EVQ_INT_PRIME, &dword);

	return (0);
}

			void
rhead_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	_NOTE(ARGUNUSED(eep, data))

	/* Not implemented yet */
	EFSYS_ASSERT(B_FALSE);
}

static	__checkReturn	boolean_t
rhead_ev_dispatch(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	boolean_t should_abort;
	uint32_t code;

	code = EFX_QWORD_FIELD(*eventp, ESF_GZ_E_TYPE);
	switch (code) {
	case ESE_GZ_EF100_EV_RX_PKTS:
		should_abort = eep->ee_rx(eep, eventp, eecp, arg);
		break;
	case ESE_GZ_EF100_EV_TX_COMPLETION:
		should_abort = eep->ee_tx(eep, eventp, eecp, arg);
		break;
	case ESE_GZ_EF100_EV_MCDI:
		should_abort = eep->ee_mcdi(eep, eventp, eecp, arg);
		break;
	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eventp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eventp, EFX_DWORD_0));

		EFSYS_ASSERT(eecp->eec_exception != NULL);
		(void) eecp->eec_exception(arg, EFX_EXCEPTION_EV_ERROR, code);
		should_abort = B_TRUE;
		break;
	}

	return (should_abort);
}

/*
 * Poll the event queue in batches. The batch size is equal to the cache
 * line size divided by the event size.
 *
 * The event queue is written by the NIC and read by the CPU. Once the CPU
 * starts reading events from a cache line, it reads all remaining events
 * on that line in a tight loop while events are present.
 */
#define	EF100_EV_BATCH	8
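
/*
 * A normal width event is an efx_qword_t (8 bytes), so a batch of 8
 * events covers a typical 64-byte cache line.
 */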

/*
 * Check if event is present.
 *
 * Riverhead EvQs use a phase bit to indicate the presence of valid events,
 * by flipping the phase bit on each wrap of the write index.
 */
#define	EF100_EV_PRESENT(_qword, _phase_bit)				\
	(EFX_QWORD_FIELD((_qword), ESF_GZ_EV_EVQ_PHASE) == _phase_bit)
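
/*
 * For example, with a 512-entry queue (ee_mask == 0x1ff), the expected
 * phase for a read count value 'count' is ((count & 0x200) != 0), i.e.
 * it flips each time the read pointer wraps; see the phase_bit
 * computation in rhead_ev_qpoll() below.
 */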

			void
rhead_ev_qpoll(
	__in		efx_evq_t *eep,
	__inout		unsigned int *countp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_qword_t ev[EF100_EV_BATCH];
	unsigned int batch;
	unsigned int phase_bit;
	unsigned int total;
	unsigned int count;
	unsigned int index;
	size_t offset;

#if EFSYS_OPT_EV_EXTENDED_WIDTH
	if (eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) {
		rhead_ev_ew_qpoll(eep, countp, eecp, arg);
		return;
	}
#endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */

	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
	EFSYS_ASSERT(countp != NULL);
	EFSYS_ASSERT(eecp != NULL);

	count = *countp;
	do {
		/* Read up until the end of the batch period */
		batch = EF100_EV_BATCH - (count & (EF100_EV_BATCH - 1));
		phase_bit = (count & (eep->ee_mask + 1)) != 0;
		offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
		for (total = 0; total < batch; ++total) {
			EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));

			if (!EF100_EV_PRESENT(ev[total], phase_bit))
				break;

			EFSYS_PROBE3(event, unsigned int, eep->ee_index,
			    uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));

			offset += sizeof (efx_qword_t);
		}

		/* Process the batch of events */
		for (index = 0; index < total; ++index) {
			boolean_t should_abort;

			EFX_EV_QSTAT_INCR(eep, EV_ALL);

			should_abort =
			    rhead_ev_dispatch(eep, &(ev[index]), eecp, arg);

			if (should_abort) {
				/* Ignore subsequent events */
				total = index + 1;

				/*
				 * Poison batch to ensure the outer
				 * loop is broken out of.
				 */
				EFSYS_ASSERT(batch <= EF100_EV_BATCH);
				batch += (EF100_EV_BATCH << 1);
				EFSYS_ASSERT(total != batch);
				break;
			}
		}

		/*
		 * There is no need to clear processed events since the
		 * phase bit, which flips on each write index wrap, is
		 * used to indicate event presence.
		 */

		count += total;

	} while (total == batch);

	*countp = count;
}

#if EFSYS_OPT_EV_EXTENDED_WIDTH
static			boolean_t
rhead_ev_ew_dispatch(
	__in		efx_evq_t *eep,
	__in		efx_xword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	boolean_t should_abort;
	uint32_t code;

	EFSYS_ASSERT((eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) != 0);

	code = EFX_XWORD_FIELD(*eventp, ESF_GZ_EV_256_EV32_TYPE);
	switch (code) {
	case ESE_GZ_EF100_EVEW_64BIT:
		/* NOTE: ignore phase bit in encapsulated 64bit event. */
		should_abort =
		    rhead_ev_dispatch(eep, &eventp->ex_qword[0], eecp, arg);
		break;

	default:
		/* Omit currently unused reserved bits from the probe. */
		EFSYS_PROBE7(ew_bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_7),
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_4),
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_3),
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_2),
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_1),
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_0));

		EFSYS_ASSERT(eecp->eec_exception != NULL);
		(void) eecp->eec_exception(arg, EFX_EXCEPTION_EV_ERROR, code);
		should_abort = B_TRUE;
	}

	return (should_abort);
}

/*
 * Poll an extended width event queue. The batch size is equal to the
 * cache line size divided by the event size.
 */
#define	EF100_EV_EW_BATCH	2
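
/*
 * An extended width event is an efx_xword_t (256 bits, 32 bytes), so a
 * batch of 2 events covers a typical 64-byte cache line.
 */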

/*
 * Check if event is present.
 *
 * Riverhead EvQs use a phase bit to indicate the presence of valid events,
 * by flipping the phase bit on each wrap of the write index.
 */
#define	EF100_EV_EW_PRESENT(_xword, _phase_bit)				\
	(EFX_XWORD_FIELD((_xword), ESF_GZ_EV_256_EV32_PHASE) == (_phase_bit))

static			void
rhead_ev_ew_qpoll(
	__in		efx_evq_t *eep,
	__inout		unsigned int *countp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_xword_t ev[EF100_EV_EW_BATCH];
	unsigned int batch;
	unsigned int phase_bit;
	unsigned int total;
	unsigned int count;
	unsigned int index;
	size_t offset;

	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
	EFSYS_ASSERT((eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) != 0);
	EFSYS_ASSERT(countp != NULL);
	EFSYS_ASSERT(eecp != NULL);

	count = *countp;
	do {
		/* Read up until the end of the batch period */
		batch = EF100_EV_EW_BATCH - (count & (EF100_EV_EW_BATCH - 1));
		phase_bit = (count & (eep->ee_mask + 1)) != 0;
		offset = (count & eep->ee_mask) * sizeof (efx_xword_t);
		for (total = 0; total < batch; ++total) {
			EFSYS_MEM_READX(eep->ee_esmp, offset, &(ev[total]));

			if (!EF100_EV_EW_PRESENT(ev[total], phase_bit))
				break;

			/* Omit unused reserved bits from the probe. */
			EFSYS_PROBE7(ew_event, unsigned int, eep->ee_index,
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_7),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_4),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_3),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_2),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_1),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_0));

			offset += sizeof (efx_xword_t);
		}

		/* Process the batch of events */
		for (index = 0; index < total; ++index) {
			boolean_t should_abort;

			EFX_EV_QSTAT_INCR(eep, EV_ALL);

			should_abort =
			    rhead_ev_ew_dispatch(eep, &(ev[index]), eecp, arg);

			if (should_abort) {
				/* Ignore subsequent events */
				total = index + 1;

				/*
				 * Poison batch to ensure the outer
				 * loop is broken out of.
				 */
				EFSYS_ASSERT(batch <= EF100_EV_EW_BATCH);
				batch += (EF100_EV_EW_BATCH << 1);
				EFSYS_ASSERT(total != batch);
				break;
			}
		}

		/*
		 * There is no need to clear processed events since the
		 * phase bit, which flips on each write index wrap, is
		 * used to indicate event presence.
		 */

		count += total;

	} while (total == batch);

	*countp = count;
}
#endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */


	__checkReturn	efx_rc_t
rhead_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	_NOTE(ARGUNUSED(eep, us))

	return (ENOTSUP);
}


#if EFSYS_OPT_QSTATS
			void
rhead_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */

static	__checkReturn	boolean_t
rhead_ev_rx_packets(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t label;
	uint32_t num_packets;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_Q_LABEL);

	/*
	 * On EF100 the EV_RX event reports the number of received
	 * packets (unlike EF10 which reports a descriptor index).
	 * The client driver is responsible for maintaining the Rx
	 * descriptor index, and computing how many descriptors are
	 * occupied by each received packet (based on the Rx buffer size
	 * and the packet length from the Rx prefix).
	 */
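
	/*
	 * Illustrative sketch (not part of this driver): with Rx scatter
	 * and fixed-size buffers, a client driver might account for
	 * descriptors roughly as
	 *
	 *	ndescs = (pkt_len + buf_size - 1) / buf_size;
	 *	rxq_completed = (rxq_completed + ndescs) & rxq_mask;
	 *
	 * where pkt_len comes from the Rx prefix of each packet, and
	 * buf_size, rxq_completed and rxq_mask are hypothetical client
	 * driver state.
	 */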
	num_packets = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_NUM_PKT);

	/*
	 * The receive event may indicate more than one packet, and so
	 * does not contain the packet length. Read the packet length
	 * from the prefix when handling each packet.
	 */
	EFSYS_ASSERT(eecp->eec_rx_packets != NULL);
	should_abort = eecp->eec_rx_packets(arg, label, num_packets,
	    EFX_PKT_PREFIX_LEN);

	return (should_abort);
}

static	__checkReturn	boolean_t
rhead_ev_tx_completion(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t num_descs;
	uint32_t label;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_TX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_Q_LABEL);

	/*
	 * On EF100 the EV_TX event reports the number of completed Tx
	 * descriptors (on EF10, the event reports the low bits of the
	 * index of the last completed descriptor).
	 * The client driver completion callback will compute the
	 * descriptor index, so that is not needed here.
	 */
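
	/*
	 * Illustrative sketch (not part of this driver): a client driver
	 * completion callback might simply advance its completion index as
	 *
	 *	txq_completed = (txq_completed + num_descs) & txq_mask;
	 *
	 * where txq_completed and txq_mask are hypothetical client driver
	 * state.
	 */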
	num_descs = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_NUM_DESC);

	EFSYS_PROBE2(tx_ndescs, uint32_t, label, unsigned int, num_descs);

	EFSYS_ASSERT(eecp->eec_tx_ndescs != NULL);
	should_abort = eecp->eec_tx_ndescs(arg, label, num_descs);

	return (should_abort);
}

static	__checkReturn	boolean_t
rhead_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	boolean_t ret;

	/*
	 * The event format changed after Riverhead R1; the MCDI event
	 * layout on EF100 is now exactly the same as on EF10, except for
	 * the added QDMA phase bit, which is unused on EF10.
	 */
	ret = ef10_ev_mcdi(eep, eqp, eecp, arg);

	return (ret);
}

#endif	/* EFSYS_OPT_RIVERHEAD */