xref: /dpdk/drivers/common/sfc_efx/base/rhead_ev.c (revision f5057be340e44f3edc0fe90fa875eb89a4c49b4f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  * Copyright(c) 2019-2020 Xilinx, Inc.
4  * Copyright(c) 2018-2019 Solarflare Communications Inc.
5  */
6 
7 #include "efx.h"
8 #include "efx_impl.h"
9 
10 #if EFSYS_OPT_RIVERHEAD
11 
/*
 * Non-interrupting event queue requires interrupting event queue to
 * refer to for wake-up events even if wake ups are never used.
 * It could be even non-allocated event queue.
 */
#define	EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)

/*
 * Forward declarations of the per-event-type handlers.  These are
 * installed in the EvQ handler table by rhead_ev_qcreate() and invoked
 * indirectly from rhead_ev_dispatch().
 */
static			boolean_t
rhead_ev_dispatch(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
rhead_ev_rx_packets(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
rhead_ev_tx_completion(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
rhead_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

/* Handlers for 256-bit (extended width) event queue entries. */
#if EFSYS_OPT_EV_EXTENDED_WIDTH
static			boolean_t
rhead_ev_ew_dispatch(
	__in		efx_evq_t *eep,
	__in		efx_xword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static			void
rhead_ev_ew_qpoll(
	__in		efx_evq_t *eep,
	__inout		unsigned int *countp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

#if EFSYS_OPT_DESC_PROXY
static			boolean_t
rhead_ev_ew_txq_desc(
	__in		efx_evq_t *eep,
	__in		efx_xword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static			boolean_t
rhead_ev_ew_virtq_desc(
	__in		efx_evq_t *eep,
	__in		efx_xword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);
#endif /* EFSYS_OPT_DESC_PROXY */
#endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */
78 
79 
/*
 * Riverhead event module initialisation.  There is no per-NIC state to
 * set up, so this is a no-op that exists to satisfy the common EV
 * method interface.  Always returns 0.
 */
	__checkReturn	efx_rc_t
rhead_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))

	return (0);
}
88 
/*
 * Riverhead event module teardown.  Counterpart of rhead_ev_init();
 * nothing was allocated there, so nothing is released here.
 */
			void
rhead_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}
95 
/*
 * Create an event queue via MCDI INIT_EVQ.
 *
 * Validates that the caller-provided DMA region (esmp) is large enough
 * for ndescs descriptors, installs the per-event-type handler table on
 * the EvQ, chooses the interrupt vector, and issues the MCDI request.
 *
 * Returns 0 on success; EINVAL if the buffer is too small; otherwise
 * the error code from efx_mcdi_init_evq().
 */
	__checkReturn	efx_rc_t
rhead_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
	size_t desc_size;
	uint32_t irq;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */

	/* Extended width event queues use larger descriptors. */
	desc_size = encp->enc_ev_desc_size;
#if EFSYS_OPT_EV_EXTENDED_WIDTH
	if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH)
		desc_size = encp->enc_ev_ew_desc_size;
#endif
	EFSYS_ASSERT(desc_size != 0);

	if (EFSYS_MEM_SIZE(esmp) < (ndescs * desc_size)) {
		/* Buffer too small for event queue descriptors */
		rc = EINVAL;
		goto fail1;
	}

	/* Set up the handler table */
	eep->ee_rx	= rhead_ev_rx_packets;
	eep->ee_tx	= rhead_ev_tx_completion;
	eep->ee_driver	= NULL; /* FIXME */
	eep->ee_drv_gen	= NULL; /* FIXME */
	eep->ee_mcdi	= rhead_ev_mcdi;

#if EFSYS_OPT_DESC_PROXY
	eep->ee_ew_txq_desc	= rhead_ev_ew_txq_desc;
	eep->ee_ew_virtq_desc	= rhead_ev_ew_virtq_desc;
#endif /* EFSYS_OPT_DESC_PROXY */

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		irq = index;
	} else if (index == EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		/*
		 * The always-interrupting queue must really interrupt,
		 * so force the notify mode even though the caller did
		 * not request it.
		 */
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		/*
		 * Non-interrupting queues still reference an
		 * interrupting queue for wake-ups (see the comment at
		 * EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX).
		 */
		irq = EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */
	rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
	    B_FALSE);
	if (rc != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
170 
171 			void
172 rhead_ev_qdestroy(
173 	__in		efx_evq_t *eep)
174 {
175 	efx_nic_t *enp = eep->ee_enp;
176 
177 	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_RIVERHEAD);
178 
179 	(void) efx_mcdi_fini_evq(enp, eep->ee_index);
180 }
181 
182 	__checkReturn	efx_rc_t
183 rhead_ev_qprime(
184 	__in		efx_evq_t *eep,
185 	__in		unsigned int count)
186 {
187 	efx_nic_t *enp = eep->ee_enp;
188 	uint32_t rptr;
189 	efx_dword_t dword;
190 
191 	rptr = count & eep->ee_mask;
192 
193 	EFX_POPULATE_DWORD_2(dword, ERF_GZ_EVQ_ID, eep->ee_index,
194 	    ERF_GZ_IDX, rptr);
195 	/* EVQ_INT_PRIME lives function control window only on Riverhead */
196 	EFX_BAR_FCW_WRITED(enp, ER_GZ_EVQ_INT_PRIME, &dword);
197 
198 	return (0);
199 }
200 
/*
 * Post a software (driver generated) event to the queue.
 * Not implemented on Riverhead; asserts in debug builds and is a
 * silent no-op otherwise.
 */
			void
rhead_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	_NOTE(ARGUNUSED(eep, data))

	/* Not implemented yet */
	EFSYS_ASSERT(B_FALSE);
}
211 
212 static	__checkReturn	boolean_t
213 rhead_ev_dispatch(
214 	__in		efx_evq_t *eep,
215 	__in		efx_qword_t *eventp,
216 	__in		const efx_ev_callbacks_t *eecp,
217 	__in_opt	void *arg)
218 {
219 	boolean_t should_abort;
220 	uint32_t code;
221 
222 	code = EFX_QWORD_FIELD(*eventp, ESF_GZ_E_TYPE);
223 	switch (code) {
224 	case ESE_GZ_EF100_EV_RX_PKTS:
225 		should_abort = eep->ee_rx(eep, eventp, eecp, arg);
226 		break;
227 	case ESE_GZ_EF100_EV_TX_COMPLETION:
228 		should_abort = eep->ee_tx(eep, eventp, eecp, arg);
229 		break;
230 	case ESE_GZ_EF100_EV_MCDI:
231 		should_abort = eep->ee_mcdi(eep, eventp, eecp, arg);
232 		break;
233 	default:
234 		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
235 		    uint32_t, EFX_QWORD_FIELD(*eventp, EFX_DWORD_1),
236 		    uint32_t, EFX_QWORD_FIELD(*eventp, EFX_DWORD_0));
237 
238 		EFSYS_ASSERT(eecp->eec_exception != NULL);
239 		(void) eecp->eec_exception(arg, EFX_EXCEPTION_EV_ERROR, code);
240 		should_abort = B_TRUE;
241 		break;
242 	}
243 
244 	return (should_abort);
245 }
246 
/*
 * Poll event queue in batches. Size of the batch is equal to cache line
 * size divided by event size.
 *
 * Event queue is written by NIC and read by CPU. If CPU starts reading
 * of events on the cache line, read all remaining events in a tight
 * loop while event is present.
 */
#define	EF100_EV_BATCH	8

/*
 * Check if event is present.
 *
 * Riverhead EvQs use a phase bit to indicate the presence of valid events,
 * by flipping the phase bit on each wrap of the write index.
 *
 * An entry is valid when its phase bit matches the phase the consumer
 * expects for the current pass over the ring (see rhead_ev_qpoll()).
 */
#define	EF100_EV_PRESENT(_qword, _phase_bit)				\
	(EFX_QWORD_FIELD((_qword), ESF_GZ_EV_EVQ_PHASE) == _phase_bit)
265 
/*
 * Poll the event queue and dispatch each valid event until either no
 * more events are present or a handler requests abort.
 *
 * *countp is the caller-maintained running event counter: on entry it
 * determines the queue read index and the expected phase bit; on exit
 * it has been advanced by the number of events processed.
 */
			void
rhead_ev_qpoll(
	__in		efx_evq_t *eep,
	__inout		unsigned int *countp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_qword_t ev[EF100_EV_BATCH];
	unsigned int batch;
	unsigned int phase_bit;
	unsigned int total;
	unsigned int count;
	unsigned int index;
	size_t offset;

#if EFSYS_OPT_EV_EXTENDED_WIDTH
	/* Extended width queues have a separate poll routine. */
	if (eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) {
		rhead_ev_ew_qpoll(eep, countp, eecp, arg);
		return;
	}
#endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */

	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
	EFSYS_ASSERT(countp != NULL);
	EFSYS_ASSERT(eecp != NULL);

	count = *countp;
	do {
		/* Read up until the end of the batch period */
		batch = EF100_EV_BATCH - (count & (EF100_EV_BATCH - 1));
		/*
		 * The expected phase flips on every wrap of the ring
		 * (ee_mask + 1 is the ring size, a power of two).
		 */
		phase_bit = (count & (eep->ee_mask + 1)) != 0;
		offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
		for (total = 0; total < batch; ++total) {
			EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));

			/* Stop at the first entry the NIC has not written. */
			if (!EF100_EV_PRESENT(ev[total], phase_bit))
				break;

			EFSYS_PROBE3(event, unsigned int, eep->ee_index,
			    uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));

			offset += sizeof (efx_qword_t);
		}

		/* Process the batch of events */
		for (index = 0; index < total; ++index) {
			boolean_t should_abort;

			EFX_EV_QSTAT_INCR(eep, EV_ALL);

			should_abort =
			    rhead_ev_dispatch(eep, &(ev[index]), eecp, arg);

			if (should_abort) {
				/* Ignore subsequent events */
				total = index + 1;

				/*
				 * Poison batch to ensure the outer
				 * loop is broken out of.
				 */
				EFSYS_ASSERT(batch <= EF100_EV_BATCH);
				batch += (EF100_EV_BATCH << 1);
				EFSYS_ASSERT(total != batch);
				break;
			}
		}

		/*
		 * There is no necessity to clear processed events since
		 * phase bit which is flipping on each write index wrap
		 * is used for event presence indication.
		 */

		count += total;

	} while (total == batch);

	*countp = count;
}
347 
348 #if EFSYS_OPT_EV_EXTENDED_WIDTH
349 static			boolean_t
350 rhead_ev_ew_dispatch(
351 	__in		efx_evq_t *eep,
352 	__in		efx_xword_t *eventp,
353 	__in		const efx_ev_callbacks_t *eecp,
354 	__in_opt	void *arg)
355 {
356 	boolean_t should_abort;
357 	uint32_t code;
358 
359 	EFSYS_ASSERT((eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) != 0);
360 
361 	code = EFX_XWORD_FIELD(*eventp, ESF_GZ_EV_256_EV32_TYPE);
362 	switch (code) {
363 	case ESE_GZ_EF100_EVEW_64BIT:
364 		/* NOTE: ignore phase bit in encapsulated 64bit event. */
365 		should_abort =
366 		    rhead_ev_dispatch(eep, &eventp->ex_qword[0], eecp, arg);
367 		break;
368 
369 #if EFSYS_OPT_DESC_PROXY
370 	case ESE_GZ_EF100_EVEW_TXQ_DESC:
371 		should_abort = eep->ee_ew_txq_desc(eep, eventp, eecp, arg);
372 		break;
373 
374 	case ESE_GZ_EF100_EVEW_VIRTQ_DESC:
375 		should_abort = eep->ee_ew_virtq_desc(eep, eventp, eecp, arg);
376 		break;
377 #endif /* EFSYS_OPT_DESC_PROXY */
378 
379 	default:
380 		/* Omit currently unused reserved bits from the probe. */
381 		EFSYS_PROBE7(ew_bad_event, unsigned int, eep->ee_index,
382 		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_7),
383 		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_4),
384 		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_3),
385 		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_2),
386 		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_1),
387 		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_0));
388 
389 		EFSYS_ASSERT(eecp->eec_exception != NULL);
390 		(void) eecp->eec_exception(arg, EFX_EXCEPTION_EV_ERROR, code);
391 		should_abort = B_TRUE;
392 	}
393 
394 	return (should_abort);
395 }
396 
/*
 * Poll extended width event queue. Size of the batch is equal to cache line
 * size divided by event size.
 */
#define	EF100_EV_EW_BATCH	2

/*
 * Check if event is present.
 *
 * Riverhead EvQs use a phase bit to indicate the presence of valid events,
 * by flipping the phase bit on each wrap of the write index.
 *
 * Extended width variant of EF100_EV_PRESENT(), reading the phase bit
 * from the 256-bit event layout.
 */
#define	EF100_EV_EW_PRESENT(_xword, _phase_bit)				\
	(EFX_XWORD_FIELD((_xword), ESF_GZ_EV_256_EV32_PHASE) == (_phase_bit))
411 
/*
 * Poll an extended width (256-bit entry) event queue and dispatch each
 * valid event until either no more events are present or a handler
 * requests abort.  Structure mirrors rhead_ev_qpoll(); only the entry
 * size, batch size and dispatcher differ.
 *
 * *countp is the caller-maintained running event counter, used to
 * derive the read index and expected phase bit, and advanced by the
 * number of events processed.
 */
static			void
rhead_ev_ew_qpoll(
	__in		efx_evq_t *eep,
	__inout		unsigned int *countp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_xword_t ev[EF100_EV_EW_BATCH];
	unsigned int batch;
	unsigned int phase_bit;
	unsigned int total;
	unsigned int count;
	unsigned int index;
	size_t offset;

	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
	EFSYS_ASSERT((eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) != 0);
	EFSYS_ASSERT(countp != NULL);
	EFSYS_ASSERT(eecp != NULL);

	count = *countp;
	do {
		/* Read up until the end of the batch period */
		batch = EF100_EV_EW_BATCH - (count & (EF100_EV_EW_BATCH - 1));
		/* Expected phase flips on every wrap of the ring. */
		phase_bit = (count & (eep->ee_mask + 1)) != 0;
		offset = (count & eep->ee_mask) * sizeof (efx_xword_t);
		for (total = 0; total < batch; ++total) {
			EFSYS_MEM_READX(eep->ee_esmp, offset, &(ev[total]));

			/* Stop at the first entry the NIC has not written. */
			if (!EF100_EV_EW_PRESENT(ev[total], phase_bit))
				break;

			/* Omit unused reserved bits from the probe. */
			EFSYS_PROBE7(ew_event, unsigned int, eep->ee_index,
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_7),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_4),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_3),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_2),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_1),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_0));

			offset += sizeof (efx_xword_t);
		}

		/* Process the batch of events */
		for (index = 0; index < total; ++index) {
			boolean_t should_abort;

			EFX_EV_QSTAT_INCR(eep, EV_ALL);

			should_abort =
			    rhead_ev_ew_dispatch(eep, &(ev[index]), eecp, arg);

			if (should_abort) {
				/* Ignore subsequent events */
				total = index + 1;

				/*
				 * Poison batch to ensure the outer
				 * loop is broken out of.
				 */
				EFSYS_ASSERT(batch <= EF100_EV_EW_BATCH);
				batch += (EF100_EV_EW_BATCH << 1);
				EFSYS_ASSERT(total != batch);
				break;
			}
		}

		/*
		 * There is no necessity to clear processed events since
		 * phase bit which is flipping on each write index wrap
		 * is used for event presence indication.
		 */

		count += total;

	} while (total == batch);

	*countp = count;
}
492 #endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */
493 
494 
/*
 * Change interrupt moderation for a live event queue.  Runtime
 * re-moderation is not supported here; moderation is only set at queue
 * creation via the 'us' argument of rhead_ev_qcreate().  Always
 * returns ENOTSUP.
 */
	__checkReturn	efx_rc_t
rhead_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	_NOTE(ARGUNUSED(eep, us))

	return (ENOTSUP);
}
504 
505 
#if EFSYS_OPT_QSTATS
/*
 * Fold the per-queue software event counters into the caller's
 * statistics array and reset them ready for the next interval.
 */
			void
rhead_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int idx;

	for (idx = 0; idx < EV_NQSTATS; idx++) {
		EFSYS_STAT_INCR(&stat[idx], eep->ee_stat[idx]);
		eep->ee_stat[idx] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */
522 
523 static	__checkReturn	boolean_t
524 rhead_ev_rx_packets(
525 	__in		efx_evq_t *eep,
526 	__in		efx_qword_t *eqp,
527 	__in		const efx_ev_callbacks_t *eecp,
528 	__in_opt	void *arg)
529 {
530 	efx_nic_t *enp = eep->ee_enp;
531 	uint32_t label;
532 	uint32_t num_packets;
533 	boolean_t should_abort;
534 
535 	EFX_EV_QSTAT_INCR(eep, EV_RX);
536 
537 	/* Discard events after RXQ/TXQ errors, or hardware not available */
538 	if (enp->en_reset_flags &
539 	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
540 		return (B_FALSE);
541 
542 	label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_Q_LABEL);
543 
544 	/*
545 	 * On EF100 the EV_RX event reports the number of received
546 	 * packets (unlike EF10 which reports a descriptor index).
547 	 * The client driver is responsible for maintaining the Rx
548 	 * descriptor index, and computing how many descriptors are
549 	 * occupied by each received packet (based on the Rx buffer size
550 	 * and the packet length from the Rx prefix).
551 	 */
552 	num_packets = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_NUM_PKT);
553 
554 	/*
555 	 * The receive event may indicate more than one packet, and so
556 	 * does not contain the packet length. Read the packet length
557 	 * from the prefix when handling each packet.
558 	 */
559 	EFSYS_ASSERT(eecp->eec_rx_packets != NULL);
560 	should_abort = eecp->eec_rx_packets(arg, label, num_packets,
561 	    EFX_PKT_PREFIX_LEN);
562 
563 	return (should_abort);
564 }
565 
566 static	__checkReturn	boolean_t
567 rhead_ev_tx_completion(
568 	__in		efx_evq_t *eep,
569 	__in		efx_qword_t *eqp,
570 	__in		const efx_ev_callbacks_t *eecp,
571 	__in_opt	void *arg)
572 {
573 	efx_nic_t *enp = eep->ee_enp;
574 	uint32_t num_descs;
575 	uint32_t label;
576 	boolean_t should_abort;
577 
578 	EFX_EV_QSTAT_INCR(eep, EV_TX);
579 
580 	/* Discard events after RXQ/TXQ errors, or hardware not available */
581 	if (enp->en_reset_flags &
582 	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
583 		return (B_FALSE);
584 
585 	label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_Q_LABEL);
586 
587 	/*
588 	 * On EF100 the EV_TX event reports the number of completed Tx
589 	 * descriptors (on EF10, the event reports the low bits of the
590 	 * index of the last completed descriptor).
591 	 * The client driver completion callback will compute the
592 	 * descriptor index, so that is not needed here.
593 	 */
594 	num_descs = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_NUM_DESC);
595 
596 	EFSYS_PROBE2(tx_ndescs, uint32_t, label, unsigned int, num_descs);
597 
598 	EFSYS_ASSERT(eecp->eec_tx_ndescs != NULL);
599 	should_abort = eecp->eec_tx_ndescs(arg, label, num_descs);
600 
601 	return (should_abort);
602 }
603 
604 static	__checkReturn	boolean_t
605 rhead_ev_mcdi(
606 	__in		efx_evq_t *eep,
607 	__in		efx_qword_t *eqp,
608 	__in		const efx_ev_callbacks_t *eecp,
609 	__in_opt	void *arg)
610 {
611 	boolean_t ret;
612 
613 	/*
614 	 * Event format was changed post Riverhead R1 and now
615 	 * MCDI event layout on EF100 is exactly the same as on EF10
616 	 * except added QDMA phase bit which is unused on EF10.
617 	 */
618 	ret = ef10_ev_mcdi(eep, eqp, eecp, arg);
619 
620 	return (ret);
621 }
622 
#if EFSYS_OPT_DESC_PROXY
/*
 * Handle a descriptor proxy TXQ descriptor event: extract the VI id
 * and forward the raw descriptor to the client callback.  Returns the
 * callback's abort request.
 */
static			boolean_t
rhead_ev_ew_txq_desc(
	__in		efx_evq_t *eep,
	__in		efx_xword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint16_t vi;
	efx_oword_t raw_desc;

	_NOTE(ARGUNUSED(eep))

	vi = EFX_XWORD_FIELD(*eventp, ESF_GZ_EV_TXQ_DP_VI_ID);

	/*
	 * NOTE: This is the raw descriptor data, and has not been converted
	 * to host endian. The handler callback must use the EFX_OWORD macros
	 * to extract the descriptor fields as host endian values.
	 */
	raw_desc = eventp->ex_oword[0];

	EFSYS_ASSERT(eecp->eec_desc_proxy_txq_desc != NULL);
	return (eecp->eec_desc_proxy_txq_desc(arg, vi, raw_desc));
}
#endif /* EFSYS_OPT_DESC_PROXY */
652 
653 
#if EFSYS_OPT_DESC_PROXY
/*
 * Handle a descriptor proxy VIRTQ descriptor event: extract the VI id
 * and available-ring entry, then forward the raw descriptor to the
 * client callback.  Returns the callback's abort request.
 */
static			boolean_t
rhead_ev_ew_virtq_desc(
	__in		efx_evq_t *eep,
	__in		efx_xword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint16_t vi;
	uint16_t avail_entry;
	efx_oword_t raw_desc;

	_NOTE(ARGUNUSED(eep))

	vi = EFX_XWORD_FIELD(*eventp, ESF_GZ_EV_VQ_DP_VI_ID);
	avail_entry = EFX_XWORD_FIELD(*eventp, ESF_GZ_EV_VQ_DP_AVAIL_ENTRY);

	/*
	 * NOTE: This is the raw descriptor data, and has not been converted
	 * to host endian. The handler callback must use the EFX_OWORD macros
	 * to extract the descriptor fields as host endian values.
	 */
	raw_desc = eventp->ex_oword[0];

	EFSYS_ASSERT(eecp->eec_desc_proxy_virtq_desc != NULL);
	return (eecp->eec_desc_proxy_virtq_desc(arg, vi, avail_entry,
	    raw_desc));
}
#endif /* EFSYS_OPT_DESC_PROXY */
686 
687 #endif	/* EFSYS_OPT_RIVERHEAD */
688