/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2021 Xilinx, Inc.
 * Copyright(c) 2018-2019 Solarflare Communications Inc.
 */

#include "efx.h"
#include "efx_impl.h"

#if EFSYS_OPT_RIVERHEAD

/*
 * A non-interrupting event queue requires an interrupting event queue
 * to refer to for wake-up events, even if wake-ups are never used.
 * It may even be a non-allocated event queue.
 */
#define	EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)

static			boolean_t
rhead_ev_dispatch(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
rhead_ev_rx_packets(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
rhead_ev_tx_completion(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
rhead_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

#if EFSYS_OPT_EV_EXTENDED_WIDTH
static			boolean_t
rhead_ev_ew_dispatch(
	__in		efx_evq_t *eep,
	__in		efx_xword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static			void
rhead_ev_ew_qpoll(
	__in		efx_evq_t *eep,
	__inout		unsigned int *countp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

#if EFSYS_OPT_DESC_PROXY
static			boolean_t
rhead_ev_ew_txq_desc(
	__in		efx_evq_t *eep,
	__in		efx_xword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static			boolean_t
rhead_ev_ew_virtq_desc(
	__in		efx_evq_t *eep,
	__in		efx_xword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);
#endif /* EFSYS_OPT_DESC_PROXY */
#endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */


	__checkReturn	efx_rc_t
rhead_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))

	return (0);
}

			void
rhead_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
rhead_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		uint32_t irq,
	__in		efx_evq_t *eep)
{
	const efx_nic_cfg_t *encp = efx_nic_cfg_get(enp);
	size_t desc_size;
	uint32_t target_evq = 0;
	efx_rc_t rc;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */

	desc_size = encp->enc_ev_desc_size;
#if EFSYS_OPT_EV_EXTENDED_WIDTH
	if (flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH)
		desc_size = encp->enc_ev_ew_desc_size;
#endif
	EFSYS_ASSERT(desc_size != 0);

	if (EFSYS_MEM_SIZE(esmp) < (ndescs * desc_size)) {
		/* Buffer too small for event queue descriptors */
		rc = EINVAL;
		goto fail1;
	}

	/* Set up the handler table */
	eep->ee_rx	= rhead_ev_rx_packets;
	eep->ee_tx	= rhead_ev_tx_completion;
	eep->ee_driver	= NULL; /* FIXME */
	eep->ee_drv_gen	= NULL; /* FIXME */
	eep->ee_mcdi	= rhead_ev_mcdi;

#if EFSYS_OPT_DESC_PROXY
	eep->ee_ew_txq_desc	= rhead_ev_ew_txq_desc;
	eep->ee_ew_virtq_desc	= rhead_ev_ew_virtq_desc;
#endif /* EFSYS_OPT_DESC_PROXY */

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		/* IRQ number is specified by caller */
	} else if (index == EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		/* Use the first interrupt for always interrupting EvQ */
		irq = 0;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
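		/*
		 * Refer wake-ups of this non-interrupting EvQ to the
		 * always-interrupting EvQ (see comment at top of file).
		 */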
		target_evq = EFX_RHEAD_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */
	rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, target_evq, us,
	    flags, B_FALSE);
	if (rc != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
rhead_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(enp->en_family == EFX_FAMILY_RIVERHEAD);

	(void) efx_mcdi_fini_evq(enp, eep->ee_index);
}

	__checkReturn	efx_rc_t
rhead_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

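	/*
	 * Prime the EvQ by reporting the read index; this re-arms the
	 * wake-up interrupt for the queue.
	 */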
	rptr = count & eep->ee_mask;

	EFX_POPULATE_DWORD_2(dword, ERF_GZ_EVQ_ID, eep->ee_index,
	    ERF_GZ_IDX, rptr);
	/* On Riverhead, EVQ_INT_PRIME lives only in the function control window */
	EFX_BAR_FCW_WRITED(enp, ER_GZ_EVQ_INT_PRIME, &dword);

	return (0);
}

			void
rhead_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	_NOTE(ARGUNUSED(eep, data))

	/* Not implemented yet */
	EFSYS_ASSERT(B_FALSE);
}

static	__checkReturn	boolean_t
rhead_ev_dispatch(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	boolean_t should_abort;
	uint32_t code;

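	/*
	 * Demultiplex on the EF100 event type field; per-type handlers
	 * are installed in rhead_ev_qcreate().
	 */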
	code = EFX_QWORD_FIELD(*eventp, ESF_GZ_E_TYPE);
	switch (code) {
	case ESE_GZ_EF100_EV_RX_PKTS:
		should_abort = eep->ee_rx(eep, eventp, eecp, arg);
		break;
	case ESE_GZ_EF100_EV_TX_COMPLETION:
		should_abort = eep->ee_tx(eep, eventp, eecp, arg);
		break;
	case ESE_GZ_EF100_EV_MCDI:
		should_abort = eep->ee_mcdi(eep, eventp, eecp, arg);
		break;
	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eventp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eventp, EFX_DWORD_0));

		EFSYS_ASSERT(eecp->eec_exception != NULL);
		(void) eecp->eec_exception(arg, EFX_EXCEPTION_EV_ERROR, code);
		should_abort = B_TRUE;
		break;
	}

	return (should_abort);
}

/*
 * Poll the event queue in batches. The batch size is equal to the cache
 * line size divided by the event size.
 *
 * The event queue is written by the NIC and read by the CPU. Once the
 * CPU starts reading events from a cache line, it reads all remaining
 * events on that line in a tight loop while events are present.
 */
#define	EF100_EV_BATCH	8

/*
 * Check if an event is present.
 *
 * Riverhead EvQs use a phase bit to indicate the presence of valid events,
 * by flipping the phase bit on each wrap of the write index.
 */
#define	EF100_EV_PRESENT(_qword, _phase_bit)				\
	(EFX_QWORD_FIELD((_qword), ESF_GZ_EV_EVQ_PHASE) == _phase_bit)

			void
rhead_ev_qpoll(
	__in		efx_evq_t *eep,
	__inout		unsigned int *countp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_qword_t ev[EF100_EV_BATCH];
	unsigned int batch;
	unsigned int phase_bit;
	unsigned int total;
	unsigned int count;
	unsigned int index;
	size_t offset;

#if EFSYS_OPT_EV_EXTENDED_WIDTH
	if (eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) {
		rhead_ev_ew_qpoll(eep, countp, eecp, arg);
		return;
	}
#endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */

	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
	EFSYS_ASSERT(countp != NULL);
	EFSYS_ASSERT(eecp != NULL);

	count = *countp;
	do {
		/* Read up until the end of the batch period */
		batch = EF100_EV_BATCH - (count & (EF100_EV_BATCH - 1));
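		/*
		 * The queue holds (ee_mask + 1) entries, so the phase bit
		 * expected for valid events flips on each wrap of the
		 * running event count.
		 */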
		phase_bit = (count & (eep->ee_mask + 1)) != 0;
		offset = (count & eep->ee_mask) * sizeof (efx_qword_t);
		for (total = 0; total < batch; ++total) {
			EFSYS_MEM_READQ(eep->ee_esmp, offset, &(ev[total]));

			if (!EF100_EV_PRESENT(ev[total], phase_bit))
				break;

			EFSYS_PROBE3(event, unsigned int, eep->ee_index,
			    uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(ev[total], EFX_DWORD_0));

			offset += sizeof (efx_qword_t);
		}

		/* Process the batch of events */
		for (index = 0; index < total; ++index) {
			boolean_t should_abort;

			EFX_EV_QSTAT_INCR(eep, EV_ALL);

			should_abort =
			    rhead_ev_dispatch(eep, &(ev[index]), eecp, arg);

			if (should_abort) {
				/* Ignore subsequent events */
				total = index + 1;

				/*
				 * Poison batch to ensure the outer
				 * loop is broken out of.
				 */
				EFSYS_ASSERT(batch <= EF100_EV_BATCH);
				batch += (EF100_EV_BATCH << 1);
				EFSYS_ASSERT(total != batch);
				break;
			}
		}

		/*
		 * There is no need to clear processed events, since the
		 * phase bit, which flips on each wrap of the write index,
		 * indicates event presence.
		 */

		count += total;

	} while (total == batch);

	*countp = count;
}

#if EFSYS_OPT_EV_EXTENDED_WIDTH
static			boolean_t
rhead_ev_ew_dispatch(
	__in		efx_evq_t *eep,
	__in		efx_xword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	boolean_t should_abort;
	uint32_t code;

	EFSYS_ASSERT((eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) != 0);

	code = EFX_XWORD_FIELD(*eventp, ESF_GZ_EV_256_EV32_TYPE);
	switch (code) {
	case ESE_GZ_EF100_EVEW_64BIT:
		/* NOTE: ignore phase bit in encapsulated 64bit event. */
		should_abort =
		    rhead_ev_dispatch(eep, &eventp->ex_qword[0], eecp, arg);
		break;

#if EFSYS_OPT_DESC_PROXY
	case ESE_GZ_EF100_EVEW_TXQ_DESC:
		should_abort = eep->ee_ew_txq_desc(eep, eventp, eecp, arg);
		break;

	case ESE_GZ_EF100_EVEW_VIRTQ_DESC:
		should_abort = eep->ee_ew_virtq_desc(eep, eventp, eecp, arg);
		break;
#endif /* EFSYS_OPT_DESC_PROXY */

	default:
		/* Omit currently unused reserved bits from the probe. */
		EFSYS_PROBE7(ew_bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_7),
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_4),
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_3),
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_2),
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_1),
		    uint32_t, EFX_XWORD_FIELD(*eventp, EFX_DWORD_0));

		EFSYS_ASSERT(eecp->eec_exception != NULL);
		(void) eecp->eec_exception(arg, EFX_EXCEPTION_EV_ERROR, code);
		should_abort = B_TRUE;
	}

	return (should_abort);
}

/*
 * Poll the extended width event queue. The batch size is equal to the
 * cache line size divided by the event size.
 */
#define	EF100_EV_EW_BATCH	2

/*
 * Check if an event is present.
 *
 * Riverhead EvQs use a phase bit to indicate the presence of valid events,
 * by flipping the phase bit on each wrap of the write index.
 */
#define	EF100_EV_EW_PRESENT(_xword, _phase_bit)				\
	(EFX_XWORD_FIELD((_xword), ESF_GZ_EV_256_EV32_PHASE) == (_phase_bit))

static			void
rhead_ev_ew_qpoll(
	__in		efx_evq_t *eep,
	__inout		unsigned int *countp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_xword_t ev[EF100_EV_EW_BATCH];
	unsigned int batch;
	unsigned int phase_bit;
	unsigned int total;
	unsigned int count;
	unsigned int index;
	size_t offset;

	EFSYS_ASSERT3U(eep->ee_magic, ==, EFX_EVQ_MAGIC);
	EFSYS_ASSERT((eep->ee_flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) != 0);
	EFSYS_ASSERT(countp != NULL);
	EFSYS_ASSERT(eecp != NULL);

	count = *countp;
	do {
		/* Read up until the end of the batch period */
		batch = EF100_EV_EW_BATCH - (count & (EF100_EV_EW_BATCH - 1));
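		/*
		 * As in rhead_ev_qpoll(), the expected phase bit flips on
		 * each wrap of the running event count over the queue size.
		 */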
		phase_bit = (count & (eep->ee_mask + 1)) != 0;
		offset = (count & eep->ee_mask) * sizeof (efx_xword_t);
		for (total = 0; total < batch; ++total) {
			EFSYS_MEM_READX(eep->ee_esmp, offset, &(ev[total]));

			if (!EF100_EV_EW_PRESENT(ev[total], phase_bit))
				break;

			/* Omit unused reserved bits from the probe. */
			EFSYS_PROBE7(ew_event, unsigned int, eep->ee_index,
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_7),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_4),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_3),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_2),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_1),
			    uint32_t, EFX_XWORD_FIELD(ev[total], EFX_DWORD_0));

			offset += sizeof (efx_xword_t);
		}

		/* Process the batch of events */
		for (index = 0; index < total; ++index) {
			boolean_t should_abort;

			EFX_EV_QSTAT_INCR(eep, EV_ALL);

			should_abort =
			    rhead_ev_ew_dispatch(eep, &(ev[index]), eecp, arg);

			if (should_abort) {
				/* Ignore subsequent events */
				total = index + 1;

				/*
				 * Poison batch to ensure the outer
				 * loop is broken out of.
				 */
				EFSYS_ASSERT(batch <= EF100_EV_EW_BATCH);
				batch += (EF100_EV_EW_BATCH << 1);
				EFSYS_ASSERT(total != batch);
				break;
			}
		}

		/*
		 * There is no need to clear processed events, since the
		 * phase bit, which flips on each wrap of the write index,
		 * indicates event presence.
		 */

		count += total;

	} while (total == batch);

	*countp = count;
}
#endif /* EFSYS_OPT_EV_EXTENDED_WIDTH */


	__checkReturn	efx_rc_t
rhead_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	_NOTE(ARGUNUSED(eep, us))

	return (ENOTSUP);
}


#if EFSYS_OPT_QSTATS
			void
rhead_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

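	/* Fold each per-queue counter into the caller's array, then reset it */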
	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */

static	__checkReturn	boolean_t
rhead_ev_rx_packets(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t label;
	uint32_t num_packets;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_Q_LABEL);

	/*
	 * On EF100 the EV_RX event reports the number of received
	 * packets (unlike EF10 which reports a descriptor index).
	 * The client driver is responsible for maintaining the Rx
	 * descriptor index, and computing how many descriptors are
	 * occupied by each received packet (based on the Rx buffer size
	 * and the packet length from the Rx prefix).
	 */
	num_packets = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_RXPKTS_NUM_PKT);

	/*
	 * The receive event may indicate more than one packet, and so
	 * does not contain the packet length. Read the packet length
	 * from the prefix when handling each packet.
	 */
	EFSYS_ASSERT(eecp->eec_rx_packets != NULL);
	should_abort = eecp->eec_rx_packets(arg, label, num_packets,
	    EFX_PKT_PREFIX_LEN);

	return (should_abort);
}

static	__checkReturn	boolean_t
rhead_ev_tx_completion(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t num_descs;
	uint32_t label;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_TX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	label = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_Q_LABEL);

	/*
	 * On EF100 the EV_TX event reports the number of completed Tx
	 * descriptors (on EF10, the event reports the low bits of the
	 * index of the last completed descriptor).
	 * The client driver completion callback will compute the
	 * descriptor index, so that is not needed here.
	 */
	num_descs = EFX_QWORD_FIELD(*eqp, ESF_GZ_EV_TXCMPL_NUM_DESC);

	EFSYS_PROBE2(tx_ndescs, uint32_t, label, unsigned int, num_descs);

	EFSYS_ASSERT(eecp->eec_tx_ndescs != NULL);
	should_abort = eecp->eec_tx_ndescs(arg, label, num_descs);

	return (should_abort);
}

static	__checkReturn	boolean_t
rhead_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	boolean_t ret;

	/*
	 * The event format changed after Riverhead R1; the MCDI event
	 * layout on EF100 is now exactly the same as on EF10, except for
	 * an added QDMA phase bit, which is unused on EF10.
	 */
	ret = ef10_ev_mcdi(eep, eqp, eecp, arg);

	return (ret);
}

#if EFSYS_OPT_DESC_PROXY
static			boolean_t
rhead_ev_ew_txq_desc(
	__in		efx_evq_t *eep,
	__in		efx_xword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_oword_t txq_desc;
	uint16_t vi_id;
	boolean_t should_abort;

	_NOTE(ARGUNUSED(eep))

	vi_id = EFX_XWORD_FIELD(*eventp, ESF_GZ_EV_TXQ_DP_VI_ID);

	/*
	 * NOTE: This is the raw descriptor data, and has not been converted
	 * to host endian. The handler callback must use the EFX_OWORD macros
	 * to extract the descriptor fields as host endian values.
	 */
	txq_desc = eventp->ex_oword[0];

	EFSYS_ASSERT(eecp->eec_desc_proxy_txq_desc != NULL);
	should_abort = eecp->eec_desc_proxy_txq_desc(arg, vi_id, txq_desc);

	return (should_abort);
}
#endif /* EFSYS_OPT_DESC_PROXY */


#if EFSYS_OPT_DESC_PROXY
static			boolean_t
rhead_ev_ew_virtq_desc(
	__in		efx_evq_t *eep,
	__in		efx_xword_t *eventp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_oword_t virtq_desc;
	uint16_t vi_id;
	uint16_t avail;
	boolean_t should_abort;

	_NOTE(ARGUNUSED(eep))

	vi_id = EFX_XWORD_FIELD(*eventp, ESF_GZ_EV_VQ_DP_VI_ID);
	avail = EFX_XWORD_FIELD(*eventp, ESF_GZ_EV_VQ_DP_AVAIL_ENTRY);

	/*
	 * NOTE: This is the raw descriptor data, and has not been converted
	 * to host endian. The handler callback must use the EFX_OWORD macros
	 * to extract the descriptor fields as host endian values.
	 */
	virtq_desc = eventp->ex_oword[0];

	EFSYS_ASSERT(eecp->eec_desc_proxy_virtq_desc != NULL);
	should_abort =
	    eecp->eec_desc_proxy_virtq_desc(arg, vi_id, avail, virtq_desc);

	return (should_abort);
}
#endif /* EFSYS_OPT_DESC_PROXY */

#endif	/* EFSYS_OPT_RIVERHEAD */