/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2012-2019 Solarflare Communications Inc.
 */

#include "efx.h"
#include "efx_impl.h"
#if EFSYS_OPT_MON_STATS
#include "mcdi_mon.h"
#endif

#if EFX_OPTS_EF10()

/*
 * A non-interrupting event queue requires an interrupting event queue to
 * refer to for wake-up events, even if wake-ups are never used.
 * The referenced queue may even be unallocated.
 */
#define	EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);


static	__checkReturn	efx_rc_t
efx_mcdi_set_evq_tmr(
	__in		efx_nic_t *enp,
	__in		uint32_t instance,
	__in		uint32_t mode,
	__in		uint32_t timer_ns)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN,
		MC_CMD_SET_EVQ_TMR_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
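
/*
 * The helper above follows the MCDI request pattern used throughout this
 * file: one buffer declared with EFX_MCDI_DECLARE_BUF() backs both the
 * request and the response, efx_mcdi_execute() performs the exchange, and
 * the caller checks emr_rc and the response length before trusting the
 * output. A minimal hedged sketch of a caller; the context providing
 * "enp", "evq_index" and the chosen timeout is assumed, not part of this
 * file:
 *
 *	efx_rc_t rc;
 *
 *	rc = efx_mcdi_set_evq_tmr(enp, evq_index,
 *	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF, 50 * 1000);
 *	if (rc != 0)
 *		(handle failure: MC error, or reply shorter than
 *		 MC_CMD_SET_EVQ_TMR_OUT_LEN)
 */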


	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}

			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}

	__checkReturn	efx_rc_t
ef10_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t irq;
	efx_rc_t rc;
	boolean_t low_latency;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */

	EFSYS_ASSERT((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) == 0);

	/*
	 * NO_CONT_EV mode is only requested from the firmware when creating
	 * receive queues, but here it needs to be specified at event queue
	 * creation, as the event handler needs to know which format is in use.
	 *
	 * If EFX_EVQ_FLAGS_NO_CONT_EV is specified, all receive queues for this
	 * event queue will be created in NO_CONT_EV mode.
	 *
	 * See SF-109306-TC 5.11 "Events for RXQs in NO_CONT_EV mode".
	 */
	if (flags & EFX_EVQ_FLAGS_NO_CONT_EV) {
		if (enp->en_nic_cfg.enc_no_cont_ev_mode_supported == B_FALSE) {
			rc = EINVAL;
			goto fail1;
		}
	}

	/* Set up the handler table */
	eep->ee_rx	= ef10_ev_rx;
	eep->ee_tx	= ef10_ev_tx;
	eep->ee_driver	= ef10_ev_driver;
	eep->ee_drv_gen	= ef10_ev_drv_gen;
	eep->ee_mcdi	= ef10_ev_mcdi;

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		irq = index;
	} else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */

	/*
	 * On Huntington we need to specify the settings to use.
	 * If the event queue type in flags is auto, we favour throughput
	 * if the adapter is running virtualization-supporting firmware
	 * (i.e. the full-featured firmware variant) and latency otherwise.
	 * The Ethernet Virtual Bridging capability is used to make this
	 * decision. (Note, though, that the low-latency firmware variant
	 * is also best for throughput, and the corresponding type should
	 * be specified to choose it.)
	 *
	 * If the firmware supports EvQ types (e.g. on Medford and Medford2),
	 * the type specified in flags is passed to the firmware to make the
	 * decision, and the low_latency hint is ignored.
	 */
	low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
	rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
	    low_latency);
	if (rc != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
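
/*
 * A hedged sketch of how a caller might create EF10 event queues with the
 * function above. EVQ 0 is forced to interrupting mode (the code above
 * upgrades its notify flags automatically) so that non-interrupting queues
 * have somewhere to direct wake-up events. The NIC handle "enp", the DMA
 * buffers "esmp0"/"esmp1" and the queue objects "eep0"/"eep1" are assumed
 * to be set up by the caller; the flag names are assumed from the efx API:
 *
 *	rc = ef10_ev_qcreate(enp, 0, esmp0, 1024, 0, 0,
 *	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT | EFX_EVQ_FLAGS_TYPE_AUTO, eep0);
 *
 *	rc = ef10_ev_qcreate(enp, 1, esmp1, 1024, 0, 0,
 *	    EFX_EVQ_FLAGS_NOTIFY_DISABLED | EFX_EVQ_FLAGS_TYPE_AUTO, eep1);
 *
 * Wake-ups for EVQ 1 are directed at
 * EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX even though they are never used.
 */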

			void
ef10_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));

	(void) efx_mcdi_fini_evq(enp, eep->ee_index);
}

	__checkReturn	efx_rc_t
ef10_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	rptr = count & eep->ee_mask;

	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		EFX_STATIC_ASSERT(EF10_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EF10_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}
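
/*
 * Worked example of the bug35388 split above, assuming for illustration
 * only that ERF_DD_EVQ_IND_RPTR_WIDTH is 8 bits and rptr == 0x1a3:
 *
 *	high write: rptr >> 8             == 0x01 (FLAGS_HIGH)
 *	low write:  rptr & ((1 << 8) - 1) == 0xa3 (FLAGS_LOW)
 *
 * The hardware reassembles the two halves into the full read pointer,
 * which is why the static asserts above check that the queue size fits
 * in twice the field width.
 */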

static	__checkReturn	efx_rc_t
efx_mcdi_driver_event(
	__in		efx_nic_t *enp,
	__in		uint32_t evq,
	__in		efx_qword_t data)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN,
		MC_CMD_DRIVER_EVENT_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_DRIVER_EVENT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
	    EFX_QWORD_FIELD(data, EFX_DWORD_1));

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}

			void
ef10_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_qword_t event;

	EFX_POPULATE_QWORD_3(event,
	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
	    ESF_DZ_DRV_SUB_CODE, 0,
	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}
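
/*
 * ef10_ev_qpost() and ef10_ev_drv_gen() below form a software-event round
 * trip: the 16-bit value posted here is delivered back to the driver via
 * the eec_software callback. A hedged sketch of a receiving callback,
 * with a hypothetical magic value chosen by the driver:
 *
 *	static boolean_t
 *	my_ev_software(void *arg, uint16_t data)
 *	{
 *		if (data == 0xcafe)		(hypothetical wake-up marker)
 *			my_handle_wakeup(arg);	(hypothetical helper)
 *		return (B_FALSE);		(keep processing events)
 *	}
 */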

	__checkReturn	efx_rc_t
ef10_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_dword_t dword;
	uint32_t mode;
	efx_rc_t rc;

	/* Check that hardware and MCDI use the same timer MODE values */
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail1;
	}

	/* If the value is zero then disable the timer */
	if (us == 0) {
		mode = FFE_CZ_TIMER_MODE_DIS;
	} else {
		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
	}

	if (encp->enc_bug61265_workaround) {
		uint32_t ns = us * 1000;

		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
		if (rc != 0)
			goto fail2;
	} else {
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		if (encp->enc_bug35388_workaround) {
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DD_EVQ_IND_TIMER_FLAGS,
			    EFE_DD_EVQ_IND_TIMER_FLAGS,
			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
			EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT,
			    eep->ee_index, &dword, 0);
		} else {
			/*
			 * NOTE: The TMR_REL field introduced in Medford2 is
			 * ignored on earlier EF10 controllers. See bug66418
			 * comment 9 for details.
			 */
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DZ_TC_TIMER_MODE, mode,
			    ERF_DZ_TC_TIMER_VAL, ticks,
			    ERF_FZ_TC_TMR_REL_VAL, ticks);
			EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG,
			    eep->ee_index, &dword, 0);
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
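
/*
 * A minimal hedged usage sketch for the function above: hold off
 * interrupts for 50 us, then disable moderation again. "eep" is assumed
 * to be an event queue created earlier with ef10_ev_qcreate():
 *
 *	efx_rc_t rc;
 *
 *	rc = ef10_ev_qmoderate(eep, 50);
 *	if (rc == EINVAL)
 *		(50 us exceeds encp->enc_evq_timer_max_us on this NIC)
 *
 *	(void) ef10_ev_qmoderate(eep, 0);	(timer disabled)
 */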


#if EFSYS_OPT_QSTATS
			void
ef10_ev_qstats_update(
	__in				efx_evq_t *eep,
	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
{
	unsigned int id;

	for (id = 0; id < EV_NQSTATS; id++) {
		efsys_stat_t *essp = &stat[id];

		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
		eep->ee_stat[id] = 0;
	}
}
#endif /* EFSYS_OPT_QSTATS */

#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER

static	__checkReturn	boolean_t
ef10_ev_rx_packed_stream(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t label;
	uint32_t pkt_count_lbits;
	uint16_t flags;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int pkt_count;
	unsigned int current_id;
	boolean_t new_buffer;

	pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);

	flags = 0;

	eersp = &eep->ee_rxq_state[label];

	/*
	 * RX_DSC_PTR_LBITS holds the least significant bits of the global
	 * (not per-buffer) packet counter. It is guaranteed that the
	 * maximum number of completed packets fits in the lbits mask, so
	 * modulo arithmetic on the lbits mask is used to calculate the
	 * packet counter increment.
	 */
	pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_stream_npackets += pkt_count;

	if (new_buffer) {
		flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
#if EFSYS_OPT_RX_PACKED_STREAM
		/*
		 * If both packed stream and equal stride super-buffer
		 * modes are compiled in, in theory credits should be
		 * maintained for packed stream only, but right now
		 * these modes are not distinguished in the event queue
		 * Rx queue state and it is OK to increment the counter
		 * regardless (it might even be cheaper than branching,
		 * since neighbouring structure members are updated as
		 * well).
		 */
		eersp->eers_rx_packed_stream_credits++;
#endif
		eersp->eers_rx_read_ptr++;
	}
	current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
		/* RX frame truncated */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE);
		flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
	should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
	    flags);

	return (should_abort);
}
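
/*
 * Worked example of the modulo arithmetic in the handler above, assuming
 * for illustration only that RX_DSC_PTR_LBITS is 4 bits wide: if the
 * stored global count ends in 0xe and the event carries lbits 0x2, then
 *
 *	pkt_count = (0x2 - 0xe) & 0xf = 4
 *
 * i.e. four packets completed even though the hardware counter wrapped.
 */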

#endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */

static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t size;
	uint32_t label;
	uint32_t mac_class;
	uint32_t eth_tag_class;
	uint32_t l3_class;
	uint32_t l4_class;
	uint32_t next_read_lbits;
	uint16_t flags;
	boolean_t cont;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int desc_count;
	unsigned int last_used_id;

	EFX_EV_QSTAT_INCR(eep, EV_RX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	/* Basic packet information */
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	eersp = &eep->ee_rxq_state[label];

#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	/*
	 * Packed stream events are very different,
	 * so handle them separately
	 */
	if (eersp->eers_rx_packed_stream)
		return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
#endif

	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);

	/*
	 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
	 * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
	 * and values for all EF10 controllers.
	 */
	EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN);
	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN);

	l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}
	flags = 0;

	if (cont != 0) {
		/*
		 * This may be part of a scattered frame, or it may be a
		 * truncated frame if scatter is disabled on this RXQ.
		 * Overlength frames can be received if e.g. a VF is configured
		 * for 1500 MTU but connected to a port set to 9000 MTU
		 * (see bug56567).
		 * FIXME: There is not yet any driver that supports scatter on
		 * Huntington.  Scatter support is required for OSX.
		 */
		flags |= EFX_PKT_CONT;
	}

	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
		flags |= EFX_PKT_UNICAST;

	/*
	 * Increment the count of descriptors read.
	 *
	 * In NO_CONT_EV mode, RX_DSC_PTR_LBITS is actually a packet count, but
	 * when scatter is disabled, there is only one descriptor per packet and
	 * so it can be treated the same.
	 *
	 * TODO: Support scatter in NO_CONT_EV mode.
	 */
	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_read_ptr += desc_count;

	/* Calculate the index of the last descriptor consumed */
	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;

	if (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV) {
		if (desc_count > 1)
			EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);

		/* Always read the length from the prefix in NO_CONT_EV mode. */
		flags |= EFX_PKT_PREFIX_LEN;

		/*
		 * Check for an aborted scatter, signalled by the ABORT bit in
		 * NO_CONT_EV mode. The ABORT bit was not used before NO_CONT_EV
		 * mode was added as it was broken in Huntington silicon.
		 */
		if (EFX_QWORD_FIELD(*eqp, ESF_EZ_RX_ABORT) != 0) {
			flags |= EFX_DISCARD;
			goto deliver;
		}
	} else if (desc_count > 1) {
		/*
		 * FIXME: add error checking to make sure this is a batched
		 * event. This could also be an aborted scatter, see Bug36629.
		 */
		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
		flags |= EFX_PKT_PREFIX_LEN;
	}

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
		/* RX frame truncated */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		/*
		 * Hardware parse failed, due to malformed headers
		 * or headers that are too long for the parser.
		 * Headers and checksums must be validated by the host.
		 */
		EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE);
		goto deliver;
	}

	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
		flags |= EFX_PKT_VLAN_TAGGED;
	}

	switch (l3_class) {
	case ESE_DZ_L3_CLASS_IP4:
	case ESE_DZ_L3_CLASS_IP4_FRAG:
		flags |= EFX_PKT_IPV4;
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_IPV4;
		}

		/*
		 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
		 * only 2 bits wide on Medford2. Check it is safe to use the
		 * Medford2 field and values for all EF10 controllers.
		 */
		EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
		    ESF_DE_RX_L4_CLASS_LBN);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
		    ESE_DE_L4_CLASS_UNKNOWN);

		if (l4_class == ESE_FZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
		}
		break;

	case ESE_DZ_L3_CLASS_IP6:
	case ESE_DZ_L3_CLASS_IP6_FRAG:
		flags |= EFX_PKT_IPV6;

		/*
		 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
		 * only 2 bits wide on Medford2. Check it is safe to use the
		 * Medford2 field and values for all EF10 controllers.
		 */
		EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
		    ESF_DE_RX_L4_CLASS_LBN);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
		    ESE_DE_L4_CLASS_UNKNOWN);

		if (l4_class == ESE_FZ_L4_CLASS_TCP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
			flags |= EFX_PKT_TCP;
		} else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
			flags |= EFX_PKT_UDP;
		} else {
			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
		}
		break;

	default:
		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
		break;
	}

	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
		} else {
			flags |= EFX_CKSUM_TCPUDP;
		}
	}

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx != NULL);
	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);

	return (should_abort);
}
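
/*
 * A hedged sketch of an eec_rx callback consuming the flags assembled
 * above. The callback name and body are hypothetical; the parameter list
 * mirrors the eecp->eec_rx() call made in ef10_ev_rx():
 *
 *	static boolean_t
 *	my_ev_rx(void *arg, uint32_t label, uint32_t id, uint32_t size,
 *	    uint16_t flags)
 *	{
 *		if (flags & EFX_DISCARD)
 *			(drop the frame: truncated or bad CRC)
 *		else if ((flags & (EFX_PKT_TCP | EFX_CKSUM_TCPUDP)) ==
 *		    (EFX_PKT_TCP | EFX_CKSUM_TCPUDP))
 *			(TCP frame whose checksum the NIC validated)
 *		if (flags & EFX_PKT_PREFIX_LEN)
 *			(take the length from the Rx prefix, not "size")
 *		return (B_FALSE);
 *	}
 */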

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t id;
	uint32_t label;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_TX);

	/* Discard events after RXQ/TXQ errors, or hardware not available */
	if (enp->en_reset_flags &
	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
		return (B_FALSE);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
		/* Drop this event */
		return (B_FALSE);
	}

	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);

	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);

	EFSYS_ASSERT(eecp->eec_tx != NULL);
	should_abort = eecp->eec_tx(arg, label, id);

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	unsigned int code;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
	should_abort = B_FALSE;

	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
	switch (code) {
	case ESE_DZ_DRV_TIMER_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);

		EFSYS_ASSERT(eecp->eec_timer != NULL);
		should_abort = eecp->eec_timer(arg, id);
		break;
	}

	case ESE_DZ_DRV_WAKE_UP_EV: {
		uint32_t id;

		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);

		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
		should_abort = eecp->eec_wake_up(arg, id);
		break;
	}

	case ESE_DZ_DRV_START_UP_EV:
		EFSYS_ASSERT(eecp->eec_initialized != NULL);
		should_abort = eecp->eec_initialized(arg);
		break;

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t data;
	boolean_t should_abort;

	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
	should_abort = B_FALSE;

	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
	if (data >= ((uint32_t)1 << 16)) {
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		return (B_TRUE);
	}

	EFSYS_ASSERT(eecp->eec_software != NULL);
	should_abort = eecp->eec_software(arg, (uint16_t)data);

	return (should_abort);
}

#endif	/* EFX_OPTS_EF10() */

#if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()

	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned int code;
	boolean_t should_abort = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization request
		 * has been processed. If the request was authorized then the
		 * function can now re-send the original MCDI request.
		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
	case MCDI_EVENT_CODE_PROXY_REQUEST:
		efx_mcdi_ev_proxy_request(enp,
			MCDI_EV_FIELD(eqp, PROXY_REQUEST_BUFF_INDEX));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */

	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		ef10_phy_link_ev(enp, eqp, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_SENSOREVT,
				MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not be seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later. */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_FWALERT_SRAM,
				MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_FWALERT,
				MCDI_EV_FIELD(eqp, DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_ERR: {
		/*
		 * After a TXQ error is detected, firmware sends a TX_ERR event.
		 * This may be followed by TX completions (which we discard),
		 * and then finally by a TX_FLUSH event. Firmware destroys the
		 * TXQ automatically after sending the TX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

		EFSYS_PROBE2(tx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_FLUSH: {
		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

		/*
		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with TX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
		break;
	}

	case MCDI_EVENT_CODE_RX_ERR: {
		/*
		 * After an RXQ error is detected, firmware sends an RX_ERR
		 * event. This may be followed by RX events (which we discard),
		 * and then finally by an RX_FLUSH event. Firmware destroys the
		 * RXQ automatically after sending the RX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

		EFSYS_PROBE2(rx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_RX_FLUSH: {
		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

		/*
		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with RX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
		break;
	}

	default:
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}
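
/*
 * A hedged sketch of an eec_exception callback cooperating with the
 * TX_ERR/RX_ERR handling above. Once EFX_RESET_TXQ_ERR or
 * EFX_RESET_RXQ_ERR has been latched in en_reset_flags, subsequent TX/RX
 * completion events are discarded, so scheduling a reset is the driver's
 * only recovery path. The callback name and the reset helper are
 * hypothetical:
 *
 *	static boolean_t
 *	my_ev_exception(void *arg, uint32_t code, uint32_t data)
 *	{
 *		if (code == EFX_EXCEPTION_TX_ERROR ||
 *		    code == EFX_EXCEPTION_RX_ERROR)
 *			my_schedule_reset(arg);
 *		return (B_TRUE);	(stop processing this EVQ)
 *	}
 */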

#endif	/* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */

#if EFX_OPTS_EF10()

		void
ef10_ev_rxlabel_init(
	__in		efx_evq_t *eep,
	__in		efx_rxq_t *erp,
	__in		unsigned int label,
	__in		efx_rxq_type_t type)
{
	efx_evq_rxq_state_t *eersp;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
	boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER);
#endif

	_NOTE(ARGUNUSED(type))
	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

#if EFSYS_OPT_RX_PACKED_STREAM
	/*
	 * For packed stream modes, the very first event will have the
	 * new buffer flag set, so the pointer will be incremented then,
	 * yielding the correct value. That results in simpler code than
	 * trying to detect the start-of-the-world condition in the
	 * event handler.
	 */
	eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
#else
	eersp->eers_rx_read_ptr = 0;
#endif
	eersp->eers_rx_mask = erp->er_mask;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = packed_stream || es_super_buffer;
#endif
#if EFSYS_OPT_RX_PACKED_STREAM
	if (packed_stream) {
		eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
		    EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
		    EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
		/*
		 * A single credit is allocated to the queue when it is
		 * started. It is immediately spent by the first packet,
		 * which has the NEW_BUFFER flag set; we must still account
		 * for that credit here so as not to accidentally wrap
		 * around the maximum number of credits.
		 */
		eersp->eers_rx_packed_stream_credits--;
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
		    EFX_RX_PACKED_STREAM_MAX_CREDITS);
	}
#endif
}
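
/*
 * Worked example of the credit computation above, with illustrative
 * values only: for an event queue with ee_mask == 2047 (2048 entries),
 * EFX_RX_PACKED_STREAM_MEM_PER_CREDIT == 65536 and
 * EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE == 256,
 *
 *	credits = 2048 / EFX_DIV_ROUND_UP(65536, 256)
 *	        = 2048 / 256
 *	        = 8
 *
 * minus the one credit consumed by the first NEW_BUFFER packet,
 * leaving 7.
 */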

		void
ef10_ev_rxlabel_fini(
	__in		efx_evq_t *eep,
	__in		unsigned int label)
{
	efx_evq_rxq_state_t *eersp;

	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);

	eersp->eers_rx_read_ptr = 0;
	eersp->eers_rx_mask = 0;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = B_FALSE;
#endif
#if EFSYS_OPT_RX_PACKED_STREAM
	eersp->eers_rx_packed_stream_credits = 0;
#endif
}

#endif	/* EFX_OPTS_EF10() */