xref: /dpdk/drivers/common/sfc_efx/base/ef10_ev.c (revision f8a60f76b3c611d42d7883845fff7567de26df1d)
15e111ed8SAndrew Rybchenko /* SPDX-License-Identifier: BSD-3-Clause
25e111ed8SAndrew Rybchenko  *
35e111ed8SAndrew Rybchenko  * Copyright(c) 2019-2020 Xilinx, Inc.
45e111ed8SAndrew Rybchenko  * Copyright(c) 2012-2019 Solarflare Communications Inc.
55e111ed8SAndrew Rybchenko  */
65e111ed8SAndrew Rybchenko 
75e111ed8SAndrew Rybchenko #include "efx.h"
85e111ed8SAndrew Rybchenko #include "efx_impl.h"
95e111ed8SAndrew Rybchenko #if EFSYS_OPT_MON_STATS
105e111ed8SAndrew Rybchenko #include "mcdi_mon.h"
115e111ed8SAndrew Rybchenko #endif
125e111ed8SAndrew Rybchenko 
135e111ed8SAndrew Rybchenko #if EFX_OPTS_EF10()
145e111ed8SAndrew Rybchenko 
155e111ed8SAndrew Rybchenko /*
 * Non-interrupting event queue requires interrupting event queue to
175e111ed8SAndrew Rybchenko  * refer to for wake-up events even if wake ups are never used.
185e111ed8SAndrew Rybchenko  * It could be even non-allocated event queue.
195e111ed8SAndrew Rybchenko  */
205e111ed8SAndrew Rybchenko #define	EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX	(0)
215e111ed8SAndrew Rybchenko 
/*
 * Forward declarations of the per-event-type handlers that
 * ef10_ev_qcreate() installs into the event queue handler table.
 */
static	__checkReturn	boolean_t
ef10_ev_rx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_tx(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_driver(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);

static	__checkReturn	boolean_t
ef10_ev_drv_gen(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg);
495e111ed8SAndrew Rybchenko 
505e111ed8SAndrew Rybchenko 
/*
 * Issue the MC_CMD_SET_EVQ_TMR MCDI request to configure the moderation
 * timer of event queue 'instance'.  Both the initial load and the reload
 * values are set to 'timer_ns' (nanoseconds); 'mode' is one of the
 * MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_* values.
 *
 * Returns 0 on success, or an efx_rc_t error code from the MC or
 * EMSGSIZE if the response is shorter than expected.
 */
static	__checkReturn	efx_rc_t
efx_mcdi_set_evq_tmr(
	__in		efx_nic_t *enp,
	__in		uint32_t instance,
	__in		uint32_t mode,
	__in		uint32_t timer_ns)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN,
		MC_CMD_SET_EVQ_TMR_OUT_LEN);
	efx_rc_t rc;

	/* The same buffer holds the request and, afterwards, the response. */
	req.emr_cmd = MC_CMD_SET_EVQ_TMR;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;

	/* Use the same value for the initial load and for reloads. */
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
	MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	/* Sanity-check that the firmware returned a full response. */
	if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
		rc = EMSGSIZE;
		goto fail2;
	}

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
955e111ed8SAndrew Rybchenko 
965e111ed8SAndrew Rybchenko 
/*
 * Event module initialisation for EF10.  Nothing to do here: event
 * queue state is set up per-queue by ef10_ev_qcreate().
 */
	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
	return (0);
}
1045e111ed8SAndrew Rybchenko 
/*
 * Event module finalisation for EF10.  Nothing to do: per-queue
 * teardown is handled by ef10_ev_qdestroy().
 */
			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	_NOTE(ARGUNUSED(enp))
}
1115e111ed8SAndrew Rybchenko 
/*
 * Create (initialise) an event queue via the MC_CMD_INIT_EVQ MCDI
 * request and install the EF10 event handlers.
 *
 * index  - function-relative event queue index
 * esmp   - memory backing the event queue ring
 * ndescs - number of event descriptors in the ring
 * id     - buffer table id (unused here: buftbl is managed by the MC)
 * us     - initial moderation timer value, in microseconds
 * flags  - EFX_EVQ_FLAGS_* selecting queue type and notification mode
 * eep    - event queue software state to fill in
 */
	__checkReturn	efx_rc_t
ef10_ev_qcreate(
	__in		efx_nic_t *enp,
	__in		unsigned int index,
	__in		efsys_mem_t *esmp,
	__in		size_t ndescs,
	__in		uint32_t id,
	__in		uint32_t us,
	__in		uint32_t flags,
	__in		efx_evq_t *eep)
{
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	uint32_t irq;
	efx_rc_t rc;
	boolean_t low_latency;

	_NOTE(ARGUNUSED(id))	/* buftbl id managed by MC */

	/* Extended width event queues are not supported on EF10. */
	EFSYS_ASSERT((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) == 0);

	/*
	 * NO_CONT_EV mode is only requested from the firmware when creating
	 * receive queues, but here it needs to be specified at event queue
	 * creation, as the event handler needs to know which format is in use.
	 *
	 * If EFX_EVQ_FLAGS_NO_CONT_EV is specified, all receive queues for this
	 * event queue will be created in NO_CONT_EV mode.
	 *
	 * See SF-109306-TC 5.11 "Events for RXQs in NO_CONT_EV mode".
	 */
	if (flags & EFX_EVQ_FLAGS_NO_CONT_EV) {
		if (enp->en_nic_cfg.enc_no_cont_ev_mode_supported == B_FALSE) {
			rc = EINVAL;
			goto fail1;
		}
	}

	/* Set up the handler table */
	eep->ee_rx	= ef10_ev_rx;
	eep->ee_tx	= ef10_ev_tx;
	eep->ee_driver	= ef10_ev_driver;
	eep->ee_drv_gen	= ef10_ev_drv_gen;
	eep->ee_mcdi	= ef10_ev_mcdi;

	/* Set up the event queue */
	/* INIT_EVQ expects function-relative vector number */
	if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
	    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
		irq = index;
	} else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
		/*
		 * The always-interrupting queue must be created with
		 * interrupts enabled since non-interrupting queues refer
		 * to it for wake-ups (see the macro comment above).
		 */
		irq = index;
		flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
		    EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	} else {
		/* Non-interrupting queue: point at the always-interrupting one */
		irq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
	}

	/*
	 * Interrupts may be raised for events immediately after the queue is
	 * created. See bug58606.
	 */

	/*
	 * On Huntington we need to specify the settings to use.
	 * If event queue type in flags is auto, we favour throughput
	 * if the adapter is running virtualization supporting firmware
	 * (i.e. the full featured firmware variant)
	 * and latency otherwise. The Ethernet Virtual Bridging
	 * capability is used to make this decision. (Note though that
	 * the low latency firmware variant is also best for
	 * throughput and corresponding type should be specified
	 * to choose it.)
	 *
	 * If FW supports EvQ types (e.g. on Medford and Medford2) the
	 * type which is specified in flags is passed to FW to make the
	 * decision and low_latency hint is ignored.
	 */
	low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
	rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, us, flags,
	    low_latency);
	if (rc != 0)
		goto fail2;

	return (0);

fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
2045e111ed8SAndrew Rybchenko 
/*
 * Destroy the event queue via the FINI_EVQ MCDI request.  The MCDI
 * status is deliberately discarded: the queue is being torn down and
 * there is no caller-visible recovery from a failed fini.
 */
			void
ef10_ev_qdestroy(
	__in		efx_evq_t *eep)
{
	efx_nic_t *enp = eep->ee_enp;

	EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));

	(void) efx_mcdi_fini_evq(enp, eep->ee_index);
}
2155e111ed8SAndrew Rybchenko 
/*
 * Re-arm (prime) the event queue by writing the updated read pointer
 * ('count', masked to the ring size) to the hardware, acknowledging
 * events consumed so far.
 */
	__checkReturn	efx_rc_t
ef10_ev_qprime(
	__in		efx_evq_t *eep,
	__in		unsigned int count)
{
	efx_nic_t *enp = eep->ee_enp;
	uint32_t rptr;
	efx_dword_t dword;

	rptr = count & eep->ee_mask;

	if (enp->en_nic_cfg.enc_bug35388_workaround) {
		/*
		 * The bug35388 workaround writes the read pointer in two
		 * parts through the indirect access register: the bits
		 * above ERF_DD_EVQ_IND_RPTR_WIDTH first, then the low
		 * bits.  The static asserts check that two writes of
		 * RPTR_WIDTH bits are both necessary and sufficient for
		 * the supported ring sizes.
		 */
		EFX_STATIC_ASSERT(EF10_EVQ_MINNEVS >
		    (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_STATIC_ASSERT(EF10_EVQ_MAXNEVS <
		    (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
		    ERF_DD_EVQ_IND_RPTR,
		    (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
		EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);

		EFX_POPULATE_DWORD_2(dword,
		    ERF_DD_EVQ_IND_RPTR_FLAGS,
		    EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
		    ERF_DD_EVQ_IND_RPTR,
		    rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
		EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
		    &dword, B_FALSE);
	} else {
		/* Single direct write of the full read pointer. */
		EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
		EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
		    &dword, B_FALSE);
	}

	return (0);
}
2565e111ed8SAndrew Rybchenko 
/*
 * Post a 64-bit driver-generated event ('data') to event queue 'evq'
 * using the MC_CMD_DRIVER_EVENT MCDI request.
 *
 * Returns 0 on success or the MCDI error code.
 */
static	__checkReturn	efx_rc_t
efx_mcdi_driver_event(
	__in		efx_nic_t *enp,
	__in		uint32_t evq,
	__in		efx_qword_t data)
{
	efx_mcdi_req_t req;
	EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN,
		MC_CMD_DRIVER_EVENT_OUT_LEN);
	efx_rc_t rc;

	req.emr_cmd = MC_CMD_DRIVER_EVENT;
	req.emr_in_buf = payload;
	req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
	req.emr_out_buf = payload;
	req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;

	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);

	/* Split the 64-bit event data into the low/high request dwords. */
	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
	    EFX_QWORD_FIELD(data, EFX_DWORD_0));
	MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
	    EFX_QWORD_FIELD(data, EFX_DWORD_1));

	efx_mcdi_execute(enp, &req);

	if (req.emr_rc != 0) {
		rc = req.emr_rc;
		goto fail1;
	}

	return (0);

fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
2955e111ed8SAndrew Rybchenko 
/*
 * Post a driver-generated software event carrying 'data' to the event
 * queue (delivered back via the ee_drv_gen handler).  The MCDI status
 * is deliberately discarded.
 */
			void
ef10_ev_qpost(
	__in	efx_evq_t *eep,
	__in	uint16_t data)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_qword_t event;

	EFX_POPULATE_QWORD_3(event,
	    ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
	    ESF_DZ_DRV_SUB_CODE, 0,
	    ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);

	(void) efx_mcdi_driver_event(enp, eep->ee_index, event);
}
3115e111ed8SAndrew Rybchenko 
/*
 * Set the event queue moderation timer to 'us' microseconds.
 * A value of zero disables moderation; otherwise the timer runs in
 * interrupt-holdoff mode.  Depending on workarounds in effect, the
 * timer is programmed via MCDI (bug61265), via the indirect access
 * register (bug35388) or directly via the EVQ timer register.
 */
	__checkReturn	efx_rc_t
ef10_ev_qmoderate(
	__in		efx_evq_t *eep,
	__in		unsigned int us)
{
	efx_nic_t *enp = eep->ee_enp;
	efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
	efx_dword_t dword;
	uint32_t mode;
	efx_rc_t rc;

	/* Check that hardware and MCDI use the same timer MODE values */
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
	EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
	    MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);

	if (us > encp->enc_evq_timer_max_us) {
		rc = EINVAL;
		goto fail1;
	}

	/* If the value is zero then disable the timer */
	if (us == 0) {
		mode = FFE_CZ_TIMER_MODE_DIS;
	} else {
		mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
	}

	if (encp->enc_bug61265_workaround) {
		/* bug61265: program the timer via MCDI, in nanoseconds */
		uint32_t ns = us * 1000;

		rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
		if (rc != 0)
			goto fail2;
	} else {
		/* Direct register writes use timer ticks, not microseconds */
		unsigned int ticks;

		if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
			goto fail3;

		if (encp->enc_bug35388_workaround) {
			/* bug35388: write via the indirect access register */
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DD_EVQ_IND_TIMER_FLAGS,
			    EFE_DD_EVQ_IND_TIMER_FLAGS,
			    ERF_DD_EVQ_IND_TIMER_MODE, mode,
			    ERF_DD_EVQ_IND_TIMER_VAL, ticks);
			EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT,
			    eep->ee_index, &dword, 0);
		} else {
			/*
			 * NOTE: The TMR_REL field introduced in Medford2 is
			 * ignored on earlier EF10 controllers. See bug66418
			 * comment 9 for details.
			 */
			EFX_POPULATE_DWORD_3(dword,
			    ERF_DZ_TC_TIMER_MODE, mode,
			    ERF_DZ_TC_TIMER_VAL, ticks,
			    ERF_FZ_TC_TMR_REL_VAL, ticks);
			EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG,
			    eep->ee_index, &dword, 0);
		}
	}

	return (0);

fail3:
	EFSYS_PROBE(fail3);
fail2:
	EFSYS_PROBE(fail2);
fail1:
	EFSYS_PROBE1(fail1, efx_rc_t, rc);

	return (rc);
}
3915e111ed8SAndrew Rybchenko 
3925e111ed8SAndrew Rybchenko 
3935e111ed8SAndrew Rybchenko #if EFSYS_OPT_QSTATS
3945e111ed8SAndrew Rybchenko 			void
3955e111ed8SAndrew Rybchenko ef10_ev_qstats_update(
3965e111ed8SAndrew Rybchenko 	__in				efx_evq_t *eep,
3975e111ed8SAndrew Rybchenko 	__inout_ecount(EV_NQSTATS)	efsys_stat_t *stat)
3985e111ed8SAndrew Rybchenko {
3995e111ed8SAndrew Rybchenko 	unsigned int id;
4005e111ed8SAndrew Rybchenko 
4015e111ed8SAndrew Rybchenko 	for (id = 0; id < EV_NQSTATS; id++) {
4025e111ed8SAndrew Rybchenko 		efsys_stat_t *essp = &stat[id];
4035e111ed8SAndrew Rybchenko 
4045e111ed8SAndrew Rybchenko 		EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
4055e111ed8SAndrew Rybchenko 		eep->ee_stat[id] = 0;
4065e111ed8SAndrew Rybchenko 	}
4075e111ed8SAndrew Rybchenko }
4085e111ed8SAndrew Rybchenko #endif /* EFSYS_OPT_QSTATS */
4095e111ed8SAndrew Rybchenko 
4105e111ed8SAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
4115e111ed8SAndrew Rybchenko 
/*
 * Handle an Rx event for a queue operating in packed stream or equal
 * stride super-buffer mode.  Such events report a cumulative packet
 * count rather than per-descriptor completions; completed packets are
 * delivered in one batch to the eec_rx_ps() callback.
 *
 * Returns the callback's should-abort indication.
 */
static	__checkReturn	boolean_t
ef10_ev_rx_packed_stream(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	uint32_t label;
	uint32_t pkt_count_lbits;
	uint16_t flags;
	boolean_t should_abort;
	efx_evq_rxq_state_t *eersp;
	unsigned int pkt_count;
	unsigned int current_id;
	boolean_t new_buffer;

	pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
	new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);

	flags = 0;

	eersp = &eep->ee_rxq_state[label];

	/*
	 * RX_DSC_PTR_LBITS has least significant bits of the global
	 * (not per-buffer) packet counter. It is guaranteed that
	 * maximum number of completed packets fits in lbits-mask.
	 * So, modulo lbits-mask arithmetic should be used to calculate
	 * packet counter increment.
	 */
	pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
	eersp->eers_rx_stream_npackets += pkt_count;

	if (new_buffer) {
		flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
#if EFSYS_OPT_RX_PACKED_STREAM
		/*
		 * If both packed stream and equal stride super-buffer
		 * modes are compiled in, in theory credits should be
		 * maintained for packed stream only, but right now
		 * these modes are not distinguished in the event queue
		 * Rx queue state and it is OK to increment the counter
		 * regardless (it might be even cheaper than branching
		 * since neighbour structure members are updated as well).
		 */
		eersp->eers_rx_packed_stream_credits++;
#endif
		eersp->eers_rx_read_ptr++;
	}
	current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;

	/* Check for errors that invalidate checksum and L3/L4 fields */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
		/* RX frame truncated */
		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
		flags |= EFX_DISCARD;
		goto deliver;
	}
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
		/* Bad Ethernet frame CRC */
		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
		flags |= EFX_DISCARD;
		goto deliver;
	}

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
		EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE);
		flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
		goto deliver;
	}

	/* Checksum errors are counted but the packets are still delivered */
	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);

	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
		EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);

deliver:
	/* If we're not discarding the packet then it is ok */
	if (~flags & EFX_DISCARD)
		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);

	EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
	should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
	    flags);

	return (should_abort);
}
5025e111ed8SAndrew Rybchenko 
5035e111ed8SAndrew Rybchenko #endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */
5045e111ed8SAndrew Rybchenko 
5055e111ed8SAndrew Rybchenko static	__checkReturn	boolean_t
5065e111ed8SAndrew Rybchenko ef10_ev_rx(
5075e111ed8SAndrew Rybchenko 	__in		efx_evq_t *eep,
5085e111ed8SAndrew Rybchenko 	__in		efx_qword_t *eqp,
5095e111ed8SAndrew Rybchenko 	__in		const efx_ev_callbacks_t *eecp,
5105e111ed8SAndrew Rybchenko 	__in_opt	void *arg)
5115e111ed8SAndrew Rybchenko {
5125e111ed8SAndrew Rybchenko 	efx_nic_t *enp = eep->ee_enp;
5135e111ed8SAndrew Rybchenko 	uint32_t size;
5145e111ed8SAndrew Rybchenko 	uint32_t label;
5155e111ed8SAndrew Rybchenko 	uint32_t mac_class;
5165e111ed8SAndrew Rybchenko 	uint32_t eth_tag_class;
5175e111ed8SAndrew Rybchenko 	uint32_t l3_class;
5185e111ed8SAndrew Rybchenko 	uint32_t l4_class;
5195e111ed8SAndrew Rybchenko 	uint32_t next_read_lbits;
5205e111ed8SAndrew Rybchenko 	uint16_t flags;
5215e111ed8SAndrew Rybchenko 	boolean_t cont;
5225e111ed8SAndrew Rybchenko 	boolean_t should_abort;
5235e111ed8SAndrew Rybchenko 	efx_evq_rxq_state_t *eersp;
5245e111ed8SAndrew Rybchenko 	unsigned int desc_count;
5255e111ed8SAndrew Rybchenko 	unsigned int last_used_id;
5265e111ed8SAndrew Rybchenko 
5275e111ed8SAndrew Rybchenko 	EFX_EV_QSTAT_INCR(eep, EV_RX);
5285e111ed8SAndrew Rybchenko 
5295e111ed8SAndrew Rybchenko 	/* Discard events after RXQ/TXQ errors, or hardware not available */
5305e111ed8SAndrew Rybchenko 	if (enp->en_reset_flags &
5315e111ed8SAndrew Rybchenko 	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
5325e111ed8SAndrew Rybchenko 		return (B_FALSE);
5335e111ed8SAndrew Rybchenko 
5345e111ed8SAndrew Rybchenko 	/* Basic packet information */
5355e111ed8SAndrew Rybchenko 	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
5365e111ed8SAndrew Rybchenko 	eersp = &eep->ee_rxq_state[label];
5375e111ed8SAndrew Rybchenko 
5385e111ed8SAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
5395e111ed8SAndrew Rybchenko 	/*
5405e111ed8SAndrew Rybchenko 	 * Packed stream events are very different,
5415e111ed8SAndrew Rybchenko 	 * so handle them separately
5425e111ed8SAndrew Rybchenko 	 */
5435e111ed8SAndrew Rybchenko 	if (eersp->eers_rx_packed_stream)
5445e111ed8SAndrew Rybchenko 		return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
5455e111ed8SAndrew Rybchenko #endif
5465e111ed8SAndrew Rybchenko 
5475e111ed8SAndrew Rybchenko 	size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
5485e111ed8SAndrew Rybchenko 	cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
5495e111ed8SAndrew Rybchenko 	next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
5505e111ed8SAndrew Rybchenko 	eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
5515e111ed8SAndrew Rybchenko 	mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
5525e111ed8SAndrew Rybchenko 	l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
5535e111ed8SAndrew Rybchenko 
5545e111ed8SAndrew Rybchenko 	/*
5555e111ed8SAndrew Rybchenko 	 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
5565e111ed8SAndrew Rybchenko 	 * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
5575e111ed8SAndrew Rybchenko 	 * and values for all EF10 controllers.
5585e111ed8SAndrew Rybchenko 	 */
5595e111ed8SAndrew Rybchenko 	EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN);
5605e111ed8SAndrew Rybchenko 	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
5615e111ed8SAndrew Rybchenko 	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
5625e111ed8SAndrew Rybchenko 	EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN);
5635e111ed8SAndrew Rybchenko 
5645e111ed8SAndrew Rybchenko 	l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS);
5655e111ed8SAndrew Rybchenko 
5665e111ed8SAndrew Rybchenko 	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
5675e111ed8SAndrew Rybchenko 		/* Drop this event */
5685e111ed8SAndrew Rybchenko 		return (B_FALSE);
5695e111ed8SAndrew Rybchenko 	}
5705e111ed8SAndrew Rybchenko 	flags = 0;
5715e111ed8SAndrew Rybchenko 
5725e111ed8SAndrew Rybchenko 	if (cont != 0) {
5735e111ed8SAndrew Rybchenko 		/*
5745e111ed8SAndrew Rybchenko 		 * This may be part of a scattered frame, or it may be a
5755e111ed8SAndrew Rybchenko 		 * truncated frame if scatter is disabled on this RXQ.
5765e111ed8SAndrew Rybchenko 		 * Overlength frames can be received if e.g. a VF is configured
5775e111ed8SAndrew Rybchenko 		 * for 1500 MTU but connected to a port set to 9000 MTU
5785e111ed8SAndrew Rybchenko 		 * (see bug56567).
5795e111ed8SAndrew Rybchenko 		 * FIXME: There is not yet any driver that supports scatter on
5805e111ed8SAndrew Rybchenko 		 * Huntington.  Scatter support is required for OSX.
5815e111ed8SAndrew Rybchenko 		 */
5825e111ed8SAndrew Rybchenko 		flags |= EFX_PKT_CONT;
5835e111ed8SAndrew Rybchenko 	}
5845e111ed8SAndrew Rybchenko 
5855e111ed8SAndrew Rybchenko 	if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
5865e111ed8SAndrew Rybchenko 		flags |= EFX_PKT_UNICAST;
5875e111ed8SAndrew Rybchenko 
5885e111ed8SAndrew Rybchenko 	/*
5895e111ed8SAndrew Rybchenko 	 * Increment the count of descriptors read.
5905e111ed8SAndrew Rybchenko 	 *
5915e111ed8SAndrew Rybchenko 	 * In NO_CONT_EV mode, RX_DSC_PTR_LBITS is actually a packet count, but
5925e111ed8SAndrew Rybchenko 	 * when scatter is disabled, there is only one descriptor per packet and
5935e111ed8SAndrew Rybchenko 	 * so it can be treated the same.
5945e111ed8SAndrew Rybchenko 	 *
5955e111ed8SAndrew Rybchenko 	 * TODO: Support scatter in NO_CONT_EV mode.
5965e111ed8SAndrew Rybchenko 	 */
5975e111ed8SAndrew Rybchenko 	desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
5985e111ed8SAndrew Rybchenko 	    EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
5995e111ed8SAndrew Rybchenko 	eersp->eers_rx_read_ptr += desc_count;
6005e111ed8SAndrew Rybchenko 
6015e111ed8SAndrew Rybchenko 	/* Calculate the index of the last descriptor consumed */
6025e111ed8SAndrew Rybchenko 	last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;
6035e111ed8SAndrew Rybchenko 
6045e111ed8SAndrew Rybchenko 	if (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV) {
6055e111ed8SAndrew Rybchenko 		if (desc_count > 1)
6065e111ed8SAndrew Rybchenko 			EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
6075e111ed8SAndrew Rybchenko 
6085e111ed8SAndrew Rybchenko 		/* Always read the length from the prefix in NO_CONT_EV mode. */
6095e111ed8SAndrew Rybchenko 		flags |= EFX_PKT_PREFIX_LEN;
6105e111ed8SAndrew Rybchenko 
6115e111ed8SAndrew Rybchenko 		/*
6125e111ed8SAndrew Rybchenko 		 * Check for an aborted scatter, signalled by the ABORT bit in
6135e111ed8SAndrew Rybchenko 		 * NO_CONT_EV mode. The ABORT bit was not used before NO_CONT_EV
6145e111ed8SAndrew Rybchenko 		 * mode was added as it was broken in Huntington silicon.
6155e111ed8SAndrew Rybchenko 		 */
6165e111ed8SAndrew Rybchenko 		if (EFX_QWORD_FIELD(*eqp, ESF_EZ_RX_ABORT) != 0) {
6175e111ed8SAndrew Rybchenko 			flags |= EFX_DISCARD;
6185e111ed8SAndrew Rybchenko 			goto deliver;
6195e111ed8SAndrew Rybchenko 		}
6205e111ed8SAndrew Rybchenko 	} else if (desc_count > 1) {
6215e111ed8SAndrew Rybchenko 		/*
6225e111ed8SAndrew Rybchenko 		 * FIXME: add error checking to make sure this a batched event.
6235e111ed8SAndrew Rybchenko 		 * This could also be an aborted scatter, see Bug36629.
6245e111ed8SAndrew Rybchenko 		 */
6255e111ed8SAndrew Rybchenko 		EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
6265e111ed8SAndrew Rybchenko 		flags |= EFX_PKT_PREFIX_LEN;
6275e111ed8SAndrew Rybchenko 	}
6285e111ed8SAndrew Rybchenko 
6295e111ed8SAndrew Rybchenko 	/* Check for errors that invalidate checksum and L3/L4 fields */
6305e111ed8SAndrew Rybchenko 	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
6315e111ed8SAndrew Rybchenko 		/* RX frame truncated */
6325e111ed8SAndrew Rybchenko 		EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
6335e111ed8SAndrew Rybchenko 		flags |= EFX_DISCARD;
6345e111ed8SAndrew Rybchenko 		goto deliver;
6355e111ed8SAndrew Rybchenko 	}
6365e111ed8SAndrew Rybchenko 	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
6375e111ed8SAndrew Rybchenko 		/* Bad Ethernet frame CRC */
6385e111ed8SAndrew Rybchenko 		EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
6395e111ed8SAndrew Rybchenko 		flags |= EFX_DISCARD;
6405e111ed8SAndrew Rybchenko 		goto deliver;
6415e111ed8SAndrew Rybchenko 	}
6425e111ed8SAndrew Rybchenko 	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
6435e111ed8SAndrew Rybchenko 		/*
6445e111ed8SAndrew Rybchenko 		 * Hardware parse failed, due to malformed headers
6455e111ed8SAndrew Rybchenko 		 * or headers that are too long for the parser.
6465e111ed8SAndrew Rybchenko 		 * Headers and checksums must be validated by the host.
6475e111ed8SAndrew Rybchenko 		 */
6485e111ed8SAndrew Rybchenko 		EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE);
6495e111ed8SAndrew Rybchenko 		goto deliver;
6505e111ed8SAndrew Rybchenko 	}
6515e111ed8SAndrew Rybchenko 
6525e111ed8SAndrew Rybchenko 	if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
6535e111ed8SAndrew Rybchenko 	    (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
6545e111ed8SAndrew Rybchenko 		flags |= EFX_PKT_VLAN_TAGGED;
6555e111ed8SAndrew Rybchenko 	}
6565e111ed8SAndrew Rybchenko 
6575e111ed8SAndrew Rybchenko 	switch (l3_class) {
6585e111ed8SAndrew Rybchenko 	case ESE_DZ_L3_CLASS_IP4:
6595e111ed8SAndrew Rybchenko 	case ESE_DZ_L3_CLASS_IP4_FRAG:
6605e111ed8SAndrew Rybchenko 		flags |= EFX_PKT_IPV4;
6615e111ed8SAndrew Rybchenko 		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
6625e111ed8SAndrew Rybchenko 			EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
6635e111ed8SAndrew Rybchenko 		} else {
6645e111ed8SAndrew Rybchenko 			flags |= EFX_CKSUM_IPV4;
6655e111ed8SAndrew Rybchenko 		}
6665e111ed8SAndrew Rybchenko 
6675e111ed8SAndrew Rybchenko 		/*
6685e111ed8SAndrew Rybchenko 		 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
6695e111ed8SAndrew Rybchenko 		 * only 2 bits wide on Medford2. Check it is safe to use the
6705e111ed8SAndrew Rybchenko 		 * Medford2 field and values for all EF10 controllers.
6715e111ed8SAndrew Rybchenko 		 */
6725e111ed8SAndrew Rybchenko 		EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
6735e111ed8SAndrew Rybchenko 		    ESF_DE_RX_L4_CLASS_LBN);
6745e111ed8SAndrew Rybchenko 		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
6755e111ed8SAndrew Rybchenko 		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
6765e111ed8SAndrew Rybchenko 		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
6775e111ed8SAndrew Rybchenko 		    ESE_DE_L4_CLASS_UNKNOWN);
6785e111ed8SAndrew Rybchenko 
6795e111ed8SAndrew Rybchenko 		if (l4_class == ESE_FZ_L4_CLASS_TCP) {
6805e111ed8SAndrew Rybchenko 			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
6815e111ed8SAndrew Rybchenko 			flags |= EFX_PKT_TCP;
6825e111ed8SAndrew Rybchenko 		} else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
6835e111ed8SAndrew Rybchenko 			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
6845e111ed8SAndrew Rybchenko 			flags |= EFX_PKT_UDP;
6855e111ed8SAndrew Rybchenko 		} else {
6865e111ed8SAndrew Rybchenko 			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
6875e111ed8SAndrew Rybchenko 		}
6885e111ed8SAndrew Rybchenko 		break;
6895e111ed8SAndrew Rybchenko 
6905e111ed8SAndrew Rybchenko 	case ESE_DZ_L3_CLASS_IP6:
6915e111ed8SAndrew Rybchenko 	case ESE_DZ_L3_CLASS_IP6_FRAG:
6925e111ed8SAndrew Rybchenko 		flags |= EFX_PKT_IPV6;
6935e111ed8SAndrew Rybchenko 
6945e111ed8SAndrew Rybchenko 		/*
6955e111ed8SAndrew Rybchenko 		 * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
6965e111ed8SAndrew Rybchenko 		 * only 2 bits wide on Medford2. Check it is safe to use the
6975e111ed8SAndrew Rybchenko 		 * Medford2 field and values for all EF10 controllers.
6985e111ed8SAndrew Rybchenko 		 */
6995e111ed8SAndrew Rybchenko 		EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
7005e111ed8SAndrew Rybchenko 		    ESF_DE_RX_L4_CLASS_LBN);
7015e111ed8SAndrew Rybchenko 		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
7025e111ed8SAndrew Rybchenko 		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
7035e111ed8SAndrew Rybchenko 		EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
7045e111ed8SAndrew Rybchenko 		    ESE_DE_L4_CLASS_UNKNOWN);
7055e111ed8SAndrew Rybchenko 
7065e111ed8SAndrew Rybchenko 		if (l4_class == ESE_FZ_L4_CLASS_TCP) {
7075e111ed8SAndrew Rybchenko 			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
7085e111ed8SAndrew Rybchenko 			flags |= EFX_PKT_TCP;
7095e111ed8SAndrew Rybchenko 		} else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
7105e111ed8SAndrew Rybchenko 			EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
7115e111ed8SAndrew Rybchenko 			flags |= EFX_PKT_UDP;
7125e111ed8SAndrew Rybchenko 		} else {
7135e111ed8SAndrew Rybchenko 			EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
7145e111ed8SAndrew Rybchenko 		}
7155e111ed8SAndrew Rybchenko 		break;
7165e111ed8SAndrew Rybchenko 
7175e111ed8SAndrew Rybchenko 	default:
7185e111ed8SAndrew Rybchenko 		EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
7195e111ed8SAndrew Rybchenko 		break;
7205e111ed8SAndrew Rybchenko 	}
7215e111ed8SAndrew Rybchenko 
7225e111ed8SAndrew Rybchenko 	if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
7235e111ed8SAndrew Rybchenko 		if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
7245e111ed8SAndrew Rybchenko 			EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
7255e111ed8SAndrew Rybchenko 		} else {
7265e111ed8SAndrew Rybchenko 			flags |= EFX_CKSUM_TCPUDP;
7275e111ed8SAndrew Rybchenko 		}
7285e111ed8SAndrew Rybchenko 	}
7295e111ed8SAndrew Rybchenko 
7305e111ed8SAndrew Rybchenko deliver:
7315e111ed8SAndrew Rybchenko 	/* If we're not discarding the packet then it is ok */
7325e111ed8SAndrew Rybchenko 	if (~flags & EFX_DISCARD)
7335e111ed8SAndrew Rybchenko 		EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
7345e111ed8SAndrew Rybchenko 
7355e111ed8SAndrew Rybchenko 	EFSYS_ASSERT(eecp->eec_rx != NULL);
7365e111ed8SAndrew Rybchenko 	should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);
7375e111ed8SAndrew Rybchenko 
7385e111ed8SAndrew Rybchenko 	return (should_abort);
7395e111ed8SAndrew Rybchenko }
7405e111ed8SAndrew Rybchenko 
7415e111ed8SAndrew Rybchenko static	__checkReturn	boolean_t
7425e111ed8SAndrew Rybchenko ef10_ev_tx(
7435e111ed8SAndrew Rybchenko 	__in		efx_evq_t *eep,
7445e111ed8SAndrew Rybchenko 	__in		efx_qword_t *eqp,
7455e111ed8SAndrew Rybchenko 	__in		const efx_ev_callbacks_t *eecp,
7465e111ed8SAndrew Rybchenko 	__in_opt	void *arg)
7475e111ed8SAndrew Rybchenko {
7485e111ed8SAndrew Rybchenko 	efx_nic_t *enp = eep->ee_enp;
7495e111ed8SAndrew Rybchenko 	uint32_t id;
7505e111ed8SAndrew Rybchenko 	uint32_t label;
7515e111ed8SAndrew Rybchenko 	boolean_t should_abort;
7525e111ed8SAndrew Rybchenko 
7535e111ed8SAndrew Rybchenko 	EFX_EV_QSTAT_INCR(eep, EV_TX);
7545e111ed8SAndrew Rybchenko 
7555e111ed8SAndrew Rybchenko 	/* Discard events after RXQ/TXQ errors, or hardware not available */
7565e111ed8SAndrew Rybchenko 	if (enp->en_reset_flags &
7575e111ed8SAndrew Rybchenko 	    (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
7585e111ed8SAndrew Rybchenko 		return (B_FALSE);
7595e111ed8SAndrew Rybchenko 
7605e111ed8SAndrew Rybchenko 	if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
7615e111ed8SAndrew Rybchenko 		/* Drop this event */
7625e111ed8SAndrew Rybchenko 		return (B_FALSE);
7635e111ed8SAndrew Rybchenko 	}
7645e111ed8SAndrew Rybchenko 
7655e111ed8SAndrew Rybchenko 	/* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
7665e111ed8SAndrew Rybchenko 	id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
7675e111ed8SAndrew Rybchenko 	label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);
7685e111ed8SAndrew Rybchenko 
7695e111ed8SAndrew Rybchenko 	EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
7705e111ed8SAndrew Rybchenko 
7715e111ed8SAndrew Rybchenko 	EFSYS_ASSERT(eecp->eec_tx != NULL);
7725e111ed8SAndrew Rybchenko 	should_abort = eecp->eec_tx(arg, label, id);
7735e111ed8SAndrew Rybchenko 
7745e111ed8SAndrew Rybchenko 	return (should_abort);
7755e111ed8SAndrew Rybchenko }
7765e111ed8SAndrew Rybchenko 
7775e111ed8SAndrew Rybchenko static	__checkReturn	boolean_t
7785e111ed8SAndrew Rybchenko ef10_ev_driver(
7795e111ed8SAndrew Rybchenko 	__in		efx_evq_t *eep,
7805e111ed8SAndrew Rybchenko 	__in		efx_qword_t *eqp,
7815e111ed8SAndrew Rybchenko 	__in		const efx_ev_callbacks_t *eecp,
7825e111ed8SAndrew Rybchenko 	__in_opt	void *arg)
7835e111ed8SAndrew Rybchenko {
7845e111ed8SAndrew Rybchenko 	unsigned int code;
7855e111ed8SAndrew Rybchenko 	boolean_t should_abort;
7865e111ed8SAndrew Rybchenko 
7875e111ed8SAndrew Rybchenko 	EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
7885e111ed8SAndrew Rybchenko 	should_abort = B_FALSE;
7895e111ed8SAndrew Rybchenko 
7905e111ed8SAndrew Rybchenko 	code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
7915e111ed8SAndrew Rybchenko 	switch (code) {
7925e111ed8SAndrew Rybchenko 	case ESE_DZ_DRV_TIMER_EV: {
7935e111ed8SAndrew Rybchenko 		uint32_t id;
7945e111ed8SAndrew Rybchenko 
7955e111ed8SAndrew Rybchenko 		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);
7965e111ed8SAndrew Rybchenko 
7975e111ed8SAndrew Rybchenko 		EFSYS_ASSERT(eecp->eec_timer != NULL);
7985e111ed8SAndrew Rybchenko 		should_abort = eecp->eec_timer(arg, id);
7995e111ed8SAndrew Rybchenko 		break;
8005e111ed8SAndrew Rybchenko 	}
8015e111ed8SAndrew Rybchenko 
8025e111ed8SAndrew Rybchenko 	case ESE_DZ_DRV_WAKE_UP_EV: {
8035e111ed8SAndrew Rybchenko 		uint32_t id;
8045e111ed8SAndrew Rybchenko 
8055e111ed8SAndrew Rybchenko 		id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);
8065e111ed8SAndrew Rybchenko 
8075e111ed8SAndrew Rybchenko 		EFSYS_ASSERT(eecp->eec_wake_up != NULL);
8085e111ed8SAndrew Rybchenko 		should_abort = eecp->eec_wake_up(arg, id);
8095e111ed8SAndrew Rybchenko 		break;
8105e111ed8SAndrew Rybchenko 	}
8115e111ed8SAndrew Rybchenko 
8125e111ed8SAndrew Rybchenko 	case ESE_DZ_DRV_START_UP_EV:
8135e111ed8SAndrew Rybchenko 		EFSYS_ASSERT(eecp->eec_initialized != NULL);
8145e111ed8SAndrew Rybchenko 		should_abort = eecp->eec_initialized(arg);
8155e111ed8SAndrew Rybchenko 		break;
8165e111ed8SAndrew Rybchenko 
8175e111ed8SAndrew Rybchenko 	default:
8185e111ed8SAndrew Rybchenko 		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
8195e111ed8SAndrew Rybchenko 		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
8205e111ed8SAndrew Rybchenko 		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
8215e111ed8SAndrew Rybchenko 		break;
8225e111ed8SAndrew Rybchenko 	}
8235e111ed8SAndrew Rybchenko 
8245e111ed8SAndrew Rybchenko 	return (should_abort);
8255e111ed8SAndrew Rybchenko }
8265e111ed8SAndrew Rybchenko 
8275e111ed8SAndrew Rybchenko static	__checkReturn	boolean_t
8285e111ed8SAndrew Rybchenko ef10_ev_drv_gen(
8295e111ed8SAndrew Rybchenko 	__in		efx_evq_t *eep,
8305e111ed8SAndrew Rybchenko 	__in		efx_qword_t *eqp,
8315e111ed8SAndrew Rybchenko 	__in		const efx_ev_callbacks_t *eecp,
8325e111ed8SAndrew Rybchenko 	__in_opt	void *arg)
8335e111ed8SAndrew Rybchenko {
8345e111ed8SAndrew Rybchenko 	uint32_t data;
8355e111ed8SAndrew Rybchenko 	boolean_t should_abort;
8365e111ed8SAndrew Rybchenko 
8375e111ed8SAndrew Rybchenko 	EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
8385e111ed8SAndrew Rybchenko 	should_abort = B_FALSE;
8395e111ed8SAndrew Rybchenko 
8405e111ed8SAndrew Rybchenko 	data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
8415e111ed8SAndrew Rybchenko 	if (data >= ((uint32_t)1 << 16)) {
8425e111ed8SAndrew Rybchenko 		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
8435e111ed8SAndrew Rybchenko 		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
8445e111ed8SAndrew Rybchenko 		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
8455e111ed8SAndrew Rybchenko 
8465e111ed8SAndrew Rybchenko 		return (B_TRUE);
8475e111ed8SAndrew Rybchenko 	}
8485e111ed8SAndrew Rybchenko 
8495e111ed8SAndrew Rybchenko 	EFSYS_ASSERT(eecp->eec_software != NULL);
8505e111ed8SAndrew Rybchenko 	should_abort = eecp->eec_software(arg, (uint16_t)data);
8515e111ed8SAndrew Rybchenko 
8525e111ed8SAndrew Rybchenko 	return (should_abort);
8535e111ed8SAndrew Rybchenko }
8545e111ed8SAndrew Rybchenko 
8559edb8ee3SAndrew Rybchenko #endif	/* EFX_OPTS_EF10() */
8569edb8ee3SAndrew Rybchenko 
8579edb8ee3SAndrew Rybchenko #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
8589edb8ee3SAndrew Rybchenko 
8599edb8ee3SAndrew Rybchenko 	__checkReturn	boolean_t
8605e111ed8SAndrew Rybchenko ef10_ev_mcdi(
8615e111ed8SAndrew Rybchenko 	__in		efx_evq_t *eep,
8625e111ed8SAndrew Rybchenko 	__in		efx_qword_t *eqp,
8635e111ed8SAndrew Rybchenko 	__in		const efx_ev_callbacks_t *eecp,
8645e111ed8SAndrew Rybchenko 	__in_opt	void *arg)
8655e111ed8SAndrew Rybchenko {
8665e111ed8SAndrew Rybchenko 	efx_nic_t *enp = eep->ee_enp;
8675e111ed8SAndrew Rybchenko 	unsigned int code;
8685e111ed8SAndrew Rybchenko 	boolean_t should_abort = B_FALSE;
8695e111ed8SAndrew Rybchenko 
8705e111ed8SAndrew Rybchenko 	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);
8715e111ed8SAndrew Rybchenko 
8725e111ed8SAndrew Rybchenko 	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
8735e111ed8SAndrew Rybchenko 	switch (code) {
8745e111ed8SAndrew Rybchenko 	case MCDI_EVENT_CODE_BADSSERT:
8755e111ed8SAndrew Rybchenko 		efx_mcdi_ev_death(enp, EINTR);
8765e111ed8SAndrew Rybchenko 		break;
8775e111ed8SAndrew Rybchenko 
8785e111ed8SAndrew Rybchenko 	case MCDI_EVENT_CODE_CMDDONE:
8795e111ed8SAndrew Rybchenko 		efx_mcdi_ev_cpl(enp,
8805e111ed8SAndrew Rybchenko 		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
8815e111ed8SAndrew Rybchenko 		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
8825e111ed8SAndrew Rybchenko 		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
8835e111ed8SAndrew Rybchenko 		break;
8845e111ed8SAndrew Rybchenko 
8855e111ed8SAndrew Rybchenko #if EFSYS_OPT_MCDI_PROXY_AUTH
8865e111ed8SAndrew Rybchenko 	case MCDI_EVENT_CODE_PROXY_RESPONSE:
8875e111ed8SAndrew Rybchenko 		/*
8885e111ed8SAndrew Rybchenko 		 * This event notifies a function that an authorization request
8895e111ed8SAndrew Rybchenko 		 * has been processed. If the request was authorized then the
8905e111ed8SAndrew Rybchenko 		 * function can now re-send the original MCDI request.
8915e111ed8SAndrew Rybchenko 		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
8925e111ed8SAndrew Rybchenko 		 */
8935e111ed8SAndrew Rybchenko 		efx_mcdi_ev_proxy_response(enp,
8945e111ed8SAndrew Rybchenko 		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
8955e111ed8SAndrew Rybchenko 		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
8965e111ed8SAndrew Rybchenko 		break;
8975e111ed8SAndrew Rybchenko #endif /* EFSYS_OPT_MCDI_PROXY_AUTH */
8985e111ed8SAndrew Rybchenko 
8995e111ed8SAndrew Rybchenko #if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
9005e111ed8SAndrew Rybchenko 	case MCDI_EVENT_CODE_PROXY_REQUEST:
9015e111ed8SAndrew Rybchenko 		efx_mcdi_ev_proxy_request(enp,
9025e111ed8SAndrew Rybchenko 			MCDI_EV_FIELD(eqp, PROXY_REQUEST_BUFF_INDEX));
9035e111ed8SAndrew Rybchenko 		break;
9045e111ed8SAndrew Rybchenko #endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */
9055e111ed8SAndrew Rybchenko 
9065e111ed8SAndrew Rybchenko 	case MCDI_EVENT_CODE_LINKCHANGE: {
9075e111ed8SAndrew Rybchenko 		efx_link_mode_t link_mode;
9085e111ed8SAndrew Rybchenko 
9095e111ed8SAndrew Rybchenko 		ef10_phy_link_ev(enp, eqp, &link_mode);
9105e111ed8SAndrew Rybchenko 		should_abort = eecp->eec_link_change(arg, link_mode);
9115e111ed8SAndrew Rybchenko 		break;
9125e111ed8SAndrew Rybchenko 	}
9135e111ed8SAndrew Rybchenko 
9145e111ed8SAndrew Rybchenko 	case MCDI_EVENT_CODE_SENSOREVT: {
9155e111ed8SAndrew Rybchenko #if EFSYS_OPT_MON_STATS
9165e111ed8SAndrew Rybchenko 		efx_mon_stat_t id;
9175e111ed8SAndrew Rybchenko 		efx_mon_stat_value_t value;
9185e111ed8SAndrew Rybchenko 		efx_rc_t rc;
9195e111ed8SAndrew Rybchenko 
9205e111ed8SAndrew Rybchenko 		/* Decode monitor stat for MCDI sensor (if supported) */
9215e111ed8SAndrew Rybchenko 		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
9225e111ed8SAndrew Rybchenko 			/* Report monitor stat change */
9235e111ed8SAndrew Rybchenko 			should_abort = eecp->eec_monitor(arg, id, value);
9245e111ed8SAndrew Rybchenko 		} else if (rc == ENOTSUP) {
9255e111ed8SAndrew Rybchenko 			should_abort = eecp->eec_exception(arg,
9265e111ed8SAndrew Rybchenko 				EFX_EXCEPTION_UNKNOWN_SENSOREVT,
9275e111ed8SAndrew Rybchenko 				MCDI_EV_FIELD(eqp, DATA));
9285e111ed8SAndrew Rybchenko 		} else {
9295e111ed8SAndrew Rybchenko 			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
9305e111ed8SAndrew Rybchenko 		}
9315e111ed8SAndrew Rybchenko #endif
9325e111ed8SAndrew Rybchenko 		break;
9335e111ed8SAndrew Rybchenko 	}
9345e111ed8SAndrew Rybchenko 
9355e111ed8SAndrew Rybchenko 	case MCDI_EVENT_CODE_SCHEDERR:
9365e111ed8SAndrew Rybchenko 		/* Informational only */
9375e111ed8SAndrew Rybchenko 		break;
9385e111ed8SAndrew Rybchenko 
9395e111ed8SAndrew Rybchenko 	case MCDI_EVENT_CODE_REBOOT:
9405e111ed8SAndrew Rybchenko 		/* Falcon/Siena only (should not been seen with Huntington). */
9415e111ed8SAndrew Rybchenko 		efx_mcdi_ev_death(enp, EIO);
9425e111ed8SAndrew Rybchenko 		break;
9435e111ed8SAndrew Rybchenko 
9445e111ed8SAndrew Rybchenko 	case MCDI_EVENT_CODE_MC_REBOOT:
9455e111ed8SAndrew Rybchenko 		/* MC_REBOOT event is used for Huntington (EF10) and later. */
9465e111ed8SAndrew Rybchenko 		efx_mcdi_ev_death(enp, EIO);
9475e111ed8SAndrew Rybchenko 		break;
9485e111ed8SAndrew Rybchenko 
9495e111ed8SAndrew Rybchenko 	case MCDI_EVENT_CODE_MAC_STATS_DMA:
9505e111ed8SAndrew Rybchenko #if EFSYS_OPT_MAC_STATS
9515e111ed8SAndrew Rybchenko 		if (eecp->eec_mac_stats != NULL) {
9525e111ed8SAndrew Rybchenko 			eecp->eec_mac_stats(arg,
9535e111ed8SAndrew Rybchenko 			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
9545e111ed8SAndrew Rybchenko 		}
9555e111ed8SAndrew Rybchenko #endif
9565e111ed8SAndrew Rybchenko 		break;
9575e111ed8SAndrew Rybchenko 
9585e111ed8SAndrew Rybchenko 	case MCDI_EVENT_CODE_FWALERT: {
9595e111ed8SAndrew Rybchenko 		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);
9605e111ed8SAndrew Rybchenko 
9615e111ed8SAndrew Rybchenko 		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
9625e111ed8SAndrew Rybchenko 			should_abort = eecp->eec_exception(arg,
9635e111ed8SAndrew Rybchenko 				EFX_EXCEPTION_FWALERT_SRAM,
9645e111ed8SAndrew Rybchenko 				MCDI_EV_FIELD(eqp, FWALERT_DATA));
9655e111ed8SAndrew Rybchenko 		else
9665e111ed8SAndrew Rybchenko 			should_abort = eecp->eec_exception(arg,
9675e111ed8SAndrew Rybchenko 				EFX_EXCEPTION_UNKNOWN_FWALERT,
9685e111ed8SAndrew Rybchenko 				MCDI_EV_FIELD(eqp, DATA));
9695e111ed8SAndrew Rybchenko 		break;
9705e111ed8SAndrew Rybchenko 	}
9715e111ed8SAndrew Rybchenko 
9725e111ed8SAndrew Rybchenko 	case MCDI_EVENT_CODE_TX_ERR: {
9735e111ed8SAndrew Rybchenko 		/*
9745e111ed8SAndrew Rybchenko 		 * After a TXQ error is detected, firmware sends a TX_ERR event.
9755e111ed8SAndrew Rybchenko 		 * This may be followed by TX completions (which we discard),
9765e111ed8SAndrew Rybchenko 		 * and then finally by a TX_FLUSH event. Firmware destroys the
9775e111ed8SAndrew Rybchenko 		 * TXQ automatically after sending the TX_FLUSH event.
9785e111ed8SAndrew Rybchenko 		 */
9795e111ed8SAndrew Rybchenko 		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;
9805e111ed8SAndrew Rybchenko 
9815e111ed8SAndrew Rybchenko 		EFSYS_PROBE2(tx_descq_err,
9825e111ed8SAndrew Rybchenko 			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
9835e111ed8SAndrew Rybchenko 			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
9845e111ed8SAndrew Rybchenko 
9855e111ed8SAndrew Rybchenko 		/* Inform the driver that a reset is required. */
9865e111ed8SAndrew Rybchenko 		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
9875e111ed8SAndrew Rybchenko 		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
9885e111ed8SAndrew Rybchenko 		break;
9895e111ed8SAndrew Rybchenko 	}
9905e111ed8SAndrew Rybchenko 
9915e111ed8SAndrew Rybchenko 	case MCDI_EVENT_CODE_TX_FLUSH: {
9925e111ed8SAndrew Rybchenko 		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);
9935e111ed8SAndrew Rybchenko 
9945e111ed8SAndrew Rybchenko 		/*
9955e111ed8SAndrew Rybchenko 		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
9965e111ed8SAndrew Rybchenko 		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
9975e111ed8SAndrew Rybchenko 		 * We want to wait for all completions, so ignore the events
9985e111ed8SAndrew Rybchenko 		 * with TX_FLUSH_TO_DRIVER.
9995e111ed8SAndrew Rybchenko 		 */
10005e111ed8SAndrew Rybchenko 		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
10015e111ed8SAndrew Rybchenko 			should_abort = B_FALSE;
10025e111ed8SAndrew Rybchenko 			break;
10035e111ed8SAndrew Rybchenko 		}
10045e111ed8SAndrew Rybchenko 
10055e111ed8SAndrew Rybchenko 		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);
10065e111ed8SAndrew Rybchenko 
10075e111ed8SAndrew Rybchenko 		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);
10085e111ed8SAndrew Rybchenko 
10095e111ed8SAndrew Rybchenko 		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
10105e111ed8SAndrew Rybchenko 		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
10115e111ed8SAndrew Rybchenko 		break;
10125e111ed8SAndrew Rybchenko 	}
10135e111ed8SAndrew Rybchenko 
10145e111ed8SAndrew Rybchenko 	case MCDI_EVENT_CODE_RX_ERR: {
10155e111ed8SAndrew Rybchenko 		/*
10165e111ed8SAndrew Rybchenko 		 * After an RXQ error is detected, firmware sends an RX_ERR
10175e111ed8SAndrew Rybchenko 		 * event. This may be followed by RX events (which we discard),
10185e111ed8SAndrew Rybchenko 		 * and then finally by an RX_FLUSH event. Firmware destroys the
10195e111ed8SAndrew Rybchenko 		 * RXQ automatically after sending the RX_FLUSH event.
10205e111ed8SAndrew Rybchenko 		 */
10215e111ed8SAndrew Rybchenko 		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;
10225e111ed8SAndrew Rybchenko 
10235e111ed8SAndrew Rybchenko 		EFSYS_PROBE2(rx_descq_err,
10245e111ed8SAndrew Rybchenko 			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
10255e111ed8SAndrew Rybchenko 			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
10265e111ed8SAndrew Rybchenko 
10275e111ed8SAndrew Rybchenko 		/* Inform the driver that a reset is required. */
10285e111ed8SAndrew Rybchenko 		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
10295e111ed8SAndrew Rybchenko 		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
10305e111ed8SAndrew Rybchenko 		break;
10315e111ed8SAndrew Rybchenko 	}
10325e111ed8SAndrew Rybchenko 
10335e111ed8SAndrew Rybchenko 	case MCDI_EVENT_CODE_RX_FLUSH: {
10345e111ed8SAndrew Rybchenko 		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);
10355e111ed8SAndrew Rybchenko 
10365e111ed8SAndrew Rybchenko 		/*
10375e111ed8SAndrew Rybchenko 		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
10385e111ed8SAndrew Rybchenko 		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
10395e111ed8SAndrew Rybchenko 		 * We want to wait for all completions, so ignore the events
10405e111ed8SAndrew Rybchenko 		 * with RX_FLUSH_TO_DRIVER.
10415e111ed8SAndrew Rybchenko 		 */
10425e111ed8SAndrew Rybchenko 		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
10435e111ed8SAndrew Rybchenko 			should_abort = B_FALSE;
10445e111ed8SAndrew Rybchenko 			break;
10455e111ed8SAndrew Rybchenko 		}
10465e111ed8SAndrew Rybchenko 
10475e111ed8SAndrew Rybchenko 		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);
10485e111ed8SAndrew Rybchenko 
10495e111ed8SAndrew Rybchenko 		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);
10505e111ed8SAndrew Rybchenko 
10515e111ed8SAndrew Rybchenko 		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
10525e111ed8SAndrew Rybchenko 		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
10535e111ed8SAndrew Rybchenko 		break;
10545e111ed8SAndrew Rybchenko 	}
10555e111ed8SAndrew Rybchenko 
10565e111ed8SAndrew Rybchenko 	default:
10575e111ed8SAndrew Rybchenko 		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
10585e111ed8SAndrew Rybchenko 		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
10595e111ed8SAndrew Rybchenko 		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
10605e111ed8SAndrew Rybchenko 		break;
10615e111ed8SAndrew Rybchenko 	}
10625e111ed8SAndrew Rybchenko 
10635e111ed8SAndrew Rybchenko 	return (should_abort);
10645e111ed8SAndrew Rybchenko }
10655e111ed8SAndrew Rybchenko 
10669edb8ee3SAndrew Rybchenko #endif	/* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
10679edb8ee3SAndrew Rybchenko 
10689edb8ee3SAndrew Rybchenko #if EFX_OPTS_EF10()
10699edb8ee3SAndrew Rybchenko 
10705e111ed8SAndrew Rybchenko 		void
10715e111ed8SAndrew Rybchenko ef10_ev_rxlabel_init(
10725e111ed8SAndrew Rybchenko 	__in		efx_evq_t *eep,
10735e111ed8SAndrew Rybchenko 	__in		efx_rxq_t *erp,
10745e111ed8SAndrew Rybchenko 	__in		unsigned int label,
10755e111ed8SAndrew Rybchenko 	__in		efx_rxq_type_t type)
10765e111ed8SAndrew Rybchenko {
10775e111ed8SAndrew Rybchenko 	efx_evq_rxq_state_t *eersp;
10785e111ed8SAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
10795e111ed8SAndrew Rybchenko 	boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
10805e111ed8SAndrew Rybchenko 	boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER);
10815e111ed8SAndrew Rybchenko #endif
10825e111ed8SAndrew Rybchenko 
10835e111ed8SAndrew Rybchenko 	_NOTE(ARGUNUSED(type))
10845e111ed8SAndrew Rybchenko 	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
10855e111ed8SAndrew Rybchenko 	eersp = &eep->ee_rxq_state[label];
10865e111ed8SAndrew Rybchenko 
10875e111ed8SAndrew Rybchenko 	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);
10885e111ed8SAndrew Rybchenko 
10895e111ed8SAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM
10905e111ed8SAndrew Rybchenko 	/*
10915e111ed8SAndrew Rybchenko 	 * For packed stream modes, the very first event will
10925e111ed8SAndrew Rybchenko 	 * have a new buffer flag set, so it will be incremented,
10935e111ed8SAndrew Rybchenko 	 * yielding the correct pointer. That results in a simpler
10945e111ed8SAndrew Rybchenko 	 * code than trying to detect start-of-the-world condition
10955e111ed8SAndrew Rybchenko 	 * in the event handler.
10965e111ed8SAndrew Rybchenko 	 */
10975e111ed8SAndrew Rybchenko 	eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
10985e111ed8SAndrew Rybchenko #else
10995e111ed8SAndrew Rybchenko 	eersp->eers_rx_read_ptr = 0;
11005e111ed8SAndrew Rybchenko #endif
11015e111ed8SAndrew Rybchenko 	eersp->eers_rx_mask = erp->er_mask;
11025e111ed8SAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
11035e111ed8SAndrew Rybchenko 	eersp->eers_rx_stream_npackets = 0;
11045e111ed8SAndrew Rybchenko 	eersp->eers_rx_packed_stream = packed_stream || es_super_buffer;
11055e111ed8SAndrew Rybchenko #endif
11065e111ed8SAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM
11075e111ed8SAndrew Rybchenko 	if (packed_stream) {
11085e111ed8SAndrew Rybchenko 		eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
11095e111ed8SAndrew Rybchenko 		    EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
11105e111ed8SAndrew Rybchenko 		    EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
11115e111ed8SAndrew Rybchenko 		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
11125e111ed8SAndrew Rybchenko 		/*
11135e111ed8SAndrew Rybchenko 		 * A single credit is allocated to the queue when it is started.
11145e111ed8SAndrew Rybchenko 		 * It is immediately spent by the first packet which has NEW
11155e111ed8SAndrew Rybchenko 		 * BUFFER flag set, though, but still we shall take into
11165e111ed8SAndrew Rybchenko 		 * account, as to not wrap around the maximum number of credits
11175e111ed8SAndrew Rybchenko 		 * accidentally
11185e111ed8SAndrew Rybchenko 		 */
11195e111ed8SAndrew Rybchenko 		eersp->eers_rx_packed_stream_credits--;
11205e111ed8SAndrew Rybchenko 		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
11215e111ed8SAndrew Rybchenko 		    EFX_RX_PACKED_STREAM_MAX_CREDITS);
11225e111ed8SAndrew Rybchenko 	}
11235e111ed8SAndrew Rybchenko #endif
11245e111ed8SAndrew Rybchenko }
11255e111ed8SAndrew Rybchenko 
11265e111ed8SAndrew Rybchenko 		void
11275e111ed8SAndrew Rybchenko ef10_ev_rxlabel_fini(
11285e111ed8SAndrew Rybchenko 	__in		efx_evq_t *eep,
11295e111ed8SAndrew Rybchenko 	__in		unsigned int label)
11305e111ed8SAndrew Rybchenko {
11315e111ed8SAndrew Rybchenko 	efx_evq_rxq_state_t *eersp;
11325e111ed8SAndrew Rybchenko 
11335e111ed8SAndrew Rybchenko 	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
11345e111ed8SAndrew Rybchenko 	eersp = &eep->ee_rxq_state[label];
11355e111ed8SAndrew Rybchenko 
11365e111ed8SAndrew Rybchenko 	EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);
11375e111ed8SAndrew Rybchenko 
11385e111ed8SAndrew Rybchenko 	eersp->eers_rx_read_ptr = 0;
11395e111ed8SAndrew Rybchenko 	eersp->eers_rx_mask = 0;
11405e111ed8SAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
11415e111ed8SAndrew Rybchenko 	eersp->eers_rx_stream_npackets = 0;
11425e111ed8SAndrew Rybchenko 	eersp->eers_rx_packed_stream = B_FALSE;
11435e111ed8SAndrew Rybchenko #endif
11445e111ed8SAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM
11455e111ed8SAndrew Rybchenko 	eersp->eers_rx_packed_stream_credits = 0;
11465e111ed8SAndrew Rybchenko #endif
11475e111ed8SAndrew Rybchenko }
11485e111ed8SAndrew Rybchenko 
11495e111ed8SAndrew Rybchenko #endif	/* EFX_OPTS_EF10() */
1150