15e111ed8SAndrew Rybchenko /* SPDX-License-Identifier: BSD-3-Clause
25e111ed8SAndrew Rybchenko *
3672386c1SAndrew Rybchenko * Copyright(c) 2019-2021 Xilinx, Inc.
45e111ed8SAndrew Rybchenko * Copyright(c) 2012-2019 Solarflare Communications Inc.
55e111ed8SAndrew Rybchenko */
65e111ed8SAndrew Rybchenko
75e111ed8SAndrew Rybchenko #include "efx.h"
85e111ed8SAndrew Rybchenko #include "efx_impl.h"
95e111ed8SAndrew Rybchenko #if EFSYS_OPT_MON_STATS
105e111ed8SAndrew Rybchenko #include "mcdi_mon.h"
115e111ed8SAndrew Rybchenko #endif
125e111ed8SAndrew Rybchenko
135e111ed8SAndrew Rybchenko #if EFX_OPTS_EF10()
145e111ed8SAndrew Rybchenko
/*
 * A non-interrupting event queue requires an interrupting event queue to
 * refer to for wake-up events, even if wake-ups are never used.
 * The interrupting event queue referred to may even be a non-allocated one.
 */
205e111ed8SAndrew Rybchenko #define EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX (0)
215e111ed8SAndrew Rybchenko
225e111ed8SAndrew Rybchenko static __checkReturn boolean_t
235e111ed8SAndrew Rybchenko ef10_ev_rx(
245e111ed8SAndrew Rybchenko __in efx_evq_t *eep,
255e111ed8SAndrew Rybchenko __in efx_qword_t *eqp,
265e111ed8SAndrew Rybchenko __in const efx_ev_callbacks_t *eecp,
275e111ed8SAndrew Rybchenko __in_opt void *arg);
285e111ed8SAndrew Rybchenko
295e111ed8SAndrew Rybchenko static __checkReturn boolean_t
305e111ed8SAndrew Rybchenko ef10_ev_tx(
315e111ed8SAndrew Rybchenko __in efx_evq_t *eep,
325e111ed8SAndrew Rybchenko __in efx_qword_t *eqp,
335e111ed8SAndrew Rybchenko __in const efx_ev_callbacks_t *eecp,
345e111ed8SAndrew Rybchenko __in_opt void *arg);
355e111ed8SAndrew Rybchenko
365e111ed8SAndrew Rybchenko static __checkReturn boolean_t
375e111ed8SAndrew Rybchenko ef10_ev_driver(
385e111ed8SAndrew Rybchenko __in efx_evq_t *eep,
395e111ed8SAndrew Rybchenko __in efx_qword_t *eqp,
405e111ed8SAndrew Rybchenko __in const efx_ev_callbacks_t *eecp,
415e111ed8SAndrew Rybchenko __in_opt void *arg);
425e111ed8SAndrew Rybchenko
435e111ed8SAndrew Rybchenko static __checkReturn boolean_t
445e111ed8SAndrew Rybchenko ef10_ev_drv_gen(
455e111ed8SAndrew Rybchenko __in efx_evq_t *eep,
465e111ed8SAndrew Rybchenko __in efx_qword_t *eqp,
475e111ed8SAndrew Rybchenko __in const efx_ev_callbacks_t *eecp,
485e111ed8SAndrew Rybchenko __in_opt void *arg);
495e111ed8SAndrew Rybchenko
505e111ed8SAndrew Rybchenko
515e111ed8SAndrew Rybchenko static __checkReturn efx_rc_t
efx_mcdi_set_evq_tmr(__in efx_nic_t * enp,__in uint32_t instance,__in uint32_t mode,__in uint32_t timer_ns)525e111ed8SAndrew Rybchenko efx_mcdi_set_evq_tmr(
535e111ed8SAndrew Rybchenko __in efx_nic_t *enp,
545e111ed8SAndrew Rybchenko __in uint32_t instance,
555e111ed8SAndrew Rybchenko __in uint32_t mode,
565e111ed8SAndrew Rybchenko __in uint32_t timer_ns)
575e111ed8SAndrew Rybchenko {
585e111ed8SAndrew Rybchenko efx_mcdi_req_t req;
595e111ed8SAndrew Rybchenko EFX_MCDI_DECLARE_BUF(payload, MC_CMD_SET_EVQ_TMR_IN_LEN,
605e111ed8SAndrew Rybchenko MC_CMD_SET_EVQ_TMR_OUT_LEN);
615e111ed8SAndrew Rybchenko efx_rc_t rc;
625e111ed8SAndrew Rybchenko
635e111ed8SAndrew Rybchenko req.emr_cmd = MC_CMD_SET_EVQ_TMR;
645e111ed8SAndrew Rybchenko req.emr_in_buf = payload;
655e111ed8SAndrew Rybchenko req.emr_in_length = MC_CMD_SET_EVQ_TMR_IN_LEN;
665e111ed8SAndrew Rybchenko req.emr_out_buf = payload;
675e111ed8SAndrew Rybchenko req.emr_out_length = MC_CMD_SET_EVQ_TMR_OUT_LEN;
685e111ed8SAndrew Rybchenko
695e111ed8SAndrew Rybchenko MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_INSTANCE, instance);
705e111ed8SAndrew Rybchenko MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_LOAD_REQ_NS, timer_ns);
715e111ed8SAndrew Rybchenko MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_RELOAD_REQ_NS, timer_ns);
725e111ed8SAndrew Rybchenko MCDI_IN_SET_DWORD(req, SET_EVQ_TMR_IN_TMR_MODE, mode);
735e111ed8SAndrew Rybchenko
745e111ed8SAndrew Rybchenko efx_mcdi_execute(enp, &req);
755e111ed8SAndrew Rybchenko
765e111ed8SAndrew Rybchenko if (req.emr_rc != 0) {
775e111ed8SAndrew Rybchenko rc = req.emr_rc;
785e111ed8SAndrew Rybchenko goto fail1;
795e111ed8SAndrew Rybchenko }
805e111ed8SAndrew Rybchenko
815e111ed8SAndrew Rybchenko if (req.emr_out_length_used < MC_CMD_SET_EVQ_TMR_OUT_LEN) {
825e111ed8SAndrew Rybchenko rc = EMSGSIZE;
835e111ed8SAndrew Rybchenko goto fail2;
845e111ed8SAndrew Rybchenko }
855e111ed8SAndrew Rybchenko
865e111ed8SAndrew Rybchenko return (0);
875e111ed8SAndrew Rybchenko
885e111ed8SAndrew Rybchenko fail2:
895e111ed8SAndrew Rybchenko EFSYS_PROBE(fail2);
905e111ed8SAndrew Rybchenko fail1:
915e111ed8SAndrew Rybchenko EFSYS_PROBE1(fail1, efx_rc_t, rc);
925e111ed8SAndrew Rybchenko
935e111ed8SAndrew Rybchenko return (rc);
945e111ed8SAndrew Rybchenko }
955e111ed8SAndrew Rybchenko
965e111ed8SAndrew Rybchenko
	__checkReturn	efx_rc_t
ef10_ev_init(
	__in		efx_nic_t *enp)
{
	/* No EF10-specific event module state to initialise. */
	_NOTE(ARGUNUSED(enp))
	return (0);
}
1045e111ed8SAndrew Rybchenko
			void
ef10_ev_fini(
	__in		efx_nic_t *enp)
{
	/* No EF10-specific event module state to tear down. */
	_NOTE(ARGUNUSED(enp))
}
1115e111ed8SAndrew Rybchenko
1125e111ed8SAndrew Rybchenko __checkReturn efx_rc_t
ef10_ev_qcreate(__in efx_nic_t * enp,__in unsigned int index,__in efsys_mem_t * esmp,__in size_t ndescs,__in uint32_t id,__in uint32_t us,__in uint32_t flags,__in uint32_t irq,__in efx_evq_t * eep)1135e111ed8SAndrew Rybchenko ef10_ev_qcreate(
1145e111ed8SAndrew Rybchenko __in efx_nic_t *enp,
1155e111ed8SAndrew Rybchenko __in unsigned int index,
1165e111ed8SAndrew Rybchenko __in efsys_mem_t *esmp,
1175e111ed8SAndrew Rybchenko __in size_t ndescs,
1185e111ed8SAndrew Rybchenko __in uint32_t id,
1195e111ed8SAndrew Rybchenko __in uint32_t us,
1205e111ed8SAndrew Rybchenko __in uint32_t flags,
121aa6dc101SAndrew Rybchenko __in uint32_t irq,
1225e111ed8SAndrew Rybchenko __in efx_evq_t *eep)
1235e111ed8SAndrew Rybchenko {
1245e111ed8SAndrew Rybchenko efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
1253dee345aSAndrew Rybchenko uint32_t target_evq = 0;
1265e111ed8SAndrew Rybchenko efx_rc_t rc;
1278aad1149SAndrew Rybchenko boolean_t low_latency;
1285e111ed8SAndrew Rybchenko
1295e111ed8SAndrew Rybchenko _NOTE(ARGUNUSED(id)) /* buftbl id managed by MC */
1305e111ed8SAndrew Rybchenko
131f8a60f76SAndy Moreton EFSYS_ASSERT((flags & EFX_EVQ_FLAGS_EXTENDED_WIDTH) == 0);
132f8a60f76SAndy Moreton
1335e111ed8SAndrew Rybchenko /*
1345e111ed8SAndrew Rybchenko * NO_CONT_EV mode is only requested from the firmware when creating
1355e111ed8SAndrew Rybchenko * receive queues, but here it needs to be specified at event queue
1365e111ed8SAndrew Rybchenko * creation, as the event handler needs to know which format is in use.
1375e111ed8SAndrew Rybchenko *
1385e111ed8SAndrew Rybchenko * If EFX_EVQ_FLAGS_NO_CONT_EV is specified, all receive queues for this
1395e111ed8SAndrew Rybchenko * event queue will be created in NO_CONT_EV mode.
1405e111ed8SAndrew Rybchenko *
1415e111ed8SAndrew Rybchenko * See SF-109306-TC 5.11 "Events for RXQs in NO_CONT_EV mode".
1425e111ed8SAndrew Rybchenko */
1435e111ed8SAndrew Rybchenko if (flags & EFX_EVQ_FLAGS_NO_CONT_EV) {
1445e111ed8SAndrew Rybchenko if (enp->en_nic_cfg.enc_no_cont_ev_mode_supported == B_FALSE) {
1455e111ed8SAndrew Rybchenko rc = EINVAL;
1462e5819a5SAndrew Rybchenko goto fail1;
1475e111ed8SAndrew Rybchenko }
1485e111ed8SAndrew Rybchenko }
1495e111ed8SAndrew Rybchenko
1505e111ed8SAndrew Rybchenko /* Set up the handler table */
1515e111ed8SAndrew Rybchenko eep->ee_rx = ef10_ev_rx;
1525e111ed8SAndrew Rybchenko eep->ee_tx = ef10_ev_tx;
1535e111ed8SAndrew Rybchenko eep->ee_driver = ef10_ev_driver;
1545e111ed8SAndrew Rybchenko eep->ee_drv_gen = ef10_ev_drv_gen;
1555e111ed8SAndrew Rybchenko eep->ee_mcdi = ef10_ev_mcdi;
1565e111ed8SAndrew Rybchenko
1575e111ed8SAndrew Rybchenko /* Set up the event queue */
1585e111ed8SAndrew Rybchenko /* INIT_EVQ expects function-relative vector number */
1595e111ed8SAndrew Rybchenko if ((flags & EFX_EVQ_FLAGS_NOTIFY_MASK) ==
1605e111ed8SAndrew Rybchenko EFX_EVQ_FLAGS_NOTIFY_INTERRUPT) {
161aa6dc101SAndrew Rybchenko /* IRQ number is specified by caller */
1625e111ed8SAndrew Rybchenko } else if (index == EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX) {
1633dee345aSAndrew Rybchenko /* Use the first interrupt for always interrupting EvQ */
1643dee345aSAndrew Rybchenko irq = 0;
1655e111ed8SAndrew Rybchenko flags = (flags & ~EFX_EVQ_FLAGS_NOTIFY_MASK) |
1665e111ed8SAndrew Rybchenko EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
1675e111ed8SAndrew Rybchenko } else {
1683dee345aSAndrew Rybchenko target_evq = EFX_EF10_ALWAYS_INTERRUPTING_EVQ_INDEX;
1695e111ed8SAndrew Rybchenko }
1705e111ed8SAndrew Rybchenko
1715e111ed8SAndrew Rybchenko /*
1725e111ed8SAndrew Rybchenko * Interrupts may be raised for events immediately after the queue is
1735e111ed8SAndrew Rybchenko * created. See bug58606.
1745e111ed8SAndrew Rybchenko */
1755e111ed8SAndrew Rybchenko
1765e111ed8SAndrew Rybchenko /*
1775e111ed8SAndrew Rybchenko * On Huntington we need to specify the settings to use.
1785e111ed8SAndrew Rybchenko * If event queue type in flags is auto, we favour throughput
1795e111ed8SAndrew Rybchenko * if the adapter is running virtualization supporting firmware
1805e111ed8SAndrew Rybchenko * (i.e. the full featured firmware variant)
1815e111ed8SAndrew Rybchenko * and latency otherwise. The Ethernet Virtual Bridging
1825e111ed8SAndrew Rybchenko * capability is used to make this decision. (Note though that
1835e111ed8SAndrew Rybchenko * the low latency firmware variant is also best for
1845e111ed8SAndrew Rybchenko * throughput and corresponding type should be specified
1855e111ed8SAndrew Rybchenko * to choose it.)
1868aad1149SAndrew Rybchenko *
1878aad1149SAndrew Rybchenko * If FW supports EvQ types (e.g. on Medford and Medford2) the
1888aad1149SAndrew Rybchenko * type which is specified in flags is passed to FW to make the
1898aad1149SAndrew Rybchenko * decision and low_latency hint is ignored.
1905e111ed8SAndrew Rybchenko */
1918aad1149SAndrew Rybchenko low_latency = encp->enc_datapath_cap_evb ? 0 : 1;
1923dee345aSAndrew Rybchenko rc = efx_mcdi_init_evq(enp, index, esmp, ndescs, irq, target_evq, us,
1933dee345aSAndrew Rybchenko flags, low_latency);
1945e111ed8SAndrew Rybchenko if (rc != 0)
1958aad1149SAndrew Rybchenko goto fail2;
1965e111ed8SAndrew Rybchenko
1975e111ed8SAndrew Rybchenko return (0);
1985e111ed8SAndrew Rybchenko
1995e111ed8SAndrew Rybchenko fail2:
2005e111ed8SAndrew Rybchenko EFSYS_PROBE(fail2);
2015e111ed8SAndrew Rybchenko fail1:
2025e111ed8SAndrew Rybchenko EFSYS_PROBE1(fail1, efx_rc_t, rc);
2035e111ed8SAndrew Rybchenko
2045e111ed8SAndrew Rybchenko return (rc);
2055e111ed8SAndrew Rybchenko }
2065e111ed8SAndrew Rybchenko
2075e111ed8SAndrew Rybchenko void
ef10_ev_qdestroy(__in efx_evq_t * eep)2085e111ed8SAndrew Rybchenko ef10_ev_qdestroy(
2095e111ed8SAndrew Rybchenko __in efx_evq_t *eep)
2105e111ed8SAndrew Rybchenko {
2115e111ed8SAndrew Rybchenko efx_nic_t *enp = eep->ee_enp;
2125e111ed8SAndrew Rybchenko
2135e111ed8SAndrew Rybchenko EFSYS_ASSERT(EFX_FAMILY_IS_EF10(enp));
2145e111ed8SAndrew Rybchenko
2155e111ed8SAndrew Rybchenko (void) efx_mcdi_fini_evq(enp, eep->ee_index);
2165e111ed8SAndrew Rybchenko }
2175e111ed8SAndrew Rybchenko
2185e111ed8SAndrew Rybchenko __checkReturn efx_rc_t
ef10_ev_qprime(__in efx_evq_t * eep,__in unsigned int count)2195e111ed8SAndrew Rybchenko ef10_ev_qprime(
2205e111ed8SAndrew Rybchenko __in efx_evq_t *eep,
2215e111ed8SAndrew Rybchenko __in unsigned int count)
2225e111ed8SAndrew Rybchenko {
2235e111ed8SAndrew Rybchenko efx_nic_t *enp = eep->ee_enp;
2245e111ed8SAndrew Rybchenko uint32_t rptr;
2255e111ed8SAndrew Rybchenko efx_dword_t dword;
2265e111ed8SAndrew Rybchenko
2275e111ed8SAndrew Rybchenko rptr = count & eep->ee_mask;
2285e111ed8SAndrew Rybchenko
2295e111ed8SAndrew Rybchenko if (enp->en_nic_cfg.enc_bug35388_workaround) {
2305e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(EF10_EVQ_MINNEVS >
2315e111ed8SAndrew Rybchenko (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
2325e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(EF10_EVQ_MAXNEVS <
2335e111ed8SAndrew Rybchenko (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
2345e111ed8SAndrew Rybchenko
2355e111ed8SAndrew Rybchenko EFX_POPULATE_DWORD_2(dword,
2365e111ed8SAndrew Rybchenko ERF_DD_EVQ_IND_RPTR_FLAGS,
2375e111ed8SAndrew Rybchenko EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
2385e111ed8SAndrew Rybchenko ERF_DD_EVQ_IND_RPTR,
2395e111ed8SAndrew Rybchenko (rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH));
2405e111ed8SAndrew Rybchenko EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
2415e111ed8SAndrew Rybchenko &dword, B_FALSE);
2425e111ed8SAndrew Rybchenko
2435e111ed8SAndrew Rybchenko EFX_POPULATE_DWORD_2(dword,
2445e111ed8SAndrew Rybchenko ERF_DD_EVQ_IND_RPTR_FLAGS,
2455e111ed8SAndrew Rybchenko EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
2465e111ed8SAndrew Rybchenko ERF_DD_EVQ_IND_RPTR,
2475e111ed8SAndrew Rybchenko rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
2485e111ed8SAndrew Rybchenko EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT, eep->ee_index,
2495e111ed8SAndrew Rybchenko &dword, B_FALSE);
2505e111ed8SAndrew Rybchenko } else {
2515e111ed8SAndrew Rybchenko EFX_POPULATE_DWORD_1(dword, ERF_DZ_EVQ_RPTR, rptr);
2525e111ed8SAndrew Rybchenko EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_RPTR_REG, eep->ee_index,
2535e111ed8SAndrew Rybchenko &dword, B_FALSE);
2545e111ed8SAndrew Rybchenko }
2555e111ed8SAndrew Rybchenko
2565e111ed8SAndrew Rybchenko return (0);
2575e111ed8SAndrew Rybchenko }
2585e111ed8SAndrew Rybchenko
2595e111ed8SAndrew Rybchenko static __checkReturn efx_rc_t
efx_mcdi_driver_event(__in efx_nic_t * enp,__in uint32_t evq,__in efx_qword_t data)2605e111ed8SAndrew Rybchenko efx_mcdi_driver_event(
2615e111ed8SAndrew Rybchenko __in efx_nic_t *enp,
2625e111ed8SAndrew Rybchenko __in uint32_t evq,
2635e111ed8SAndrew Rybchenko __in efx_qword_t data)
2645e111ed8SAndrew Rybchenko {
2655e111ed8SAndrew Rybchenko efx_mcdi_req_t req;
2665e111ed8SAndrew Rybchenko EFX_MCDI_DECLARE_BUF(payload, MC_CMD_DRIVER_EVENT_IN_LEN,
2675e111ed8SAndrew Rybchenko MC_CMD_DRIVER_EVENT_OUT_LEN);
2685e111ed8SAndrew Rybchenko efx_rc_t rc;
2695e111ed8SAndrew Rybchenko
2705e111ed8SAndrew Rybchenko req.emr_cmd = MC_CMD_DRIVER_EVENT;
2715e111ed8SAndrew Rybchenko req.emr_in_buf = payload;
2725e111ed8SAndrew Rybchenko req.emr_in_length = MC_CMD_DRIVER_EVENT_IN_LEN;
2735e111ed8SAndrew Rybchenko req.emr_out_buf = payload;
2745e111ed8SAndrew Rybchenko req.emr_out_length = MC_CMD_DRIVER_EVENT_OUT_LEN;
2755e111ed8SAndrew Rybchenko
2765e111ed8SAndrew Rybchenko MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_EVQ, evq);
2775e111ed8SAndrew Rybchenko
2785e111ed8SAndrew Rybchenko MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_LO,
2795e111ed8SAndrew Rybchenko EFX_QWORD_FIELD(data, EFX_DWORD_0));
2805e111ed8SAndrew Rybchenko MCDI_IN_SET_DWORD(req, DRIVER_EVENT_IN_DATA_HI,
2815e111ed8SAndrew Rybchenko EFX_QWORD_FIELD(data, EFX_DWORD_1));
2825e111ed8SAndrew Rybchenko
2835e111ed8SAndrew Rybchenko efx_mcdi_execute(enp, &req);
2845e111ed8SAndrew Rybchenko
2855e111ed8SAndrew Rybchenko if (req.emr_rc != 0) {
2865e111ed8SAndrew Rybchenko rc = req.emr_rc;
2875e111ed8SAndrew Rybchenko goto fail1;
2885e111ed8SAndrew Rybchenko }
2895e111ed8SAndrew Rybchenko
2905e111ed8SAndrew Rybchenko return (0);
2915e111ed8SAndrew Rybchenko
2925e111ed8SAndrew Rybchenko fail1:
2935e111ed8SAndrew Rybchenko EFSYS_PROBE1(fail1, efx_rc_t, rc);
2945e111ed8SAndrew Rybchenko
2955e111ed8SAndrew Rybchenko return (rc);
2965e111ed8SAndrew Rybchenko }
2975e111ed8SAndrew Rybchenko
2985e111ed8SAndrew Rybchenko void
ef10_ev_qpost(__in efx_evq_t * eep,__in uint16_t data)2995e111ed8SAndrew Rybchenko ef10_ev_qpost(
3005e111ed8SAndrew Rybchenko __in efx_evq_t *eep,
3015e111ed8SAndrew Rybchenko __in uint16_t data)
3025e111ed8SAndrew Rybchenko {
3035e111ed8SAndrew Rybchenko efx_nic_t *enp = eep->ee_enp;
3045e111ed8SAndrew Rybchenko efx_qword_t event;
3055e111ed8SAndrew Rybchenko
3065e111ed8SAndrew Rybchenko EFX_POPULATE_QWORD_3(event,
3075e111ed8SAndrew Rybchenko ESF_DZ_DRV_CODE, ESE_DZ_EV_CODE_DRV_GEN_EV,
3085e111ed8SAndrew Rybchenko ESF_DZ_DRV_SUB_CODE, 0,
3095e111ed8SAndrew Rybchenko ESF_DZ_DRV_SUB_DATA_DW0, (uint32_t)data);
3105e111ed8SAndrew Rybchenko
3115e111ed8SAndrew Rybchenko (void) efx_mcdi_driver_event(enp, eep->ee_index, event);
3125e111ed8SAndrew Rybchenko }
3135e111ed8SAndrew Rybchenko
3145e111ed8SAndrew Rybchenko __checkReturn efx_rc_t
ef10_ev_qmoderate(__in efx_evq_t * eep,__in unsigned int us)3155e111ed8SAndrew Rybchenko ef10_ev_qmoderate(
3165e111ed8SAndrew Rybchenko __in efx_evq_t *eep,
3175e111ed8SAndrew Rybchenko __in unsigned int us)
3185e111ed8SAndrew Rybchenko {
3195e111ed8SAndrew Rybchenko efx_nic_t *enp = eep->ee_enp;
3205e111ed8SAndrew Rybchenko efx_nic_cfg_t *encp = &(enp->en_nic_cfg);
3215e111ed8SAndrew Rybchenko efx_dword_t dword;
3225e111ed8SAndrew Rybchenko uint32_t mode;
3235e111ed8SAndrew Rybchenko efx_rc_t rc;
3245e111ed8SAndrew Rybchenko
3255e111ed8SAndrew Rybchenko /* Check that hardware and MCDI use the same timer MODE values */
3265e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_DIS ==
3275e111ed8SAndrew Rybchenko MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_DIS);
3285e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_IMMED_START ==
3295e111ed8SAndrew Rybchenko MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_IMMED_START);
3305e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_TRIG_START ==
3315e111ed8SAndrew Rybchenko MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_TRIG_START);
3325e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(FFE_CZ_TIMER_MODE_INT_HLDOFF ==
3335e111ed8SAndrew Rybchenko MC_CMD_SET_EVQ_TMR_IN_TIMER_MODE_INT_HLDOFF);
3345e111ed8SAndrew Rybchenko
3355e111ed8SAndrew Rybchenko if (us > encp->enc_evq_timer_max_us) {
3365e111ed8SAndrew Rybchenko rc = EINVAL;
3375e111ed8SAndrew Rybchenko goto fail1;
3385e111ed8SAndrew Rybchenko }
3395e111ed8SAndrew Rybchenko
3405e111ed8SAndrew Rybchenko /* If the value is zero then disable the timer */
3415e111ed8SAndrew Rybchenko if (us == 0) {
3425e111ed8SAndrew Rybchenko mode = FFE_CZ_TIMER_MODE_DIS;
3435e111ed8SAndrew Rybchenko } else {
3445e111ed8SAndrew Rybchenko mode = FFE_CZ_TIMER_MODE_INT_HLDOFF;
3455e111ed8SAndrew Rybchenko }
3465e111ed8SAndrew Rybchenko
3475e111ed8SAndrew Rybchenko if (encp->enc_bug61265_workaround) {
3485e111ed8SAndrew Rybchenko uint32_t ns = us * 1000;
3495e111ed8SAndrew Rybchenko
3505e111ed8SAndrew Rybchenko rc = efx_mcdi_set_evq_tmr(enp, eep->ee_index, mode, ns);
3515e111ed8SAndrew Rybchenko if (rc != 0)
3525e111ed8SAndrew Rybchenko goto fail2;
3535e111ed8SAndrew Rybchenko } else {
3545e111ed8SAndrew Rybchenko unsigned int ticks;
3555e111ed8SAndrew Rybchenko
3565e111ed8SAndrew Rybchenko if ((rc = efx_ev_usecs_to_ticks(enp, us, &ticks)) != 0)
3575e111ed8SAndrew Rybchenko goto fail3;
3585e111ed8SAndrew Rybchenko
3595e111ed8SAndrew Rybchenko if (encp->enc_bug35388_workaround) {
3605e111ed8SAndrew Rybchenko EFX_POPULATE_DWORD_3(dword,
3615e111ed8SAndrew Rybchenko ERF_DD_EVQ_IND_TIMER_FLAGS,
3625e111ed8SAndrew Rybchenko EFE_DD_EVQ_IND_TIMER_FLAGS,
3635e111ed8SAndrew Rybchenko ERF_DD_EVQ_IND_TIMER_MODE, mode,
3645e111ed8SAndrew Rybchenko ERF_DD_EVQ_IND_TIMER_VAL, ticks);
3655e111ed8SAndrew Rybchenko EFX_BAR_VI_WRITED(enp, ER_DD_EVQ_INDIRECT,
3665e111ed8SAndrew Rybchenko eep->ee_index, &dword, 0);
3675e111ed8SAndrew Rybchenko } else {
3685e111ed8SAndrew Rybchenko /*
3695e111ed8SAndrew Rybchenko * NOTE: The TMR_REL field introduced in Medford2 is
3705e111ed8SAndrew Rybchenko * ignored on earlier EF10 controllers. See bug66418
3715e111ed8SAndrew Rybchenko * comment 9 for details.
3725e111ed8SAndrew Rybchenko */
3735e111ed8SAndrew Rybchenko EFX_POPULATE_DWORD_3(dword,
3745e111ed8SAndrew Rybchenko ERF_DZ_TC_TIMER_MODE, mode,
3755e111ed8SAndrew Rybchenko ERF_DZ_TC_TIMER_VAL, ticks,
3765e111ed8SAndrew Rybchenko ERF_FZ_TC_TMR_REL_VAL, ticks);
3775e111ed8SAndrew Rybchenko EFX_BAR_VI_WRITED(enp, ER_DZ_EVQ_TMR_REG,
3785e111ed8SAndrew Rybchenko eep->ee_index, &dword, 0);
3795e111ed8SAndrew Rybchenko }
3805e111ed8SAndrew Rybchenko }
3815e111ed8SAndrew Rybchenko
3825e111ed8SAndrew Rybchenko return (0);
3835e111ed8SAndrew Rybchenko
3845e111ed8SAndrew Rybchenko fail3:
3855e111ed8SAndrew Rybchenko EFSYS_PROBE(fail3);
3865e111ed8SAndrew Rybchenko fail2:
3875e111ed8SAndrew Rybchenko EFSYS_PROBE(fail2);
3885e111ed8SAndrew Rybchenko fail1:
3895e111ed8SAndrew Rybchenko EFSYS_PROBE1(fail1, efx_rc_t, rc);
3905e111ed8SAndrew Rybchenko
3915e111ed8SAndrew Rybchenko return (rc);
3925e111ed8SAndrew Rybchenko }
3935e111ed8SAndrew Rybchenko
3945e111ed8SAndrew Rybchenko
3955e111ed8SAndrew Rybchenko #if EFSYS_OPT_QSTATS
3965e111ed8SAndrew Rybchenko void
ef10_ev_qstats_update(__in efx_evq_t * eep,__inout_ecount (EV_NQSTATS)efsys_stat_t * stat)3975e111ed8SAndrew Rybchenko ef10_ev_qstats_update(
3985e111ed8SAndrew Rybchenko __in efx_evq_t *eep,
3995e111ed8SAndrew Rybchenko __inout_ecount(EV_NQSTATS) efsys_stat_t *stat)
4005e111ed8SAndrew Rybchenko {
4015e111ed8SAndrew Rybchenko unsigned int id;
4025e111ed8SAndrew Rybchenko
4035e111ed8SAndrew Rybchenko for (id = 0; id < EV_NQSTATS; id++) {
4045e111ed8SAndrew Rybchenko efsys_stat_t *essp = &stat[id];
4055e111ed8SAndrew Rybchenko
4065e111ed8SAndrew Rybchenko EFSYS_STAT_INCR(essp, eep->ee_stat[id]);
4075e111ed8SAndrew Rybchenko eep->ee_stat[id] = 0;
4085e111ed8SAndrew Rybchenko }
4095e111ed8SAndrew Rybchenko }
4105e111ed8SAndrew Rybchenko #endif /* EFSYS_OPT_QSTATS */
4115e111ed8SAndrew Rybchenko
4125e111ed8SAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
4135e111ed8SAndrew Rybchenko
4145e111ed8SAndrew Rybchenko static __checkReturn boolean_t
ef10_ev_rx_packed_stream(__in efx_evq_t * eep,__in efx_qword_t * eqp,__in const efx_ev_callbacks_t * eecp,__in_opt void * arg)4155e111ed8SAndrew Rybchenko ef10_ev_rx_packed_stream(
4165e111ed8SAndrew Rybchenko __in efx_evq_t *eep,
4175e111ed8SAndrew Rybchenko __in efx_qword_t *eqp,
4185e111ed8SAndrew Rybchenko __in const efx_ev_callbacks_t *eecp,
4195e111ed8SAndrew Rybchenko __in_opt void *arg)
4205e111ed8SAndrew Rybchenko {
4215e111ed8SAndrew Rybchenko uint32_t label;
4225e111ed8SAndrew Rybchenko uint32_t pkt_count_lbits;
4235e111ed8SAndrew Rybchenko uint16_t flags;
4245e111ed8SAndrew Rybchenko boolean_t should_abort;
4255e111ed8SAndrew Rybchenko efx_evq_rxq_state_t *eersp;
4265e111ed8SAndrew Rybchenko unsigned int pkt_count;
4275e111ed8SAndrew Rybchenko unsigned int current_id;
4285e111ed8SAndrew Rybchenko boolean_t new_buffer;
4295e111ed8SAndrew Rybchenko
4305e111ed8SAndrew Rybchenko pkt_count_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
4315e111ed8SAndrew Rybchenko label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
4325e111ed8SAndrew Rybchenko new_buffer = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_EV_ROTATE);
4335e111ed8SAndrew Rybchenko
4345e111ed8SAndrew Rybchenko flags = 0;
4355e111ed8SAndrew Rybchenko
4365e111ed8SAndrew Rybchenko eersp = &eep->ee_rxq_state[label];
4375e111ed8SAndrew Rybchenko
4385e111ed8SAndrew Rybchenko /*
4395e111ed8SAndrew Rybchenko * RX_DSC_PTR_LBITS has least significant bits of the global
4405e111ed8SAndrew Rybchenko * (not per-buffer) packet counter. It is guaranteed that
4415e111ed8SAndrew Rybchenko * maximum number of completed packets fits in lbits-mask.
4425e111ed8SAndrew Rybchenko * So, modulo lbits-mask arithmetic should be used to calculate
4435e111ed8SAndrew Rybchenko * packet counter increment.
4445e111ed8SAndrew Rybchenko */
4455e111ed8SAndrew Rybchenko pkt_count = (pkt_count_lbits - eersp->eers_rx_stream_npackets) &
4465e111ed8SAndrew Rybchenko EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
4475e111ed8SAndrew Rybchenko eersp->eers_rx_stream_npackets += pkt_count;
4485e111ed8SAndrew Rybchenko
4495e111ed8SAndrew Rybchenko if (new_buffer) {
4505e111ed8SAndrew Rybchenko flags |= EFX_PKT_PACKED_STREAM_NEW_BUFFER;
4515e111ed8SAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM
4525e111ed8SAndrew Rybchenko /*
4535e111ed8SAndrew Rybchenko * If both packed stream and equal stride super-buffer
4545e111ed8SAndrew Rybchenko * modes are compiled in, in theory credits should be
4555e111ed8SAndrew Rybchenko * be maintained for packed stream only, but right now
4565e111ed8SAndrew Rybchenko * these modes are not distinguished in the event queue
4575e111ed8SAndrew Rybchenko * Rx queue state and it is OK to increment the counter
4585e111ed8SAndrew Rybchenko * regardless (it might be event cheaper than branching
4595e111ed8SAndrew Rybchenko * since neighbour structure member are updated as well).
4605e111ed8SAndrew Rybchenko */
4615e111ed8SAndrew Rybchenko eersp->eers_rx_packed_stream_credits++;
4625e111ed8SAndrew Rybchenko #endif
4635e111ed8SAndrew Rybchenko eersp->eers_rx_read_ptr++;
4645e111ed8SAndrew Rybchenko }
4655e111ed8SAndrew Rybchenko current_id = eersp->eers_rx_read_ptr & eersp->eers_rx_mask;
4665e111ed8SAndrew Rybchenko
4675e111ed8SAndrew Rybchenko /* Check for errors that invalidate checksum and L3/L4 fields */
4685e111ed8SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
4695e111ed8SAndrew Rybchenko /* RX frame truncated */
4705e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
4715e111ed8SAndrew Rybchenko flags |= EFX_DISCARD;
4725e111ed8SAndrew Rybchenko goto deliver;
4735e111ed8SAndrew Rybchenko }
4745e111ed8SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
4755e111ed8SAndrew Rybchenko /* Bad Ethernet frame CRC */
4765e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
4775e111ed8SAndrew Rybchenko flags |= EFX_DISCARD;
4785e111ed8SAndrew Rybchenko goto deliver;
4795e111ed8SAndrew Rybchenko }
4805e111ed8SAndrew Rybchenko
4815e111ed8SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
4825e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE);
4835e111ed8SAndrew Rybchenko flags |= EFX_PKT_PACKED_STREAM_PARSE_INCOMPLETE;
4845e111ed8SAndrew Rybchenko goto deliver;
4855e111ed8SAndrew Rybchenko }
4865e111ed8SAndrew Rybchenko
4875e111ed8SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR))
4885e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
4895e111ed8SAndrew Rybchenko
4905e111ed8SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR))
4915e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
4925e111ed8SAndrew Rybchenko
4935e111ed8SAndrew Rybchenko deliver:
4945e111ed8SAndrew Rybchenko /* If we're not discarding the packet then it is ok */
4955e111ed8SAndrew Rybchenko if (~flags & EFX_DISCARD)
4965e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
4975e111ed8SAndrew Rybchenko
4985e111ed8SAndrew Rybchenko EFSYS_ASSERT(eecp->eec_rx_ps != NULL);
4995e111ed8SAndrew Rybchenko should_abort = eecp->eec_rx_ps(arg, label, current_id, pkt_count,
5005e111ed8SAndrew Rybchenko flags);
5015e111ed8SAndrew Rybchenko
5025e111ed8SAndrew Rybchenko return (should_abort);
5035e111ed8SAndrew Rybchenko }
5045e111ed8SAndrew Rybchenko
5055e111ed8SAndrew Rybchenko #endif /* EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER */
5065e111ed8SAndrew Rybchenko
5075e111ed8SAndrew Rybchenko static __checkReturn boolean_t
ef10_ev_rx(__in efx_evq_t * eep,__in efx_qword_t * eqp,__in const efx_ev_callbacks_t * eecp,__in_opt void * arg)5085e111ed8SAndrew Rybchenko ef10_ev_rx(
5095e111ed8SAndrew Rybchenko __in efx_evq_t *eep,
5105e111ed8SAndrew Rybchenko __in efx_qword_t *eqp,
5115e111ed8SAndrew Rybchenko __in const efx_ev_callbacks_t *eecp,
5125e111ed8SAndrew Rybchenko __in_opt void *arg)
5135e111ed8SAndrew Rybchenko {
5145e111ed8SAndrew Rybchenko efx_nic_t *enp = eep->ee_enp;
5155e111ed8SAndrew Rybchenko uint32_t size;
5165e111ed8SAndrew Rybchenko uint32_t label;
5175e111ed8SAndrew Rybchenko uint32_t mac_class;
5185e111ed8SAndrew Rybchenko uint32_t eth_tag_class;
5195e111ed8SAndrew Rybchenko uint32_t l3_class;
5205e111ed8SAndrew Rybchenko uint32_t l4_class;
5215e111ed8SAndrew Rybchenko uint32_t next_read_lbits;
5225e111ed8SAndrew Rybchenko uint16_t flags;
5235e111ed8SAndrew Rybchenko boolean_t cont;
5245e111ed8SAndrew Rybchenko boolean_t should_abort;
5255e111ed8SAndrew Rybchenko efx_evq_rxq_state_t *eersp;
5265e111ed8SAndrew Rybchenko unsigned int desc_count;
5275e111ed8SAndrew Rybchenko unsigned int last_used_id;
5285e111ed8SAndrew Rybchenko
5295e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX);
5305e111ed8SAndrew Rybchenko
5315e111ed8SAndrew Rybchenko /* Discard events after RXQ/TXQ errors, or hardware not available */
5325e111ed8SAndrew Rybchenko if (enp->en_reset_flags &
5335e111ed8SAndrew Rybchenko (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
5345e111ed8SAndrew Rybchenko return (B_FALSE);
5355e111ed8SAndrew Rybchenko
5365e111ed8SAndrew Rybchenko /* Basic packet information */
5375e111ed8SAndrew Rybchenko label = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_QLABEL);
5385e111ed8SAndrew Rybchenko eersp = &eep->ee_rxq_state[label];
5395e111ed8SAndrew Rybchenko
5405e111ed8SAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
5415e111ed8SAndrew Rybchenko /*
5425e111ed8SAndrew Rybchenko * Packed stream events are very different,
5435e111ed8SAndrew Rybchenko * so handle them separately
5445e111ed8SAndrew Rybchenko */
5455e111ed8SAndrew Rybchenko if (eersp->eers_rx_packed_stream)
5465e111ed8SAndrew Rybchenko return (ef10_ev_rx_packed_stream(eep, eqp, eecp, arg));
5475e111ed8SAndrew Rybchenko #endif
5485e111ed8SAndrew Rybchenko
5495e111ed8SAndrew Rybchenko size = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_BYTES);
5505e111ed8SAndrew Rybchenko cont = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_CONT);
5515e111ed8SAndrew Rybchenko next_read_lbits = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DSC_PTR_LBITS);
5525e111ed8SAndrew Rybchenko eth_tag_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ETH_TAG_CLASS);
5535e111ed8SAndrew Rybchenko mac_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_MAC_CLASS);
5545e111ed8SAndrew Rybchenko l3_class = EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_L3_CLASS);
5555e111ed8SAndrew Rybchenko
5565e111ed8SAndrew Rybchenko /*
5575e111ed8SAndrew Rybchenko * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is only
5585e111ed8SAndrew Rybchenko * 2 bits wide on Medford2. Check it is safe to use the Medford2 field
5595e111ed8SAndrew Rybchenko * and values for all EF10 controllers.
5605e111ed8SAndrew Rybchenko */
5615e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN == ESF_DE_RX_L4_CLASS_LBN);
5625e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
5635e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
5645e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN == ESE_DE_L4_CLASS_UNKNOWN);
5655e111ed8SAndrew Rybchenko
5665e111ed8SAndrew Rybchenko l4_class = EFX_QWORD_FIELD(*eqp, ESF_FZ_RX_L4_CLASS);
5675e111ed8SAndrew Rybchenko
5685e111ed8SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_DROP_EVENT) != 0) {
5695e111ed8SAndrew Rybchenko /* Drop this event */
5705e111ed8SAndrew Rybchenko return (B_FALSE);
5715e111ed8SAndrew Rybchenko }
5725e111ed8SAndrew Rybchenko flags = 0;
5735e111ed8SAndrew Rybchenko
5745e111ed8SAndrew Rybchenko if (cont != 0) {
5755e111ed8SAndrew Rybchenko /*
5765e111ed8SAndrew Rybchenko * This may be part of a scattered frame, or it may be a
5775e111ed8SAndrew Rybchenko * truncated frame if scatter is disabled on this RXQ.
5785e111ed8SAndrew Rybchenko * Overlength frames can be received if e.g. a VF is configured
5795e111ed8SAndrew Rybchenko * for 1500 MTU but connected to a port set to 9000 MTU
5805e111ed8SAndrew Rybchenko * (see bug56567).
5815e111ed8SAndrew Rybchenko * FIXME: There is not yet any driver that supports scatter on
5825e111ed8SAndrew Rybchenko * Huntington. Scatter support is required for OSX.
5835e111ed8SAndrew Rybchenko */
5845e111ed8SAndrew Rybchenko flags |= EFX_PKT_CONT;
5855e111ed8SAndrew Rybchenko }
5865e111ed8SAndrew Rybchenko
5875e111ed8SAndrew Rybchenko if (mac_class == ESE_DZ_MAC_CLASS_UCAST)
5885e111ed8SAndrew Rybchenko flags |= EFX_PKT_UNICAST;
5895e111ed8SAndrew Rybchenko
5905e111ed8SAndrew Rybchenko /*
5915e111ed8SAndrew Rybchenko * Increment the count of descriptors read.
5925e111ed8SAndrew Rybchenko *
5935e111ed8SAndrew Rybchenko * In NO_CONT_EV mode, RX_DSC_PTR_LBITS is actually a packet count, but
5945e111ed8SAndrew Rybchenko * when scatter is disabled, there is only one descriptor per packet and
5955e111ed8SAndrew Rybchenko * so it can be treated the same.
5965e111ed8SAndrew Rybchenko *
5975e111ed8SAndrew Rybchenko * TODO: Support scatter in NO_CONT_EV mode.
5985e111ed8SAndrew Rybchenko */
5995e111ed8SAndrew Rybchenko desc_count = (next_read_lbits - eersp->eers_rx_read_ptr) &
6005e111ed8SAndrew Rybchenko EFX_MASK32(ESF_DZ_RX_DSC_PTR_LBITS);
6015e111ed8SAndrew Rybchenko eersp->eers_rx_read_ptr += desc_count;
6025e111ed8SAndrew Rybchenko
6035e111ed8SAndrew Rybchenko /* Calculate the index of the last descriptor consumed */
6045e111ed8SAndrew Rybchenko last_used_id = (eersp->eers_rx_read_ptr - 1) & eersp->eers_rx_mask;
6055e111ed8SAndrew Rybchenko
6065e111ed8SAndrew Rybchenko if (eep->ee_flags & EFX_EVQ_FLAGS_NO_CONT_EV) {
6075e111ed8SAndrew Rybchenko if (desc_count > 1)
6085e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
6095e111ed8SAndrew Rybchenko
6105e111ed8SAndrew Rybchenko /* Always read the length from the prefix in NO_CONT_EV mode. */
6115e111ed8SAndrew Rybchenko flags |= EFX_PKT_PREFIX_LEN;
6125e111ed8SAndrew Rybchenko
6135e111ed8SAndrew Rybchenko /*
6145e111ed8SAndrew Rybchenko * Check for an aborted scatter, signalled by the ABORT bit in
6155e111ed8SAndrew Rybchenko * NO_CONT_EV mode. The ABORT bit was not used before NO_CONT_EV
6165e111ed8SAndrew Rybchenko * mode was added as it was broken in Huntington silicon.
6175e111ed8SAndrew Rybchenko */
6185e111ed8SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_EZ_RX_ABORT) != 0) {
6195e111ed8SAndrew Rybchenko flags |= EFX_DISCARD;
6205e111ed8SAndrew Rybchenko goto deliver;
6215e111ed8SAndrew Rybchenko }
6225e111ed8SAndrew Rybchenko } else if (desc_count > 1) {
6235e111ed8SAndrew Rybchenko /*
6245e111ed8SAndrew Rybchenko * FIXME: add error checking to make sure this a batched event.
6255e111ed8SAndrew Rybchenko * This could also be an aborted scatter, see Bug36629.
6265e111ed8SAndrew Rybchenko */
6275e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_BATCH);
6285e111ed8SAndrew Rybchenko flags |= EFX_PKT_PREFIX_LEN;
6295e111ed8SAndrew Rybchenko }
6305e111ed8SAndrew Rybchenko
6315e111ed8SAndrew Rybchenko /* Check for errors that invalidate checksum and L3/L4 fields */
6325e111ed8SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TRUNC_ERR) != 0) {
6335e111ed8SAndrew Rybchenko /* RX frame truncated */
6345e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_FRM_TRUNC);
6355e111ed8SAndrew Rybchenko flags |= EFX_DISCARD;
6365e111ed8SAndrew Rybchenko goto deliver;
6375e111ed8SAndrew Rybchenko }
6385e111ed8SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_ECRC_ERR) != 0) {
6395e111ed8SAndrew Rybchenko /* Bad Ethernet frame CRC */
6405e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_ETH_CRC_ERR);
6415e111ed8SAndrew Rybchenko flags |= EFX_DISCARD;
6425e111ed8SAndrew Rybchenko goto deliver;
6435e111ed8SAndrew Rybchenko }
6445e111ed8SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_PARSE_INCOMPLETE)) {
6455e111ed8SAndrew Rybchenko /*
6465e111ed8SAndrew Rybchenko * Hardware parse failed, due to malformed headers
6475e111ed8SAndrew Rybchenko * or headers that are too long for the parser.
6485e111ed8SAndrew Rybchenko * Headers and checksums must be validated by the host.
6495e111ed8SAndrew Rybchenko */
6505e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_PARSE_INCOMPLETE);
6515e111ed8SAndrew Rybchenko goto deliver;
6525e111ed8SAndrew Rybchenko }
6535e111ed8SAndrew Rybchenko
6545e111ed8SAndrew Rybchenko if ((eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN1) ||
6555e111ed8SAndrew Rybchenko (eth_tag_class == ESE_DZ_ETH_TAG_CLASS_VLAN2)) {
6565e111ed8SAndrew Rybchenko flags |= EFX_PKT_VLAN_TAGGED;
6575e111ed8SAndrew Rybchenko }
6585e111ed8SAndrew Rybchenko
6595e111ed8SAndrew Rybchenko switch (l3_class) {
6605e111ed8SAndrew Rybchenko case ESE_DZ_L3_CLASS_IP4:
6615e111ed8SAndrew Rybchenko case ESE_DZ_L3_CLASS_IP4_FRAG:
6625e111ed8SAndrew Rybchenko flags |= EFX_PKT_IPV4;
6635e111ed8SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_IPCKSUM_ERR)) {
6645e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_IPV4_HDR_CHKSUM_ERR);
6655e111ed8SAndrew Rybchenko } else {
6665e111ed8SAndrew Rybchenko flags |= EFX_CKSUM_IPV4;
6675e111ed8SAndrew Rybchenko }
6685e111ed8SAndrew Rybchenko
6695e111ed8SAndrew Rybchenko /*
6705e111ed8SAndrew Rybchenko * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
6715e111ed8SAndrew Rybchenko * only 2 bits wide on Medford2. Check it is safe to use the
6725e111ed8SAndrew Rybchenko * Medford2 field and values for all EF10 controllers.
6735e111ed8SAndrew Rybchenko */
6745e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
6755e111ed8SAndrew Rybchenko ESF_DE_RX_L4_CLASS_LBN);
6765e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
6775e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
6785e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
6795e111ed8SAndrew Rybchenko ESE_DE_L4_CLASS_UNKNOWN);
6805e111ed8SAndrew Rybchenko
6815e111ed8SAndrew Rybchenko if (l4_class == ESE_FZ_L4_CLASS_TCP) {
6825e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV4);
6835e111ed8SAndrew Rybchenko flags |= EFX_PKT_TCP;
6845e111ed8SAndrew Rybchenko } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
6855e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV4);
6865e111ed8SAndrew Rybchenko flags |= EFX_PKT_UDP;
6875e111ed8SAndrew Rybchenko } else {
6885e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV4);
6895e111ed8SAndrew Rybchenko }
6905e111ed8SAndrew Rybchenko break;
6915e111ed8SAndrew Rybchenko
6925e111ed8SAndrew Rybchenko case ESE_DZ_L3_CLASS_IP6:
6935e111ed8SAndrew Rybchenko case ESE_DZ_L3_CLASS_IP6_FRAG:
6945e111ed8SAndrew Rybchenko flags |= EFX_PKT_IPV6;
6955e111ed8SAndrew Rybchenko
6965e111ed8SAndrew Rybchenko /*
6975e111ed8SAndrew Rybchenko * RX_L4_CLASS is 3 bits wide on Huntington and Medford, but is
6985e111ed8SAndrew Rybchenko * only 2 bits wide on Medford2. Check it is safe to use the
6995e111ed8SAndrew Rybchenko * Medford2 field and values for all EF10 controllers.
7005e111ed8SAndrew Rybchenko */
7015e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(ESF_FZ_RX_L4_CLASS_LBN ==
7025e111ed8SAndrew Rybchenko ESF_DE_RX_L4_CLASS_LBN);
7035e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_TCP == ESE_DE_L4_CLASS_TCP);
7045e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UDP == ESE_DE_L4_CLASS_UDP);
7055e111ed8SAndrew Rybchenko EFX_STATIC_ASSERT(ESE_FZ_L4_CLASS_UNKNOWN ==
7065e111ed8SAndrew Rybchenko ESE_DE_L4_CLASS_UNKNOWN);
7075e111ed8SAndrew Rybchenko
7085e111ed8SAndrew Rybchenko if (l4_class == ESE_FZ_L4_CLASS_TCP) {
7095e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_IPV6);
7105e111ed8SAndrew Rybchenko flags |= EFX_PKT_TCP;
7115e111ed8SAndrew Rybchenko } else if (l4_class == ESE_FZ_L4_CLASS_UDP) {
7125e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_UDP_IPV6);
7135e111ed8SAndrew Rybchenko flags |= EFX_PKT_UDP;
7145e111ed8SAndrew Rybchenko } else {
7155e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_OTHER_IPV6);
7165e111ed8SAndrew Rybchenko }
7175e111ed8SAndrew Rybchenko break;
7185e111ed8SAndrew Rybchenko
7195e111ed8SAndrew Rybchenko default:
7205e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_NON_IP);
7215e111ed8SAndrew Rybchenko break;
7225e111ed8SAndrew Rybchenko }
7235e111ed8SAndrew Rybchenko
7245e111ed8SAndrew Rybchenko if (flags & (EFX_PKT_TCP | EFX_PKT_UDP)) {
7255e111ed8SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_RX_TCPUDP_CKSUM_ERR)) {
7265e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_TCP_UDP_CHKSUM_ERR);
7275e111ed8SAndrew Rybchenko } else {
7285e111ed8SAndrew Rybchenko flags |= EFX_CKSUM_TCPUDP;
7295e111ed8SAndrew Rybchenko }
7305e111ed8SAndrew Rybchenko }
7315e111ed8SAndrew Rybchenko
7325e111ed8SAndrew Rybchenko deliver:
7335e111ed8SAndrew Rybchenko /* If we're not discarding the packet then it is ok */
7345e111ed8SAndrew Rybchenko if (~flags & EFX_DISCARD)
7355e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_RX_OK);
7365e111ed8SAndrew Rybchenko
7375e111ed8SAndrew Rybchenko EFSYS_ASSERT(eecp->eec_rx != NULL);
7385e111ed8SAndrew Rybchenko should_abort = eecp->eec_rx(arg, label, last_used_id, size, flags);
7395e111ed8SAndrew Rybchenko
7405e111ed8SAndrew Rybchenko return (should_abort);
7415e111ed8SAndrew Rybchenko }
7425e111ed8SAndrew Rybchenko
7435e111ed8SAndrew Rybchenko static __checkReturn boolean_t
ef10_ev_tx(__in efx_evq_t * eep,__in efx_qword_t * eqp,__in const efx_ev_callbacks_t * eecp,__in_opt void * arg)7445e111ed8SAndrew Rybchenko ef10_ev_tx(
7455e111ed8SAndrew Rybchenko __in efx_evq_t *eep,
7465e111ed8SAndrew Rybchenko __in efx_qword_t *eqp,
7475e111ed8SAndrew Rybchenko __in const efx_ev_callbacks_t *eecp,
7485e111ed8SAndrew Rybchenko __in_opt void *arg)
7495e111ed8SAndrew Rybchenko {
7505e111ed8SAndrew Rybchenko efx_nic_t *enp = eep->ee_enp;
7515e111ed8SAndrew Rybchenko uint32_t id;
7525e111ed8SAndrew Rybchenko uint32_t label;
7535e111ed8SAndrew Rybchenko boolean_t should_abort;
7545e111ed8SAndrew Rybchenko
7555e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_TX);
7565e111ed8SAndrew Rybchenko
7575e111ed8SAndrew Rybchenko /* Discard events after RXQ/TXQ errors, or hardware not available */
7585e111ed8SAndrew Rybchenko if (enp->en_reset_flags &
7595e111ed8SAndrew Rybchenko (EFX_RESET_RXQ_ERR | EFX_RESET_TXQ_ERR | EFX_RESET_HW_UNAVAIL))
7605e111ed8SAndrew Rybchenko return (B_FALSE);
7615e111ed8SAndrew Rybchenko
7625e111ed8SAndrew Rybchenko if (EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DROP_EVENT) != 0) {
7635e111ed8SAndrew Rybchenko /* Drop this event */
7645e111ed8SAndrew Rybchenko return (B_FALSE);
7655e111ed8SAndrew Rybchenko }
7665e111ed8SAndrew Rybchenko
7675e111ed8SAndrew Rybchenko /* Per-packet TX completion (was per-descriptor for Falcon/Siena) */
7685e111ed8SAndrew Rybchenko id = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_DESCR_INDX);
7695e111ed8SAndrew Rybchenko label = EFX_QWORD_FIELD(*eqp, ESF_DZ_TX_QLABEL);
7705e111ed8SAndrew Rybchenko
7715e111ed8SAndrew Rybchenko EFSYS_PROBE2(tx_complete, uint32_t, label, uint32_t, id);
7725e111ed8SAndrew Rybchenko
7735e111ed8SAndrew Rybchenko EFSYS_ASSERT(eecp->eec_tx != NULL);
7745e111ed8SAndrew Rybchenko should_abort = eecp->eec_tx(arg, label, id);
7755e111ed8SAndrew Rybchenko
7765e111ed8SAndrew Rybchenko return (should_abort);
7775e111ed8SAndrew Rybchenko }
7785e111ed8SAndrew Rybchenko
7795e111ed8SAndrew Rybchenko static __checkReturn boolean_t
ef10_ev_driver(__in efx_evq_t * eep,__in efx_qword_t * eqp,__in const efx_ev_callbacks_t * eecp,__in_opt void * arg)7805e111ed8SAndrew Rybchenko ef10_ev_driver(
7815e111ed8SAndrew Rybchenko __in efx_evq_t *eep,
7825e111ed8SAndrew Rybchenko __in efx_qword_t *eqp,
7835e111ed8SAndrew Rybchenko __in const efx_ev_callbacks_t *eecp,
7845e111ed8SAndrew Rybchenko __in_opt void *arg)
7855e111ed8SAndrew Rybchenko {
7865e111ed8SAndrew Rybchenko unsigned int code;
7875e111ed8SAndrew Rybchenko boolean_t should_abort;
7885e111ed8SAndrew Rybchenko
7895e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_DRIVER);
7905e111ed8SAndrew Rybchenko should_abort = B_FALSE;
7915e111ed8SAndrew Rybchenko
7925e111ed8SAndrew Rybchenko code = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_CODE);
7935e111ed8SAndrew Rybchenko switch (code) {
7945e111ed8SAndrew Rybchenko case ESE_DZ_DRV_TIMER_EV: {
7955e111ed8SAndrew Rybchenko uint32_t id;
7965e111ed8SAndrew Rybchenko
7975e111ed8SAndrew Rybchenko id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_TMR_ID);
7985e111ed8SAndrew Rybchenko
7995e111ed8SAndrew Rybchenko EFSYS_ASSERT(eecp->eec_timer != NULL);
8005e111ed8SAndrew Rybchenko should_abort = eecp->eec_timer(arg, id);
8015e111ed8SAndrew Rybchenko break;
8025e111ed8SAndrew Rybchenko }
8035e111ed8SAndrew Rybchenko
8045e111ed8SAndrew Rybchenko case ESE_DZ_DRV_WAKE_UP_EV: {
8055e111ed8SAndrew Rybchenko uint32_t id;
8065e111ed8SAndrew Rybchenko
8075e111ed8SAndrew Rybchenko id = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_EVQ_ID);
8085e111ed8SAndrew Rybchenko
8095e111ed8SAndrew Rybchenko EFSYS_ASSERT(eecp->eec_wake_up != NULL);
8105e111ed8SAndrew Rybchenko should_abort = eecp->eec_wake_up(arg, id);
8115e111ed8SAndrew Rybchenko break;
8125e111ed8SAndrew Rybchenko }
8135e111ed8SAndrew Rybchenko
8145e111ed8SAndrew Rybchenko case ESE_DZ_DRV_START_UP_EV:
8155e111ed8SAndrew Rybchenko EFSYS_ASSERT(eecp->eec_initialized != NULL);
8165e111ed8SAndrew Rybchenko should_abort = eecp->eec_initialized(arg);
8175e111ed8SAndrew Rybchenko break;
8185e111ed8SAndrew Rybchenko
8195e111ed8SAndrew Rybchenko default:
8205e111ed8SAndrew Rybchenko EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
8215e111ed8SAndrew Rybchenko uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
8225e111ed8SAndrew Rybchenko uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
8235e111ed8SAndrew Rybchenko break;
8245e111ed8SAndrew Rybchenko }
8255e111ed8SAndrew Rybchenko
8265e111ed8SAndrew Rybchenko return (should_abort);
8275e111ed8SAndrew Rybchenko }
8285e111ed8SAndrew Rybchenko
8295e111ed8SAndrew Rybchenko static __checkReturn boolean_t
ef10_ev_drv_gen(__in efx_evq_t * eep,__in efx_qword_t * eqp,__in const efx_ev_callbacks_t * eecp,__in_opt void * arg)8305e111ed8SAndrew Rybchenko ef10_ev_drv_gen(
8315e111ed8SAndrew Rybchenko __in efx_evq_t *eep,
8325e111ed8SAndrew Rybchenko __in efx_qword_t *eqp,
8335e111ed8SAndrew Rybchenko __in const efx_ev_callbacks_t *eecp,
8345e111ed8SAndrew Rybchenko __in_opt void *arg)
8355e111ed8SAndrew Rybchenko {
8365e111ed8SAndrew Rybchenko uint32_t data;
8375e111ed8SAndrew Rybchenko boolean_t should_abort;
8385e111ed8SAndrew Rybchenko
8395e111ed8SAndrew Rybchenko EFX_EV_QSTAT_INCR(eep, EV_DRV_GEN);
8405e111ed8SAndrew Rybchenko should_abort = B_FALSE;
8415e111ed8SAndrew Rybchenko
8425e111ed8SAndrew Rybchenko data = EFX_QWORD_FIELD(*eqp, ESF_DZ_DRV_SUB_DATA_DW0);
8435e111ed8SAndrew Rybchenko if (data >= ((uint32_t)1 << 16)) {
8445e111ed8SAndrew Rybchenko EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
8455e111ed8SAndrew Rybchenko uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
8465e111ed8SAndrew Rybchenko uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
8475e111ed8SAndrew Rybchenko
8485e111ed8SAndrew Rybchenko return (B_TRUE);
8495e111ed8SAndrew Rybchenko }
8505e111ed8SAndrew Rybchenko
8515e111ed8SAndrew Rybchenko EFSYS_ASSERT(eecp->eec_software != NULL);
8525e111ed8SAndrew Rybchenko should_abort = eecp->eec_software(arg, (uint16_t)data);
8535e111ed8SAndrew Rybchenko
8545e111ed8SAndrew Rybchenko return (should_abort);
8555e111ed8SAndrew Rybchenko }
8565e111ed8SAndrew Rybchenko
8579edb8ee3SAndrew Rybchenko #endif /* EFX_OPTS_EF10() */
8589edb8ee3SAndrew Rybchenko
8599edb8ee3SAndrew Rybchenko #if EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10()
8609edb8ee3SAndrew Rybchenko
	__checkReturn	boolean_t
ef10_ev_mcdi(
	__in		efx_evq_t *eep,
	__in		efx_qword_t *eqp,
	__in		const efx_ev_callbacks_t *eecp,
	__in_opt	void *arg)
{
	efx_nic_t *enp = eep->ee_enp;
	unsigned int code;
	boolean_t should_abort = B_FALSE;
	/* Set when the event is a V2 variant carrying extended fields. */
	boolean_t ev_is_v2 = B_FALSE;

	EFX_EV_QSTAT_INCR(eep, EV_MCDI_RESPONSE);

	/* Dispatch on the MCDI event code. */
	code = EFX_QWORD_FIELD(*eqp, MCDI_EVENT_CODE);
	switch (code) {
	case MCDI_EVENT_CODE_BADSSERT:
		/* Firmware assertion failure: mark MCDI as dead. */
		efx_mcdi_ev_death(enp, EINTR);
		break;

	case MCDI_EVENT_CODE_CMDDONE:
		/* Completion of an MCDI request issued in event mode. */
		efx_mcdi_ev_cpl(enp,
		    MCDI_EV_FIELD(eqp, CMDDONE_SEQ),
		    MCDI_EV_FIELD(eqp, CMDDONE_DATALEN),
		    MCDI_EV_FIELD(eqp, CMDDONE_ERRNO));
		break;

#if EFSYS_OPT_MCDI_PROXY_AUTH
	case MCDI_EVENT_CODE_PROXY_RESPONSE:
		/*
		 * This event notifies a function that an authorization request
		 * has been processed. If the request was authorized then the
		 * function can now re-send the original MCDI request.
		 * See SF-113652-SW "SR-IOV Proxied Network Access Control".
		 */
		efx_mcdi_ev_proxy_response(enp,
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_HANDLE),
		    MCDI_EV_FIELD(eqp, PROXY_RESPONSE_RC));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH */

#if EFSYS_OPT_MCDI_PROXY_AUTH_SERVER
	case MCDI_EVENT_CODE_PROXY_REQUEST:
		/* A proxied MCDI request is awaiting authorization. */
		efx_mcdi_ev_proxy_request(enp,
		    MCDI_EV_FIELD(eqp, PROXY_REQUEST_BUFF_INDEX));
		break;
#endif /* EFSYS_OPT_MCDI_PROXY_AUTH_SERVER */

	case MCDI_EVENT_CODE_LINKCHANGE_V2:
		ev_is_v2 = B_TRUE;
		/* Fallthrough */
	case MCDI_EVENT_CODE_LINKCHANGE: {
		efx_link_mode_t link_mode;

		/* Decode the new link mode and report it to the client. */
		ef10_phy_link_ev(enp, eqp, ev_is_v2, &link_mode);
		should_abort = eecp->eec_link_change(arg, link_mode);
		break;
	}

	case MCDI_EVENT_CODE_SENSOREVT: {
#if EFSYS_OPT_MON_STATS
		efx_mon_stat_t id;
		efx_mon_stat_value_t value;
		efx_rc_t rc;

		/* Decode monitor stat for MCDI sensor (if supported) */
		if ((rc = mcdi_mon_ev(enp, eqp, &id, &value)) == 0) {
			/* Report monitor stat change */
			should_abort = eecp->eec_monitor(arg, id, value);
		} else if (rc == ENOTSUP) {
			/* Unknown sensor: report as an exception instead. */
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_SENSOREVT,
				MCDI_EV_FIELD(eqp, DATA));
		} else {
			EFSYS_ASSERT(rc == ENODEV);	/* Wrong port */
		}
#endif
		break;
	}

	case MCDI_EVENT_CODE_SCHEDERR:
		/* Informational only */
		break;

	case MCDI_EVENT_CODE_REBOOT:
		/* Falcon/Siena only (should not be seen with Huntington). */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MC_REBOOT:
		/* MC_REBOOT event is used for Huntington (EF10) and later. */
		efx_mcdi_ev_death(enp, EIO);
		break;

	case MCDI_EVENT_CODE_MAC_STATS_DMA:
#if EFSYS_OPT_MAC_STATS
		/* A MAC statistics DMA buffer update has completed. */
		if (eecp->eec_mac_stats != NULL) {
			eecp->eec_mac_stats(arg,
			    MCDI_EV_FIELD(eqp, MAC_STATS_DMA_GENERATION));
		}
#endif
		break;

	case MCDI_EVENT_CODE_FWALERT: {
		uint32_t reason = MCDI_EV_FIELD(eqp, FWALERT_REASON);

		/* Map the firmware alert reason onto an exception code. */
		if (reason == MCDI_EVENT_FWALERT_REASON_SRAM_ACCESS)
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_FWALERT_SRAM,
				MCDI_EV_FIELD(eqp, FWALERT_DATA));
		else
			should_abort = eecp->eec_exception(arg,
				EFX_EXCEPTION_UNKNOWN_FWALERT,
				MCDI_EV_FIELD(eqp, DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_ERR: {
		/*
		 * After a TXQ error is detected, firmware sends a TX_ERR event.
		 * This may be followed by TX completions (which we discard),
		 * and then finally by a TX_FLUSH event. Firmware destroys the
		 * TXQ automatically after sending the TX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_TXQ_ERR;

		EFSYS_PROBE2(tx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_TX_ERROR,
		    MCDI_EV_FIELD(eqp, TX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_TX_FLUSH: {
		uint32_t txq_index = MCDI_EV_FIELD(eqp, TX_FLUSH_TXQ);

		/*
		 * EF10 firmware sends two TX_FLUSH events: one to the txq's
		 * event queue, and one to evq 0 (with TX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with TX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, TX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_TX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(tx_descq_fls_done, uint32_t, txq_index);

		EFSYS_ASSERT(eecp->eec_txq_flush_done != NULL);
		should_abort = eecp->eec_txq_flush_done(arg, txq_index);
		break;
	}

	case MCDI_EVENT_CODE_RX_ERR: {
		/*
		 * After an RXQ error is detected, firmware sends an RX_ERR
		 * event. This may be followed by RX events (which we discard),
		 * and then finally by an RX_FLUSH event. Firmware destroys the
		 * RXQ automatically after sending the RX_FLUSH event.
		 */
		enp->en_reset_flags |= EFX_RESET_RXQ_ERR;

		EFSYS_PROBE2(rx_descq_err,
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
			    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));

		/* Inform the driver that a reset is required. */
		eecp->eec_exception(arg, EFX_EXCEPTION_RX_ERROR,
		    MCDI_EV_FIELD(eqp, RX_ERR_DATA));
		break;
	}

	case MCDI_EVENT_CODE_RX_FLUSH: {
		uint32_t rxq_index = MCDI_EV_FIELD(eqp, RX_FLUSH_RXQ);

		/*
		 * EF10 firmware sends two RX_FLUSH events: one to the rxq's
		 * event queue, and one to evq 0 (with RX_FLUSH_TO_DRIVER set).
		 * We want to wait for all completions, so ignore the events
		 * with RX_FLUSH_TO_DRIVER.
		 */
		if (MCDI_EV_FIELD(eqp, RX_FLUSH_TO_DRIVER) != 0) {
			should_abort = B_FALSE;
			break;
		}

		EFX_EV_QSTAT_INCR(eep, EV_DRIVER_RX_DESCQ_FLS_DONE);

		EFSYS_PROBE1(rx_descq_fls_done, uint32_t, rxq_index);

		EFSYS_ASSERT(eecp->eec_rxq_flush_done != NULL);
		should_abort = eecp->eec_rxq_flush_done(arg, rxq_index);
		break;
	}

	default:
		/* Unrecognised MCDI event: trace it and carry on. */
		EFSYS_PROBE3(bad_event, unsigned int, eep->ee_index,
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_1),
		    uint32_t, EFX_QWORD_FIELD(*eqp, EFX_DWORD_0));
		break;
	}

	return (should_abort);
}
10715e111ed8SAndrew Rybchenko
10729edb8ee3SAndrew Rybchenko #endif /* EFSYS_OPT_RIVERHEAD || EFX_OPTS_EF10() */
10739edb8ee3SAndrew Rybchenko
10749edb8ee3SAndrew Rybchenko #if EFX_OPTS_EF10()
10759edb8ee3SAndrew Rybchenko
		void
ef10_ev_rxlabel_init(
	__in		efx_evq_t *eep,
	__in		efx_rxq_t *erp,
	__in		unsigned int label,
	__in		efx_rxq_type_t type)
{
	efx_evq_rxq_state_t *eersp;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	boolean_t packed_stream = (type == EFX_RXQ_TYPE_PACKED_STREAM);
	boolean_t es_super_buffer = (type == EFX_RXQ_TYPE_ES_SUPER_BUFFER);
#endif

	/* 'type' is unused unless a packed-stream/super-buffer option is on. */
	_NOTE(ARGUNUSED(type))
	EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
	eersp = &eep->ee_rxq_state[label];

	/* The label must not already be bound to an RXQ. */
	EFSYS_ASSERT3U(eersp->eers_rx_mask, ==, 0);

#if EFSYS_OPT_RX_PACKED_STREAM
	/*
	 * For packed stream modes, the very first event will
	 * have a new buffer flag set, so it will be incremented,
	 * yielding the correct pointer. That results in a simpler
	 * code than trying to detect start-of-the-world condition
	 * in the event handler.
	 */
	eersp->eers_rx_read_ptr = packed_stream ? ~0 : 0;
#else
	eersp->eers_rx_read_ptr = 0;
#endif
	eersp->eers_rx_mask = erp->er_mask;
#if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
	eersp->eers_rx_stream_npackets = 0;
	eersp->eers_rx_packed_stream = packed_stream || es_super_buffer;
#endif
#if EFSYS_OPT_RX_PACKED_STREAM
	if (packed_stream) {
		/* Initial credit budget derived from the event queue size. */
		eersp->eers_rx_packed_stream_credits = (eep->ee_mask + 1) /
		    EFX_DIV_ROUND_UP(EFX_RX_PACKED_STREAM_MEM_PER_CREDIT,
		    EFX_RX_PACKED_STREAM_MIN_PACKET_SPACE);
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, !=, 0);
		/*
		 * A single credit is allocated to the queue when it is started.
		 * It is immediately spent by the first packet which has NEW
		 * BUFFER flag set, though, but still we shall take into
		 * account, as to not wrap around the maximum number of credits
		 * accidentally
		 */
		eersp->eers_rx_packed_stream_credits--;
		EFSYS_ASSERT3U(eersp->eers_rx_packed_stream_credits, <=,
		    EFX_RX_PACKED_STREAM_MAX_CREDITS);
	}
#endif
}
11315e111ed8SAndrew Rybchenko
11325e111ed8SAndrew Rybchenko void
ef10_ev_rxlabel_fini(__in efx_evq_t * eep,__in unsigned int label)11335e111ed8SAndrew Rybchenko ef10_ev_rxlabel_fini(
11345e111ed8SAndrew Rybchenko __in efx_evq_t *eep,
11355e111ed8SAndrew Rybchenko __in unsigned int label)
11365e111ed8SAndrew Rybchenko {
11375e111ed8SAndrew Rybchenko efx_evq_rxq_state_t *eersp;
11385e111ed8SAndrew Rybchenko
11395e111ed8SAndrew Rybchenko EFSYS_ASSERT3U(label, <, EFX_ARRAY_SIZE(eep->ee_rxq_state));
11405e111ed8SAndrew Rybchenko eersp = &eep->ee_rxq_state[label];
11415e111ed8SAndrew Rybchenko
11425e111ed8SAndrew Rybchenko EFSYS_ASSERT3U(eersp->eers_rx_mask, !=, 0);
11435e111ed8SAndrew Rybchenko
11445e111ed8SAndrew Rybchenko eersp->eers_rx_read_ptr = 0;
11455e111ed8SAndrew Rybchenko eersp->eers_rx_mask = 0;
11465e111ed8SAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM || EFSYS_OPT_RX_ES_SUPER_BUFFER
11475e111ed8SAndrew Rybchenko eersp->eers_rx_stream_npackets = 0;
11485e111ed8SAndrew Rybchenko eersp->eers_rx_packed_stream = B_FALSE;
11495e111ed8SAndrew Rybchenko #endif
11505e111ed8SAndrew Rybchenko #if EFSYS_OPT_RX_PACKED_STREAM
11515e111ed8SAndrew Rybchenko eersp->eers_rx_packed_stream_credits = 0;
11525e111ed8SAndrew Rybchenko #endif
11535e111ed8SAndrew Rybchenko }
11545e111ed8SAndrew Rybchenko
11555e111ed8SAndrew Rybchenko #endif /* EFX_OPTS_EF10() */
1156