xref: /dpdk/drivers/net/sfc/sfc_ev.c (revision 83c31c9945302cf43fb3d26f5e5b1fecc60a35f1)
144c0947bSAndrew Rybchenko /* SPDX-License-Identifier: BSD-3-Clause
2244cfa79SAndrew Rybchenko  *
3a0147be5SAndrew Rybchenko  * Copyright(c) 2019-2020 Xilinx, Inc.
4a0147be5SAndrew Rybchenko  * Copyright(c) 2016-2019 Solarflare Communications Inc.
558294ee6SAndrew Rybchenko  *
658294ee6SAndrew Rybchenko  * This software was jointly developed between OKTET Labs (under contract
758294ee6SAndrew Rybchenko  * for Solarflare) and Solarflare Communications, Inc.
858294ee6SAndrew Rybchenko  */
958294ee6SAndrew Rybchenko 
1058294ee6SAndrew Rybchenko #include <rte_debug.h>
1158294ee6SAndrew Rybchenko #include <rte_cycles.h>
122de39f4eSAndrew Rybchenko #include <rte_alarm.h>
1377f2d053SAndrew Rybchenko #include <rte_branch_prediction.h>
1458294ee6SAndrew Rybchenko 
1558294ee6SAndrew Rybchenko #include "efx.h"
1658294ee6SAndrew Rybchenko 
1758294ee6SAndrew Rybchenko #include "sfc.h"
1858294ee6SAndrew Rybchenko #include "sfc_debug.h"
1958294ee6SAndrew Rybchenko #include "sfc_log.h"
2058294ee6SAndrew Rybchenko #include "sfc_ev.h"
2128944ac0SAndrew Rybchenko #include "sfc_rx.h"
22fed9aeb4SIvan Malov #include "sfc_tx.h"
23c22d3c50SAndrew Rybchenko #include "sfc_kvargs.h"
2458294ee6SAndrew Rybchenko 
2558294ee6SAndrew Rybchenko 
2658294ee6SAndrew Rybchenko /* Initial delay when waiting for event queue init complete event */
2758294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_BACKOFF_START_US	(1)
2858294ee6SAndrew Rybchenko /* Maximum delay between event queue polling attempts */
2958294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
3058294ee6SAndrew Rybchenko /* Event queue init approx timeout */
3158294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)
3258294ee6SAndrew Rybchenko 
332de39f4eSAndrew Rybchenko /* Management event queue polling period in microseconds */
342de39f4eSAndrew Rybchenko #define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)
352de39f4eSAndrew Rybchenko 
36298951a4SAndrew Rybchenko static const char *
37298951a4SAndrew Rybchenko sfc_evq_type2str(enum sfc_evq_type type)
38298951a4SAndrew Rybchenko {
39298951a4SAndrew Rybchenko 	switch (type) {
40298951a4SAndrew Rybchenko 	case SFC_EVQ_TYPE_MGMT:
41298951a4SAndrew Rybchenko 		return "mgmt-evq";
42298951a4SAndrew Rybchenko 	case SFC_EVQ_TYPE_RX:
43298951a4SAndrew Rybchenko 		return "rx-evq";
44298951a4SAndrew Rybchenko 	case SFC_EVQ_TYPE_TX:
45298951a4SAndrew Rybchenko 		return "tx-evq";
46298951a4SAndrew Rybchenko 	default:
47298951a4SAndrew Rybchenko 		SFC_ASSERT(B_FALSE);
48298951a4SAndrew Rybchenko 		return NULL;
49298951a4SAndrew Rybchenko 	}
50298951a4SAndrew Rybchenko }
5158294ee6SAndrew Rybchenko 
5258294ee6SAndrew Rybchenko static boolean_t
5358294ee6SAndrew Rybchenko sfc_ev_initialized(void *arg)
5458294ee6SAndrew Rybchenko {
5558294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
5658294ee6SAndrew Rybchenko 
5758294ee6SAndrew Rybchenko 	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
5858294ee6SAndrew Rybchenko 	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
5958294ee6SAndrew Rybchenko 		   evq->init_state == SFC_EVQ_STARTED);
6058294ee6SAndrew Rybchenko 
6158294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_STARTED;
6258294ee6SAndrew Rybchenko 
6358294ee6SAndrew Rybchenko 	return B_FALSE;
6458294ee6SAndrew Rybchenko }
6558294ee6SAndrew Rybchenko 
6658294ee6SAndrew Rybchenko static boolean_t
677965557eSAndrew Rybchenko sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
687965557eSAndrew Rybchenko 	      uint32_t size, uint16_t flags)
697965557eSAndrew Rybchenko {
707965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
717965557eSAndrew Rybchenko 
727965557eSAndrew Rybchenko 	sfc_err(evq->sa,
737965557eSAndrew Rybchenko 		"EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
747965557eSAndrew Rybchenko 		evq->evq_index, label, id, size, flags);
757965557eSAndrew Rybchenko 	return B_TRUE;
767965557eSAndrew Rybchenko }
777965557eSAndrew Rybchenko 
/*
 * Rx event handler for the libefx-based datapath.
 *
 * Each event carries the last completed descriptor id; the handler
 * advances rxq->pending over the newly completed descriptors, storing
 * the event's size/flags into each software descriptor.  Returns
 * B_FALSE to continue polling, except that malformed events mark the
 * EVQ as excepted (recovered later in sfc_ev_qpoll()).
 */
static boolean_t
sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_efx_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	/* Once an exception is latched, ignore Rx events until restart */
	if (unlikely(evq->exception))
		goto done;

	rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);

	/* One past the last completed descriptor, modulo ring size */
	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	/* Number of newly completed descriptors, accounting for ring wrap */
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * Rx event with no new descriptors done and zero length
		 * is used to abort scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart",
				evq->evq_index, rxq->dp.dpq.queue_id,
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		/* More completions than a single event may report — corrupt */
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart",
			evq->evq_index, rxq->dp.dpq.queue_id,
			id, delta, flags);

		goto done;
	}

	/* Propagate the event's size/flags to every completed descriptor */
	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		/* rxd->size is uint16_t; ensure the value fits */
		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}
15058294ee6SAndrew Rybchenko 
15158294ee6SAndrew Rybchenko static boolean_t
152638bddc9SAndrew Rybchenko sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
153638bddc9SAndrew Rybchenko 	     __rte_unused uint32_t size, __rte_unused uint16_t flags)
154638bddc9SAndrew Rybchenko {
155638bddc9SAndrew Rybchenko 	struct sfc_evq *evq = arg;
156638bddc9SAndrew Rybchenko 	struct sfc_dp_rxq *dp_rxq;
157638bddc9SAndrew Rybchenko 
158638bddc9SAndrew Rybchenko 	dp_rxq = evq->dp_rxq;
159638bddc9SAndrew Rybchenko 	SFC_ASSERT(dp_rxq != NULL);
160638bddc9SAndrew Rybchenko 
1615dec95e3SAndrew Rybchenko 	SFC_ASSERT(evq->sa->priv.dp_rx->qrx_ev != NULL);
1625dec95e3SAndrew Rybchenko 	return evq->sa->priv.dp_rx->qrx_ev(dp_rxq, id);
163638bddc9SAndrew Rybchenko }
164638bddc9SAndrew Rybchenko 
165638bddc9SAndrew Rybchenko static boolean_t
1663379085aSAndrew Rybchenko sfc_ev_nop_rx_packets(void *arg, uint32_t label, unsigned int num_packets,
1673379085aSAndrew Rybchenko 		      uint32_t flags)
1683379085aSAndrew Rybchenko {
1693379085aSAndrew Rybchenko 	struct sfc_evq *evq = arg;
1703379085aSAndrew Rybchenko 
1713379085aSAndrew Rybchenko 	sfc_err(evq->sa,
1723379085aSAndrew Rybchenko 		"EVQ %u unexpected Rx packets event label=%u num=%u flags=%#x",
1733379085aSAndrew Rybchenko 		evq->evq_index, label, num_packets, flags);
1743379085aSAndrew Rybchenko 	return B_TRUE;
1753379085aSAndrew Rybchenko }
1763379085aSAndrew Rybchenko 
1773379085aSAndrew Rybchenko static boolean_t
1783379085aSAndrew Rybchenko sfc_ev_dp_rx_packets(void *arg, __rte_unused uint32_t label,
1793379085aSAndrew Rybchenko 		     unsigned int num_packets, __rte_unused uint32_t flags)
1803379085aSAndrew Rybchenko {
1813379085aSAndrew Rybchenko 	struct sfc_evq *evq = arg;
1823379085aSAndrew Rybchenko 	struct sfc_dp_rxq *dp_rxq;
1833379085aSAndrew Rybchenko 
1843379085aSAndrew Rybchenko 	dp_rxq = evq->dp_rxq;
1853379085aSAndrew Rybchenko 	SFC_ASSERT(dp_rxq != NULL);
1863379085aSAndrew Rybchenko 
1873379085aSAndrew Rybchenko 	SFC_ASSERT(evq->sa->priv.dp_rx->qrx_ev != NULL);
1883379085aSAndrew Rybchenko 	return evq->sa->priv.dp_rx->qrx_ev(dp_rxq, num_packets);
1893379085aSAndrew Rybchenko }
1903379085aSAndrew Rybchenko 
1913379085aSAndrew Rybchenko static boolean_t
192390f9b8dSAndrew Rybchenko sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id,
193390f9b8dSAndrew Rybchenko 		 uint32_t pkt_count, uint16_t flags)
194390f9b8dSAndrew Rybchenko {
195390f9b8dSAndrew Rybchenko 	struct sfc_evq *evq = arg;
196390f9b8dSAndrew Rybchenko 
197390f9b8dSAndrew Rybchenko 	sfc_err(evq->sa,
198390f9b8dSAndrew Rybchenko 		"EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x",
199390f9b8dSAndrew Rybchenko 		evq->evq_index, label, id, pkt_count, flags);
200390f9b8dSAndrew Rybchenko 	return B_TRUE;
201390f9b8dSAndrew Rybchenko }
202390f9b8dSAndrew Rybchenko 
203390f9b8dSAndrew Rybchenko /* It is not actually used on datapath, but required on RxQ flush */
204390f9b8dSAndrew Rybchenko static boolean_t
205390f9b8dSAndrew Rybchenko sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id,
206390f9b8dSAndrew Rybchenko 		__rte_unused uint32_t pkt_count, __rte_unused uint16_t flags)
207390f9b8dSAndrew Rybchenko {
208390f9b8dSAndrew Rybchenko 	struct sfc_evq *evq = arg;
209390f9b8dSAndrew Rybchenko 	struct sfc_dp_rxq *dp_rxq;
210390f9b8dSAndrew Rybchenko 
211390f9b8dSAndrew Rybchenko 	dp_rxq = evq->dp_rxq;
212390f9b8dSAndrew Rybchenko 	SFC_ASSERT(dp_rxq != NULL);
213390f9b8dSAndrew Rybchenko 
2145dec95e3SAndrew Rybchenko 	if (evq->sa->priv.dp_rx->qrx_ps_ev != NULL)
2155dec95e3SAndrew Rybchenko 		return evq->sa->priv.dp_rx->qrx_ps_ev(dp_rxq, id);
216390f9b8dSAndrew Rybchenko 	else
217390f9b8dSAndrew Rybchenko 		return B_FALSE;
218390f9b8dSAndrew Rybchenko }
219390f9b8dSAndrew Rybchenko 
220390f9b8dSAndrew Rybchenko static boolean_t
2217965557eSAndrew Rybchenko sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
2227965557eSAndrew Rybchenko {
2237965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
2247965557eSAndrew Rybchenko 
2257965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
2267965557eSAndrew Rybchenko 		evq->evq_index, label, id);
2277965557eSAndrew Rybchenko 	return B_TRUE;
2287965557eSAndrew Rybchenko }
2297965557eSAndrew Rybchenko 
/*
 * Tx event handler for the libefx-based datapath.
 *
 * The event reports the last completed Tx descriptor id; advance
 * txq->pending past all descriptors completed by this event.
 * Always returns B_FALSE to continue event queue polling.
 */
static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_efx_txq *txq;
	unsigned int stop;
	unsigned int delta;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_efx_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq->evq == evq);

	/* Ignore completions that race with queue stop */
	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
		goto done;

	/* One past the last completed descriptor, modulo ring size */
	stop = (id + 1) & txq->ptr_mask;
	/* Reuse 'id' as the current pending position in the ring */
	id = txq->pending & txq->ptr_mask;

	/* Completed descriptor count, accounting for ring wrap */
	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);

	txq->pending += delta;

done:
	return B_FALSE;
}
25858294ee6SAndrew Rybchenko 
25958294ee6SAndrew Rybchenko static boolean_t
2608b00f426SAndrew Rybchenko sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
2618b00f426SAndrew Rybchenko {
2628b00f426SAndrew Rybchenko 	struct sfc_evq *evq = arg;
2638b00f426SAndrew Rybchenko 	struct sfc_dp_txq *dp_txq;
2648b00f426SAndrew Rybchenko 
2658b00f426SAndrew Rybchenko 	dp_txq = evq->dp_txq;
2668b00f426SAndrew Rybchenko 	SFC_ASSERT(dp_txq != NULL);
2678b00f426SAndrew Rybchenko 
2685dec95e3SAndrew Rybchenko 	SFC_ASSERT(evq->sa->priv.dp_tx->qtx_ev != NULL);
2695dec95e3SAndrew Rybchenko 	return evq->sa->priv.dp_tx->qtx_ev(dp_txq, id);
2708b00f426SAndrew Rybchenko }
2718b00f426SAndrew Rybchenko 
2728b00f426SAndrew Rybchenko static boolean_t
273*83c31c99SAndrew Rybchenko sfc_ev_nop_tx_ndescs(void *arg, uint32_t label, unsigned int ndescs)
274*83c31c99SAndrew Rybchenko {
275*83c31c99SAndrew Rybchenko 	struct sfc_evq *evq = arg;
276*83c31c99SAndrew Rybchenko 
277*83c31c99SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u ndescs=%#x",
278*83c31c99SAndrew Rybchenko 		evq->evq_index, label, ndescs);
279*83c31c99SAndrew Rybchenko 	return B_TRUE;
280*83c31c99SAndrew Rybchenko }
281*83c31c99SAndrew Rybchenko 
282*83c31c99SAndrew Rybchenko static boolean_t
283*83c31c99SAndrew Rybchenko sfc_ev_dp_tx_ndescs(void *arg, __rte_unused uint32_t label,
284*83c31c99SAndrew Rybchenko 		      unsigned int ndescs)
285*83c31c99SAndrew Rybchenko {
286*83c31c99SAndrew Rybchenko 	struct sfc_evq *evq = arg;
287*83c31c99SAndrew Rybchenko 	struct sfc_dp_txq *dp_txq;
288*83c31c99SAndrew Rybchenko 
289*83c31c99SAndrew Rybchenko 	dp_txq = evq->dp_txq;
290*83c31c99SAndrew Rybchenko 	SFC_ASSERT(dp_txq != NULL);
291*83c31c99SAndrew Rybchenko 
292*83c31c99SAndrew Rybchenko 	SFC_ASSERT(evq->sa->priv.dp_tx->qtx_ev != NULL);
293*83c31c99SAndrew Rybchenko 	return evq->sa->priv.dp_tx->qtx_ev(dp_txq, ndescs);
294*83c31c99SAndrew Rybchenko }
295*83c31c99SAndrew Rybchenko 
/*
 * Hardware exception event handler.
 *
 * Unknown sensor events are benign and ignored; any other exception
 * latches evq->exception (triggering queue restart / recovery in
 * sfc_ev_qpoll()) and is logged with a decoded code name.
 * Returns B_TRUE to stop event queue polling for real exceptions.
 */
static boolean_t
sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data)
{
	struct sfc_evq *evq = arg;

	/* Unknown sensor events are harmless; keep polling */
	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
		return B_FALSE;

	evq->exception = B_TRUE;
	sfc_warn(evq->sa,
		 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
		 " needs recovery",
		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
		 "UNKNOWN",
		 code, data, evq->evq_index);

	return B_TRUE;
}
32158294ee6SAndrew Rybchenko 
32258294ee6SAndrew Rybchenko static boolean_t
3237965557eSAndrew Rybchenko sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
3247965557eSAndrew Rybchenko {
3257965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
3267965557eSAndrew Rybchenko 
3277965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
3287965557eSAndrew Rybchenko 		evq->evq_index, rxq_hw_index);
3297965557eSAndrew Rybchenko 	return B_TRUE;
3307965557eSAndrew Rybchenko }
3317965557eSAndrew Rybchenko 
3327965557eSAndrew Rybchenko static boolean_t
33358294ee6SAndrew Rybchenko sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
33458294ee6SAndrew Rybchenko {
33558294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
336df1bfde4SAndrew Rybchenko 	struct sfc_dp_rxq *dp_rxq;
33728944ac0SAndrew Rybchenko 	struct sfc_rxq *rxq;
33858294ee6SAndrew Rybchenko 
339df1bfde4SAndrew Rybchenko 	dp_rxq = evq->dp_rxq;
340df1bfde4SAndrew Rybchenko 	SFC_ASSERT(dp_rxq != NULL);
341df1bfde4SAndrew Rybchenko 
342df1bfde4SAndrew Rybchenko 	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
34328944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq != NULL);
34428944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
34528944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->evq == evq);
3462e42d78dSAndrew Rybchenko 	RTE_SET_USED(rxq);
3472e42d78dSAndrew Rybchenko 
3482e42d78dSAndrew Rybchenko 	sfc_rx_qflush_done(sfc_rxq_info_by_dp_rxq(dp_rxq));
34928944ac0SAndrew Rybchenko 
35028944ac0SAndrew Rybchenko 	return B_FALSE;
35158294ee6SAndrew Rybchenko }
35258294ee6SAndrew Rybchenko 
35358294ee6SAndrew Rybchenko static boolean_t
3547965557eSAndrew Rybchenko sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
3557965557eSAndrew Rybchenko {
3567965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
3577965557eSAndrew Rybchenko 
3587965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
3597965557eSAndrew Rybchenko 		evq->evq_index, rxq_hw_index);
3607965557eSAndrew Rybchenko 	return B_TRUE;
3617965557eSAndrew Rybchenko }
3627965557eSAndrew Rybchenko 
3637965557eSAndrew Rybchenko static boolean_t
36458294ee6SAndrew Rybchenko sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
36558294ee6SAndrew Rybchenko {
36658294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
367df1bfde4SAndrew Rybchenko 	struct sfc_dp_rxq *dp_rxq;
36828944ac0SAndrew Rybchenko 	struct sfc_rxq *rxq;
36958294ee6SAndrew Rybchenko 
370df1bfde4SAndrew Rybchenko 	dp_rxq = evq->dp_rxq;
371df1bfde4SAndrew Rybchenko 	SFC_ASSERT(dp_rxq != NULL);
372df1bfde4SAndrew Rybchenko 
373df1bfde4SAndrew Rybchenko 	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
37428944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq != NULL);
37528944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
37628944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->evq == evq);
3772e42d78dSAndrew Rybchenko 	RTE_SET_USED(rxq);
3782e42d78dSAndrew Rybchenko 
3792e42d78dSAndrew Rybchenko 	sfc_rx_qflush_failed(sfc_rxq_info_by_dp_rxq(dp_rxq));
38028944ac0SAndrew Rybchenko 
38128944ac0SAndrew Rybchenko 	return B_FALSE;
38258294ee6SAndrew Rybchenko }
38358294ee6SAndrew Rybchenko 
38458294ee6SAndrew Rybchenko static boolean_t
3857965557eSAndrew Rybchenko sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
3867965557eSAndrew Rybchenko {
3877965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
3887965557eSAndrew Rybchenko 
3897965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
3907965557eSAndrew Rybchenko 		evq->evq_index, txq_hw_index);
3917965557eSAndrew Rybchenko 	return B_TRUE;
3927965557eSAndrew Rybchenko }
3937965557eSAndrew Rybchenko 
3947965557eSAndrew Rybchenko static boolean_t
39558294ee6SAndrew Rybchenko sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
39658294ee6SAndrew Rybchenko {
39758294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
398dbdc8241SAndrew Rybchenko 	struct sfc_dp_txq *dp_txq;
399fed9aeb4SIvan Malov 	struct sfc_txq *txq;
40058294ee6SAndrew Rybchenko 
401dbdc8241SAndrew Rybchenko 	dp_txq = evq->dp_txq;
402dbdc8241SAndrew Rybchenko 	SFC_ASSERT(dp_txq != NULL);
403dbdc8241SAndrew Rybchenko 
404dbdc8241SAndrew Rybchenko 	txq = sfc_txq_by_dp_txq(dp_txq);
405fed9aeb4SIvan Malov 	SFC_ASSERT(txq != NULL);
406fed9aeb4SIvan Malov 	SFC_ASSERT(txq->hw_index == txq_hw_index);
407fed9aeb4SIvan Malov 	SFC_ASSERT(txq->evq == evq);
408561508daSAndrew Rybchenko 	RTE_SET_USED(txq);
409561508daSAndrew Rybchenko 
410561508daSAndrew Rybchenko 	sfc_tx_qflush_done(sfc_txq_info_by_dp_txq(dp_txq));
411fed9aeb4SIvan Malov 
412fed9aeb4SIvan Malov 	return B_FALSE;
41358294ee6SAndrew Rybchenko }
41458294ee6SAndrew Rybchenko 
41558294ee6SAndrew Rybchenko static boolean_t
41658294ee6SAndrew Rybchenko sfc_ev_software(void *arg, uint16_t magic)
41758294ee6SAndrew Rybchenko {
41858294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
41958294ee6SAndrew Rybchenko 
42058294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
42158294ee6SAndrew Rybchenko 		evq->evq_index, magic);
42258294ee6SAndrew Rybchenko 	return B_TRUE;
42358294ee6SAndrew Rybchenko }
42458294ee6SAndrew Rybchenko 
42558294ee6SAndrew Rybchenko static boolean_t
42658294ee6SAndrew Rybchenko sfc_ev_sram(void *arg, uint32_t code)
42758294ee6SAndrew Rybchenko {
42858294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
42958294ee6SAndrew Rybchenko 
43058294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
43158294ee6SAndrew Rybchenko 		evq->evq_index, code);
43258294ee6SAndrew Rybchenko 	return B_TRUE;
43358294ee6SAndrew Rybchenko }
43458294ee6SAndrew Rybchenko 
43558294ee6SAndrew Rybchenko static boolean_t
43658294ee6SAndrew Rybchenko sfc_ev_wake_up(void *arg, uint32_t index)
43758294ee6SAndrew Rybchenko {
43858294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
43958294ee6SAndrew Rybchenko 
44058294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
44158294ee6SAndrew Rybchenko 		evq->evq_index, index);
44258294ee6SAndrew Rybchenko 	return B_TRUE;
44358294ee6SAndrew Rybchenko }
44458294ee6SAndrew Rybchenko 
44558294ee6SAndrew Rybchenko static boolean_t
44658294ee6SAndrew Rybchenko sfc_ev_timer(void *arg, uint32_t index)
44758294ee6SAndrew Rybchenko {
44858294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
44958294ee6SAndrew Rybchenko 
45058294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
45158294ee6SAndrew Rybchenko 		evq->evq_index, index);
45258294ee6SAndrew Rybchenko 	return B_TRUE;
45358294ee6SAndrew Rybchenko }
45458294ee6SAndrew Rybchenko 
45558294ee6SAndrew Rybchenko static boolean_t
4567965557eSAndrew Rybchenko sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
4577965557eSAndrew Rybchenko {
4587965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
4597965557eSAndrew Rybchenko 
4607965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected link change event",
4617965557eSAndrew Rybchenko 		evq->evq_index);
4627965557eSAndrew Rybchenko 	return B_TRUE;
4637965557eSAndrew Rybchenko }
4647965557eSAndrew Rybchenko 
4657965557eSAndrew Rybchenko static boolean_t
466886f8d8aSArtem Andreev sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
46758294ee6SAndrew Rybchenko {
46858294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
469886f8d8aSArtem Andreev 	struct sfc_adapter *sa = evq->sa;
470886f8d8aSArtem Andreev 	struct rte_eth_link new_link;
471886f8d8aSArtem Andreev 
472886f8d8aSArtem Andreev 	sfc_port_link_mode_to_info(link_mode, &new_link);
473282b72cdSAndrew Rybchenko 	if (rte_eth_linkstatus_set(sa->eth_dev, &new_link) == 0)
4743b809c27SAndrew Rybchenko 		evq->sa->port.lsc_seq++;
475886f8d8aSArtem Andreev 
476886f8d8aSArtem Andreev 	return B_FALSE;
47758294ee6SAndrew Rybchenko }
47858294ee6SAndrew Rybchenko 
/*
 * Callbacks for the management event queue: only init, exception and
 * link change events are meaningful; datapath events are unexpected.
 */
static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_rx_packets		= sfc_ev_nop_rx_packets,
	.eec_rx_ps		= sfc_ev_nop_rx_ps,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_tx_ndescs		= sfc_ev_nop_tx_ndescs,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_link_change,
};
49658294ee6SAndrew Rybchenko 
/*
 * Callbacks for an EVQ serving a libefx-datapath Rx queue:
 * Rx completions and RxQ flush events are handled; Tx and link
 * change events are unexpected here.
 */
static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_efx_rx,
	.eec_rx_packets		= sfc_ev_nop_rx_packets,
	.eec_rx_ps		= sfc_ev_nop_rx_ps,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_tx_ndescs		= sfc_ev_nop_tx_ndescs,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};
514df1bfde4SAndrew Rybchenko 
/*
 * Callbacks for an EVQ serving a non-libefx (datapath-specific) Rx
 * queue: Rx events are delegated to the datapath's own handlers.
 */
static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_dp_rx,
	.eec_rx_packets		= sfc_ev_dp_rx_packets,
	.eec_rx_ps		= sfc_ev_dp_rx_ps,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_tx_ndescs		= sfc_ev_nop_tx_ndescs,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};
5327965557eSAndrew Rybchenko 
/*
 * Callbacks for an EVQ serving a libefx-datapath Tx queue:
 * Tx completions and TxQ flush events are handled; Rx and link
 * change events are unexpected here.
 */
static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_rx_packets		= sfc_ev_nop_rx_packets,
	.eec_rx_ps		= sfc_ev_nop_rx_ps,
	.eec_tx			= sfc_ev_tx,
	.eec_tx_ndescs		= sfc_ev_nop_tx_ndescs,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};
5507965557eSAndrew Rybchenko 
/*
 * Callbacks for an EVQ serving a non-libefx (datapath-specific) Tx
 * queue: Tx events are delegated to the datapath's own handlers.
 */
static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_rx_packets		= sfc_ev_nop_rx_packets,
	.eec_rx_ps		= sfc_ev_nop_rx_ps,
	.eec_tx			= sfc_ev_dp_tx,
	.eec_tx_ndescs		= sfc_ev_dp_tx_ndescs,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};
568dbdc8241SAndrew Rybchenko 
56958294ee6SAndrew Rybchenko 
/*
 * Poll an event queue and dispatch events via its callback table.
 *
 * If an exception was latched during dispatch and the adapter lock can
 * be taken without blocking, attempt recovery by restarting the Rx/Tx
 * queue served by this EVQ; if the exception persists after restart,
 * panic.  If the lock is contended, recovery is retried on a later poll.
 */
void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronize the DMA memory for reading not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);

	/* Trylock: recovery is deferred to a later poll if contended */
	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
		struct sfc_adapter *sa = evq->sa;
		int rc;

		if (evq->dp_rxq != NULL) {
			unsigned int rxq_sw_index;

			rxq_sw_index = evq->dp_rxq->dpq.queue_id;

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			/* qstart clears evq->exception on success */
			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}

		if (evq->dp_txq != NULL) {
			unsigned int txq_sw_index;

			txq_sw_index = evq->dp_txq->dpq.queue_id;

			sfc_warn(sa,
				 "restart TxQ %u because of exception on its EvQ %u",
				 txq_sw_index, evq->evq_index);

			sfc_tx_qstop(sa, txq_sw_index);
			rc = sfc_tx_qstart(sa, txq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart TxQ %u",
					txq_sw_index);
		}

		/* Restart did not clear the exception — unrecoverable */
		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}
62558294ee6SAndrew Rybchenko 
6269a75f75cSAndrew Rybchenko void
6279a75f75cSAndrew Rybchenko sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
6289a75f75cSAndrew Rybchenko {
6299a75f75cSAndrew Rybchenko 	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
630f042136eSAndrew Rybchenko 		if (sa->mgmt_evq_running)
631f042136eSAndrew Rybchenko 			sfc_ev_qpoll(sa->mgmt_evq);
6329a75f75cSAndrew Rybchenko 
6339a75f75cSAndrew Rybchenko 		rte_spinlock_unlock(&sa->mgmt_evq_lock);
6349a75f75cSAndrew Rybchenko 	}
6359a75f75cSAndrew Rybchenko }
6369a75f75cSAndrew Rybchenko 
63758294ee6SAndrew Rybchenko int
63858294ee6SAndrew Rybchenko sfc_ev_qprime(struct sfc_evq *evq)
63958294ee6SAndrew Rybchenko {
64058294ee6SAndrew Rybchenko 	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
64158294ee6SAndrew Rybchenko 	return efx_ev_qprime(evq->common, evq->read_ptr);
64258294ee6SAndrew Rybchenko }
64358294ee6SAndrew Rybchenko 
6446caeec47SAndrew Rybchenko /* Event queue HW index allocation scheme is described in sfc_ev.h. */
64558294ee6SAndrew Rybchenko int
6466caeec47SAndrew Rybchenko sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
64758294ee6SAndrew Rybchenko {
6486caeec47SAndrew Rybchenko 	struct sfc_adapter *sa = evq->sa;
64958294ee6SAndrew Rybchenko 	efsys_mem_t *esmp;
650df456342SAndrew Rybchenko 	uint32_t evq_flags = sa->evq_flags;
65158294ee6SAndrew Rybchenko 	unsigned int total_delay_us;
65258294ee6SAndrew Rybchenko 	unsigned int delay_us;
65358294ee6SAndrew Rybchenko 	int rc;
65458294ee6SAndrew Rybchenko 
6556caeec47SAndrew Rybchenko 	sfc_log_init(sa, "hw_index=%u", hw_index);
65658294ee6SAndrew Rybchenko 
65758294ee6SAndrew Rybchenko 	esmp = &evq->mem;
65858294ee6SAndrew Rybchenko 
6596caeec47SAndrew Rybchenko 	evq->evq_index = hw_index;
6606caeec47SAndrew Rybchenko 
66158294ee6SAndrew Rybchenko 	/* Clear all events */
662afe3c756SIgor Romanov 	(void)memset((void *)esmp->esm_base, 0xff,
663f8a60f76SAndy Moreton 		     efx_evq_size(sa->nic, evq->entries, evq_flags));
66458294ee6SAndrew Rybchenko 
6654279b54eSGeorgiy Levashov 	if ((sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index) ||
6664279b54eSGeorgiy Levashov 	    (sa->intr.rxq_intr && evq->dp_rxq != NULL))
667df456342SAndrew Rybchenko 		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
668df456342SAndrew Rybchenko 	else
669df456342SAndrew Rybchenko 		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;
670df456342SAndrew Rybchenko 
67192fedcd3SIvan Malov 	evq->init_state = SFC_EVQ_STARTING;
67292fedcd3SIvan Malov 
67358294ee6SAndrew Rybchenko 	/* Create the common code event queue */
6746caeec47SAndrew Rybchenko 	rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
675df456342SAndrew Rybchenko 			    0 /* unused on EF10 */, 0, evq_flags,
67658294ee6SAndrew Rybchenko 			    &evq->common);
67758294ee6SAndrew Rybchenko 	if (rc != 0)
67858294ee6SAndrew Rybchenko 		goto fail_ev_qcreate;
67958294ee6SAndrew Rybchenko 
680dbdc8241SAndrew Rybchenko 	SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
681df1bfde4SAndrew Rybchenko 	if (evq->dp_rxq != 0) {
6825dec95e3SAndrew Rybchenko 		if (strcmp(sa->priv.dp_rx->dp.name,
6835dec95e3SAndrew Rybchenko 			   SFC_KVARG_DATAPATH_EFX) == 0)
684df1bfde4SAndrew Rybchenko 			evq->callbacks = &sfc_ev_callbacks_efx_rx;
6857965557eSAndrew Rybchenko 		else
686df1bfde4SAndrew Rybchenko 			evq->callbacks = &sfc_ev_callbacks_dp_rx;
687dbdc8241SAndrew Rybchenko 	} else if (evq->dp_txq != 0) {
6885dec95e3SAndrew Rybchenko 		if (strcmp(sa->priv.dp_tx->dp.name,
6895dec95e3SAndrew Rybchenko 			   SFC_KVARG_DATAPATH_EFX) == 0)
690dbdc8241SAndrew Rybchenko 			evq->callbacks = &sfc_ev_callbacks_efx_tx;
691dbdc8241SAndrew Rybchenko 		else
692dbdc8241SAndrew Rybchenko 			evq->callbacks = &sfc_ev_callbacks_dp_tx;
693df1bfde4SAndrew Rybchenko 	} else {
6947965557eSAndrew Rybchenko 		evq->callbacks = &sfc_ev_callbacks;
695df1bfde4SAndrew Rybchenko 	}
6967965557eSAndrew Rybchenko 
69792fedcd3SIvan Malov 	/*
69892fedcd3SIvan Malov 	 * Poll once to ensure that eec_initialized callback is invoked in
69992fedcd3SIvan Malov 	 * case if the hardware does not support INIT_DONE events. If the
70092fedcd3SIvan Malov 	 * hardware supports INIT_DONE events, this will do nothing, and the
70192fedcd3SIvan Malov 	 * corresponding event will be processed by sfc_ev_qpoll() below.
70292fedcd3SIvan Malov 	 */
70392fedcd3SIvan Malov 	efx_ev_qcreate_check_init_done(evq->common, evq->callbacks, evq);
70458294ee6SAndrew Rybchenko 
70558294ee6SAndrew Rybchenko 	/* Wait for the initialization event */
70658294ee6SAndrew Rybchenko 	total_delay_us = 0;
70758294ee6SAndrew Rybchenko 	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
70858294ee6SAndrew Rybchenko 	do {
70958294ee6SAndrew Rybchenko 		(void)sfc_ev_qpoll(evq);
71058294ee6SAndrew Rybchenko 
71158294ee6SAndrew Rybchenko 		/* Check to see if the initialization complete indication
71258294ee6SAndrew Rybchenko 		 * posted by the hardware.
71358294ee6SAndrew Rybchenko 		 */
71458294ee6SAndrew Rybchenko 		if (evq->init_state == SFC_EVQ_STARTED)
71558294ee6SAndrew Rybchenko 			goto done;
71658294ee6SAndrew Rybchenko 
71758294ee6SAndrew Rybchenko 		/* Give event queue some time to init */
71858294ee6SAndrew Rybchenko 		rte_delay_us(delay_us);
71958294ee6SAndrew Rybchenko 
72058294ee6SAndrew Rybchenko 		total_delay_us += delay_us;
72158294ee6SAndrew Rybchenko 
72258294ee6SAndrew Rybchenko 		/* Exponential backoff */
72358294ee6SAndrew Rybchenko 		delay_us *= 2;
72458294ee6SAndrew Rybchenko 		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
72558294ee6SAndrew Rybchenko 			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;
72658294ee6SAndrew Rybchenko 
72758294ee6SAndrew Rybchenko 	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);
72858294ee6SAndrew Rybchenko 
72958294ee6SAndrew Rybchenko 	rc = ETIMEDOUT;
73058294ee6SAndrew Rybchenko 	goto fail_timedout;
73158294ee6SAndrew Rybchenko 
73258294ee6SAndrew Rybchenko done:
73358294ee6SAndrew Rybchenko 	return 0;
73458294ee6SAndrew Rybchenko 
73558294ee6SAndrew Rybchenko fail_timedout:
73658294ee6SAndrew Rybchenko 	efx_ev_qdestroy(evq->common);
73758294ee6SAndrew Rybchenko 
73858294ee6SAndrew Rybchenko fail_ev_qcreate:
73992fedcd3SIvan Malov 	evq->init_state = SFC_EVQ_INITIALIZED;
74058294ee6SAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
74158294ee6SAndrew Rybchenko 	return rc;
74258294ee6SAndrew Rybchenko }
74358294ee6SAndrew Rybchenko 
74458294ee6SAndrew Rybchenko void
7456caeec47SAndrew Rybchenko sfc_ev_qstop(struct sfc_evq *evq)
74658294ee6SAndrew Rybchenko {
7476caeec47SAndrew Rybchenko 	if (evq == NULL)
7486caeec47SAndrew Rybchenko 		return;
74958294ee6SAndrew Rybchenko 
7506caeec47SAndrew Rybchenko 	sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);
75158294ee6SAndrew Rybchenko 
7526caeec47SAndrew Rybchenko 	if (evq->init_state != SFC_EVQ_STARTED)
75358294ee6SAndrew Rybchenko 		return;
75458294ee6SAndrew Rybchenko 
75558294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_INITIALIZED;
7567965557eSAndrew Rybchenko 	evq->callbacks = NULL;
75758294ee6SAndrew Rybchenko 	evq->read_ptr = 0;
75858294ee6SAndrew Rybchenko 	evq->exception = B_FALSE;
75958294ee6SAndrew Rybchenko 
76058294ee6SAndrew Rybchenko 	efx_ev_qdestroy(evq->common);
7616caeec47SAndrew Rybchenko 
7626caeec47SAndrew Rybchenko 	evq->evq_index = 0;
76358294ee6SAndrew Rybchenko }
76458294ee6SAndrew Rybchenko 
7652de39f4eSAndrew Rybchenko static void
7662de39f4eSAndrew Rybchenko sfc_ev_mgmt_periodic_qpoll(void *arg)
7672de39f4eSAndrew Rybchenko {
7682de39f4eSAndrew Rybchenko 	struct sfc_adapter *sa = arg;
7692de39f4eSAndrew Rybchenko 	int rc;
7702de39f4eSAndrew Rybchenko 
7712de39f4eSAndrew Rybchenko 	sfc_ev_mgmt_qpoll(sa);
7722de39f4eSAndrew Rybchenko 
7732de39f4eSAndrew Rybchenko 	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
7742de39f4eSAndrew Rybchenko 			       sfc_ev_mgmt_periodic_qpoll, sa);
775323706abSAndrew Rybchenko 	if (rc == -ENOTSUP) {
776323706abSAndrew Rybchenko 		sfc_warn(sa, "alarms are not supported");
777323706abSAndrew Rybchenko 		sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
778323706abSAndrew Rybchenko 	} else if (rc != 0) {
779323706abSAndrew Rybchenko 		sfc_err(sa,
7802de39f4eSAndrew Rybchenko 			"cannot rearm management EVQ polling alarm (rc=%d)",
7812de39f4eSAndrew Rybchenko 			rc);
7822de39f4eSAndrew Rybchenko 	}
783323706abSAndrew Rybchenko }
7842de39f4eSAndrew Rybchenko 
7852de39f4eSAndrew Rybchenko static void
7862de39f4eSAndrew Rybchenko sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
7872de39f4eSAndrew Rybchenko {
7882de39f4eSAndrew Rybchenko 	sfc_ev_mgmt_periodic_qpoll(sa);
7892de39f4eSAndrew Rybchenko }
7902de39f4eSAndrew Rybchenko 
7912de39f4eSAndrew Rybchenko static void
7922de39f4eSAndrew Rybchenko sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
7932de39f4eSAndrew Rybchenko {
7942de39f4eSAndrew Rybchenko 	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
7952de39f4eSAndrew Rybchenko }
7962de39f4eSAndrew Rybchenko 
79758294ee6SAndrew Rybchenko int
79858294ee6SAndrew Rybchenko sfc_ev_start(struct sfc_adapter *sa)
79958294ee6SAndrew Rybchenko {
80058294ee6SAndrew Rybchenko 	int rc;
80158294ee6SAndrew Rybchenko 
80258294ee6SAndrew Rybchenko 	sfc_log_init(sa, "entry");
80358294ee6SAndrew Rybchenko 
80458294ee6SAndrew Rybchenko 	rc = efx_ev_init(sa->nic);
80558294ee6SAndrew Rybchenko 	if (rc != 0)
80658294ee6SAndrew Rybchenko 		goto fail_ev_init;
80758294ee6SAndrew Rybchenko 
8089a75f75cSAndrew Rybchenko 	/* Start management EVQ used for global events */
8099a75f75cSAndrew Rybchenko 
810f042136eSAndrew Rybchenko 	/*
811f042136eSAndrew Rybchenko 	 * Management event queue start polls the queue, but it cannot
812f042136eSAndrew Rybchenko 	 * interfere with other polling contexts since mgmt_evq_running
813f042136eSAndrew Rybchenko 	 * is false yet.
814f042136eSAndrew Rybchenko 	 */
8156caeec47SAndrew Rybchenko 	rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
8169a75f75cSAndrew Rybchenko 	if (rc != 0)
8179a75f75cSAndrew Rybchenko 		goto fail_mgmt_evq_start;
8189a75f75cSAndrew Rybchenko 
819f042136eSAndrew Rybchenko 	rte_spinlock_lock(&sa->mgmt_evq_lock);
820f042136eSAndrew Rybchenko 	sa->mgmt_evq_running = true;
821f042136eSAndrew Rybchenko 	rte_spinlock_unlock(&sa->mgmt_evq_lock);
822f042136eSAndrew Rybchenko 
8233b809c27SAndrew Rybchenko 	if (sa->intr.lsc_intr) {
8246caeec47SAndrew Rybchenko 		rc = sfc_ev_qprime(sa->mgmt_evq);
8253b809c27SAndrew Rybchenko 		if (rc != 0)
82681568924SAndrew Rybchenko 			goto fail_mgmt_evq_prime;
8273b809c27SAndrew Rybchenko 	}
8283b809c27SAndrew Rybchenko 
82958294ee6SAndrew Rybchenko 	/*
8302de39f4eSAndrew Rybchenko 	 * Start management EVQ polling. If interrupts are disabled
8312de39f4eSAndrew Rybchenko 	 * (not used), it is required to process link status change
8322de39f4eSAndrew Rybchenko 	 * and other device level events to avoid unrecoverable
8332de39f4eSAndrew Rybchenko 	 * error because the event queue overflow.
8342de39f4eSAndrew Rybchenko 	 */
8352de39f4eSAndrew Rybchenko 	sfc_ev_mgmt_periodic_qpoll_start(sa);
8362de39f4eSAndrew Rybchenko 
8372de39f4eSAndrew Rybchenko 	/*
8389a75f75cSAndrew Rybchenko 	 * Rx/Tx event queues are started/stopped when corresponding
8399a75f75cSAndrew Rybchenko 	 * Rx/Tx queue is started/stopped.
84058294ee6SAndrew Rybchenko 	 */
84158294ee6SAndrew Rybchenko 
84258294ee6SAndrew Rybchenko 	return 0;
84358294ee6SAndrew Rybchenko 
84481568924SAndrew Rybchenko fail_mgmt_evq_prime:
8456caeec47SAndrew Rybchenko 	sfc_ev_qstop(sa->mgmt_evq);
8463b809c27SAndrew Rybchenko 
8479a75f75cSAndrew Rybchenko fail_mgmt_evq_start:
8489a75f75cSAndrew Rybchenko 	efx_ev_fini(sa->nic);
8499a75f75cSAndrew Rybchenko 
85058294ee6SAndrew Rybchenko fail_ev_init:
85158294ee6SAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
85258294ee6SAndrew Rybchenko 	return rc;
85358294ee6SAndrew Rybchenko }
85458294ee6SAndrew Rybchenko 
85558294ee6SAndrew Rybchenko void
85658294ee6SAndrew Rybchenko sfc_ev_stop(struct sfc_adapter *sa)
85758294ee6SAndrew Rybchenko {
85858294ee6SAndrew Rybchenko 	sfc_log_init(sa, "entry");
85958294ee6SAndrew Rybchenko 
8602de39f4eSAndrew Rybchenko 	sfc_ev_mgmt_periodic_qpoll_stop(sa);
8612de39f4eSAndrew Rybchenko 
8629a75f75cSAndrew Rybchenko 	rte_spinlock_lock(&sa->mgmt_evq_lock);
863f042136eSAndrew Rybchenko 	sa->mgmt_evq_running = false;
8649a75f75cSAndrew Rybchenko 	rte_spinlock_unlock(&sa->mgmt_evq_lock);
86558294ee6SAndrew Rybchenko 
866f042136eSAndrew Rybchenko 	sfc_ev_qstop(sa->mgmt_evq);
867f042136eSAndrew Rybchenko 
86858294ee6SAndrew Rybchenko 	efx_ev_fini(sa->nic);
86958294ee6SAndrew Rybchenko }
87058294ee6SAndrew Rybchenko 
87158294ee6SAndrew Rybchenko int
8726caeec47SAndrew Rybchenko sfc_ev_qinit(struct sfc_adapter *sa,
873298951a4SAndrew Rybchenko 	     enum sfc_evq_type type, unsigned int type_index,
8746caeec47SAndrew Rybchenko 	     unsigned int entries, int socket_id, struct sfc_evq **evqp)
87558294ee6SAndrew Rybchenko {
87658294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
87758294ee6SAndrew Rybchenko 	int rc;
87858294ee6SAndrew Rybchenko 
8796caeec47SAndrew Rybchenko 	sfc_log_init(sa, "type=%s type_index=%u",
8806caeec47SAndrew Rybchenko 		     sfc_evq_type2str(type), type_index);
88158294ee6SAndrew Rybchenko 
88258294ee6SAndrew Rybchenko 	SFC_ASSERT(rte_is_power_of_2(entries));
88358294ee6SAndrew Rybchenko 
8840c16506eSAndrew Rybchenko 	rc = ENOMEM;
88558294ee6SAndrew Rybchenko 	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
88658294ee6SAndrew Rybchenko 				 socket_id);
88758294ee6SAndrew Rybchenko 	if (evq == NULL)
8880c16506eSAndrew Rybchenko 		goto fail_evq_alloc;
88958294ee6SAndrew Rybchenko 
89058294ee6SAndrew Rybchenko 	evq->sa = sa;
891298951a4SAndrew Rybchenko 	evq->type = type;
892ec9217f9SAndrew Rybchenko 	evq->entries = entries;
89358294ee6SAndrew Rybchenko 
89458294ee6SAndrew Rybchenko 	/* Allocate DMA space */
895298951a4SAndrew Rybchenko 	rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
896f8a60f76SAndy Moreton 			   efx_evq_size(sa->nic, evq->entries, sa->evq_flags),
897f8a60f76SAndy Moreton 			   socket_id, &evq->mem);
89858294ee6SAndrew Rybchenko 	if (rc != 0)
8990c16506eSAndrew Rybchenko 		goto fail_dma_alloc;
90058294ee6SAndrew Rybchenko 
90158294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_INITIALIZED;
90258294ee6SAndrew Rybchenko 
9036caeec47SAndrew Rybchenko 	sa->evq_count++;
9046caeec47SAndrew Rybchenko 
9056caeec47SAndrew Rybchenko 	*evqp = evq;
90658294ee6SAndrew Rybchenko 
90758294ee6SAndrew Rybchenko 	return 0;
9080c16506eSAndrew Rybchenko 
9090c16506eSAndrew Rybchenko fail_dma_alloc:
9100c16506eSAndrew Rybchenko 	rte_free(evq);
9110c16506eSAndrew Rybchenko 
9120c16506eSAndrew Rybchenko fail_evq_alloc:
9130c16506eSAndrew Rybchenko 
9140c16506eSAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
9150c16506eSAndrew Rybchenko 	return rc;
91658294ee6SAndrew Rybchenko }
91758294ee6SAndrew Rybchenko 
91858294ee6SAndrew Rybchenko void
9196caeec47SAndrew Rybchenko sfc_ev_qfini(struct sfc_evq *evq)
92058294ee6SAndrew Rybchenko {
9216caeec47SAndrew Rybchenko 	struct sfc_adapter *sa = evq->sa;
92258294ee6SAndrew Rybchenko 
92358294ee6SAndrew Rybchenko 	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);
92458294ee6SAndrew Rybchenko 
92558294ee6SAndrew Rybchenko 	sfc_dma_free(sa, &evq->mem);
92658294ee6SAndrew Rybchenko 
92758294ee6SAndrew Rybchenko 	rte_free(evq);
92858294ee6SAndrew Rybchenko 
9296caeec47SAndrew Rybchenko 	SFC_ASSERT(sa->evq_count > 0);
9306caeec47SAndrew Rybchenko 	sa->evq_count--;
931c22d3c50SAndrew Rybchenko }
932c22d3c50SAndrew Rybchenko 
933c22d3c50SAndrew Rybchenko static int
934c22d3c50SAndrew Rybchenko sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
935c22d3c50SAndrew Rybchenko 			       const char *value_str, void *opaque)
936c22d3c50SAndrew Rybchenko {
9375076ad03SRoman Zhukov 	uint32_t *value = opaque;
938c22d3c50SAndrew Rybchenko 
939c22d3c50SAndrew Rybchenko 	if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
940c22d3c50SAndrew Rybchenko 		*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
941c22d3c50SAndrew Rybchenko 	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
942c22d3c50SAndrew Rybchenko 		*value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
943c22d3c50SAndrew Rybchenko 	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
944c22d3c50SAndrew Rybchenko 		*value = EFX_EVQ_FLAGS_TYPE_AUTO;
945c22d3c50SAndrew Rybchenko 	else
946c22d3c50SAndrew Rybchenko 		return -EINVAL;
94758294ee6SAndrew Rybchenko 
94858294ee6SAndrew Rybchenko 	return 0;
94958294ee6SAndrew Rybchenko }
95058294ee6SAndrew Rybchenko 
95158294ee6SAndrew Rybchenko int
95247995190SAndrew Rybchenko sfc_ev_attach(struct sfc_adapter *sa)
95358294ee6SAndrew Rybchenko {
95458294ee6SAndrew Rybchenko 	int rc;
95558294ee6SAndrew Rybchenko 
95658294ee6SAndrew Rybchenko 	sfc_log_init(sa, "entry");
95758294ee6SAndrew Rybchenko 
958c22d3c50SAndrew Rybchenko 	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
959c22d3c50SAndrew Rybchenko 	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
960c22d3c50SAndrew Rybchenko 				sfc_kvarg_perf_profile_handler,
961c22d3c50SAndrew Rybchenko 				&sa->evq_flags);
962c22d3c50SAndrew Rybchenko 	if (rc != 0) {
963c22d3c50SAndrew Rybchenko 		sfc_err(sa, "invalid %s parameter value",
964c22d3c50SAndrew Rybchenko 			SFC_KVARG_PERF_PROFILE);
965c22d3c50SAndrew Rybchenko 		goto fail_kvarg_perf_profile;
966c22d3c50SAndrew Rybchenko 	}
967c22d3c50SAndrew Rybchenko 
96858294ee6SAndrew Rybchenko 	sa->mgmt_evq_index = 0;
9699a75f75cSAndrew Rybchenko 	rte_spinlock_init(&sa->mgmt_evq_lock);
97058294ee6SAndrew Rybchenko 
971d5371f3dSIgor Romanov 	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, sa->evq_min_entries,
9726caeec47SAndrew Rybchenko 			  sa->socket_id, &sa->mgmt_evq);
9739a75f75cSAndrew Rybchenko 	if (rc != 0)
9749a75f75cSAndrew Rybchenko 		goto fail_mgmt_evq_init;
9759a75f75cSAndrew Rybchenko 
97658294ee6SAndrew Rybchenko 	/*
97758294ee6SAndrew Rybchenko 	 * Rx/Tx event queues are created/destroyed when corresponding
97858294ee6SAndrew Rybchenko 	 * Rx/Tx queue is created/destroyed.
97958294ee6SAndrew Rybchenko 	 */
98058294ee6SAndrew Rybchenko 
98158294ee6SAndrew Rybchenko 	return 0;
98258294ee6SAndrew Rybchenko 
9839a75f75cSAndrew Rybchenko fail_mgmt_evq_init:
984c22d3c50SAndrew Rybchenko 
985c22d3c50SAndrew Rybchenko fail_kvarg_perf_profile:
98658294ee6SAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
98758294ee6SAndrew Rybchenko 	return rc;
98858294ee6SAndrew Rybchenko }
98958294ee6SAndrew Rybchenko 
99058294ee6SAndrew Rybchenko void
99147995190SAndrew Rybchenko sfc_ev_detach(struct sfc_adapter *sa)
99258294ee6SAndrew Rybchenko {
99358294ee6SAndrew Rybchenko 	sfc_log_init(sa, "entry");
99458294ee6SAndrew Rybchenko 
9956caeec47SAndrew Rybchenko 	sfc_ev_qfini(sa->mgmt_evq);
99658294ee6SAndrew Rybchenko 
9976caeec47SAndrew Rybchenko 	if (sa->evq_count != 0)
9986caeec47SAndrew Rybchenko 		sfc_err(sa, "%u EvQs are not destroyed before detach",
9996caeec47SAndrew Rybchenko 			sa->evq_count);
100058294ee6SAndrew Rybchenko }
1001