/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright(c) 2019-2020 Xilinx, Inc.
 * Copyright(c) 2016-2019 Solarflare Communications Inc.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 */

#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_branch_prediction.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"


/* Initial delay when waiting for event queue init complete event */
#define SFC_EVQ_INIT_BACKOFF_START_US	(1)
/* Maximum delay between event queue polling attempts */
#define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
/* Event queue init approx timeout */
#define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)

/* Management event queue polling period in microseconds */
#define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)

static const char *
sfc_evq_type2str(enum sfc_evq_type type)
{
	switch (type) {
	case SFC_EVQ_TYPE_MGMT:
		return "mgmt-evq";
	case SFC_EVQ_TYPE_RX:
		return "rx-evq";
	case SFC_EVQ_TYPE_TX:
		return "tx-evq";
	default:
		SFC_ASSERT(B_FALSE);
		return NULL;
	}
}

static boolean_t
sfc_ev_initialized(void *arg)
{
	struct sfc_evq *evq = arg;

	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
		   evq->init_state == SFC_EVQ_STARTED);

	evq->init_state = SFC_EVQ_STARTED;

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa,
		"EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
		evq->evq_index, label, id, size, flags);
	return B_TRUE;
}

static boolean_t
sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_efx_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	if (unlikely(evq->exception))
		goto done;

	rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);

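	/*
	 * The event carries the index of the last completed descriptor;
	 * convert it into the number of newly completed descriptors,
	 * handling ring wrap-around with the pointer mask.
	 */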
	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * An Rx event with no new descriptors done and zero length
		 * is used to abort a scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart",
				evq->evq_index, rxq->dp.dpq.queue_id,
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart",
			evq->evq_index, rxq->dp.dpq.queue_id,
			id, delta, flags);

		goto done;
	}

	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}

static boolean_t
sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	     __rte_unused uint32_t size, __rte_unused uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	SFC_ASSERT(evq->sa->priv.dp_rx->qrx_ev != NULL);
	return evq->sa->priv.dp_rx->qrx_ev(dp_rxq, id);
}

static boolean_t
sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id,
		 uint32_t pkt_count, uint16_t flags)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa,
		"EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x",
		evq->evq_index, label, id, pkt_count, flags);
	return B_TRUE;
}

/* It is not actually used on the datapath, but is required on RxQ flush */
static boolean_t
sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id,
		__rte_unused uint32_t pkt_count, __rte_unused uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	if (evq->sa->priv.dp_rx->qrx_ps_ev != NULL)
		return evq->sa->priv.dp_rx->qrx_ps_ev(dp_rxq, id);
	else
		return B_FALSE;
}

static boolean_t
sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
		evq->evq_index, label, id);
	return B_TRUE;
}

static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_efx_txq *txq;
	unsigned int stop;
	unsigned int delta;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_efx_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq->evq == evq);

	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
		goto done;

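	/*
	 * Compute the number of descriptors completed by this event from
	 * the reported descriptor index, handling ring wrap-around with
	 * the pointer mask, and advance the pending count accordingly.
	 */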
	stop = (id + 1) & txq->ptr_mask;
	id = txq->pending & txq->ptr_mask;

	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);

	txq->pending += delta;

done:
	return B_FALSE;
}

static boolean_t
sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	SFC_ASSERT(evq->sa->priv.dp_tx->qtx_ev != NULL);
	return evq->sa->priv.dp_tx->qtx_ev(dp_txq, id);
}

static boolean_t
sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data)
{
	struct sfc_evq *evq = arg;

	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
		return B_FALSE;

	evq->exception = B_TRUE;
	sfc_warn(evq->sa,
		 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
		 " needs recovery",
		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
		 "UNKNOWN",
		 code, data, evq->evq_index);

	return B_TRUE;
}

static boolean_t
sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	RTE_SET_USED(rxq);

	sfc_rx_qflush_done(sfc_rxq_info_by_dp_rxq(dp_rxq));

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	RTE_SET_USED(rxq);

	sfc_rx_qflush_failed(sfc_rxq_info_by_dp_rxq(dp_rxq));

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
		evq->evq_index, txq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_txq *txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->hw_index == txq_hw_index);
	SFC_ASSERT(txq->evq == evq);
	RTE_SET_USED(txq);

	sfc_tx_qflush_done(sfc_txq_info_by_dp_txq(dp_txq));

	return B_FALSE;
}

static boolean_t
sfc_ev_software(void *arg, uint16_t magic)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
		evq->evq_index, magic);
	return B_TRUE;
}

static boolean_t
sfc_ev_sram(void *arg, uint32_t code)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
		evq->evq_index, code);
	return B_TRUE;
}

static boolean_t
sfc_ev_wake_up(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_timer(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected link change event",
		evq->evq_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;
	struct sfc_adapter *sa = evq->sa;
	struct rte_eth_link new_link;

	sfc_port_link_mode_to_info(link_mode, &new_link);
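	/*
	 * A zero return from rte_eth_linkstatus_set() indicates that the
	 * link status has actually changed; bump the link status change
	 * sequence only in that case.
	 */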
	if (rte_eth_linkstatus_set(sa->eth_dev, &new_link) == 0)
		evq->sa->port.lsc_seq++;

	return B_FALSE;
}

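/*
 * One callback table per event queue role (management, EFX Rx/Tx,
 * native datapath Rx/Tx); events that are not expected for a role are
 * routed to the corresponding sfc_ev_nop_*() handler which logs an error.
 */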
static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_rx_ps		= sfc_ev_nop_rx_ps,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_efx_rx,
	.eec_rx_ps		= sfc_ev_nop_rx_ps,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_dp_rx,
	.eec_rx_ps		= sfc_ev_dp_rx_ps,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_rx_ps		= sfc_ev_nop_rx_ps,
	.eec_tx			= sfc_ev_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_rx_ps		= sfc_ev_nop_rx_ps,
	.eec_tx			= sfc_ev_dp_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};


void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronizing the DMA memory for reading is not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);

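	/*
	 * On exception, try to take the adapter lock and restart the Rx/Tx
	 * queue served by this event queue. If the exception flag is still
	 * set after the restart attempts, the condition is unrecoverable.
	 */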
	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
		struct sfc_adapter *sa = evq->sa;
		int rc;

		if (evq->dp_rxq != NULL) {
			unsigned int rxq_sw_index;

			rxq_sw_index = evq->dp_rxq->dpq.queue_id;

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}

		if (evq->dp_txq != NULL) {
			unsigned int txq_sw_index;

			txq_sw_index = evq->dp_txq->dpq.queue_id;

			sfc_warn(sa,
				 "restart TxQ %u because of exception on its EvQ %u",
				 txq_sw_index, evq->evq_index);

			sfc_tx_qstop(sa, txq_sw_index);
			rc = sfc_tx_qstart(sa, txq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart TxQ %u",
					txq_sw_index);
		}

		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}

void
sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
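	/*
	 * Use trylock so that pollers never block each other: if the lock
	 * is contended, the management EVQ is being polled or reconfigured
	 * in another context and this poll may be safely skipped.
	 */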
	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
		if (sa->mgmt_evq_running)
			sfc_ev_qpoll(sa->mgmt_evq);

		rte_spinlock_unlock(&sa->mgmt_evq_lock);
	}
}

int
sfc_ev_qprime(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
	return efx_ev_qprime(evq->common, evq->read_ptr);
}

/* Event queue HW index allocation scheme is described in sfc_ev.h. */
int
sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
{
	struct sfc_adapter *sa = evq->sa;
	efsys_mem_t *esmp;
	uint32_t evq_flags = sa->evq_flags;
	unsigned int total_delay_us;
	unsigned int delay_us;
	int rc;

	sfc_log_init(sa, "hw_index=%u", hw_index);

	esmp = &evq->mem;

	evq->evq_index = hw_index;

	/* Clear all events */
	(void)memset((void *)esmp->esm_base, 0xff,
		     efx_evq_size(sa->nic, evq->entries));

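	/*
	 * Interrupt notification is requested only for the management
	 * event queue when link status change interrupts are used and
	 * for Rx event queues when Rx interrupts are enabled.
	 */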
	if ((sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index) ||
	    (sa->intr.rxq_intr && evq->dp_rxq != NULL))
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
	else
		evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;

	/* Create the common code event queue */
	rc = efx_ev_qcreate(sa->nic, hw_index, esmp, evq->entries,
			    0 /* unused on EF10 */, 0, evq_flags,
			    &evq->common);
	if (rc != 0)
		goto fail_ev_qcreate;

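	/*
	 * Select the callback set matching the queue served by this EVQ:
	 * EFX or native datapath Rx/Tx callbacks, or the default set with
	 * link change handling for the management EVQ.
	 */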
	SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
	if (evq->dp_rxq != 0) {
		if (strcmp(sa->priv.dp_rx->dp.name,
			   SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_rx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_rx;
	} else if (evq->dp_txq != 0) {
		if (strcmp(sa->priv.dp_tx->dp.name,
			   SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_tx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_tx;
	} else {
		evq->callbacks = &sfc_ev_callbacks;
	}

	evq->init_state = SFC_EVQ_STARTING;

	/* Wait for the initialization event */
	total_delay_us = 0;
	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
	do {
		(void)sfc_ev_qpoll(evq);

		/* Check to see if the initialization complete indication
		 * has been posted by the hardware.
		 */
		if (evq->init_state == SFC_EVQ_STARTED)
			goto done;

		/* Give the event queue some time to init */
		rte_delay_us(delay_us);

		total_delay_us += delay_us;

		/* Exponential backoff */
		delay_us *= 2;
		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

	rc = ETIMEDOUT;
	goto fail_timedout;

done:
	return 0;

fail_timedout:
	evq->init_state = SFC_EVQ_INITIALIZED;
	efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qstop(struct sfc_evq *evq)
{
	if (evq == NULL)
		return;

	sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);

	if (evq->init_state != SFC_EVQ_STARTED)
		return;

	evq->init_state = SFC_EVQ_INITIALIZED;
	evq->callbacks = NULL;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

	efx_ev_qdestroy(evq->common);

	evq->evq_index = 0;
}

static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;
	int rc;

	sfc_ev_mgmt_qpoll(sa);

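	/* Re-arm the alarm to keep polling the management EVQ periodically */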
	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
			       sfc_ev_mgmt_periodic_qpoll, sa);
	if (rc == -ENOTSUP) {
		sfc_warn(sa, "alarms are not supported");
		sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
	} else if (rc != 0) {
		sfc_err(sa,
			"cannot rearm management EVQ polling alarm (rc=%d)",
			rc);
	}
}

static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}

static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}

int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start management EVQ used for global events */

	/*
	 * Management event queue start polls the queue, but it cannot
	 * interfere with other polling contexts since mgmt_evq_running
	 * is still false.
	 */
	rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sa->mgmt_evq_running = true;
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

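	/*
	 * Prime the management EVQ so that a link status change event can
	 * raise an interrupt when LSC interrupts are enabled.
	 */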
	if (sa->intr.lsc_intr) {
		rc = sfc_ev_qprime(sa->mgmt_evq);
		if (rc != 0)
			goto fail_mgmt_evq_prime;
	}

	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), polling is required to process link status changes
	 * and other device level events and to avoid an unrecoverable
	 * error due to event queue overflow.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when the corresponding
	 * Rx/Tx queue is started/stopped.
	 */

	return 0;

fail_mgmt_evq_prime:
	sfc_ev_qstop(sa->mgmt_evq);

fail_mgmt_evq_start:
	efx_ev_fini(sa->nic);

fail_ev_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_stop(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_mgmt_periodic_qpoll_stop(sa);

	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sa->mgmt_evq_running = false;
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	sfc_ev_qstop(sa->mgmt_evq);

	efx_ev_fini(sa->nic);
}

int
sfc_ev_qinit(struct sfc_adapter *sa,
	     enum sfc_evq_type type, unsigned int type_index,
	     unsigned int entries, int socket_id, struct sfc_evq **evqp)
{
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "type=%s type_index=%u",
		     sfc_evq_type2str(type), type_index);

	SFC_ASSERT(rte_is_power_of_2(entries));

	rc = ENOMEM;
	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (evq == NULL)
		goto fail_evq_alloc;

	evq->sa = sa;
	evq->type = type;
	evq->entries = entries;

	/* Allocate DMA space */
	rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
			   efx_evq_size(sa->nic, evq->entries), socket_id,
			   &evq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	evq->init_state = SFC_EVQ_INITIALIZED;

	sa->evq_count++;

	*evqp = evq;

	return 0;

fail_dma_alloc:
	rte_free(evq);

fail_evq_alloc:

	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qfini(struct sfc_evq *evq)
{
	struct sfc_adapter *sa = evq->sa;

	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);

	sfc_dma_free(sa, &evq->mem);

	rte_free(evq);

	SFC_ASSERT(sa->evq_count > 0);
	sa->evq_count--;
}

static int
sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
			       const char *value_str, void *opaque)
{
	uint32_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_AUTO;
	else
		return -EINVAL;

	return 0;
}

int
sfc_ev_attach(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

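	/*
	 * Default to throughput-optimized event queues; the perf_profile
	 * kvarg may override the choice (see sfc_kvarg_perf_profile_handler).
	 */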
	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
				sfc_kvarg_perf_profile_handler,
				&sa->evq_flags);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value",
			SFC_KVARG_PERF_PROFILE);
		goto fail_kvarg_perf_profile;
	}

	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, sa->evq_min_entries,
			  sa->socket_id, &sa->mgmt_evq);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when the corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:

fail_kvarg_perf_profile:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_qfini(sa->mgmt_evq);

	if (sa->evq_count != 0)
		sfc_err(sa, "%u EvQs are not destroyed before detach",
			sa->evq_count);
}