xref: /dpdk/drivers/net/sfc/sfc_ev.c (revision dbdc82416b723b1f089bdcea99d5441016aa724d)
158294ee6SAndrew Rybchenko /*-
2244cfa79SAndrew Rybchenko  *   BSD LICENSE
3244cfa79SAndrew Rybchenko  *
4244cfa79SAndrew Rybchenko  * Copyright (c) 2016-2017 Solarflare Communications Inc.
558294ee6SAndrew Rybchenko  * All rights reserved.
658294ee6SAndrew Rybchenko  *
758294ee6SAndrew Rybchenko  * This software was jointly developed between OKTET Labs (under contract
858294ee6SAndrew Rybchenko  * for Solarflare) and Solarflare Communications, Inc.
958294ee6SAndrew Rybchenko  *
1058294ee6SAndrew Rybchenko  * Redistribution and use in source and binary forms, with or without
1158294ee6SAndrew Rybchenko  * modification, are permitted provided that the following conditions are met:
1258294ee6SAndrew Rybchenko  *
1358294ee6SAndrew Rybchenko  * 1. Redistributions of source code must retain the above copyright notice,
1458294ee6SAndrew Rybchenko  *    this list of conditions and the following disclaimer.
1558294ee6SAndrew Rybchenko  * 2. Redistributions in binary form must reproduce the above copyright notice,
1658294ee6SAndrew Rybchenko  *    this list of conditions and the following disclaimer in the documentation
1758294ee6SAndrew Rybchenko  *    and/or other materials provided with the distribution.
1858294ee6SAndrew Rybchenko  *
1958294ee6SAndrew Rybchenko  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2058294ee6SAndrew Rybchenko  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2158294ee6SAndrew Rybchenko  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2258294ee6SAndrew Rybchenko  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2358294ee6SAndrew Rybchenko  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2458294ee6SAndrew Rybchenko  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2558294ee6SAndrew Rybchenko  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
2658294ee6SAndrew Rybchenko  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
2758294ee6SAndrew Rybchenko  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
2858294ee6SAndrew Rybchenko  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
2958294ee6SAndrew Rybchenko  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3058294ee6SAndrew Rybchenko  */
3158294ee6SAndrew Rybchenko 
3258294ee6SAndrew Rybchenko #include <rte_debug.h>
3358294ee6SAndrew Rybchenko #include <rte_cycles.h>
342de39f4eSAndrew Rybchenko #include <rte_alarm.h>
3577f2d053SAndrew Rybchenko #include <rte_branch_prediction.h>
3658294ee6SAndrew Rybchenko 
3758294ee6SAndrew Rybchenko #include "efx.h"
3858294ee6SAndrew Rybchenko 
3958294ee6SAndrew Rybchenko #include "sfc.h"
4058294ee6SAndrew Rybchenko #include "sfc_debug.h"
4158294ee6SAndrew Rybchenko #include "sfc_log.h"
4258294ee6SAndrew Rybchenko #include "sfc_ev.h"
4328944ac0SAndrew Rybchenko #include "sfc_rx.h"
44fed9aeb4SIvan Malov #include "sfc_tx.h"
45c22d3c50SAndrew Rybchenko #include "sfc_kvargs.h"
4658294ee6SAndrew Rybchenko 
4758294ee6SAndrew Rybchenko 
4858294ee6SAndrew Rybchenko /* Initial delay when waiting for event queue init complete event */
4958294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_BACKOFF_START_US	(1)
5058294ee6SAndrew Rybchenko /* Maximum delay between event queue polling attempts */
5158294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
5258294ee6SAndrew Rybchenko /* Event queue init approx timeout */
5358294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)
5458294ee6SAndrew Rybchenko 
552de39f4eSAndrew Rybchenko /* Management event queue polling period in microseconds */
562de39f4eSAndrew Rybchenko #define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)
572de39f4eSAndrew Rybchenko 
5858294ee6SAndrew Rybchenko 
5958294ee6SAndrew Rybchenko static boolean_t
6058294ee6SAndrew Rybchenko sfc_ev_initialized(void *arg)
6158294ee6SAndrew Rybchenko {
6258294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
6358294ee6SAndrew Rybchenko 
6458294ee6SAndrew Rybchenko 	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
6558294ee6SAndrew Rybchenko 	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
6658294ee6SAndrew Rybchenko 		   evq->init_state == SFC_EVQ_STARTED);
6758294ee6SAndrew Rybchenko 
6858294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_STARTED;
6958294ee6SAndrew Rybchenko 
7058294ee6SAndrew Rybchenko 	return B_FALSE;
7158294ee6SAndrew Rybchenko }
7258294ee6SAndrew Rybchenko 
7358294ee6SAndrew Rybchenko static boolean_t
747965557eSAndrew Rybchenko sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
757965557eSAndrew Rybchenko 	      uint32_t size, uint16_t flags)
767965557eSAndrew Rybchenko {
777965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
787965557eSAndrew Rybchenko 
797965557eSAndrew Rybchenko 	sfc_err(evq->sa,
807965557eSAndrew Rybchenko 		"EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
817965557eSAndrew Rybchenko 		evq->evq_index, label, id, size, flags);
827965557eSAndrew Rybchenko 	return B_TRUE;
837965557eSAndrew Rybchenko }
847965557eSAndrew Rybchenko 
/*
 * Rx completion event handler for the EFX (libefx-based) Rx datapath.
 *
 * Marks software descriptors from rxq->pending up to and including 'id'
 * as completed with the given size/flags and advances rxq->pending.
 * Errors are latched in evq->exception rather than stopping polling,
 * so the return value is always B_FALSE (continue processing).
 */
static boolean_t
sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_efx_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	/* Once an exception is latched, ignore events until recovery */
	if (unlikely(evq->exception))
		goto done;

	rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);

	/*
	 * Number of descriptors completed by this event, computed on the
	 * ring (ptr_mask is the ring size minus one; wrap-around aware).
	 */
	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * Rx event with no new descriptors done and zero length
		 * is used to abort scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart",
				evq->evq_index, rxq->dp.dpq.queue_id,
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		/*
		 * More completions than one event may legitimately carry:
		 * treat as out-of-order completion and latch the exception.
		 */
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart",
			evq->evq_index, rxq->dp.dpq.queue_id,
			id, delta, flags);

		goto done;
	}

	/* Stamp size/flags into every software descriptor just completed */
	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		/* size must fit the 16-bit sw descriptor field */
		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}
15758294ee6SAndrew Rybchenko 
15858294ee6SAndrew Rybchenko static boolean_t
159638bddc9SAndrew Rybchenko sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
160638bddc9SAndrew Rybchenko 	     __rte_unused uint32_t size, __rte_unused uint16_t flags)
161638bddc9SAndrew Rybchenko {
162638bddc9SAndrew Rybchenko 	struct sfc_evq *evq = arg;
163638bddc9SAndrew Rybchenko 	struct sfc_dp_rxq *dp_rxq;
164638bddc9SAndrew Rybchenko 
165638bddc9SAndrew Rybchenko 	dp_rxq = evq->dp_rxq;
166638bddc9SAndrew Rybchenko 	SFC_ASSERT(dp_rxq != NULL);
167638bddc9SAndrew Rybchenko 
168638bddc9SAndrew Rybchenko 	SFC_ASSERT(evq->sa->dp_rx->qrx_ev != NULL);
169638bddc9SAndrew Rybchenko 	return evq->sa->dp_rx->qrx_ev(dp_rxq, id);
170638bddc9SAndrew Rybchenko }
171638bddc9SAndrew Rybchenko 
172638bddc9SAndrew Rybchenko static boolean_t
1737965557eSAndrew Rybchenko sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
1747965557eSAndrew Rybchenko {
1757965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
1767965557eSAndrew Rybchenko 
1777965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
1787965557eSAndrew Rybchenko 		evq->evq_index, label, id);
1797965557eSAndrew Rybchenko 	return B_TRUE;
1807965557eSAndrew Rybchenko }
1817965557eSAndrew Rybchenko 
/*
 * Tx completion event handler for the EFX Tx datapath.
 *
 * Advances txq->pending over all descriptors completed up to and
 * including 'id' (ring wrap-around aware).  Always continues polling.
 */
static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_efx_txq *txq;
	unsigned int stop;
	unsigned int delta;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_efx_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq->evq == evq);

	/*
	 * NOTE(review): event on a queue without STARTED flag is silently
	 * dropped — presumably a late event for a stopped/stopping queue;
	 * confirm against the TxQ stop sequence.
	 */
	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
		goto done;

	/* First ring index past the last completed descriptor */
	stop = (id + 1) & txq->ptr_mask;
	/* Reuse 'id' as the ring index of the first pending descriptor */
	id = txq->pending & txq->ptr_mask;

	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);

	txq->pending += delta;

done:
	return B_FALSE;
}
21058294ee6SAndrew Rybchenko 
21158294ee6SAndrew Rybchenko static boolean_t
21258294ee6SAndrew Rybchenko sfc_ev_exception(void *arg, __rte_unused uint32_t code,
21358294ee6SAndrew Rybchenko 		 __rte_unused uint32_t data)
21458294ee6SAndrew Rybchenko {
21558294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
21658294ee6SAndrew Rybchenko 
21798200dd9SAndrew Rybchenko 	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
21898200dd9SAndrew Rybchenko 		return B_FALSE;
21998200dd9SAndrew Rybchenko 
22098200dd9SAndrew Rybchenko 	evq->exception = B_TRUE;
22198200dd9SAndrew Rybchenko 	sfc_warn(evq->sa,
22298200dd9SAndrew Rybchenko 		 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
22398200dd9SAndrew Rybchenko 		 " needs recovery",
22498200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
22598200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
22698200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
22798200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
22898200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
22998200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
23098200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
23198200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
23298200dd9SAndrew Rybchenko 		 "UNKNOWN",
23398200dd9SAndrew Rybchenko 		 code, data, evq->evq_index);
23498200dd9SAndrew Rybchenko 
23558294ee6SAndrew Rybchenko 	return B_TRUE;
23658294ee6SAndrew Rybchenko }
23758294ee6SAndrew Rybchenko 
23858294ee6SAndrew Rybchenko static boolean_t
2397965557eSAndrew Rybchenko sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
2407965557eSAndrew Rybchenko {
2417965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
2427965557eSAndrew Rybchenko 
2437965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
2447965557eSAndrew Rybchenko 		evq->evq_index, rxq_hw_index);
2457965557eSAndrew Rybchenko 	return B_TRUE;
2467965557eSAndrew Rybchenko }
2477965557eSAndrew Rybchenko 
2487965557eSAndrew Rybchenko static boolean_t
24958294ee6SAndrew Rybchenko sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
25058294ee6SAndrew Rybchenko {
25158294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
252df1bfde4SAndrew Rybchenko 	struct sfc_dp_rxq *dp_rxq;
25328944ac0SAndrew Rybchenko 	struct sfc_rxq *rxq;
25458294ee6SAndrew Rybchenko 
255df1bfde4SAndrew Rybchenko 	dp_rxq = evq->dp_rxq;
256df1bfde4SAndrew Rybchenko 	SFC_ASSERT(dp_rxq != NULL);
257df1bfde4SAndrew Rybchenko 
258df1bfde4SAndrew Rybchenko 	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
25928944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq != NULL);
26028944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
26128944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->evq == evq);
26228944ac0SAndrew Rybchenko 	sfc_rx_qflush_done(rxq);
26328944ac0SAndrew Rybchenko 
26428944ac0SAndrew Rybchenko 	return B_FALSE;
26558294ee6SAndrew Rybchenko }
26658294ee6SAndrew Rybchenko 
26758294ee6SAndrew Rybchenko static boolean_t
2687965557eSAndrew Rybchenko sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
2697965557eSAndrew Rybchenko {
2707965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
2717965557eSAndrew Rybchenko 
2727965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
2737965557eSAndrew Rybchenko 		evq->evq_index, rxq_hw_index);
2747965557eSAndrew Rybchenko 	return B_TRUE;
2757965557eSAndrew Rybchenko }
2767965557eSAndrew Rybchenko 
2777965557eSAndrew Rybchenko static boolean_t
27858294ee6SAndrew Rybchenko sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
27958294ee6SAndrew Rybchenko {
28058294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
281df1bfde4SAndrew Rybchenko 	struct sfc_dp_rxq *dp_rxq;
28228944ac0SAndrew Rybchenko 	struct sfc_rxq *rxq;
28358294ee6SAndrew Rybchenko 
284df1bfde4SAndrew Rybchenko 	dp_rxq = evq->dp_rxq;
285df1bfde4SAndrew Rybchenko 	SFC_ASSERT(dp_rxq != NULL);
286df1bfde4SAndrew Rybchenko 
287df1bfde4SAndrew Rybchenko 	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
28828944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq != NULL);
28928944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
29028944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->evq == evq);
29128944ac0SAndrew Rybchenko 	sfc_rx_qflush_failed(rxq);
29228944ac0SAndrew Rybchenko 
29328944ac0SAndrew Rybchenko 	return B_FALSE;
29458294ee6SAndrew Rybchenko }
29558294ee6SAndrew Rybchenko 
29658294ee6SAndrew Rybchenko static boolean_t
2977965557eSAndrew Rybchenko sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
2987965557eSAndrew Rybchenko {
2997965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
3007965557eSAndrew Rybchenko 
3017965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
3027965557eSAndrew Rybchenko 		evq->evq_index, txq_hw_index);
3037965557eSAndrew Rybchenko 	return B_TRUE;
3047965557eSAndrew Rybchenko }
3057965557eSAndrew Rybchenko 
3067965557eSAndrew Rybchenko static boolean_t
30758294ee6SAndrew Rybchenko sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
30858294ee6SAndrew Rybchenko {
30958294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
310*dbdc8241SAndrew Rybchenko 	struct sfc_dp_txq *dp_txq;
311fed9aeb4SIvan Malov 	struct sfc_txq *txq;
31258294ee6SAndrew Rybchenko 
313*dbdc8241SAndrew Rybchenko 	dp_txq = evq->dp_txq;
314*dbdc8241SAndrew Rybchenko 	SFC_ASSERT(dp_txq != NULL);
315*dbdc8241SAndrew Rybchenko 
316*dbdc8241SAndrew Rybchenko 	txq = sfc_txq_by_dp_txq(dp_txq);
317fed9aeb4SIvan Malov 	SFC_ASSERT(txq != NULL);
318fed9aeb4SIvan Malov 	SFC_ASSERT(txq->hw_index == txq_hw_index);
319fed9aeb4SIvan Malov 	SFC_ASSERT(txq->evq == evq);
320fed9aeb4SIvan Malov 	sfc_tx_qflush_done(txq);
321fed9aeb4SIvan Malov 
322fed9aeb4SIvan Malov 	return B_FALSE;
32358294ee6SAndrew Rybchenko }
32458294ee6SAndrew Rybchenko 
32558294ee6SAndrew Rybchenko static boolean_t
32658294ee6SAndrew Rybchenko sfc_ev_software(void *arg, uint16_t magic)
32758294ee6SAndrew Rybchenko {
32858294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
32958294ee6SAndrew Rybchenko 
33058294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
33158294ee6SAndrew Rybchenko 		evq->evq_index, magic);
33258294ee6SAndrew Rybchenko 	return B_TRUE;
33358294ee6SAndrew Rybchenko }
33458294ee6SAndrew Rybchenko 
33558294ee6SAndrew Rybchenko static boolean_t
33658294ee6SAndrew Rybchenko sfc_ev_sram(void *arg, uint32_t code)
33758294ee6SAndrew Rybchenko {
33858294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
33958294ee6SAndrew Rybchenko 
34058294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
34158294ee6SAndrew Rybchenko 		evq->evq_index, code);
34258294ee6SAndrew Rybchenko 	return B_TRUE;
34358294ee6SAndrew Rybchenko }
34458294ee6SAndrew Rybchenko 
34558294ee6SAndrew Rybchenko static boolean_t
34658294ee6SAndrew Rybchenko sfc_ev_wake_up(void *arg, uint32_t index)
34758294ee6SAndrew Rybchenko {
34858294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
34958294ee6SAndrew Rybchenko 
35058294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
35158294ee6SAndrew Rybchenko 		evq->evq_index, index);
35258294ee6SAndrew Rybchenko 	return B_TRUE;
35358294ee6SAndrew Rybchenko }
35458294ee6SAndrew Rybchenko 
35558294ee6SAndrew Rybchenko static boolean_t
35658294ee6SAndrew Rybchenko sfc_ev_timer(void *arg, uint32_t index)
35758294ee6SAndrew Rybchenko {
35858294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
35958294ee6SAndrew Rybchenko 
36058294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
36158294ee6SAndrew Rybchenko 		evq->evq_index, index);
36258294ee6SAndrew Rybchenko 	return B_TRUE;
36358294ee6SAndrew Rybchenko }
36458294ee6SAndrew Rybchenko 
36558294ee6SAndrew Rybchenko static boolean_t
3667965557eSAndrew Rybchenko sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
3677965557eSAndrew Rybchenko {
3687965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
3697965557eSAndrew Rybchenko 
3707965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected link change event",
3717965557eSAndrew Rybchenko 		evq->evq_index);
3727965557eSAndrew Rybchenko 	return B_TRUE;
3737965557eSAndrew Rybchenko }
3747965557eSAndrew Rybchenko 
/*
 * Handle a link change event on the management EVQ.
 *
 * Translates the libefx link mode into rte_eth_link and publishes it
 * atomically into the shared device link data; port.lsc_seq is bumped
 * only when the published value actually changes.
 */
static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;
	struct sfc_adapter *sa = evq->sa;
	struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
	struct rte_eth_link new_link;
	uint64_t new_link_u64;
	uint64_t old_link_u64;

	/* The whole link structure must fit into one atomic 64-bit word */
	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));

	sfc_port_link_mode_to_info(link_mode, &new_link);

	new_link_u64 = *(uint64_t *)&new_link;
	/* CAS loop: publish the new link unless it is already current */
	do {
		old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
		if (old_link_u64 == new_link_u64)
			break;

		if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					old_link_u64, new_link_u64)) {
			evq->sa->port.lsc_seq++;
			break;
		}
	} while (B_TRUE);

	return B_FALSE;
}
40458294ee6SAndrew Rybchenko 
40558294ee6SAndrew Rybchenko static const efx_ev_callbacks_t sfc_ev_callbacks = {
40658294ee6SAndrew Rybchenko 	.eec_initialized	= sfc_ev_initialized,
4077965557eSAndrew Rybchenko 	.eec_rx			= sfc_ev_nop_rx,
4087965557eSAndrew Rybchenko 	.eec_tx			= sfc_ev_nop_tx,
40958294ee6SAndrew Rybchenko 	.eec_exception		= sfc_ev_exception,
4107965557eSAndrew Rybchenko 	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
4117965557eSAndrew Rybchenko 	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
4127965557eSAndrew Rybchenko 	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
41358294ee6SAndrew Rybchenko 	.eec_software		= sfc_ev_software,
41458294ee6SAndrew Rybchenko 	.eec_sram		= sfc_ev_sram,
41558294ee6SAndrew Rybchenko 	.eec_wake_up		= sfc_ev_wake_up,
41658294ee6SAndrew Rybchenko 	.eec_timer		= sfc_ev_timer,
41758294ee6SAndrew Rybchenko 	.eec_link_change	= sfc_ev_link_change,
41858294ee6SAndrew Rybchenko };
41958294ee6SAndrew Rybchenko 
420df1bfde4SAndrew Rybchenko static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
4217965557eSAndrew Rybchenko 	.eec_initialized	= sfc_ev_initialized,
422df1bfde4SAndrew Rybchenko 	.eec_rx			= sfc_ev_efx_rx,
423df1bfde4SAndrew Rybchenko 	.eec_tx			= sfc_ev_nop_tx,
424df1bfde4SAndrew Rybchenko 	.eec_exception		= sfc_ev_exception,
425df1bfde4SAndrew Rybchenko 	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
426df1bfde4SAndrew Rybchenko 	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
427df1bfde4SAndrew Rybchenko 	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
428df1bfde4SAndrew Rybchenko 	.eec_software		= sfc_ev_software,
429df1bfde4SAndrew Rybchenko 	.eec_sram		= sfc_ev_sram,
430df1bfde4SAndrew Rybchenko 	.eec_wake_up		= sfc_ev_wake_up,
431df1bfde4SAndrew Rybchenko 	.eec_timer		= sfc_ev_timer,
432df1bfde4SAndrew Rybchenko 	.eec_link_change	= sfc_ev_nop_link_change,
433df1bfde4SAndrew Rybchenko };
434df1bfde4SAndrew Rybchenko 
435df1bfde4SAndrew Rybchenko static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
436df1bfde4SAndrew Rybchenko 	.eec_initialized	= sfc_ev_initialized,
437638bddc9SAndrew Rybchenko 	.eec_rx			= sfc_ev_dp_rx,
4387965557eSAndrew Rybchenko 	.eec_tx			= sfc_ev_nop_tx,
4397965557eSAndrew Rybchenko 	.eec_exception		= sfc_ev_exception,
4407965557eSAndrew Rybchenko 	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
4417965557eSAndrew Rybchenko 	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
4427965557eSAndrew Rybchenko 	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
4437965557eSAndrew Rybchenko 	.eec_software		= sfc_ev_software,
4447965557eSAndrew Rybchenko 	.eec_sram		= sfc_ev_sram,
4457965557eSAndrew Rybchenko 	.eec_wake_up		= sfc_ev_wake_up,
4467965557eSAndrew Rybchenko 	.eec_timer		= sfc_ev_timer,
4477965557eSAndrew Rybchenko 	.eec_link_change	= sfc_ev_nop_link_change,
4487965557eSAndrew Rybchenko };
4497965557eSAndrew Rybchenko 
450*dbdc8241SAndrew Rybchenko static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
4517965557eSAndrew Rybchenko 	.eec_initialized	= sfc_ev_initialized,
4527965557eSAndrew Rybchenko 	.eec_rx			= sfc_ev_nop_rx,
4537965557eSAndrew Rybchenko 	.eec_tx			= sfc_ev_tx,
4547965557eSAndrew Rybchenko 	.eec_exception		= sfc_ev_exception,
4557965557eSAndrew Rybchenko 	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
4567965557eSAndrew Rybchenko 	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
4577965557eSAndrew Rybchenko 	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
4587965557eSAndrew Rybchenko 	.eec_software		= sfc_ev_software,
4597965557eSAndrew Rybchenko 	.eec_sram		= sfc_ev_sram,
4607965557eSAndrew Rybchenko 	.eec_wake_up		= sfc_ev_wake_up,
4617965557eSAndrew Rybchenko 	.eec_timer		= sfc_ev_timer,
4627965557eSAndrew Rybchenko 	.eec_link_change	= sfc_ev_nop_link_change,
4637965557eSAndrew Rybchenko };
4647965557eSAndrew Rybchenko 
465*dbdc8241SAndrew Rybchenko static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
466*dbdc8241SAndrew Rybchenko 	.eec_initialized	= sfc_ev_initialized,
467*dbdc8241SAndrew Rybchenko 	.eec_rx			= sfc_ev_nop_rx,
468*dbdc8241SAndrew Rybchenko 	.eec_tx			= sfc_ev_nop_tx,
469*dbdc8241SAndrew Rybchenko 	.eec_exception		= sfc_ev_exception,
470*dbdc8241SAndrew Rybchenko 	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
471*dbdc8241SAndrew Rybchenko 	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
472*dbdc8241SAndrew Rybchenko 	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
473*dbdc8241SAndrew Rybchenko 	.eec_software		= sfc_ev_software,
474*dbdc8241SAndrew Rybchenko 	.eec_sram		= sfc_ev_sram,
475*dbdc8241SAndrew Rybchenko 	.eec_wake_up		= sfc_ev_wake_up,
476*dbdc8241SAndrew Rybchenko 	.eec_timer		= sfc_ev_timer,
477*dbdc8241SAndrew Rybchenko 	.eec_link_change	= sfc_ev_nop_link_change,
478*dbdc8241SAndrew Rybchenko };
479*dbdc8241SAndrew Rybchenko 
48058294ee6SAndrew Rybchenko 
/*
 * Poll an event queue and process all pending events via evq->callbacks.
 *
 * If an exception was latched during processing, attempt recovery by
 * restarting the attached Rx/Tx queue under the adapter lock; if the
 * exception persists after the restart attempt, panic.
 */
void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronize the DMA memory for reading not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);

	/*
	 * Recovery only proceeds if the adapter lock can be taken without
	 * blocking; otherwise it is retried on a subsequent poll.
	 */
	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
		struct sfc_adapter *sa = evq->sa;
		int rc;

		if (evq->dp_rxq != NULL) {
			unsigned int rxq_sw_index;

			rxq_sw_index = evq->dp_rxq->dpq.queue_id;

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}

		if (evq->dp_txq != NULL) {
			unsigned int txq_sw_index;

			txq_sw_index = evq->dp_txq->dpq.queue_id;

			sfc_warn(sa,
				 "restart TxQ %u because of exception on its EvQ %u",
				 txq_sw_index, evq->evq_index);

			sfc_tx_qstop(sa, txq_sw_index);
			rc = sfc_tx_qstart(sa, txq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart TxQ %u",
					txq_sw_index);
		}

		/*
		 * NOTE(review): queue restart is presumably expected to
		 * clear evq->exception; confirm against sfc_rx/tx_qstart.
		 */
		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}
53658294ee6SAndrew Rybchenko 
5379a75f75cSAndrew Rybchenko void
5389a75f75cSAndrew Rybchenko sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
5399a75f75cSAndrew Rybchenko {
5409a75f75cSAndrew Rybchenko 	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
5419a75f75cSAndrew Rybchenko 		struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;
5429a75f75cSAndrew Rybchenko 
5439a75f75cSAndrew Rybchenko 		if (mgmt_evq->init_state == SFC_EVQ_STARTED)
5449a75f75cSAndrew Rybchenko 			sfc_ev_qpoll(mgmt_evq);
5459a75f75cSAndrew Rybchenko 
5469a75f75cSAndrew Rybchenko 		rte_spinlock_unlock(&sa->mgmt_evq_lock);
5479a75f75cSAndrew Rybchenko 	}
5489a75f75cSAndrew Rybchenko }
5499a75f75cSAndrew Rybchenko 
55058294ee6SAndrew Rybchenko int
55158294ee6SAndrew Rybchenko sfc_ev_qprime(struct sfc_evq *evq)
55258294ee6SAndrew Rybchenko {
55358294ee6SAndrew Rybchenko 	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
55458294ee6SAndrew Rybchenko 	return efx_ev_qprime(evq->common, evq->read_ptr);
55558294ee6SAndrew Rybchenko }
55658294ee6SAndrew Rybchenko 
55758294ee6SAndrew Rybchenko int
55858294ee6SAndrew Rybchenko sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index)
55958294ee6SAndrew Rybchenko {
56058294ee6SAndrew Rybchenko 	const struct sfc_evq_info *evq_info;
56158294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
56258294ee6SAndrew Rybchenko 	efsys_mem_t *esmp;
56358294ee6SAndrew Rybchenko 	unsigned int total_delay_us;
56458294ee6SAndrew Rybchenko 	unsigned int delay_us;
56558294ee6SAndrew Rybchenko 	int rc;
56658294ee6SAndrew Rybchenko 
56758294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
56858294ee6SAndrew Rybchenko 
56958294ee6SAndrew Rybchenko 	evq_info = &sa->evq_info[sw_index];
57058294ee6SAndrew Rybchenko 	evq = evq_info->evq;
57158294ee6SAndrew Rybchenko 	esmp = &evq->mem;
57258294ee6SAndrew Rybchenko 
57358294ee6SAndrew Rybchenko 	/* Clear all events */
57458294ee6SAndrew Rybchenko 	(void)memset((void *)esmp->esm_base, 0xff,
57558294ee6SAndrew Rybchenko 		     EFX_EVQ_SIZE(evq_info->entries));
57658294ee6SAndrew Rybchenko 
57758294ee6SAndrew Rybchenko 	/* Create the common code event queue */
57858294ee6SAndrew Rybchenko 	rc = efx_ev_qcreate(sa->nic, sw_index, esmp, evq_info->entries,
579c22d3c50SAndrew Rybchenko 			    0 /* unused on EF10 */, 0, evq_info->flags,
58058294ee6SAndrew Rybchenko 			    &evq->common);
58158294ee6SAndrew Rybchenko 	if (rc != 0)
58258294ee6SAndrew Rybchenko 		goto fail_ev_qcreate;
58358294ee6SAndrew Rybchenko 
584*dbdc8241SAndrew Rybchenko 	SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
585df1bfde4SAndrew Rybchenko 	if (evq->dp_rxq != 0) {
586df1bfde4SAndrew Rybchenko 		if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
587df1bfde4SAndrew Rybchenko 			evq->callbacks = &sfc_ev_callbacks_efx_rx;
5887965557eSAndrew Rybchenko 		else
589df1bfde4SAndrew Rybchenko 			evq->callbacks = &sfc_ev_callbacks_dp_rx;
590*dbdc8241SAndrew Rybchenko 	} else if (evq->dp_txq != 0) {
591*dbdc8241SAndrew Rybchenko 		if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
592*dbdc8241SAndrew Rybchenko 			evq->callbacks = &sfc_ev_callbacks_efx_tx;
593*dbdc8241SAndrew Rybchenko 		else
594*dbdc8241SAndrew Rybchenko 			evq->callbacks = &sfc_ev_callbacks_dp_tx;
595df1bfde4SAndrew Rybchenko 	} else {
5967965557eSAndrew Rybchenko 		evq->callbacks = &sfc_ev_callbacks;
597df1bfde4SAndrew Rybchenko 	}
5987965557eSAndrew Rybchenko 
59958294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_STARTING;
60058294ee6SAndrew Rybchenko 
60158294ee6SAndrew Rybchenko 	/* Wait for the initialization event */
60258294ee6SAndrew Rybchenko 	total_delay_us = 0;
60358294ee6SAndrew Rybchenko 	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
60458294ee6SAndrew Rybchenko 	do {
60558294ee6SAndrew Rybchenko 		(void)sfc_ev_qpoll(evq);
60658294ee6SAndrew Rybchenko 
60758294ee6SAndrew Rybchenko 		/* Check to see if the initialization complete indication
60858294ee6SAndrew Rybchenko 		 * posted by the hardware.
60958294ee6SAndrew Rybchenko 		 */
61058294ee6SAndrew Rybchenko 		if (evq->init_state == SFC_EVQ_STARTED)
61158294ee6SAndrew Rybchenko 			goto done;
61258294ee6SAndrew Rybchenko 
61358294ee6SAndrew Rybchenko 		/* Give event queue some time to init */
61458294ee6SAndrew Rybchenko 		rte_delay_us(delay_us);
61558294ee6SAndrew Rybchenko 
61658294ee6SAndrew Rybchenko 		total_delay_us += delay_us;
61758294ee6SAndrew Rybchenko 
61858294ee6SAndrew Rybchenko 		/* Exponential backoff */
61958294ee6SAndrew Rybchenko 		delay_us *= 2;
62058294ee6SAndrew Rybchenko 		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
62158294ee6SAndrew Rybchenko 			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;
62258294ee6SAndrew Rybchenko 
62358294ee6SAndrew Rybchenko 	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);
62458294ee6SAndrew Rybchenko 
62558294ee6SAndrew Rybchenko 	rc = ETIMEDOUT;
62658294ee6SAndrew Rybchenko 	goto fail_timedout;
62758294ee6SAndrew Rybchenko 
62858294ee6SAndrew Rybchenko done:
62958294ee6SAndrew Rybchenko 	return 0;
63058294ee6SAndrew Rybchenko 
63158294ee6SAndrew Rybchenko fail_timedout:
63258294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_INITIALIZED;
63358294ee6SAndrew Rybchenko 	efx_ev_qdestroy(evq->common);
63458294ee6SAndrew Rybchenko 
63558294ee6SAndrew Rybchenko fail_ev_qcreate:
63658294ee6SAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
63758294ee6SAndrew Rybchenko 	return rc;
63858294ee6SAndrew Rybchenko }
63958294ee6SAndrew Rybchenko 
64058294ee6SAndrew Rybchenko void
64158294ee6SAndrew Rybchenko sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
64258294ee6SAndrew Rybchenko {
64358294ee6SAndrew Rybchenko 	const struct sfc_evq_info *evq_info;
64458294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
64558294ee6SAndrew Rybchenko 
64658294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
64758294ee6SAndrew Rybchenko 
64858294ee6SAndrew Rybchenko 	SFC_ASSERT(sw_index < sa->evq_count);
64958294ee6SAndrew Rybchenko 
65058294ee6SAndrew Rybchenko 	evq_info = &sa->evq_info[sw_index];
65158294ee6SAndrew Rybchenko 	evq = evq_info->evq;
65258294ee6SAndrew Rybchenko 
65358294ee6SAndrew Rybchenko 	if (evq == NULL || evq->init_state != SFC_EVQ_STARTED)
65458294ee6SAndrew Rybchenko 		return;
65558294ee6SAndrew Rybchenko 
65658294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_INITIALIZED;
6577965557eSAndrew Rybchenko 	evq->callbacks = NULL;
65858294ee6SAndrew Rybchenko 	evq->read_ptr = 0;
65958294ee6SAndrew Rybchenko 	evq->exception = B_FALSE;
66058294ee6SAndrew Rybchenko 
66158294ee6SAndrew Rybchenko 	efx_ev_qdestroy(evq->common);
66258294ee6SAndrew Rybchenko }
66358294ee6SAndrew Rybchenko 
6642de39f4eSAndrew Rybchenko static void
6652de39f4eSAndrew Rybchenko sfc_ev_mgmt_periodic_qpoll(void *arg)
6662de39f4eSAndrew Rybchenko {
6672de39f4eSAndrew Rybchenko 	struct sfc_adapter *sa = arg;
6682de39f4eSAndrew Rybchenko 	int rc;
6692de39f4eSAndrew Rybchenko 
6702de39f4eSAndrew Rybchenko 	sfc_ev_mgmt_qpoll(sa);
6712de39f4eSAndrew Rybchenko 
6722de39f4eSAndrew Rybchenko 	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
6732de39f4eSAndrew Rybchenko 			       sfc_ev_mgmt_periodic_qpoll, sa);
674323706abSAndrew Rybchenko 	if (rc == -ENOTSUP) {
675323706abSAndrew Rybchenko 		sfc_warn(sa, "alarms are not supported");
676323706abSAndrew Rybchenko 		sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
677323706abSAndrew Rybchenko 	} else if (rc != 0) {
678323706abSAndrew Rybchenko 		sfc_err(sa,
6792de39f4eSAndrew Rybchenko 			"cannot rearm management EVQ polling alarm (rc=%d)",
6802de39f4eSAndrew Rybchenko 			rc);
6812de39f4eSAndrew Rybchenko 	}
682323706abSAndrew Rybchenko }
6832de39f4eSAndrew Rybchenko 
/*
 * Kick off periodic polling of the management event queue by invoking
 * the self-rearming alarm callback for the first time.
 */
static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}
6892de39f4eSAndrew Rybchenko 
/* Cancel the periodic management event queue polling alarm */
static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}
6952de39f4eSAndrew Rybchenko 
69658294ee6SAndrew Rybchenko int
69758294ee6SAndrew Rybchenko sfc_ev_start(struct sfc_adapter *sa)
69858294ee6SAndrew Rybchenko {
69958294ee6SAndrew Rybchenko 	int rc;
70058294ee6SAndrew Rybchenko 
70158294ee6SAndrew Rybchenko 	sfc_log_init(sa, "entry");
70258294ee6SAndrew Rybchenko 
70358294ee6SAndrew Rybchenko 	rc = efx_ev_init(sa->nic);
70458294ee6SAndrew Rybchenko 	if (rc != 0)
70558294ee6SAndrew Rybchenko 		goto fail_ev_init;
70658294ee6SAndrew Rybchenko 
7079a75f75cSAndrew Rybchenko 	/* Start management EVQ used for global events */
7089a75f75cSAndrew Rybchenko 	rte_spinlock_lock(&sa->mgmt_evq_lock);
7099a75f75cSAndrew Rybchenko 
7109a75f75cSAndrew Rybchenko 	rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
7119a75f75cSAndrew Rybchenko 	if (rc != 0)
7129a75f75cSAndrew Rybchenko 		goto fail_mgmt_evq_start;
7139a75f75cSAndrew Rybchenko 
7143b809c27SAndrew Rybchenko 	if (sa->intr.lsc_intr) {
7153b809c27SAndrew Rybchenko 		rc = sfc_ev_qprime(sa->evq_info[sa->mgmt_evq_index].evq);
7163b809c27SAndrew Rybchenko 		if (rc != 0)
7173b809c27SAndrew Rybchenko 			goto fail_evq0_prime;
7183b809c27SAndrew Rybchenko 	}
7193b809c27SAndrew Rybchenko 
7209a75f75cSAndrew Rybchenko 	rte_spinlock_unlock(&sa->mgmt_evq_lock);
7219a75f75cSAndrew Rybchenko 
72258294ee6SAndrew Rybchenko 	/*
7232de39f4eSAndrew Rybchenko 	 * Start management EVQ polling. If interrupts are disabled
7242de39f4eSAndrew Rybchenko 	 * (not used), it is required to process link status change
7252de39f4eSAndrew Rybchenko 	 * and other device level events to avoid unrecoverable
7262de39f4eSAndrew Rybchenko 	 * error because the event queue overflow.
7272de39f4eSAndrew Rybchenko 	 */
7282de39f4eSAndrew Rybchenko 	sfc_ev_mgmt_periodic_qpoll_start(sa);
7292de39f4eSAndrew Rybchenko 
7302de39f4eSAndrew Rybchenko 	/*
7319a75f75cSAndrew Rybchenko 	 * Rx/Tx event queues are started/stopped when corresponding
7329a75f75cSAndrew Rybchenko 	 * Rx/Tx queue is started/stopped.
73358294ee6SAndrew Rybchenko 	 */
73458294ee6SAndrew Rybchenko 
73558294ee6SAndrew Rybchenko 	return 0;
73658294ee6SAndrew Rybchenko 
7373b809c27SAndrew Rybchenko fail_evq0_prime:
7383b809c27SAndrew Rybchenko 	sfc_ev_qstop(sa, 0);
7393b809c27SAndrew Rybchenko 
7409a75f75cSAndrew Rybchenko fail_mgmt_evq_start:
7419a75f75cSAndrew Rybchenko 	rte_spinlock_unlock(&sa->mgmt_evq_lock);
7429a75f75cSAndrew Rybchenko 	efx_ev_fini(sa->nic);
7439a75f75cSAndrew Rybchenko 
74458294ee6SAndrew Rybchenko fail_ev_init:
74558294ee6SAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
74658294ee6SAndrew Rybchenko 	return rc;
74758294ee6SAndrew Rybchenko }
74858294ee6SAndrew Rybchenko 
74958294ee6SAndrew Rybchenko void
75058294ee6SAndrew Rybchenko sfc_ev_stop(struct sfc_adapter *sa)
75158294ee6SAndrew Rybchenko {
75258294ee6SAndrew Rybchenko 	unsigned int sw_index;
75358294ee6SAndrew Rybchenko 
75458294ee6SAndrew Rybchenko 	sfc_log_init(sa, "entry");
75558294ee6SAndrew Rybchenko 
7562de39f4eSAndrew Rybchenko 	sfc_ev_mgmt_periodic_qpoll_stop(sa);
7572de39f4eSAndrew Rybchenko 
75858294ee6SAndrew Rybchenko 	/* Make sure that all event queues are stopped */
75958294ee6SAndrew Rybchenko 	sw_index = sa->evq_count;
7609a75f75cSAndrew Rybchenko 	while (sw_index-- > 0) {
7619a75f75cSAndrew Rybchenko 		if (sw_index == sa->mgmt_evq_index) {
7629a75f75cSAndrew Rybchenko 			/* Locks are required for the management EVQ */
7639a75f75cSAndrew Rybchenko 			rte_spinlock_lock(&sa->mgmt_evq_lock);
7649a75f75cSAndrew Rybchenko 			sfc_ev_qstop(sa, sa->mgmt_evq_index);
7659a75f75cSAndrew Rybchenko 			rte_spinlock_unlock(&sa->mgmt_evq_lock);
7669a75f75cSAndrew Rybchenko 		} else {
76758294ee6SAndrew Rybchenko 			sfc_ev_qstop(sa, sw_index);
7689a75f75cSAndrew Rybchenko 		}
7699a75f75cSAndrew Rybchenko 	}
77058294ee6SAndrew Rybchenko 
77158294ee6SAndrew Rybchenko 	efx_ev_fini(sa->nic);
77258294ee6SAndrew Rybchenko }
77358294ee6SAndrew Rybchenko 
77458294ee6SAndrew Rybchenko int
77558294ee6SAndrew Rybchenko sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
77658294ee6SAndrew Rybchenko 	     unsigned int entries, int socket_id)
77758294ee6SAndrew Rybchenko {
77858294ee6SAndrew Rybchenko 	struct sfc_evq_info *evq_info;
77958294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
78058294ee6SAndrew Rybchenko 	int rc;
78158294ee6SAndrew Rybchenko 
78258294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
78358294ee6SAndrew Rybchenko 
78458294ee6SAndrew Rybchenko 	evq_info = &sa->evq_info[sw_index];
78558294ee6SAndrew Rybchenko 
78658294ee6SAndrew Rybchenko 	SFC_ASSERT(rte_is_power_of_2(entries));
78758294ee6SAndrew Rybchenko 	SFC_ASSERT(entries <= evq_info->max_entries);
78858294ee6SAndrew Rybchenko 	evq_info->entries = entries;
78958294ee6SAndrew Rybchenko 
79058294ee6SAndrew Rybchenko 	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
79158294ee6SAndrew Rybchenko 				 socket_id);
79258294ee6SAndrew Rybchenko 	if (evq == NULL)
79358294ee6SAndrew Rybchenko 		return ENOMEM;
79458294ee6SAndrew Rybchenko 
79558294ee6SAndrew Rybchenko 	evq->sa = sa;
79658294ee6SAndrew Rybchenko 	evq->evq_index = sw_index;
79758294ee6SAndrew Rybchenko 
79858294ee6SAndrew Rybchenko 	/* Allocate DMA space */
79958294ee6SAndrew Rybchenko 	rc = sfc_dma_alloc(sa, "evq", sw_index, EFX_EVQ_SIZE(evq_info->entries),
80058294ee6SAndrew Rybchenko 			   socket_id, &evq->mem);
80158294ee6SAndrew Rybchenko 	if (rc != 0)
80258294ee6SAndrew Rybchenko 		return rc;
80358294ee6SAndrew Rybchenko 
80458294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_INITIALIZED;
80558294ee6SAndrew Rybchenko 
80658294ee6SAndrew Rybchenko 	evq_info->evq = evq;
80758294ee6SAndrew Rybchenko 
80858294ee6SAndrew Rybchenko 	return 0;
80958294ee6SAndrew Rybchenko }
81058294ee6SAndrew Rybchenko 
81158294ee6SAndrew Rybchenko void
81258294ee6SAndrew Rybchenko sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index)
81358294ee6SAndrew Rybchenko {
81458294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
81558294ee6SAndrew Rybchenko 
81658294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
81758294ee6SAndrew Rybchenko 
81858294ee6SAndrew Rybchenko 	evq = sa->evq_info[sw_index].evq;
81958294ee6SAndrew Rybchenko 
82058294ee6SAndrew Rybchenko 	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);
82158294ee6SAndrew Rybchenko 
82258294ee6SAndrew Rybchenko 	sa->evq_info[sw_index].evq = NULL;
82358294ee6SAndrew Rybchenko 
82458294ee6SAndrew Rybchenko 	sfc_dma_free(sa, &evq->mem);
82558294ee6SAndrew Rybchenko 
82658294ee6SAndrew Rybchenko 	rte_free(evq);
82758294ee6SAndrew Rybchenko }
82858294ee6SAndrew Rybchenko 
82958294ee6SAndrew Rybchenko static int
83058294ee6SAndrew Rybchenko sfc_ev_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
83158294ee6SAndrew Rybchenko {
83258294ee6SAndrew Rybchenko 	struct sfc_evq_info *evq_info = &sa->evq_info[sw_index];
83358294ee6SAndrew Rybchenko 	unsigned int max_entries;
83458294ee6SAndrew Rybchenko 
83558294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
83658294ee6SAndrew Rybchenko 
83758294ee6SAndrew Rybchenko 	max_entries = sfc_evq_max_entries(sa, sw_index);
83858294ee6SAndrew Rybchenko 	SFC_ASSERT(rte_is_power_of_2(max_entries));
83958294ee6SAndrew Rybchenko 
84058294ee6SAndrew Rybchenko 	evq_info->max_entries = max_entries;
8413b809c27SAndrew Rybchenko 	evq_info->flags = sa->evq_flags |
8423b809c27SAndrew Rybchenko 		((sa->intr.lsc_intr && sw_index == sa->mgmt_evq_index) ?
8433b809c27SAndrew Rybchenko 			EFX_EVQ_FLAGS_NOTIFY_INTERRUPT :
8443b809c27SAndrew Rybchenko 			EFX_EVQ_FLAGS_NOTIFY_DISABLED);
845c22d3c50SAndrew Rybchenko 
846c22d3c50SAndrew Rybchenko 	return 0;
847c22d3c50SAndrew Rybchenko }
848c22d3c50SAndrew Rybchenko 
849c22d3c50SAndrew Rybchenko static int
850c22d3c50SAndrew Rybchenko sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
851c22d3c50SAndrew Rybchenko 			       const char *value_str, void *opaque)
852c22d3c50SAndrew Rybchenko {
853c22d3c50SAndrew Rybchenko 	uint64_t *value = opaque;
854c22d3c50SAndrew Rybchenko 
855c22d3c50SAndrew Rybchenko 	if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
856c22d3c50SAndrew Rybchenko 		*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
857c22d3c50SAndrew Rybchenko 	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
858c22d3c50SAndrew Rybchenko 		*value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
859c22d3c50SAndrew Rybchenko 	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
860c22d3c50SAndrew Rybchenko 		*value = EFX_EVQ_FLAGS_TYPE_AUTO;
861c22d3c50SAndrew Rybchenko 	else
862c22d3c50SAndrew Rybchenko 		return -EINVAL;
86358294ee6SAndrew Rybchenko 
86458294ee6SAndrew Rybchenko 	return 0;
86558294ee6SAndrew Rybchenko }
86658294ee6SAndrew Rybchenko 
/*
 * Counterpart of sfc_ev_qinit_info(): kept as a hook for symmetry in
 * the init/fini unwind paths; there are currently no per-EVQ info
 * resources to release.
 */
static void
sfc_ev_qfini_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	sfc_log_init(sa, "sw_index=%u", sw_index);

	/* Nothing to cleanup */
}
87458294ee6SAndrew Rybchenko 
/*
 * Initialize event support for the adapter: process the performance
 * profile kvarg into EVQ creation flags, allocate the per-EVQ info
 * array and initialize the management event queue.
 *
 * Returns 0 on success, a positive errno value on failure.
 */
int
sfc_ev_init(struct sfc_adapter *sa)
{
	int rc;
	unsigned int sw_index;

	sfc_log_init(sa, "entry");

	/* Default EVQ type; may be overridden by the perf profile kvarg */
	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
				sfc_kvarg_perf_profile_handler,
				&sa->evq_flags);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value",
			SFC_KVARG_PERF_PROFILE);
		goto fail_kvarg_perf_profile;
	}

	sa->evq_count = sfc_ev_qcount(sa);
	/* Management EVQ is always the first one */
	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	/* Allocate EVQ info array */
	rc = ENOMEM;
	sa->evq_info = rte_calloc_socket("sfc-evqs", sa->evq_count,
					 sizeof(struct sfc_evq_info), 0,
					 sa->socket_id);
	if (sa->evq_info == NULL)
		goto fail_evqs_alloc;

	for (sw_index = 0; sw_index < sa->evq_count; ++sw_index) {
		rc = sfc_ev_qinit_info(sa, sw_index);
		if (rc != 0)
			goto fail_ev_qinit_info;
	}

	rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:
	/* sw_index == sa->evq_count here, so all info entries are undone */
fail_ev_qinit_info:
	/* Undo info init for entries [0, sw_index) in reverse order */
	while (sw_index-- > 0)
		sfc_ev_qfini_info(sa, sw_index);

	rte_free(sa->evq_info);
	sa->evq_info = NULL;

fail_evqs_alloc:
	sa->evq_count = 0;

fail_kvarg_perf_profile:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
93858294ee6SAndrew Rybchenko 
93958294ee6SAndrew Rybchenko void
94058294ee6SAndrew Rybchenko sfc_ev_fini(struct sfc_adapter *sa)
94158294ee6SAndrew Rybchenko {
94258294ee6SAndrew Rybchenko 	int sw_index;
94358294ee6SAndrew Rybchenko 
94458294ee6SAndrew Rybchenko 	sfc_log_init(sa, "entry");
94558294ee6SAndrew Rybchenko 
94658294ee6SAndrew Rybchenko 	/* Cleanup all event queues */
94758294ee6SAndrew Rybchenko 	sw_index = sa->evq_count;
94858294ee6SAndrew Rybchenko 	while (--sw_index >= 0) {
94958294ee6SAndrew Rybchenko 		if (sa->evq_info[sw_index].evq != NULL)
95058294ee6SAndrew Rybchenko 			sfc_ev_qfini(sa, sw_index);
95158294ee6SAndrew Rybchenko 		sfc_ev_qfini_info(sa, sw_index);
95258294ee6SAndrew Rybchenko 	}
95358294ee6SAndrew Rybchenko 
95458294ee6SAndrew Rybchenko 	rte_free(sa->evq_info);
95558294ee6SAndrew Rybchenko 	sa->evq_info = NULL;
95658294ee6SAndrew Rybchenko 	sa->evq_count = 0;
95758294ee6SAndrew Rybchenko }
958