xref: /dpdk/drivers/net/sfc/sfc_ev.c (revision 09a09b6f8be1535f44b091048f95add7efd23d9c)
158294ee6SAndrew Rybchenko /*-
258294ee6SAndrew Rybchenko  * Copyright (c) 2016 Solarflare Communications Inc.
358294ee6SAndrew Rybchenko  * All rights reserved.
458294ee6SAndrew Rybchenko  *
558294ee6SAndrew Rybchenko  * This software was jointly developed between OKTET Labs (under contract
658294ee6SAndrew Rybchenko  * for Solarflare) and Solarflare Communications, Inc.
758294ee6SAndrew Rybchenko  *
858294ee6SAndrew Rybchenko  * Redistribution and use in source and binary forms, with or without
958294ee6SAndrew Rybchenko  * modification, are permitted provided that the following conditions are met:
1058294ee6SAndrew Rybchenko  *
1158294ee6SAndrew Rybchenko  * 1. Redistributions of source code must retain the above copyright notice,
1258294ee6SAndrew Rybchenko  *    this list of conditions and the following disclaimer.
1358294ee6SAndrew Rybchenko  * 2. Redistributions in binary form must reproduce the above copyright notice,
1458294ee6SAndrew Rybchenko  *    this list of conditions and the following disclaimer in the documentation
1558294ee6SAndrew Rybchenko  *    and/or other materials provided with the distribution.
1658294ee6SAndrew Rybchenko  *
1758294ee6SAndrew Rybchenko  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
1858294ee6SAndrew Rybchenko  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
1958294ee6SAndrew Rybchenko  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2058294ee6SAndrew Rybchenko  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2158294ee6SAndrew Rybchenko  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2258294ee6SAndrew Rybchenko  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2358294ee6SAndrew Rybchenko  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
2458294ee6SAndrew Rybchenko  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
2558294ee6SAndrew Rybchenko  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
2658294ee6SAndrew Rybchenko  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
2758294ee6SAndrew Rybchenko  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2858294ee6SAndrew Rybchenko  */
2958294ee6SAndrew Rybchenko 
3058294ee6SAndrew Rybchenko #include <rte_debug.h>
3158294ee6SAndrew Rybchenko #include <rte_cycles.h>
322de39f4eSAndrew Rybchenko #include <rte_alarm.h>
3358294ee6SAndrew Rybchenko 
3458294ee6SAndrew Rybchenko #include "efx.h"
3558294ee6SAndrew Rybchenko 
3658294ee6SAndrew Rybchenko #include "sfc.h"
3758294ee6SAndrew Rybchenko #include "sfc_debug.h"
3858294ee6SAndrew Rybchenko #include "sfc_log.h"
3958294ee6SAndrew Rybchenko #include "sfc_ev.h"
4028944ac0SAndrew Rybchenko #include "sfc_rx.h"
4158294ee6SAndrew Rybchenko 
4258294ee6SAndrew Rybchenko 
4358294ee6SAndrew Rybchenko /* Initial delay when waiting for event queue init complete event */
4458294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_BACKOFF_START_US	(1)
4558294ee6SAndrew Rybchenko /* Maximum delay between event queue polling attempts */
4658294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
4758294ee6SAndrew Rybchenko /* Event queue init approx timeout */
4858294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)
4958294ee6SAndrew Rybchenko 
502de39f4eSAndrew Rybchenko /* Management event queue polling period in microseconds */
512de39f4eSAndrew Rybchenko #define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)
522de39f4eSAndrew Rybchenko 
5358294ee6SAndrew Rybchenko 
5458294ee6SAndrew Rybchenko static boolean_t
5558294ee6SAndrew Rybchenko sfc_ev_initialized(void *arg)
5658294ee6SAndrew Rybchenko {
5758294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
5858294ee6SAndrew Rybchenko 
5958294ee6SAndrew Rybchenko 	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
6058294ee6SAndrew Rybchenko 	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
6158294ee6SAndrew Rybchenko 		   evq->init_state == SFC_EVQ_STARTED);
6258294ee6SAndrew Rybchenko 
6358294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_STARTED;
6458294ee6SAndrew Rybchenko 
6558294ee6SAndrew Rybchenko 	return B_FALSE;
6658294ee6SAndrew Rybchenko }
6758294ee6SAndrew Rybchenko 
/*
 * Handle an Rx completion event.
 *
 * @arg    the sfc_evq the event arrived on (callback context)
 * @label  hardware queue label (unused; the EVQ has a single Rx queue)
 * @id     index of the last completed descriptor in the Rx ring
 * @size   byte count reported by the event (applied to each new descriptor)
 * @flags  completion flags (applied to each new descriptor)
 *
 * Marks the software descriptors from rxq->pending up to and including
 * @id as done and advances rxq->pending. Returns B_FALSE to continue
 * event queue polling; malformed events set evq->exception instead.
 */
static boolean_t
sfc_ev_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	  uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_rx_sw_desc *rxd;

	/* Once an exception is latched, ignore further Rx events */
	if (unlikely(evq->exception))
		goto done;

	rxq = evq->rxq;

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

	/*
	 * Ring indices are kept masked; delta is the number of newly
	 * completed descriptors, accounting for ring wrap-around.
	 */
	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * Rx event with no new descriptors done and zero length
		 * is used to abort scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			/* Zero-delta event with non-zero size is malformed */
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart\n",
				evq->evq_index, sfc_rxq_sw_index(rxq),
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		/* More completions than one batch can carry: out of order */
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart\n",
			evq->evq_index, sfc_rxq_sw_index(rxq), id, delta,
			flags);

		goto done;
	}

	/* Stamp every newly completed descriptor with the event's metadata */
	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		/* rxd->size is 16-bit; the event must fit */
		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}
14058294ee6SAndrew Rybchenko 
14158294ee6SAndrew Rybchenko static boolean_t
14258294ee6SAndrew Rybchenko sfc_ev_tx(void *arg, __rte_unused uint32_t label, __rte_unused uint32_t id)
14358294ee6SAndrew Rybchenko {
14458294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
14558294ee6SAndrew Rybchenko 
14658294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected Tx event", evq->evq_index);
14758294ee6SAndrew Rybchenko 	return B_TRUE;
14858294ee6SAndrew Rybchenko }
14958294ee6SAndrew Rybchenko 
15058294ee6SAndrew Rybchenko static boolean_t
15158294ee6SAndrew Rybchenko sfc_ev_exception(void *arg, __rte_unused uint32_t code,
15258294ee6SAndrew Rybchenko 		 __rte_unused uint32_t data)
15358294ee6SAndrew Rybchenko {
15458294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
15558294ee6SAndrew Rybchenko 
15698200dd9SAndrew Rybchenko 	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
15798200dd9SAndrew Rybchenko 		return B_FALSE;
15898200dd9SAndrew Rybchenko 
15998200dd9SAndrew Rybchenko 	evq->exception = B_TRUE;
16098200dd9SAndrew Rybchenko 	sfc_warn(evq->sa,
16198200dd9SAndrew Rybchenko 		 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
16298200dd9SAndrew Rybchenko 		 " needs recovery",
16398200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
16498200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
16598200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
16698200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
16798200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
16898200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
16998200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
17098200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
17198200dd9SAndrew Rybchenko 		 "UNKNOWN",
17298200dd9SAndrew Rybchenko 		 code, data, evq->evq_index);
17398200dd9SAndrew Rybchenko 
17458294ee6SAndrew Rybchenko 	return B_TRUE;
17558294ee6SAndrew Rybchenko }
17658294ee6SAndrew Rybchenko 
17758294ee6SAndrew Rybchenko static boolean_t
17858294ee6SAndrew Rybchenko sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
17958294ee6SAndrew Rybchenko {
18058294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
18128944ac0SAndrew Rybchenko 	struct sfc_rxq *rxq;
18258294ee6SAndrew Rybchenko 
18328944ac0SAndrew Rybchenko 	rxq = evq->rxq;
18428944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq != NULL);
18528944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
18628944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->evq == evq);
18728944ac0SAndrew Rybchenko 	sfc_rx_qflush_done(rxq);
18828944ac0SAndrew Rybchenko 
18928944ac0SAndrew Rybchenko 	return B_FALSE;
19058294ee6SAndrew Rybchenko }
19158294ee6SAndrew Rybchenko 
19258294ee6SAndrew Rybchenko static boolean_t
19358294ee6SAndrew Rybchenko sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
19458294ee6SAndrew Rybchenko {
19558294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
19628944ac0SAndrew Rybchenko 	struct sfc_rxq *rxq;
19758294ee6SAndrew Rybchenko 
19828944ac0SAndrew Rybchenko 	rxq = evq->rxq;
19928944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq != NULL);
20028944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
20128944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->evq == evq);
20228944ac0SAndrew Rybchenko 	sfc_rx_qflush_failed(rxq);
20328944ac0SAndrew Rybchenko 
20428944ac0SAndrew Rybchenko 	return B_FALSE;
20558294ee6SAndrew Rybchenko }
20658294ee6SAndrew Rybchenko 
20758294ee6SAndrew Rybchenko static boolean_t
20858294ee6SAndrew Rybchenko sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
20958294ee6SAndrew Rybchenko {
21058294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
21158294ee6SAndrew Rybchenko 
21258294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected Tx flush done event",
21358294ee6SAndrew Rybchenko 		evq->evq_index);
21458294ee6SAndrew Rybchenko 	return B_TRUE;
21558294ee6SAndrew Rybchenko }
21658294ee6SAndrew Rybchenko 
21758294ee6SAndrew Rybchenko static boolean_t
21858294ee6SAndrew Rybchenko sfc_ev_software(void *arg, uint16_t magic)
21958294ee6SAndrew Rybchenko {
22058294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
22158294ee6SAndrew Rybchenko 
22258294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
22358294ee6SAndrew Rybchenko 		evq->evq_index, magic);
22458294ee6SAndrew Rybchenko 	return B_TRUE;
22558294ee6SAndrew Rybchenko }
22658294ee6SAndrew Rybchenko 
22758294ee6SAndrew Rybchenko static boolean_t
22858294ee6SAndrew Rybchenko sfc_ev_sram(void *arg, uint32_t code)
22958294ee6SAndrew Rybchenko {
23058294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
23158294ee6SAndrew Rybchenko 
23258294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
23358294ee6SAndrew Rybchenko 		evq->evq_index, code);
23458294ee6SAndrew Rybchenko 	return B_TRUE;
23558294ee6SAndrew Rybchenko }
23658294ee6SAndrew Rybchenko 
23758294ee6SAndrew Rybchenko static boolean_t
23858294ee6SAndrew Rybchenko sfc_ev_wake_up(void *arg, uint32_t index)
23958294ee6SAndrew Rybchenko {
24058294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
24158294ee6SAndrew Rybchenko 
24258294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
24358294ee6SAndrew Rybchenko 		evq->evq_index, index);
24458294ee6SAndrew Rybchenko 	return B_TRUE;
24558294ee6SAndrew Rybchenko }
24658294ee6SAndrew Rybchenko 
24758294ee6SAndrew Rybchenko static boolean_t
24858294ee6SAndrew Rybchenko sfc_ev_timer(void *arg, uint32_t index)
24958294ee6SAndrew Rybchenko {
25058294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
25158294ee6SAndrew Rybchenko 
25258294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
25358294ee6SAndrew Rybchenko 		evq->evq_index, index);
25458294ee6SAndrew Rybchenko 	return B_TRUE;
25558294ee6SAndrew Rybchenko }
25658294ee6SAndrew Rybchenko 
/*
 * Handle a link state change event: translate the new link mode into
 * rte_eth_link form and publish it to the shared device link status.
 *
 * The update is done as a single 64-bit atomic store so that concurrent
 * readers never observe a half-written link structure.
 */
static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;
	struct sfc_adapter *sa = evq->sa;
	struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
	struct rte_eth_link new_link;

	/* The atomic-store trick below requires the struct to be 64 bits */
	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));

	sfc_port_link_mode_to_info(link_mode, &new_link);
	/* Publish the whole link status in one atomic 64-bit write */
	rte_atomic64_set((rte_atomic64_t *)dev_link, *(uint64_t *)&new_link);

	/* Continue polling this event queue */
	return B_FALSE;
}
27258294ee6SAndrew Rybchenko 
/*
 * Common-code event dispatch table shared by all event queues.
 * Handlers return B_TRUE to abort polling, B_FALSE to continue.
 */
static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_rx,
	.eec_tx			= sfc_ev_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_link_change,
};
28758294ee6SAndrew Rybchenko 
28858294ee6SAndrew Rybchenko 
/*
 * Poll one event queue, dispatching pending events through
 * sfc_ev_callbacks and advancing evq->read_ptr.
 */
void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	/* Polling is legal while waiting for the init-done event too */
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronize the DMA memory for reading not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, &sfc_ev_callbacks, evq);

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}
30158294ee6SAndrew Rybchenko 
3029a75f75cSAndrew Rybchenko void
3039a75f75cSAndrew Rybchenko sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
3049a75f75cSAndrew Rybchenko {
3059a75f75cSAndrew Rybchenko 	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
3069a75f75cSAndrew Rybchenko 		struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;
3079a75f75cSAndrew Rybchenko 
3089a75f75cSAndrew Rybchenko 		if (mgmt_evq->init_state == SFC_EVQ_STARTED)
3099a75f75cSAndrew Rybchenko 			sfc_ev_qpoll(mgmt_evq);
3109a75f75cSAndrew Rybchenko 
3119a75f75cSAndrew Rybchenko 		rte_spinlock_unlock(&sa->mgmt_evq_lock);
3129a75f75cSAndrew Rybchenko 	}
3139a75f75cSAndrew Rybchenko }
3149a75f75cSAndrew Rybchenko 
/*
 * Re-prime the event queue for interrupts at the current read pointer.
 * Returns 0 on success or an error code from the common code.
 */
int
sfc_ev_qprime(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
	return efx_ev_qprime(evq->common, evq->read_ptr);
}
32158294ee6SAndrew Rybchenko 
32258294ee6SAndrew Rybchenko int
32358294ee6SAndrew Rybchenko sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index)
32458294ee6SAndrew Rybchenko {
32558294ee6SAndrew Rybchenko 	const struct sfc_evq_info *evq_info;
32658294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
32758294ee6SAndrew Rybchenko 	efsys_mem_t *esmp;
32858294ee6SAndrew Rybchenko 	unsigned int total_delay_us;
32958294ee6SAndrew Rybchenko 	unsigned int delay_us;
33058294ee6SAndrew Rybchenko 	int rc;
33158294ee6SAndrew Rybchenko 
33258294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
33358294ee6SAndrew Rybchenko 
33458294ee6SAndrew Rybchenko 	evq_info = &sa->evq_info[sw_index];
33558294ee6SAndrew Rybchenko 	evq = evq_info->evq;
33658294ee6SAndrew Rybchenko 	esmp = &evq->mem;
33758294ee6SAndrew Rybchenko 
33858294ee6SAndrew Rybchenko 	/* Clear all events */
33958294ee6SAndrew Rybchenko 	(void)memset((void *)esmp->esm_base, 0xff,
34058294ee6SAndrew Rybchenko 		     EFX_EVQ_SIZE(evq_info->entries));
34158294ee6SAndrew Rybchenko 
34258294ee6SAndrew Rybchenko 	/* Create the common code event queue */
34358294ee6SAndrew Rybchenko 	rc = efx_ev_qcreate(sa->nic, sw_index, esmp, evq_info->entries,
34458294ee6SAndrew Rybchenko 			    0 /* unused on EF10 */, 0,
34558294ee6SAndrew Rybchenko 			    EFX_EVQ_FLAGS_TYPE_THROUGHPUT |
34658294ee6SAndrew Rybchenko 			    EFX_EVQ_FLAGS_NOTIFY_DISABLED,
34758294ee6SAndrew Rybchenko 			    &evq->common);
34858294ee6SAndrew Rybchenko 	if (rc != 0)
34958294ee6SAndrew Rybchenko 		goto fail_ev_qcreate;
35058294ee6SAndrew Rybchenko 
35158294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_STARTING;
35258294ee6SAndrew Rybchenko 
35358294ee6SAndrew Rybchenko 	/* Wait for the initialization event */
35458294ee6SAndrew Rybchenko 	total_delay_us = 0;
35558294ee6SAndrew Rybchenko 	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
35658294ee6SAndrew Rybchenko 	do {
35758294ee6SAndrew Rybchenko 		(void)sfc_ev_qpoll(evq);
35858294ee6SAndrew Rybchenko 
35958294ee6SAndrew Rybchenko 		/* Check to see if the initialization complete indication
36058294ee6SAndrew Rybchenko 		 * posted by the hardware.
36158294ee6SAndrew Rybchenko 		 */
36258294ee6SAndrew Rybchenko 		if (evq->init_state == SFC_EVQ_STARTED)
36358294ee6SAndrew Rybchenko 			goto done;
36458294ee6SAndrew Rybchenko 
36558294ee6SAndrew Rybchenko 		/* Give event queue some time to init */
36658294ee6SAndrew Rybchenko 		rte_delay_us(delay_us);
36758294ee6SAndrew Rybchenko 
36858294ee6SAndrew Rybchenko 		total_delay_us += delay_us;
36958294ee6SAndrew Rybchenko 
37058294ee6SAndrew Rybchenko 		/* Exponential backoff */
37158294ee6SAndrew Rybchenko 		delay_us *= 2;
37258294ee6SAndrew Rybchenko 		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
37358294ee6SAndrew Rybchenko 			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;
37458294ee6SAndrew Rybchenko 
37558294ee6SAndrew Rybchenko 	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);
37658294ee6SAndrew Rybchenko 
37758294ee6SAndrew Rybchenko 	rc = ETIMEDOUT;
37858294ee6SAndrew Rybchenko 	goto fail_timedout;
37958294ee6SAndrew Rybchenko 
38058294ee6SAndrew Rybchenko done:
38158294ee6SAndrew Rybchenko 	return 0;
38258294ee6SAndrew Rybchenko 
38358294ee6SAndrew Rybchenko fail_timedout:
38458294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_INITIALIZED;
38558294ee6SAndrew Rybchenko 	efx_ev_qdestroy(evq->common);
38658294ee6SAndrew Rybchenko 
38758294ee6SAndrew Rybchenko fail_ev_qcreate:
38858294ee6SAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
38958294ee6SAndrew Rybchenko 	return rc;
39058294ee6SAndrew Rybchenko }
39158294ee6SAndrew Rybchenko 
39258294ee6SAndrew Rybchenko void
39358294ee6SAndrew Rybchenko sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
39458294ee6SAndrew Rybchenko {
39558294ee6SAndrew Rybchenko 	const struct sfc_evq_info *evq_info;
39658294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
39758294ee6SAndrew Rybchenko 
39858294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
39958294ee6SAndrew Rybchenko 
40058294ee6SAndrew Rybchenko 	SFC_ASSERT(sw_index < sa->evq_count);
40158294ee6SAndrew Rybchenko 
40258294ee6SAndrew Rybchenko 	evq_info = &sa->evq_info[sw_index];
40358294ee6SAndrew Rybchenko 	evq = evq_info->evq;
40458294ee6SAndrew Rybchenko 
40558294ee6SAndrew Rybchenko 	if (evq == NULL || evq->init_state != SFC_EVQ_STARTED)
40658294ee6SAndrew Rybchenko 		return;
40758294ee6SAndrew Rybchenko 
40858294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_INITIALIZED;
40958294ee6SAndrew Rybchenko 	evq->read_ptr = 0;
41058294ee6SAndrew Rybchenko 	evq->exception = B_FALSE;
41158294ee6SAndrew Rybchenko 
41258294ee6SAndrew Rybchenko 	efx_ev_qdestroy(evq->common);
41358294ee6SAndrew Rybchenko }
41458294ee6SAndrew Rybchenko 
/*
 * Periodic alarm callback: poll the management event queue and re-arm
 * the alarm for the next period. Failure to re-arm is fatal because
 * device-level events would silently stop being processed.
 */
static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;
	int rc;

	sfc_ev_mgmt_qpoll(sa);

	/* Schedule the next invocation of this callback */
	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
			       sfc_ev_mgmt_periodic_qpoll, sa);
	if (rc != 0)
		sfc_panic(sa,
			  "cannot rearm management EVQ polling alarm (rc=%d)",
			  rc);
}
4302de39f4eSAndrew Rybchenko 
/* Kick off periodic management EVQ polling (first poll is immediate). */
static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	/* The callback re-arms itself, so one call starts the cycle */
	sfc_ev_mgmt_periodic_qpoll(sa);
}
4362de39f4eSAndrew Rybchenko 
/* Cancel the periodic management EVQ polling alarm. */
static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}
4422de39f4eSAndrew Rybchenko 
/*
 * Start event queue support: initialize the common-code event module,
 * start the management EVQ (under its lock) and begin periodic polling
 * of it. Rx/Tx EVQs are started later with their queues.
 *
 * Returns 0 on success; on failure all partial state is unwound and
 * an error code is returned.
 */
int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start management EVQ used for global events */
	rte_spinlock_lock(&sa->mgmt_evq_lock);

	rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), it is required to process link status change
	 * and other device level events to avoid unrecoverable
	 * error because the event queue overflow.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when corresponding
	 * Rx/Tx queue is started/stopped.
	 */

	return 0;

fail_mgmt_evq_start:
	/* The lock is still held on this failure path; release it first */
	rte_spinlock_unlock(&sa->mgmt_evq_lock);
	efx_ev_fini(sa->nic);

fail_ev_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
48658294ee6SAndrew Rybchenko 
48758294ee6SAndrew Rybchenko void
48858294ee6SAndrew Rybchenko sfc_ev_stop(struct sfc_adapter *sa)
48958294ee6SAndrew Rybchenko {
49058294ee6SAndrew Rybchenko 	unsigned int sw_index;
49158294ee6SAndrew Rybchenko 
49258294ee6SAndrew Rybchenko 	sfc_log_init(sa, "entry");
49358294ee6SAndrew Rybchenko 
4942de39f4eSAndrew Rybchenko 	sfc_ev_mgmt_periodic_qpoll_stop(sa);
4952de39f4eSAndrew Rybchenko 
49658294ee6SAndrew Rybchenko 	/* Make sure that all event queues are stopped */
49758294ee6SAndrew Rybchenko 	sw_index = sa->evq_count;
4989a75f75cSAndrew Rybchenko 	while (sw_index-- > 0) {
4999a75f75cSAndrew Rybchenko 		if (sw_index == sa->mgmt_evq_index) {
5009a75f75cSAndrew Rybchenko 			/* Locks are required for the management EVQ */
5019a75f75cSAndrew Rybchenko 			rte_spinlock_lock(&sa->mgmt_evq_lock);
5029a75f75cSAndrew Rybchenko 			sfc_ev_qstop(sa, sa->mgmt_evq_index);
5039a75f75cSAndrew Rybchenko 			rte_spinlock_unlock(&sa->mgmt_evq_lock);
5049a75f75cSAndrew Rybchenko 		} else {
50558294ee6SAndrew Rybchenko 			sfc_ev_qstop(sa, sw_index);
5069a75f75cSAndrew Rybchenko 		}
5079a75f75cSAndrew Rybchenko 	}
50858294ee6SAndrew Rybchenko 
50958294ee6SAndrew Rybchenko 	efx_ev_fini(sa->nic);
51058294ee6SAndrew Rybchenko }
51158294ee6SAndrew Rybchenko 
51258294ee6SAndrew Rybchenko int
51358294ee6SAndrew Rybchenko sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
51458294ee6SAndrew Rybchenko 	     unsigned int entries, int socket_id)
51558294ee6SAndrew Rybchenko {
51658294ee6SAndrew Rybchenko 	struct sfc_evq_info *evq_info;
51758294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
51858294ee6SAndrew Rybchenko 	int rc;
51958294ee6SAndrew Rybchenko 
52058294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
52158294ee6SAndrew Rybchenko 
52258294ee6SAndrew Rybchenko 	evq_info = &sa->evq_info[sw_index];
52358294ee6SAndrew Rybchenko 
52458294ee6SAndrew Rybchenko 	SFC_ASSERT(rte_is_power_of_2(entries));
52558294ee6SAndrew Rybchenko 	SFC_ASSERT(entries <= evq_info->max_entries);
52658294ee6SAndrew Rybchenko 	evq_info->entries = entries;
52758294ee6SAndrew Rybchenko 
52858294ee6SAndrew Rybchenko 	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
52958294ee6SAndrew Rybchenko 				 socket_id);
53058294ee6SAndrew Rybchenko 	if (evq == NULL)
53158294ee6SAndrew Rybchenko 		return ENOMEM;
53258294ee6SAndrew Rybchenko 
53358294ee6SAndrew Rybchenko 	evq->sa = sa;
53458294ee6SAndrew Rybchenko 	evq->evq_index = sw_index;
53558294ee6SAndrew Rybchenko 
53658294ee6SAndrew Rybchenko 	/* Allocate DMA space */
53758294ee6SAndrew Rybchenko 	rc = sfc_dma_alloc(sa, "evq", sw_index, EFX_EVQ_SIZE(evq_info->entries),
53858294ee6SAndrew Rybchenko 			   socket_id, &evq->mem);
53958294ee6SAndrew Rybchenko 	if (rc != 0)
54058294ee6SAndrew Rybchenko 		return rc;
54158294ee6SAndrew Rybchenko 
54258294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_INITIALIZED;
54358294ee6SAndrew Rybchenko 
54458294ee6SAndrew Rybchenko 	evq_info->evq = evq;
54558294ee6SAndrew Rybchenko 
54658294ee6SAndrew Rybchenko 	return 0;
54758294ee6SAndrew Rybchenko }
54858294ee6SAndrew Rybchenko 
54958294ee6SAndrew Rybchenko void
55058294ee6SAndrew Rybchenko sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index)
55158294ee6SAndrew Rybchenko {
55258294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
55358294ee6SAndrew Rybchenko 
55458294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
55558294ee6SAndrew Rybchenko 
55658294ee6SAndrew Rybchenko 	evq = sa->evq_info[sw_index].evq;
55758294ee6SAndrew Rybchenko 
55858294ee6SAndrew Rybchenko 	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);
55958294ee6SAndrew Rybchenko 
56058294ee6SAndrew Rybchenko 	sa->evq_info[sw_index].evq = NULL;
56158294ee6SAndrew Rybchenko 
56258294ee6SAndrew Rybchenko 	sfc_dma_free(sa, &evq->mem);
56358294ee6SAndrew Rybchenko 
56458294ee6SAndrew Rybchenko 	rte_free(evq);
56558294ee6SAndrew Rybchenko }
56658294ee6SAndrew Rybchenko 
56758294ee6SAndrew Rybchenko static int
56858294ee6SAndrew Rybchenko sfc_ev_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
56958294ee6SAndrew Rybchenko {
57058294ee6SAndrew Rybchenko 	struct sfc_evq_info *evq_info = &sa->evq_info[sw_index];
57158294ee6SAndrew Rybchenko 	unsigned int max_entries;
57258294ee6SAndrew Rybchenko 
57358294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
57458294ee6SAndrew Rybchenko 
57558294ee6SAndrew Rybchenko 	max_entries = sfc_evq_max_entries(sa, sw_index);
57658294ee6SAndrew Rybchenko 	SFC_ASSERT(rte_is_power_of_2(max_entries));
57758294ee6SAndrew Rybchenko 
57858294ee6SAndrew Rybchenko 	evq_info->max_entries = max_entries;
57958294ee6SAndrew Rybchenko 
58058294ee6SAndrew Rybchenko 	return 0;
58158294ee6SAndrew Rybchenko }
58258294ee6SAndrew Rybchenko 
/* Counterpart of sfc_ev_qinit_info(); currently releases nothing. */
static void
sfc_ev_qfini_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	sfc_log_init(sa, "sw_index=%u", sw_index);

	/* No per-queue info resources to release yet */
}
59058294ee6SAndrew Rybchenko 
/*
 * One-time event support initialization: allocate the EVQ info array,
 * initialize per-queue info and create the management event queue.
 * Rx/Tx EVQs are created later with their queues.
 *
 * Returns 0 on success; on failure all partial state is unwound and
 * an error code is returned.
 */
int
sfc_ev_init(struct sfc_adapter *sa)
{
	int rc;
	unsigned int sw_index;

	sfc_log_init(sa, "entry");

	sa->evq_count = sfc_ev_qcount(sa);
	/* The management EVQ always uses software index 0 */
	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	/* Allocate EVQ info array */
	rc = ENOMEM;
	sa->evq_info = rte_calloc_socket("sfc-evqs", sa->evq_count,
					 sizeof(struct sfc_evq_info), 0,
					 sa->socket_id);
	if (sa->evq_info == NULL)
		goto fail_evqs_alloc;

	for (sw_index = 0; sw_index < sa->evq_count; ++sw_index) {
		rc = sfc_ev_qinit_info(sa, sw_index);
		if (rc != 0)
			goto fail_ev_qinit_info;
	}

	rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:
	/*
	 * Fall through: sw_index == evq_count here, so the loop below
	 * unwinds the info of every queue initialized above.
	 */
fail_ev_qinit_info:
	while (sw_index-- > 0)
		sfc_ev_qfini_info(sa, sw_index);

	rte_free(sa->evq_info);
	sa->evq_info = NULL;

fail_evqs_alloc:
	sa->evq_count = 0;
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
64258294ee6SAndrew Rybchenko 
64358294ee6SAndrew Rybchenko void
64458294ee6SAndrew Rybchenko sfc_ev_fini(struct sfc_adapter *sa)
64558294ee6SAndrew Rybchenko {
64658294ee6SAndrew Rybchenko 	int sw_index;
64758294ee6SAndrew Rybchenko 
64858294ee6SAndrew Rybchenko 	sfc_log_init(sa, "entry");
64958294ee6SAndrew Rybchenko 
65058294ee6SAndrew Rybchenko 	/* Cleanup all event queues */
65158294ee6SAndrew Rybchenko 	sw_index = sa->evq_count;
65258294ee6SAndrew Rybchenko 	while (--sw_index >= 0) {
65358294ee6SAndrew Rybchenko 		if (sa->evq_info[sw_index].evq != NULL)
65458294ee6SAndrew Rybchenko 			sfc_ev_qfini(sa, sw_index);
65558294ee6SAndrew Rybchenko 		sfc_ev_qfini_info(sa, sw_index);
65658294ee6SAndrew Rybchenko 	}
65758294ee6SAndrew Rybchenko 
65858294ee6SAndrew Rybchenko 	rte_free(sa->evq_info);
65958294ee6SAndrew Rybchenko 	sa->evq_info = NULL;
66058294ee6SAndrew Rybchenko 	sa->evq_count = 0;
66158294ee6SAndrew Rybchenko }
662