xref: /dpdk/drivers/net/sfc/sfc_ev.c (revision 77f2d0534d1bc3db3fc18e76425d478aa87151c4)
/*-
 * Copyright (c) 2016 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_branch_prediction.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"


/* Initial delay when waiting for event queue init complete event */
#define SFC_EVQ_INIT_BACKOFF_START_US	(1)
/* Maximum delay between event queue polling attempts */
#define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
/* Approximate timeout for event queue init */
#define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)

/* Management event queue polling period in microseconds */
#define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)


static boolean_t
sfc_ev_initialized(void *arg)
{
	struct sfc_evq *evq = arg;

	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
		   evq->init_state == SFC_EVQ_STARTED);

	evq->init_state = SFC_EVQ_STARTED;

	return B_FALSE;
}

static boolean_t
sfc_ev_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	  uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_rx_sw_desc *rxd;

	if (unlikely(evq->exception))
		goto done;

	rxq = evq->rxq;

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->state & SFC_RXQ_STARTED);

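	/*
	 * The event reports the index of the last completed descriptor.
	 * Compute how many descriptors have completed since the previous
	 * event, taking ring wrap-around into account.
	 */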
	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * An Rx event with zero length and no new descriptors done
		 * is used to abort a scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart\n",
				evq->evq_index, sfc_rxq_sw_index(rxq),
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart\n",
			evq->evq_index, sfc_rxq_sw_index(rxq), id, delta,
			flags);

		goto done;
	}

	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}

static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_txq *txq;
	unsigned int stop;
	unsigned int delta;

	txq = evq->txq;

	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->evq == evq);

	if (unlikely((txq->state & SFC_TXQ_STARTED) == 0))
		goto done;

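	/*
	 * Compute the number of newly completed Tx descriptors modulo the
	 * ring size and advance the pending pointer by that amount.
	 */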
	stop = (id + 1) & txq->ptr_mask;
	id = txq->pending & txq->ptr_mask;

	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);

	txq->pending += delta;

done:
	return B_FALSE;
}

static boolean_t
sfc_ev_exception(void *arg, __rte_unused uint32_t code,
		 __rte_unused uint32_t data)
{
	struct sfc_evq *evq = arg;

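	/* Unknown sensor events are ignored and do not mark the EVQ failed */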
	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
		return B_FALSE;

	evq->exception = B_TRUE;
	sfc_warn(evq->sa,
		 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
		 " needs recovery",
		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
		 "UNKNOWN",
		 code, data, evq->evq_index);

	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_rxq *rxq;

	rxq = evq->rxq;
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_done(rxq);

	return B_FALSE;
}

static boolean_t
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_rxq *rxq;

	rxq = evq->rxq;
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_failed(rxq);

	return B_FALSE;
}

static boolean_t
sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_txq *txq;

	txq = evq->txq;
	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->hw_index == txq_hw_index);
	SFC_ASSERT(txq->evq == evq);
	sfc_tx_qflush_done(txq);

	return B_FALSE;
}

static boolean_t
sfc_ev_software(void *arg, uint16_t magic)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
		evq->evq_index, magic);
	return B_TRUE;
}

static boolean_t
sfc_ev_sram(void *arg, uint32_t code)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
		evq->evq_index, code);
	return B_TRUE;
}

static boolean_t
sfc_ev_wake_up(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_timer(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;
	struct sfc_adapter *sa = evq->sa;
	struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
	struct rte_eth_link new_link;

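	/*
	 * The link status fits into a single 64-bit word (checked below),
	 * so it can be updated atomically; readers observe either the old
	 * or the new state, never a torn mix of both.
	 */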
	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));

	sfc_port_link_mode_to_info(link_mode, &new_link);
	rte_atomic64_set((rte_atomic64_t *)dev_link, *(uint64_t *)&new_link);

	return B_FALSE;
}

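/*
 * Table of callbacks invoked by efx_ev_qpoll() for each event type;
 * a handler returns B_TRUE to request that event processing stop.
 */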
static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_rx,
	.eec_tx			= sfc_ev_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_link_change,
};


void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronizing the DMA memory for reading is not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, &sfc_ev_callbacks, evq);

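	/*
	 * Try to recover from an exception by restarting the affected RxQ.
	 * The adapter lock is taken with trylock so that event processing
	 * never blocks here; if the lock is busy, recovery is retried on a
	 * later poll since evq->exception remains set.
	 */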
	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
		struct sfc_adapter *sa = evq->sa;
		int rc;

		if ((evq->rxq != NULL) && (evq->rxq->state & SFC_RXQ_RUNNING)) {
			unsigned int rxq_sw_index = sfc_rxq_sw_index(evq->rxq);

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}

		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}

void
sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
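	/*
	 * Use trylock so that concurrent callers do not block each other;
	 * if the lock is busy, the management EVQ is simply polled on the
	 * next invocation (e.g. from the periodic alarm).
	 */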
	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
		struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;

		if (mgmt_evq->init_state == SFC_EVQ_STARTED)
			sfc_ev_qpoll(mgmt_evq);

		rte_spinlock_unlock(&sa->mgmt_evq_lock);
	}
}

int
sfc_ev_qprime(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
	return efx_ev_qprime(evq->common, evq->read_ptr);
}

int
sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	const struct sfc_evq_info *evq_info;
	struct sfc_evq *evq;
	efsys_mem_t *esmp;
	unsigned int total_delay_us;
	unsigned int delay_us;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	evq_info = &sa->evq_info[sw_index];
	evq = evq_info->evq;
	esmp = &evq->mem;

	/* Clear all events */
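	/*
	 * Filling the ring with all-ones marks every entry as invalid, so
	 * that events subsequently written by the hardware can be told
	 * apart from unused entries.
	 */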
	(void)memset((void *)esmp->esm_base, 0xff,
		     EFX_EVQ_SIZE(evq_info->entries));

	/* Create the common code event queue */
	rc = efx_ev_qcreate(sa->nic, sw_index, esmp, evq_info->entries,
			    0 /* unused on EF10 */, 0,
			    EFX_EVQ_FLAGS_TYPE_THROUGHPUT |
			    EFX_EVQ_FLAGS_NOTIFY_DISABLED,
			    &evq->common);
	if (rc != 0)
		goto fail_ev_qcreate;

	evq->init_state = SFC_EVQ_STARTING;

	/* Wait for the initialization event */
	total_delay_us = 0;
	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
	do {
		(void)sfc_ev_qpoll(evq);

		/*
		 * Check to see if the initialization complete indication
		 * was posted by the hardware.
		 */
		if (evq->init_state == SFC_EVQ_STARTED)
			goto done;

		/* Give event queue some time to init */
		rte_delay_us(delay_us);

		total_delay_us += delay_us;

		/* Exponential backoff */
		delay_us *= 2;
		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

	rc = ETIMEDOUT;
	goto fail_timedout;

done:
	return 0;

fail_timedout:
	evq->init_state = SFC_EVQ_INITIALIZED;
	efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	const struct sfc_evq_info *evq_info;
	struct sfc_evq *evq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->evq_count);

	evq_info = &sa->evq_info[sw_index];
	evq = evq_info->evq;

	if (evq == NULL || evq->init_state != SFC_EVQ_STARTED)
		return;

	evq->init_state = SFC_EVQ_INITIALIZED;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

	efx_ev_qdestroy(evq->common);
}

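/*
 * Alarm callback that polls the management EVQ and re-arms itself,
 * providing periodic polling without interrupts.
 */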
static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;
	int rc;

	sfc_ev_mgmt_qpoll(sa);

	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
			       sfc_ev_mgmt_periodic_qpoll, sa);
	if (rc != 0)
		sfc_panic(sa,
			  "cannot rearm management EVQ polling alarm (rc=%d)",
			  rc);
}

static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}

static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}

int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start management EVQ used for global events */
	rte_spinlock_lock(&sa->mgmt_evq_lock);

	rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), this is required to process link status changes and
	 * other device-level events; otherwise the event queue may
	 * overflow, which is an unrecoverable error.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when corresponding
	 * Rx/Tx queue is started/stopped.
	 */

	return 0;

fail_mgmt_evq_start:
	rte_spinlock_unlock(&sa->mgmt_evq_lock);
	efx_ev_fini(sa->nic);

fail_ev_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_stop(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sfc_log_init(sa, "entry");

	sfc_ev_mgmt_periodic_qpoll_stop(sa);

	/* Make sure that all event queues are stopped */
	sw_index = sa->evq_count;
	while (sw_index-- > 0) {
		if (sw_index == sa->mgmt_evq_index) {
			/* Locks are required for the management EVQ */
			rte_spinlock_lock(&sa->mgmt_evq_lock);
			sfc_ev_qstop(sa, sa->mgmt_evq_index);
			rte_spinlock_unlock(&sa->mgmt_evq_lock);
		} else {
			sfc_ev_qstop(sa, sw_index);
		}
	}

	efx_ev_fini(sa->nic);
}

int
sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     unsigned int entries, int socket_id)
{
	struct sfc_evq_info *evq_info;
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	evq_info = &sa->evq_info[sw_index];

	SFC_ASSERT(rte_is_power_of_2(entries));
	SFC_ASSERT(entries <= evq_info->max_entries);
	evq_info->entries = entries;

	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (evq == NULL)
		return ENOMEM;

	evq->sa = sa;
	evq->evq_index = sw_index;

	/* Allocate DMA space */
	rc = sfc_dma_alloc(sa, "evq", sw_index, EFX_EVQ_SIZE(evq_info->entries),
			   socket_id, &evq->mem);
	if (rc != 0)
		return rc;

	evq->init_state = SFC_EVQ_INITIALIZED;

	evq_info->evq = evq;

	return 0;
}

void
sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_evq *evq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	evq = sa->evq_info[sw_index].evq;

	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);

	sa->evq_info[sw_index].evq = NULL;

	sfc_dma_free(sa, &evq->mem);

	rte_free(evq);
}

static int
sfc_ev_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_evq_info *evq_info = &sa->evq_info[sw_index];
	unsigned int max_entries;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	max_entries = sfc_evq_max_entries(sa, sw_index);
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	evq_info->max_entries = max_entries;

	return 0;
}

static void
sfc_ev_qfini_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	sfc_log_init(sa, "sw_index=%u", sw_index);

	/* Nothing to cleanup */
}

int
sfc_ev_init(struct sfc_adapter *sa)
{
	int rc;
	unsigned int sw_index;

	sfc_log_init(sa, "entry");

	sa->evq_count = sfc_ev_qcount(sa);
	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	/* Allocate EVQ info array */
	rc = ENOMEM;
	sa->evq_info = rte_calloc_socket("sfc-evqs", sa->evq_count,
					 sizeof(struct sfc_evq_info), 0,
					 sa->socket_id);
	if (sa->evq_info == NULL)
		goto fail_evqs_alloc;

	for (sw_index = 0; sw_index < sa->evq_count; ++sw_index) {
		rc = sfc_ev_qinit_info(sa, sw_index);
		if (rc != 0)
			goto fail_ev_qinit_info;
	}

	rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:
fail_ev_qinit_info:
	while (sw_index-- > 0)
		sfc_ev_qfini_info(sa, sw_index);

	rte_free(sa->evq_info);
	sa->evq_info = NULL;

fail_evqs_alloc:
	sa->evq_count = 0;
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_fini(struct sfc_adapter *sa)
{
	int sw_index;

	sfc_log_init(sa, "entry");

	/* Cleanup all event queues */
	sw_index = sa->evq_count;
	while (--sw_index >= 0) {
		if (sa->evq_info[sw_index].evq != NULL)
			sfc_ev_qfini(sa, sw_index);
		sfc_ev_qfini_info(sa, sw_index);
	}

	rte_free(sa->evq_info);
	sa->evq_info = NULL;
	sa->evq_count = 0;
}