xref: /dpdk/drivers/net/sfc/sfc_ev.c (revision 638bddc99faafd3d91fbbd5703467233e28da29b)
158294ee6SAndrew Rybchenko /*-
2244cfa79SAndrew Rybchenko  *   BSD LICENSE
3244cfa79SAndrew Rybchenko  *
4244cfa79SAndrew Rybchenko  * Copyright (c) 2016-2017 Solarflare Communications Inc.
558294ee6SAndrew Rybchenko  * All rights reserved.
658294ee6SAndrew Rybchenko  *
758294ee6SAndrew Rybchenko  * This software was jointly developed between OKTET Labs (under contract
858294ee6SAndrew Rybchenko  * for Solarflare) and Solarflare Communications, Inc.
958294ee6SAndrew Rybchenko  *
1058294ee6SAndrew Rybchenko  * Redistribution and use in source and binary forms, with or without
1158294ee6SAndrew Rybchenko  * modification, are permitted provided that the following conditions are met:
1258294ee6SAndrew Rybchenko  *
1358294ee6SAndrew Rybchenko  * 1. Redistributions of source code must retain the above copyright notice,
1458294ee6SAndrew Rybchenko  *    this list of conditions and the following disclaimer.
1558294ee6SAndrew Rybchenko  * 2. Redistributions in binary form must reproduce the above copyright notice,
1658294ee6SAndrew Rybchenko  *    this list of conditions and the following disclaimer in the documentation
1758294ee6SAndrew Rybchenko  *    and/or other materials provided with the distribution.
1858294ee6SAndrew Rybchenko  *
1958294ee6SAndrew Rybchenko  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2058294ee6SAndrew Rybchenko  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2158294ee6SAndrew Rybchenko  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2258294ee6SAndrew Rybchenko  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2358294ee6SAndrew Rybchenko  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2458294ee6SAndrew Rybchenko  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2558294ee6SAndrew Rybchenko  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
2658294ee6SAndrew Rybchenko  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
2758294ee6SAndrew Rybchenko  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
2858294ee6SAndrew Rybchenko  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
2958294ee6SAndrew Rybchenko  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3058294ee6SAndrew Rybchenko  */
3158294ee6SAndrew Rybchenko 
3258294ee6SAndrew Rybchenko #include <rte_debug.h>
3358294ee6SAndrew Rybchenko #include <rte_cycles.h>
342de39f4eSAndrew Rybchenko #include <rte_alarm.h>
3577f2d053SAndrew Rybchenko #include <rte_branch_prediction.h>
3658294ee6SAndrew Rybchenko 
3758294ee6SAndrew Rybchenko #include "efx.h"
3858294ee6SAndrew Rybchenko 
3958294ee6SAndrew Rybchenko #include "sfc.h"
4058294ee6SAndrew Rybchenko #include "sfc_debug.h"
4158294ee6SAndrew Rybchenko #include "sfc_log.h"
4258294ee6SAndrew Rybchenko #include "sfc_ev.h"
4328944ac0SAndrew Rybchenko #include "sfc_rx.h"
44fed9aeb4SIvan Malov #include "sfc_tx.h"
45c22d3c50SAndrew Rybchenko #include "sfc_kvargs.h"
4658294ee6SAndrew Rybchenko 
4758294ee6SAndrew Rybchenko 
4858294ee6SAndrew Rybchenko /* Initial delay when waiting for event queue init complete event */
4958294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_BACKOFF_START_US	(1)
5058294ee6SAndrew Rybchenko /* Maximum delay between event queue polling attempts */
5158294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
5258294ee6SAndrew Rybchenko /* Event queue init approx timeout */
5358294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)
5458294ee6SAndrew Rybchenko 
552de39f4eSAndrew Rybchenko /* Management event queue polling period in microseconds */
562de39f4eSAndrew Rybchenko #define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)
572de39f4eSAndrew Rybchenko 
5858294ee6SAndrew Rybchenko 
5958294ee6SAndrew Rybchenko static boolean_t
6058294ee6SAndrew Rybchenko sfc_ev_initialized(void *arg)
6158294ee6SAndrew Rybchenko {
6258294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
6358294ee6SAndrew Rybchenko 
6458294ee6SAndrew Rybchenko 	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
6558294ee6SAndrew Rybchenko 	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
6658294ee6SAndrew Rybchenko 		   evq->init_state == SFC_EVQ_STARTED);
6758294ee6SAndrew Rybchenko 
6858294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_STARTED;
6958294ee6SAndrew Rybchenko 
7058294ee6SAndrew Rybchenko 	return B_FALSE;
7158294ee6SAndrew Rybchenko }
7258294ee6SAndrew Rybchenko 
7358294ee6SAndrew Rybchenko static boolean_t
747965557eSAndrew Rybchenko sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
757965557eSAndrew Rybchenko 	      uint32_t size, uint16_t flags)
767965557eSAndrew Rybchenko {
777965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
787965557eSAndrew Rybchenko 
797965557eSAndrew Rybchenko 	sfc_err(evq->sa,
807965557eSAndrew Rybchenko 		"EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
817965557eSAndrew Rybchenko 		evq->evq_index, label, id, size, flags);
827965557eSAndrew Rybchenko 	return B_TRUE;
837965557eSAndrew Rybchenko }
847965557eSAndrew Rybchenko 
857965557eSAndrew Rybchenko static boolean_t
86df1bfde4SAndrew Rybchenko sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
87921f6cf1SAndrew Rybchenko 	      uint32_t size, uint16_t flags)
8858294ee6SAndrew Rybchenko {
8958294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
90df1bfde4SAndrew Rybchenko 	struct sfc_efx_rxq *rxq;
91921f6cf1SAndrew Rybchenko 	unsigned int stop;
92921f6cf1SAndrew Rybchenko 	unsigned int pending_id;
93921f6cf1SAndrew Rybchenko 	unsigned int delta;
94921f6cf1SAndrew Rybchenko 	unsigned int i;
95df1bfde4SAndrew Rybchenko 	struct sfc_efx_rx_sw_desc *rxd;
9658294ee6SAndrew Rybchenko 
97921f6cf1SAndrew Rybchenko 	if (unlikely(evq->exception))
98921f6cf1SAndrew Rybchenko 		goto done;
99921f6cf1SAndrew Rybchenko 
100df1bfde4SAndrew Rybchenko 	rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);
101921f6cf1SAndrew Rybchenko 
102921f6cf1SAndrew Rybchenko 	SFC_ASSERT(rxq != NULL);
103921f6cf1SAndrew Rybchenko 	SFC_ASSERT(rxq->evq == evq);
104df1bfde4SAndrew Rybchenko 	SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);
105921f6cf1SAndrew Rybchenko 
106921f6cf1SAndrew Rybchenko 	stop = (id + 1) & rxq->ptr_mask;
107921f6cf1SAndrew Rybchenko 	pending_id = rxq->pending & rxq->ptr_mask;
108921f6cf1SAndrew Rybchenko 	delta = (stop >= pending_id) ? (stop - pending_id) :
109921f6cf1SAndrew Rybchenko 		(rxq->ptr_mask + 1 - pending_id + stop);
110921f6cf1SAndrew Rybchenko 
11109a09b6fSAndrew Rybchenko 	if (delta == 0) {
11209a09b6fSAndrew Rybchenko 		/*
11309a09b6fSAndrew Rybchenko 		 * Rx event with no new descriptors done and zero length
11409a09b6fSAndrew Rybchenko 		 * is used to abort scattered packet when there is no room
11509a09b6fSAndrew Rybchenko 		 * for the tail.
11609a09b6fSAndrew Rybchenko 		 */
11709a09b6fSAndrew Rybchenko 		if (unlikely(size != 0)) {
11809a09b6fSAndrew Rybchenko 			evq->exception = B_TRUE;
11909a09b6fSAndrew Rybchenko 			sfc_err(evq->sa,
12009a09b6fSAndrew Rybchenko 				"EVQ %u RxQ %u invalid RX abort "
121f2462150SFerruh Yigit 				"(id=%#x size=%u flags=%#x); needs restart",
122df1bfde4SAndrew Rybchenko 				evq->evq_index, rxq->dp.dpq.queue_id,
12309a09b6fSAndrew Rybchenko 				id, size, flags);
12409a09b6fSAndrew Rybchenko 			goto done;
12509a09b6fSAndrew Rybchenko 		}
12609a09b6fSAndrew Rybchenko 
12709a09b6fSAndrew Rybchenko 		/* Add discard flag to the first fragment */
12809a09b6fSAndrew Rybchenko 		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
12909a09b6fSAndrew Rybchenko 		/* Remove continue flag from the last fragment */
13009a09b6fSAndrew Rybchenko 		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
13109a09b6fSAndrew Rybchenko 	} else if (unlikely(delta > rxq->batch_max)) {
132921f6cf1SAndrew Rybchenko 		evq->exception = B_TRUE;
133921f6cf1SAndrew Rybchenko 
134921f6cf1SAndrew Rybchenko 		sfc_err(evq->sa,
135921f6cf1SAndrew Rybchenko 			"EVQ %u RxQ %u completion out of order "
136f2462150SFerruh Yigit 			"(id=%#x delta=%u flags=%#x); needs restart",
137df1bfde4SAndrew Rybchenko 			evq->evq_index, rxq->dp.dpq.queue_id,
138df1bfde4SAndrew Rybchenko 			id, delta, flags);
139921f6cf1SAndrew Rybchenko 
140921f6cf1SAndrew Rybchenko 		goto done;
141921f6cf1SAndrew Rybchenko 	}
142921f6cf1SAndrew Rybchenko 
143921f6cf1SAndrew Rybchenko 	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
144921f6cf1SAndrew Rybchenko 		rxd = &rxq->sw_desc[i];
145921f6cf1SAndrew Rybchenko 
146921f6cf1SAndrew Rybchenko 		rxd->flags = flags;
147921f6cf1SAndrew Rybchenko 
148921f6cf1SAndrew Rybchenko 		SFC_ASSERT(size < (1 << 16));
149921f6cf1SAndrew Rybchenko 		rxd->size = (uint16_t)size;
150921f6cf1SAndrew Rybchenko 	}
151921f6cf1SAndrew Rybchenko 
152921f6cf1SAndrew Rybchenko 	rxq->pending += delta;
153921f6cf1SAndrew Rybchenko 
154921f6cf1SAndrew Rybchenko done:
155921f6cf1SAndrew Rybchenko 	return B_FALSE;
15658294ee6SAndrew Rybchenko }
15758294ee6SAndrew Rybchenko 
15858294ee6SAndrew Rybchenko static boolean_t
159*638bddc9SAndrew Rybchenko sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
160*638bddc9SAndrew Rybchenko 	     __rte_unused uint32_t size, __rte_unused uint16_t flags)
161*638bddc9SAndrew Rybchenko {
162*638bddc9SAndrew Rybchenko 	struct sfc_evq *evq = arg;
163*638bddc9SAndrew Rybchenko 	struct sfc_dp_rxq *dp_rxq;
164*638bddc9SAndrew Rybchenko 
165*638bddc9SAndrew Rybchenko 	dp_rxq = evq->dp_rxq;
166*638bddc9SAndrew Rybchenko 	SFC_ASSERT(dp_rxq != NULL);
167*638bddc9SAndrew Rybchenko 
168*638bddc9SAndrew Rybchenko 	SFC_ASSERT(evq->sa->dp_rx->qrx_ev != NULL);
169*638bddc9SAndrew Rybchenko 	return evq->sa->dp_rx->qrx_ev(dp_rxq, id);
170*638bddc9SAndrew Rybchenko }
171*638bddc9SAndrew Rybchenko 
172*638bddc9SAndrew Rybchenko static boolean_t
1737965557eSAndrew Rybchenko sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
1747965557eSAndrew Rybchenko {
1757965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
1767965557eSAndrew Rybchenko 
1777965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
1787965557eSAndrew Rybchenko 		evq->evq_index, label, id);
1797965557eSAndrew Rybchenko 	return B_TRUE;
1807965557eSAndrew Rybchenko }
1817965557eSAndrew Rybchenko 
1827965557eSAndrew Rybchenko static boolean_t
183428c7dddSIvan Malov sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
18458294ee6SAndrew Rybchenko {
18558294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
186428c7dddSIvan Malov 	struct sfc_txq *txq;
187428c7dddSIvan Malov 	unsigned int stop;
188428c7dddSIvan Malov 	unsigned int delta;
18958294ee6SAndrew Rybchenko 
190428c7dddSIvan Malov 	txq = evq->txq;
191428c7dddSIvan Malov 
192428c7dddSIvan Malov 	SFC_ASSERT(txq != NULL);
193428c7dddSIvan Malov 	SFC_ASSERT(txq->evq == evq);
194428c7dddSIvan Malov 
195428c7dddSIvan Malov 	if (unlikely((txq->state & SFC_TXQ_STARTED) == 0))
196428c7dddSIvan Malov 		goto done;
197428c7dddSIvan Malov 
198428c7dddSIvan Malov 	stop = (id + 1) & txq->ptr_mask;
199428c7dddSIvan Malov 	id = txq->pending & txq->ptr_mask;
200428c7dddSIvan Malov 
201428c7dddSIvan Malov 	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);
202428c7dddSIvan Malov 
203428c7dddSIvan Malov 	txq->pending += delta;
204428c7dddSIvan Malov 
205428c7dddSIvan Malov done:
206428c7dddSIvan Malov 	return B_FALSE;
20758294ee6SAndrew Rybchenko }
20858294ee6SAndrew Rybchenko 
20958294ee6SAndrew Rybchenko static boolean_t
21058294ee6SAndrew Rybchenko sfc_ev_exception(void *arg, __rte_unused uint32_t code,
21158294ee6SAndrew Rybchenko 		 __rte_unused uint32_t data)
21258294ee6SAndrew Rybchenko {
21358294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
21458294ee6SAndrew Rybchenko 
21598200dd9SAndrew Rybchenko 	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
21698200dd9SAndrew Rybchenko 		return B_FALSE;
21798200dd9SAndrew Rybchenko 
21898200dd9SAndrew Rybchenko 	evq->exception = B_TRUE;
21998200dd9SAndrew Rybchenko 	sfc_warn(evq->sa,
22098200dd9SAndrew Rybchenko 		 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
22198200dd9SAndrew Rybchenko 		 " needs recovery",
22298200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
22398200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
22498200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
22598200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
22698200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
22798200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
22898200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
22998200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
23098200dd9SAndrew Rybchenko 		 "UNKNOWN",
23198200dd9SAndrew Rybchenko 		 code, data, evq->evq_index);
23298200dd9SAndrew Rybchenko 
23358294ee6SAndrew Rybchenko 	return B_TRUE;
23458294ee6SAndrew Rybchenko }
23558294ee6SAndrew Rybchenko 
23658294ee6SAndrew Rybchenko static boolean_t
2377965557eSAndrew Rybchenko sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
2387965557eSAndrew Rybchenko {
2397965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
2407965557eSAndrew Rybchenko 
2417965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
2427965557eSAndrew Rybchenko 		evq->evq_index, rxq_hw_index);
2437965557eSAndrew Rybchenko 	return B_TRUE;
2447965557eSAndrew Rybchenko }
2457965557eSAndrew Rybchenko 
2467965557eSAndrew Rybchenko static boolean_t
24758294ee6SAndrew Rybchenko sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
24858294ee6SAndrew Rybchenko {
24958294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
250df1bfde4SAndrew Rybchenko 	struct sfc_dp_rxq *dp_rxq;
25128944ac0SAndrew Rybchenko 	struct sfc_rxq *rxq;
25258294ee6SAndrew Rybchenko 
253df1bfde4SAndrew Rybchenko 	dp_rxq = evq->dp_rxq;
254df1bfde4SAndrew Rybchenko 	SFC_ASSERT(dp_rxq != NULL);
255df1bfde4SAndrew Rybchenko 
256df1bfde4SAndrew Rybchenko 	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
25728944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq != NULL);
25828944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
25928944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->evq == evq);
26028944ac0SAndrew Rybchenko 	sfc_rx_qflush_done(rxq);
26128944ac0SAndrew Rybchenko 
26228944ac0SAndrew Rybchenko 	return B_FALSE;
26358294ee6SAndrew Rybchenko }
26458294ee6SAndrew Rybchenko 
26558294ee6SAndrew Rybchenko static boolean_t
2667965557eSAndrew Rybchenko sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
2677965557eSAndrew Rybchenko {
2687965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
2697965557eSAndrew Rybchenko 
2707965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
2717965557eSAndrew Rybchenko 		evq->evq_index, rxq_hw_index);
2727965557eSAndrew Rybchenko 	return B_TRUE;
2737965557eSAndrew Rybchenko }
2747965557eSAndrew Rybchenko 
2757965557eSAndrew Rybchenko static boolean_t
27658294ee6SAndrew Rybchenko sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
27758294ee6SAndrew Rybchenko {
27858294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
279df1bfde4SAndrew Rybchenko 	struct sfc_dp_rxq *dp_rxq;
28028944ac0SAndrew Rybchenko 	struct sfc_rxq *rxq;
28158294ee6SAndrew Rybchenko 
282df1bfde4SAndrew Rybchenko 	dp_rxq = evq->dp_rxq;
283df1bfde4SAndrew Rybchenko 	SFC_ASSERT(dp_rxq != NULL);
284df1bfde4SAndrew Rybchenko 
285df1bfde4SAndrew Rybchenko 	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
28628944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq != NULL);
28728944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
28828944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->evq == evq);
28928944ac0SAndrew Rybchenko 	sfc_rx_qflush_failed(rxq);
29028944ac0SAndrew Rybchenko 
29128944ac0SAndrew Rybchenko 	return B_FALSE;
29258294ee6SAndrew Rybchenko }
29358294ee6SAndrew Rybchenko 
29458294ee6SAndrew Rybchenko static boolean_t
2957965557eSAndrew Rybchenko sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
2967965557eSAndrew Rybchenko {
2977965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
2987965557eSAndrew Rybchenko 
2997965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
3007965557eSAndrew Rybchenko 		evq->evq_index, txq_hw_index);
3017965557eSAndrew Rybchenko 	return B_TRUE;
3027965557eSAndrew Rybchenko }
3037965557eSAndrew Rybchenko 
3047965557eSAndrew Rybchenko static boolean_t
30558294ee6SAndrew Rybchenko sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
30658294ee6SAndrew Rybchenko {
30758294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
308fed9aeb4SIvan Malov 	struct sfc_txq *txq;
30958294ee6SAndrew Rybchenko 
310fed9aeb4SIvan Malov 	txq = evq->txq;
311fed9aeb4SIvan Malov 	SFC_ASSERT(txq != NULL);
312fed9aeb4SIvan Malov 	SFC_ASSERT(txq->hw_index == txq_hw_index);
313fed9aeb4SIvan Malov 	SFC_ASSERT(txq->evq == evq);
314fed9aeb4SIvan Malov 	sfc_tx_qflush_done(txq);
315fed9aeb4SIvan Malov 
316fed9aeb4SIvan Malov 	return B_FALSE;
31758294ee6SAndrew Rybchenko }
31858294ee6SAndrew Rybchenko 
31958294ee6SAndrew Rybchenko static boolean_t
32058294ee6SAndrew Rybchenko sfc_ev_software(void *arg, uint16_t magic)
32158294ee6SAndrew Rybchenko {
32258294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
32358294ee6SAndrew Rybchenko 
32458294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
32558294ee6SAndrew Rybchenko 		evq->evq_index, magic);
32658294ee6SAndrew Rybchenko 	return B_TRUE;
32758294ee6SAndrew Rybchenko }
32858294ee6SAndrew Rybchenko 
32958294ee6SAndrew Rybchenko static boolean_t
33058294ee6SAndrew Rybchenko sfc_ev_sram(void *arg, uint32_t code)
33158294ee6SAndrew Rybchenko {
33258294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
33358294ee6SAndrew Rybchenko 
33458294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
33558294ee6SAndrew Rybchenko 		evq->evq_index, code);
33658294ee6SAndrew Rybchenko 	return B_TRUE;
33758294ee6SAndrew Rybchenko }
33858294ee6SAndrew Rybchenko 
33958294ee6SAndrew Rybchenko static boolean_t
34058294ee6SAndrew Rybchenko sfc_ev_wake_up(void *arg, uint32_t index)
34158294ee6SAndrew Rybchenko {
34258294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
34358294ee6SAndrew Rybchenko 
34458294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
34558294ee6SAndrew Rybchenko 		evq->evq_index, index);
34658294ee6SAndrew Rybchenko 	return B_TRUE;
34758294ee6SAndrew Rybchenko }
34858294ee6SAndrew Rybchenko 
34958294ee6SAndrew Rybchenko static boolean_t
35058294ee6SAndrew Rybchenko sfc_ev_timer(void *arg, uint32_t index)
35158294ee6SAndrew Rybchenko {
35258294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
35358294ee6SAndrew Rybchenko 
35458294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
35558294ee6SAndrew Rybchenko 		evq->evq_index, index);
35658294ee6SAndrew Rybchenko 	return B_TRUE;
35758294ee6SAndrew Rybchenko }
35858294ee6SAndrew Rybchenko 
35958294ee6SAndrew Rybchenko static boolean_t
3607965557eSAndrew Rybchenko sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
3617965557eSAndrew Rybchenko {
3627965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
3637965557eSAndrew Rybchenko 
3647965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected link change event",
3657965557eSAndrew Rybchenko 		evq->evq_index);
3667965557eSAndrew Rybchenko 	return B_TRUE;
3677965557eSAndrew Rybchenko }
3687965557eSAndrew Rybchenko 
3697965557eSAndrew Rybchenko static boolean_t
370886f8d8aSArtem Andreev sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
37158294ee6SAndrew Rybchenko {
37258294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
373886f8d8aSArtem Andreev 	struct sfc_adapter *sa = evq->sa;
374886f8d8aSArtem Andreev 	struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
375886f8d8aSArtem Andreev 	struct rte_eth_link new_link;
3763b809c27SAndrew Rybchenko 	uint64_t new_link_u64;
3773b809c27SAndrew Rybchenko 	uint64_t old_link_u64;
37858294ee6SAndrew Rybchenko 
379886f8d8aSArtem Andreev 	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));
380886f8d8aSArtem Andreev 
381886f8d8aSArtem Andreev 	sfc_port_link_mode_to_info(link_mode, &new_link);
3823b809c27SAndrew Rybchenko 
3833b809c27SAndrew Rybchenko 	new_link_u64 = *(uint64_t *)&new_link;
3843b809c27SAndrew Rybchenko 	do {
3853b809c27SAndrew Rybchenko 		old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
3863b809c27SAndrew Rybchenko 		if (old_link_u64 == new_link_u64)
3873b809c27SAndrew Rybchenko 			break;
3883b809c27SAndrew Rybchenko 
3893b809c27SAndrew Rybchenko 		if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
3903b809c27SAndrew Rybchenko 					old_link_u64, new_link_u64)) {
3913b809c27SAndrew Rybchenko 			evq->sa->port.lsc_seq++;
3923b809c27SAndrew Rybchenko 			break;
3933b809c27SAndrew Rybchenko 		}
3943b809c27SAndrew Rybchenko 	} while (B_TRUE);
395886f8d8aSArtem Andreev 
396886f8d8aSArtem Andreev 	return B_FALSE;
39758294ee6SAndrew Rybchenko }
39858294ee6SAndrew Rybchenko 
39958294ee6SAndrew Rybchenko static const efx_ev_callbacks_t sfc_ev_callbacks = {
40058294ee6SAndrew Rybchenko 	.eec_initialized	= sfc_ev_initialized,
4017965557eSAndrew Rybchenko 	.eec_rx			= sfc_ev_nop_rx,
4027965557eSAndrew Rybchenko 	.eec_tx			= sfc_ev_nop_tx,
40358294ee6SAndrew Rybchenko 	.eec_exception		= sfc_ev_exception,
4047965557eSAndrew Rybchenko 	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
4057965557eSAndrew Rybchenko 	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
4067965557eSAndrew Rybchenko 	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
40758294ee6SAndrew Rybchenko 	.eec_software		= sfc_ev_software,
40858294ee6SAndrew Rybchenko 	.eec_sram		= sfc_ev_sram,
40958294ee6SAndrew Rybchenko 	.eec_wake_up		= sfc_ev_wake_up,
41058294ee6SAndrew Rybchenko 	.eec_timer		= sfc_ev_timer,
41158294ee6SAndrew Rybchenko 	.eec_link_change	= sfc_ev_link_change,
41258294ee6SAndrew Rybchenko };
41358294ee6SAndrew Rybchenko 
414df1bfde4SAndrew Rybchenko static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
4157965557eSAndrew Rybchenko 	.eec_initialized	= sfc_ev_initialized,
416df1bfde4SAndrew Rybchenko 	.eec_rx			= sfc_ev_efx_rx,
417df1bfde4SAndrew Rybchenko 	.eec_tx			= sfc_ev_nop_tx,
418df1bfde4SAndrew Rybchenko 	.eec_exception		= sfc_ev_exception,
419df1bfde4SAndrew Rybchenko 	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
420df1bfde4SAndrew Rybchenko 	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
421df1bfde4SAndrew Rybchenko 	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
422df1bfde4SAndrew Rybchenko 	.eec_software		= sfc_ev_software,
423df1bfde4SAndrew Rybchenko 	.eec_sram		= sfc_ev_sram,
424df1bfde4SAndrew Rybchenko 	.eec_wake_up		= sfc_ev_wake_up,
425df1bfde4SAndrew Rybchenko 	.eec_timer		= sfc_ev_timer,
426df1bfde4SAndrew Rybchenko 	.eec_link_change	= sfc_ev_nop_link_change,
427df1bfde4SAndrew Rybchenko };
428df1bfde4SAndrew Rybchenko 
429df1bfde4SAndrew Rybchenko static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
430df1bfde4SAndrew Rybchenko 	.eec_initialized	= sfc_ev_initialized,
431*638bddc9SAndrew Rybchenko 	.eec_rx			= sfc_ev_dp_rx,
4327965557eSAndrew Rybchenko 	.eec_tx			= sfc_ev_nop_tx,
4337965557eSAndrew Rybchenko 	.eec_exception		= sfc_ev_exception,
4347965557eSAndrew Rybchenko 	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
4357965557eSAndrew Rybchenko 	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
4367965557eSAndrew Rybchenko 	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
4377965557eSAndrew Rybchenko 	.eec_software		= sfc_ev_software,
4387965557eSAndrew Rybchenko 	.eec_sram		= sfc_ev_sram,
4397965557eSAndrew Rybchenko 	.eec_wake_up		= sfc_ev_wake_up,
4407965557eSAndrew Rybchenko 	.eec_timer		= sfc_ev_timer,
4417965557eSAndrew Rybchenko 	.eec_link_change	= sfc_ev_nop_link_change,
4427965557eSAndrew Rybchenko };
4437965557eSAndrew Rybchenko 
4447965557eSAndrew Rybchenko static const efx_ev_callbacks_t sfc_ev_callbacks_tx = {
4457965557eSAndrew Rybchenko 	.eec_initialized	= sfc_ev_initialized,
4467965557eSAndrew Rybchenko 	.eec_rx			= sfc_ev_nop_rx,
4477965557eSAndrew Rybchenko 	.eec_tx			= sfc_ev_tx,
4487965557eSAndrew Rybchenko 	.eec_exception		= sfc_ev_exception,
4497965557eSAndrew Rybchenko 	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
4507965557eSAndrew Rybchenko 	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
4517965557eSAndrew Rybchenko 	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
4527965557eSAndrew Rybchenko 	.eec_software		= sfc_ev_software,
4537965557eSAndrew Rybchenko 	.eec_sram		= sfc_ev_sram,
4547965557eSAndrew Rybchenko 	.eec_wake_up		= sfc_ev_wake_up,
4557965557eSAndrew Rybchenko 	.eec_timer		= sfc_ev_timer,
4567965557eSAndrew Rybchenko 	.eec_link_change	= sfc_ev_nop_link_change,
4577965557eSAndrew Rybchenko };
4587965557eSAndrew Rybchenko 
45958294ee6SAndrew Rybchenko 
46058294ee6SAndrew Rybchenko void
46158294ee6SAndrew Rybchenko sfc_ev_qpoll(struct sfc_evq *evq)
46258294ee6SAndrew Rybchenko {
46358294ee6SAndrew Rybchenko 	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
46458294ee6SAndrew Rybchenko 		   evq->init_state == SFC_EVQ_STARTING);
46558294ee6SAndrew Rybchenko 
46658294ee6SAndrew Rybchenko 	/* Synchronize the DMA memory for reading not required */
46758294ee6SAndrew Rybchenko 
4687965557eSAndrew Rybchenko 	efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);
46958294ee6SAndrew Rybchenko 
47077f2d053SAndrew Rybchenko 	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
47177f2d053SAndrew Rybchenko 		struct sfc_adapter *sa = evq->sa;
47277f2d053SAndrew Rybchenko 		int rc;
47377f2d053SAndrew Rybchenko 
474df1bfde4SAndrew Rybchenko 		if (evq->dp_rxq != NULL) {
475df1bfde4SAndrew Rybchenko 			unsigned int rxq_sw_index;
476df1bfde4SAndrew Rybchenko 
477df1bfde4SAndrew Rybchenko 			rxq_sw_index = evq->dp_rxq->dpq.queue_id;
47877f2d053SAndrew Rybchenko 
47977f2d053SAndrew Rybchenko 			sfc_warn(sa,
48077f2d053SAndrew Rybchenko 				 "restart RxQ %u because of exception on its EvQ %u",
48177f2d053SAndrew Rybchenko 				 rxq_sw_index, evq->evq_index);
48277f2d053SAndrew Rybchenko 
48377f2d053SAndrew Rybchenko 			sfc_rx_qstop(sa, rxq_sw_index);
48477f2d053SAndrew Rybchenko 			rc = sfc_rx_qstart(sa, rxq_sw_index);
48577f2d053SAndrew Rybchenko 			if (rc != 0)
48677f2d053SAndrew Rybchenko 				sfc_err(sa, "cannot restart RxQ %u",
48777f2d053SAndrew Rybchenko 					rxq_sw_index);
48877f2d053SAndrew Rybchenko 		}
48977f2d053SAndrew Rybchenko 
4904a18304dSIvan Malov 		if (evq->txq != NULL) {
4914a18304dSIvan Malov 			unsigned int txq_sw_index = sfc_txq_sw_index(evq->txq);
4924a18304dSIvan Malov 
4934a18304dSIvan Malov 			sfc_warn(sa,
4944a18304dSIvan Malov 				 "restart TxQ %u because of exception on its EvQ %u",
4954a18304dSIvan Malov 				 txq_sw_index, evq->evq_index);
4964a18304dSIvan Malov 
4974a18304dSIvan Malov 			sfc_tx_qstop(sa, txq_sw_index);
4984a18304dSIvan Malov 			rc = sfc_tx_qstart(sa, txq_sw_index);
4994a18304dSIvan Malov 			if (rc != 0)
5004a18304dSIvan Malov 				sfc_err(sa, "cannot restart TxQ %u",
5014a18304dSIvan Malov 					txq_sw_index);
5024a18304dSIvan Malov 		}
5034a18304dSIvan Malov 
50477f2d053SAndrew Rybchenko 		if (evq->exception)
50577f2d053SAndrew Rybchenko 			sfc_panic(sa, "unrecoverable exception on EvQ %u",
50677f2d053SAndrew Rybchenko 				  evq->evq_index);
50777f2d053SAndrew Rybchenko 
50877f2d053SAndrew Rybchenko 		sfc_adapter_unlock(sa);
50977f2d053SAndrew Rybchenko 	}
51077f2d053SAndrew Rybchenko 
51158294ee6SAndrew Rybchenko 	/* Poll-mode driver does not re-prime the event queue for interrupts */
51258294ee6SAndrew Rybchenko }
51358294ee6SAndrew Rybchenko 
5149a75f75cSAndrew Rybchenko void
5159a75f75cSAndrew Rybchenko sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
5169a75f75cSAndrew Rybchenko {
5179a75f75cSAndrew Rybchenko 	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
5189a75f75cSAndrew Rybchenko 		struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;
5199a75f75cSAndrew Rybchenko 
5209a75f75cSAndrew Rybchenko 		if (mgmt_evq->init_state == SFC_EVQ_STARTED)
5219a75f75cSAndrew Rybchenko 			sfc_ev_qpoll(mgmt_evq);
5229a75f75cSAndrew Rybchenko 
5239a75f75cSAndrew Rybchenko 		rte_spinlock_unlock(&sa->mgmt_evq_lock);
5249a75f75cSAndrew Rybchenko 	}
5259a75f75cSAndrew Rybchenko }
5269a75f75cSAndrew Rybchenko 
52758294ee6SAndrew Rybchenko int
52858294ee6SAndrew Rybchenko sfc_ev_qprime(struct sfc_evq *evq)
52958294ee6SAndrew Rybchenko {
53058294ee6SAndrew Rybchenko 	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
53158294ee6SAndrew Rybchenko 	return efx_ev_qprime(evq->common, evq->read_ptr);
53258294ee6SAndrew Rybchenko }
53358294ee6SAndrew Rybchenko 
53458294ee6SAndrew Rybchenko int
53558294ee6SAndrew Rybchenko sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index)
53658294ee6SAndrew Rybchenko {
53758294ee6SAndrew Rybchenko 	const struct sfc_evq_info *evq_info;
53858294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
53958294ee6SAndrew Rybchenko 	efsys_mem_t *esmp;
54058294ee6SAndrew Rybchenko 	unsigned int total_delay_us;
54158294ee6SAndrew Rybchenko 	unsigned int delay_us;
54258294ee6SAndrew Rybchenko 	int rc;
54358294ee6SAndrew Rybchenko 
54458294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
54558294ee6SAndrew Rybchenko 
54658294ee6SAndrew Rybchenko 	evq_info = &sa->evq_info[sw_index];
54758294ee6SAndrew Rybchenko 	evq = evq_info->evq;
54858294ee6SAndrew Rybchenko 	esmp = &evq->mem;
54958294ee6SAndrew Rybchenko 
55058294ee6SAndrew Rybchenko 	/* Clear all events */
55158294ee6SAndrew Rybchenko 	(void)memset((void *)esmp->esm_base, 0xff,
55258294ee6SAndrew Rybchenko 		     EFX_EVQ_SIZE(evq_info->entries));
55358294ee6SAndrew Rybchenko 
55458294ee6SAndrew Rybchenko 	/* Create the common code event queue */
55558294ee6SAndrew Rybchenko 	rc = efx_ev_qcreate(sa->nic, sw_index, esmp, evq_info->entries,
556c22d3c50SAndrew Rybchenko 			    0 /* unused on EF10 */, 0, evq_info->flags,
55758294ee6SAndrew Rybchenko 			    &evq->common);
55858294ee6SAndrew Rybchenko 	if (rc != 0)
55958294ee6SAndrew Rybchenko 		goto fail_ev_qcreate;
56058294ee6SAndrew Rybchenko 
561df1bfde4SAndrew Rybchenko 	SFC_ASSERT(evq->dp_rxq == NULL || evq->txq == NULL);
562df1bfde4SAndrew Rybchenko 	if (evq->dp_rxq != 0) {
563df1bfde4SAndrew Rybchenko 		if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
564df1bfde4SAndrew Rybchenko 			evq->callbacks = &sfc_ev_callbacks_efx_rx;
5657965557eSAndrew Rybchenko 		else
566df1bfde4SAndrew Rybchenko 			evq->callbacks = &sfc_ev_callbacks_dp_rx;
567df1bfde4SAndrew Rybchenko 	} else if (evq->txq != 0) {
568df1bfde4SAndrew Rybchenko 		evq->callbacks = &sfc_ev_callbacks_tx;
569df1bfde4SAndrew Rybchenko 	} else {
5707965557eSAndrew Rybchenko 		evq->callbacks = &sfc_ev_callbacks;
571df1bfde4SAndrew Rybchenko 	}
5727965557eSAndrew Rybchenko 
57358294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_STARTING;
57458294ee6SAndrew Rybchenko 
57558294ee6SAndrew Rybchenko 	/* Wait for the initialization event */
57658294ee6SAndrew Rybchenko 	total_delay_us = 0;
57758294ee6SAndrew Rybchenko 	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
57858294ee6SAndrew Rybchenko 	do {
57958294ee6SAndrew Rybchenko 		(void)sfc_ev_qpoll(evq);
58058294ee6SAndrew Rybchenko 
58158294ee6SAndrew Rybchenko 		/* Check to see if the initialization complete indication
58258294ee6SAndrew Rybchenko 		 * posted by the hardware.
58358294ee6SAndrew Rybchenko 		 */
58458294ee6SAndrew Rybchenko 		if (evq->init_state == SFC_EVQ_STARTED)
58558294ee6SAndrew Rybchenko 			goto done;
58658294ee6SAndrew Rybchenko 
58758294ee6SAndrew Rybchenko 		/* Give event queue some time to init */
58858294ee6SAndrew Rybchenko 		rte_delay_us(delay_us);
58958294ee6SAndrew Rybchenko 
59058294ee6SAndrew Rybchenko 		total_delay_us += delay_us;
59158294ee6SAndrew Rybchenko 
59258294ee6SAndrew Rybchenko 		/* Exponential backoff */
59358294ee6SAndrew Rybchenko 		delay_us *= 2;
59458294ee6SAndrew Rybchenko 		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
59558294ee6SAndrew Rybchenko 			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;
59658294ee6SAndrew Rybchenko 
59758294ee6SAndrew Rybchenko 	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);
59858294ee6SAndrew Rybchenko 
59958294ee6SAndrew Rybchenko 	rc = ETIMEDOUT;
60058294ee6SAndrew Rybchenko 	goto fail_timedout;
60158294ee6SAndrew Rybchenko 
60258294ee6SAndrew Rybchenko done:
60358294ee6SAndrew Rybchenko 	return 0;
60458294ee6SAndrew Rybchenko 
60558294ee6SAndrew Rybchenko fail_timedout:
60658294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_INITIALIZED;
60758294ee6SAndrew Rybchenko 	efx_ev_qdestroy(evq->common);
60858294ee6SAndrew Rybchenko 
60958294ee6SAndrew Rybchenko fail_ev_qcreate:
61058294ee6SAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
61158294ee6SAndrew Rybchenko 	return rc;
61258294ee6SAndrew Rybchenko }
61358294ee6SAndrew Rybchenko 
61458294ee6SAndrew Rybchenko void
61558294ee6SAndrew Rybchenko sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
61658294ee6SAndrew Rybchenko {
61758294ee6SAndrew Rybchenko 	const struct sfc_evq_info *evq_info;
61858294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
61958294ee6SAndrew Rybchenko 
62058294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
62158294ee6SAndrew Rybchenko 
62258294ee6SAndrew Rybchenko 	SFC_ASSERT(sw_index < sa->evq_count);
62358294ee6SAndrew Rybchenko 
62458294ee6SAndrew Rybchenko 	evq_info = &sa->evq_info[sw_index];
62558294ee6SAndrew Rybchenko 	evq = evq_info->evq;
62658294ee6SAndrew Rybchenko 
62758294ee6SAndrew Rybchenko 	if (evq == NULL || evq->init_state != SFC_EVQ_STARTED)
62858294ee6SAndrew Rybchenko 		return;
62958294ee6SAndrew Rybchenko 
63058294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_INITIALIZED;
6317965557eSAndrew Rybchenko 	evq->callbacks = NULL;
63258294ee6SAndrew Rybchenko 	evq->read_ptr = 0;
63358294ee6SAndrew Rybchenko 	evq->exception = B_FALSE;
63458294ee6SAndrew Rybchenko 
63558294ee6SAndrew Rybchenko 	efx_ev_qdestroy(evq->common);
63658294ee6SAndrew Rybchenko }
63758294ee6SAndrew Rybchenko 
6382de39f4eSAndrew Rybchenko static void
6392de39f4eSAndrew Rybchenko sfc_ev_mgmt_periodic_qpoll(void *arg)
6402de39f4eSAndrew Rybchenko {
6412de39f4eSAndrew Rybchenko 	struct sfc_adapter *sa = arg;
6422de39f4eSAndrew Rybchenko 	int rc;
6432de39f4eSAndrew Rybchenko 
6442de39f4eSAndrew Rybchenko 	sfc_ev_mgmt_qpoll(sa);
6452de39f4eSAndrew Rybchenko 
6462de39f4eSAndrew Rybchenko 	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
6472de39f4eSAndrew Rybchenko 			       sfc_ev_mgmt_periodic_qpoll, sa);
648323706abSAndrew Rybchenko 	if (rc == -ENOTSUP) {
649323706abSAndrew Rybchenko 		sfc_warn(sa, "alarms are not supported");
650323706abSAndrew Rybchenko 		sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
651323706abSAndrew Rybchenko 	} else if (rc != 0) {
652323706abSAndrew Rybchenko 		sfc_err(sa,
6532de39f4eSAndrew Rybchenko 			"cannot rearm management EVQ polling alarm (rc=%d)",
6542de39f4eSAndrew Rybchenko 			rc);
6552de39f4eSAndrew Rybchenko 	}
656323706abSAndrew Rybchenko }
6572de39f4eSAndrew Rybchenko 
/*
 * Kick off periodic polling of the management event queue.
 * The first direct call arms the EAL alarm, which then re-arms itself
 * from the callback.
 */
static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}
6632de39f4eSAndrew Rybchenko 
/* Cancel the self-rearming management EVQ polling alarm. */
static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}
6692de39f4eSAndrew Rybchenko 
67058294ee6SAndrew Rybchenko int
67158294ee6SAndrew Rybchenko sfc_ev_start(struct sfc_adapter *sa)
67258294ee6SAndrew Rybchenko {
67358294ee6SAndrew Rybchenko 	int rc;
67458294ee6SAndrew Rybchenko 
67558294ee6SAndrew Rybchenko 	sfc_log_init(sa, "entry");
67658294ee6SAndrew Rybchenko 
67758294ee6SAndrew Rybchenko 	rc = efx_ev_init(sa->nic);
67858294ee6SAndrew Rybchenko 	if (rc != 0)
67958294ee6SAndrew Rybchenko 		goto fail_ev_init;
68058294ee6SAndrew Rybchenko 
6819a75f75cSAndrew Rybchenko 	/* Start management EVQ used for global events */
6829a75f75cSAndrew Rybchenko 	rte_spinlock_lock(&sa->mgmt_evq_lock);
6839a75f75cSAndrew Rybchenko 
6849a75f75cSAndrew Rybchenko 	rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
6859a75f75cSAndrew Rybchenko 	if (rc != 0)
6869a75f75cSAndrew Rybchenko 		goto fail_mgmt_evq_start;
6879a75f75cSAndrew Rybchenko 
6883b809c27SAndrew Rybchenko 	if (sa->intr.lsc_intr) {
6893b809c27SAndrew Rybchenko 		rc = sfc_ev_qprime(sa->evq_info[sa->mgmt_evq_index].evq);
6903b809c27SAndrew Rybchenko 		if (rc != 0)
6913b809c27SAndrew Rybchenko 			goto fail_evq0_prime;
6923b809c27SAndrew Rybchenko 	}
6933b809c27SAndrew Rybchenko 
6949a75f75cSAndrew Rybchenko 	rte_spinlock_unlock(&sa->mgmt_evq_lock);
6959a75f75cSAndrew Rybchenko 
69658294ee6SAndrew Rybchenko 	/*
6972de39f4eSAndrew Rybchenko 	 * Start management EVQ polling. If interrupts are disabled
6982de39f4eSAndrew Rybchenko 	 * (not used), it is required to process link status change
6992de39f4eSAndrew Rybchenko 	 * and other device level events to avoid unrecoverable
7002de39f4eSAndrew Rybchenko 	 * error because the event queue overflow.
7012de39f4eSAndrew Rybchenko 	 */
7022de39f4eSAndrew Rybchenko 	sfc_ev_mgmt_periodic_qpoll_start(sa);
7032de39f4eSAndrew Rybchenko 
7042de39f4eSAndrew Rybchenko 	/*
7059a75f75cSAndrew Rybchenko 	 * Rx/Tx event queues are started/stopped when corresponding
7069a75f75cSAndrew Rybchenko 	 * Rx/Tx queue is started/stopped.
70758294ee6SAndrew Rybchenko 	 */
70858294ee6SAndrew Rybchenko 
70958294ee6SAndrew Rybchenko 	return 0;
71058294ee6SAndrew Rybchenko 
7113b809c27SAndrew Rybchenko fail_evq0_prime:
7123b809c27SAndrew Rybchenko 	sfc_ev_qstop(sa, 0);
7133b809c27SAndrew Rybchenko 
7149a75f75cSAndrew Rybchenko fail_mgmt_evq_start:
7159a75f75cSAndrew Rybchenko 	rte_spinlock_unlock(&sa->mgmt_evq_lock);
7169a75f75cSAndrew Rybchenko 	efx_ev_fini(sa->nic);
7179a75f75cSAndrew Rybchenko 
71858294ee6SAndrew Rybchenko fail_ev_init:
71958294ee6SAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
72058294ee6SAndrew Rybchenko 	return rc;
72158294ee6SAndrew Rybchenko }
72258294ee6SAndrew Rybchenko 
72358294ee6SAndrew Rybchenko void
72458294ee6SAndrew Rybchenko sfc_ev_stop(struct sfc_adapter *sa)
72558294ee6SAndrew Rybchenko {
72658294ee6SAndrew Rybchenko 	unsigned int sw_index;
72758294ee6SAndrew Rybchenko 
72858294ee6SAndrew Rybchenko 	sfc_log_init(sa, "entry");
72958294ee6SAndrew Rybchenko 
7302de39f4eSAndrew Rybchenko 	sfc_ev_mgmt_periodic_qpoll_stop(sa);
7312de39f4eSAndrew Rybchenko 
73258294ee6SAndrew Rybchenko 	/* Make sure that all event queues are stopped */
73358294ee6SAndrew Rybchenko 	sw_index = sa->evq_count;
7349a75f75cSAndrew Rybchenko 	while (sw_index-- > 0) {
7359a75f75cSAndrew Rybchenko 		if (sw_index == sa->mgmt_evq_index) {
7369a75f75cSAndrew Rybchenko 			/* Locks are required for the management EVQ */
7379a75f75cSAndrew Rybchenko 			rte_spinlock_lock(&sa->mgmt_evq_lock);
7389a75f75cSAndrew Rybchenko 			sfc_ev_qstop(sa, sa->mgmt_evq_index);
7399a75f75cSAndrew Rybchenko 			rte_spinlock_unlock(&sa->mgmt_evq_lock);
7409a75f75cSAndrew Rybchenko 		} else {
74158294ee6SAndrew Rybchenko 			sfc_ev_qstop(sa, sw_index);
7429a75f75cSAndrew Rybchenko 		}
7439a75f75cSAndrew Rybchenko 	}
74458294ee6SAndrew Rybchenko 
74558294ee6SAndrew Rybchenko 	efx_ev_fini(sa->nic);
74658294ee6SAndrew Rybchenko }
74758294ee6SAndrew Rybchenko 
74858294ee6SAndrew Rybchenko int
74958294ee6SAndrew Rybchenko sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
75058294ee6SAndrew Rybchenko 	     unsigned int entries, int socket_id)
75158294ee6SAndrew Rybchenko {
75258294ee6SAndrew Rybchenko 	struct sfc_evq_info *evq_info;
75358294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
75458294ee6SAndrew Rybchenko 	int rc;
75558294ee6SAndrew Rybchenko 
75658294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
75758294ee6SAndrew Rybchenko 
75858294ee6SAndrew Rybchenko 	evq_info = &sa->evq_info[sw_index];
75958294ee6SAndrew Rybchenko 
76058294ee6SAndrew Rybchenko 	SFC_ASSERT(rte_is_power_of_2(entries));
76158294ee6SAndrew Rybchenko 	SFC_ASSERT(entries <= evq_info->max_entries);
76258294ee6SAndrew Rybchenko 	evq_info->entries = entries;
76358294ee6SAndrew Rybchenko 
76458294ee6SAndrew Rybchenko 	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
76558294ee6SAndrew Rybchenko 				 socket_id);
76658294ee6SAndrew Rybchenko 	if (evq == NULL)
76758294ee6SAndrew Rybchenko 		return ENOMEM;
76858294ee6SAndrew Rybchenko 
76958294ee6SAndrew Rybchenko 	evq->sa = sa;
77058294ee6SAndrew Rybchenko 	evq->evq_index = sw_index;
77158294ee6SAndrew Rybchenko 
77258294ee6SAndrew Rybchenko 	/* Allocate DMA space */
77358294ee6SAndrew Rybchenko 	rc = sfc_dma_alloc(sa, "evq", sw_index, EFX_EVQ_SIZE(evq_info->entries),
77458294ee6SAndrew Rybchenko 			   socket_id, &evq->mem);
77558294ee6SAndrew Rybchenko 	if (rc != 0)
77658294ee6SAndrew Rybchenko 		return rc;
77758294ee6SAndrew Rybchenko 
77858294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_INITIALIZED;
77958294ee6SAndrew Rybchenko 
78058294ee6SAndrew Rybchenko 	evq_info->evq = evq;
78158294ee6SAndrew Rybchenko 
78258294ee6SAndrew Rybchenko 	return 0;
78358294ee6SAndrew Rybchenko }
78458294ee6SAndrew Rybchenko 
78558294ee6SAndrew Rybchenko void
78658294ee6SAndrew Rybchenko sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index)
78758294ee6SAndrew Rybchenko {
78858294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
78958294ee6SAndrew Rybchenko 
79058294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
79158294ee6SAndrew Rybchenko 
79258294ee6SAndrew Rybchenko 	evq = sa->evq_info[sw_index].evq;
79358294ee6SAndrew Rybchenko 
79458294ee6SAndrew Rybchenko 	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);
79558294ee6SAndrew Rybchenko 
79658294ee6SAndrew Rybchenko 	sa->evq_info[sw_index].evq = NULL;
79758294ee6SAndrew Rybchenko 
79858294ee6SAndrew Rybchenko 	sfc_dma_free(sa, &evq->mem);
79958294ee6SAndrew Rybchenko 
80058294ee6SAndrew Rybchenko 	rte_free(evq);
80158294ee6SAndrew Rybchenko }
80258294ee6SAndrew Rybchenko 
80358294ee6SAndrew Rybchenko static int
80458294ee6SAndrew Rybchenko sfc_ev_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
80558294ee6SAndrew Rybchenko {
80658294ee6SAndrew Rybchenko 	struct sfc_evq_info *evq_info = &sa->evq_info[sw_index];
80758294ee6SAndrew Rybchenko 	unsigned int max_entries;
80858294ee6SAndrew Rybchenko 
80958294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
81058294ee6SAndrew Rybchenko 
81158294ee6SAndrew Rybchenko 	max_entries = sfc_evq_max_entries(sa, sw_index);
81258294ee6SAndrew Rybchenko 	SFC_ASSERT(rte_is_power_of_2(max_entries));
81358294ee6SAndrew Rybchenko 
81458294ee6SAndrew Rybchenko 	evq_info->max_entries = max_entries;
8153b809c27SAndrew Rybchenko 	evq_info->flags = sa->evq_flags |
8163b809c27SAndrew Rybchenko 		((sa->intr.lsc_intr && sw_index == sa->mgmt_evq_index) ?
8173b809c27SAndrew Rybchenko 			EFX_EVQ_FLAGS_NOTIFY_INTERRUPT :
8183b809c27SAndrew Rybchenko 			EFX_EVQ_FLAGS_NOTIFY_DISABLED);
819c22d3c50SAndrew Rybchenko 
820c22d3c50SAndrew Rybchenko 	return 0;
821c22d3c50SAndrew Rybchenko }
822c22d3c50SAndrew Rybchenko 
823c22d3c50SAndrew Rybchenko static int
824c22d3c50SAndrew Rybchenko sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
825c22d3c50SAndrew Rybchenko 			       const char *value_str, void *opaque)
826c22d3c50SAndrew Rybchenko {
827c22d3c50SAndrew Rybchenko 	uint64_t *value = opaque;
828c22d3c50SAndrew Rybchenko 
829c22d3c50SAndrew Rybchenko 	if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
830c22d3c50SAndrew Rybchenko 		*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
831c22d3c50SAndrew Rybchenko 	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
832c22d3c50SAndrew Rybchenko 		*value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
833c22d3c50SAndrew Rybchenko 	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
834c22d3c50SAndrew Rybchenko 		*value = EFX_EVQ_FLAGS_TYPE_AUTO;
835c22d3c50SAndrew Rybchenko 	else
836c22d3c50SAndrew Rybchenko 		return -EINVAL;
83758294ee6SAndrew Rybchenko 
83858294ee6SAndrew Rybchenko 	return 0;
83958294ee6SAndrew Rybchenko }
84058294ee6SAndrew Rybchenko 
/*
 * Counterpart of sfc_ev_qinit_info(); currently a no-op (beyond
 * logging) since qinit_info allocates nothing, but kept so init/fini
 * paths stay symmetric.
 */
static void
sfc_ev_qfini_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	sfc_log_init(sa, "sw_index=%u", sw_index);

	/* Nothing to cleanup */
}
84858294ee6SAndrew Rybchenko 
/*
 * Initialize event handling state for the adapter: resolve the
 * performance profile kvarg into EVQ flags, size the EVQ info array,
 * initialize per-queue info and create the management EVQ.
 *
 * Returns 0 on success or a positive errno-style code on failure;
 * on failure all partially-initialized state is unwound.
 */
int
sfc_ev_init(struct sfc_adapter *sa)
{
	int rc;
	unsigned int sw_index;

	sfc_log_init(sa, "entry");

	/* Default to throughput; kvarg may override via the handler */
	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
				sfc_kvarg_perf_profile_handler,
				&sa->evq_flags);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value",
			SFC_KVARG_PERF_PROFILE);
		goto fail_kvarg_perf_profile;
	}

	sa->evq_count = sfc_ev_qcount(sa);
	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	/* Allocate EVQ info array */
	rc = ENOMEM;
	sa->evq_info = rte_calloc_socket("sfc-evqs", sa->evq_count,
					 sizeof(struct sfc_evq_info), 0,
					 sa->socket_id);
	if (sa->evq_info == NULL)
		goto fail_evqs_alloc;

	for (sw_index = 0; sw_index < sa->evq_count; ++sw_index) {
		rc = sfc_ev_qinit_info(sa, sw_index);
		if (rc != 0)
			goto fail_ev_qinit_info;
	}

	/* Create the management EVQ itself (Rx/Tx EVQs are created later) */
	rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:
fail_ev_qinit_info:
	/*
	 * sw_index holds the number of successfully initialized infos
	 * (== evq_count when reached via fail_mgmt_evq_init), so this
	 * loop finalizes exactly the ones that were initialized.
	 */
	while (sw_index-- > 0)
		sfc_ev_qfini_info(sa, sw_index);

	rte_free(sa->evq_info);
	sa->evq_info = NULL;

fail_evqs_alloc:
	sa->evq_count = 0;

fail_kvarg_perf_profile:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
91258294ee6SAndrew Rybchenko 
91358294ee6SAndrew Rybchenko void
91458294ee6SAndrew Rybchenko sfc_ev_fini(struct sfc_adapter *sa)
91558294ee6SAndrew Rybchenko {
91658294ee6SAndrew Rybchenko 	int sw_index;
91758294ee6SAndrew Rybchenko 
91858294ee6SAndrew Rybchenko 	sfc_log_init(sa, "entry");
91958294ee6SAndrew Rybchenko 
92058294ee6SAndrew Rybchenko 	/* Cleanup all event queues */
92158294ee6SAndrew Rybchenko 	sw_index = sa->evq_count;
92258294ee6SAndrew Rybchenko 	while (--sw_index >= 0) {
92358294ee6SAndrew Rybchenko 		if (sa->evq_info[sw_index].evq != NULL)
92458294ee6SAndrew Rybchenko 			sfc_ev_qfini(sa, sw_index);
92558294ee6SAndrew Rybchenko 		sfc_ev_qfini_info(sa, sw_index);
92658294ee6SAndrew Rybchenko 	}
92758294ee6SAndrew Rybchenko 
92858294ee6SAndrew Rybchenko 	rte_free(sa->evq_info);
92958294ee6SAndrew Rybchenko 	sa->evq_info = NULL;
93058294ee6SAndrew Rybchenko 	sa->evq_count = 0;
93158294ee6SAndrew Rybchenko }
932