xref: /dpdk/drivers/net/sfc/sfc_ev.c (revision df1bfde4ff0dbdb2a0380a1a8b9f2c5bb6e974be)
158294ee6SAndrew Rybchenko /*-
2244cfa79SAndrew Rybchenko  *   BSD LICENSE
3244cfa79SAndrew Rybchenko  *
4244cfa79SAndrew Rybchenko  * Copyright (c) 2016-2017 Solarflare Communications Inc.
558294ee6SAndrew Rybchenko  * All rights reserved.
658294ee6SAndrew Rybchenko  *
758294ee6SAndrew Rybchenko  * This software was jointly developed between OKTET Labs (under contract
858294ee6SAndrew Rybchenko  * for Solarflare) and Solarflare Communications, Inc.
958294ee6SAndrew Rybchenko  *
1058294ee6SAndrew Rybchenko  * Redistribution and use in source and binary forms, with or without
1158294ee6SAndrew Rybchenko  * modification, are permitted provided that the following conditions are met:
1258294ee6SAndrew Rybchenko  *
1358294ee6SAndrew Rybchenko  * 1. Redistributions of source code must retain the above copyright notice,
1458294ee6SAndrew Rybchenko  *    this list of conditions and the following disclaimer.
1558294ee6SAndrew Rybchenko  * 2. Redistributions in binary form must reproduce the above copyright notice,
1658294ee6SAndrew Rybchenko  *    this list of conditions and the following disclaimer in the documentation
1758294ee6SAndrew Rybchenko  *    and/or other materials provided with the distribution.
1858294ee6SAndrew Rybchenko  *
1958294ee6SAndrew Rybchenko  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
2058294ee6SAndrew Rybchenko  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
2158294ee6SAndrew Rybchenko  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
2258294ee6SAndrew Rybchenko  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
2358294ee6SAndrew Rybchenko  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
2458294ee6SAndrew Rybchenko  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
2558294ee6SAndrew Rybchenko  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
2658294ee6SAndrew Rybchenko  * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
2758294ee6SAndrew Rybchenko  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
2858294ee6SAndrew Rybchenko  * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
2958294ee6SAndrew Rybchenko  * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
3058294ee6SAndrew Rybchenko  */
3158294ee6SAndrew Rybchenko 
3258294ee6SAndrew Rybchenko #include <rte_debug.h>
3358294ee6SAndrew Rybchenko #include <rte_cycles.h>
342de39f4eSAndrew Rybchenko #include <rte_alarm.h>
3577f2d053SAndrew Rybchenko #include <rte_branch_prediction.h>
3658294ee6SAndrew Rybchenko 
3758294ee6SAndrew Rybchenko #include "efx.h"
3858294ee6SAndrew Rybchenko 
3958294ee6SAndrew Rybchenko #include "sfc.h"
4058294ee6SAndrew Rybchenko #include "sfc_debug.h"
4158294ee6SAndrew Rybchenko #include "sfc_log.h"
4258294ee6SAndrew Rybchenko #include "sfc_ev.h"
4328944ac0SAndrew Rybchenko #include "sfc_rx.h"
44fed9aeb4SIvan Malov #include "sfc_tx.h"
45c22d3c50SAndrew Rybchenko #include "sfc_kvargs.h"
4658294ee6SAndrew Rybchenko 
4758294ee6SAndrew Rybchenko 
4858294ee6SAndrew Rybchenko /* Initial delay when waiting for event queue init complete event */
4958294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_BACKOFF_START_US	(1)
5058294ee6SAndrew Rybchenko /* Maximum delay between event queue polling attempts */
5158294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
5258294ee6SAndrew Rybchenko /* Event queue init approx timeout */
5358294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)
5458294ee6SAndrew Rybchenko 
552de39f4eSAndrew Rybchenko /* Management event queue polling period in microseconds */
562de39f4eSAndrew Rybchenko #define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)
572de39f4eSAndrew Rybchenko 
5858294ee6SAndrew Rybchenko 
5958294ee6SAndrew Rybchenko static boolean_t
6058294ee6SAndrew Rybchenko sfc_ev_initialized(void *arg)
6158294ee6SAndrew Rybchenko {
6258294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
6358294ee6SAndrew Rybchenko 
6458294ee6SAndrew Rybchenko 	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
6558294ee6SAndrew Rybchenko 	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
6658294ee6SAndrew Rybchenko 		   evq->init_state == SFC_EVQ_STARTED);
6758294ee6SAndrew Rybchenko 
6858294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_STARTED;
6958294ee6SAndrew Rybchenko 
7058294ee6SAndrew Rybchenko 	return B_FALSE;
7158294ee6SAndrew Rybchenko }
7258294ee6SAndrew Rybchenko 
7358294ee6SAndrew Rybchenko static boolean_t
747965557eSAndrew Rybchenko sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
757965557eSAndrew Rybchenko 	      uint32_t size, uint16_t flags)
767965557eSAndrew Rybchenko {
777965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
787965557eSAndrew Rybchenko 
797965557eSAndrew Rybchenko 	sfc_err(evq->sa,
807965557eSAndrew Rybchenko 		"EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
817965557eSAndrew Rybchenko 		evq->evq_index, label, id, size, flags);
827965557eSAndrew Rybchenko 	return B_TRUE;
837965557eSAndrew Rybchenko }
847965557eSAndrew Rybchenko 
/*
 * Rx event handler for the EFX (libefx-based) Rx datapath.
 *
 * The event carries the last completed descriptor index 'id' together
 * with flags and size common to the completed batch.  The handler marks
 * the software descriptors from the queue's pending position up to and
 * including 'id' as done.  Returns B_FALSE so that event queue polling
 * continues; fatal conditions are reported via evq->exception instead.
 */
static boolean_t
sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_efx_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	/* Once an exception is flagged, ignore further Rx events */
	if (unlikely(evq->exception))
		goto done;

	rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);

	/* 'stop' is one past the last completed descriptor */
	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	/* Number of newly completed descriptors, accounting for ring wrap */
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * Rx event with no new descriptors done and zero length
		 * is used to abort scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			/* Zero-delta with non-zero size is invalid */
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart",
				evq->evq_index, rxq->dp.dpq.queue_id,
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		/*
		 * More completions than one batch may deliver: treat as
		 * out-of-order completion and request queue restart.
		 */
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart",
			evq->evq_index, rxq->dp.dpq.queue_id,
			id, delta, flags);

		goto done;
	}

	/* Stamp the completed descriptors with the batch flags and size */
	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		/* rxd->size is uint16_t; ensure the value fits */
		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}
15758294ee6SAndrew Rybchenko 
15858294ee6SAndrew Rybchenko static boolean_t
1597965557eSAndrew Rybchenko sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
1607965557eSAndrew Rybchenko {
1617965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
1627965557eSAndrew Rybchenko 
1637965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
1647965557eSAndrew Rybchenko 		evq->evq_index, label, id);
1657965557eSAndrew Rybchenko 	return B_TRUE;
1667965557eSAndrew Rybchenko }
1677965557eSAndrew Rybchenko 
1687965557eSAndrew Rybchenko static boolean_t
169428c7dddSIvan Malov sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
17058294ee6SAndrew Rybchenko {
17158294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
172428c7dddSIvan Malov 	struct sfc_txq *txq;
173428c7dddSIvan Malov 	unsigned int stop;
174428c7dddSIvan Malov 	unsigned int delta;
17558294ee6SAndrew Rybchenko 
176428c7dddSIvan Malov 	txq = evq->txq;
177428c7dddSIvan Malov 
178428c7dddSIvan Malov 	SFC_ASSERT(txq != NULL);
179428c7dddSIvan Malov 	SFC_ASSERT(txq->evq == evq);
180428c7dddSIvan Malov 
181428c7dddSIvan Malov 	if (unlikely((txq->state & SFC_TXQ_STARTED) == 0))
182428c7dddSIvan Malov 		goto done;
183428c7dddSIvan Malov 
184428c7dddSIvan Malov 	stop = (id + 1) & txq->ptr_mask;
185428c7dddSIvan Malov 	id = txq->pending & txq->ptr_mask;
186428c7dddSIvan Malov 
187428c7dddSIvan Malov 	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);
188428c7dddSIvan Malov 
189428c7dddSIvan Malov 	txq->pending += delta;
190428c7dddSIvan Malov 
191428c7dddSIvan Malov done:
192428c7dddSIvan Malov 	return B_FALSE;
19358294ee6SAndrew Rybchenko }
19458294ee6SAndrew Rybchenko 
19558294ee6SAndrew Rybchenko static boolean_t
19658294ee6SAndrew Rybchenko sfc_ev_exception(void *arg, __rte_unused uint32_t code,
19758294ee6SAndrew Rybchenko 		 __rte_unused uint32_t data)
19858294ee6SAndrew Rybchenko {
19958294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
20058294ee6SAndrew Rybchenko 
20198200dd9SAndrew Rybchenko 	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
20298200dd9SAndrew Rybchenko 		return B_FALSE;
20398200dd9SAndrew Rybchenko 
20498200dd9SAndrew Rybchenko 	evq->exception = B_TRUE;
20598200dd9SAndrew Rybchenko 	sfc_warn(evq->sa,
20698200dd9SAndrew Rybchenko 		 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
20798200dd9SAndrew Rybchenko 		 " needs recovery",
20898200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
20998200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
21098200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
21198200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
21298200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
21398200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
21498200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
21598200dd9SAndrew Rybchenko 		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
21698200dd9SAndrew Rybchenko 		 "UNKNOWN",
21798200dd9SAndrew Rybchenko 		 code, data, evq->evq_index);
21898200dd9SAndrew Rybchenko 
21958294ee6SAndrew Rybchenko 	return B_TRUE;
22058294ee6SAndrew Rybchenko }
22158294ee6SAndrew Rybchenko 
22258294ee6SAndrew Rybchenko static boolean_t
2237965557eSAndrew Rybchenko sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
2247965557eSAndrew Rybchenko {
2257965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
2267965557eSAndrew Rybchenko 
2277965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
2287965557eSAndrew Rybchenko 		evq->evq_index, rxq_hw_index);
2297965557eSAndrew Rybchenko 	return B_TRUE;
2307965557eSAndrew Rybchenko }
2317965557eSAndrew Rybchenko 
2327965557eSAndrew Rybchenko static boolean_t
23358294ee6SAndrew Rybchenko sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
23458294ee6SAndrew Rybchenko {
23558294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
236*df1bfde4SAndrew Rybchenko 	struct sfc_dp_rxq *dp_rxq;
23728944ac0SAndrew Rybchenko 	struct sfc_rxq *rxq;
23858294ee6SAndrew Rybchenko 
239*df1bfde4SAndrew Rybchenko 	dp_rxq = evq->dp_rxq;
240*df1bfde4SAndrew Rybchenko 	SFC_ASSERT(dp_rxq != NULL);
241*df1bfde4SAndrew Rybchenko 
242*df1bfde4SAndrew Rybchenko 	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
24328944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq != NULL);
24428944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
24528944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->evq == evq);
24628944ac0SAndrew Rybchenko 	sfc_rx_qflush_done(rxq);
24728944ac0SAndrew Rybchenko 
24828944ac0SAndrew Rybchenko 	return B_FALSE;
24958294ee6SAndrew Rybchenko }
25058294ee6SAndrew Rybchenko 
25158294ee6SAndrew Rybchenko static boolean_t
2527965557eSAndrew Rybchenko sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
2537965557eSAndrew Rybchenko {
2547965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
2557965557eSAndrew Rybchenko 
2567965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
2577965557eSAndrew Rybchenko 		evq->evq_index, rxq_hw_index);
2587965557eSAndrew Rybchenko 	return B_TRUE;
2597965557eSAndrew Rybchenko }
2607965557eSAndrew Rybchenko 
2617965557eSAndrew Rybchenko static boolean_t
26258294ee6SAndrew Rybchenko sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
26358294ee6SAndrew Rybchenko {
26458294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
265*df1bfde4SAndrew Rybchenko 	struct sfc_dp_rxq *dp_rxq;
26628944ac0SAndrew Rybchenko 	struct sfc_rxq *rxq;
26758294ee6SAndrew Rybchenko 
268*df1bfde4SAndrew Rybchenko 	dp_rxq = evq->dp_rxq;
269*df1bfde4SAndrew Rybchenko 	SFC_ASSERT(dp_rxq != NULL);
270*df1bfde4SAndrew Rybchenko 
271*df1bfde4SAndrew Rybchenko 	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
27228944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq != NULL);
27328944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
27428944ac0SAndrew Rybchenko 	SFC_ASSERT(rxq->evq == evq);
27528944ac0SAndrew Rybchenko 	sfc_rx_qflush_failed(rxq);
27628944ac0SAndrew Rybchenko 
27728944ac0SAndrew Rybchenko 	return B_FALSE;
27858294ee6SAndrew Rybchenko }
27958294ee6SAndrew Rybchenko 
28058294ee6SAndrew Rybchenko static boolean_t
2817965557eSAndrew Rybchenko sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
2827965557eSAndrew Rybchenko {
2837965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
2847965557eSAndrew Rybchenko 
2857965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
2867965557eSAndrew Rybchenko 		evq->evq_index, txq_hw_index);
2877965557eSAndrew Rybchenko 	return B_TRUE;
2887965557eSAndrew Rybchenko }
2897965557eSAndrew Rybchenko 
2907965557eSAndrew Rybchenko static boolean_t
29158294ee6SAndrew Rybchenko sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
29258294ee6SAndrew Rybchenko {
29358294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
294fed9aeb4SIvan Malov 	struct sfc_txq *txq;
29558294ee6SAndrew Rybchenko 
296fed9aeb4SIvan Malov 	txq = evq->txq;
297fed9aeb4SIvan Malov 	SFC_ASSERT(txq != NULL);
298fed9aeb4SIvan Malov 	SFC_ASSERT(txq->hw_index == txq_hw_index);
299fed9aeb4SIvan Malov 	SFC_ASSERT(txq->evq == evq);
300fed9aeb4SIvan Malov 	sfc_tx_qflush_done(txq);
301fed9aeb4SIvan Malov 
302fed9aeb4SIvan Malov 	return B_FALSE;
30358294ee6SAndrew Rybchenko }
30458294ee6SAndrew Rybchenko 
30558294ee6SAndrew Rybchenko static boolean_t
30658294ee6SAndrew Rybchenko sfc_ev_software(void *arg, uint16_t magic)
30758294ee6SAndrew Rybchenko {
30858294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
30958294ee6SAndrew Rybchenko 
31058294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
31158294ee6SAndrew Rybchenko 		evq->evq_index, magic);
31258294ee6SAndrew Rybchenko 	return B_TRUE;
31358294ee6SAndrew Rybchenko }
31458294ee6SAndrew Rybchenko 
31558294ee6SAndrew Rybchenko static boolean_t
31658294ee6SAndrew Rybchenko sfc_ev_sram(void *arg, uint32_t code)
31758294ee6SAndrew Rybchenko {
31858294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
31958294ee6SAndrew Rybchenko 
32058294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
32158294ee6SAndrew Rybchenko 		evq->evq_index, code);
32258294ee6SAndrew Rybchenko 	return B_TRUE;
32358294ee6SAndrew Rybchenko }
32458294ee6SAndrew Rybchenko 
32558294ee6SAndrew Rybchenko static boolean_t
32658294ee6SAndrew Rybchenko sfc_ev_wake_up(void *arg, uint32_t index)
32758294ee6SAndrew Rybchenko {
32858294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
32958294ee6SAndrew Rybchenko 
33058294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
33158294ee6SAndrew Rybchenko 		evq->evq_index, index);
33258294ee6SAndrew Rybchenko 	return B_TRUE;
33358294ee6SAndrew Rybchenko }
33458294ee6SAndrew Rybchenko 
33558294ee6SAndrew Rybchenko static boolean_t
33658294ee6SAndrew Rybchenko sfc_ev_timer(void *arg, uint32_t index)
33758294ee6SAndrew Rybchenko {
33858294ee6SAndrew Rybchenko 	struct sfc_evq *evq = arg;
33958294ee6SAndrew Rybchenko 
34058294ee6SAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
34158294ee6SAndrew Rybchenko 		evq->evq_index, index);
34258294ee6SAndrew Rybchenko 	return B_TRUE;
34358294ee6SAndrew Rybchenko }
34458294ee6SAndrew Rybchenko 
34558294ee6SAndrew Rybchenko static boolean_t
3467965557eSAndrew Rybchenko sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
3477965557eSAndrew Rybchenko {
3487965557eSAndrew Rybchenko 	struct sfc_evq *evq = arg;
3497965557eSAndrew Rybchenko 
3507965557eSAndrew Rybchenko 	sfc_err(evq->sa, "EVQ %u unexpected link change event",
3517965557eSAndrew Rybchenko 		evq->evq_index);
3527965557eSAndrew Rybchenko 	return B_TRUE;
3537965557eSAndrew Rybchenko }
3547965557eSAndrew Rybchenko 
/*
 * Link change event handler.
 *
 * Translates the reported link mode into rte_eth_link and publishes it
 * into dev_link with a lock-free 64-bit compare-and-set, bumping the
 * LSC sequence counter only when the link state actually changed.
 */
static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;
	struct sfc_adapter *sa = evq->sa;
	struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
	struct rte_eth_link new_link;
	uint64_t new_link_u64;
	uint64_t old_link_u64;

	/* The CAS below relies on rte_eth_link fitting into 64 bits */
	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));

	sfc_port_link_mode_to_info(link_mode, &new_link);

	new_link_u64 = *(uint64_t *)&new_link;
	do {
		old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
		/* Nothing to do if the link state is unchanged */
		if (old_link_u64 == new_link_u64)
			break;

		/* Retry the CAS if another writer raced in between */
		if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					old_link_u64, new_link_u64)) {
			evq->sa->port.lsc_seq++;
			break;
		}
	} while (B_TRUE);

	return B_FALSE;
}
38458294ee6SAndrew Rybchenko 
/*
 * Callbacks for an event queue not bound to any Rx or Tx queue:
 * Rx/Tx and flush events are unexpected (NOP handlers log an error),
 * while link change events are handled for real.
 */
static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_link_change,
};
39958294ee6SAndrew Rybchenko 
/*
 * Callbacks for an event queue serving an RxQ using the EFX (libefx)
 * Rx datapath: Rx events are processed by sfc_ev_efx_rx; Tx and link
 * change events are unexpected.
 */
static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_efx_rx,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};
414*df1bfde4SAndrew Rybchenko 
/*
 * Callbacks for an event queue serving an RxQ with a non-EFX datapath:
 * Rx events are not delivered through these callbacks (NOP handler),
 * but RxQ flush notifications still are.
 */
static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};
4297965557eSAndrew Rybchenko 
/*
 * Callbacks for an event queue serving a TxQ: Tx completion and TxQ
 * flush done events are handled; Rx-related and link change events
 * are unexpected.
 */
static const efx_ev_callbacks_t sfc_ev_callbacks_tx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_tx			= sfc_ev_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};
4447965557eSAndrew Rybchenko 
44558294ee6SAndrew Rybchenko 
/*
 * Poll the event queue and dispatch events via the installed callbacks.
 *
 * If an exception was flagged during processing and the adapter lock
 * can be taken, try to recover by restarting the associated Rx or Tx
 * queue; if the exception persists, panic, since there is no further
 * recovery possible from here.
 */
void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronize the DMA memory for reading not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);

	/* trylock: if the lock is busy, recovery is retried on a later poll */
	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
		struct sfc_adapter *sa = evq->sa;
		int rc;

		if (evq->dp_rxq != NULL) {
			unsigned int rxq_sw_index;

			rxq_sw_index = evq->dp_rxq->dpq.queue_id;

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}

		if (evq->txq != NULL) {
			unsigned int txq_sw_index = sfc_txq_sw_index(evq->txq);

			sfc_warn(sa,
				 "restart TxQ %u because of exception on its EvQ %u",
				 txq_sw_index, evq->evq_index);

			sfc_tx_qstop(sa, txq_sw_index);
			rc = sfc_tx_qstart(sa, txq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart TxQ %u",
					txq_sw_index);
		}

		/* Queue restart clears the flag on success; otherwise fatal */
		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}
49958294ee6SAndrew Rybchenko 
5009a75f75cSAndrew Rybchenko void
5019a75f75cSAndrew Rybchenko sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
5029a75f75cSAndrew Rybchenko {
5039a75f75cSAndrew Rybchenko 	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
5049a75f75cSAndrew Rybchenko 		struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;
5059a75f75cSAndrew Rybchenko 
5069a75f75cSAndrew Rybchenko 		if (mgmt_evq->init_state == SFC_EVQ_STARTED)
5079a75f75cSAndrew Rybchenko 			sfc_ev_qpoll(mgmt_evq);
5089a75f75cSAndrew Rybchenko 
5099a75f75cSAndrew Rybchenko 		rte_spinlock_unlock(&sa->mgmt_evq_lock);
5109a75f75cSAndrew Rybchenko 	}
5119a75f75cSAndrew Rybchenko }
5129a75f75cSAndrew Rybchenko 
51358294ee6SAndrew Rybchenko int
51458294ee6SAndrew Rybchenko sfc_ev_qprime(struct sfc_evq *evq)
51558294ee6SAndrew Rybchenko {
51658294ee6SAndrew Rybchenko 	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
51758294ee6SAndrew Rybchenko 	return efx_ev_qprime(evq->common, evq->read_ptr);
51858294ee6SAndrew Rybchenko }
51958294ee6SAndrew Rybchenko 
52058294ee6SAndrew Rybchenko int
52158294ee6SAndrew Rybchenko sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index)
52258294ee6SAndrew Rybchenko {
52358294ee6SAndrew Rybchenko 	const struct sfc_evq_info *evq_info;
52458294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
52558294ee6SAndrew Rybchenko 	efsys_mem_t *esmp;
52658294ee6SAndrew Rybchenko 	unsigned int total_delay_us;
52758294ee6SAndrew Rybchenko 	unsigned int delay_us;
52858294ee6SAndrew Rybchenko 	int rc;
52958294ee6SAndrew Rybchenko 
53058294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
53158294ee6SAndrew Rybchenko 
53258294ee6SAndrew Rybchenko 	evq_info = &sa->evq_info[sw_index];
53358294ee6SAndrew Rybchenko 	evq = evq_info->evq;
53458294ee6SAndrew Rybchenko 	esmp = &evq->mem;
53558294ee6SAndrew Rybchenko 
53658294ee6SAndrew Rybchenko 	/* Clear all events */
53758294ee6SAndrew Rybchenko 	(void)memset((void *)esmp->esm_base, 0xff,
53858294ee6SAndrew Rybchenko 		     EFX_EVQ_SIZE(evq_info->entries));
53958294ee6SAndrew Rybchenko 
54058294ee6SAndrew Rybchenko 	/* Create the common code event queue */
54158294ee6SAndrew Rybchenko 	rc = efx_ev_qcreate(sa->nic, sw_index, esmp, evq_info->entries,
542c22d3c50SAndrew Rybchenko 			    0 /* unused on EF10 */, 0, evq_info->flags,
54358294ee6SAndrew Rybchenko 			    &evq->common);
54458294ee6SAndrew Rybchenko 	if (rc != 0)
54558294ee6SAndrew Rybchenko 		goto fail_ev_qcreate;
54658294ee6SAndrew Rybchenko 
547*df1bfde4SAndrew Rybchenko 	SFC_ASSERT(evq->dp_rxq == NULL || evq->txq == NULL);
548*df1bfde4SAndrew Rybchenko 	if (evq->dp_rxq != 0) {
549*df1bfde4SAndrew Rybchenko 		if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
550*df1bfde4SAndrew Rybchenko 			evq->callbacks = &sfc_ev_callbacks_efx_rx;
5517965557eSAndrew Rybchenko 		else
552*df1bfde4SAndrew Rybchenko 			evq->callbacks = &sfc_ev_callbacks_dp_rx;
553*df1bfde4SAndrew Rybchenko 	} else if (evq->txq != 0) {
554*df1bfde4SAndrew Rybchenko 		evq->callbacks = &sfc_ev_callbacks_tx;
555*df1bfde4SAndrew Rybchenko 	} else {
5567965557eSAndrew Rybchenko 		evq->callbacks = &sfc_ev_callbacks;
557*df1bfde4SAndrew Rybchenko 	}
5587965557eSAndrew Rybchenko 
55958294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_STARTING;
56058294ee6SAndrew Rybchenko 
56158294ee6SAndrew Rybchenko 	/* Wait for the initialization event */
56258294ee6SAndrew Rybchenko 	total_delay_us = 0;
56358294ee6SAndrew Rybchenko 	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
56458294ee6SAndrew Rybchenko 	do {
56558294ee6SAndrew Rybchenko 		(void)sfc_ev_qpoll(evq);
56658294ee6SAndrew Rybchenko 
56758294ee6SAndrew Rybchenko 		/* Check to see if the initialization complete indication
56858294ee6SAndrew Rybchenko 		 * posted by the hardware.
56958294ee6SAndrew Rybchenko 		 */
57058294ee6SAndrew Rybchenko 		if (evq->init_state == SFC_EVQ_STARTED)
57158294ee6SAndrew Rybchenko 			goto done;
57258294ee6SAndrew Rybchenko 
57358294ee6SAndrew Rybchenko 		/* Give event queue some time to init */
57458294ee6SAndrew Rybchenko 		rte_delay_us(delay_us);
57558294ee6SAndrew Rybchenko 
57658294ee6SAndrew Rybchenko 		total_delay_us += delay_us;
57758294ee6SAndrew Rybchenko 
57858294ee6SAndrew Rybchenko 		/* Exponential backoff */
57958294ee6SAndrew Rybchenko 		delay_us *= 2;
58058294ee6SAndrew Rybchenko 		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
58158294ee6SAndrew Rybchenko 			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;
58258294ee6SAndrew Rybchenko 
58358294ee6SAndrew Rybchenko 	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);
58458294ee6SAndrew Rybchenko 
58558294ee6SAndrew Rybchenko 	rc = ETIMEDOUT;
58658294ee6SAndrew Rybchenko 	goto fail_timedout;
58758294ee6SAndrew Rybchenko 
58858294ee6SAndrew Rybchenko done:
58958294ee6SAndrew Rybchenko 	return 0;
59058294ee6SAndrew Rybchenko 
59158294ee6SAndrew Rybchenko fail_timedout:
59258294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_INITIALIZED;
59358294ee6SAndrew Rybchenko 	efx_ev_qdestroy(evq->common);
59458294ee6SAndrew Rybchenko 
59558294ee6SAndrew Rybchenko fail_ev_qcreate:
59658294ee6SAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
59758294ee6SAndrew Rybchenko 	return rc;
59858294ee6SAndrew Rybchenko }
59958294ee6SAndrew Rybchenko 
60058294ee6SAndrew Rybchenko void
60158294ee6SAndrew Rybchenko sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
60258294ee6SAndrew Rybchenko {
60358294ee6SAndrew Rybchenko 	const struct sfc_evq_info *evq_info;
60458294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
60558294ee6SAndrew Rybchenko 
60658294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
60758294ee6SAndrew Rybchenko 
60858294ee6SAndrew Rybchenko 	SFC_ASSERT(sw_index < sa->evq_count);
60958294ee6SAndrew Rybchenko 
61058294ee6SAndrew Rybchenko 	evq_info = &sa->evq_info[sw_index];
61158294ee6SAndrew Rybchenko 	evq = evq_info->evq;
61258294ee6SAndrew Rybchenko 
61358294ee6SAndrew Rybchenko 	if (evq == NULL || evq->init_state != SFC_EVQ_STARTED)
61458294ee6SAndrew Rybchenko 		return;
61558294ee6SAndrew Rybchenko 
61658294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_INITIALIZED;
6177965557eSAndrew Rybchenko 	evq->callbacks = NULL;
61858294ee6SAndrew Rybchenko 	evq->read_ptr = 0;
61958294ee6SAndrew Rybchenko 	evq->exception = B_FALSE;
62058294ee6SAndrew Rybchenko 
62158294ee6SAndrew Rybchenko 	efx_ev_qdestroy(evq->common);
62258294ee6SAndrew Rybchenko }
62358294ee6SAndrew Rybchenko 
6242de39f4eSAndrew Rybchenko static void
6252de39f4eSAndrew Rybchenko sfc_ev_mgmt_periodic_qpoll(void *arg)
6262de39f4eSAndrew Rybchenko {
6272de39f4eSAndrew Rybchenko 	struct sfc_adapter *sa = arg;
6282de39f4eSAndrew Rybchenko 	int rc;
6292de39f4eSAndrew Rybchenko 
6302de39f4eSAndrew Rybchenko 	sfc_ev_mgmt_qpoll(sa);
6312de39f4eSAndrew Rybchenko 
6322de39f4eSAndrew Rybchenko 	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
6332de39f4eSAndrew Rybchenko 			       sfc_ev_mgmt_periodic_qpoll, sa);
634323706abSAndrew Rybchenko 	if (rc == -ENOTSUP) {
635323706abSAndrew Rybchenko 		sfc_warn(sa, "alarms are not supported");
636323706abSAndrew Rybchenko 		sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
637323706abSAndrew Rybchenko 	} else if (rc != 0) {
638323706abSAndrew Rybchenko 		sfc_err(sa,
6392de39f4eSAndrew Rybchenko 			"cannot rearm management EVQ polling alarm (rc=%d)",
6402de39f4eSAndrew Rybchenko 			rc);
6412de39f4eSAndrew Rybchenko 	}
642323706abSAndrew Rybchenko }
6432de39f4eSAndrew Rybchenko 
static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	/*
	 * The first direct call polls the management EVQ once and arms
	 * the periodic alarm which keeps re-invoking itself afterwards.
	 */
	sfc_ev_mgmt_periodic_qpoll(sa);
}
6492de39f4eSAndrew Rybchenko 
static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	/* Cancel the alarm armed by sfc_ev_mgmt_periodic_qpoll() */
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}
6552de39f4eSAndrew Rybchenko 
65658294ee6SAndrew Rybchenko int
65758294ee6SAndrew Rybchenko sfc_ev_start(struct sfc_adapter *sa)
65858294ee6SAndrew Rybchenko {
65958294ee6SAndrew Rybchenko 	int rc;
66058294ee6SAndrew Rybchenko 
66158294ee6SAndrew Rybchenko 	sfc_log_init(sa, "entry");
66258294ee6SAndrew Rybchenko 
66358294ee6SAndrew Rybchenko 	rc = efx_ev_init(sa->nic);
66458294ee6SAndrew Rybchenko 	if (rc != 0)
66558294ee6SAndrew Rybchenko 		goto fail_ev_init;
66658294ee6SAndrew Rybchenko 
6679a75f75cSAndrew Rybchenko 	/* Start management EVQ used for global events */
6689a75f75cSAndrew Rybchenko 	rte_spinlock_lock(&sa->mgmt_evq_lock);
6699a75f75cSAndrew Rybchenko 
6709a75f75cSAndrew Rybchenko 	rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
6719a75f75cSAndrew Rybchenko 	if (rc != 0)
6729a75f75cSAndrew Rybchenko 		goto fail_mgmt_evq_start;
6739a75f75cSAndrew Rybchenko 
6743b809c27SAndrew Rybchenko 	if (sa->intr.lsc_intr) {
6753b809c27SAndrew Rybchenko 		rc = sfc_ev_qprime(sa->evq_info[sa->mgmt_evq_index].evq);
6763b809c27SAndrew Rybchenko 		if (rc != 0)
6773b809c27SAndrew Rybchenko 			goto fail_evq0_prime;
6783b809c27SAndrew Rybchenko 	}
6793b809c27SAndrew Rybchenko 
6809a75f75cSAndrew Rybchenko 	rte_spinlock_unlock(&sa->mgmt_evq_lock);
6819a75f75cSAndrew Rybchenko 
68258294ee6SAndrew Rybchenko 	/*
6832de39f4eSAndrew Rybchenko 	 * Start management EVQ polling. If interrupts are disabled
6842de39f4eSAndrew Rybchenko 	 * (not used), it is required to process link status change
6852de39f4eSAndrew Rybchenko 	 * and other device level events to avoid unrecoverable
6862de39f4eSAndrew Rybchenko 	 * error because the event queue overflow.
6872de39f4eSAndrew Rybchenko 	 */
6882de39f4eSAndrew Rybchenko 	sfc_ev_mgmt_periodic_qpoll_start(sa);
6892de39f4eSAndrew Rybchenko 
6902de39f4eSAndrew Rybchenko 	/*
6919a75f75cSAndrew Rybchenko 	 * Rx/Tx event queues are started/stopped when corresponding
6929a75f75cSAndrew Rybchenko 	 * Rx/Tx queue is started/stopped.
69358294ee6SAndrew Rybchenko 	 */
69458294ee6SAndrew Rybchenko 
69558294ee6SAndrew Rybchenko 	return 0;
69658294ee6SAndrew Rybchenko 
6973b809c27SAndrew Rybchenko fail_evq0_prime:
6983b809c27SAndrew Rybchenko 	sfc_ev_qstop(sa, 0);
6993b809c27SAndrew Rybchenko 
7009a75f75cSAndrew Rybchenko fail_mgmt_evq_start:
7019a75f75cSAndrew Rybchenko 	rte_spinlock_unlock(&sa->mgmt_evq_lock);
7029a75f75cSAndrew Rybchenko 	efx_ev_fini(sa->nic);
7039a75f75cSAndrew Rybchenko 
70458294ee6SAndrew Rybchenko fail_ev_init:
70558294ee6SAndrew Rybchenko 	sfc_log_init(sa, "failed %d", rc);
70658294ee6SAndrew Rybchenko 	return rc;
70758294ee6SAndrew Rybchenko }
70858294ee6SAndrew Rybchenko 
70958294ee6SAndrew Rybchenko void
71058294ee6SAndrew Rybchenko sfc_ev_stop(struct sfc_adapter *sa)
71158294ee6SAndrew Rybchenko {
71258294ee6SAndrew Rybchenko 	unsigned int sw_index;
71358294ee6SAndrew Rybchenko 
71458294ee6SAndrew Rybchenko 	sfc_log_init(sa, "entry");
71558294ee6SAndrew Rybchenko 
7162de39f4eSAndrew Rybchenko 	sfc_ev_mgmt_periodic_qpoll_stop(sa);
7172de39f4eSAndrew Rybchenko 
71858294ee6SAndrew Rybchenko 	/* Make sure that all event queues are stopped */
71958294ee6SAndrew Rybchenko 	sw_index = sa->evq_count;
7209a75f75cSAndrew Rybchenko 	while (sw_index-- > 0) {
7219a75f75cSAndrew Rybchenko 		if (sw_index == sa->mgmt_evq_index) {
7229a75f75cSAndrew Rybchenko 			/* Locks are required for the management EVQ */
7239a75f75cSAndrew Rybchenko 			rte_spinlock_lock(&sa->mgmt_evq_lock);
7249a75f75cSAndrew Rybchenko 			sfc_ev_qstop(sa, sa->mgmt_evq_index);
7259a75f75cSAndrew Rybchenko 			rte_spinlock_unlock(&sa->mgmt_evq_lock);
7269a75f75cSAndrew Rybchenko 		} else {
72758294ee6SAndrew Rybchenko 			sfc_ev_qstop(sa, sw_index);
7289a75f75cSAndrew Rybchenko 		}
7299a75f75cSAndrew Rybchenko 	}
73058294ee6SAndrew Rybchenko 
73158294ee6SAndrew Rybchenko 	efx_ev_fini(sa->nic);
73258294ee6SAndrew Rybchenko }
73358294ee6SAndrew Rybchenko 
73458294ee6SAndrew Rybchenko int
73558294ee6SAndrew Rybchenko sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
73658294ee6SAndrew Rybchenko 	     unsigned int entries, int socket_id)
73758294ee6SAndrew Rybchenko {
73858294ee6SAndrew Rybchenko 	struct sfc_evq_info *evq_info;
73958294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
74058294ee6SAndrew Rybchenko 	int rc;
74158294ee6SAndrew Rybchenko 
74258294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
74358294ee6SAndrew Rybchenko 
74458294ee6SAndrew Rybchenko 	evq_info = &sa->evq_info[sw_index];
74558294ee6SAndrew Rybchenko 
74658294ee6SAndrew Rybchenko 	SFC_ASSERT(rte_is_power_of_2(entries));
74758294ee6SAndrew Rybchenko 	SFC_ASSERT(entries <= evq_info->max_entries);
74858294ee6SAndrew Rybchenko 	evq_info->entries = entries;
74958294ee6SAndrew Rybchenko 
75058294ee6SAndrew Rybchenko 	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
75158294ee6SAndrew Rybchenko 				 socket_id);
75258294ee6SAndrew Rybchenko 	if (evq == NULL)
75358294ee6SAndrew Rybchenko 		return ENOMEM;
75458294ee6SAndrew Rybchenko 
75558294ee6SAndrew Rybchenko 	evq->sa = sa;
75658294ee6SAndrew Rybchenko 	evq->evq_index = sw_index;
75758294ee6SAndrew Rybchenko 
75858294ee6SAndrew Rybchenko 	/* Allocate DMA space */
75958294ee6SAndrew Rybchenko 	rc = sfc_dma_alloc(sa, "evq", sw_index, EFX_EVQ_SIZE(evq_info->entries),
76058294ee6SAndrew Rybchenko 			   socket_id, &evq->mem);
76158294ee6SAndrew Rybchenko 	if (rc != 0)
76258294ee6SAndrew Rybchenko 		return rc;
76358294ee6SAndrew Rybchenko 
76458294ee6SAndrew Rybchenko 	evq->init_state = SFC_EVQ_INITIALIZED;
76558294ee6SAndrew Rybchenko 
76658294ee6SAndrew Rybchenko 	evq_info->evq = evq;
76758294ee6SAndrew Rybchenko 
76858294ee6SAndrew Rybchenko 	return 0;
76958294ee6SAndrew Rybchenko }
77058294ee6SAndrew Rybchenko 
77158294ee6SAndrew Rybchenko void
77258294ee6SAndrew Rybchenko sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index)
77358294ee6SAndrew Rybchenko {
77458294ee6SAndrew Rybchenko 	struct sfc_evq *evq;
77558294ee6SAndrew Rybchenko 
77658294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
77758294ee6SAndrew Rybchenko 
77858294ee6SAndrew Rybchenko 	evq = sa->evq_info[sw_index].evq;
77958294ee6SAndrew Rybchenko 
78058294ee6SAndrew Rybchenko 	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);
78158294ee6SAndrew Rybchenko 
78258294ee6SAndrew Rybchenko 	sa->evq_info[sw_index].evq = NULL;
78358294ee6SAndrew Rybchenko 
78458294ee6SAndrew Rybchenko 	sfc_dma_free(sa, &evq->mem);
78558294ee6SAndrew Rybchenko 
78658294ee6SAndrew Rybchenko 	rte_free(evq);
78758294ee6SAndrew Rybchenko }
78858294ee6SAndrew Rybchenko 
78958294ee6SAndrew Rybchenko static int
79058294ee6SAndrew Rybchenko sfc_ev_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
79158294ee6SAndrew Rybchenko {
79258294ee6SAndrew Rybchenko 	struct sfc_evq_info *evq_info = &sa->evq_info[sw_index];
79358294ee6SAndrew Rybchenko 	unsigned int max_entries;
79458294ee6SAndrew Rybchenko 
79558294ee6SAndrew Rybchenko 	sfc_log_init(sa, "sw_index=%u", sw_index);
79658294ee6SAndrew Rybchenko 
79758294ee6SAndrew Rybchenko 	max_entries = sfc_evq_max_entries(sa, sw_index);
79858294ee6SAndrew Rybchenko 	SFC_ASSERT(rte_is_power_of_2(max_entries));
79958294ee6SAndrew Rybchenko 
80058294ee6SAndrew Rybchenko 	evq_info->max_entries = max_entries;
8013b809c27SAndrew Rybchenko 	evq_info->flags = sa->evq_flags |
8023b809c27SAndrew Rybchenko 		((sa->intr.lsc_intr && sw_index == sa->mgmt_evq_index) ?
8033b809c27SAndrew Rybchenko 			EFX_EVQ_FLAGS_NOTIFY_INTERRUPT :
8043b809c27SAndrew Rybchenko 			EFX_EVQ_FLAGS_NOTIFY_DISABLED);
805c22d3c50SAndrew Rybchenko 
806c22d3c50SAndrew Rybchenko 	return 0;
807c22d3c50SAndrew Rybchenko }
808c22d3c50SAndrew Rybchenko 
809c22d3c50SAndrew Rybchenko static int
810c22d3c50SAndrew Rybchenko sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
811c22d3c50SAndrew Rybchenko 			       const char *value_str, void *opaque)
812c22d3c50SAndrew Rybchenko {
813c22d3c50SAndrew Rybchenko 	uint64_t *value = opaque;
814c22d3c50SAndrew Rybchenko 
815c22d3c50SAndrew Rybchenko 	if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
816c22d3c50SAndrew Rybchenko 		*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
817c22d3c50SAndrew Rybchenko 	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
818c22d3c50SAndrew Rybchenko 		*value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
819c22d3c50SAndrew Rybchenko 	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
820c22d3c50SAndrew Rybchenko 		*value = EFX_EVQ_FLAGS_TYPE_AUTO;
821c22d3c50SAndrew Rybchenko 	else
822c22d3c50SAndrew Rybchenko 		return -EINVAL;
82358294ee6SAndrew Rybchenko 
82458294ee6SAndrew Rybchenko 	return 0;
82558294ee6SAndrew Rybchenko }
82658294ee6SAndrew Rybchenko 
/*
 * Counterpart of sfc_ev_qinit_info(). sfc_ev_qinit_info() only fills in
 * plain fields of the info entry, so there is no dynamically allocated
 * state to release here; the function is kept for init/fini symmetry.
 */
static void
sfc_ev_qfini_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	sfc_log_init(sa, "sw_index=%u", sw_index);

	/* Nothing to cleanup */
}
83458294ee6SAndrew Rybchenko 
/*
 * One-time event queue subsystem initialization: parse the performance
 * profile device argument, allocate the per-queue info array, fill in
 * per-queue info and create the management EVQ.
 *
 * Returns 0 on success or a positive errno value on failure (kvargs
 * processing is the exception: its return code is passed through).
 */
int
sfc_ev_init(struct sfc_adapter *sa)
{
	int rc;
	unsigned int sw_index;

	sfc_log_init(sa, "entry");

	/* Default profile unless overridden by the device argument */
	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
				sfc_kvarg_perf_profile_handler,
				&sa->evq_flags);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value",
			SFC_KVARG_PERF_PROFILE);
		goto fail_kvarg_perf_profile;
	}

	sa->evq_count = sfc_ev_qcount(sa);
	/* Management EVQ always occupies software index 0 */
	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	/* Allocate EVQ info array */
	rc = ENOMEM;
	sa->evq_info = rte_calloc_socket("sfc-evqs", sa->evq_count,
					 sizeof(struct sfc_evq_info), 0,
					 sa->socket_id);
	if (sa->evq_info == NULL)
		goto fail_evqs_alloc;

	for (sw_index = 0; sw_index < sa->evq_count; ++sw_index) {
		rc = sfc_ev_qinit_info(sa, sw_index);
		if (rc != 0)
			goto fail_ev_qinit_info;
	}

	/* Only the management EVQ is created here */
	rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:
	/*
	 * Fall through: on mgmt EVQ failure sw_index == evq_count, so the
	 * loop below undoes qinit_info for every queue; on qinit_info
	 * failure it undoes only the entries initialized so far.
	 */
fail_ev_qinit_info:
	while (sw_index-- > 0)
		sfc_ev_qfini_info(sa, sw_index);

	rte_free(sa->evq_info);
	sa->evq_info = NULL;

fail_evqs_alloc:
	sa->evq_count = 0;

fail_kvarg_perf_profile:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
89858294ee6SAndrew Rybchenko 
89958294ee6SAndrew Rybchenko void
90058294ee6SAndrew Rybchenko sfc_ev_fini(struct sfc_adapter *sa)
90158294ee6SAndrew Rybchenko {
90258294ee6SAndrew Rybchenko 	int sw_index;
90358294ee6SAndrew Rybchenko 
90458294ee6SAndrew Rybchenko 	sfc_log_init(sa, "entry");
90558294ee6SAndrew Rybchenko 
90658294ee6SAndrew Rybchenko 	/* Cleanup all event queues */
90758294ee6SAndrew Rybchenko 	sw_index = sa->evq_count;
90858294ee6SAndrew Rybchenko 	while (--sw_index >= 0) {
90958294ee6SAndrew Rybchenko 		if (sa->evq_info[sw_index].evq != NULL)
91058294ee6SAndrew Rybchenko 			sfc_ev_qfini(sa, sw_index);
91158294ee6SAndrew Rybchenko 		sfc_ev_qfini_info(sa, sw_index);
91258294ee6SAndrew Rybchenko 	}
91358294ee6SAndrew Rybchenko 
91458294ee6SAndrew Rybchenko 	rte_free(sa->evq_info);
91558294ee6SAndrew Rybchenko 	sa->evq_info = NULL;
91658294ee6SAndrew Rybchenko 	sa->evq_count = 0;
91758294ee6SAndrew Rybchenko }
918