144c0947bSAndrew Rybchenko /* SPDX-License-Identifier: BSD-3-Clause
2244cfa79SAndrew Rybchenko *
398d26ef7SAndrew Rybchenko * Copyright(c) 2019-2021 Xilinx, Inc.
4a0147be5SAndrew Rybchenko * Copyright(c) 2016-2019 Solarflare Communications Inc.
558294ee6SAndrew Rybchenko *
658294ee6SAndrew Rybchenko * This software was jointly developed between OKTET Labs (under contract
758294ee6SAndrew Rybchenko * for Solarflare) and Solarflare Communications, Inc.
858294ee6SAndrew Rybchenko */
958294ee6SAndrew Rybchenko
1058294ee6SAndrew Rybchenko #include <rte_debug.h>
1158294ee6SAndrew Rybchenko #include <rte_cycles.h>
122de39f4eSAndrew Rybchenko #include <rte_alarm.h>
1377f2d053SAndrew Rybchenko #include <rte_branch_prediction.h>
1458294ee6SAndrew Rybchenko
1558294ee6SAndrew Rybchenko #include "efx.h"
1658294ee6SAndrew Rybchenko
1758294ee6SAndrew Rybchenko #include "sfc.h"
1858294ee6SAndrew Rybchenko #include "sfc_debug.h"
1958294ee6SAndrew Rybchenko #include "sfc_log.h"
2058294ee6SAndrew Rybchenko #include "sfc_ev.h"
2128944ac0SAndrew Rybchenko #include "sfc_rx.h"
22fed9aeb4SIvan Malov #include "sfc_tx.h"
23c22d3c50SAndrew Rybchenko #include "sfc_kvargs.h"
2458294ee6SAndrew Rybchenko
2558294ee6SAndrew Rybchenko
2658294ee6SAndrew Rybchenko /* Initial delay when waiting for event queue init complete event */
2758294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_BACKOFF_START_US (1)
2858294ee6SAndrew Rybchenko /* Maximum delay between event queue polling attempts */
2958294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_BACKOFF_MAX_US (10 * 1000)
3058294ee6SAndrew Rybchenko /* Event queue init approx timeout */
3158294ee6SAndrew Rybchenko #define SFC_EVQ_INIT_TIMEOUT_US (2 * US_PER_S)
3258294ee6SAndrew Rybchenko
332de39f4eSAndrew Rybchenko /* Management event queue polling period in microseconds */
342de39f4eSAndrew Rybchenko #define SFC_MGMT_EV_QPOLL_PERIOD_US (US_PER_S)
352de39f4eSAndrew Rybchenko
36298951a4SAndrew Rybchenko static const char *
sfc_evq_type2str(enum sfc_evq_type type)37298951a4SAndrew Rybchenko sfc_evq_type2str(enum sfc_evq_type type)
38298951a4SAndrew Rybchenko {
39298951a4SAndrew Rybchenko switch (type) {
40298951a4SAndrew Rybchenko case SFC_EVQ_TYPE_MGMT:
41298951a4SAndrew Rybchenko return "mgmt-evq";
42298951a4SAndrew Rybchenko case SFC_EVQ_TYPE_RX:
43298951a4SAndrew Rybchenko return "rx-evq";
44298951a4SAndrew Rybchenko case SFC_EVQ_TYPE_TX:
45298951a4SAndrew Rybchenko return "tx-evq";
46298951a4SAndrew Rybchenko default:
47298951a4SAndrew Rybchenko SFC_ASSERT(B_FALSE);
48298951a4SAndrew Rybchenko return NULL;
49298951a4SAndrew Rybchenko }
50298951a4SAndrew Rybchenko }
5158294ee6SAndrew Rybchenko
5258294ee6SAndrew Rybchenko static boolean_t
sfc_ev_initialized(void * arg)5358294ee6SAndrew Rybchenko sfc_ev_initialized(void *arg)
5458294ee6SAndrew Rybchenko {
5558294ee6SAndrew Rybchenko struct sfc_evq *evq = arg;
5658294ee6SAndrew Rybchenko
5758294ee6SAndrew Rybchenko /* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
5858294ee6SAndrew Rybchenko SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
5958294ee6SAndrew Rybchenko evq->init_state == SFC_EVQ_STARTED);
6058294ee6SAndrew Rybchenko
6158294ee6SAndrew Rybchenko evq->init_state = SFC_EVQ_STARTED;
6258294ee6SAndrew Rybchenko
6358294ee6SAndrew Rybchenko return B_FALSE;
6458294ee6SAndrew Rybchenko }
6558294ee6SAndrew Rybchenko
6658294ee6SAndrew Rybchenko static boolean_t
sfc_ev_nop_rx(void * arg,uint32_t label,uint32_t id,uint32_t size,uint16_t flags)677965557eSAndrew Rybchenko sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
687965557eSAndrew Rybchenko uint32_t size, uint16_t flags)
697965557eSAndrew Rybchenko {
707965557eSAndrew Rybchenko struct sfc_evq *evq = arg;
717965557eSAndrew Rybchenko
727965557eSAndrew Rybchenko sfc_err(evq->sa,
737965557eSAndrew Rybchenko "EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
747965557eSAndrew Rybchenko evq->evq_index, label, id, size, flags);
757965557eSAndrew Rybchenko return B_TRUE;
767965557eSAndrew Rybchenko }
777965557eSAndrew Rybchenko
/*
 * Rx completion event handler for the libefx-based Rx datapath.
 *
 * 'id' is the ring index of the last descriptor covered by this event;
 * 'size' and 'flags' apply to every descriptor completed by the event.
 * The handler records size/flags into the completed software descriptors
 * and advances the RxQ 'pending' counter.
 *
 * All paths return B_FALSE (keep polling); fatal inconsistencies set
 * evq->exception instead, so that the queue gets restarted later.
 */
static boolean_t
sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_efx_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	/* Ignore further Rx events once the EVQ is in exception state */
	if (unlikely(evq->exception))
		goto done;

	rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);

	/*
	 * Number of newly completed descriptors: from the current pending
	 * position up to and including 'id', with ring wrap-around handled
	 * via ptr_mask.
	 */
	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * Rx event with no new descriptors done and zero length
		 * is used to abort scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid RX abort "
				"(id=%#x size=%u flags=%#x); needs restart",
				evq->evq_index, rxq->dp.dpq.queue_id,
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		/* More descriptors than a batch can complete: out of order */
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart",
			evq->evq_index, rxq->dp.dpq.queue_id,
			id, delta, flags);

		goto done;
	}

	/* Record size and flags on each descriptor completed by the event */
	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		/* Descriptor size field is a uint16_t */
		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}
15058294ee6SAndrew Rybchenko
15158294ee6SAndrew Rybchenko static boolean_t
sfc_ev_dp_rx(void * arg,__rte_unused uint32_t label,uint32_t id,__rte_unused uint32_t size,__rte_unused uint16_t flags)152638bddc9SAndrew Rybchenko sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
153638bddc9SAndrew Rybchenko __rte_unused uint32_t size, __rte_unused uint16_t flags)
154638bddc9SAndrew Rybchenko {
155638bddc9SAndrew Rybchenko struct sfc_evq *evq = arg;
156638bddc9SAndrew Rybchenko struct sfc_dp_rxq *dp_rxq;
157638bddc9SAndrew Rybchenko
158638bddc9SAndrew Rybchenko dp_rxq = evq->dp_rxq;
159638bddc9SAndrew Rybchenko SFC_ASSERT(dp_rxq != NULL);
160638bddc9SAndrew Rybchenko
1615dec95e3SAndrew Rybchenko SFC_ASSERT(evq->sa->priv.dp_rx->qrx_ev != NULL);
1625dec95e3SAndrew Rybchenko return evq->sa->priv.dp_rx->qrx_ev(dp_rxq, id);
163638bddc9SAndrew Rybchenko }
164638bddc9SAndrew Rybchenko
165638bddc9SAndrew Rybchenko static boolean_t
sfc_ev_nop_rx_packets(void * arg,uint32_t label,unsigned int num_packets,uint32_t flags)1663379085aSAndrew Rybchenko sfc_ev_nop_rx_packets(void *arg, uint32_t label, unsigned int num_packets,
1673379085aSAndrew Rybchenko uint32_t flags)
1683379085aSAndrew Rybchenko {
1693379085aSAndrew Rybchenko struct sfc_evq *evq = arg;
1703379085aSAndrew Rybchenko
1713379085aSAndrew Rybchenko sfc_err(evq->sa,
1723379085aSAndrew Rybchenko "EVQ %u unexpected Rx packets event label=%u num=%u flags=%#x",
1733379085aSAndrew Rybchenko evq->evq_index, label, num_packets, flags);
1743379085aSAndrew Rybchenko return B_TRUE;
1753379085aSAndrew Rybchenko }
1763379085aSAndrew Rybchenko
1773379085aSAndrew Rybchenko static boolean_t
sfc_ev_dp_rx_packets(void * arg,__rte_unused uint32_t label,unsigned int num_packets,__rte_unused uint32_t flags)1783379085aSAndrew Rybchenko sfc_ev_dp_rx_packets(void *arg, __rte_unused uint32_t label,
1793379085aSAndrew Rybchenko unsigned int num_packets, __rte_unused uint32_t flags)
1803379085aSAndrew Rybchenko {
1813379085aSAndrew Rybchenko struct sfc_evq *evq = arg;
1823379085aSAndrew Rybchenko struct sfc_dp_rxq *dp_rxq;
1833379085aSAndrew Rybchenko
1843379085aSAndrew Rybchenko dp_rxq = evq->dp_rxq;
1853379085aSAndrew Rybchenko SFC_ASSERT(dp_rxq != NULL);
1863379085aSAndrew Rybchenko
1873379085aSAndrew Rybchenko SFC_ASSERT(evq->sa->priv.dp_rx->qrx_ev != NULL);
1883379085aSAndrew Rybchenko return evq->sa->priv.dp_rx->qrx_ev(dp_rxq, num_packets);
1893379085aSAndrew Rybchenko }
1903379085aSAndrew Rybchenko
1913379085aSAndrew Rybchenko static boolean_t
sfc_ev_nop_rx_ps(void * arg,uint32_t label,uint32_t id,uint32_t pkt_count,uint16_t flags)192390f9b8dSAndrew Rybchenko sfc_ev_nop_rx_ps(void *arg, uint32_t label, uint32_t id,
193390f9b8dSAndrew Rybchenko uint32_t pkt_count, uint16_t flags)
194390f9b8dSAndrew Rybchenko {
195390f9b8dSAndrew Rybchenko struct sfc_evq *evq = arg;
196390f9b8dSAndrew Rybchenko
197390f9b8dSAndrew Rybchenko sfc_err(evq->sa,
198390f9b8dSAndrew Rybchenko "EVQ %u unexpected packed stream Rx event label=%u id=%#x pkt_count=%u flags=%#x",
199390f9b8dSAndrew Rybchenko evq->evq_index, label, id, pkt_count, flags);
200390f9b8dSAndrew Rybchenko return B_TRUE;
201390f9b8dSAndrew Rybchenko }
202390f9b8dSAndrew Rybchenko
203390f9b8dSAndrew Rybchenko /* It is not actually used on datapath, but required on RxQ flush */
204390f9b8dSAndrew Rybchenko static boolean_t
sfc_ev_dp_rx_ps(void * arg,__rte_unused uint32_t label,uint32_t id,__rte_unused uint32_t pkt_count,__rte_unused uint16_t flags)205390f9b8dSAndrew Rybchenko sfc_ev_dp_rx_ps(void *arg, __rte_unused uint32_t label, uint32_t id,
206390f9b8dSAndrew Rybchenko __rte_unused uint32_t pkt_count, __rte_unused uint16_t flags)
207390f9b8dSAndrew Rybchenko {
208390f9b8dSAndrew Rybchenko struct sfc_evq *evq = arg;
209390f9b8dSAndrew Rybchenko struct sfc_dp_rxq *dp_rxq;
210390f9b8dSAndrew Rybchenko
211390f9b8dSAndrew Rybchenko dp_rxq = evq->dp_rxq;
212390f9b8dSAndrew Rybchenko SFC_ASSERT(dp_rxq != NULL);
213390f9b8dSAndrew Rybchenko
2145dec95e3SAndrew Rybchenko if (evq->sa->priv.dp_rx->qrx_ps_ev != NULL)
2155dec95e3SAndrew Rybchenko return evq->sa->priv.dp_rx->qrx_ps_ev(dp_rxq, id);
216390f9b8dSAndrew Rybchenko else
217390f9b8dSAndrew Rybchenko return B_FALSE;
218390f9b8dSAndrew Rybchenko }
219390f9b8dSAndrew Rybchenko
220390f9b8dSAndrew Rybchenko static boolean_t
sfc_ev_nop_tx(void * arg,uint32_t label,uint32_t id)2217965557eSAndrew Rybchenko sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
2227965557eSAndrew Rybchenko {
2237965557eSAndrew Rybchenko struct sfc_evq *evq = arg;
2247965557eSAndrew Rybchenko
2257965557eSAndrew Rybchenko sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
2267965557eSAndrew Rybchenko evq->evq_index, label, id);
2277965557eSAndrew Rybchenko return B_TRUE;
2287965557eSAndrew Rybchenko }
2297965557eSAndrew Rybchenko
/*
 * Tx completion event handler for the libefx-based Tx datapath.
 *
 * 'id' is the ring index of the last descriptor covered by this event
 * (completion is reported up to and including 'id'). The handler
 * advances the TxQ 'pending' counter accordingly; descriptor reclaim
 * happens elsewhere.
 *
 * Always returns B_FALSE so that event queue polling continues.
 */
static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_efx_txq *txq;
	unsigned int stop;
	unsigned int delta;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_efx_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq->evq == evq);

	/* Ignore completions that race with queue stop */
	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
		goto done;

	/*
	 * 'id' is reused below to hold the current pending position;
	 * delta is the number of newly completed descriptors with ring
	 * wrap-around handled via ptr_mask.
	 */
	stop = (id + 1) & txq->ptr_mask;
	id = txq->pending & txq->ptr_mask;

	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);

	txq->pending += delta;

done:
	return B_FALSE;
}
25858294ee6SAndrew Rybchenko
25958294ee6SAndrew Rybchenko static boolean_t
sfc_ev_dp_tx(void * arg,__rte_unused uint32_t label,uint32_t id)2608b00f426SAndrew Rybchenko sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
2618b00f426SAndrew Rybchenko {
2628b00f426SAndrew Rybchenko struct sfc_evq *evq = arg;
2638b00f426SAndrew Rybchenko struct sfc_dp_txq *dp_txq;
2648b00f426SAndrew Rybchenko
2658b00f426SAndrew Rybchenko dp_txq = evq->dp_txq;
2668b00f426SAndrew Rybchenko SFC_ASSERT(dp_txq != NULL);
2678b00f426SAndrew Rybchenko
2685dec95e3SAndrew Rybchenko SFC_ASSERT(evq->sa->priv.dp_tx->qtx_ev != NULL);
2695dec95e3SAndrew Rybchenko return evq->sa->priv.dp_tx->qtx_ev(dp_txq, id);
2708b00f426SAndrew Rybchenko }
2718b00f426SAndrew Rybchenko
2728b00f426SAndrew Rybchenko static boolean_t
sfc_ev_nop_tx_ndescs(void * arg,uint32_t label,unsigned int ndescs)27383c31c99SAndrew Rybchenko sfc_ev_nop_tx_ndescs(void *arg, uint32_t label, unsigned int ndescs)
27483c31c99SAndrew Rybchenko {
27583c31c99SAndrew Rybchenko struct sfc_evq *evq = arg;
27683c31c99SAndrew Rybchenko
27783c31c99SAndrew Rybchenko sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u ndescs=%#x",
27883c31c99SAndrew Rybchenko evq->evq_index, label, ndescs);
27983c31c99SAndrew Rybchenko return B_TRUE;
28083c31c99SAndrew Rybchenko }
28183c31c99SAndrew Rybchenko
28283c31c99SAndrew Rybchenko static boolean_t
sfc_ev_dp_tx_ndescs(void * arg,__rte_unused uint32_t label,unsigned int ndescs)28383c31c99SAndrew Rybchenko sfc_ev_dp_tx_ndescs(void *arg, __rte_unused uint32_t label,
28483c31c99SAndrew Rybchenko unsigned int ndescs)
28583c31c99SAndrew Rybchenko {
28683c31c99SAndrew Rybchenko struct sfc_evq *evq = arg;
28783c31c99SAndrew Rybchenko struct sfc_dp_txq *dp_txq;
28883c31c99SAndrew Rybchenko
28983c31c99SAndrew Rybchenko dp_txq = evq->dp_txq;
29083c31c99SAndrew Rybchenko SFC_ASSERT(dp_txq != NULL);
29183c31c99SAndrew Rybchenko
29283c31c99SAndrew Rybchenko SFC_ASSERT(evq->sa->priv.dp_tx->qtx_ev != NULL);
29383c31c99SAndrew Rybchenko return evq->sa->priv.dp_tx->qtx_ev(dp_txq, ndescs);
29483c31c99SAndrew Rybchenko }
29583c31c99SAndrew Rybchenko
29683c31c99SAndrew Rybchenko static boolean_t
sfc_ev_exception(void * arg,uint32_t code,__rte_unused uint32_t data)297dd2c630aSFerruh Yigit sfc_ev_exception(void *arg, uint32_t code, __rte_unused uint32_t data)
29858294ee6SAndrew Rybchenko {
29958294ee6SAndrew Rybchenko struct sfc_evq *evq = arg;
30058294ee6SAndrew Rybchenko
30198200dd9SAndrew Rybchenko if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
30298200dd9SAndrew Rybchenko return B_FALSE;
30398200dd9SAndrew Rybchenko
30498200dd9SAndrew Rybchenko evq->exception = B_TRUE;
30598200dd9SAndrew Rybchenko sfc_warn(evq->sa,
30698200dd9SAndrew Rybchenko "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
30798200dd9SAndrew Rybchenko " needs recovery",
30898200dd9SAndrew Rybchenko (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
30998200dd9SAndrew Rybchenko (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
31098200dd9SAndrew Rybchenko (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
31198200dd9SAndrew Rybchenko (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
31298200dd9SAndrew Rybchenko (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
31398200dd9SAndrew Rybchenko (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
31498200dd9SAndrew Rybchenko (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
31598200dd9SAndrew Rybchenko (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
31698200dd9SAndrew Rybchenko "UNKNOWN",
31798200dd9SAndrew Rybchenko code, data, evq->evq_index);
31898200dd9SAndrew Rybchenko
31958294ee6SAndrew Rybchenko return B_TRUE;
32058294ee6SAndrew Rybchenko }
32158294ee6SAndrew Rybchenko
32258294ee6SAndrew Rybchenko static boolean_t
sfc_ev_nop_rxq_flush_done(void * arg,uint32_t rxq_hw_index)3237965557eSAndrew Rybchenko sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
3247965557eSAndrew Rybchenko {
3257965557eSAndrew Rybchenko struct sfc_evq *evq = arg;
3267965557eSAndrew Rybchenko
3277965557eSAndrew Rybchenko sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
3287965557eSAndrew Rybchenko evq->evq_index, rxq_hw_index);
3297965557eSAndrew Rybchenko return B_TRUE;
3307965557eSAndrew Rybchenko }
3317965557eSAndrew Rybchenko
3327965557eSAndrew Rybchenko static boolean_t
sfc_ev_rxq_flush_done(void * arg,__rte_unused uint32_t rxq_hw_index)33358294ee6SAndrew Rybchenko sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
33458294ee6SAndrew Rybchenko {
33558294ee6SAndrew Rybchenko struct sfc_evq *evq = arg;
336df1bfde4SAndrew Rybchenko struct sfc_dp_rxq *dp_rxq;
33728944ac0SAndrew Rybchenko struct sfc_rxq *rxq;
33858294ee6SAndrew Rybchenko
339df1bfde4SAndrew Rybchenko dp_rxq = evq->dp_rxq;
340df1bfde4SAndrew Rybchenko SFC_ASSERT(dp_rxq != NULL);
341df1bfde4SAndrew Rybchenko
342df1bfde4SAndrew Rybchenko rxq = sfc_rxq_by_dp_rxq(dp_rxq);
34328944ac0SAndrew Rybchenko SFC_ASSERT(rxq != NULL);
34428944ac0SAndrew Rybchenko SFC_ASSERT(rxq->hw_index == rxq_hw_index);
34528944ac0SAndrew Rybchenko SFC_ASSERT(rxq->evq == evq);
3462e42d78dSAndrew Rybchenko RTE_SET_USED(rxq);
3472e42d78dSAndrew Rybchenko
3482e42d78dSAndrew Rybchenko sfc_rx_qflush_done(sfc_rxq_info_by_dp_rxq(dp_rxq));
34928944ac0SAndrew Rybchenko
35028944ac0SAndrew Rybchenko return B_FALSE;
35158294ee6SAndrew Rybchenko }
35258294ee6SAndrew Rybchenko
35358294ee6SAndrew Rybchenko static boolean_t
sfc_ev_nop_rxq_flush_failed(void * arg,uint32_t rxq_hw_index)3547965557eSAndrew Rybchenko sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
3557965557eSAndrew Rybchenko {
3567965557eSAndrew Rybchenko struct sfc_evq *evq = arg;
3577965557eSAndrew Rybchenko
3587965557eSAndrew Rybchenko sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
3597965557eSAndrew Rybchenko evq->evq_index, rxq_hw_index);
3607965557eSAndrew Rybchenko return B_TRUE;
3617965557eSAndrew Rybchenko }
3627965557eSAndrew Rybchenko
3637965557eSAndrew Rybchenko static boolean_t
sfc_ev_rxq_flush_failed(void * arg,__rte_unused uint32_t rxq_hw_index)36458294ee6SAndrew Rybchenko sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
36558294ee6SAndrew Rybchenko {
36658294ee6SAndrew Rybchenko struct sfc_evq *evq = arg;
367df1bfde4SAndrew Rybchenko struct sfc_dp_rxq *dp_rxq;
36828944ac0SAndrew Rybchenko struct sfc_rxq *rxq;
36958294ee6SAndrew Rybchenko
370df1bfde4SAndrew Rybchenko dp_rxq = evq->dp_rxq;
371df1bfde4SAndrew Rybchenko SFC_ASSERT(dp_rxq != NULL);
372df1bfde4SAndrew Rybchenko
373df1bfde4SAndrew Rybchenko rxq = sfc_rxq_by_dp_rxq(dp_rxq);
37428944ac0SAndrew Rybchenko SFC_ASSERT(rxq != NULL);
37528944ac0SAndrew Rybchenko SFC_ASSERT(rxq->hw_index == rxq_hw_index);
37628944ac0SAndrew Rybchenko SFC_ASSERT(rxq->evq == evq);
3772e42d78dSAndrew Rybchenko RTE_SET_USED(rxq);
3782e42d78dSAndrew Rybchenko
3792e42d78dSAndrew Rybchenko sfc_rx_qflush_failed(sfc_rxq_info_by_dp_rxq(dp_rxq));
38028944ac0SAndrew Rybchenko
38128944ac0SAndrew Rybchenko return B_FALSE;
38258294ee6SAndrew Rybchenko }
38358294ee6SAndrew Rybchenko
38458294ee6SAndrew Rybchenko static boolean_t
sfc_ev_nop_txq_flush_done(void * arg,uint32_t txq_hw_index)3857965557eSAndrew Rybchenko sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
3867965557eSAndrew Rybchenko {
3877965557eSAndrew Rybchenko struct sfc_evq *evq = arg;
3887965557eSAndrew Rybchenko
3897965557eSAndrew Rybchenko sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
3907965557eSAndrew Rybchenko evq->evq_index, txq_hw_index);
3917965557eSAndrew Rybchenko return B_TRUE;
3927965557eSAndrew Rybchenko }
3937965557eSAndrew Rybchenko
3947965557eSAndrew Rybchenko static boolean_t
sfc_ev_txq_flush_done(void * arg,__rte_unused uint32_t txq_hw_index)39558294ee6SAndrew Rybchenko sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
39658294ee6SAndrew Rybchenko {
39758294ee6SAndrew Rybchenko struct sfc_evq *evq = arg;
398dbdc8241SAndrew Rybchenko struct sfc_dp_txq *dp_txq;
399fed9aeb4SIvan Malov struct sfc_txq *txq;
40058294ee6SAndrew Rybchenko
401dbdc8241SAndrew Rybchenko dp_txq = evq->dp_txq;
402dbdc8241SAndrew Rybchenko SFC_ASSERT(dp_txq != NULL);
403dbdc8241SAndrew Rybchenko
404dbdc8241SAndrew Rybchenko txq = sfc_txq_by_dp_txq(dp_txq);
405fed9aeb4SIvan Malov SFC_ASSERT(txq != NULL);
406fed9aeb4SIvan Malov SFC_ASSERT(txq->hw_index == txq_hw_index);
407fed9aeb4SIvan Malov SFC_ASSERT(txq->evq == evq);
408561508daSAndrew Rybchenko RTE_SET_USED(txq);
409561508daSAndrew Rybchenko
410561508daSAndrew Rybchenko sfc_tx_qflush_done(sfc_txq_info_by_dp_txq(dp_txq));
411fed9aeb4SIvan Malov
412fed9aeb4SIvan Malov return B_FALSE;
41358294ee6SAndrew Rybchenko }
41458294ee6SAndrew Rybchenko
41558294ee6SAndrew Rybchenko static boolean_t
sfc_ev_software(void * arg,uint16_t magic)41658294ee6SAndrew Rybchenko sfc_ev_software(void *arg, uint16_t magic)
41758294ee6SAndrew Rybchenko {
41858294ee6SAndrew Rybchenko struct sfc_evq *evq = arg;
41958294ee6SAndrew Rybchenko
42058294ee6SAndrew Rybchenko sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
42158294ee6SAndrew Rybchenko evq->evq_index, magic);
42258294ee6SAndrew Rybchenko return B_TRUE;
42358294ee6SAndrew Rybchenko }
42458294ee6SAndrew Rybchenko
42558294ee6SAndrew Rybchenko static boolean_t
sfc_ev_sram(void * arg,uint32_t code)42658294ee6SAndrew Rybchenko sfc_ev_sram(void *arg, uint32_t code)
42758294ee6SAndrew Rybchenko {
42858294ee6SAndrew Rybchenko struct sfc_evq *evq = arg;
42958294ee6SAndrew Rybchenko
43058294ee6SAndrew Rybchenko sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
43158294ee6SAndrew Rybchenko evq->evq_index, code);
43258294ee6SAndrew Rybchenko return B_TRUE;
43358294ee6SAndrew Rybchenko }
43458294ee6SAndrew Rybchenko
43558294ee6SAndrew Rybchenko static boolean_t
sfc_ev_wake_up(void * arg,uint32_t index)43658294ee6SAndrew Rybchenko sfc_ev_wake_up(void *arg, uint32_t index)
43758294ee6SAndrew Rybchenko {
43858294ee6SAndrew Rybchenko struct sfc_evq *evq = arg;
43958294ee6SAndrew Rybchenko
44058294ee6SAndrew Rybchenko sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
44158294ee6SAndrew Rybchenko evq->evq_index, index);
44258294ee6SAndrew Rybchenko return B_TRUE;
44358294ee6SAndrew Rybchenko }
44458294ee6SAndrew Rybchenko
44558294ee6SAndrew Rybchenko static boolean_t
sfc_ev_timer(void * arg,uint32_t index)44658294ee6SAndrew Rybchenko sfc_ev_timer(void *arg, uint32_t index)
44758294ee6SAndrew Rybchenko {
44858294ee6SAndrew Rybchenko struct sfc_evq *evq = arg;
44958294ee6SAndrew Rybchenko
45058294ee6SAndrew Rybchenko sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
45158294ee6SAndrew Rybchenko evq->evq_index, index);
45258294ee6SAndrew Rybchenko return B_TRUE;
45358294ee6SAndrew Rybchenko }
45458294ee6SAndrew Rybchenko
45558294ee6SAndrew Rybchenko static boolean_t
sfc_ev_nop_link_change(void * arg,__rte_unused efx_link_mode_t link_mode)4567965557eSAndrew Rybchenko sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
4577965557eSAndrew Rybchenko {
4587965557eSAndrew Rybchenko struct sfc_evq *evq = arg;
4597965557eSAndrew Rybchenko
4607965557eSAndrew Rybchenko sfc_err(evq->sa, "EVQ %u unexpected link change event",
4617965557eSAndrew Rybchenko evq->evq_index);
4627965557eSAndrew Rybchenko return B_TRUE;
4637965557eSAndrew Rybchenko }
4647965557eSAndrew Rybchenko
4657965557eSAndrew Rybchenko static boolean_t
sfc_ev_link_change(void * arg,efx_link_mode_t link_mode)466886f8d8aSArtem Andreev sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
46758294ee6SAndrew Rybchenko {
46858294ee6SAndrew Rybchenko struct sfc_evq *evq = arg;
469886f8d8aSArtem Andreev struct sfc_adapter *sa = evq->sa;
470886f8d8aSArtem Andreev struct rte_eth_link new_link;
471886f8d8aSArtem Andreev
472886f8d8aSArtem Andreev sfc_port_link_mode_to_info(link_mode, &new_link);
473282b72cdSAndrew Rybchenko if (rte_eth_linkstatus_set(sa->eth_dev, &new_link) == 0)
4743b809c27SAndrew Rybchenko evq->sa->port.lsc_seq++;
475886f8d8aSArtem Andreev
476886f8d8aSArtem Andreev return B_FALSE;
47758294ee6SAndrew Rybchenko }
47858294ee6SAndrew Rybchenko
/*
 * Default event callbacks: link change events are consumed, while every
 * Rx/Tx datapath event is reported as unexpected by a nop handler.
 * Used for an EVQ with no datapath queue attached.
 */
static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_nop_rx,
	.eec_rx_packets = sfc_ev_nop_rx_packets,
	.eec_rx_ps = sfc_ev_nop_rx_ps,
	.eec_tx = sfc_ev_nop_tx,
	.eec_tx_ndescs = sfc_ev_nop_tx_ndescs,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_link_change,
};
49658294ee6SAndrew Rybchenko
/*
 * Event callbacks for an EVQ serving a libefx-based Rx queue: Rx
 * completions and RxQ flush notifications are processed; Tx and link
 * change events are reported as unexpected by nop handlers.
 */
static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_efx_rx,
	.eec_rx_packets = sfc_ev_nop_rx_packets,
	.eec_rx_ps = sfc_ev_nop_rx_ps,
	.eec_tx = sfc_ev_nop_tx,
	.eec_tx_ndescs = sfc_ev_nop_tx_ndescs,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};
514df1bfde4SAndrew Rybchenko
/*
 * Event callbacks for an EVQ serving a datapath-specific Rx queue:
 * Rx events are forwarded to the datapath qrx_ev/qrx_ps_ev callbacks;
 * Tx and link change events are reported as unexpected.
 */
static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_dp_rx,
	.eec_rx_packets = sfc_ev_dp_rx_packets,
	.eec_rx_ps = sfc_ev_dp_rx_ps,
	.eec_tx = sfc_ev_nop_tx,
	.eec_tx_ndescs = sfc_ev_nop_tx_ndescs,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_nop_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};
5327965557eSAndrew Rybchenko
/*
 * Event callbacks for an EVQ serving a libefx-based Tx queue: Tx
 * completions and TxQ flush notifications are processed; Rx and link
 * change events are reported as unexpected by nop handlers.
 */
static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_nop_rx,
	.eec_rx_packets = sfc_ev_nop_rx_packets,
	.eec_rx_ps = sfc_ev_nop_rx_ps,
	.eec_tx = sfc_ev_tx,
	.eec_tx_ndescs = sfc_ev_nop_tx_ndescs,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};
5507965557eSAndrew Rybchenko
/*
 * Event callbacks for an EVQ serving a datapath-specific Tx queue:
 * Tx events are forwarded to the datapath qtx_ev callback; Rx and
 * link change events are reported as unexpected.
 */
static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
	.eec_initialized = sfc_ev_initialized,
	.eec_rx = sfc_ev_nop_rx,
	.eec_rx_packets = sfc_ev_nop_rx_packets,
	.eec_rx_ps = sfc_ev_nop_rx_ps,
	.eec_tx = sfc_ev_dp_tx,
	.eec_tx_ndescs = sfc_ev_dp_tx_ndescs,
	.eec_exception = sfc_ev_exception,
	.eec_rxq_flush_done = sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed = sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done = sfc_ev_txq_flush_done,
	.eec_software = sfc_ev_software,
	.eec_sram = sfc_ev_sram,
	.eec_wake_up = sfc_ev_wake_up,
	.eec_timer = sfc_ev_timer,
	.eec_link_change = sfc_ev_nop_link_change,
};
568dbdc8241SAndrew Rybchenko
56958294ee6SAndrew Rybchenko
/*
 * Poll the event queue and dispatch pending events through evq->callbacks.
 * If an exception event was observed, attempt recovery by restarting the
 * Rx or Tx queue served by this EvQ; if the exception flag is still set
 * afterwards, the error is unrecoverable and the driver panics.
 * Recovery is skipped (deferred to a later poll) when the adapter lock
 * cannot be taken without blocking.
 */
void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	struct sfc_adapter *sa;

	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronize the DMA memory for reading not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);

	sa = evq->sa;
	if (unlikely(evq->exception) && sfc_adapter_trylock(sa)) {
		int rc;

		if (evq->dp_rxq != NULL) {
			sfc_sw_index_t rxq_sw_index;

			rxq_sw_index = evq->dp_rxq->dpq.queue_id;

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			/* Restarting the queue clears the exception flag */
			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}

		if (evq->dp_txq != NULL) {
			sfc_sw_index_t txq_sw_index;

			txq_sw_index = evq->dp_txq->dpq.queue_id;

			sfc_warn(sa,
				 "restart TxQ %u because of exception on its EvQ %u",
				 txq_sw_index, evq->evq_index);

			sfc_tx_qstop(sa, txq_sw_index);
			rc = sfc_tx_qstart(sa, txq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart TxQ %u",
					txq_sw_index);
		}

		/* If the restart above did not clear it, give up */
		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}
62758294ee6SAndrew Rybchenko
6289a75f75cSAndrew Rybchenko void
sfc_ev_mgmt_qpoll(struct sfc_adapter * sa)6299a75f75cSAndrew Rybchenko sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
6309a75f75cSAndrew Rybchenko {
6319a75f75cSAndrew Rybchenko if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
632f042136eSAndrew Rybchenko if (sa->mgmt_evq_running)
633f042136eSAndrew Rybchenko sfc_ev_qpoll(sa->mgmt_evq);
6349a75f75cSAndrew Rybchenko
6359a75f75cSAndrew Rybchenko rte_spinlock_unlock(&sa->mgmt_evq_lock);
6369a75f75cSAndrew Rybchenko }
6379a75f75cSAndrew Rybchenko }
6389a75f75cSAndrew Rybchenko
63958294ee6SAndrew Rybchenko int
sfc_ev_qprime(struct sfc_evq * evq)64058294ee6SAndrew Rybchenko sfc_ev_qprime(struct sfc_evq *evq)
64158294ee6SAndrew Rybchenko {
64258294ee6SAndrew Rybchenko SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
64358294ee6SAndrew Rybchenko return efx_ev_qprime(evq->common, evq->read_ptr);
64458294ee6SAndrew Rybchenko }
64558294ee6SAndrew Rybchenko
6466caeec47SAndrew Rybchenko /* Event queue HW index allocation scheme is described in sfc_ev.h. */
64758294ee6SAndrew Rybchenko int
sfc_ev_qstart(struct sfc_evq * evq,unsigned int hw_index)6486caeec47SAndrew Rybchenko sfc_ev_qstart(struct sfc_evq *evq, unsigned int hw_index)
64958294ee6SAndrew Rybchenko {
6506caeec47SAndrew Rybchenko struct sfc_adapter *sa = evq->sa;
65158294ee6SAndrew Rybchenko efsys_mem_t *esmp;
652df456342SAndrew Rybchenko uint32_t evq_flags = sa->evq_flags;
65370451221SAndrew Rybchenko uint32_t irq = 0;
65458294ee6SAndrew Rybchenko unsigned int total_delay_us;
65558294ee6SAndrew Rybchenko unsigned int delay_us;
65658294ee6SAndrew Rybchenko int rc;
65758294ee6SAndrew Rybchenko
6586caeec47SAndrew Rybchenko sfc_log_init(sa, "hw_index=%u", hw_index);
65958294ee6SAndrew Rybchenko
66058294ee6SAndrew Rybchenko esmp = &evq->mem;
66158294ee6SAndrew Rybchenko
6626caeec47SAndrew Rybchenko evq->evq_index = hw_index;
6636caeec47SAndrew Rybchenko
66458294ee6SAndrew Rybchenko /* Clear all events */
665afe3c756SIgor Romanov (void)memset((void *)esmp->esm_base, 0xff,
666f8a60f76SAndy Moreton efx_evq_size(sa->nic, evq->entries, evq_flags));
66758294ee6SAndrew Rybchenko
66870451221SAndrew Rybchenko if (sa->intr.lsc_intr && hw_index == sa->mgmt_evq_index) {
669df456342SAndrew Rybchenko evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
67070451221SAndrew Rybchenko irq = 0;
67170451221SAndrew Rybchenko } else if (sa->intr.rxq_intr && evq->dp_rxq != NULL) {
67270451221SAndrew Rybchenko sfc_ethdev_qid_t ethdev_qid;
67370451221SAndrew Rybchenko
67470451221SAndrew Rybchenko ethdev_qid =
67570451221SAndrew Rybchenko sfc_ethdev_rx_qid_by_rxq_sw_index(sfc_sa2shared(sa),
67670451221SAndrew Rybchenko evq->dp_rxq->dpq.queue_id);
67770451221SAndrew Rybchenko if (ethdev_qid != SFC_ETHDEV_QID_INVALID) {
67870451221SAndrew Rybchenko evq_flags |= EFX_EVQ_FLAGS_NOTIFY_INTERRUPT;
67970451221SAndrew Rybchenko /*
68070451221SAndrew Rybchenko * The first interrupt is used for management EvQ
68170451221SAndrew Rybchenko * (LSC etc). RxQ interrupts follow it.
68270451221SAndrew Rybchenko */
68370451221SAndrew Rybchenko irq = 1 + ethdev_qid;
68470451221SAndrew Rybchenko } else {
685df456342SAndrew Rybchenko evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;
68670451221SAndrew Rybchenko }
68770451221SAndrew Rybchenko } else {
68870451221SAndrew Rybchenko evq_flags |= EFX_EVQ_FLAGS_NOTIFY_DISABLED;
68970451221SAndrew Rybchenko }
690df456342SAndrew Rybchenko
69192fedcd3SIvan Malov evq->init_state = SFC_EVQ_STARTING;
69292fedcd3SIvan Malov
69358294ee6SAndrew Rybchenko /* Create the common code event queue */
69470451221SAndrew Rybchenko rc = efx_ev_qcreate_irq(sa->nic, hw_index, esmp, evq->entries,
695df456342SAndrew Rybchenko 0 /* unused on EF10 */, 0, evq_flags,
69670451221SAndrew Rybchenko irq, &evq->common);
69758294ee6SAndrew Rybchenko if (rc != 0)
69858294ee6SAndrew Rybchenko goto fail_ev_qcreate;
69958294ee6SAndrew Rybchenko
700dbdc8241SAndrew Rybchenko SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
701df1bfde4SAndrew Rybchenko if (evq->dp_rxq != 0) {
7025dec95e3SAndrew Rybchenko if (strcmp(sa->priv.dp_rx->dp.name,
7035dec95e3SAndrew Rybchenko SFC_KVARG_DATAPATH_EFX) == 0)
704df1bfde4SAndrew Rybchenko evq->callbacks = &sfc_ev_callbacks_efx_rx;
7057965557eSAndrew Rybchenko else
706df1bfde4SAndrew Rybchenko evq->callbacks = &sfc_ev_callbacks_dp_rx;
707dbdc8241SAndrew Rybchenko } else if (evq->dp_txq != 0) {
7085dec95e3SAndrew Rybchenko if (strcmp(sa->priv.dp_tx->dp.name,
7095dec95e3SAndrew Rybchenko SFC_KVARG_DATAPATH_EFX) == 0)
710dbdc8241SAndrew Rybchenko evq->callbacks = &sfc_ev_callbacks_efx_tx;
711dbdc8241SAndrew Rybchenko else
712dbdc8241SAndrew Rybchenko evq->callbacks = &sfc_ev_callbacks_dp_tx;
713df1bfde4SAndrew Rybchenko } else {
7147965557eSAndrew Rybchenko evq->callbacks = &sfc_ev_callbacks;
715df1bfde4SAndrew Rybchenko }
7167965557eSAndrew Rybchenko
71792fedcd3SIvan Malov /*
71892fedcd3SIvan Malov * Poll once to ensure that eec_initialized callback is invoked in
71992fedcd3SIvan Malov * case if the hardware does not support INIT_DONE events. If the
72092fedcd3SIvan Malov * hardware supports INIT_DONE events, this will do nothing, and the
72192fedcd3SIvan Malov * corresponding event will be processed by sfc_ev_qpoll() below.
72292fedcd3SIvan Malov */
72392fedcd3SIvan Malov efx_ev_qcreate_check_init_done(evq->common, evq->callbacks, evq);
72458294ee6SAndrew Rybchenko
72558294ee6SAndrew Rybchenko /* Wait for the initialization event */
72658294ee6SAndrew Rybchenko total_delay_us = 0;
72758294ee6SAndrew Rybchenko delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
72858294ee6SAndrew Rybchenko do {
72958294ee6SAndrew Rybchenko (void)sfc_ev_qpoll(evq);
73058294ee6SAndrew Rybchenko
73158294ee6SAndrew Rybchenko /* Check to see if the initialization complete indication
73258294ee6SAndrew Rybchenko * posted by the hardware.
73358294ee6SAndrew Rybchenko */
73458294ee6SAndrew Rybchenko if (evq->init_state == SFC_EVQ_STARTED)
73558294ee6SAndrew Rybchenko goto done;
73658294ee6SAndrew Rybchenko
73758294ee6SAndrew Rybchenko /* Give event queue some time to init */
73858294ee6SAndrew Rybchenko rte_delay_us(delay_us);
73958294ee6SAndrew Rybchenko
74058294ee6SAndrew Rybchenko total_delay_us += delay_us;
74158294ee6SAndrew Rybchenko
74258294ee6SAndrew Rybchenko /* Exponential backoff */
74358294ee6SAndrew Rybchenko delay_us *= 2;
74458294ee6SAndrew Rybchenko if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
74558294ee6SAndrew Rybchenko delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;
74658294ee6SAndrew Rybchenko
74758294ee6SAndrew Rybchenko } while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);
74858294ee6SAndrew Rybchenko
74958294ee6SAndrew Rybchenko rc = ETIMEDOUT;
75058294ee6SAndrew Rybchenko goto fail_timedout;
75158294ee6SAndrew Rybchenko
75258294ee6SAndrew Rybchenko done:
75358294ee6SAndrew Rybchenko return 0;
75458294ee6SAndrew Rybchenko
75558294ee6SAndrew Rybchenko fail_timedout:
75658294ee6SAndrew Rybchenko efx_ev_qdestroy(evq->common);
75758294ee6SAndrew Rybchenko
75858294ee6SAndrew Rybchenko fail_ev_qcreate:
75992fedcd3SIvan Malov evq->init_state = SFC_EVQ_INITIALIZED;
76058294ee6SAndrew Rybchenko sfc_log_init(sa, "failed %d", rc);
76158294ee6SAndrew Rybchenko return rc;
76258294ee6SAndrew Rybchenko }
76358294ee6SAndrew Rybchenko
/*
 * Stop the event queue and destroy its common code counterpart.
 * Safe to call with a NULL queue or a queue that is not started.
 */
void
sfc_ev_qstop(struct sfc_evq *evq)
{
	if (evq == NULL)
		return;

	sfc_log_init(evq->sa, "hw_index=%u", evq->evq_index);

	if (evq->init_state != SFC_EVQ_STARTED)
		return;

	/* Return to the initialized state and reset the polling context */
	evq->init_state = SFC_EVQ_INITIALIZED;
	evq->callbacks = NULL;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

	efx_ev_qdestroy(evq->common);

	evq->evq_index = 0;
}
78458294ee6SAndrew Rybchenko
7852de39f4eSAndrew Rybchenko static void
sfc_ev_mgmt_periodic_qpoll(void * arg)7862de39f4eSAndrew Rybchenko sfc_ev_mgmt_periodic_qpoll(void *arg)
7872de39f4eSAndrew Rybchenko {
7882de39f4eSAndrew Rybchenko struct sfc_adapter *sa = arg;
7892de39f4eSAndrew Rybchenko int rc;
7902de39f4eSAndrew Rybchenko
7912de39f4eSAndrew Rybchenko sfc_ev_mgmt_qpoll(sa);
7922de39f4eSAndrew Rybchenko
7932de39f4eSAndrew Rybchenko rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
7942de39f4eSAndrew Rybchenko sfc_ev_mgmt_periodic_qpoll, sa);
795323706abSAndrew Rybchenko if (rc == -ENOTSUP) {
796323706abSAndrew Rybchenko sfc_warn(sa, "alarms are not supported");
797323706abSAndrew Rybchenko sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
798323706abSAndrew Rybchenko } else if (rc != 0) {
799323706abSAndrew Rybchenko sfc_err(sa,
8002de39f4eSAndrew Rybchenko "cannot rearm management EVQ polling alarm (rc=%d)",
8012de39f4eSAndrew Rybchenko rc);
8022de39f4eSAndrew Rybchenko }
803323706abSAndrew Rybchenko }
8042de39f4eSAndrew Rybchenko
/* Kick off periodic management EvQ polling via the EAL alarm mechanism */
static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	/* The handler re-arms itself, so one initial call is enough */
	sfc_ev_mgmt_periodic_qpoll(sa);
}
8102de39f4eSAndrew Rybchenko
/* Cancel the periodic management EvQ polling alarm */
static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}
8162de39f4eSAndrew Rybchenko
/*
 * Initialize the event module and start the management event queue.
 * Rx/Tx event queues are started later together with the corresponding
 * Rx/Tx queues. Returns 0 on success, a non-zero error code otherwise.
 */
int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start management EVQ used for global events */

	/*
	 * Management event queue start polls the queue, but it cannot
	 * interfere with other polling contexts since mgmt_evq_running
	 * is false yet.
	 */
	rc = sfc_ev_qstart(sa->mgmt_evq, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

	/* Publish under the lock that the management EvQ may be polled now */
	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sa->mgmt_evq_running = true;
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	if (sa->intr.lsc_intr) {
		/* Arm the interrupt used for link status change events */
		rc = sfc_ev_qprime(sa->mgmt_evq);
		if (rc != 0)
			goto fail_mgmt_evq_prime;
	}

	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), it is required to process link status change
	 * and other device level events to avoid unrecoverable
	 * error because the event queue overflow.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when corresponding
	 * Rx/Tx queue is started/stopped.
	 */

	return 0;

fail_mgmt_evq_prime:
	sfc_ev_qstop(sa->mgmt_evq);

fail_mgmt_evq_start:
	efx_ev_fini(sa->nic);

fail_ev_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
87458294ee6SAndrew Rybchenko
/*
 * Stop the management event queue and finalize the event module.
 * Mirrors sfc_ev_start() in reverse order.
 */
void
sfc_ev_stop(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_mgmt_periodic_qpoll_stop(sa);

	/*
	 * Clear the running flag under the lock so no poll is in progress
	 * when the queue is stopped below; later polls see the flag false.
	 */
	rte_spinlock_lock(&sa->mgmt_evq_lock);
	sa->mgmt_evq_running = false;
	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	sfc_ev_qstop(sa->mgmt_evq);

	efx_ev_fini(sa->nic);
}
89058294ee6SAndrew Rybchenko
89158294ee6SAndrew Rybchenko int
sfc_ev_qinit(struct sfc_adapter * sa,enum sfc_evq_type type,unsigned int type_index,unsigned int entries,int socket_id,struct sfc_evq ** evqp)8926caeec47SAndrew Rybchenko sfc_ev_qinit(struct sfc_adapter *sa,
893298951a4SAndrew Rybchenko enum sfc_evq_type type, unsigned int type_index,
8946caeec47SAndrew Rybchenko unsigned int entries, int socket_id, struct sfc_evq **evqp)
89558294ee6SAndrew Rybchenko {
89658294ee6SAndrew Rybchenko struct sfc_evq *evq;
89758294ee6SAndrew Rybchenko int rc;
89858294ee6SAndrew Rybchenko
8996caeec47SAndrew Rybchenko sfc_log_init(sa, "type=%s type_index=%u",
9006caeec47SAndrew Rybchenko sfc_evq_type2str(type), type_index);
90158294ee6SAndrew Rybchenko
90258294ee6SAndrew Rybchenko SFC_ASSERT(rte_is_power_of_2(entries));
90358294ee6SAndrew Rybchenko
9040c16506eSAndrew Rybchenko rc = ENOMEM;
90558294ee6SAndrew Rybchenko evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
90658294ee6SAndrew Rybchenko socket_id);
90758294ee6SAndrew Rybchenko if (evq == NULL)
9080c16506eSAndrew Rybchenko goto fail_evq_alloc;
90958294ee6SAndrew Rybchenko
91058294ee6SAndrew Rybchenko evq->sa = sa;
911298951a4SAndrew Rybchenko evq->type = type;
912ec9217f9SAndrew Rybchenko evq->entries = entries;
91358294ee6SAndrew Rybchenko
91458294ee6SAndrew Rybchenko /* Allocate DMA space */
915298951a4SAndrew Rybchenko rc = sfc_dma_alloc(sa, sfc_evq_type2str(type), type_index,
9163037e6cfSViacheslav Galaktionov EFX_NIC_DMA_ADDR_EVENT_RING,
917f8a60f76SAndy Moreton efx_evq_size(sa->nic, evq->entries, sa->evq_flags),
918f8a60f76SAndy Moreton socket_id, &evq->mem);
91958294ee6SAndrew Rybchenko if (rc != 0)
9200c16506eSAndrew Rybchenko goto fail_dma_alloc;
92158294ee6SAndrew Rybchenko
92258294ee6SAndrew Rybchenko evq->init_state = SFC_EVQ_INITIALIZED;
92358294ee6SAndrew Rybchenko
9246caeec47SAndrew Rybchenko sa->evq_count++;
9256caeec47SAndrew Rybchenko
9266caeec47SAndrew Rybchenko *evqp = evq;
92758294ee6SAndrew Rybchenko
92858294ee6SAndrew Rybchenko return 0;
9290c16506eSAndrew Rybchenko
9300c16506eSAndrew Rybchenko fail_dma_alloc:
9310c16506eSAndrew Rybchenko rte_free(evq);
9320c16506eSAndrew Rybchenko
9330c16506eSAndrew Rybchenko fail_evq_alloc:
9340c16506eSAndrew Rybchenko
9350c16506eSAndrew Rybchenko sfc_log_init(sa, "failed %d", rc);
9360c16506eSAndrew Rybchenko return rc;
93758294ee6SAndrew Rybchenko }
93858294ee6SAndrew Rybchenko
/* Free an event queue's DMA memory and the queue object itself. */
void
sfc_ev_qfini(struct sfc_evq *evq)
{
	struct sfc_adapter *sa = evq->sa;

	/* The queue must be stopped before it may be finalized */
	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);

	sfc_dma_free(sa, &evq->mem);

	rte_free(evq);

	SFC_ASSERT(sa->evq_count > 0);
	sa->evq_count--;
}
953c22d3c50SAndrew Rybchenko
954c22d3c50SAndrew Rybchenko static int
sfc_kvarg_perf_profile_handler(__rte_unused const char * key,const char * value_str,void * opaque)955c22d3c50SAndrew Rybchenko sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
956c22d3c50SAndrew Rybchenko const char *value_str, void *opaque)
957c22d3c50SAndrew Rybchenko {
9585076ad03SRoman Zhukov uint32_t *value = opaque;
959c22d3c50SAndrew Rybchenko
960c22d3c50SAndrew Rybchenko if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
961c22d3c50SAndrew Rybchenko *value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
962c22d3c50SAndrew Rybchenko else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
963c22d3c50SAndrew Rybchenko *value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
964c22d3c50SAndrew Rybchenko else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
965c22d3c50SAndrew Rybchenko *value = EFX_EVQ_FLAGS_TYPE_AUTO;
966c22d3c50SAndrew Rybchenko else
967c22d3c50SAndrew Rybchenko return -EINVAL;
96858294ee6SAndrew Rybchenko
96958294ee6SAndrew Rybchenko return 0;
97058294ee6SAndrew Rybchenko }
97158294ee6SAndrew Rybchenko
/*
 * Parse event-queue-related device arguments and create the management
 * event queue. Rx/Tx event queues are created later together with the
 * corresponding Rx/Tx queues. Returns 0 on success, non-zero otherwise.
 */
int
sfc_ev_attach(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	/* Default to throughput-optimized EvQs unless overridden by kvarg */
	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
				sfc_kvarg_perf_profile_handler,
				&sa->evq_flags);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value",
			SFC_KVARG_PERF_PROFILE);
		goto fail_kvarg_perf_profile;
	}

	sa->mgmt_evq_index = sfc_mgmt_evq_sw_index(sfc_sa2shared(sa));
	rte_spinlock_init(&sa->mgmt_evq_lock);

	rc = sfc_ev_qinit(sa, SFC_EVQ_TYPE_MGMT, 0, sa->evq_min_entries,
			  sa->socket_id, &sa->mgmt_evq);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:

fail_kvarg_perf_profile:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}
101058294ee6SAndrew Rybchenko
/* Destroy the management event queue and verify all EvQs are gone. */
void
sfc_ev_detach(struct sfc_adapter *sa)
{
	sfc_log_init(sa, "entry");

	sfc_ev_qfini(sa->mgmt_evq);

	/* Rx/Tx EvQs should have been finalized with their queues already */
	if (sa->evq_count != 0)
		sfc_err(sa, "%u EvQs are not destroyed before detach",
			sa->evq_count);
}
1022