/*-
 *   BSD LICENSE
 *
 * Copyright (c) 2016-2017 Solarflare Communications Inc.
 * All rights reserved.
 *
 * This software was jointly developed between OKTET Labs (under contract
 * for Solarflare) and Solarflare Communications, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_alarm.h>
#include <rte_branch_prediction.h>

#include "efx.h"

#include "sfc.h"
#include "sfc_debug.h"
#include "sfc_log.h"
#include "sfc_ev.h"
#include "sfc_rx.h"
#include "sfc_tx.h"
#include "sfc_kvargs.h"


/* Initial delay when waiting for event queue init complete event */
#define SFC_EVQ_INIT_BACKOFF_START_US	(1)
/* Maximum delay between event queue polling attempts */
#define SFC_EVQ_INIT_BACKOFF_MAX_US	(10 * 1000)
/* Approximate timeout for event queue initialization to complete */
#define SFC_EVQ_INIT_TIMEOUT_US		(2 * US_PER_S)

/* Management event queue polling period in microseconds */
#define SFC_MGMT_EV_QPOLL_PERIOD_US	(US_PER_S)


static boolean_t
sfc_ev_initialized(void *arg)
{
	struct sfc_evq *evq = arg;

	/* Init done events may be duplicated on SFN7xxx (SFC bug 31631) */
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTING ||
		   evq->init_state == SFC_EVQ_STARTED);

	evq->init_state = SFC_EVQ_STARTED;

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_rx(void *arg, uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa,
		"EVQ %u unexpected Rx event label=%u id=%#x size=%u flags=%#x",
		evq->evq_index, label, id, size, flags);
	return B_TRUE;
}

static boolean_t
sfc_ev_efx_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	      uint32_t size, uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_efx_rxq *rxq;
	unsigned int stop;
	unsigned int pending_id;
	unsigned int delta;
	unsigned int i;
	struct sfc_efx_rx_sw_desc *rxd;

	if (unlikely(evq->exception))
		goto done;

	rxq = sfc_efx_rxq_by_dp_rxq(evq->dp_rxq);

	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->evq == evq);
	SFC_ASSERT(rxq->flags & SFC_EFX_RXQ_FLAG_STARTED);

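	/*
	 * The event carries the ID of the last completed descriptor.
	 * The ring size is a power of 2, so the distance from the
	 * pending position is computed modulo the ring size by
	 * masking with ptr_mask.
	 */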
	stop = (id + 1) & rxq->ptr_mask;
	pending_id = rxq->pending & rxq->ptr_mask;
	delta = (stop >= pending_id) ? (stop - pending_id) :
		(rxq->ptr_mask + 1 - pending_id + stop);

	if (delta == 0) {
		/*
		 * An Rx event with no new descriptors done and zero length
		 * is used to abort a scattered packet when there is no room
		 * for the tail.
		 */
		if (unlikely(size != 0)) {
			evq->exception = B_TRUE;
			sfc_err(evq->sa,
				"EVQ %u RxQ %u invalid Rx abort "
				"(id=%#x size=%u flags=%#x); needs restart",
				evq->evq_index, rxq->dp.dpq.queue_id,
				id, size, flags);
			goto done;
		}

		/* Add discard flag to the first fragment */
		rxq->sw_desc[pending_id].flags |= EFX_DISCARD;
		/* Remove continue flag from the last fragment */
		rxq->sw_desc[id].flags &= ~EFX_PKT_CONT;
	} else if (unlikely(delta > rxq->batch_max)) {
		evq->exception = B_TRUE;

		sfc_err(evq->sa,
			"EVQ %u RxQ %u completion out of order "
			"(id=%#x delta=%u flags=%#x); needs restart",
			evq->evq_index, rxq->dp.dpq.queue_id,
			id, delta, flags);

		goto done;
	}

	for (i = pending_id; i != stop; i = (i + 1) & rxq->ptr_mask) {
		rxd = &rxq->sw_desc[i];

		rxd->flags = flags;

		SFC_ASSERT(size < (1 << 16));
		rxd->size = (uint16_t)size;
	}

	rxq->pending += delta;

done:
	return B_FALSE;
}

static boolean_t
sfc_ev_dp_rx(void *arg, __rte_unused uint32_t label, uint32_t id,
	     __rte_unused uint32_t size, __rte_unused uint16_t flags)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	SFC_ASSERT(evq->sa->dp_rx->qrx_ev != NULL);
	return evq->sa->dp_rx->qrx_ev(dp_rxq, id);
}

static boolean_t
sfc_ev_nop_tx(void *arg, uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected Tx event label=%u id=%#x",
		evq->evq_index, label, id);
	return B_TRUE;
}

static boolean_t
sfc_ev_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_efx_txq *txq;
	unsigned int stop;
	unsigned int delta;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_efx_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq->evq == evq);

	if (unlikely((txq->flags & SFC_EFX_TXQ_FLAG_STARTED) == 0))
		goto done;

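	/*
	 * The event carries the ID of the last completed Tx descriptor;
	 * advance the pending count by the ring distance from the
	 * previously pending position (modulo the ring size).
	 */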
	stop = (id + 1) & txq->ptr_mask;
	id = txq->pending & txq->ptr_mask;

	delta = (stop >= id) ? (stop - id) : (txq->ptr_mask + 1 - id + stop);

	txq->pending += delta;

done:
	return B_FALSE;
}

static boolean_t
sfc_ev_dp_tx(void *arg, __rte_unused uint32_t label, uint32_t id)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	SFC_ASSERT(evq->sa->dp_tx->qtx_ev != NULL);
	return evq->sa->dp_tx->qtx_ev(dp_txq, id);
}

static boolean_t
sfc_ev_exception(void *arg, uint32_t code, uint32_t data)
{
	struct sfc_evq *evq = arg;

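	/* Unknown sensor events do not require recovery; just ignore them */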
	if (code == EFX_EXCEPTION_UNKNOWN_SENSOREVT)
		return B_FALSE;

	evq->exception = B_TRUE;
	sfc_warn(evq->sa,
		 "hardware exception %s (code=%u, data=%#x) on EVQ %u;"
		 " needs recovery",
		 (code == EFX_EXCEPTION_RX_RECOVERY) ? "RX_RECOVERY" :
		 (code == EFX_EXCEPTION_RX_DSC_ERROR) ? "RX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_TX_DSC_ERROR) ? "TX_DSC_ERROR" :
		 (code == EFX_EXCEPTION_FWALERT_SRAM) ? "FWALERT_SRAM" :
		 (code == EFX_EXCEPTION_UNKNOWN_FWALERT) ? "UNKNOWN_FWALERT" :
		 (code == EFX_EXCEPTION_RX_ERROR) ? "RX_ERROR" :
		 (code == EFX_EXCEPTION_TX_ERROR) ? "TX_ERROR" :
		 (code == EFX_EXCEPTION_EV_ERROR) ? "EV_ERROR" :
		 "UNKNOWN",
		 code, data, evq->evq_index);

	return B_TRUE;
}

static boolean_t
sfc_ev_nop_rxq_flush_done(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush done",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_done(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_done(rxq);

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_rxq_flush_failed(void *arg, uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected RxQ %u flush failed",
		evq->evq_index, rxq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_rxq_flush_failed(void *arg, __rte_unused uint32_t rxq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_rxq *dp_rxq;
	struct sfc_rxq *rxq;

	dp_rxq = evq->dp_rxq;
	SFC_ASSERT(dp_rxq != NULL);

	rxq = sfc_rxq_by_dp_rxq(dp_rxq);
	SFC_ASSERT(rxq != NULL);
	SFC_ASSERT(rxq->hw_index == rxq_hw_index);
	SFC_ASSERT(rxq->evq == evq);
	sfc_rx_qflush_failed(rxq);

	return B_FALSE;
}

static boolean_t
sfc_ev_nop_txq_flush_done(void *arg, uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected TxQ %u flush done",
		evq->evq_index, txq_hw_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_txq_flush_done(void *arg, __rte_unused uint32_t txq_hw_index)
{
	struct sfc_evq *evq = arg;
	struct sfc_dp_txq *dp_txq;
	struct sfc_txq *txq;

	dp_txq = evq->dp_txq;
	SFC_ASSERT(dp_txq != NULL);

	txq = sfc_txq_by_dp_txq(dp_txq);
	SFC_ASSERT(txq != NULL);
	SFC_ASSERT(txq->hw_index == txq_hw_index);
	SFC_ASSERT(txq->evq == evq);
	sfc_tx_qflush_done(txq);

	return B_FALSE;
}

static boolean_t
sfc_ev_software(void *arg, uint16_t magic)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected software event magic=%#.4x",
		evq->evq_index, magic);
	return B_TRUE;
}

static boolean_t
sfc_ev_sram(void *arg, uint32_t code)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected SRAM event code=%u",
		evq->evq_index, code);
	return B_TRUE;
}

static boolean_t
sfc_ev_wake_up(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected wake up event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_timer(void *arg, uint32_t index)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected timer event index=%u",
		evq->evq_index, index);
	return B_TRUE;
}

static boolean_t
sfc_ev_nop_link_change(void *arg, __rte_unused efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;

	sfc_err(evq->sa, "EVQ %u unexpected link change event",
		evq->evq_index);
	return B_TRUE;
}

static boolean_t
sfc_ev_link_change(void *arg, efx_link_mode_t link_mode)
{
	struct sfc_evq *evq = arg;
	struct sfc_adapter *sa = evq->sa;
	struct rte_eth_link *dev_link = &sa->eth_dev->data->dev_link;
	struct rte_eth_link new_link;
	uint64_t new_link_u64;
	uint64_t old_link_u64;

	EFX_STATIC_ASSERT(sizeof(*dev_link) == sizeof(rte_atomic64_t));

	sfc_port_link_mode_to_info(link_mode, &new_link);

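	/*
	 * Update the link status atomically: the whole rte_eth_link fits
	 * into 64 bits, so a compare-and-set loop guarantees readers never
	 * see a torn value, and lsc_seq is bumped only when the link
	 * actually changes.
	 */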
	new_link_u64 = *(uint64_t *)&new_link;
	do {
		old_link_u64 = rte_atomic64_read((rte_atomic64_t *)dev_link);
		if (old_link_u64 == new_link_u64)
			break;

		if (rte_atomic64_cmpset((volatile uint64_t *)dev_link,
					old_link_u64, new_link_u64)) {
			evq->sa->port.lsc_seq++;
			break;
		}
	} while (B_TRUE);

	return B_FALSE;
}

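/*
 * The callback tables below differ only in the Rx/Tx and flush handlers:
 * the management EVQ uses nop Rx/Tx handlers (which log an error and
 * request recovery), while Rx/Tx EVQs pick the handler matching the
 * selected datapath implementation and treat link change as unexpected.
 */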
static const efx_ev_callbacks_t sfc_ev_callbacks = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_efx_rx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_efx_rx,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_dp_rx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_dp_rx,
	.eec_tx			= sfc_ev_nop_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_nop_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_efx_tx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_tx			= sfc_ev_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};

static const efx_ev_callbacks_t sfc_ev_callbacks_dp_tx = {
	.eec_initialized	= sfc_ev_initialized,
	.eec_rx			= sfc_ev_nop_rx,
	.eec_tx			= sfc_ev_dp_tx,
	.eec_exception		= sfc_ev_exception,
	.eec_rxq_flush_done	= sfc_ev_nop_rxq_flush_done,
	.eec_rxq_flush_failed	= sfc_ev_nop_rxq_flush_failed,
	.eec_txq_flush_done	= sfc_ev_txq_flush_done,
	.eec_software		= sfc_ev_software,
	.eec_sram		= sfc_ev_sram,
	.eec_wake_up		= sfc_ev_wake_up,
	.eec_timer		= sfc_ev_timer,
	.eec_link_change	= sfc_ev_nop_link_change,
};


void
sfc_ev_qpoll(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED ||
		   evq->init_state == SFC_EVQ_STARTING);

	/* Synchronizing the DMA memory for reading is not required */

	efx_ev_qpoll(evq->common, &evq->read_ptr, evq->callbacks, evq);

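	/*
	 * Recovery needs the adapter lock; use trylock since qpoll may be
	 * called from contexts where blocking is not allowed. If the lock
	 * is busy, the exception flag stays set and recovery is retried
	 * on the next poll.
	 */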
	if (unlikely(evq->exception) && sfc_adapter_trylock(evq->sa)) {
		struct sfc_adapter *sa = evq->sa;
		int rc;

		if (evq->dp_rxq != NULL) {
			unsigned int rxq_sw_index;

			rxq_sw_index = evq->dp_rxq->dpq.queue_id;

			sfc_warn(sa,
				 "restart RxQ %u because of exception on its EvQ %u",
				 rxq_sw_index, evq->evq_index);

			sfc_rx_qstop(sa, rxq_sw_index);
			rc = sfc_rx_qstart(sa, rxq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart RxQ %u",
					rxq_sw_index);
		}

		if (evq->dp_txq != NULL) {
			unsigned int txq_sw_index;

			txq_sw_index = evq->dp_txq->dpq.queue_id;

			sfc_warn(sa,
				 "restart TxQ %u because of exception on its EvQ %u",
				 txq_sw_index, evq->evq_index);

			sfc_tx_qstop(sa, txq_sw_index);
			rc = sfc_tx_qstart(sa, txq_sw_index);
			if (rc != 0)
				sfc_err(sa, "cannot restart TxQ %u",
					txq_sw_index);
		}

		if (evq->exception)
			sfc_panic(sa, "unrecoverable exception on EvQ %u",
				  evq->evq_index);

		sfc_adapter_unlock(sa);
	}

	/* Poll-mode driver does not re-prime the event queue for interrupts */
}

void
sfc_ev_mgmt_qpoll(struct sfc_adapter *sa)
{
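	/*
	 * Trylock ensures this never blocks: if the management EVQ is
	 * busy in another context, the poll is simply skipped and
	 * retried on the next invocation.
	 */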
	if (rte_spinlock_trylock(&sa->mgmt_evq_lock)) {
		struct sfc_evq *mgmt_evq = sa->evq_info[sa->mgmt_evq_index].evq;

		if (mgmt_evq->init_state == SFC_EVQ_STARTED)
			sfc_ev_qpoll(mgmt_evq);

		rte_spinlock_unlock(&sa->mgmt_evq_lock);
	}
}

int
sfc_ev_qprime(struct sfc_evq *evq)
{
	SFC_ASSERT(evq->init_state == SFC_EVQ_STARTED);
	return efx_ev_qprime(evq->common, evq->read_ptr);
}

int
sfc_ev_qstart(struct sfc_adapter *sa, unsigned int sw_index)
{
	const struct sfc_evq_info *evq_info;
	struct sfc_evq *evq;
	efsys_mem_t *esmp;
	unsigned int total_delay_us;
	unsigned int delay_us;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	evq_info = &sa->evq_info[sw_index];
	evq = evq_info->evq;
	esmp = &evq->mem;

	/* Clear all events (an all-ones entry denotes an absent event) */
	(void)memset((void *)esmp->esm_base, 0xff,
		     EFX_EVQ_SIZE(evq_info->entries));

	/* Create the common code event queue */
	rc = efx_ev_qcreate(sa->nic, sw_index, esmp, evq_info->entries,
			    0 /* unused on EF10 */, 0, evq_info->flags,
			    &evq->common);
	if (rc != 0)
		goto fail_ev_qcreate;

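	/*
	 * An EVQ serves at most one Rx or one Tx queue; select the
	 * callback table that matches the queue type and the chosen
	 * datapath implementation.
	 */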
	SFC_ASSERT(evq->dp_rxq == NULL || evq->dp_txq == NULL);
	if (evq->dp_rxq != NULL) {
		if (strcmp(sa->dp_rx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_rx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_rx;
	} else if (evq->dp_txq != NULL) {
		if (strcmp(sa->dp_tx->dp.name, SFC_KVARG_DATAPATH_EFX) == 0)
			evq->callbacks = &sfc_ev_callbacks_efx_tx;
		else
			evq->callbacks = &sfc_ev_callbacks_dp_tx;
	} else {
		evq->callbacks = &sfc_ev_callbacks;
	}

	evq->init_state = SFC_EVQ_STARTING;

	/* Wait for the initialization event */
	total_delay_us = 0;
	delay_us = SFC_EVQ_INIT_BACKOFF_START_US;
	do {
		sfc_ev_qpoll(evq);

		/*
		 * Check to see if the initialization complete indication
		 * has been posted by the hardware.
		 */
		if (evq->init_state == SFC_EVQ_STARTED)
			goto done;

		/* Give the event queue some time to init */
		rte_delay_us(delay_us);

		total_delay_us += delay_us;

		/* Exponential backoff */
		delay_us *= 2;
		if (delay_us > SFC_EVQ_INIT_BACKOFF_MAX_US)
			delay_us = SFC_EVQ_INIT_BACKOFF_MAX_US;

	} while (total_delay_us < SFC_EVQ_INIT_TIMEOUT_US);

	rc = ETIMEDOUT;
	goto fail_timedout;

done:
	return 0;

fail_timedout:
	evq->init_state = SFC_EVQ_INITIALIZED;
	efx_ev_qdestroy(evq->common);

fail_ev_qcreate:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qstop(struct sfc_adapter *sa, unsigned int sw_index)
{
	const struct sfc_evq_info *evq_info;
	struct sfc_evq *evq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	SFC_ASSERT(sw_index < sa->evq_count);

	evq_info = &sa->evq_info[sw_index];
	evq = evq_info->evq;

	if (evq == NULL || evq->init_state != SFC_EVQ_STARTED)
		return;

	evq->init_state = SFC_EVQ_INITIALIZED;
	evq->callbacks = NULL;
	evq->read_ptr = 0;
	evq->exception = B_FALSE;

	efx_ev_qdestroy(evq->common);
}

static void
sfc_ev_mgmt_periodic_qpoll(void *arg)
{
	struct sfc_adapter *sa = arg;
	int rc;

	sfc_ev_mgmt_qpoll(sa);

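	/* EAL alarms are one-shot, so re-arm the poll on every invocation */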
	rc = rte_eal_alarm_set(SFC_MGMT_EV_QPOLL_PERIOD_US,
			       sfc_ev_mgmt_periodic_qpoll, sa);
	if (rc == -ENOTSUP) {
		sfc_warn(sa, "alarms are not supported");
		sfc_warn(sa, "management EVQ must be polled indirectly using no-wait link status update");
	} else if (rc != 0) {
		sfc_err(sa,
			"cannot rearm management EVQ polling alarm (rc=%d)",
			rc);
	}
}

static void
sfc_ev_mgmt_periodic_qpoll_start(struct sfc_adapter *sa)
{
	sfc_ev_mgmt_periodic_qpoll(sa);
}

static void
sfc_ev_mgmt_periodic_qpoll_stop(struct sfc_adapter *sa)
{
	rte_eal_alarm_cancel(sfc_ev_mgmt_periodic_qpoll, sa);
}

int
sfc_ev_start(struct sfc_adapter *sa)
{
	int rc;

	sfc_log_init(sa, "entry");

	rc = efx_ev_init(sa->nic);
	if (rc != 0)
		goto fail_ev_init;

	/* Start management EVQ used for global events */
	rte_spinlock_lock(&sa->mgmt_evq_lock);

	rc = sfc_ev_qstart(sa, sa->mgmt_evq_index);
	if (rc != 0)
		goto fail_mgmt_evq_start;

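	/*
	 * Prime the management EVQ so that the next event (e.g. a link
	 * status change) raises an interrupt instead of waiting to be
	 * discovered by periodic polling.
	 */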
	if (sa->intr.lsc_intr) {
		rc = sfc_ev_qprime(sa->evq_info[sa->mgmt_evq_index].evq);
		if (rc != 0)
			goto fail_evq0_prime;
	}

	rte_spinlock_unlock(&sa->mgmt_evq_lock);

	/*
	 * Start management EVQ polling. If interrupts are disabled
	 * (not used), it is required to process link status changes and
	 * other device-level events to avoid an unrecoverable error
	 * caused by event queue overflow.
	 */
	sfc_ev_mgmt_periodic_qpoll_start(sa);

	/*
	 * Rx/Tx event queues are started/stopped when the corresponding
	 * Rx/Tx queue is started/stopped.
	 */

	return 0;

fail_evq0_prime:
	sfc_ev_qstop(sa, 0);

fail_mgmt_evq_start:
	rte_spinlock_unlock(&sa->mgmt_evq_lock);
	efx_ev_fini(sa->nic);

fail_ev_init:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_stop(struct sfc_adapter *sa)
{
	unsigned int sw_index;

	sfc_log_init(sa, "entry");

	sfc_ev_mgmt_periodic_qpoll_stop(sa);

	/* Make sure that all event queues are stopped */
	sw_index = sa->evq_count;
	while (sw_index-- > 0) {
		if (sw_index == sa->mgmt_evq_index) {
			/* Locks are required for the management EVQ */
			rte_spinlock_lock(&sa->mgmt_evq_lock);
			sfc_ev_qstop(sa, sa->mgmt_evq_index);
			rte_spinlock_unlock(&sa->mgmt_evq_lock);
		} else {
			sfc_ev_qstop(sa, sw_index);
		}
	}

	efx_ev_fini(sa->nic);
}

int
sfc_ev_qinit(struct sfc_adapter *sa, unsigned int sw_index,
	     unsigned int entries, int socket_id)
{
	struct sfc_evq_info *evq_info;
	struct sfc_evq *evq;
	int rc;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	evq_info = &sa->evq_info[sw_index];

	SFC_ASSERT(rte_is_power_of_2(entries));
	SFC_ASSERT(entries <= evq_info->max_entries);
	evq_info->entries = entries;

	rc = ENOMEM;
	evq = rte_zmalloc_socket("sfc-evq", sizeof(*evq), RTE_CACHE_LINE_SIZE,
				 socket_id);
	if (evq == NULL)
		goto fail_evq_alloc;

	evq->sa = sa;
	evq->evq_index = sw_index;

	/* Allocate DMA space */
	rc = sfc_dma_alloc(sa, "evq", sw_index, EFX_EVQ_SIZE(evq_info->entries),
			   socket_id, &evq->mem);
	if (rc != 0)
		goto fail_dma_alloc;

	evq->init_state = SFC_EVQ_INITIALIZED;

	evq_info->evq = evq;

	return 0;

fail_dma_alloc:
	rte_free(evq);

fail_evq_alloc:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_qfini(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_evq *evq;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	evq = sa->evq_info[sw_index].evq;

	SFC_ASSERT(evq->init_state == SFC_EVQ_INITIALIZED);

	sa->evq_info[sw_index].evq = NULL;

	sfc_dma_free(sa, &evq->mem);

	rte_free(evq);
}

static int
sfc_ev_qinit_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	struct sfc_evq_info *evq_info = &sa->evq_info[sw_index];
	unsigned int max_entries;

	sfc_log_init(sa, "sw_index=%u", sw_index);

	max_entries = sfc_evq_max_entries(sa, sw_index);
	SFC_ASSERT(rte_is_power_of_2(max_entries));

	evq_info->max_entries = max_entries;
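	/*
	 * Interrupt notification is enabled only for the management EVQ
	 * when the link status change interrupt is used; all other EVQs
	 * are purely polled.
	 */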
	evq_info->flags = sa->evq_flags |
		((sa->intr.lsc_intr && sw_index == sa->mgmt_evq_index) ?
			EFX_EVQ_FLAGS_NOTIFY_INTERRUPT :
			EFX_EVQ_FLAGS_NOTIFY_DISABLED);

	return 0;
}

static int
sfc_kvarg_perf_profile_handler(__rte_unused const char *key,
			       const char *value_str, void *opaque)
{
	uint64_t *value = opaque;

	if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_THROUGHPUT) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_LOW_LATENCY) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_LOW_LATENCY;
	else if (strcasecmp(value_str, SFC_KVARG_PERF_PROFILE_AUTO) == 0)
		*value = EFX_EVQ_FLAGS_TYPE_AUTO;
	else
		return -EINVAL;

	return 0;
}

static void
sfc_ev_qfini_info(struct sfc_adapter *sa, unsigned int sw_index)
{
	sfc_log_init(sa, "sw_index=%u", sw_index);

	/* Nothing to cleanup */
}

int
sfc_ev_init(struct sfc_adapter *sa)
{
	int rc;
	unsigned int sw_index;

	sfc_log_init(sa, "entry");

	sa->evq_flags = EFX_EVQ_FLAGS_TYPE_THROUGHPUT;
	rc = sfc_kvargs_process(sa, SFC_KVARG_PERF_PROFILE,
				sfc_kvarg_perf_profile_handler,
				&sa->evq_flags);
	if (rc != 0) {
		sfc_err(sa, "invalid %s parameter value",
			SFC_KVARG_PERF_PROFILE);
		goto fail_kvarg_perf_profile;
	}

	sa->evq_count = sfc_ev_qcount(sa);
	sa->mgmt_evq_index = 0;
	rte_spinlock_init(&sa->mgmt_evq_lock);

	/* Allocate EVQ info array */
	rc = ENOMEM;
	sa->evq_info = rte_calloc_socket("sfc-evqs", sa->evq_count,
					 sizeof(struct sfc_evq_info), 0,
					 sa->socket_id);
	if (sa->evq_info == NULL)
		goto fail_evqs_alloc;

	for (sw_index = 0; sw_index < sa->evq_count; ++sw_index) {
		rc = sfc_ev_qinit_info(sa, sw_index);
		if (rc != 0)
			goto fail_ev_qinit_info;
	}

	rc = sfc_ev_qinit(sa, sa->mgmt_evq_index, SFC_MGMT_EVQ_ENTRIES,
			  sa->socket_id);
	if (rc != 0)
		goto fail_mgmt_evq_init;

	/*
	 * Rx/Tx event queues are created/destroyed when the corresponding
	 * Rx/Tx queue is created/destroyed.
	 */

	return 0;

fail_mgmt_evq_init:
fail_ev_qinit_info:
	while (sw_index-- > 0)
		sfc_ev_qfini_info(sa, sw_index);

	rte_free(sa->evq_info);
	sa->evq_info = NULL;

fail_evqs_alloc:
	sa->evq_count = 0;

fail_kvarg_perf_profile:
	sfc_log_init(sa, "failed %d", rc);
	return rc;
}

void
sfc_ev_fini(struct sfc_adapter *sa)
{
	int sw_index;

	sfc_log_init(sa, "entry");

	/* Cleanup all event queues */
	sw_index = sa->evq_count;
	while (--sw_index >= 0) {
		if (sa->evq_info[sw_index].evq != NULL)
			sfc_ev_qfini(sa, sw_index);
		sfc_ev_qfini_info(sa, sw_index);
	}

	rte_free(sa->evq_info);
	sa->evq_info = NULL;
	sa->evq_count = 0;
}