xref: /dpdk/drivers/event/dpaa/dpaa_eventdev.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /*   SPDX-License-Identifier:        BSD-3-Clause
2  *   Copyright 2017-2019 NXP
3  */
4 
5 #include <assert.h>
6 #include <stdio.h>
7 #include <stdbool.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <string.h>
11 #include <sys/epoll.h>
12 
13 #include <rte_atomic.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_debug.h>
17 #include <dev_driver.h>
18 #include <rte_eal.h>
19 #include <rte_lcore.h>
20 #include <rte_log.h>
21 #include <rte_malloc.h>
22 #include <rte_memcpy.h>
23 #include <rte_memory.h>
24 #include <rte_memzone.h>
25 #include <rte_pci.h>
26 #include <rte_eventdev.h>
27 #include <eventdev_pmd_vdev.h>
28 #include <rte_ethdev.h>
29 #include <rte_event_crypto_adapter.h>
30 #include <rte_event_eth_rx_adapter.h>
31 #include <rte_event_eth_tx_adapter.h>
32 #include <cryptodev_pmd.h>
33 #include <bus_dpaa_driver.h>
34 #include <rte_dpaa_logs.h>
35 #include <rte_cycles.h>
36 #include <rte_kvargs.h>
37 
38 #include <dpaa_ethdev.h>
39 #include <dpaa_sec_event.h>
40 #include "dpaa_eventdev.h"
41 #include <dpaa_mempool.h>
42 
43 /*
44  * Clarifications
45  * Evendev = Virtual Instance for SoC
46  * Eventport = Portal Instance
47  * Eventqueue = Channel Instance
48  * 1 Eventdev can have N Eventqueue
49  */
50 RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_eventdev, NOTICE);
51 
52 #define DISABLE_INTR_MODE "disable_intr"
53 
54 static int
55 dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
56 				 uint64_t *timeout_ticks)
57 {
58 	EVENTDEV_INIT_FUNC_TRACE();
59 
60 	RTE_SET_USED(dev);
61 
62 	uint64_t cycles_per_second;
63 
64 	cycles_per_second = rte_get_timer_hz();
65 	*timeout_ticks = (ns * cycles_per_second) / NS_PER_S;
66 
67 	return 0;
68 }
69 
70 static int
71 dpaa_event_dequeue_timeout_ticks_intr(struct rte_eventdev *dev, uint64_t ns,
72 				 uint64_t *timeout_ticks)
73 {
74 	RTE_SET_USED(dev);
75 
76 	*timeout_ticks = ns/1000;
77 	return 0;
78 }
79 
80 static void
81 dpaa_eventq_portal_add(u16 ch_id)
82 {
83 	uint32_t sdqcr;
84 
85 	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
86 	qman_static_dequeue_add(sdqcr, NULL);
87 }
88 
89 static uint16_t
90 dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
91 			 uint16_t nb_events)
92 {
93 	uint16_t i;
94 	struct rte_mbuf *mbuf;
95 
96 	RTE_SET_USED(port);
97 	/*Release all the contexts saved previously*/
98 	for (i = 0; i < nb_events; i++) {
99 		switch (ev[i].op) {
100 		case RTE_EVENT_OP_RELEASE:
101 			qman_dca_index(ev[i].impl_opaque, 0);
102 			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
103 			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
104 			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
105 			DPAA_PER_LCORE_DQRR_SIZE--;
106 			break;
107 		default:
108 			break;
109 		}
110 	}
111 
112 	return nb_events;
113 }
114 
/* Single-event enqueue: thin wrapper over the burst variant. */
static uint16_t
dpaa_event_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa_event_enqueue_burst(port, ev, 1);
}
120 
/* Drain the 4-byte UIO interrupt token from @fd if select() marked it
 * readable in @fdset. The qman UIO fd produces a 32-bit value per IRQ
 * that must be consumed before interrupts are re-armed.
 */
static void drain_4_bytes(int fd, fd_set *fdset)
{
	if (FD_ISSET(fd, fdset)) {
		/* drain 4 bytes */
		uint32_t junk;
		/* Fix: read from the fd the caller passed in, rather than
		 * calling qman_thread_fd() a second time — keeps the function
		 * usable for any descriptor and avoids a redundant lookup.
		 */
		ssize_t sjunk = read(fd, &junk, sizeof(junk));

		if (sjunk != sizeof(junk))
			DPAA_EVENTDEV_ERR("UIO irq read error");
	}
}
131 
/* Block on the qman portal fd until a dequeue-ready interrupt fires or
 * @timeout_ticks (microseconds here — set by the _intr timeout path)
 * elapses. Returns select()'s result: >0 if the fd became readable,
 * 0 on timeout, <0 on error.
 */
static inline int
dpaa_event_dequeue_wait(uint64_t timeout_ticks)
{
	int fd_qman, nfds;
	int ret;
	fd_set readset;

	/* Go into (and back out of) IRQ mode for each select,
	 * it simplifies exit-path considerations and other
	 * potential nastiness.
	 */
	struct timeval tv = {
		.tv_sec = timeout_ticks / 1000000,
		.tv_usec = timeout_ticks % 1000000
	};

	fd_qman = qman_thread_fd();
	nfds = fd_qman + 1;
	FD_ZERO(&readset);
	FD_SET(fd_qman, &readset);

	/* Arm the dequeue-ready interrupt only for the duration of the wait */
	qman_irqsource_add(QM_PIRQ_DQRI);

	ret = select(nfds, &readset, NULL, NULL, &tv);
	if (ret < 0)
		return ret;
	/* Calling irqsource_remove() prior to thread_irq()
	 * means thread_irq() will not process whatever caused
	 * the interrupts, however it does ensure that, once
	 * thread_irq() re-enables interrupts, they won't fire
	 * again immediately.
	 */
	qman_irqsource_remove(~0);
	drain_4_bytes(fd_qman, &readset);
	qman_thread_irq();

	return ret;
}
170 
/* Poll-mode dequeue burst.
 *
 * Lazily affines the calling thread to a qman portal and links the
 * port's event queues on first use, releases any atomic contexts still
 * held by this lcore, then busy-polls qman_portal_dequeue() until frames
 * arrive or the timeout (in timer cycles) expires.
 *
 * Returns the number of events written into @ev.
 */
static uint16_t
dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
			 uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal.
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	/* NOTE(review): portal->timeout_us holds timer *cycles* in poll mode
	 * (see dev_configure), despite the field name — confirm against
	 * dpaa_eventdev.h before relying on the unit.
	 */
	if (timeout_ticks)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	/* Deadline in absolute timer cycles */
	wait_time_ticks += rte_get_timer_cycles();
	do {
		/* Lets dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (num_frames)
			break;
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}
234 
/* Single-event poll-mode dequeue: wrapper over the burst variant. */
static uint16_t
dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
}
240 
/* Interrupt-mode dequeue burst.
 *
 * Same portal-affinity and atomic-context-release preamble as the poll
 * variant, but instead of busy-polling it sleeps in select() via
 * dpaa_event_dequeue_wait() until the portal signals DQRI or the
 * timeout (microseconds in this mode) expires.
 *
 * Returns the number of events written into @ev.
 */
static uint16_t
dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
			      uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i, irq = 0;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal.
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	if (timeout_ticks)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	do {
		/* Lets dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		/* An interrupt woke us: clear the flag and re-check once */
		if (irq)
			irq = 0;
		if (num_frames)
			break;
		if (wait_time_ticks) { /* wait for time */
			if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
				irq = 1;
				continue;
			}
			break; /* no event after waiting */
		}
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}
312 
/* Single-event interrupt-mode dequeue: wrapper over the burst variant. */
static uint16_t
dpaa_event_dequeue_intr(void *port,
			struct rte_event *ev,
			uint64_t timeout_ticks)
{
	return dpaa_event_dequeue_burst_intr(port, ev, 1, timeout_ticks);
}
320 
321 static void
322 dpaa_event_dev_info_get(struct rte_eventdev *dev,
323 			struct rte_event_dev_info *dev_info)
324 {
325 	EVENTDEV_INIT_FUNC_TRACE();
326 
327 	RTE_SET_USED(dev);
328 	dev_info->driver_name = "event_dpaa1";
329 	dev_info->min_dequeue_timeout_ns =
330 		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
331 	dev_info->max_dequeue_timeout_ns =
332 		DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
333 	dev_info->dequeue_timeout_ns =
334 		DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
335 	dev_info->max_event_queues =
336 		DPAA_EVENT_MAX_QUEUES;
337 	dev_info->max_event_queue_flows =
338 		DPAA_EVENT_MAX_QUEUE_FLOWS;
339 	dev_info->max_event_queue_priority_levels =
340 		DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
341 	dev_info->max_event_priority_levels =
342 		DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
343 	dev_info->max_event_ports =
344 		DPAA_EVENT_MAX_EVENT_PORT;
345 	dev_info->max_event_port_dequeue_depth =
346 		DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
347 	dev_info->max_event_port_enqueue_depth =
348 		DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
349 	/*
350 	 * TODO: Need to find out that how to fetch this info
351 	 * from kernel or somewhere else.
352 	 */
353 	dev_info->max_num_events =
354 		DPAA_EVENT_MAX_NUM_EVENTS;
355 	dev_info->event_dev_cap =
356 		RTE_EVENT_DEV_CAP_ATOMIC |
357 		RTE_EVENT_DEV_CAP_PARALLEL |
358 		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
359 		RTE_EVENT_DEV_CAP_BURST_MODE |
360 		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
361 		RTE_EVENT_DEV_CAP_NONSEQ_MODE |
362 		RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
363 		RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
364 	dev_info->max_profiles_per_port = 1;
365 }
366 
/* Configure the event device: copy the application's config into the
 * private data, allocate one qman pool channel per event queue, zero the
 * port array, and derive each port's default dequeue timeout (cycles in
 * poll mode, microseconds in interrupt mode).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the negative
 * error from qman_alloc_pool_range().
 */
static int
dpaa_event_dev_configure(const struct rte_eventdev *dev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	int ret, i;
	uint32_t *ch_id;

	EVENTDEV_INIT_FUNC_TRACE();
	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_events_limit = conf->nb_events_limit;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	/* Scratch array for the channel ids returned by qman; freed below */
	ch_id = rte_malloc("dpaa-channels",
			  sizeof(uint32_t) * priv->nb_event_queues,
			  RTE_CACHE_LINE_SIZE);
	if (ch_id == NULL) {
		DPAA_EVENTDEV_ERR("Fail to allocate memory for dpaa channels\n");
		return -ENOMEM;
	}
	/* Create requested event queues within the given event device */
	ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
	if (ret < 0) {
		DPAA_EVENTDEV_ERR("qman_alloc_pool_range %u, err =%d\n",
				 priv->nb_event_queues, ret);
		rte_free(ch_id);
		return ret;
	}
	for (i = 0; i < priv->nb_event_queues; i++)
		priv->evq_info[i].ch_id = (u16)ch_id[i];

	/* Lets prepare event ports */
	memset(&priv->ports[0], 0,
	      sizeof(struct dpaa_port) * priv->nb_event_ports);

	/* Check dequeue timeout method is per dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * Use timeout value as given in dequeue operation.
		 * So invalidating this timeout value.
		 */
		priv->dequeue_timeout_ns = 0;

	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	/* Per-port default wait: us for intr mode, timer cycles otherwise
	 * (stored in the same timeout_us field in both cases).
	 */
	for (i = 0; i < priv->nb_event_ports; i++) {
		if (priv->intr_mode) {
			priv->ports[i].timeout_us =
				priv->dequeue_timeout_ns/1000;
		} else {
			uint64_t cycles_per_second;

			cycles_per_second = rte_get_timer_hz();
			priv->ports[i].timeout_us =
				(priv->dequeue_timeout_ns * cycles_per_second)
					/ NS_PER_S;
		}
	}

	/*
	 * TODO: Currently portals are affined with threads. Maximum threads
	 * can be created equals to number of lcore.
	 */
	rte_free(ch_id);
	DPAA_EVENTDEV_INFO("Configured eventdev devid=%d", dev->data->dev_id);

	return 0;
}
444 
/* Device start: hardware needs no explicit kick, so this is a no-op. */
static int
dpaa_event_dev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}
453 
/* Device stop: nothing to tear down in hardware; no-op. */
static void
dpaa_event_dev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
}
460 
/* Device close: no per-device resources to release here; no-op. */
static int
dpaa_event_dev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}
469 
470 static void
471 dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
472 			  struct rte_event_queue_conf *queue_conf)
473 {
474 	EVENTDEV_INIT_FUNC_TRACE();
475 
476 	RTE_SET_USED(dev);
477 	RTE_SET_USED(queue_id);
478 
479 	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
480 	queue_conf->nb_atomic_flows = DPAA_EVENT_QUEUE_ATOMIC_FLOWS;
481 	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
482 	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
483 }
484 
485 static int
486 dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
487 		       const struct rte_event_queue_conf *queue_conf)
488 {
489 	struct dpaa_eventdev *priv = dev->data->dev_private;
490 	struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];
491 
492 	EVENTDEV_INIT_FUNC_TRACE();
493 
494 	switch (queue_conf->schedule_type) {
495 	case RTE_SCHED_TYPE_PARALLEL:
496 	case RTE_SCHED_TYPE_ATOMIC:
497 		break;
498 	case RTE_SCHED_TYPE_ORDERED:
499 		DPAA_EVENTDEV_ERR("Schedule type is not supported.");
500 		return -1;
501 	}
502 	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
503 	evq_info->event_queue_id = queue_id;
504 
505 	return 0;
506 }
507 
508 static void
509 dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
510 {
511 	EVENTDEV_INIT_FUNC_TRACE();
512 
513 	RTE_SET_USED(dev);
514 	RTE_SET_USED(queue_id);
515 }
516 
517 static void
518 dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
519 				 struct rte_event_port_conf *port_conf)
520 {
521 	EVENTDEV_INIT_FUNC_TRACE();
522 
523 	RTE_SET_USED(dev);
524 	RTE_SET_USED(port_id);
525 
526 	port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
527 	port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
528 	port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
529 }
530 
531 static int
532 dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
533 		      const struct rte_event_port_conf *port_conf)
534 {
535 	struct dpaa_eventdev *eventdev = dev->data->dev_private;
536 
537 	EVENTDEV_INIT_FUNC_TRACE();
538 
539 	RTE_SET_USED(port_conf);
540 	dev->data->ports[port_id] = &eventdev->ports[port_id];
541 
542 	return 0;
543 }
544 
/* Port release: ports are part of the device private data; no-op. */
static void
dpaa_event_port_release(void *port)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port);
}
552 
553 static int
554 dpaa_event_port_link(struct rte_eventdev *dev, void *port,
555 		     const uint8_t queues[], const uint8_t priorities[],
556 		     uint16_t nb_links)
557 {
558 	struct dpaa_eventdev *priv = dev->data->dev_private;
559 	struct dpaa_port *event_port = (struct dpaa_port *)port;
560 	struct dpaa_eventq *event_queue;
561 	uint8_t eventq_id;
562 	int i;
563 
564 	RTE_SET_USED(dev);
565 	RTE_SET_USED(priorities);
566 
567 	/* First check that input configuration are valid */
568 	for (i = 0; i < nb_links; i++) {
569 		eventq_id = queues[i];
570 		event_queue = &priv->evq_info[eventq_id];
571 		if ((event_queue->event_queue_cfg
572 			& RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
573 			&& (event_queue->event_port)) {
574 			return -EINVAL;
575 		}
576 	}
577 
578 	for (i = 0; i < nb_links; i++) {
579 		eventq_id = queues[i];
580 		event_queue = &priv->evq_info[eventq_id];
581 		event_port->evq_info[i].event_queue_id = eventq_id;
582 		event_port->evq_info[i].ch_id = event_queue->ch_id;
583 		event_queue->event_port = port;
584 	}
585 
586 	event_port->num_linked_evq = event_port->num_linked_evq + i;
587 
588 	return (int)i;
589 }
590 
/* Unlink event queues from a port. Returns the number of queues
 * processed (nb_links), even when nothing was linked.
 */
static int
dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
		       uint8_t queues[], uint16_t nb_links)
{
	int i;
	uint8_t eventq_id;
	struct dpaa_eventq *event_queue;
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;

	/* Nothing linked: report all requested unlinks as already done */
	if (!event_port->num_linked_evq)
		return nb_links;

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		/* NOTE(review): evq_info[] is indexed by queue id here but
		 * filled by link-slot index in dpaa_event_port_link() —
		 * verify these agree for multi-queue links.
		 */
		event_port->evq_info[eventq_id].event_queue_id = -1;
		event_port->evq_info[eventq_id].ch_id = 0;
		event_queue = &priv->evq_info[eventq_id];
		event_queue->event_port = NULL;
	}

	/* NOTE(review): no clamp here — unlinking more queues than were
	 * linked would drive num_linked_evq negative; confirm callers
	 * can't do that.
	 */
	if (event_port->num_linked_evq)
		event_port->num_linked_evq = event_port->num_linked_evq - i;

	return (int)i;
}
617 
618 static int
619 dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
620 				   const struct rte_eth_dev *eth_dev,
621 				   uint32_t *caps)
622 {
623 	const char *ethdev_driver = eth_dev->device->driver->name;
624 
625 	EVENTDEV_INIT_FUNC_TRACE();
626 
627 	RTE_SET_USED(dev);
628 
629 	if (!strcmp(ethdev_driver, "net_dpaa"))
630 		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
631 	else
632 		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
633 
634 	return 0;
635 }
636 
/* Attach ethdev Rx queue(s) to the event queue's pool channel.
 *
 * @rx_queue_id == -1 attaches every Rx queue of the device; on a
 * mid-loop failure, queues attached so far are rolled back.
 * Returns 0 on success or the attach error code.
 */
static int
dpaa_event_eth_rx_adapter_queue_add(
		const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
	int ret, i;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
						     queue_conf);
			if (ret) {
				DPAA_EVENTDEV_ERR(
					"Event Queue attach failed:%d\n", ret);
				goto detach_configured_queues;
			}
		}
		return 0;
	}

	ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
	if (ret)
		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
	return ret;

detach_configured_queues:

	/* Roll back the queues attached before the failure */
	for (i = (i - 1); i >= 0 ; i--)
		dpaa_eth_eventq_detach(eth_dev, i);

	return ret;
}
677 
678 static int
679 dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
680 				    const struct rte_eth_dev *eth_dev,
681 				    int32_t rx_queue_id)
682 {
683 	int ret, i;
684 	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
685 
686 	EVENTDEV_INIT_FUNC_TRACE();
687 
688 	RTE_SET_USED(dev);
689 	if (rx_queue_id == -1) {
690 		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
691 			ret = dpaa_eth_eventq_detach(eth_dev, i);
692 			if (ret)
693 				DPAA_EVENTDEV_ERR(
694 					"Event Queue detach failed:%d\n", ret);
695 		}
696 
697 		return 0;
698 	}
699 
700 	ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
701 	if (ret)
702 		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
703 	return ret;
704 }
705 
/* Rx adapter start: traffic flows as soon as queues are attached; no-op. */
static int
dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
				const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(eth_dev);
	RTE_SET_USED(dev);

	return 0;
}
717 
/* Rx adapter stop: nothing to quiesce beyond queue detach; no-op. */
static int
dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(eth_dev);
	RTE_SET_USED(dev);

	return 0;
}
729 
730 static int
731 dpaa_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
732 			    const struct rte_cryptodev *cdev,
733 			    uint32_t *caps)
734 {
735 	const char *name = cdev->data->name;
736 
737 	EVENTDEV_INIT_FUNC_TRACE();
738 
739 	RTE_SET_USED(dev);
740 
741 	if (!strncmp(name, "dpaa_sec-", 9))
742 		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA_CAP;
743 	else
744 		return -1;
745 
746 	return 0;
747 }
748 
/* Attach every queue pair of @cryptodev to the event queue's pool
 * channel. On a mid-loop failure, queue pairs attached so far are
 * rolled back. Returns 0 on success or the attach error code.
 */
static int
dpaa_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		const struct rte_event *ev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	u16 ch_id = priv->evq_info[ev_qid].ch_id;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
		ret = dpaa_sec_eventq_attach(cryptodev, i,
				ch_id, ev);
		if (ret) {
			DPAA_EVENTDEV_ERR("dpaa_sec_eventq_attach failed: ret %d\n",
				    ret);
			goto fail;
		}
	}
	return 0;
fail:
	/* Roll back queue pairs attached before the failure */
	for (i = (i - 1); i >= 0 ; i--)
		dpaa_sec_eventq_detach(cryptodev, i);

	return ret;
}
777 
778 static int
779 dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
780 		const struct rte_cryptodev *cryptodev,
781 		int32_t rx_queue_id,
782 		const struct rte_event_crypto_adapter_queue_conf *conf)
783 {
784 	struct dpaa_eventdev *priv = dev->data->dev_private;
785 	uint8_t ev_qid = conf->ev.queue_id;
786 	u16 ch_id = priv->evq_info[ev_qid].ch_id;
787 	int ret;
788 
789 	EVENTDEV_INIT_FUNC_TRACE();
790 
791 	if (rx_queue_id == -1)
792 		return dpaa_eventdev_crypto_queue_add_all(dev,
793 				cryptodev, &conf->ev);
794 
795 	ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
796 			ch_id, &conf->ev);
797 	if (ret) {
798 		DPAA_EVENTDEV_ERR(
799 			"dpaa_sec_eventq_attach failed: ret: %d\n", ret);
800 		return ret;
801 	}
802 	return 0;
803 }
804 
805 static int
806 dpaa_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
807 			     const struct rte_cryptodev *cdev)
808 {
809 	int i, ret;
810 
811 	EVENTDEV_INIT_FUNC_TRACE();
812 
813 	RTE_SET_USED(dev);
814 
815 	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
816 		ret = dpaa_sec_eventq_detach(cdev, i);
817 		if (ret) {
818 			DPAA_EVENTDEV_ERR(
819 				"dpaa_sec_eventq_detach failed:ret %d\n", ret);
820 			return ret;
821 		}
822 	}
823 
824 	return 0;
825 }
826 
827 static int
828 dpaa_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
829 			     const struct rte_cryptodev *cryptodev,
830 			     int32_t rx_queue_id)
831 {
832 	int ret;
833 
834 	EVENTDEV_INIT_FUNC_TRACE();
835 
836 	if (rx_queue_id == -1)
837 		return dpaa_eventdev_crypto_queue_del_all(dev, cryptodev);
838 
839 	ret = dpaa_sec_eventq_detach(cryptodev, rx_queue_id);
840 	if (ret) {
841 		DPAA_EVENTDEV_ERR(
842 			"dpaa_sec_eventq_detach failed: ret: %d\n", ret);
843 		return ret;
844 	}
845 
846 	return 0;
847 }
848 
/* Crypto adapter start: queue pairs flow once attached; no-op. */
static int
dpaa_eventdev_crypto_start(const struct rte_eventdev *dev,
			   const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(cryptodev);
	RTE_SET_USED(dev);

	return 0;
}
860 
/* Crypto adapter stop: nothing beyond queue detach to undo; no-op. */
static int
dpaa_eventdev_crypto_stop(const struct rte_eventdev *dev,
			  const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(cryptodev);
	RTE_SET_USED(dev);

	return 0;
}
872 
873 static int
874 dpaa_eventdev_tx_adapter_create(uint8_t id,
875 				 const struct rte_eventdev *dev)
876 {
877 	RTE_SET_USED(id);
878 	RTE_SET_USED(dev);
879 
880 	/* Nothing to do. Simply return. */
881 	return 0;
882 }
883 
884 static int
885 dpaa_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
886 			       const struct rte_eth_dev *eth_dev,
887 			       uint32_t *caps)
888 {
889 	RTE_SET_USED(dev);
890 	RTE_SET_USED(eth_dev);
891 
892 	*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
893 	return 0;
894 }
895 
896 static uint16_t
897 dpaa_eventdev_txa_enqueue_same_dest(void *port,
898 				     struct rte_event ev[],
899 				     uint16_t nb_events)
900 {
901 	struct rte_mbuf *m[DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
902 	uint8_t qid, i;
903 
904 	RTE_SET_USED(port);
905 
906 	m0 = (struct rte_mbuf *)ev[0].mbuf;
907 	qid = rte_event_eth_tx_adapter_txq_get(m0);
908 
909 	for (i = 0; i < nb_events; i++)
910 		m[i] = (struct rte_mbuf *)ev[i].mbuf;
911 
912 	return rte_eth_tx_burst(m0->port, qid, m, nb_events);
913 }
914 
915 static uint16_t
916 dpaa_eventdev_txa_enqueue(void *port,
917 			   struct rte_event ev[],
918 			   uint16_t nb_events)
919 {
920 	struct rte_mbuf *m = (struct rte_mbuf *)ev[0].mbuf;
921 	uint8_t qid, i;
922 
923 	RTE_SET_USED(port);
924 
925 	for (i = 0; i < nb_events; i++) {
926 		qid = rte_event_eth_tx_adapter_txq_get(m);
927 		rte_eth_tx_burst(m->port, qid, &m, 1);
928 	}
929 
930 	return nb_events;
931 }
932 
/* Eventdev ops table shared by all dpaa event devices. Note that
 * dpaa_event_dev_create() overwrites .timeout_ticks in interrupt mode.
 */
static struct eventdev_ops dpaa_eventdev_ops = {
	.dev_infos_get    = dpaa_event_dev_info_get,
	.dev_configure    = dpaa_event_dev_configure,
	.dev_start        = dpaa_event_dev_start,
	.dev_stop         = dpaa_event_dev_stop,
	.dev_close        = dpaa_event_dev_close,
	.queue_def_conf   = dpaa_event_queue_def_conf,
	.queue_setup      = dpaa_event_queue_setup,
	.queue_release    = dpaa_event_queue_release,
	.port_def_conf    = dpaa_event_port_default_conf_get,
	.port_setup       = dpaa_event_port_setup,
	.port_release       = dpaa_event_port_release,
	.port_link        = dpaa_event_port_link,
	.port_unlink      = dpaa_event_port_unlink,
	.timeout_ticks    = dpaa_event_dequeue_timeout_ticks,
	.eth_rx_adapter_caps_get	= dpaa_event_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add	= dpaa_event_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del	= dpaa_event_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start		= dpaa_event_eth_rx_adapter_start,
	.eth_rx_adapter_stop		= dpaa_event_eth_rx_adapter_stop,
	.eth_tx_adapter_caps_get	= dpaa_eventdev_tx_adapter_caps,
	.eth_tx_adapter_create		= dpaa_eventdev_tx_adapter_create,
	.crypto_adapter_caps_get	= dpaa_eventdev_crypto_caps_get,
	.crypto_adapter_queue_pair_add	= dpaa_eventdev_crypto_queue_add,
	.crypto_adapter_queue_pair_del	= dpaa_eventdev_crypto_queue_del,
	.crypto_adapter_start		= dpaa_eventdev_crypto_start,
	.crypto_adapter_stop		= dpaa_eventdev_crypto_stop,
};
961 
962 static int flag_check_handler(__rte_unused const char *key,
963 		const char *value, __rte_unused void *opaque)
964 {
965 	if (strcmp(value, "1"))
966 		return -1;
967 
968 	return 0;
969 }
970 
971 static int
972 dpaa_event_check_flags(const char *params)
973 {
974 	struct rte_kvargs *kvlist;
975 
976 	if (params == NULL || params[0] == '\0')
977 		return 0;
978 
979 	kvlist = rte_kvargs_parse(params, NULL);
980 	if (kvlist == NULL)
981 		return 0;
982 
983 	if (!rte_kvargs_count(kvlist, DISABLE_INTR_MODE)) {
984 		rte_kvargs_free(kvlist);
985 		return 0;
986 	}
987 	/* INTR MODE is disabled when there's key-value pair: disable_intr = 1*/
988 	if (rte_kvargs_process(kvlist, DISABLE_INTR_MODE,
989 				flag_check_handler, NULL) < 0) {
990 		rte_kvargs_free(kvlist);
991 		return 0;
992 	}
993 	rte_kvargs_free(kvlist);
994 
995 	return 1;
996 }
997 
/* Create the dpaa eventdev vdev instance: allocate the device, install
 * the ops table and fast-path handlers (poll-mode by default, or the
 * interrupt-mode variants unless disable_intr=1 was passed).
 * Returns 0 on success, -EFAULT if the vdev could not be created.
 */
static int
dpaa_event_dev_create(const char *name, const char *params, struct rte_vdev_device *vdev)
{
	struct rte_eventdev *eventdev;
	struct dpaa_eventdev *priv;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa_eventdev),
					   rte_socket_id(), vdev);
	if (eventdev == NULL) {
		DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}
	priv = eventdev->data->dev_private;

	eventdev->dev_ops       = &dpaa_eventdev_ops;
	eventdev->enqueue       = dpaa_event_enqueue;
	eventdev->enqueue_burst = dpaa_event_enqueue_burst;

	if (dpaa_event_check_flags(params)) {
		eventdev->dequeue	= dpaa_event_dequeue;
		eventdev->dequeue_burst = dpaa_event_dequeue_burst;
	} else {
		priv->intr_mode = 1;
		/* NOTE(review): this writes through the shared static ops
		 * table, affecting every instance using dpaa_eventdev_ops —
		 * confirm intended for multi-instance setups.
		 */
		eventdev->dev_ops->timeout_ticks =
				dpaa_event_dequeue_timeout_ticks_intr;
		eventdev->dequeue	= dpaa_event_dequeue_intr;
		eventdev->dequeue_burst = dpaa_event_dequeue_burst_intr;
	}
	eventdev->txa_enqueue = dpaa_eventdev_txa_enqueue;
	eventdev->txa_enqueue_same_dest	= dpaa_eventdev_txa_enqueue_same_dest;

	DPAA_EVENTDEV_INFO("%s eventdev added", name);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto done;

	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;

done:
	event_dev_probing_finish(eventdev);
	return 0;
fail:
	return -EFAULT;
}
1044 
/* vdev probe entry: forward the device name and argument string to
 * dpaa_event_dev_create().
 */
static int
dpaa_event_dev_probe(struct rte_vdev_device *vdev)
{
	const char *name = rte_vdev_device_name(vdev);
	const char *params;

	DPAA_EVENTDEV_INFO("Initializing %s", name);

	params = rte_vdev_device_args(vdev);

	return dpaa_event_dev_create(name, params, vdev);
}
1058 
/* vdev remove entry: tear down the named eventdev instance. */
static int
dpaa_event_dev_remove(struct rte_vdev_device *vdev)
{
	const char *name = rte_vdev_device_name(vdev);

	DPAA_EVENTDEV_INFO("Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}
1069 
/* Virtual device driver registration for the dpaa event PMD, plus the
 * documented vdev argument (disable_intr=1 selects poll-mode dequeue).
 */
static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
	.probe = dpaa_event_dev_probe,
	.remove = dpaa_event_dev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
RTE_PMD_REGISTER_PARAM_STRING(EVENTDEV_NAME_DPAA_PMD,
		DISABLE_INTR_MODE "=<int>");
1078