xref: /dpdk/drivers/event/dpaa/dpaa_eventdev.c (revision 9e991f217fc8719e38a812dc280dba5f84db9f59)
1 /*   SPDX-License-Identifier:        BSD-3-Clause
2  *   Copyright 2017-2019 NXP
3  */
4 
5 #include <assert.h>
6 #include <stdio.h>
7 #include <stdbool.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <string.h>
11 #include <sys/epoll.h>
12 
13 #include <rte_atomic.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_debug.h>
17 #include <rte_dev.h>
18 #include <rte_eal.h>
19 #include <rte_lcore.h>
20 #include <rte_log.h>
21 #include <rte_malloc.h>
22 #include <rte_memcpy.h>
23 #include <rte_memory.h>
24 #include <rte_memzone.h>
25 #include <rte_pci.h>
26 #include <rte_eventdev.h>
27 #include <rte_eventdev_pmd_vdev.h>
28 #include <rte_ethdev.h>
29 #include <rte_event_eth_rx_adapter.h>
30 #include <rte_event_eth_tx_adapter.h>
31 #include <rte_cryptodev.h>
32 #include <rte_dpaa_bus.h>
33 #include <rte_dpaa_logs.h>
34 #include <rte_cycles.h>
35 #include <rte_kvargs.h>
36 
37 #include <dpaa_ethdev.h>
38 #include <dpaa_sec_event.h>
39 #include "dpaa_eventdev.h"
40 #include <dpaa_mempool.h>
41 
/*
 * Clarifications
 * Eventdev = Virtual Instance for SoC
 * Eventport = Portal Instance
 * Eventqueue = Channel Instance
 * 1 Eventdev can have N Eventqueues
 */
49 
50 #define DISABLE_INTR_MODE "disable_intr"
51 
52 static int
53 dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
54 				 uint64_t *timeout_ticks)
55 {
56 	EVENTDEV_INIT_FUNC_TRACE();
57 
58 	RTE_SET_USED(dev);
59 
60 	uint64_t cycles_per_second;
61 
62 	cycles_per_second = rte_get_timer_hz();
63 	*timeout_ticks = (ns * cycles_per_second) / NS_PER_S;
64 
65 	return 0;
66 }
67 
68 static int
69 dpaa_event_dequeue_timeout_ticks_intr(struct rte_eventdev *dev, uint64_t ns,
70 				 uint64_t *timeout_ticks)
71 {
72 	RTE_SET_USED(dev);
73 
74 	*timeout_ticks = ns/1000;
75 	return 0;
76 }
77 
78 static void
79 dpaa_eventq_portal_add(u16 ch_id)
80 {
81 	uint32_t sdqcr;
82 
83 	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
84 	qman_static_dequeue_add(sdqcr, NULL);
85 }
86 
/*
 * Eventdev enqueue burst.
 *
 * This PMD does not push events to hardware on enqueue; the path only
 * releases atomic contexts (held DQRR entries) for RTE_EVENT_OP_RELEASE
 * events. All other ops are no-ops. Always reports nb_events consumed.
 */
static uint16_t
dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
			 uint16_t nb_events)
{
	uint16_t i;
	struct rte_mbuf *mbuf;

	RTE_SET_USED(port);
	/*Release all the contexts saved previously*/
	for (i = 0; i < nb_events; i++) {
		switch (ev[i].op) {
		case RTE_EVENT_OP_RELEASE:
			qman_dca_index(ev[i].impl_opaque, 0);
			/* NOTE(review): the discrete consumption ack above is
			 * indexed by ev[i].impl_opaque, while the per-lcore
			 * DQRR bookkeeping below is indexed by the event
			 * position 'i' — confirm the two always coincide.
			 */
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
			break;
		default:
			/* NEW/FORWARD need no action here. */
			break;
		}
	}

	return nb_events;
}
112 
/* Single-event enqueue: thin wrapper over the burst path. */
static uint16_t
dpaa_event_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa_event_enqueue_burst(port, ev, 1);
}
118 
/*
 * If fd is flagged readable in fdset, consume the 4-byte UIO interrupt
 * counter so the fd can be select()ed on again.
 *
 * Fix: read from the 'fd' parameter that was just tested with
 * FD_ISSET(), instead of re-calling qman_thread_fd(). Behavior is
 * unchanged for the existing caller (which passes qman_thread_fd()),
 * but the function is now self-consistent.
 */
static void drain_4_bytes(int fd, fd_set *fdset)
{
	if (FD_ISSET(fd, fdset)) {
		/* drain 4 bytes */
		uint32_t junk;
		ssize_t sjunk = read(fd, &junk, sizeof(junk));

		if (sjunk != sizeof(junk))
			DPAA_EVENTDEV_ERR("UIO irq read error");
	}
}
129 
/*
 * Sleep in select() until the qman portal's UIO fd signals a
 * dequeue-ready interrupt or the timeout expires.
 *
 * timeout_ticks is in microseconds here (interrupt-mode path).
 *
 * Returns select()'s result: >0 when the fd became readable, 0 on
 * timeout, <0 on error.
 */
static inline int
dpaa_event_dequeue_wait(uint64_t timeout_ticks)
{
	int fd_qman, nfds;
	int ret;
	fd_set readset;

	/* Go into (and back out of) IRQ mode for each select,
	 * it simplifies exit-path considerations and other
	 * potential nastiness.
	 */
	struct timeval tv = {
		.tv_sec = timeout_ticks / 1000000,
		.tv_usec = timeout_ticks % 1000000
	};

	fd_qman = qman_thread_fd();
	nfds = fd_qman + 1;
	FD_ZERO(&readset);
	FD_SET(fd_qman, &readset);

	/* Arm the dequeue-ready interrupt source before sleeping. */
	qman_irqsource_add(QM_PIRQ_DQRI);

	ret = select(nfds, &readset, NULL, NULL, &tv);
	if (ret < 0)
		return ret;
	/* Calling irqsource_remove() prior to thread_irq()
	 * means thread_irq() will not process whatever caused
	 * the interrupts, however it does ensure that, once
	 * thread_irq() re-enables interrupts, they won't fire
	 * again immediately.
	 */
	qman_irqsource_remove(~0);
	drain_4_bytes(fd_qman, &readset);
	qman_thread_irq();

	return ret;
}
168 
169 static uint16_t
170 dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
171 			 uint16_t nb_events, uint64_t timeout_ticks)
172 {
173 	int ret;
174 	u16 ch_id;
175 	void *buffers[8];
176 	u32 num_frames, i, irq = 0;
177 	uint64_t cur_ticks = 0, wait_time_ticks = 0;
178 	struct dpaa_port *portal = (struct dpaa_port *)port;
179 	struct rte_mbuf *mbuf;
180 
181 	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
182 		/* Affine current thread context to a qman portal */
183 		ret = rte_dpaa_portal_init((void *)0);
184 		if (ret) {
185 			DPAA_EVENTDEV_ERR("Unable to initialize portal");
186 			return ret;
187 		}
188 	}
189 
190 	if (unlikely(!portal->is_port_linked)) {
191 		/*
192 		 * Affine event queue for current thread context
193 		 * to a qman portal.
194 		 */
195 		for (i = 0; i < portal->num_linked_evq; i++) {
196 			ch_id = portal->evq_info[i].ch_id;
197 			dpaa_eventq_portal_add(ch_id);
198 		}
199 		portal->is_port_linked = true;
200 	}
201 
202 	/* Check if there are atomic contexts to be released */
203 	i = 0;
204 	while (DPAA_PER_LCORE_DQRR_SIZE) {
205 		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
206 			qman_dca_index(i, 0);
207 			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
208 			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
209 			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
210 			DPAA_PER_LCORE_DQRR_SIZE--;
211 		}
212 		i++;
213 	}
214 	DPAA_PER_LCORE_DQRR_HELD = 0;
215 
216 	if (timeout_ticks)
217 		wait_time_ticks = timeout_ticks;
218 	else
219 		wait_time_ticks = portal->timeout_us;
220 
221 	wait_time_ticks += rte_get_timer_cycles();
222 	do {
223 		/* Lets dequeue the frames */
224 		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
225 		if (irq)
226 			irq = 0;
227 		if (num_frames)
228 			break;
229 		cur_ticks = rte_get_timer_cycles();
230 	} while (cur_ticks < wait_time_ticks);
231 
232 	return num_frames;
233 }
234 
/* Single-event poll-mode dequeue: thin wrapper over the burst path. */
static uint16_t
dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
}
240 
/*
 * Interrupt-mode burst dequeue.
 *
 * Same per-lcore portal affining and atomic-context release as the
 * poll-mode path, but instead of busy-waiting it sleeps in select()
 * via dpaa_event_dequeue_wait() until the portal's UIO fd signals
 * DQRI. wait_time_ticks is in microseconds on this path (see
 * dpaa_event_dequeue_timeout_ticks_intr).
 */
static uint16_t
dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
			      uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i, irq = 0;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			/* NOTE(review): a negative errno returned from a
			 * uint16_t function is seen by callers as a large
			 * event count — confirm intended.
			 */
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal.
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	if (timeout_ticks)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	do {
		/* Lets dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (irq)
			irq = 0;
		if (num_frames)
			break;
		if (wait_time_ticks) { /* wait for time */
			/* >0 means the fd fired: poll the portal again. */
			if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
				irq = 1;
				continue;
			}
			break; /* no event after waiting */
		}
		/* wait_time_ticks == 0: cur_ticks < 0 is false, so this
		 * exits after a single poll.
		 */
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}
312 
/* Single-event interrupt-mode dequeue: thin wrapper over the burst path. */
static uint16_t
dpaa_event_dequeue_intr(void *port,
			struct rte_event *ev,
			uint64_t timeout_ticks)
{
	return dpaa_event_dequeue_burst_intr(port, ev, 1, timeout_ticks);
}
320 
321 static void
322 dpaa_event_dev_info_get(struct rte_eventdev *dev,
323 			struct rte_event_dev_info *dev_info)
324 {
325 	EVENTDEV_INIT_FUNC_TRACE();
326 
327 	RTE_SET_USED(dev);
328 	dev_info->driver_name = "event_dpaa1";
329 	dev_info->min_dequeue_timeout_ns =
330 		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
331 	dev_info->max_dequeue_timeout_ns =
332 		DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
333 	dev_info->dequeue_timeout_ns =
334 		DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
335 	dev_info->max_event_queues =
336 		DPAA_EVENT_MAX_QUEUES;
337 	dev_info->max_event_queue_flows =
338 		DPAA_EVENT_MAX_QUEUE_FLOWS;
339 	dev_info->max_event_queue_priority_levels =
340 		DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
341 	dev_info->max_event_priority_levels =
342 		DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
343 	dev_info->max_event_ports =
344 		DPAA_EVENT_MAX_EVENT_PORT;
345 	dev_info->max_event_port_dequeue_depth =
346 		DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
347 	dev_info->max_event_port_enqueue_depth =
348 		DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
349 	/*
350 	 * TODO: Need to find out that how to fetch this info
351 	 * from kernel or somewhere else.
352 	 */
353 	dev_info->max_num_events =
354 		DPAA_EVENT_MAX_NUM_EVENTS;
355 	dev_info->event_dev_cap =
356 		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
357 		RTE_EVENT_DEV_CAP_BURST_MODE |
358 		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
359 		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
360 }
361 
362 static int
363 dpaa_event_dev_configure(const struct rte_eventdev *dev)
364 {
365 	struct dpaa_eventdev *priv = dev->data->dev_private;
366 	struct rte_event_dev_config *conf = &dev->data->dev_conf;
367 	int ret, i;
368 	uint32_t *ch_id;
369 
370 	EVENTDEV_INIT_FUNC_TRACE();
371 	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
372 	priv->nb_events_limit = conf->nb_events_limit;
373 	priv->nb_event_queues = conf->nb_event_queues;
374 	priv->nb_event_ports = conf->nb_event_ports;
375 	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
376 	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
377 	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
378 	priv->event_dev_cfg = conf->event_dev_cfg;
379 
380 	ch_id = rte_malloc("dpaa-channels",
381 			  sizeof(uint32_t) * priv->nb_event_queues,
382 			  RTE_CACHE_LINE_SIZE);
383 	if (ch_id == NULL) {
384 		DPAA_EVENTDEV_ERR("Fail to allocate memory for dpaa channels\n");
385 		return -ENOMEM;
386 	}
387 	/* Create requested event queues within the given event device */
388 	ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
389 	if (ret < 0) {
390 		DPAA_EVENTDEV_ERR("qman_alloc_pool_range %u, err =%d\n",
391 				 priv->nb_event_queues, ret);
392 		rte_free(ch_id);
393 		return ret;
394 	}
395 	for (i = 0; i < priv->nb_event_queues; i++)
396 		priv->evq_info[i].ch_id = (u16)ch_id[i];
397 
398 	/* Lets prepare event ports */
399 	memset(&priv->ports[0], 0,
400 	      sizeof(struct dpaa_port) * priv->nb_event_ports);
401 
402 	/* Check dequeue timeout method is per dequeue or global */
403 	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
404 		/*
405 		 * Use timeout value as given in dequeue operation.
406 		 * So invalidating this timeout value.
407 		 */
408 		priv->dequeue_timeout_ns = 0;
409 
410 	} else if (conf->dequeue_timeout_ns == 0) {
411 		priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
412 	} else {
413 		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
414 	}
415 
416 	for (i = 0; i < priv->nb_event_ports; i++) {
417 		if (priv->intr_mode) {
418 			priv->ports[i].timeout_us =
419 				priv->dequeue_timeout_ns/1000;
420 		} else {
421 			uint64_t cycles_per_second;
422 
423 			cycles_per_second = rte_get_timer_hz();
424 			priv->ports[i].timeout_us =
425 				(priv->dequeue_timeout_ns * cycles_per_second)
426 					/ NS_PER_S;
427 		}
428 	}
429 
430 	/*
431 	 * TODO: Currently portals are affined with threads. Maximum threads
432 	 * can be created equals to number of lcore.
433 	 */
434 	rte_free(ch_id);
435 	DPAA_EVENTDEV_INFO("Configured eventdev devid=%d", dev->data->dev_id);
436 
437 	return 0;
438 }
439 
/* Start the event device: no device-level action is needed; portals
 * are set up lazily per lcore on first dequeue.
 */
static int
dpaa_event_dev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}
448 
/* Stop the event device: nothing to quiesce in this PMD. */
static void
dpaa_event_dev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
}
455 
/* Close the event device: no per-device resources to release here. */
static int
dpaa_event_dev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}
464 
465 static void
466 dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
467 			  struct rte_event_queue_conf *queue_conf)
468 {
469 	EVENTDEV_INIT_FUNC_TRACE();
470 
471 	RTE_SET_USED(dev);
472 	RTE_SET_USED(queue_id);
473 
474 	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
475 	queue_conf->nb_atomic_flows = DPAA_EVENT_QUEUE_ATOMIC_FLOWS;
476 	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
477 	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
478 }
479 
480 static int
481 dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
482 		       const struct rte_event_queue_conf *queue_conf)
483 {
484 	struct dpaa_eventdev *priv = dev->data->dev_private;
485 	struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];
486 
487 	EVENTDEV_INIT_FUNC_TRACE();
488 
489 	switch (queue_conf->schedule_type) {
490 	case RTE_SCHED_TYPE_PARALLEL:
491 	case RTE_SCHED_TYPE_ATOMIC:
492 		break;
493 	case RTE_SCHED_TYPE_ORDERED:
494 		DPAA_EVENTDEV_ERR("Schedule type is not supported.");
495 		return -1;
496 	}
497 	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
498 	evq_info->event_queue_id = queue_id;
499 
500 	return 0;
501 }
502 
/* Release an event queue: queue_setup() only records fields in
 * priv->evq_info, so there is nothing to free here.
 */
static void
dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}
511 
/*
 * Report the default event port configuration: the PMD's maximum
 * dequeue/enqueue depths and new-event threshold.
 */
static void
dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
}
525 
/*
 * Set up an event port: point the eventdev port slot at the PMD's
 * per-port private data. port_conf is unused by this PMD.
 */
static int
dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
		      const struct rte_event_port_conf *port_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port_conf);
	dev->data->ports[port_id] = &eventdev->ports[port_id];

	return 0;
}
539 
/* Release an event port: ports point into priv->ports[], nothing is
 * allocated per port, so there is nothing to free.
 */
static void
dpaa_event_port_release(void *port)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port);
}
547 
/*
 * Link event queues to an event port.
 *
 * First validates that no single-link queue is already bound to
 * another port, then records each queue's id and qman channel in the
 * port's evq_info[] slots. Priorities are ignored by this PMD.
 *
 * Returns the number of queues linked, or -EINVAL on a single-link
 * conflict.
 */
static int
dpaa_event_port_link(struct rte_eventdev *dev, void *port,
		     const uint8_t queues[], const uint8_t priorities[],
		     uint16_t nb_links)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;
	struct dpaa_eventq *event_queue;
	uint8_t eventq_id;
	int i;

	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	/* First check that input configuration are valid */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		if ((event_queue->event_queue_cfg
			& RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
			&& (event_queue->event_port)) {
			return -EINVAL;
		}
	}

	/* NOTE(review): slots are filled from evq_info[0] on every call,
	 * yet num_linked_evq below keeps growing — a second link call
	 * would overwrite earlier slots while inflating the count.
	 * Confirm ports are only linked once.
	 */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		event_port->evq_info[i].event_queue_id = eventq_id;
		event_port->evq_info[i].ch_id = event_queue->ch_id;
		event_queue->event_port = port;
	}

	event_port->num_linked_evq = event_port->num_linked_evq + i;

	return (int)i;
}
585 
/*
 * Unlink event queues from an event port.
 *
 * Returns the number of queues processed, or nb_links immediately if
 * the port holds no links.
 */
static int
dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
		       uint8_t queues[], uint16_t nb_links)
{
	int i;
	uint8_t eventq_id;
	struct dpaa_eventq *event_queue;
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;

	if (!event_port->num_linked_evq)
		return nb_links;

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		/* NOTE(review): evq_info[] is indexed by queue id here,
		 * but dpaa_event_port_link() filled it sequentially —
		 * the two only agree when queues are linked in id order.
		 * Confirm.
		 */
		event_port->evq_info[eventq_id].event_queue_id = -1;
		event_port->evq_info[eventq_id].ch_id = 0;
		event_queue = &priv->evq_info[eventq_id];
		event_queue->event_port = NULL;
	}

	if (event_port->num_linked_evq)
		event_port->num_linked_evq = event_port->num_linked_evq - i;

	return (int)i;
}
612 
613 static int
614 dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
615 				   const struct rte_eth_dev *eth_dev,
616 				   uint32_t *caps)
617 {
618 	const char *ethdev_driver = eth_dev->device->driver->name;
619 
620 	EVENTDEV_INIT_FUNC_TRACE();
621 
622 	RTE_SET_USED(dev);
623 
624 	if (!strcmp(ethdev_driver, "net_dpaa"))
625 		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
626 	else
627 		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
628 
629 	return 0;
630 }
631 
/*
 * Attach ethernet Rx queue(s) to the event queue's qman channel.
 *
 * rx_queue_id == -1 attaches every Rx queue of the port; on a partial
 * failure the queues attached so far are detached again.
 *
 * Returns 0 on success or the dpaa_eth_eventq_attach() error code.
 */
static int
dpaa_event_eth_rx_adapter_queue_add(
		const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
	int ret, i;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
						     queue_conf);
			if (ret) {
				DPAA_EVENTDEV_ERR(
					"Event Queue attach failed:%d\n", ret);
				goto detach_configured_queues;
			}
		}
		return 0;
	}

	ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
	if (ret)
		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
	return ret;

detach_configured_queues:

	/* Unwind the queues attached before the failure. */
	for (i = (i - 1); i >= 0 ; i--)
		dpaa_eth_eventq_detach(eth_dev, i);

	return ret;
}
672 
/*
 * Detach ethernet Rx queue(s) from their event queue channel.
 *
 * rx_queue_id == -1 detaches every Rx queue of the port; in that case
 * per-queue failures are logged but the call still returns 0.
 * Otherwise returns the dpaa_eth_eventq_detach() result.
 */
static int
dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
				    const struct rte_eth_dev *eth_dev,
				    int32_t rx_queue_id)
{
	int ret, i;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_detach(eth_dev, i);
			if (ret)
				DPAA_EVENTDEV_ERR(
					"Event Queue detach failed:%d\n", ret);
		}

		return 0;
	}

	ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret)
		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
	return ret;
}
700 
/* Start the Rx adapter: no action required by this PMD. */
static int
dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
				const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}
712 
/* Stop the Rx adapter: no action required by this PMD. */
static int
dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}
724 
725 static int
726 dpaa_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
727 			    const struct rte_cryptodev *cdev,
728 			    uint32_t *caps)
729 {
730 	const char *name = cdev->data->name;
731 
732 	EVENTDEV_INIT_FUNC_TRACE();
733 
734 	RTE_SET_USED(dev);
735 
736 	if (!strncmp(name, "dpaa_sec-", 9))
737 		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA_CAP;
738 	else
739 		return -1;
740 
741 	return 0;
742 }
743 
/*
 * Attach every queue pair of a crypto device to the event queue's qman
 * channel; on a partial failure the pairs attached so far are detached
 * again.
 *
 * Returns 0 on success or the dpaa_sec_eventq_attach() error code.
 */
static int
dpaa_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		const struct rte_event *ev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	u16 ch_id = priv->evq_info[ev_qid].ch_id;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
		ret = dpaa_sec_eventq_attach(cryptodev, i,
				ch_id, ev);
		if (ret) {
			DPAA_EVENTDEV_ERR("dpaa_sec_eventq_attach failed: ret %d\n",
				    ret);
			goto fail;
		}
	}
	return 0;
fail:
	/* Unwind the queue pairs attached before the failure. */
	for (i = (i - 1); i >= 0 ; i--)
		dpaa_sec_eventq_detach(cryptodev, i);

	return ret;
}
772 
773 static int
774 dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
775 		const struct rte_cryptodev *cryptodev,
776 		int32_t rx_queue_id,
777 		const struct rte_event *ev)
778 {
779 	struct dpaa_eventdev *priv = dev->data->dev_private;
780 	uint8_t ev_qid = ev->queue_id;
781 	u16 ch_id = priv->evq_info[ev_qid].ch_id;
782 	int ret;
783 
784 	EVENTDEV_INIT_FUNC_TRACE();
785 
786 	if (rx_queue_id == -1)
787 		return dpaa_eventdev_crypto_queue_add_all(dev,
788 				cryptodev, ev);
789 
790 	ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
791 			ch_id, ev);
792 	if (ret) {
793 		DPAA_EVENTDEV_ERR(
794 			"dpaa_sec_eventq_attach failed: ret: %d\n", ret);
795 		return ret;
796 	}
797 	return 0;
798 }
799 
/*
 * Detach every queue pair of the crypto device from its event queue.
 * Stops at the first failure and returns that error code; 0 on
 * success.
 */
static int
dpaa_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
			     const struct rte_cryptodev *cdev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = dpaa_sec_eventq_detach(cdev, i);
		if (ret) {
			DPAA_EVENTDEV_ERR(
				"dpaa_sec_eventq_detach failed:ret %d\n", ret);
			return ret;
		}
	}

	return 0;
}
821 
822 static int
823 dpaa_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
824 			     const struct rte_cryptodev *cryptodev,
825 			     int32_t rx_queue_id)
826 {
827 	int ret;
828 
829 	EVENTDEV_INIT_FUNC_TRACE();
830 
831 	if (rx_queue_id == -1)
832 		return dpaa_eventdev_crypto_queue_del_all(dev, cryptodev);
833 
834 	ret = dpaa_sec_eventq_detach(cryptodev, rx_queue_id);
835 	if (ret) {
836 		DPAA_EVENTDEV_ERR(
837 			"dpaa_sec_eventq_detach failed: ret: %d\n", ret);
838 		return ret;
839 	}
840 
841 	return 0;
842 }
843 
/* Start the crypto adapter: no action required by this PMD. */
static int
dpaa_eventdev_crypto_start(const struct rte_eventdev *dev,
			   const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}
855 
/* Stop the crypto adapter: no action required by this PMD. */
static int
dpaa_eventdev_crypto_stop(const struct rte_eventdev *dev,
			  const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}
867 
/* Create a Tx adapter instance: transmission goes through the PMD's
 * txa_enqueue handlers, so no per-adapter state is needed.
 */
static int
dpaa_eventdev_tx_adapter_create(uint8_t id,
				 const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);

	/* Nothing to do. Simply return. */
	return 0;
}
878 
/* Report Tx adapter capabilities: this PMD transmits via its internal
 * port enqueue functions (txa_enqueue/txa_enqueue_same_dest).
 */
static int
dpaa_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev,
			       uint32_t *caps)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
	return 0;
}
890 
891 static uint16_t
892 dpaa_eventdev_txa_enqueue_same_dest(void *port,
893 				     struct rte_event ev[],
894 				     uint16_t nb_events)
895 {
896 	struct rte_mbuf *m[DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
897 	uint8_t qid, i;
898 
899 	RTE_SET_USED(port);
900 
901 	m0 = (struct rte_mbuf *)ev[0].mbuf;
902 	qid = rte_event_eth_tx_adapter_txq_get(m0);
903 
904 	for (i = 0; i < nb_events; i++)
905 		m[i] = (struct rte_mbuf *)ev[i].mbuf;
906 
907 	return rte_eth_tx_burst(m0->port, qid, m, nb_events);
908 }
909 
910 static uint16_t
911 dpaa_eventdev_txa_enqueue(void *port,
912 			   struct rte_event ev[],
913 			   uint16_t nb_events)
914 {
915 	struct rte_mbuf *m = (struct rte_mbuf *)ev[0].mbuf;
916 	uint8_t qid, i;
917 
918 	RTE_SET_USED(port);
919 
920 	for (i = 0; i < nb_events; i++) {
921 		qid = rte_event_eth_tx_adapter_txq_get(m);
922 		rte_eth_tx_burst(m->port, qid, &m, 1);
923 	}
924 
925 	return nb_events;
926 }
927 
/* Eventdev PMD operation table shared by all event_dpaa1 instances.
 * Note: dpaa_event_dev_create() patches .timeout_ticks in place when
 * interrupt mode is selected.
 */
static struct rte_eventdev_ops dpaa_eventdev_ops = {
	.dev_infos_get    = dpaa_event_dev_info_get,
	.dev_configure    = dpaa_event_dev_configure,
	.dev_start        = dpaa_event_dev_start,
	.dev_stop         = dpaa_event_dev_stop,
	.dev_close        = dpaa_event_dev_close,
	.queue_def_conf   = dpaa_event_queue_def_conf,
	.queue_setup      = dpaa_event_queue_setup,
	.queue_release    = dpaa_event_queue_release,
	.port_def_conf    = dpaa_event_port_default_conf_get,
	.port_setup       = dpaa_event_port_setup,
	.port_release       = dpaa_event_port_release,
	.port_link        = dpaa_event_port_link,
	.port_unlink      = dpaa_event_port_unlink,
	.timeout_ticks    = dpaa_event_dequeue_timeout_ticks,
	.eth_rx_adapter_caps_get	= dpaa_event_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add	= dpaa_event_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del	= dpaa_event_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start		= dpaa_event_eth_rx_adapter_start,
	.eth_rx_adapter_stop		= dpaa_event_eth_rx_adapter_stop,
	.eth_tx_adapter_caps_get	= dpaa_eventdev_tx_adapter_caps,
	.eth_tx_adapter_create		= dpaa_eventdev_tx_adapter_create,
	.crypto_adapter_caps_get	= dpaa_eventdev_crypto_caps_get,
	.crypto_adapter_queue_pair_add	= dpaa_eventdev_crypto_queue_add,
	.crypto_adapter_queue_pair_del	= dpaa_eventdev_crypto_queue_del,
	.crypto_adapter_start		= dpaa_eventdev_crypto_start,
	.crypto_adapter_stop		= dpaa_eventdev_crypto_stop,
};
956 
957 static int flag_check_handler(__rte_unused const char *key,
958 		const char *value, __rte_unused void *opaque)
959 {
960 	if (strcmp(value, "1"))
961 		return -1;
962 
963 	return 0;
964 }
965 
966 static int
967 dpaa_event_check_flags(const char *params)
968 {
969 	struct rte_kvargs *kvlist;
970 
971 	if (params == NULL || params[0] == '\0')
972 		return 0;
973 
974 	kvlist = rte_kvargs_parse(params, NULL);
975 	if (kvlist == NULL)
976 		return 0;
977 
978 	if (!rte_kvargs_count(kvlist, DISABLE_INTR_MODE)) {
979 		rte_kvargs_free(kvlist);
980 		return 0;
981 	}
982 	/* INTR MODE is disabled when there's key-value pair: disable_intr = 1*/
983 	if (rte_kvargs_process(kvlist, DISABLE_INTR_MODE,
984 				flag_check_handler, NULL) < 0) {
985 		rte_kvargs_free(kvlist);
986 		return 0;
987 	}
988 	rte_kvargs_free(kvlist);
989 
990 	return 1;
991 }
992 
/*
 * Create and initialize an event_dpaa1 vdev instance.
 *
 * Installs the fast-path handlers; the dequeue pair is chosen from the
 * "disable_intr" devarg — interrupt mode is the default, disable_intr=1
 * selects plain poll mode.
 *
 * Returns 0 on success, -EFAULT if the vdev could not be created.
 */
static int
dpaa_event_dev_create(const char *name, const char *params)
{
	struct rte_eventdev *eventdev;
	struct dpaa_eventdev *priv;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}
	priv = eventdev->data->dev_private;

	eventdev->dev_ops       = &dpaa_eventdev_ops;
	eventdev->enqueue       = dpaa_event_enqueue;
	eventdev->enqueue_burst = dpaa_event_enqueue_burst;

	if (dpaa_event_check_flags(params)) {
		/* disable_intr=1: busy-poll dequeue handlers. */
		eventdev->dequeue	= dpaa_event_dequeue;
		eventdev->dequeue_burst = dpaa_event_dequeue_burst;
	} else {
		priv->intr_mode = 1;
		/* NOTE(review): this writes through dev_ops into the shared
		 * static dpaa_eventdev_ops, affecting every instance of this
		 * PMD — confirm single-instance usage is intended.
		 */
		eventdev->dev_ops->timeout_ticks =
				dpaa_event_dequeue_timeout_ticks_intr;
		eventdev->dequeue	= dpaa_event_dequeue_intr;
		eventdev->dequeue_burst = dpaa_event_dequeue_burst_intr;
	}
	eventdev->txa_enqueue = dpaa_eventdev_txa_enqueue;
	eventdev->txa_enqueue_same_dest	= dpaa_eventdev_txa_enqueue_same_dest;

	RTE_LOG(INFO, PMD, "%s eventdev added", name);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;

	return 0;
fail:
	return -EFAULT;
}
1037 
/* vdev probe hook: create the eventdev from the device name and its
 * devargs string.
 */
static int
dpaa_event_dev_probe(struct rte_vdev_device *vdev)
{
	const char *name = rte_vdev_device_name(vdev);

	DPAA_EVENTDEV_INFO("Initializing %s", name);

	return dpaa_event_dev_create(name, rte_vdev_device_args(vdev));
}
1051 
/* vdev remove hook: tear down the named eventdev instance. */
static int
dpaa_event_dev_remove(struct rte_vdev_device *vdev)
{
	const char *name = rte_vdev_device_name(vdev);

	DPAA_EVENTDEV_INFO("Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}
1062 
/* Virtual device driver hooks and registration for the event_dpaa1
 * PMD, including the "disable_intr" devarg it understands.
 */
static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
	.probe = dpaa_event_dev_probe,
	.remove = dpaa_event_dev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
RTE_PMD_REGISTER_PARAM_STRING(EVENTDEV_NAME_DPAA_PMD,
		DISABLE_INTR_MODE "=<int>");
1071