xref: /dpdk/drivers/event/dpaa/dpaa_eventdev.c (revision e1b07dd581cb487e1138e60c21159c499173352e)
1 /*   SPDX-License-Identifier:        BSD-3-Clause
2  *   Copyright 2017-2019 NXP
3  */
4 
5 #include <assert.h>
6 #include <stdio.h>
7 #include <stdbool.h>
8 #include <errno.h>
9 #include <stdint.h>
10 #include <string.h>
11 #include <sys/epoll.h>
12 
13 #include <rte_atomic.h>
14 #include <rte_byteorder.h>
15 #include <rte_common.h>
16 #include <rte_debug.h>
17 #include <dev_driver.h>
18 #include <rte_eal.h>
19 #include <rte_lcore.h>
20 #include <rte_log.h>
21 #include <rte_malloc.h>
22 #include <rte_memcpy.h>
23 #include <rte_memory.h>
24 #include <rte_memzone.h>
25 #include <rte_pci.h>
26 #include <rte_eventdev.h>
27 #include <eventdev_pmd_vdev.h>
28 #include <rte_ethdev.h>
29 #include <rte_event_crypto_adapter.h>
30 #include <rte_event_eth_rx_adapter.h>
31 #include <rte_event_eth_tx_adapter.h>
32 #include <cryptodev_pmd.h>
33 #include <bus_dpaa_driver.h>
34 #include <rte_dpaa_logs.h>
35 #include <rte_cycles.h>
36 #include <rte_kvargs.h>
37 
38 #include <dpaa_ethdev.h>
39 #include <dpaa_sec_event.h>
40 #include "dpaa_eventdev.h"
41 #include <dpaa_mempool.h>
42 
43 /*
44  * Clarifications
45  * Evendev = Virtual Instance for SoC
46  * Eventport = Portal Instance
47  * Eventqueue = Channel Instance
48  * 1 Eventdev can have N Eventqueue
49  */
50 RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_eventdev, NOTICE);
51 #define RTE_LOGTYPE_DPAA_EVENTDEV dpaa_logtype_eventdev
52 
53 #define DISABLE_INTR_MODE "disable_intr"
54 
55 static int
56 dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
57 				 uint64_t *timeout_ticks)
58 {
59 	EVENTDEV_INIT_FUNC_TRACE();
60 
61 	RTE_SET_USED(dev);
62 
63 	uint64_t cycles_per_second;
64 
65 	cycles_per_second = rte_get_timer_hz();
66 	*timeout_ticks = (ns * cycles_per_second) / NS_PER_S;
67 
68 	return 0;
69 }
70 
71 static int
72 dpaa_event_dequeue_timeout_ticks_intr(struct rte_eventdev *dev, uint64_t ns,
73 				 uint64_t *timeout_ticks)
74 {
75 	RTE_SET_USED(dev);
76 
77 	*timeout_ticks = ns/1000;
78 	return 0;
79 }
80 
81 static void
82 dpaa_eventq_portal_add(u16 ch_id)
83 {
84 	uint32_t sdqcr;
85 
86 	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
87 	qman_static_dequeue_add(sdqcr, NULL);
88 }
89 
/*
 * Event "enqueue": DPAA forwards events in hardware, so the only work
 * done here is releasing atomic DQRR contexts held by the application
 * (RTE_EVENT_OP_RELEASE); all other ops are accepted as no-ops.
 * Always reports all nb_events as consumed.
 */
static uint16_t
dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
			 uint16_t nb_events)
{
	uint16_t i;
	struct rte_mbuf *mbuf;

	RTE_SET_USED(port);
	/*Release all the contexts saved previously*/
	for (i = 0; i < nb_events; i++) {
		switch (ev[i].op) {
		case RTE_EVENT_OP_RELEASE:
			/* Consume the DQRR entry recorded at dequeue time.
			 * NOTE(review): the DCA index comes from
			 * ev[i].impl_opaque, but the held-mbuf bookkeeping
			 * below uses the loop counter i — these agree only
			 * when events are released in dequeue order;
			 * confirm that is the intended contract.
			 */
			qman_dca_index(ev[i].impl_opaque, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
			break;
		default:
			break;
		}
	}

	return nb_events;
}
115 
/*
 * Consume the 4-byte UIO interrupt notification from @fd when select()
 * marked it readable in @fdset, so the next select() does not return
 * immediately on a stale notification.
 */
static void drain_4_bytes(int fd, fd_set *fdset)
{
	if (FD_ISSET(fd, fdset)) {
		/* drain 4 bytes */
		uint32_t junk;
		/* Read from the descriptor that was actually tested above
		 * instead of re-querying qman_thread_fd(); the two were
		 * only coincidentally the same for the existing caller.
		 */
		ssize_t sjunk = read(fd, &junk, sizeof(junk));

		if (sjunk != sizeof(junk))
			DPAA_EVENTDEV_ERR("UIO irq read error");
	}
}
126 
/*
 * Sleep until the QMan portal raises a dequeue-ready interrupt or
 * @timeout_ticks (microseconds) elapses. Returns select()'s result:
 * >0 when the portal fd became readable, 0 on timeout, <0 on error.
 */
static inline int
dpaa_event_dequeue_wait(uint64_t timeout_ticks)
{
	int fd_qman, nfds;
	int ret;
	fd_set readset;

	/* Go into (and back out of) IRQ mode for each select,
	 * it simplifies exit-path considerations and other
	 * potential nastiness.
	 */
	struct timeval tv = {
		.tv_sec = timeout_ticks / 1000000,
		.tv_usec = timeout_ticks % 1000000
	};

	fd_qman = qman_thread_fd();
	nfds = fd_qman + 1;
	FD_ZERO(&readset);
	FD_SET(fd_qman, &readset);

	/* Arm the dequeue-ready interrupt source before sleeping. */
	qman_irqsource_add(QM_PIRQ_DQRI);

	ret = select(nfds, &readset, NULL, NULL, &tv);
	if (ret < 0)
		return ret;
	/* Calling irqsource_remove() prior to thread_irq()
	 * means thread_irq() will not process whatever caused
	 * the interrupts, however it does ensure that, once
	 * thread_irq() re-enables interrupts, they won't fire
	 * again immediately.
	 */
	qman_irqsource_remove(~0);
	drain_4_bytes(fd_qman, &readset);
	qman_thread_irq();

	return ret;
}
165 
/*
 * Polling-mode dequeue: lazily affine the calling thread to a QMan
 * portal, release any held atomic DQRR contexts, then busy-poll the
 * portal until frames arrive or the timeout (in timer cycles) expires.
 * Returns the number of events filled into @ev.
 */
static uint16_t
dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
			 uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal.
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	/* A zero per-call timeout falls back to the port default
	 * (stored in timer cycles for polling mode by dev_configure).
	 */
	if (timeout_ticks)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	/* Busy-poll until frames arrive or the deadline passes. */
	wait_time_ticks += rte_get_timer_cycles();
	do {
		/* Lets dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (num_frames)
			break;
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}
229 
/*
 * Interrupt-mode dequeue: same portal affinity and atomic-context
 * release as the polling variant, but instead of spinning it sleeps in
 * dpaa_event_dequeue_wait() (timeout in microseconds) and retries a
 * dequeue whenever the portal interrupt fires.
 * Returns the number of events filled into @ev.
 */
static uint16_t
dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
			      uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i, irq = 0;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal.
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	if (timeout_ticks)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	/* With a non-zero timeout the loop exits via break (event found,
	 * or wait timed out); the cur_ticks condition only terminates the
	 * wait_time_ticks == 0 case, where it is immediately false.
	 */
	do {
		/* Lets dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (irq)
			irq = 0;
		if (num_frames)
			break;
		if (wait_time_ticks) { /* wait for time */
			if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
				irq = 1;
				continue;
			}
			break; /* no event after waiting */
		}
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}
301 
302 static void
303 dpaa_event_dev_info_get(struct rte_eventdev *dev,
304 			struct rte_event_dev_info *dev_info)
305 {
306 	EVENTDEV_INIT_FUNC_TRACE();
307 
308 	RTE_SET_USED(dev);
309 	dev_info->driver_name = "event_dpaa1";
310 	dev_info->min_dequeue_timeout_ns =
311 		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
312 	dev_info->max_dequeue_timeout_ns =
313 		DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
314 	dev_info->dequeue_timeout_ns =
315 		DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
316 	dev_info->max_event_queues =
317 		DPAA_EVENT_MAX_QUEUES;
318 	dev_info->max_event_queue_flows =
319 		DPAA_EVENT_MAX_QUEUE_FLOWS;
320 	dev_info->max_event_queue_priority_levels =
321 		DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
322 	dev_info->max_event_priority_levels =
323 		DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
324 	dev_info->max_event_ports =
325 		DPAA_EVENT_MAX_EVENT_PORT;
326 	dev_info->max_event_port_dequeue_depth =
327 		DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
328 	dev_info->max_event_port_enqueue_depth =
329 		DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
330 	/*
331 	 * TODO: Need to find out that how to fetch this info
332 	 * from kernel or somewhere else.
333 	 */
334 	dev_info->max_num_events =
335 		DPAA_EVENT_MAX_NUM_EVENTS;
336 	dev_info->event_dev_cap =
337 		RTE_EVENT_DEV_CAP_ATOMIC |
338 		RTE_EVENT_DEV_CAP_PARALLEL |
339 		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
340 		RTE_EVENT_DEV_CAP_BURST_MODE |
341 		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
342 		RTE_EVENT_DEV_CAP_NONSEQ_MODE |
343 		RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
344 		RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
345 	dev_info->max_profiles_per_port = 1;
346 }
347 
/*
 * Apply the application's rte_event_dev_config: cache the limits in the
 * private data, allocate one QMan pool channel per event queue, reset
 * the event ports, and derive each port's default dequeue timeout.
 * Returns 0 on success or a negative errno on failure.
 */
static int
dpaa_event_dev_configure(const struct rte_eventdev *dev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	int ret, i;
	uint32_t *ch_id;

	EVENTDEV_INIT_FUNC_TRACE();
	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_events_limit = conf->nb_events_limit;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	/* Scratch array for the channel ids returned by QMan; freed on
	 * every exit path below.
	 */
	ch_id = rte_malloc("dpaa-channels",
			  sizeof(uint32_t) * priv->nb_event_queues,
			  RTE_CACHE_LINE_SIZE);
	if (ch_id == NULL) {
		DPAA_EVENTDEV_ERR("Fail to allocate memory for dpaa channels");
		return -ENOMEM;
	}
	/* Create requested event queues within the given event device */
	ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
	if (ret < 0) {
		DPAA_EVENTDEV_ERR("qman_alloc_pool_range %u, err =%d",
				 priv->nb_event_queues, ret);
		rte_free(ch_id);
		return ret;
	}
	for (i = 0; i < priv->nb_event_queues; i++)
		priv->evq_info[i].ch_id = (u16)ch_id[i];

	/* Lets prepare event ports */
	memset(&priv->ports[0], 0,
	      sizeof(struct dpaa_port) * priv->nb_event_ports);

	/* Check dequeue timeout method is per dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * Use timeout value as given in dequeue operation.
		 * So invalidating this timeout value.
		 */
		priv->dequeue_timeout_ns = 0;

	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	/* Per-port default wait: microseconds in interrupt mode, timer
	 * cycles in polling mode (despite the timeout_us field name).
	 */
	for (i = 0; i < priv->nb_event_ports; i++) {
		if (priv->intr_mode) {
			priv->ports[i].timeout_us =
				priv->dequeue_timeout_ns/1000;
		} else {
			uint64_t cycles_per_second;

			cycles_per_second = rte_get_timer_hz();
			priv->ports[i].timeout_us =
				(priv->dequeue_timeout_ns * cycles_per_second)
					/ NS_PER_S;
		}
	}

	/*
	 * TODO: Currently portals are affined with threads. Maximum threads
	 * can be created equals to number of lcore.
	 */
	rte_free(ch_id);
	DPAA_EVENTDEV_INFO("Configured eventdev devid=%d", dev->data->dev_id);

	return 0;
}
425 
/* Device start hook: nothing to do, portals are set up lazily. */
static int
dpaa_event_dev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}
434 
/* Device stop hook: no device-level action is required. */
static void
dpaa_event_dev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);
}
441 
/* Device close hook: no resources to tear down here. */
static int
dpaa_event_dev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}
450 
451 static void
452 dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
453 			  struct rte_event_queue_conf *queue_conf)
454 {
455 	EVENTDEV_INIT_FUNC_TRACE();
456 
457 	RTE_SET_USED(dev);
458 	RTE_SET_USED(queue_id);
459 
460 	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
461 	queue_conf->nb_atomic_flows = DPAA_EVENT_QUEUE_ATOMIC_FLOWS;
462 	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
463 	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
464 }
465 
466 static int
467 dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
468 		       const struct rte_event_queue_conf *queue_conf)
469 {
470 	struct dpaa_eventdev *priv = dev->data->dev_private;
471 	struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];
472 
473 	EVENTDEV_INIT_FUNC_TRACE();
474 
475 	switch (queue_conf->schedule_type) {
476 	case RTE_SCHED_TYPE_PARALLEL:
477 	case RTE_SCHED_TYPE_ATOMIC:
478 		break;
479 	case RTE_SCHED_TYPE_ORDERED:
480 		DPAA_EVENTDEV_ERR("Schedule type is not supported.");
481 		return -1;
482 	}
483 	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
484 	evq_info->event_queue_id = queue_id;
485 
486 	return 0;
487 }
488 
489 static void
490 dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
491 {
492 	EVENTDEV_INIT_FUNC_TRACE();
493 
494 	RTE_SET_USED(dev);
495 	RTE_SET_USED(queue_id);
496 }
497 
498 static void
499 dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
500 				 struct rte_event_port_conf *port_conf)
501 {
502 	EVENTDEV_INIT_FUNC_TRACE();
503 
504 	RTE_SET_USED(dev);
505 	RTE_SET_USED(port_id);
506 
507 	port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
508 	port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
509 	port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
510 }
511 
512 static int
513 dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
514 		      const struct rte_event_port_conf *port_conf)
515 {
516 	struct dpaa_eventdev *eventdev = dev->data->dev_private;
517 
518 	EVENTDEV_INIT_FUNC_TRACE();
519 
520 	RTE_SET_USED(port_conf);
521 	dev->data->ports[port_id] = &eventdev->ports[port_id];
522 
523 	return 0;
524 }
525 
/* Port release hook: port storage lives in the device private data. */
static void
dpaa_event_port_release(void *port)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port);
}
533 
/*
 * Link up to @nb_links event queues to @port. Priorities are ignored.
 * Returns the number of queues linked, or -EINVAL when a SINGLE_LINK
 * queue is already bound to another port.
 */
static int
dpaa_event_port_link(struct rte_eventdev *dev, void *port,
		     const uint8_t queues[], const uint8_t priorities[],
		     uint16_t nb_links)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;
	struct dpaa_eventq *event_queue;
	uint8_t eventq_id;
	int i;

	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	/* First check that input configuration are valid */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		/* A SINGLE_LINK queue may belong to at most one port. */
		if ((event_queue->event_queue_cfg
			& RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
			&& (event_queue->event_port)) {
			return -EINVAL;
		}
	}

	/* Record each linked queue's channel in the port's slot table.
	 * NOTE(review): slots are filled from index 0, so a second link
	 * call without an intervening unlink overwrites earlier entries
	 * rather than appending — confirm callers never rely on that.
	 */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		event_port->evq_info[i].event_queue_id = eventq_id;
		event_port->evq_info[i].ch_id = event_queue->ch_id;
		event_queue->event_port = port;
	}

	/* i == nb_links here: account for the newly added links. */
	event_port->num_linked_evq = event_port->num_linked_evq + i;

	return (int)i;
}
571 
/*
 * Unlink @nb_links queues from @port. Returns the number of queues
 * unlinked; reports all of them as unlinked immediately when the port
 * holds no links.
 */
static int
dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
		       uint8_t queues[], uint16_t nb_links)
{
	int i;
	uint8_t eventq_id;
	struct dpaa_eventq *event_queue;
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;

	if (!event_port->num_linked_evq)
		return nb_links;

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		/* NOTE(review): the port slot table is indexed here by
		 * queue id, while port_link fills it by link position —
		 * these coincide only when queue ids match link order;
		 * confirm this is the intended pairing.
		 */
		event_port->evq_info[eventq_id].event_queue_id = -1;
		event_port->evq_info[eventq_id].ch_id = 0;
		event_queue = &priv->evq_info[eventq_id];
		event_queue->event_port = NULL;
	}

	if (event_port->num_linked_evq)
		event_port->num_linked_evq = event_port->num_linked_evq - i;

	return (int)i;
}
598 
599 static int
600 dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
601 				   const struct rte_eth_dev *eth_dev,
602 				   uint32_t *caps)
603 {
604 	const char *ethdev_driver = eth_dev->device->driver->name;
605 
606 	EVENTDEV_INIT_FUNC_TRACE();
607 
608 	RTE_SET_USED(dev);
609 
610 	if (!strcmp(ethdev_driver, "net_dpaa"))
611 		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
612 	else
613 		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
614 
615 	return 0;
616 }
617 
/*
 * Attach one Rx queue (or all queues when rx_queue_id == -1) of
 * @eth_dev to the event queue's channel so received frames arrive as
 * events. On a partial failure in the all-queues path, previously
 * attached queues are rolled back before returning the error.
 */
static int
dpaa_event_eth_rx_adapter_queue_add(
		const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
	int ret, i;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
						     queue_conf);
			if (ret) {
				DPAA_EVENTDEV_ERR(
					"Event Queue attach failed:%d", ret);
				goto detach_configured_queues;
			}
		}
		return 0;
	}

	ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
	if (ret)
		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_attach failed:%d", ret);
	return ret;

detach_configured_queues:

	/* Undo the queues attached before the failure at index i. */
	for (i = (i - 1); i >= 0 ; i--)
		dpaa_eth_eventq_detach(eth_dev, i);

	return ret;
}
658 
659 static int
660 dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
661 				    const struct rte_eth_dev *eth_dev,
662 				    int32_t rx_queue_id)
663 {
664 	int ret, i;
665 	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
666 
667 	EVENTDEV_INIT_FUNC_TRACE();
668 
669 	RTE_SET_USED(dev);
670 	if (rx_queue_id == -1) {
671 		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
672 			ret = dpaa_eth_eventq_detach(eth_dev, i);
673 			if (ret)
674 				DPAA_EVENTDEV_ERR(
675 					"Event Queue detach failed:%d", ret);
676 		}
677 
678 		return 0;
679 	}
680 
681 	ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
682 	if (ret)
683 		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_detach failed:%d", ret);
684 	return ret;
685 }
686 
/* Rx adapter start hook: nothing to do for the internal port. */
static int
dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
				const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}
698 
/* Rx adapter stop hook: nothing to do for the internal port. */
static int
dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}
710 
711 static int
712 dpaa_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
713 			    const struct rte_cryptodev *cdev,
714 			    uint32_t *caps)
715 {
716 	const char *name = cdev->data->name;
717 
718 	EVENTDEV_INIT_FUNC_TRACE();
719 
720 	RTE_SET_USED(dev);
721 
722 	if (!strncmp(name, "dpaa_sec-", 9))
723 		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA_CAP;
724 	else
725 		return -1;
726 
727 	return 0;
728 }
729 
/*
 * Attach every queue pair of @cryptodev to the event queue's channel.
 * On a partial failure, queue pairs attached so far are detached again
 * before returning the error.
 */
static int
dpaa_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		const struct rte_event *ev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	u16 ch_id = priv->evq_info[ev_qid].ch_id;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
		ret = dpaa_sec_eventq_attach(cryptodev, i,
				ch_id, ev);
		if (ret) {
			DPAA_EVENTDEV_ERR("dpaa_sec_eventq_attach failed: ret %d",
				    ret);
			goto fail;
		}
	}
	return 0;
fail:
	/* Roll back the queue pairs attached before the failure. */
	for (i = (i - 1); i >= 0 ; i--)
		dpaa_sec_eventq_detach(cryptodev, i);

	return ret;
}
758 
759 static int
760 dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
761 		const struct rte_cryptodev *cryptodev,
762 		int32_t rx_queue_id,
763 		const struct rte_event_crypto_adapter_queue_conf *conf)
764 {
765 	struct dpaa_eventdev *priv = dev->data->dev_private;
766 	uint8_t ev_qid = conf->ev.queue_id;
767 	u16 ch_id = priv->evq_info[ev_qid].ch_id;
768 	int ret;
769 
770 	EVENTDEV_INIT_FUNC_TRACE();
771 
772 	if (rx_queue_id == -1)
773 		return dpaa_eventdev_crypto_queue_add_all(dev,
774 				cryptodev, &conf->ev);
775 
776 	ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
777 			ch_id, &conf->ev);
778 	if (ret) {
779 		DPAA_EVENTDEV_ERR(
780 			"dpaa_sec_eventq_attach failed: ret: %d", ret);
781 		return ret;
782 	}
783 	return 0;
784 }
785 
786 static int
787 dpaa_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
788 			     const struct rte_cryptodev *cdev)
789 {
790 	int i, ret;
791 
792 	EVENTDEV_INIT_FUNC_TRACE();
793 
794 	RTE_SET_USED(dev);
795 
796 	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
797 		ret = dpaa_sec_eventq_detach(cdev, i);
798 		if (ret) {
799 			DPAA_EVENTDEV_ERR(
800 				"dpaa_sec_eventq_detach failed:ret %d", ret);
801 			return ret;
802 		}
803 	}
804 
805 	return 0;
806 }
807 
808 static int
809 dpaa_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
810 			     const struct rte_cryptodev *cryptodev,
811 			     int32_t rx_queue_id)
812 {
813 	int ret;
814 
815 	EVENTDEV_INIT_FUNC_TRACE();
816 
817 	if (rx_queue_id == -1)
818 		return dpaa_eventdev_crypto_queue_del_all(dev, cryptodev);
819 
820 	ret = dpaa_sec_eventq_detach(cryptodev, rx_queue_id);
821 	if (ret) {
822 		DPAA_EVENTDEV_ERR(
823 			"dpaa_sec_eventq_detach failed: ret: %d", ret);
824 		return ret;
825 	}
826 
827 	return 0;
828 }
829 
/* Crypto adapter start hook: nothing to do on DPAA. */
static int
dpaa_eventdev_crypto_start(const struct rte_eventdev *dev,
			   const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}
841 
/* Crypto adapter stop hook: nothing to do on DPAA. */
static int
dpaa_eventdev_crypto_stop(const struct rte_eventdev *dev,
			  const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}
853 
854 static int
855 dpaa_eventdev_tx_adapter_create(uint8_t id,
856 				 const struct rte_eventdev *dev)
857 {
858 	RTE_SET_USED(id);
859 	RTE_SET_USED(dev);
860 
861 	/* Nothing to do. Simply return. */
862 	return 0;
863 }
864 
865 static int
866 dpaa_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
867 			       const struct rte_eth_dev *eth_dev,
868 			       uint32_t *caps)
869 {
870 	RTE_SET_USED(dev);
871 	RTE_SET_USED(eth_dev);
872 
873 	*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
874 	return 0;
875 }
876 
877 static uint16_t
878 dpaa_eventdev_txa_enqueue_same_dest(void *port,
879 				     struct rte_event ev[],
880 				     uint16_t nb_events)
881 {
882 	struct rte_mbuf *m[DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
883 	uint8_t qid, i;
884 
885 	RTE_SET_USED(port);
886 
887 	m0 = (struct rte_mbuf *)ev[0].mbuf;
888 	qid = rte_event_eth_tx_adapter_txq_get(m0);
889 
890 	for (i = 0; i < nb_events; i++)
891 		m[i] = (struct rte_mbuf *)ev[i].mbuf;
892 
893 	return rte_eth_tx_burst(m0->port, qid, m, nb_events);
894 }
895 
896 static uint16_t
897 dpaa_eventdev_txa_enqueue(void *port,
898 			   struct rte_event ev[],
899 			   uint16_t nb_events)
900 {
901 	struct rte_mbuf *m = (struct rte_mbuf *)ev[0].mbuf;
902 	uint8_t qid, i;
903 
904 	RTE_SET_USED(port);
905 
906 	for (i = 0; i < nb_events; i++) {
907 		qid = rte_event_eth_tx_adapter_txq_get(m);
908 		rte_eth_tx_burst(m->port, qid, &m, 1);
909 	}
910 
911 	return nb_events;
912 }
913 
/* Driver operations exported to the common eventdev layer.
 * NOTE: timeout_ticks is rewritten to the interrupt-mode variant at
 * probe time when interrupt mode is selected (see dpaa_event_dev_create).
 */
static struct eventdev_ops dpaa_eventdev_ops = {
	.dev_infos_get    = dpaa_event_dev_info_get,
	.dev_configure    = dpaa_event_dev_configure,
	.dev_start        = dpaa_event_dev_start,
	.dev_stop         = dpaa_event_dev_stop,
	.dev_close        = dpaa_event_dev_close,
	.queue_def_conf   = dpaa_event_queue_def_conf,
	.queue_setup      = dpaa_event_queue_setup,
	.queue_release    = dpaa_event_queue_release,
	.port_def_conf    = dpaa_event_port_default_conf_get,
	.port_setup       = dpaa_event_port_setup,
	.port_release       = dpaa_event_port_release,
	.port_link        = dpaa_event_port_link,
	.port_unlink      = dpaa_event_port_unlink,
	.timeout_ticks    = dpaa_event_dequeue_timeout_ticks,
	.eth_rx_adapter_caps_get	= dpaa_event_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add	= dpaa_event_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del	= dpaa_event_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start		= dpaa_event_eth_rx_adapter_start,
	.eth_rx_adapter_stop		= dpaa_event_eth_rx_adapter_stop,
	.eth_tx_adapter_caps_get	= dpaa_eventdev_tx_adapter_caps,
	.eth_tx_adapter_create		= dpaa_eventdev_tx_adapter_create,
	.crypto_adapter_caps_get	= dpaa_eventdev_crypto_caps_get,
	.crypto_adapter_queue_pair_add	= dpaa_eventdev_crypto_queue_add,
	.crypto_adapter_queue_pair_del	= dpaa_eventdev_crypto_queue_del,
	.crypto_adapter_start		= dpaa_eventdev_crypto_start,
	.crypto_adapter_stop		= dpaa_eventdev_crypto_stop,
};
942 
943 static int flag_check_handler(__rte_unused const char *key,
944 		const char *value, __rte_unused void *opaque)
945 {
946 	if (strcmp(value, "1"))
947 		return -1;
948 
949 	return 0;
950 }
951 
952 static int
953 dpaa_event_check_flags(const char *params)
954 {
955 	struct rte_kvargs *kvlist;
956 
957 	if (params == NULL || params[0] == '\0')
958 		return 0;
959 
960 	kvlist = rte_kvargs_parse(params, NULL);
961 	if (kvlist == NULL)
962 		return 0;
963 
964 	if (!rte_kvargs_count(kvlist, DISABLE_INTR_MODE)) {
965 		rte_kvargs_free(kvlist);
966 		return 0;
967 	}
968 	/* INTR MODE is disabled when there's key-value pair: disable_intr = 1*/
969 	if (rte_kvargs_process(kvlist, DISABLE_INTR_MODE,
970 				flag_check_handler, NULL) < 0) {
971 		rte_kvargs_free(kvlist);
972 		return 0;
973 	}
974 	rte_kvargs_free(kvlist);
975 
976 	return 1;
977 }
978 
/*
 * Allocate and initialize the eventdev instance for vdev @name.
 * Devargs select the dequeue mode: "disable_intr=1" installs the pure
 * polling path, otherwise the interrupt-based dequeue is used (which
 * also rewrites timeout_ticks in the shared static ops table).
 * Returns 0 on success, -EFAULT when the vdev cannot be created.
 */
static int
dpaa_event_dev_create(const char *name, const char *params, struct rte_vdev_device *vdev)
{
	struct rte_eventdev *eventdev;
	struct dpaa_eventdev *priv;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa_eventdev),
					   rte_socket_id(), vdev);
	if (eventdev == NULL) {
		DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}
	priv = eventdev->data->dev_private;

	eventdev->dev_ops       = &dpaa_eventdev_ops;
	eventdev->enqueue_burst = dpaa_event_enqueue_burst;

	if (dpaa_event_check_flags(params))
		eventdev->dequeue_burst = dpaa_event_dequeue_burst;
	else {
		priv->intr_mode = 1;
		/* NOTE: this mutates the static ops shared by all
		 * instances of this PMD, not a per-device copy.
		 */
		eventdev->dev_ops->timeout_ticks =
				dpaa_event_dequeue_timeout_ticks_intr;
		eventdev->dequeue_burst = dpaa_event_dequeue_burst_intr;
	}
	eventdev->txa_enqueue = dpaa_eventdev_txa_enqueue;
	eventdev->txa_enqueue_same_dest	= dpaa_eventdev_txa_enqueue_same_dest;

	DPAA_EVENTDEV_INFO("%s eventdev added", name);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto done;

	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;

done:
	event_dev_probing_finish(eventdev);
	return 0;
fail:
	return -EFAULT;
}
1022 
/* vdev probe hook: create the eventdev instance from the vdev args. */
static int
dpaa_event_dev_probe(struct rte_vdev_device *vdev)
{
	const char *name = rte_vdev_device_name(vdev);
	const char *params;

	DPAA_EVENTDEV_INFO("Initializing %s", name);

	params = rte_vdev_device_args(vdev);

	return dpaa_event_dev_create(name, params, vdev);
}
1036 
/* vdev remove hook: tear down the eventdev instance by name. */
static int
dpaa_event_dev_remove(struct rte_vdev_device *vdev)
{
	const char *name = rte_vdev_device_name(vdev);

	DPAA_EVENTDEV_INFO("Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}
1047 
/* Virtual-device driver hooks for the DPAA1 event PMD. */
static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
	.probe = dpaa_event_dev_probe,
	.remove = dpaa_event_dev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
/* Advertise the accepted devargs (see DISABLE_INTR_MODE). */
RTE_PMD_REGISTER_PARAM_STRING(EVENTDEV_NAME_DPAA_PMD,
		DISABLE_INTR_MODE "=<int>");
1056