/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017,2019 NXP
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_pci.h>
#include <rte_bus_vdev.h>
#include <rte_ethdev_driver.h>
#include <rte_cryptodev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include <dpaa2_sec_event.h>
#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>

/* Clarifications
 * Eventdev = SoC Instance
 * Eventport = DPIO Instance
 * Eventqueue = DPCON Instance
 * 1 Eventdev can have N Eventqueues
 * Soft Event Flow is DPCI Instance
 */

#define DPAA2_EV_TX_RETRY_COUNT 10000

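/*
 * Illustrative usage sketch (not part of this driver): how an application
 * typically reaches the enqueue/dequeue handlers below once the
 * "event_dpaa2" vdev is probed. Identifiers such as dev_id, port_id,
 * queue_id and m are placeholders chosen for the example.
 *
 *	struct rte_event ev = {
 *		.queue_id = queue_id,
 *		.sched_type = RTE_SCHED_TYPE_PARALLEL,
 *		.mbuf = m,
 *	};
 *	uint16_t sent = rte_event_enqueue_burst(dev_id, port_id, &ev, 1);
 *	uint16_t got = rte_event_dequeue_burst(dev_id, port_id, &ev, 1,
 *					       timeout_ticks);
 *
 * rte_event_enqueue_burst() lands in dpaa2_eventdev_enqueue_burst() and
 * rte_event_dequeue_burst() in dpaa2_eventdev_dequeue_burst().
 */
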
static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			     uint16_t nb_events)
{
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_dpio_dev *dpio_dev;
	uint32_t queue_id = ev[0].queue_id;
	struct dpaa2_eventq *evq_info;
	uint32_t fqid, retry_count;
	struct qbman_swp *swp;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t loop, frames_to_send;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	uint16_t num_tx = 0;
	int i, n, ret;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return 0;
		}
	}
	/* todo - dpaa2_portal shall have dpio_dev - no per thread variable */
	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	evq_info = &dpaa2_portal->evq_info[queue_id];

	while (nb_events) {
		frames_to_send = (nb_events > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_events;

		for (loop = 0; loop < frames_to_send; loop++) {
			const struct rte_event *event = &ev[num_tx + loop];

			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
			else
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

			if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
				&& event->mbuf->seqn) {
				uint8_t dqrr_index = event->mbuf->seqn - 1;

				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
						      dqrr_index, 0);
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
			}

			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

			/*
			 * todo - need to align with hw context data
			 * to avoid copy
			 */
			struct rte_event *ev_temp = rte_malloc(NULL,
						sizeof(struct rte_event), 0);

			if (!ev_temp) {
				if (!loop)
					return num_tx;
				frames_to_send = loop;
				DPAA2_EVENTDEV_ERR(
					"Unable to allocate event object");
				goto send_partial;
			}
			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
			DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
			DPAA2_SET_FD_LEN((&fd_arr[loop]),
					 sizeof(struct rte_event));
		}
send_partial:
		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_EV_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_events -= loop;
					return num_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}
		num_tx += loop;
		nb_events -= loop;
	}

	return num_tx;
err:
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;
		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						   dpio_dev->token,
						   evq_info->dpcon->dpcon_id);
	}
	return 0;
}

static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}

static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
	struct epoll_event epoll_ev;

	qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
					 QBMAN_SWP_INTERRUPT_DQRI);

	epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
			 &epoll_ev, 1, timeout_ticks);
}

static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
					    const struct qbman_fd *fd,
					    const struct qbman_result *dq,
					    struct dpaa2_queue *rxq,
					    struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);

	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	qbman_swp_dqrr_consume(swp, dq);
}

static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
					  const struct qbman_fd *fd,
					  const struct qbman_result *dq,
					  struct dpaa2_queue *rxq,
					  struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	RTE_SET_USED(swp);
	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);
	ev->mbuf->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
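
/*
 * Atomic context bookkeeping, by example: if hardware delivers an atomic
 * event at DQRR index 2, the handler above stores seqn = 3 in the mbuf
 * (index + 1, so that 0 can mean "no context held") and sets bit 2 of
 * DPAA2_PER_LCORE_DQRR_HELD. When the application re-enqueues that mbuf,
 * dpaa2_eventdev_enqueue_burst() recovers the index as seqn - 1 and uses
 * it for DCA (discrete consumption acknowledgement), releasing the DQRR
 * entry back to hardware.
 */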

static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct qbman_result *dq;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct dpaa2_queue *rxq;
	int num_pkts = 0, ret, i = 0, n;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return 0;
		}
	}

	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	/* Check if there are atomic contexts to be released */
	while (DPAA2_PER_LCORE_DQRR_SIZE) {
		if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
			qbman_swp_dqrr_idx_consume(swp, i);
			DPAA2_PER_LCORE_DQRR_SIZE--;
			DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
				DPAA2_INVALID_MBUF_SEQN;
		}
		i++;
	}
	DPAA2_PER_LCORE_DQRR_HELD = 0;

	do {
		dq = qbman_swp_dqrr_next(swp);
		if (!dq) {
			if (!num_pkts && timeout_ticks) {
				dpaa2_eventdev_dequeue_wait(timeout_ticks);
				timeout_ticks = 0;
				continue;
			}
			return num_pkts;
		}
		qbman_swp_prefetch_dqrr_next(swp);

		fd = qbman_result_DQ_fd(dq);
		rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
		if (rxq) {
			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
		} else {
			qbman_swp_dqrr_consume(swp, dq);
			DPAA2_EVENTDEV_ERR("Null Return VQ received");
			return 0;
		}

		num_pkts++;
	} while (num_pkts < nb_events);

	return num_pkts;
err:
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;

		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						   dpio_dev->token,
						   evq_info->dpcon->dpcon_id);
	}
	return 0;
}

static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
		       uint64_t timeout_ticks)
{
	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}

static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
	dev_info->min_dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	dev_info->max_event_queues = priv->max_event_queues;
	dev_info->max_event_queue_flows =
		DPAA2_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
	/* we only support dpio up to number of cores */
	if (dev_info->max_event_ports > rte_lcore_count())
		dev_info->max_event_ports = rte_lcore_count();
	dev_info->max_event_port_dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE |
		RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES;
}

static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;

	EVENTDEV_INIT_FUNC_TRACE();

	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	/* Check whether the dequeue timeout is per dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * The timeout is supplied with each dequeue operation,
		 * so invalidate the device-level timeout value.
		 */
		priv->dequeue_timeout_ns = 0;
	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
			     dev->data->dev_id);
	return 0;
}
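
/*
 * A minimal configuration sketch (values illustrative only; dev_id is a
 * placeholder):
 *
 *	struct rte_event_dev_config conf = {
 *		.nb_event_queues = 2,
 *		.nb_event_ports = 2,
 *		.nb_events_limit = DPAA2_EVENT_MAX_NUM_EVENTS,
 *		.nb_event_queue_flows = DPAA2_EVENT_MAX_QUEUE_FLOWS,
 *		.nb_event_port_dequeue_depth = 8,
 *		.nb_event_port_enqueue_depth = 8,
 *		.dequeue_timeout_ns = 0,
 *	};
 *	rte_event_dev_configure(dev_id, &conf);
 *
 * With dequeue_timeout_ns == 0 and RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT
 * not set, dpaa2_eventdev_configure() above falls back to
 * DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS.
 */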

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			      struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->nb_atomic_order_sequences =
				DPAA2_EVENT_QUEUE_ORDER_SEQUENCES;
	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			   const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_INIT_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
	case RTE_SCHED_TYPE_ORDERED:
		break;
	default:
		DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
		return -1;
	}
	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			     struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold =
		DPAA2_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	port_conf->disable_implicit_release = 0;
}

static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			  const struct rte_event_port_conf *port_conf)
{
	char event_port_name[32];
	struct dpaa2_port *portal;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port_conf);

	sprintf(event_port_name, "event-port-%d", port_id);
	portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
	if (!portal) {
		DPAA2_EVENTDEV_ERR("Memory allocation failure");
		return -ENOMEM;
	}

	memset(portal, 0, sizeof(struct dpaa2_port));
	dev->data->ports[port_id] = portal;
	return 0;
}

static void
dpaa2_eventdev_port_release(void *port)
{
	struct dpaa2_port *portal = port;

	EVENTDEV_INIT_FUNC_TRACE();

	if (portal == NULL)
		return;

	/* TODO: Cleanup is required when ports are in linked state. */
	if (portal->is_port_linked)
		DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");

	rte_free(portal);
}

static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
			 const uint8_t queues[], const uint8_t priorities[],
			 uint16_t nb_links)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	uint16_t i;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(priorities);

	for (i = 0; i < nb_links; i++) {
		evq_info = &priv->evq_info[queues[i]];
		memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
		       sizeof(struct dpaa2_eventq));
		dpaa2_portal->evq_info[queues[i]].event_port = port;
		dpaa2_portal->num_linked_evq++;
	}

	return (int)nb_links;
}
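
/*
 * Linking here is only a software copy into the portal; the actual
 * portal-to-DPCON channel mapping is programmed lazily on first
 * enqueue/dequeue (see the is_port_linked handling above). Illustrative
 * application call, with placeholder dev_id/port_id:
 *
 *	uint8_t q = 0;
 *	rte_event_port_link(dev_id, port_id, &q, NULL, 1);
 *
 * NULL priorities are permitted by the eventdev API; this PMD ignores
 * priorities in any case.
 */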

static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			   uint8_t queues[], uint16_t nb_unlinks)
{
	struct dpaa2_port *dpaa2_portal = port;
	int i;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < nb_unlinks; i++) {
		evq_info = &dpaa2_portal->evq_info[queues[i]];

		if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
			/* todo dpaa2_portal shall have dpio_dev-no per lcore*/
			dpio_dev = DPAA2_PER_LCORE_DPIO;
			swp = DPAA2_PER_LCORE_PORTAL;

			qbman_swp_push_set(swp,
					evq_info->dpcon->channel_index, 0);
			dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						dpio_dev->token,
						evq_info->dpcon->dpcon_id);
		}
		memset(evq_info, 0, sizeof(struct dpaa2_eventq));
		if (dpaa2_portal->num_linked_evq)
			dpaa2_portal->num_linked_evq--;
	}

	if (!dpaa2_portal->num_linked_evq)
		dpaa2_portal->is_port_linked = false;

	return (int)nb_unlinks;
}

static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			     uint64_t *timeout_ticks)
{
	uint32_t scale = 1000 * 1000;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	*timeout_ticks = ns / scale;

	return 0;
}
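
/*
 * Ticks are therefore milliseconds: e.g. ns = 2000000 yields
 * *timeout_ticks = 2, and anything below 1000000 ns truncates to 0
 * (no wait). This matches dpaa2_eventdev_dequeue_wait(), which hands
 * the tick value to epoll_wait() as a millisecond timeout.
 */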

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(f);
}

static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
			    const struct rte_eth_dev *eth_dev,
			    uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa2"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_attach(eth_dev, i,
					      dpcon, queue_conf);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue attach failed: err(%d)", ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa2_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_add_all(dev,
				eth_dev, queue_conf);

	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
				      dpcon, queue_conf);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue attach failed: err(%d)", ret);
		return ret;
	}
	return 0;
}
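
/*
 * The typical caller is the Rx adapter; an illustrative setup (adapter_id
 * and eth_port_id are placeholders) that attaches every Rx queue of a
 * port to the DPCON backing event queue ev_qid:
 *
 *	struct rte_event_eth_rx_adapter_queue_conf qconf = {
 *		.ev.queue_id = ev_qid,
 *		.ev.sched_type = RTE_SCHED_TYPE_ATOMIC,
 *	};
 *	rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port_id, -1,
 *					   &qconf);
 */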

static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_detach(eth_dev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue detach failed: err(%d)", ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev,
			     int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue detach failed: err(%d)", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
			 const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
			    const struct rte_cryptodev *cdev,
			    uint32_t *caps)
{
	const char *name = cdev->data->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strncmp(name, "dpsec-", 6))
		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
	else
		return -1;

	return 0;
}

static int
dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		const struct rte_event *ev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_attach(cryptodev, i, dpcon, ev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"dpaa2_sec_eventq_attach failed: ret %d", ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa2_sec_eventq_detach(cryptodev, i);

	return ret;
}

static int
dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		int32_t rx_queue_id,
		const struct rte_event *ev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_add_all(dev,
				cryptodev, ev);

	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
				      dpcon, ev);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_attach failed: ret %d", ret);
		return ret;
	}
	return 0;
}
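
/*
 * The crypto adapter calls this with the event template whose fields
 * (queue_id, sched_type, ...) are attached to completions from the given
 * queue pair; rx_queue_id == -1 attaches every queue pair of the device.
 * Only DPAA2 SEC devices ("dpsec-*") qualify, as enforced by
 * dpaa2_eventdev_crypto_caps_get() above.
 */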

static int
dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
			     const struct rte_cryptodev *cdev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_detach(cdev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"dpaa2_sec_eventq_detach failed: ret %d", ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
			     const struct rte_cryptodev *cryptodev,
			     int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);

	ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_detach failed: ret %d", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
			    const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
			   const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa2_eventdev_tx_adapter_create(uint8_t id,
				 const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);

	/* Nothing to do. Simply return. */
	return 0;
}

static int
dpaa2_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev,
			       uint32_t *caps)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
	return 0;
}

static uint16_t
dpaa2_eventdev_txa_enqueue_same_dest(void *port,
				     struct rte_event ev[],
				     uint16_t nb_events)
{
	struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
	uint8_t qid, i;

	RTE_SET_USED(port);

	m0 = (struct rte_mbuf *)ev[0].mbuf;
	qid = rte_event_eth_tx_adapter_txq_get(m0);

	for (i = 0; i < nb_events; i++)
		m[i] = (struct rte_mbuf *)ev[i].mbuf;

	return rte_eth_tx_burst(m0->port, qid, m, nb_events);
}

static uint16_t
dpaa2_eventdev_txa_enqueue(void *port,
			   struct rte_event ev[],
			   uint16_t nb_events)
{
	struct rte_mbuf *m;
	uint8_t qid, i;

	RTE_SET_USED(port);

	for (i = 0; i < nb_events; i++) {
		m = (struct rte_mbuf *)ev[i].mbuf;
		qid = rte_event_eth_tx_adapter_txq_get(m);
		rte_eth_tx_burst(m->port, qid, &m, 1);
	}

	return nb_events;
}
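
/*
 * Two Tx paths: txa_enqueue_same_dest() is chosen by the Tx adapter when
 * a whole burst targets one port/queue, so the mbufs can be gathered and
 * sent with a single rte_eth_tx_burst() call, while the generic
 * txa_enqueue() transmits event by event. Both read back the Tx queue id
 * the application stored in the mbuf via
 * rte_event_eth_tx_adapter_txq_set().
 */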

static struct rte_eventdev_ops dpaa2_eventdev_ops = {
	.dev_infos_get    = dpaa2_eventdev_info_get,
	.dev_configure    = dpaa2_eventdev_configure,
	.dev_start        = dpaa2_eventdev_start,
	.dev_stop         = dpaa2_eventdev_stop,
	.dev_close        = dpaa2_eventdev_close,
	.queue_def_conf   = dpaa2_eventdev_queue_def_conf,
	.queue_setup      = dpaa2_eventdev_queue_setup,
	.queue_release    = dpaa2_eventdev_queue_release,
	.port_def_conf    = dpaa2_eventdev_port_def_conf,
	.port_setup       = dpaa2_eventdev_port_setup,
	.port_release     = dpaa2_eventdev_port_release,
	.port_link        = dpaa2_eventdev_port_link,
	.port_unlink      = dpaa2_eventdev_port_unlink,
	.timeout_ticks    = dpaa2_eventdev_timeout_ticks,
	.dump             = dpaa2_eventdev_dump,
	.dev_selftest     = test_eventdev_dpaa2,
	.eth_rx_adapter_caps_get	= dpaa2_eventdev_eth_caps_get,
	.eth_rx_adapter_queue_add	= dpaa2_eventdev_eth_queue_add,
	.eth_rx_adapter_queue_del	= dpaa2_eventdev_eth_queue_del,
	.eth_rx_adapter_start		= dpaa2_eventdev_eth_start,
	.eth_rx_adapter_stop		= dpaa2_eventdev_eth_stop,
	.eth_tx_adapter_caps_get	= dpaa2_eventdev_tx_adapter_caps,
	.eth_tx_adapter_create		= dpaa2_eventdev_tx_adapter_create,
	.crypto_adapter_caps_get	= dpaa2_eventdev_crypto_caps_get,
	.crypto_adapter_queue_pair_add	= dpaa2_eventdev_crypto_queue_add,
	.crypto_adapter_queue_pair_del	= dpaa2_eventdev_crypto_queue_del,
	.crypto_adapter_start		= dpaa2_eventdev_crypto_start,
	.crypto_adapter_stop		= dpaa2_eventdev_crypto_stop,
};

static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
			  struct dpaa2_dpcon_dev *dpcon_dev)
{
	struct dpci_rx_queue_cfg rx_queue_cfg;
	int ret, i;

	/* Configure the queues so frames land on the DPCON object */
	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
		  DPCI_QUEUE_OPT_USER_CTX;
	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
		dpaa2_eventdev_process_parallel;
	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
		dpaa2_eventdev_process_atomic;

	for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
		rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
		ret = dpci_set_rx_queue(&dpci_dev->dpci,
					CMD_PRI_LOW,
					dpci_dev->token, i,
					&rx_queue_cfg);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI Rx queue setup failed: err(%d)",
				ret);
			return ret;
		}
	}
	return 0;
}
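
/*
 * Each event queue is backed by a DPCON (for scheduling) plus a DPCI, the
 * "Soft Event Flow" from the clarifications at the top of this file. The
 * DPCI Rx queues carry the rte_event copies made at enqueue time: one
 * queue is configured for parallel events, the other for atomic events,
 * and the per-queue user_ctx programmed here is what the dequeue path
 * reads back through qbman_result_DQ_fqd_ctx() to find the right
 * callback.
 */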

static int
dpaa2_eventdev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
	struct dpaa2_dpci_dev *dpci_dev = NULL;
	int ret;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa2_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
		goto fail;
	}

	eventdev->dev_ops       = &dpaa2_eventdev_ops;
	eventdev->enqueue       = dpaa2_eventdev_enqueue;
	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->dequeue       = dpaa2_eventdev_dequeue;
	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
	eventdev->txa_enqueue	= dpaa2_eventdev_txa_enqueue;
	eventdev->txa_enqueue_same_dest	= dpaa2_eventdev_txa_enqueue_same_dest;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = 0;

	do {
		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
		if (!dpcon_dev)
			break;
		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

		dpci_dev = rte_dpaa2_alloc_dpci_dev();
		if (!dpci_dev) {
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			break;
		}
		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				    "DPCI setup failed: err(%d)", ret);
			return ret;
		}
		priv->max_event_queues++;
	} while (dpcon_dev && dpci_dev);

	RTE_LOG(INFO, PMD, "%s eventdev created\n", name);

	return 0;
fail:
	return -EFAULT;
}

static int
dpaa2_eventdev_destroy(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	int i;

	eventdev = rte_event_pmd_get_named_dev(name);
	if (eventdev == NULL) {
		RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name);
		return -1;
	}

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	for (i = 0; i < priv->max_event_queues; i++) {
		if (priv->evq_info[i].dpcon)
			rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon);

		if (priv->evq_info[i].dpci)
			rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci);
	}
	priv->max_event_queues = 0;

	RTE_LOG(INFO, PMD, "%s eventdev cleaned\n", name);
	return 0;
}

static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Initializing %s", name);
	return dpaa2_eventdev_create(name);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Closing %s", name);

	dpaa2_eventdev_destroy(name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
	.probe = dpaa2_eventdev_probe,
	.remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
RTE_LOG_REGISTER(dpaa2_logtype_event, pmd.event.dpaa2, NOTICE);