xref: /dpdk/drivers/bus/dpaa/dpaa_bus.c (revision 2d0c29a37a9c080c1cccb1ad7941aba2ccf5437e)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  *
3  *   Copyright 2017 NXP
4  *
5  */
6 /* System headers */
7 #include <stdio.h>
8 #include <inttypes.h>
9 #include <unistd.h>
10 #include <limits.h>
11 #include <sched.h>
12 #include <signal.h>
13 #include <pthread.h>
14 #include <sys/types.h>
15 #include <sys/syscall.h>
16 
17 #include <rte_byteorder.h>
18 #include <rte_common.h>
19 #include <rte_interrupts.h>
20 #include <rte_log.h>
21 #include <rte_debug.h>
22 #include <rte_atomic.h>
23 #include <rte_branch_prediction.h>
24 #include <rte_memory.h>
25 #include <rte_tailq.h>
26 #include <rte_eal.h>
27 #include <rte_alarm.h>
28 #include <rte_ether.h>
29 #include <rte_ethdev_driver.h>
30 #include <rte_malloc.h>
31 #include <rte_ring.h>
32 #include <rte_bus.h>
33 #include <rte_mbuf_pool_ops.h>
34 
35 #include <rte_dpaa_bus.h>
36 #include <rte_dpaa_logs.h>
37 #include <dpaax_iova_table.h>
38 
39 #include <fsl_usd.h>
40 #include <fsl_qman.h>
41 #include <fsl_bman.h>
42 #include <of.h>
43 #include <netcfg.h>
44 
/* Log type identifiers; registered with the EAL in dpaa_init_log() below. */
int dpaa_logtype_bus;
int dpaa_logtype_mempool;
int dpaa_logtype_pmd;
int dpaa_logtype_eventdev;

/* Singleton bus object; its initializer is at the bottom of this file. */
static struct rte_dpaa_bus rte_dpaa_bus;
/* Network configuration acquired from the device tree (netcfg_acquire()). */
struct netcfg_info *dpaa_netcfg;

/* define a variable to hold the portal_key, once created.*/
static pthread_key_t dpaa_portal_key;

/* SoC family (SVR value masked with SVR_MASK), read during bus probe. */
unsigned int dpaa_svr_family;

#define FSL_DPAA_BUS_NAME	dpaa_bus

/* Per-lcore flag: true once this lcore's QMAN/BMAN portal is initialized. */
RTE_DEFINE_PER_LCORE(bool, dpaa_io);
RTE_DEFINE_PER_LCORE(struct dpaa_portal_dqrr, held_bufs);
62 
63 static int
64 compare_dpaa_devices(struct rte_dpaa_device *dev1,
65 		     struct rte_dpaa_device *dev2)
66 {
67 	int comp = 0;
68 
69 	/* Segragating ETH from SEC devices */
70 	if (dev1->device_type > dev2->device_type)
71 		comp = 1;
72 	else if (dev1->device_type < dev2->device_type)
73 		comp = -1;
74 	else
75 		comp = 0;
76 
77 	if ((comp != 0) || (dev1->device_type != FSL_DPAA_ETH))
78 		return comp;
79 
80 	if (dev1->id.fman_id > dev2->id.fman_id) {
81 		comp = 1;
82 	} else if (dev1->id.fman_id < dev2->id.fman_id) {
83 		comp = -1;
84 	} else {
85 		/* FMAN ids match, check for mac_id */
86 		if (dev1->id.mac_id > dev2->id.mac_id)
87 			comp = 1;
88 		else if (dev1->id.mac_id < dev2->id.mac_id)
89 			comp = -1;
90 		else
91 			comp = 0;
92 	}
93 
94 	return comp;
95 }
96 
97 static inline void
98 dpaa_add_to_device_list(struct rte_dpaa_device *newdev)
99 {
100 	int comp, inserted = 0;
101 	struct rte_dpaa_device *dev = NULL;
102 	struct rte_dpaa_device *tdev = NULL;
103 
104 	TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
105 		comp = compare_dpaa_devices(newdev, dev);
106 		if (comp < 0) {
107 			TAILQ_INSERT_BEFORE(dev, newdev, next);
108 			inserted = 1;
109 			break;
110 		}
111 	}
112 
113 	if (!inserted)
114 		TAILQ_INSERT_TAIL(&rte_dpaa_bus.device_list, newdev, next);
115 }
116 
117 /*
118  * Reads the SEC device from DTS
119  * Returns -1 if SEC devices not available, 0 otherwise
120  */
121 static inline int
122 dpaa_sec_available(void)
123 {
124 	const struct device_node *caam_node;
125 
126 	for_each_compatible_node(caam_node, NULL, "fsl,sec-v4.0") {
127 		return 0;
128 	}
129 
130 	return -1;
131 }
132 
133 static void dpaa_clean_device_list(void);
134 
135 static struct rte_devargs *
136 dpaa_devargs_lookup(struct rte_dpaa_device *dev)
137 {
138 	struct rte_devargs *devargs;
139 	char dev_name[32];
140 
141 	RTE_EAL_DEVARGS_FOREACH("dpaa_bus", devargs) {
142 		devargs->bus->parse(devargs->name, &dev_name);
143 		if (strcmp(dev_name, dev->device.name) == 0) {
144 			DPAA_BUS_INFO("**Devargs matched %s", dev_name);
145 			return devargs;
146 		}
147 	}
148 	return NULL;
149 }
150 
151 static int
152 dpaa_create_device_list(void)
153 {
154 	int i;
155 	int ret;
156 	struct rte_dpaa_device *dev;
157 	struct fm_eth_port_cfg *cfg;
158 	struct fman_if *fman_intf;
159 
160 	/* Creating Ethernet Devices */
161 	for (i = 0; i < dpaa_netcfg->num_ethports; i++) {
162 		dev = calloc(1, sizeof(struct rte_dpaa_device));
163 		if (!dev) {
164 			DPAA_BUS_LOG(ERR, "Failed to allocate ETH devices");
165 			ret = -ENOMEM;
166 			goto cleanup;
167 		}
168 
169 		dev->device.bus = &rte_dpaa_bus.bus;
170 
171 		cfg = &dpaa_netcfg->port_cfg[i];
172 		fman_intf = cfg->fman_if;
173 
174 		/* Device identifiers */
175 		dev->id.fman_id = fman_intf->fman_idx + 1;
176 		dev->id.mac_id = fman_intf->mac_idx;
177 		dev->device_type = FSL_DPAA_ETH;
178 		dev->id.dev_id = i;
179 
180 		/* Create device name */
181 		memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
182 		sprintf(dev->name, "fm%d-mac%d", (fman_intf->fman_idx + 1),
183 			fman_intf->mac_idx);
184 		DPAA_BUS_LOG(INFO, "%s netdev added", dev->name);
185 		dev->device.name = dev->name;
186 		dev->device.devargs = dpaa_devargs_lookup(dev);
187 
188 		dpaa_add_to_device_list(dev);
189 	}
190 
191 	rte_dpaa_bus.device_count = i;
192 
193 	/* Unlike case of ETH, RTE_LIBRTE_DPAA_MAX_CRYPTODEV SEC devices are
194 	 * constantly created only if "sec" property is found in the device
195 	 * tree. Logically there is no limit for number of devices (QI
196 	 * interfaces) that can be created.
197 	 */
198 
199 	if (dpaa_sec_available()) {
200 		DPAA_BUS_LOG(INFO, "DPAA SEC devices are not available");
201 		return 0;
202 	}
203 
204 	/* Creating SEC Devices */
205 	for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
206 		dev = calloc(1, sizeof(struct rte_dpaa_device));
207 		if (!dev) {
208 			DPAA_BUS_LOG(ERR, "Failed to allocate SEC devices");
209 			ret = -1;
210 			goto cleanup;
211 		}
212 
213 		dev->device_type = FSL_DPAA_CRYPTO;
214 		dev->id.dev_id = rte_dpaa_bus.device_count + i;
215 
216 		/* Even though RTE_CRYPTODEV_NAME_MAX_LEN is valid length of
217 		 * crypto PMD, using RTE_ETH_NAME_MAX_LEN as that is the size
218 		 * allocated for dev->name/
219 		 */
220 		memset(dev->name, 0, RTE_ETH_NAME_MAX_LEN);
221 		sprintf(dev->name, "dpaa-sec%d", i);
222 		DPAA_BUS_LOG(INFO, "%s cryptodev added", dev->name);
223 		dev->device.name = dev->name;
224 		dev->device.devargs = dpaa_devargs_lookup(dev);
225 
226 		dpaa_add_to_device_list(dev);
227 	}
228 
229 	rte_dpaa_bus.device_count += i;
230 
231 	return 0;
232 
233 cleanup:
234 	dpaa_clean_device_list();
235 	return ret;
236 }
237 
238 static void
239 dpaa_clean_device_list(void)
240 {
241 	struct rte_dpaa_device *dev = NULL;
242 	struct rte_dpaa_device *tdev = NULL;
243 
244 	TAILQ_FOREACH_SAFE(dev, &rte_dpaa_bus.device_list, next, tdev) {
245 		TAILQ_REMOVE(&rte_dpaa_bus.device_list, dev, next);
246 		free(dev);
247 		dev = NULL;
248 	}
249 }
250 
251 int rte_dpaa_portal_init(void *arg)
252 {
253 	pthread_t id;
254 	unsigned int cpu, lcore = rte_lcore_id();
255 	int ret;
256 	struct dpaa_portal *dpaa_io_portal;
257 
258 	BUS_INIT_FUNC_TRACE();
259 
260 	if ((size_t)arg == 1 || lcore == LCORE_ID_ANY)
261 		lcore = rte_get_master_lcore();
262 	else
263 		if (lcore >= RTE_MAX_LCORE)
264 			return -1;
265 
266 	cpu = lcore_config[lcore].core_id;
267 
268 	/* Set CPU affinity for this thread.*/
269 	id = pthread_self();
270 	ret = pthread_setaffinity_np(id, sizeof(cpu_set_t),
271 			&lcore_config[lcore].cpuset);
272 	if (ret) {
273 		DPAA_BUS_LOG(ERR, "pthread_setaffinity_np failed on core :%u"
274 			     " (lcore=%u) with ret: %d", cpu, lcore, ret);
275 		return ret;
276 	}
277 
278 	/* Initialise bman thread portals */
279 	ret = bman_thread_init();
280 	if (ret) {
281 		DPAA_BUS_LOG(ERR, "bman_thread_init failed on core %u"
282 			     " (lcore=%u) with ret: %d", cpu, lcore, ret);
283 		return ret;
284 	}
285 
286 	DPAA_BUS_LOG(DEBUG, "BMAN thread initialized - CPU=%d lcore=%d",
287 		     cpu, lcore);
288 
289 	/* Initialise qman thread portals */
290 	ret = qman_thread_init();
291 	if (ret) {
292 		DPAA_BUS_LOG(ERR, "qman_thread_init failed on core %u"
293 			    " (lcore=%u) with ret: %d", cpu, lcore, ret);
294 		bman_thread_finish();
295 		return ret;
296 	}
297 
298 	DPAA_BUS_LOG(DEBUG, "QMAN thread initialized - CPU=%d lcore=%d",
299 		     cpu, lcore);
300 
301 	dpaa_io_portal = rte_malloc(NULL, sizeof(struct dpaa_portal),
302 				    RTE_CACHE_LINE_SIZE);
303 	if (!dpaa_io_portal) {
304 		DPAA_BUS_LOG(ERR, "Unable to allocate memory");
305 		bman_thread_finish();
306 		qman_thread_finish();
307 		return -ENOMEM;
308 	}
309 
310 	dpaa_io_portal->qman_idx = qman_get_portal_index();
311 	dpaa_io_portal->bman_idx = bman_get_portal_index();
312 	dpaa_io_portal->tid = syscall(SYS_gettid);
313 
314 	ret = pthread_setspecific(dpaa_portal_key, (void *)dpaa_io_portal);
315 	if (ret) {
316 		DPAA_BUS_LOG(ERR, "pthread_setspecific failed on core %u"
317 			     " (lcore=%u) with ret: %d", cpu, lcore, ret);
318 		dpaa_portal_finish(NULL);
319 
320 		return ret;
321 	}
322 
323 	RTE_PER_LCORE(dpaa_io) = true;
324 
325 	DPAA_BUS_LOG(DEBUG, "QMAN thread initialized");
326 
327 	return 0;
328 }
329 
/* Create a dedicated qman portal for @fq and subscribe it to the FQ's
 * pool channel.  Lazily initialises this lcore's base portal first.
 * Returns 0 on success, negative value on failure.
 */
int
rte_dpaa_portal_fq_init(void *arg, struct qman_fq *fq)
{
	/* Affine above created portal with channel*/
	u32 sdqcr;
	struct qman_portal *qp;
	int ret;

	/* First use on this lcore: bring up the thread portal. */
	if (unlikely(!RTE_PER_LCORE(dpaa_io))) {
		ret = rte_dpaa_portal_init(arg);
		if (ret < 0) {
			DPAA_BUS_LOG(ERR, "portal initialization failure");
			return ret;
		}
	}

	/* Initialise qman specific portals */
	qp = fsl_qman_portal_create();
	if (!qp) {
		DPAA_BUS_LOG(ERR, "Unable to alloc fq portal");
		return -1;
	}
	fq->qp = qp;
	/* Enable static dequeue on this FQ's pool channel. */
	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(fq->ch_id);
	qman_static_dequeue_add(sdqcr, qp);

	return 0;
}
358 
359 int rte_dpaa_portal_fq_close(struct qman_fq *fq)
360 {
361 	return fsl_qman_portal_destroy(fq->qp);
362 }
363 
364 void
365 dpaa_portal_finish(void *arg)
366 {
367 	struct dpaa_portal *dpaa_io_portal = (struct dpaa_portal *)arg;
368 
369 	if (!dpaa_io_portal) {
370 		DPAA_BUS_LOG(DEBUG, "Portal already cleaned");
371 		return;
372 	}
373 
374 	bman_thread_finish();
375 	qman_thread_finish();
376 
377 	pthread_setspecific(dpaa_portal_key, NULL);
378 
379 	rte_free(dpaa_io_portal);
380 	dpaa_io_portal = NULL;
381 
382 	RTE_PER_LCORE(dpaa_io) = false;
383 }
384 
385 static int
386 rte_dpaa_bus_parse(const char *name, void *out_name)
387 {
388 	int i, j;
389 	int max_fman = 2, max_macs = 16;
390 	char *sep = strchr(name, ':');
391 
392 	if (strncmp(name, RTE_STR(FSL_DPAA_BUS_NAME),
393 		strlen(RTE_STR(FSL_DPAA_BUS_NAME)))) {
394 		return -EINVAL;
395 	}
396 
397 	if (!sep) {
398 		DPAA_BUS_ERR("Incorrect device name observed");
399 		return -EINVAL;
400 	}
401 
402 	sep = (char *) (sep + 1);
403 
404 	for (i = 0; i < max_fman; i++) {
405 		for (j = 0; j < max_macs; j++) {
406 			char fm_name[16];
407 			snprintf(fm_name, 16, "fm%d-mac%d", i, j);
408 			if (strcmp(fm_name, sep) == 0) {
409 				if (out_name)
410 					strcpy(out_name, sep);
411 				return 0;
412 			}
413 		}
414 	}
415 
416 	for (i = 0; i < RTE_LIBRTE_DPAA_MAX_CRYPTODEV; i++) {
417 		char sec_name[16];
418 
419 		snprintf(sec_name, 16, "dpaa-sec%d", i);
420 		if (strcmp(sec_name, sep) == 0) {
421 			if (out_name)
422 				strcpy(out_name, sep);
423 			return 0;
424 		}
425 	}
426 
427 	return -EINVAL;
428 }
429 
430 #define DPAA_DEV_PATH1 "/sys/devices/platform/soc/soc:fsl,dpaa"
431 #define DPAA_DEV_PATH2 "/sys/devices/platform/fsl,dpaa"
432 
433 static int
434 rte_dpaa_bus_scan(void)
435 {
436 	int ret;
437 
438 	BUS_INIT_FUNC_TRACE();
439 
440 	if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
441 	    (access(DPAA_DEV_PATH2, F_OK) != 0)) {
442 		RTE_LOG(DEBUG, EAL, "DPAA Bus not present. Skipping.\n");
443 		return 0;
444 	}
445 	/* detected DPAA devices */
446 	rte_dpaa_bus.detected = 1;
447 
448 	/* create the key, supplying a function that'll be invoked
449 	 * when a portal affined thread will be deleted.
450 	 */
451 	ret = pthread_key_create(&dpaa_portal_key, dpaa_portal_finish);
452 	if (ret) {
453 		DPAA_BUS_LOG(DEBUG, "Unable to create pthread key. (%d)", ret);
454 		dpaa_clean_device_list();
455 		return ret;
456 	}
457 
458 	return 0;
459 }
460 
461 /* register a dpaa bus based dpaa driver */
462 void
463 rte_dpaa_driver_register(struct rte_dpaa_driver *driver)
464 {
465 	RTE_VERIFY(driver);
466 
467 	BUS_INIT_FUNC_TRACE();
468 
469 	TAILQ_INSERT_TAIL(&rte_dpaa_bus.driver_list, driver, next);
470 	/* Update Bus references */
471 	driver->dpaa_bus = &rte_dpaa_bus;
472 }
473 
474 /* un-register a dpaa bus based dpaa driver */
475 void
476 rte_dpaa_driver_unregister(struct rte_dpaa_driver *driver)
477 {
478 	struct rte_dpaa_bus *dpaa_bus;
479 
480 	BUS_INIT_FUNC_TRACE();
481 
482 	dpaa_bus = driver->dpaa_bus;
483 
484 	TAILQ_REMOVE(&dpaa_bus->driver_list, driver, next);
485 	/* Update Bus references */
486 	driver->dpaa_bus = NULL;
487 }
488 
489 static int
490 rte_dpaa_device_match(struct rte_dpaa_driver *drv,
491 		      struct rte_dpaa_device *dev)
492 {
493 	if (!drv || !dev) {
494 		DPAA_BUS_DEBUG("Invalid drv or dev received.");
495 		return -1;
496 	}
497 
498 	if (drv->drv_type == dev->device_type)
499 		return 0;
500 
501 	return -1;
502 }
503 
/* Load the device-tree driver, acquire the network configuration and
 * populate the bus device list.  Called from bus probe.
 * Returns 0 on success (including "no ports found", which is not an
 * error), negative value otherwise.
 */
static int
rte_dpaa_bus_dev_build(void)
{
	int ret;

	/* Load the device-tree driver */
	ret = of_init();
	if (ret) {
		DPAA_BUS_LOG(ERR, "of_init failed with ret: %d", ret);
		return -1;
	}

	/* Get the interface configurations from device-tree */
	dpaa_netcfg = netcfg_acquire();
	if (!dpaa_netcfg) {
		DPAA_BUS_LOG(ERR, "netcfg_acquire failed");
		return -EINVAL;
	}

	RTE_LOG(NOTICE, EAL, "DPAA Bus Detected\n");

	if (!dpaa_netcfg->num_ethports) {
		DPAA_BUS_LOG(INFO, "no network interfaces available");
		/* This is not an error */
		return 0;
	}

#ifdef RTE_LIBRTE_DPAA_DEBUG_DRIVER
	dump_netcfg(dpaa_netcfg);
#endif

	DPAA_BUS_LOG(DEBUG, "Number of ethernet devices = %d",
		     dpaa_netcfg->num_ethports);
	ret = dpaa_create_device_list();
	if (ret) {
		DPAA_BUS_LOG(ERR, "Unable to create device list. (%d)", ret);
		return ret;
	}
	return 0;
}
544 
/* EAL bus probe hook: build the device list, read the SoC family,
 * populate the PA->VA table and match/probe every (driver, device)
 * pair honoring the whitelist/blacklist devargs policy.
 * Always returns 0 (individual probe failures are only logged).
 */
static int
rte_dpaa_bus_probe(void)
{
	int ret = -1;
	struct rte_dpaa_device *dev;
	struct rte_dpaa_driver *drv;
	FILE *svr_file = NULL;
	unsigned int svr_ver;
	/* Unless whitelist mode is on, probe everything not blacklisted. */
	int probe_all = rte_dpaa_bus.bus.conf.scan_mode != RTE_BUS_SCAN_WHITELIST;

	/* If DPAA bus is not present nothing needs to be done */
	if (!rte_dpaa_bus.detected)
		return 0;

	rte_dpaa_bus_dev_build();

	/* If no device present on DPAA bus nothing needs to be done */
	if (TAILQ_EMPTY(&rte_dpaa_bus.device_list))
		return 0;

	/* Cache the SoC family for SoC-specific code paths in PMDs. */
	svr_file = fopen(DPAA_SOC_ID_FILE, "r");
	if (svr_file) {
		if (fscanf(svr_file, "svr:%x", &svr_ver) > 0)
			dpaa_svr_family = svr_ver & SVR_MASK;
		fclose(svr_file);
	}

	/* And initialize the PA->VA translation table */
	dpaax_iova_table_populate();

	/* For each registered driver, and device, call the driver->probe */
	TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
		TAILQ_FOREACH(drv, &rte_dpaa_bus.driver_list, next) {
			ret = rte_dpaa_device_match(drv, dev);
			if (ret)
				continue;

			/* Don't probe the same device twice. */
			if (rte_dev_is_probed(&dev->device))
				continue;

			/* Skip blacklisted devices and probe-less drivers. */
			if (!drv->probe ||
			    (dev->device.devargs &&
			    dev->device.devargs->policy == RTE_DEV_BLACKLISTED))
				continue;

			if (probe_all ||
			    (dev->device.devargs &&
			    dev->device.devargs->policy ==
			    RTE_DEV_WHITELISTED)) {
				ret = drv->probe(drv, dev);
				if (ret) {
					DPAA_BUS_ERR("Unable to probe.\n");
				} else {
					dev->driver = drv;
					dev->device.driver = &drv->driver;
				}
			}
			/* Matching driver found; stop scanning drivers. */
			break;
		}
	}

	/* Register DPAA mempool ops only if any DPAA device has
	 * been detected.
	 */
	rte_mbuf_set_platform_mempool_ops(DPAA_MEMPOOL_OPS_NAME);

	return 0;
}
613 
614 static struct rte_device *
615 rte_dpaa_find_device(const struct rte_device *start, rte_dev_cmp_t cmp,
616 		     const void *data)
617 {
618 	struct rte_dpaa_device *dev;
619 
620 	TAILQ_FOREACH(dev, &rte_dpaa_bus.device_list, next) {
621 		if (start && &dev->device == start) {
622 			start = NULL;  /* starting point found */
623 			continue;
624 		}
625 
626 		if (cmp(&dev->device, data) == 0)
627 			return &dev->device;
628 	}
629 
630 	return NULL;
631 }
632 
633 /*
634  * Get iommu class of DPAA2 devices on the bus.
635  */
636 static enum rte_iova_mode
637 rte_dpaa_get_iommu_class(void)
638 {
639 	if ((access(DPAA_DEV_PATH1, F_OK) != 0) &&
640 	    (access(DPAA_DEV_PATH2, F_OK) != 0)) {
641 		return RTE_IOVA_DC;
642 	}
643 	return RTE_IOVA_PA;
644 }
645 
/* Singleton DPAA bus instance; hooked into the EAL via RTE_REGISTER_BUS
 * below.  Device and driver lists point at themselves when empty.
 */
static struct rte_dpaa_bus rte_dpaa_bus = {
	.bus = {
		.scan = rte_dpaa_bus_scan,
		.probe = rte_dpaa_bus_probe,
		.parse = rte_dpaa_bus_parse,
		.find_device = rte_dpaa_find_device,
		.get_iommu_class = rte_dpaa_get_iommu_class,
	},
	.device_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.device_list),
	.driver_list = TAILQ_HEAD_INITIALIZER(rte_dpaa_bus.driver_list),
	.device_count = 0,
};
658 
659 RTE_REGISTER_BUS(FSL_DPAA_BUS_NAME, rte_dpaa_bus.bus);
660 
661 RTE_INIT(dpaa_init_log)
662 {
663 	dpaa_logtype_bus = rte_log_register("bus.dpaa");
664 	if (dpaa_logtype_bus >= 0)
665 		rte_log_set_level(dpaa_logtype_bus, RTE_LOG_NOTICE);
666 
667 	dpaa_logtype_mempool = rte_log_register("mempool.dpaa");
668 	if (dpaa_logtype_mempool >= 0)
669 		rte_log_set_level(dpaa_logtype_mempool, RTE_LOG_NOTICE);
670 
671 	dpaa_logtype_pmd = rte_log_register("pmd.net.dpaa");
672 	if (dpaa_logtype_pmd >= 0)
673 		rte_log_set_level(dpaa_logtype_pmd, RTE_LOG_NOTICE);
674 
675 	dpaa_logtype_eventdev = rte_log_register("pmd.event.dpaa");
676 	if (dpaa_logtype_eventdev >= 0)
677 		rte_log_set_level(dpaa_logtype_eventdev, RTE_LOG_NOTICE);
678 }
679