/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 NXP
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_pci.h>
#include <rte_eventdev.h>
#include <rte_eventdev_pmd_vdev.h>
#include <rte_ethdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_dpaa_bus.h>
#include <rte_dpaa_logs.h>
#include <rte_cycles_64.h>

#include <dpaa_ethdev.h>
#include "dpaa_eventdev.h"
#include <dpaa_mempool.h>

/*
 * Clarifications
 * Eventdev   = Virtual instance for the SoC
 * Eventport  = Portal instance
 * Eventqueue = Channel instance
 * 1 Eventdev can have N Eventqueues
 */

static int
dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	uint64_t cycles_per_second;

	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	cycles_per_second = rte_get_timer_hz();
	*timeout_ticks = ns * (cycles_per_second / NS_PER_S);

	return 0;
}

static void
dpaa_eventq_portal_add(u16 ch_id)
{
	uint32_t sdqcr;

	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
	qman_static_dequeue_add(sdqcr, NULL);
}

static uint16_t
dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
			 uint16_t nb_events)
{
	uint16_t i;
	struct rte_mbuf *mbuf;

	RTE_SET_USED(port);
	/* Release all the contexts saved previously */
	for (i = 0; i < nb_events; i++) {
		switch (ev[i].op) {
		case RTE_EVENT_OP_RELEASE:
			qman_dca_index(ev[i].impl_opaque, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
			break;
		default:
			break;
		}
	}

	return nb_events;
}

static uint16_t
dpaa_event_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa_event_enqueue_burst(port, ev, 1);
}

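/*
 * Burst dequeue: affine the calling thread to a qman portal, add the
 * port's linked event queues (channels) to that portal on first use,
 * release any held atomic DQRR contexts, and then poll the portal until
 * events arrive or the dequeue timeout expires.
 */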
static uint16_t
dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
			 uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i;
	uint64_t wait_time, cur_ticks, start_ticks;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	/* Affine current thread context to a qman portal */
	ret = rte_dpaa_portal_init((void *)0);
	if (ret) {
		DPAA_EVENTDEV_ERR("Unable to initialize portal");
		return ret;
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine the event queues of the current thread context
		 * to a qman portal.
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			mbuf->seqn = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	if (portal->timeout == DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID)
		wait_time = timeout_ticks;
	else
		wait_time = portal->timeout;

	/* Let's dequeue the frames */
	start_ticks = rte_get_timer_cycles();
	wait_time += start_ticks;
	do {
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (num_frames != 0)
			break;
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time);

	return num_frames;
}

static uint16_t
dpaa_event_dequeue(void *port, struct rte_event *ev, uint64_t timeout_ticks)
{
	return dpaa_event_dequeue_burst(port, ev, 1, timeout_ticks);
}

static void
dpaa_event_dev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	dev_info->driver_name = "event_dpaa";
	dev_info->min_dequeue_timeout_ns =
		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_event_queues =
		DPAA_EVENT_MAX_QUEUES;
	dev_info->max_event_queue_flows =
		DPAA_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports =
		DPAA_EVENT_MAX_EVENT_PORT;
	dev_info->max_event_port_dequeue_depth =
		DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	/*
	 * TODO: Need to find out how to fetch this info
	 * from the kernel or elsewhere.
	 */
	dev_info->max_num_events =
		DPAA_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap =
		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}

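/*
 * Configure the event device: cache the requested limits in the private
 * data, allocate one qman pool channel per event queue, and derive each
 * port's dequeue timeout in timer ticks (or mark it invalid when a
 * per-dequeue timeout is requested).
 */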
static int
dpaa_event_dev_configure(const struct rte_eventdev *dev)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	int ret, i;
	uint32_t *ch_id;

	EVENTDEV_DRV_FUNC_TRACE();

	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	priv->nb_events_limit = conf->nb_events_limit;
	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	/* Check whether the dequeue timeout is per-dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * Use the timeout value given in the dequeue operation,
		 * so invalidate this global timeout value.
		 */
		priv->dequeue_timeout_ns = 0;
	}

	ch_id = rte_malloc("dpaa-channels",
			   sizeof(uint32_t) * priv->nb_event_queues,
			   RTE_CACHE_LINE_SIZE);
	if (ch_id == NULL) {
		EVENTDEV_DRV_ERR("Failed to allocate memory for dpaa channels\n");
		return -ENOMEM;
	}
	/* Create the requested event queues within the given event device */
	ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
	if (ret < 0) {
		EVENTDEV_DRV_ERR("Failed to create internal channel\n");
		rte_free(ch_id);
		return ret;
	}
	for (i = 0; i < priv->nb_event_queues; i++)
		priv->evq_info[i].ch_id = (u16)ch_id[i];

	/* Let's prepare the event ports */
	memset(&priv->ports[0], 0,
	       sizeof(struct dpaa_port) * priv->nb_event_ports);
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		for (i = 0; i < priv->nb_event_ports; i++) {
			priv->ports[i].timeout =
				DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_INVALID;
		}
	} else if (priv->dequeue_timeout_ns == 0) {
		for (i = 0; i < priv->nb_event_ports; i++) {
			dpaa_event_dequeue_timeout_ticks(NULL,
				DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS,
				&priv->ports[i].timeout);
		}
	} else {
		for (i = 0; i < priv->nb_event_ports; i++) {
			dpaa_event_dequeue_timeout_ticks(NULL,
				priv->dequeue_timeout_ns,
				&priv->ports[i].timeout);
		}
	}
	/*
	 * TODO: Currently portals are affined to threads. The maximum
	 * number of threads that can be created equals the number of lcores.
	 */
	rte_free(ch_id);
	EVENTDEV_DRV_LOG("Configured eventdev devid=%d", dev->data->dev_id);

	return 0;
}

static int
dpaa_event_dev_start(struct rte_eventdev *dev)
{
	EVENTDEV_DRV_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa_event_dev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_DRV_FUNC_TRACE();
	RTE_SET_USED(dev);
}

static int
dpaa_event_dev_close(struct rte_eventdev *dev)
{
	EVENTDEV_DRV_FUNC_TRACE();
	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			  struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
}

static int
dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
		       const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_DRV_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
		break;
	case RTE_SCHED_TYPE_ORDERED:
		EVENTDEV_DRV_ERR("Schedule type is not supported.");
		return -1;
	}
	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}

static void
dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

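/*
 * Event port handling: the default config advertises the maximum
 * enqueue/dequeue depths, setup exposes the statically allocated per-port
 * private data, and linking records each queue's channel id in the port
 * so that dequeue can add it to the portal's static dequeue list.
 */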
static void
dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
}

static int
dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
		      const struct rte_event_port_conf *port_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;

	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(port_conf);
	dev->data->ports[port_id] = &eventdev->ports[port_id];

	return 0;
}

static void
dpaa_event_port_release(void *port)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(port);
}

static int
dpaa_event_port_link(struct rte_eventdev *dev, void *port,
		     const uint8_t queues[], const uint8_t priorities[],
		     uint16_t nb_links)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;
	struct dpaa_eventq *event_queue;
	uint8_t eventq_id;
	int i;

	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	/* First check that the input configuration is valid */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		if ((event_queue->event_queue_cfg
			& RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
			&& (event_queue->event_port)) {
			return -EINVAL;
		}
	}

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		event_port->evq_info[i].event_queue_id = eventq_id;
		event_port->evq_info[i].ch_id = event_queue->ch_id;
		event_queue->event_port = port;
	}

	event_port->num_linked_evq = event_port->num_linked_evq + i;

	return (int)i;
}

static int
dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
		       uint8_t queues[], uint16_t nb_links)
{
	int i;
	uint8_t eventq_id;
	struct dpaa_eventq *event_queue;
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;

	if (!event_port->num_linked_evq)
		return nb_links;

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_port->evq_info[eventq_id].event_queue_id = -1;
		event_port->evq_info[eventq_id].ch_id = 0;
		event_queue = &priv->evq_info[eventq_id];
		event_queue->event_port = NULL;
	}

	event_port->num_linked_evq = event_port->num_linked_evq - i;

	return (int)i;
}

static int
dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
				   const struct rte_eth_dev *eth_dev,
				   uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

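/*
 * Attach ethdev Rx queues to the event queue's channel. A rx_queue_id of
 * -1 attaches every Rx queue of the port; if one attach fails, the queues
 * attached so far are detached again.
 */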
static int
dpaa_event_eth_rx_adapter_queue_add(
		const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa_eventdev *eventdev = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
	int ret, i;

	EVENTDEV_DRV_FUNC_TRACE();

	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
						     queue_conf);
			if (ret) {
				EVENTDEV_DRV_ERR(
					"Event Queue attach failed:%d\n", ret);
				goto detach_configured_queues;
			}
		}
		return 0;
	}

	ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
	if (ret)
		EVENTDEV_DRV_ERR("dpaa_eth_eventq_attach failed:%d\n", ret);
	return ret;

detach_configured_queues:

	for (i = (i - 1); i >= 0; i--)
		dpaa_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
				    const struct rte_eth_dev *eth_dev,
				    int32_t rx_queue_id)
{
	int ret, i;
	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;

	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	if (rx_queue_id == -1) {
		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
			ret = dpaa_eth_eventq_detach(eth_dev, i);
			if (ret)
				EVENTDEV_DRV_ERR(
					"Event Queue detach failed:%d\n", ret);
		}

		return 0;
	}

	ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret)
		EVENTDEV_DRV_ERR("dpaa_eth_eventq_detach failed:%d\n", ret);
	return ret;
}

static int
dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
				const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_DRV_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static const struct rte_eventdev_ops dpaa_eventdev_ops = {
	.dev_infos_get = dpaa_event_dev_info_get,
	.dev_configure = dpaa_event_dev_configure,
	.dev_start = dpaa_event_dev_start,
	.dev_stop = dpaa_event_dev_stop,
	.dev_close = dpaa_event_dev_close,
	.queue_def_conf = dpaa_event_queue_def_conf,
	.queue_setup = dpaa_event_queue_setup,
	.queue_release = dpaa_event_queue_release,
	.port_def_conf = dpaa_event_port_default_conf_get,
	.port_setup = dpaa_event_port_setup,
	.port_release = dpaa_event_port_release,
	.port_link = dpaa_event_port_link,
	.port_unlink = dpaa_event_port_unlink,
	.timeout_ticks = dpaa_event_dequeue_timeout_ticks,
	.eth_rx_adapter_caps_get = dpaa_event_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = dpaa_event_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = dpaa_event_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = dpaa_event_eth_rx_adapter_start,
	.eth_rx_adapter_stop = dpaa_event_eth_rx_adapter_stop,
};

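/*
 * Virtual device plumbing: create the vdev-backed eventdev instance, wire
 * up the ops table and the fast-path enqueue/dequeue handlers, and
 * register the driver with the vdev bus via probe/remove callbacks.
 */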
static int
dpaa_event_dev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa_eventdev *priv;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		EVENTDEV_DRV_ERR("Failed to create eventdev vdev %s", name);
		goto fail;
	}

	eventdev->dev_ops = &dpaa_eventdev_ops;
	eventdev->enqueue = dpaa_event_enqueue;
	eventdev->enqueue_burst = dpaa_event_enqueue_burst;
	eventdev->dequeue = dpaa_event_dequeue;
	eventdev->dequeue_burst = dpaa_event_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;

	return 0;
fail:
	return -EFAULT;
}

static int
dpaa_event_dev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	EVENTDEV_DRV_LOG("Initializing %s", name);

	return dpaa_event_dev_create(name);
}

static int
dpaa_event_dev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	EVENTDEV_DRV_LOG("Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
	.probe = dpaa_event_dev_probe,
	.remove = dpaa_event_dev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);