/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright 2017 NXP
 *
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_pci.h>
#include <rte_bus_vdev.h>
#include <rte_ethdev_driver.h>
#include <rte_cryptodev.h>
#include <rte_event_eth_rx_adapter.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#ifdef RTE_LIBRTE_SECURITY
#include <dpaa2_sec_event.h>
#endif
#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>

/* Clarifications
 * Eventdev = SoC Instance
 * Eventport = DPIO Instance
 * Eventqueue = DPCON Instance
 * 1 Eventdev can have N Eventqueues
 * Soft Event Flow is DPCI Instance
 */

/* Dynamic logging identifier for the eventdev */
int dpaa2_logtype_event;

static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			     uint16_t nb_events)
{
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_dpio_dev *dpio_dev;
	uint32_t queue_id = ev[0].queue_id;
	struct dpaa2_eventq *evq_info;
	uint32_t fqid;
	struct qbman_swp *swp;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t loop, frames_to_send;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	uint16_t num_tx = 0;
	int i, n, ret;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR("Failure in affining portal");
			return 0;
		}
	}
	/* todo - dpaa2_portal shall have dpio_dev - no per thread variable */
	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	evq_info = &dpaa2_portal->evq_info[queue_id];

	while (nb_events) {
		frames_to_send = (nb_events > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_events;

		for (loop = 0; loop < frames_to_send; loop++) {
			const struct rte_event *event = &ev[num_tx + loop];

			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
			else
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

			if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
				&& event->mbuf->seqn) {
				uint8_t dqrr_index = event->mbuf->seqn - 1;

				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
						      dqrr_index, 0);
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
			}

			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

			/*
			 * todo - need to align with hw context data
			 * to avoid copy
			 */
			struct rte_event *ev_temp = rte_malloc(NULL,
				sizeof(struct rte_event), 0);

			if (!ev_temp) {
				if (!loop)
					return num_tx;
				frames_to_send = loop;
				DPAA2_EVENTDEV_ERR(
					"Unable to allocate event object");
				goto send_partial;
			}
			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
			DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
			DPAA2_SET_FD_LEN((&fd_arr[loop]),
					 sizeof(struct rte_event));
		}
send_partial:
		loop = 0;
		while (loop < frames_to_send) {
			loop += qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
		}
		num_tx += frames_to_send;
		nb_events -= frames_to_send;
	}

	return num_tx;
err:
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;
		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						   dpio_dev->token,
						   evq_info->dpcon->dpcon_id);
	}
	return 0;
}

static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}

static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
	struct epoll_event epoll_ev;

	qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
					 QBMAN_SWP_INTERRUPT_DQRI);

	epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
		   &epoll_ev, 1, timeout_ticks);
}

static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
					    const struct qbman_fd *fd,
					    const struct qbman_result *dq,
					    struct dpaa2_queue *rxq,
					    struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);

	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	qbman_swp_dqrr_consume(swp, dq);
}

static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
					  const struct qbman_fd *fd,
					  const struct qbman_result *dq,
					  struct dpaa2_queue *rxq,
					  struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	RTE_SET_USED(swp);
	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);
	ev->mbuf->seqn = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
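	/* Record this DQRR entry as held: it is released either via DCA when
	 * the atomic event is enqueued back, or drained at the start of the
	 * next dequeue.
	 */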
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct qbman_result *dq;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct dpaa2_queue *rxq;
	int num_pkts = 0, ret, i = 0, n;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR("Failure in affining portal");
			return 0;
		}
	}

	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	/* Check if there are atomic contexts to be released */
	while (DPAA2_PER_LCORE_DQRR_SIZE) {
		if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
			qbman_swp_dqrr_idx_consume(swp, i);
			DPAA2_PER_LCORE_DQRR_SIZE--;
			DPAA2_PER_LCORE_DQRR_MBUF(i)->seqn =
				DPAA2_INVALID_MBUF_SEQN;
		}
		i++;
	}
	DPAA2_PER_LCORE_DQRR_HELD = 0;

	do {
		dq = qbman_swp_dqrr_next(swp);
		if (!dq) {
			if (!num_pkts && timeout_ticks) {
				dpaa2_eventdev_dequeue_wait(timeout_ticks);
				timeout_ticks = 0;
				continue;
			}
			return num_pkts;
		}
		qbman_swp_prefetch_dqrr_next(swp);

		fd = qbman_result_DQ_fd(dq);
		rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
		if (rxq) {
			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
		} else {
			qbman_swp_dqrr_consume(swp, dq);
			DPAA2_EVENTDEV_ERR("Null Return VQ received");
			return 0;
		}

		num_pkts++;
	} while (num_pkts < nb_events);

	return num_pkts;
err:
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;

		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						   dpio_dev->token,
						   evq_info->dpcon->dpcon_id);
	}
	return 0;
}

static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
		       uint64_t timeout_ticks)
{
	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}

static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
	dev_info->min_dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	dev_info->max_event_queues = priv->max_event_queues;
	dev_info->max_event_queue_flows =
		DPAA2_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
	/* We only support as many DPIO ports as there are cores */
	if (dev_info->max_event_ports > rte_lcore_count())
		dev_info->max_event_ports = rte_lcore_count();
	dev_info->max_event_port_dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE;
}

static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;

	EVENTDEV_INIT_FUNC_TRACE();

	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	/* Check whether the dequeue timeout is per dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * The timeout value is supplied with each dequeue operation,
		 * so invalidate the global timeout value here.
		 */
		priv->dequeue_timeout_ns = 0;
	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
			     dev->data->dev_id);
	return 0;
}

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			      struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->schedule_type = RTE_SCHED_TYPE_ATOMIC |
				    RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			   const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_INIT_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
		break;
	case RTE_SCHED_TYPE_ORDERED:
		DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
		return -1;
	}
	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			     struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold =
		DPAA2_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	port_conf->disable_implicit_release = 0;
}

static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			  const struct rte_event_port_conf *port_conf)
{
	char event_port_name[32];
	struct dpaa2_port *portal;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port_conf);

	sprintf(event_port_name, "event-port-%d", port_id);
	portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
	if (!portal) {
		DPAA2_EVENTDEV_ERR("Memory allocation failure");
		return -ENOMEM;
	}

	memset(portal, 0, sizeof(struct dpaa2_port));
	dev->data->ports[port_id] = portal;
	return 0;
}

static void
dpaa2_eventdev_port_release(void *port)
{
	struct dpaa2_port *portal = port;

	EVENTDEV_INIT_FUNC_TRACE();

	if (portal == NULL)
		return;

	/* TODO: Cleanup is required when ports are in linked state. */
	if (portal->is_port_linked)
		DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");

	rte_free(portal);
}

static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
			 const uint8_t queues[], const uint8_t priorities[],
			 uint16_t nb_links)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	uint16_t i;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(priorities);

	for (i = 0; i < nb_links; i++) {
		evq_info = &priv->evq_info[queues[i]];
		memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
		       sizeof(struct dpaa2_eventq));
		dpaa2_portal->evq_info[queues[i]].event_port = port;
		dpaa2_portal->num_linked_evq++;
	}

	return (int)nb_links;
}

static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			   uint8_t queues[], uint16_t nb_unlinks)
{
	struct dpaa2_port *dpaa2_portal = port;
	int i;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queues);

	for (i = 0; i < nb_unlinks; i++) {
		evq_info = &dpaa2_portal->evq_info[queues[i]];

		if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
			/* todo - dpaa2_portal shall have dpio_dev - no per lcore variable */
			dpio_dev = DPAA2_PER_LCORE_DPIO;
			swp = DPAA2_PER_LCORE_PORTAL;

			qbman_swp_push_set(swp,
					   evq_info->dpcon->channel_index, 0);
			dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						dpio_dev->token,
						evq_info->dpcon->dpcon_id);
		}
		memset(evq_info, 0, sizeof(struct dpaa2_eventq));
		if (dpaa2_portal->num_linked_evq)
			dpaa2_portal->num_linked_evq--;
	}

	if (!dpaa2_portal->num_linked_evq)
		dpaa2_portal->is_port_linked = false;

	return (int)nb_unlinks;
}

static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			     uint64_t *timeout_ticks)
{
	uint32_t scale = 1000 * 1000;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	*timeout_ticks = ns / scale;

	return 0;
}

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(f);
}

static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
			    const struct rte_eth_dev *eth_dev,
			    uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa2"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_attach(eth_dev, i,
					      dpcon_id, queue_conf);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue attach failed: err(%d)", ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa2_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_add_all(dev,
				eth_dev, queue_conf);

	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
				      dpcon_id, queue_conf);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue attach failed: err(%d)", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
				 const struct rte_eth_dev *eth_dev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_detach(eth_dev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue detach failed: err(%d)", ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev,
			     int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue detach failed: err(%d)", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
			 const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

#ifdef RTE_LIBRTE_SECURITY
static int
dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
			       const struct rte_cryptodev *cdev,
			       uint32_t *caps)
{
	const char *name = cdev->data->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strncmp(name, "dpsec-", 6))
		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
	else
		return -1;

	return 0;
}

static int
dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		const struct rte_event *ev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_attach(cryptodev, i,
					      dpcon_id, ev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"dpaa2_sec_eventq_attach failed: ret %d\n",
				ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa2_sec_eventq_detach(cryptodev, i);

	return ret;
}

static int
dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		int32_t rx_queue_id,
		const struct rte_event *ev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	uint16_t dpcon_id = priv->evq_info[ev_qid].dpcon->dpcon_id;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_add_all(dev,
				cryptodev, ev);

	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
				      dpcon_id, ev);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
				    const struct rte_cryptodev *cdev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_detach(cdev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"dpaa2_sec_eventq_detach failed: ret %d\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
				const struct rte_cryptodev *cryptodev,
				int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);

	ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_detach failed: ret: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
			    const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
			   const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}
#endif

static struct rte_eventdev_ops dpaa2_eventdev_ops = {
	.dev_infos_get    = dpaa2_eventdev_info_get,
	.dev_configure    = dpaa2_eventdev_configure,
	.dev_start        = dpaa2_eventdev_start,
	.dev_stop         = dpaa2_eventdev_stop,
	.dev_close        = dpaa2_eventdev_close,
	.queue_def_conf   = dpaa2_eventdev_queue_def_conf,
	.queue_setup      = dpaa2_eventdev_queue_setup,
	.queue_release    = dpaa2_eventdev_queue_release,
	.port_def_conf    = dpaa2_eventdev_port_def_conf,
	.port_setup       = dpaa2_eventdev_port_setup,
	.port_release     = dpaa2_eventdev_port_release,
	.port_link        = dpaa2_eventdev_port_link,
	.port_unlink      = dpaa2_eventdev_port_unlink,
	.timeout_ticks    = dpaa2_eventdev_timeout_ticks,
	.dump             = dpaa2_eventdev_dump,
	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
	.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
	.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
#ifdef RTE_LIBRTE_SECURITY
	.crypto_adapter_caps_get = dpaa2_eventdev_crypto_caps_get,
	.crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add,
	.crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del,
	.crypto_adapter_start = dpaa2_eventdev_crypto_start,
	.crypto_adapter_stop = dpaa2_eventdev_crypto_stop,
#endif
};

static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
			  struct dpaa2_dpcon_dev *dpcon_dev)
{
	struct dpci_rx_queue_cfg rx_queue_cfg;
	int ret, i;

	/* Configure the DPCI Rx queues to deliver frames to the DPCON object */
	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
		DPCI_QUEUE_OPT_USER_CTX;
	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
		dpaa2_eventdev_process_parallel;
	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
		dpaa2_eventdev_process_atomic;

	for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
		rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
		ret = dpci_set_rx_queue(&dpci_dev->dpci,
					CMD_PRI_LOW,
					dpci_dev->token, i,
					&rx_queue_cfg);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI Rx queue setup failed: err(%d)",
				ret);
			return ret;
		}
	}
	return 0;
}

static int
dpaa2_eventdev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
	struct dpaa2_dpci_dev *dpci_dev = NULL;
	int ret;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa2_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
		goto fail;
	}

	eventdev->dev_ops = &dpaa2_eventdev_ops;
	eventdev->enqueue = dpaa2_eventdev_enqueue;
	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->dequeue = dpaa2_eventdev_dequeue;
	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = 0;

	do {
		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
		if (!dpcon_dev)
			break;
		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

		dpci_dev = rte_dpaa2_alloc_dpci_dev();
		if (!dpci_dev) {
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			break;
		}
		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI setup failed: err(%d)", ret);
			return ret;
		}
		priv->max_event_queues++;
	} while (dpcon_dev && dpci_dev);

	RTE_LOG(INFO, PMD, "%s eventdev created\n", name);

	return 0;
fail:
	return -EFAULT;
}

static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Initializing %s", name);
	return dpaa2_eventdev_create(name);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Closing %s", name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
	.probe = dpaa2_eventdev_probe,
	.remove = dpaa2_eventdev_remove
};

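/*
 * Register the driver on the vdev bus; the eventdev instance is created when
 * a --vdev argument (or rte_vdev_init()) names this PMD.
 */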
RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);

RTE_INIT(dpaa2_eventdev_init_log)
{
	dpaa2_logtype_event = rte_log_register("pmd.event.dpaa2");
	if (dpaa2_logtype_event >= 0)
		rte_log_set_level(dpaa2_logtype_event, RTE_LOG_NOTICE);
}