/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017,2019-2022 NXP
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_eal.h>
#include <bus_fslmc_driver.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_pci.h>
#include <bus_vdev_driver.h>
#include <ethdev_driver.h>
#include <cryptodev_pmd.h>
#include <rte_event_crypto_adapter.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include <dpaa2_sec_event.h>
#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>

/* Clarifications
 * Eventdev = SoC Instance
 * Eventport = DPIO Instance
 * Eventqueue = DPCON Instance
 * One Eventdev can have N Eventqueues
 * Soft event flow = DPCI Instance
 */

#define DPAA2_EV_TX_RETRY_COUNT 10000

static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			     uint16_t nb_events)
{
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_dpio_dev *dpio_dev;
	uint32_t queue_id = ev[0].queue_id;
	struct dpaa2_eventq *evq_info;
	uint32_t fqid, retry_count;
	struct qbman_swp *swp;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t loop, frames_to_send;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	uint16_t num_tx = 0;
	int i, n, ret;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return 0;
		}
	}
	/* todo - dpaa2_portal shall have dpio_dev - no per thread variable */
	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	evq_info = &dpaa2_portal->evq_info[queue_id];

	while (nb_events) {
		frames_to_send = (nb_events > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_events;

		for (loop = 0; loop < frames_to_send; loop++) {
			const struct rte_event *event = &ev[num_tx + loop];

			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
			else
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

			if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
				&& *dpaa2_seqn(event->mbuf)) {
				uint8_t dqrr_index =
					*dpaa2_seqn(event->mbuf) - 1;

				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
						      dqrr_index, 0);
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
			}

			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

			/*
			 * todo - need to align with hw context data
			 * to avoid copy
			 */
			struct rte_event *ev_temp = rte_malloc(NULL,
				sizeof(struct rte_event), 0);

			if (!ev_temp) {
				if (!loop)
					return num_tx;
				frames_to_send = loop;
				DPAA2_EVENTDEV_ERR(
					"Unable to allocate event object");
				goto send_partial;
			}
			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
			DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
			DPAA2_SET_FD_LEN((&fd_arr[loop]),
					 sizeof(struct rte_event));
		}
send_partial:
		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_EV_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_events -= loop;
					return num_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}
		num_tx += loop;
		nb_events -= loop;
	}

	return num_tx;
err:
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;
		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						   dpio_dev->token,
						   evq_info->dpcon->dpcon_id);
	}
	return 0;
}

static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
	struct epoll_event epoll_ev;

	qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
					 QBMAN_SWP_INTERRUPT_DQRI);

	epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
		   &epoll_ev, 1, timeout_ticks);
}

static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
					    const struct qbman_fd *fd,
					    const struct qbman_result *dq,
					    struct dpaa2_queue *rxq,
					    struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);

	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	qbman_swp_dqrr_consume(swp, dq);
}

static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
					  const struct qbman_fd *fd,
					  const struct qbman_result *dq,
					  struct dpaa2_queue *rxq,
					  struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	RTE_SET_USED(swp);
	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct qbman_result *dq;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct dpaa2_queue *rxq;
	int num_pkts = 0, ret, i = 0, n;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Failed to allocate IO portal, tid: %d",
				rte_gettid());
			return 0;
		}
	}

	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						      CMD_PRI_LOW,
						      dpio_dev->token,
						      evq_info->dpcon->dpcon_id,
						      &channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	/* Check if there are atomic contexts to be released */
	while (DPAA2_PER_LCORE_DQRR_SIZE) {
		if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
			qbman_swp_dqrr_idx_consume(swp, i);
			DPAA2_PER_LCORE_DQRR_SIZE--;
			*dpaa2_seqn(DPAA2_PER_LCORE_DQRR_MBUF(i)) =
				DPAA2_INVALID_MBUF_SEQN;
		}
		i++;
	}
	DPAA2_PER_LCORE_DQRR_HELD = 0;

	do {
		dq = qbman_swp_dqrr_next(swp);
		if (!dq) {
			if (!num_pkts && timeout_ticks) {
				dpaa2_eventdev_dequeue_wait(timeout_ticks);
				timeout_ticks = 0;
				continue;
			}
			return num_pkts;
		}
		qbman_swp_prefetch_dqrr_next(swp);

		fd = qbman_result_DQ_fd(dq);
		rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
		if (rxq) {
			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
		} else {
			qbman_swp_dqrr_consume(swp, dq);
			DPAA2_EVENTDEV_ERR("Null Return VQ received");
			return 0;
		}

		num_pkts++;
	} while (num_pkts < nb_events);

	return num_pkts;
err:
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;

		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						   dpio_dev->token,
						   evq_info->dpcon->dpcon_id);
	}
	return 0;
}

static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
	dev_info->min_dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	dev_info->max_event_queues = priv->max_event_queues;
	dev_info->max_event_queue_flows =
		DPAA2_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
	/* We only support as many DPIO ports as there are cores */
	if (dev_info->max_event_ports > rte_lcore_count())
		dev_info->max_event_ports = rte_lcore_count();
	dev_info->max_event_port_dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_ATOMIC |
		RTE_EVENT_DEV_CAP_PARALLEL |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE |
		RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
		RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
		RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
	dev_info->max_profiles_per_port = 1;
}

static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;

	EVENTDEV_INIT_FUNC_TRACE();

	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	/* Check whether the dequeue timeout is per dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * The timeout value is given with each dequeue operation,
		 * so invalidate this global timeout value.
		 */
		priv->dequeue_timeout_ns = 0;
	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
			     dev->data->dev_id);
	return 0;
}

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			      struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->nb_atomic_order_sequences =
		DPAA2_EVENT_QUEUE_ORDER_SEQUENCES;
	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			   const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_INIT_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
	case RTE_SCHED_TYPE_ORDERED:
		break;
	default:
		DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
		return -1;
	}
	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			     struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold =
		DPAA2_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	port_conf->event_port_cfg = 0;
}

static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			  const struct rte_event_port_conf *port_conf)
{
	char event_port_name[32];
	struct dpaa2_port *portal;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port_conf);

	sprintf(event_port_name, "event-port-%d", port_id);
	portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
	if (!portal) {
		DPAA2_EVENTDEV_ERR("Memory allocation failure");
		return -ENOMEM;
	}

	memset(portal, 0, sizeof(struct dpaa2_port));
	dev->data->ports[port_id] = portal;
	return 0;
}

static void
dpaa2_eventdev_port_release(void *port)
{
	struct dpaa2_port *portal = port;

	EVENTDEV_INIT_FUNC_TRACE();

	if (portal == NULL)
		return;

	/* TODO: Cleanup is required when ports are in linked state.
	 */
	if (portal->is_port_linked)
		DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");

	rte_free(portal);
}

static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
			 const uint8_t queues[], const uint8_t priorities[],
			 uint16_t nb_links)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	uint16_t i;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(priorities);

	for (i = 0; i < nb_links; i++) {
		evq_info = &priv->evq_info[queues[i]];
		memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
		       sizeof(struct dpaa2_eventq));
		dpaa2_portal->evq_info[queues[i]].event_port = port;
		dpaa2_portal->num_linked_evq++;
	}

	return (int)nb_links;
}

static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			   uint8_t queues[], uint16_t nb_unlinks)
{
	struct dpaa2_port *dpaa2_portal = port;
	int i;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queues);

	for (i = 0; i < nb_unlinks; i++) {
		evq_info = &dpaa2_portal->evq_info[queues[i]];

		if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
			/* todo - dpaa2_portal shall have dpio_dev - no per lcore */
			dpio_dev = DPAA2_PER_LCORE_DPIO;
			swp = DPAA2_PER_LCORE_PORTAL;

			qbman_swp_push_set(swp,
					   evq_info->dpcon->channel_index, 0);
			dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						dpio_dev->token,
						evq_info->dpcon->dpcon_id);
		}
		memset(evq_info, 0, sizeof(struct dpaa2_eventq));
		if (dpaa2_portal->num_linked_evq)
			dpaa2_portal->num_linked_evq--;
	}

	if (!dpaa2_portal->num_linked_evq)
		dpaa2_portal->is_port_linked = false;

	return (int)nb_unlinks;
}

static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			     uint64_t *timeout_ticks)
{
	uint32_t scale = 1000 * 1000;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	*timeout_ticks = ns / scale;

	return 0;
}

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(f);
}

static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
			    const struct rte_eth_dev *eth_dev,
			    uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa2"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_attach(eth_dev, i,
					      dpcon, queue_conf);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue attach failed: err(%d)", ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa2_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_add_all(dev,
				eth_dev, queue_conf);

	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
				      dpcon, queue_conf);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue attach failed: err(%d)", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
				 const struct rte_eth_dev *eth_dev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_detach(eth_dev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue detach failed: err(%d)", ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev,
			     int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue detach failed: err(%d)", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
			 const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
			       const struct rte_cryptodev *cdev,
			       uint32_t *caps)
{
	const char *name = cdev->data->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strncmp(name, "dpsec-", 6))
		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
	else
		return -1;

	return 0;
}

static int
dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		const struct rte_event *ev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_attach(cryptodev, i, dpcon, ev);
		if (ret) {
			DPAA2_EVENTDEV_ERR("dpaa2_sec_eventq_attach failed: ret %d",
					   ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa2_sec_eventq_detach(cryptodev, i);

	return ret;
}

static int
dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		int32_t rx_queue_id,
		const struct rte_event_crypto_adapter_queue_conf *conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = conf->ev.queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_add_all(dev,
				cryptodev, &conf->ev);

	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
				      dpcon, &conf->ev);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_attach failed: ret: %d", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
				    const struct rte_cryptodev *cdev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_detach(cdev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"dpaa2_sec_eventq_detach failed: ret %d", ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
				const struct rte_cryptodev *cryptodev,
				int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);

	ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_detach failed: ret: %d", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
			    const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
			   const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa2_eventdev_tx_adapter_create(uint8_t id,
				 const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);

	/* Nothing to do. Simply return.
	 */
	return 0;
}

static int
dpaa2_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev,
			       uint32_t *caps)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
	return 0;
}

static uint16_t
dpaa2_eventdev_txa_enqueue_same_dest(void *port,
				     struct rte_event ev[],
				     uint16_t nb_events)
{
	struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
	uint8_t qid, i;

	RTE_SET_USED(port);

	m0 = (struct rte_mbuf *)ev[0].mbuf;
	qid = rte_event_eth_tx_adapter_txq_get(m0);

	for (i = 0; i < nb_events; i++)
		m[i] = (struct rte_mbuf *)ev[i].mbuf;

	return rte_eth_tx_burst(m0->port, qid, m, nb_events);
}

static uint16_t
dpaa2_eventdev_txa_enqueue(void *port,
			   struct rte_event ev[],
			   uint16_t nb_events)
{
	void *txq[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH];
	struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH];
	uint8_t qid, i;

	RTE_SET_USED(port);

	for (i = 0; i < nb_events; i++) {
		m[i] = (struct rte_mbuf *)ev[i].mbuf;
		qid = rte_event_eth_tx_adapter_txq_get(m[i]);
		txq[i] = rte_eth_devices[m[i]->port].data->tx_queues[qid];
	}

	return dpaa2_dev_tx_multi_txq_ordered(txq, m, nb_events);
}

static struct eventdev_ops dpaa2_eventdev_ops = {
	.dev_infos_get = dpaa2_eventdev_info_get,
	.dev_configure = dpaa2_eventdev_configure,
	.dev_start = dpaa2_eventdev_start,
	.dev_stop = dpaa2_eventdev_stop,
	.dev_close = dpaa2_eventdev_close,
	.queue_def_conf = dpaa2_eventdev_queue_def_conf,
	.queue_setup = dpaa2_eventdev_queue_setup,
	.queue_release = dpaa2_eventdev_queue_release,
	.port_def_conf = dpaa2_eventdev_port_def_conf,
	.port_setup = dpaa2_eventdev_port_setup,
	.port_release = dpaa2_eventdev_port_release,
	.port_link = dpaa2_eventdev_port_link,
	.port_unlink = dpaa2_eventdev_port_unlink,
	.timeout_ticks = dpaa2_eventdev_timeout_ticks,
	.dump = dpaa2_eventdev_dump,
	.dev_selftest = test_eventdev_dpaa2,
	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
	.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
	.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
	.eth_tx_adapter_caps_get = dpaa2_eventdev_tx_adapter_caps,
	.eth_tx_adapter_create = dpaa2_eventdev_tx_adapter_create,
	.crypto_adapter_caps_get = dpaa2_eventdev_crypto_caps_get,
	.crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add,
	.crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del,
	.crypto_adapter_start = dpaa2_eventdev_crypto_start,
	.crypto_adapter_stop = dpaa2_eventdev_crypto_stop,
};

static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
			  struct dpaa2_dpcon_dev *dpcon_dev)
{
	struct dpci_rx_queue_cfg rx_queue_cfg;
	int ret, i;

	/* Do settings to get the frame on a DPCON object */
	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
			       DPCI_QUEUE_OPT_USER_CTX;
	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
		dpaa2_eventdev_process_parallel;
	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
		dpaa2_eventdev_process_atomic;

	for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
		rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
		ret = dpci_set_rx_queue(&dpci_dev->dpci,
					CMD_PRI_LOW,
					dpci_dev->token, i,
					&rx_queue_cfg);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI Rx queue setup failed: err(%d)",
				ret);
			return ret;
		}
	}
	return 0;
}

static int
dpaa2_eventdev_create(const char *name, struct rte_vdev_device *vdev)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
	struct dpaa2_dpci_dev *dpci_dev = NULL;
	int ret;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa2_eventdev),
					   rte_socket_id(), vdev);
	if (eventdev == NULL) {
		DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
		goto fail;
	}

	eventdev->dev_ops = &dpaa2_eventdev_ops;
	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
	eventdev->txa_enqueue = dpaa2_eventdev_txa_enqueue;
	eventdev->txa_enqueue_same_dest = dpaa2_eventdev_txa_enqueue_same_dest;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto done;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = 0;

	do {
		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
		if (!dpcon_dev)
			break;
		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

		dpci_dev = rte_dpaa2_alloc_dpci_dev();
		if (!dpci_dev) {
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			break;
		}
		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI setup failed: err(%d)", ret);
			return ret;
		}
		priv->max_event_queues++;
	} while (dpcon_dev && dpci_dev);

	DPAA2_EVENTDEV_INFO("%s eventdev created", name);

done:
	event_dev_probing_finish(eventdev);
	return 0;
fail:
	return -EFAULT;
}

static int
dpaa2_eventdev_destroy(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	int i;

	eventdev = rte_event_pmd_get_named_dev(name);
	if (eventdev == NULL) {
		DPAA2_EVENTDEV_ERR("eventdev with name %s not allocated", name);
		return -1;
	}

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	for (i = 0; i < priv->max_event_queues; i++) {
		if (priv->evq_info[i].dpcon)
			rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon);

		if (priv->evq_info[i].dpci)
			rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci);
	}
	priv->max_event_queues = 0;

	DPAA2_EVENTDEV_INFO("%s eventdev cleaned", name);
	return 0;
}

static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Initializing %s", name);
	return dpaa2_eventdev_create(name, vdev);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Closing %s", name);

	dpaa2_eventdev_destroy(name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
	.probe = dpaa2_eventdev_probe,
	.remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_event, NOTICE);
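
/*
 * Illustrative usage sketch (not part of this driver): a minimal sequence
 * showing how an application typically exercises the entry points above
 * through the generic eventdev API once the "event_dpaa2" vdev has been
 * probed. Device id 0, a single queue/port pair, the burst sizes and the
 * forward-only worker loop are assumptions made purely for illustration;
 * passing NULL to the queue/port setup calls requests the default
 * configuration reported by this PMD.
 *
 *	uint8_t dev_id = 0, queue_id = 0, port_id = 0, prio = 0;
 *	struct rte_event_dev_config dev_cfg = {
 *		.nb_event_queues = 1,
 *		.nb_event_ports = 1,
 *		.nb_events_limit = 4096,
 *		.nb_event_queue_flows = 1024,
 *		.nb_event_port_dequeue_depth = 8,
 *		.nb_event_port_enqueue_depth = 8,
 *	};
 *	struct rte_event ev[8];
 *	uint16_t nb, i;
 *
 *	rte_event_dev_configure(dev_id, &dev_cfg);
 *	rte_event_queue_setup(dev_id, queue_id, NULL);
 *	rte_event_port_setup(dev_id, port_id, NULL);
 *	rte_event_port_link(dev_id, port_id, &queue_id, &prio, 1);
 *	rte_event_dev_start(dev_id);
 *
 *	for (;;) {
 *		nb = rte_event_dequeue_burst(dev_id, port_id, ev,
 *					     RTE_DIM(ev), 0);
 *		for (i = 0; i < nb; i++)
 *			ev[i].op = RTE_EVENT_OP_FORWARD;
 *		if (nb)
 *			rte_event_enqueue_burst(dev_id, port_id, ev, nb);
 *	}
 */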