/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017,2019-2022 NXP
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_eal.h>
#include <bus_fslmc_driver.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_pci.h>
#include <bus_vdev_driver.h>
#include <ethdev_driver.h>
#include <cryptodev_pmd.h>
#include <rte_event_crypto_adapter.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include <dpaa2_sec_event.h>
#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>

/* Clarifications
 * Eventdev = SoC instance
 * Eventport = DPIO instance
 * Eventqueue = DPCON instance
 * 1 Eventdev can have N Eventqueues
 * Soft Event Flow is a DPCI instance
 */

#define DPAA2_EV_TX_RETRY_COUNT 10000

static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
			     uint16_t nb_events)
{
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_dpio_dev *dpio_dev;
	uint32_t queue_id = ev[0].queue_id;
	struct dpaa2_eventq *evq_info;
	uint32_t fqid, retry_count;
	struct qbman_swp *swp;
	struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
	uint32_t loop, frames_to_send;
	struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
	uint16_t num_tx = 0;
	int i, n, ret;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}
	/* todo - dpaa2_portal shall have dpio_dev - no per thread variable */
	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						CMD_PRI_LOW,
						dpio_dev->token,
						evq_info->dpcon->dpcon_id,
						&channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	evq_info = &dpaa2_portal->evq_info[queue_id];

	while (nb_events) {
		frames_to_send = (nb_events > dpaa2_eqcr_size) ?
			dpaa2_eqcr_size : nb_events;

		for (loop = 0; loop < frames_to_send; loop++) {
			const struct rte_event *event = &ev[num_tx + loop];

			if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
			else
				fqid = evq_info->dpci->rx_queue[
					DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

			/* Prepare enqueue descriptor */
			qbman_eq_desc_clear(&eqdesc[loop]);
			qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
			qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
			qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

			if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
				&& *dpaa2_seqn(event->mbuf)) {
				uint8_t dqrr_index =
					*dpaa2_seqn(event->mbuf) - 1;

				qbman_eq_desc_set_dca(&eqdesc[loop], 1,
						      dqrr_index, 0);
				DPAA2_PER_LCORE_DQRR_SIZE--;
				DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
			}

			memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

			/*
			 * todo - need to align with hw context data
			 * to avoid copy
			 */
			struct rte_event *ev_temp = rte_malloc(NULL,
				sizeof(struct rte_event), 0);

			if (!ev_temp) {
				if (!loop)
					return num_tx;
				frames_to_send = loop;
				DPAA2_EVENTDEV_ERR(
					"Unable to allocate event object");
				goto send_partial;
			}
			rte_memcpy(ev_temp, event, sizeof(struct rte_event));
			DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
			DPAA2_SET_FD_LEN((&fd_arr[loop]),
					 sizeof(struct rte_event));
		}
send_partial:
		loop = 0;
		retry_count = 0;
		while (loop < frames_to_send) {
			ret = qbman_swp_enqueue_multiple_desc(swp,
					&eqdesc[loop], &fd_arr[loop],
					frames_to_send - loop);
			if (unlikely(ret < 0)) {
				retry_count++;
				if (retry_count > DPAA2_EV_TX_RETRY_COUNT) {
					num_tx += loop;
					nb_events -= loop;
					return num_tx;
				}
			} else {
				loop += ret;
				retry_count = 0;
			}
		}
		num_tx += loop;
		nb_events -= loop;
	}

	return num_tx;
err:
	/* Unconfigure the channels that were set up before the failure */
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;
		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						dpio_dev->token,
						evq_info->dpcon->dpcon_id);
	}
	return 0;
}

static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
	return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}

static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
	struct epoll_event epoll_ev;

	qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
					 QBMAN_SWP_INTERRUPT_DQRI);

	epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
		   &epoll_ev, 1, timeout_ticks);
}

static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
					    const struct qbman_fd *fd,
					    const struct qbman_result *dq,
					    struct dpaa2_queue *rxq,
					    struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);

	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);

	qbman_swp_dqrr_consume(swp, dq);
}

static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
					  const struct qbman_fd *fd,
					  const struct qbman_result *dq,
					  struct dpaa2_queue *rxq,
					  struct rte_event *ev)
{
	struct rte_event *ev_temp =
		(struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
	uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

	RTE_SET_USED(swp);
	RTE_SET_USED(rxq);

	rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
	rte_free(ev_temp);
	*dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
	DPAA2_PER_LCORE_DQRR_SIZE++;
	DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
	DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}

static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
			     uint16_t nb_events, uint64_t timeout_ticks)
{
	const struct qbman_result *dq;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;
	const struct qbman_fd *fd;
	struct dpaa2_queue *rxq;
	int num_pkts = 0, ret, i = 0, n;
	uint8_t channel_index;

	if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
		/* Affine current thread context to a qman portal */
		ret = dpaa2_affine_qbman_swp();
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Failed to allocate IO portal, tid: %d\n",
				rte_gettid());
			return 0;
		}
	}

	dpio_dev = DPAA2_PER_LCORE_DPIO;
	swp = DPAA2_PER_LCORE_PORTAL;

	if (likely(dpaa2_portal->is_port_linked))
		goto skip_linking;

	/* Create mapping between portal and channel to receive packets */
	for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
		evq_info = &dpaa2_portal->evq_info[i];
		if (!evq_info->event_port)
			continue;

		ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
						CMD_PRI_LOW,
						dpio_dev->token,
						evq_info->dpcon->dpcon_id,
						&channel_index);
		if (ret < 0) {
			DPAA2_EVENTDEV_ERR(
				"Static dequeue config failed: err(%d)", ret);
			goto err;
		}

		qbman_swp_push_set(swp, channel_index, 1);
		evq_info->dpcon->channel_index = channel_index;
	}
	dpaa2_portal->is_port_linked = true;

skip_linking:
	/* Check if there are atomic contexts to be released */
	while (DPAA2_PER_LCORE_DQRR_SIZE) {
		if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
			qbman_swp_dqrr_idx_consume(swp, i);
			DPAA2_PER_LCORE_DQRR_SIZE--;
			*dpaa2_seqn(DPAA2_PER_LCORE_DQRR_MBUF(i)) =
				DPAA2_INVALID_MBUF_SEQN;
		}
		i++;
	}
	DPAA2_PER_LCORE_DQRR_HELD = 0;

	do {
		dq = qbman_swp_dqrr_next(swp);
		if (!dq) {
			if (!num_pkts && timeout_ticks) {
				dpaa2_eventdev_dequeue_wait(timeout_ticks);
				timeout_ticks = 0;
				continue;
			}
			return num_pkts;
		}
		qbman_swp_prefetch_dqrr_next(swp);

		fd = qbman_result_DQ_fd(dq);
		rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
		if (rxq) {
			rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
		} else {
			qbman_swp_dqrr_consume(swp, dq);
			DPAA2_EVENTDEV_ERR("Null Return VQ received");
			return 0;
		}

		num_pkts++;
	} while (num_pkts < nb_events);

	return num_pkts;
err:
	/* Unconfigure the channels that were set up before the failure */
	for (n = 0; n < i; n++) {
		evq_info = &dpaa2_portal->evq_info[n];
		if (!evq_info->event_port)
			continue;

		qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
		dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						dpio_dev->token,
						evq_info->dpcon->dpcon_id);
	}
	return 0;
}

static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
		       uint64_t timeout_ticks)
{
	return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}

static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
			struct rte_event_dev_info *dev_info)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;

	EVENTDEV_INIT_FUNC_TRACE();

	memset(dev_info, 0, sizeof(struct rte_event_dev_info));
	dev_info->min_dequeue_timeout_ns =
		DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
	dev_info->max_dequeue_timeout_ns =
		DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
	dev_info->dequeue_timeout_ns =
		DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	dev_info->max_event_queues = priv->max_event_queues;
	dev_info->max_event_queue_flows =
		DPAA2_EVENT_MAX_QUEUE_FLOWS;
	dev_info->max_event_queue_priority_levels =
		DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
	dev_info->max_event_priority_levels =
		DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
	dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
	/* We only support DPIO ports up to the number of cores */
	if (dev_info->max_event_ports > rte_lcore_count())
		dev_info->max_event_ports = rte_lcore_count();
	dev_info->max_event_port_dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	dev_info->max_event_port_enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
		RTE_EVENT_DEV_CAP_BURST_MODE |
		RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
		RTE_EVENT_DEV_CAP_NONSEQ_MODE |
		RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
		RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
		RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
	dev_info->max_profiles_per_port = 1;
}

static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct rte_event_dev_config *conf = &dev->data->dev_conf;

	EVENTDEV_INIT_FUNC_TRACE();

	priv->nb_event_queues = conf->nb_event_queues;
	priv->nb_event_ports = conf->nb_event_ports;
	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
	priv->event_dev_cfg = conf->event_dev_cfg;

	/* Check whether the dequeue timeout is per dequeue or global */
	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		/*
		 * Use the timeout value given in the dequeue operation,
		 * so invalidate this global timeout value.
		 */
		priv->dequeue_timeout_ns = 0;
	} else if (conf->dequeue_timeout_ns == 0) {
		priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
	} else {
		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
	}

	DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
			     dev->data->dev_id);
	return 0;
}

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
			      struct rte_event_queue_conf *queue_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
	queue_conf->nb_atomic_order_sequences =
		DPAA2_EVENT_QUEUE_ORDER_SEQUENCES;
	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			   const struct rte_event_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];

	EVENTDEV_INIT_FUNC_TRACE();

	switch (queue_conf->schedule_type) {
	case RTE_SCHED_TYPE_PARALLEL:
	case RTE_SCHED_TYPE_ATOMIC:
	case RTE_SCHED_TYPE_ORDERED:
		break;
	default:
		DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
		return -1;
	}
	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
	evq_info->event_queue_id = queue_id;

	return 0;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
			     struct rte_event_port_conf *port_conf)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold =
		DPAA2_EVENT_MAX_NUM_EVENTS;
	port_conf->dequeue_depth =
		DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
	port_conf->enqueue_depth =
		DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
	port_conf->event_port_cfg = 0;
}

static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
			  const struct rte_event_port_conf *port_conf)
{
	char event_port_name[32];
	struct dpaa2_port *portal;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(port_conf);

	sprintf(event_port_name, "event-port-%d", port_id);
	portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
	if (!portal) {
		DPAA2_EVENTDEV_ERR("Memory allocation failure");
		return -ENOMEM;
	}

	memset(portal, 0, sizeof(struct dpaa2_port));
	dev->data->ports[port_id] = portal;
	return 0;
}

static void
dpaa2_eventdev_port_release(void *port)
{
	struct dpaa2_port *portal = port;

	EVENTDEV_INIT_FUNC_TRACE();

	if (portal == NULL)
		return;

	/* TODO: Cleanup is required when ports are in linked state. */
	if (portal->is_port_linked)
		DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");

	rte_free(portal);
}

static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
			 const uint8_t queues[], const uint8_t priorities[],
			 uint16_t nb_links)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	struct dpaa2_port *dpaa2_portal = port;
	struct dpaa2_eventq *evq_info;
	uint16_t i;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(priorities);

	for (i = 0; i < nb_links; i++) {
		evq_info = &priv->evq_info[queues[i]];
		memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
		       sizeof(struct dpaa2_eventq));
		dpaa2_portal->evq_info[queues[i]].event_port = port;
		dpaa2_portal->num_linked_evq++;
	}

	return (int)nb_links;
}

static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
			   uint8_t queues[], uint16_t nb_unlinks)
{
	struct dpaa2_port *dpaa2_portal = port;
	int i;
	struct dpaa2_dpio_dev *dpio_dev = NULL;
	struct dpaa2_eventq *evq_info;
	struct qbman_swp *swp;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < nb_unlinks; i++) {
		evq_info = &dpaa2_portal->evq_info[queues[i]];

		if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
			/* todo dpaa2_portal shall have dpio_dev-no per lcore*/
			dpio_dev = DPAA2_PER_LCORE_DPIO;
			swp = DPAA2_PER_LCORE_PORTAL;

			qbman_swp_push_set(swp,
					evq_info->dpcon->channel_index, 0);
			dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
						dpio_dev->token,
						evq_info->dpcon->dpcon_id);
		}
		memset(evq_info, 0, sizeof(struct dpaa2_eventq));
		if (dpaa2_portal->num_linked_evq)
			dpaa2_portal->num_linked_evq--;
	}

	if (!dpaa2_portal->num_linked_evq)
		dpaa2_portal->is_port_linked = false;

	return (int)nb_unlinks;
}

static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
			     uint64_t *timeout_ticks)
{
	uint32_t scale = 1000 * 1000;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	*timeout_ticks = ns / scale;

	return 0;
}

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(f);
}

static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
			    const struct rte_eth_dev *eth_dev,
			    uint32_t *caps)
{
	const char *ethdev_driver = eth_dev->device->driver->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strcmp(ethdev_driver, "net_dpaa2"))
		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

	return 0;
}

static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_attach(eth_dev, i,
					      dpcon, queue_conf);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue attach failed: err(%d)", ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa2_eth_eventq_detach(eth_dev, i);

	return ret;
}

static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev,
		int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = queue_conf->ev.queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_add_all(dev,
				eth_dev, queue_conf);

	ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
				      dpcon, queue_conf);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue attach failed: err(%d)", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
				 const struct rte_eth_dev *eth_dev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
		ret = dpaa2_eth_eventq_detach(eth_dev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"Event queue detach failed: err(%d)", ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
			     const struct rte_eth_dev *eth_dev,
			     int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

	ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"Event queue detach failed: err(%d)", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
			 const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
			const struct rte_eth_dev *eth_dev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	return 0;
}

static int
dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
			       const struct rte_cryptodev *cdev,
			       uint32_t *caps)
{
	const char *name = cdev->data->name;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	if (!strncmp(name, "dpsec-", 6))
		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
	else
		return -1;

	return 0;
}

static int
dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		const struct rte_event *ev)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = ev->queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_attach(cryptodev, i, dpcon, ev);
		if (ret) {
			DPAA2_EVENTDEV_ERR("dpaa2_sec_eventq_attach failed: ret %d\n",
					   ret);
			goto fail;
		}
	}
	return 0;
fail:
	for (i = (i - 1); i >= 0; i--)
		dpaa2_sec_eventq_detach(cryptodev, i);

	return ret;
}

static int
dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
		const struct rte_cryptodev *cryptodev,
		int32_t rx_queue_id,
		const struct rte_event_crypto_adapter_queue_conf *conf)
{
	struct dpaa2_eventdev *priv = dev->data->dev_private;
	uint8_t ev_qid = conf->ev.queue_id;
	struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_add_all(dev,
				cryptodev, &conf->ev);

	ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
				      dpcon, &conf->ev);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
		return ret;
	}
	return 0;
}

static int
dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
				    const struct rte_cryptodev *cdev)
{
	int i, ret;

	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);

	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
		ret = dpaa2_sec_eventq_detach(cdev, i);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"dpaa2_sec_eventq_detach failed:ret %d\n", ret);
			return ret;
		}
	}

	return 0;
}

static int
dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
				const struct rte_cryptodev *cryptodev,
				int32_t rx_queue_id)
{
	int ret;

	EVENTDEV_INIT_FUNC_TRACE();

	if (rx_queue_id == -1)
		return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);

	ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
	if (ret) {
		DPAA2_EVENTDEV_ERR(
			"dpaa2_sec_eventq_detach failed: ret: %d\n", ret);
		return ret;
	}

	return 0;
}

static int
dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
			    const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
			   const struct rte_cryptodev *cryptodev)
{
	EVENTDEV_INIT_FUNC_TRACE();

	RTE_SET_USED(dev);
	RTE_SET_USED(cryptodev);

	return 0;
}

static int
dpaa2_eventdev_tx_adapter_create(uint8_t id,
				 const struct rte_eventdev *dev)
{
	RTE_SET_USED(id);
	RTE_SET_USED(dev);

	/* Nothing to do. Simply return. */
	return 0;
}

static int
dpaa2_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
			       const struct rte_eth_dev *eth_dev,
			       uint32_t *caps)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(eth_dev);

	*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
	return 0;
}

static uint16_t
dpaa2_eventdev_txa_enqueue_same_dest(void *port,
				     struct rte_event ev[],
				     uint16_t nb_events)
{
	struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
	uint8_t qid, i;

	RTE_SET_USED(port);

	m0 = (struct rte_mbuf *)ev[0].mbuf;
	qid = rte_event_eth_tx_adapter_txq_get(m0);

	for (i = 0; i < nb_events; i++)
		m[i] = (struct rte_mbuf *)ev[i].mbuf;

	return rte_eth_tx_burst(m0->port, qid, m, nb_events);
}

static uint16_t
dpaa2_eventdev_txa_enqueue(void *port,
			   struct rte_event ev[],
			   uint16_t nb_events)
{
	void *txq[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH];
	struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH];
	uint8_t qid, i;

	RTE_SET_USED(port);

	for (i = 0; i < nb_events; i++) {
		m[i] = (struct rte_mbuf *)ev[i].mbuf;
		qid = rte_event_eth_tx_adapter_txq_get(m[i]);
		txq[i] = rte_eth_devices[m[i]->port].data->tx_queues[qid];
	}

	return dpaa2_dev_tx_multi_txq_ordered(txq, m, nb_events);
}

static struct eventdev_ops dpaa2_eventdev_ops = {
	.dev_infos_get = dpaa2_eventdev_info_get,
	.dev_configure = dpaa2_eventdev_configure,
	.dev_start = dpaa2_eventdev_start,
	.dev_stop = dpaa2_eventdev_stop,
	.dev_close = dpaa2_eventdev_close,
	.queue_def_conf = dpaa2_eventdev_queue_def_conf,
	.queue_setup = dpaa2_eventdev_queue_setup,
	.queue_release = dpaa2_eventdev_queue_release,
	.port_def_conf = dpaa2_eventdev_port_def_conf,
	.port_setup = dpaa2_eventdev_port_setup,
	.port_release = dpaa2_eventdev_port_release,
	.port_link = dpaa2_eventdev_port_link,
	.port_unlink = dpaa2_eventdev_port_unlink,
	.timeout_ticks = dpaa2_eventdev_timeout_ticks,
	.dump = dpaa2_eventdev_dump,
	.dev_selftest = test_eventdev_dpaa2,
	.eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
	.eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
	.eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
	.eth_rx_adapter_start = dpaa2_eventdev_eth_start,
	.eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
	.eth_tx_adapter_caps_get = dpaa2_eventdev_tx_adapter_caps,
	.eth_tx_adapter_create = dpaa2_eventdev_tx_adapter_create,
	.crypto_adapter_caps_get = dpaa2_eventdev_crypto_caps_get,
	.crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add,
	.crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del,
	.crypto_adapter_start = dpaa2_eventdev_crypto_start,
	.crypto_adapter_stop = dpaa2_eventdev_crypto_stop,
};

static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
			  struct dpaa2_dpcon_dev *dpcon_dev)
{
	struct dpci_rx_queue_cfg rx_queue_cfg;
	int ret, i;

	/* Do settings to get the frame on a DPCON object */
	rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
			       DPCI_QUEUE_OPT_USER_CTX;
	rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
	rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
	rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
		dpaa2_eventdev_process_parallel;
	dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
		dpaa2_eventdev_process_atomic;

	for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
		rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
		ret = dpci_set_rx_queue(&dpci_dev->dpci,
					CMD_PRI_LOW,
					dpci_dev->token, i,
					&rx_queue_cfg);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI Rx queue setup failed: err(%d)",
				ret);
			return ret;
		}
	}
	return 0;
}

static int
dpaa2_eventdev_create(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	struct dpaa2_dpcon_dev *dpcon_dev = NULL;
	struct dpaa2_dpci_dev *dpci_dev = NULL;
	int ret;

	eventdev = rte_event_pmd_vdev_init(name,
					   sizeof(struct dpaa2_eventdev),
					   rte_socket_id());
	if (eventdev == NULL) {
		DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
		goto fail;
	}

	eventdev->dev_ops = &dpaa2_eventdev_ops;
	eventdev->enqueue = dpaa2_eventdev_enqueue;
	eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
	eventdev->dequeue = dpaa2_eventdev_dequeue;
	eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
	eventdev->txa_enqueue = dpaa2_eventdev_txa_enqueue;
	eventdev->txa_enqueue_same_dest = dpaa2_eventdev_txa_enqueue_same_dest;

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		goto done;

	priv = eventdev->data->dev_private;
	priv->max_event_queues = 0;

	do {
		dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
		if (!dpcon_dev)
			break;
		priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

		dpci_dev = rte_dpaa2_alloc_dpci_dev();
		if (!dpci_dev) {
			rte_dpaa2_free_dpcon_dev(dpcon_dev);
			break;
		}
		priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

		ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
		if (ret) {
			DPAA2_EVENTDEV_ERR(
				"DPCI setup failed: err(%d)", ret);
			return ret;
		}
		priv->max_event_queues++;
	} while (dpcon_dev && dpci_dev);

	RTE_LOG(INFO, PMD, "%s eventdev created\n", name);

done:
	event_dev_probing_finish(eventdev);
	return 0;
fail:
	return -EFAULT;
}

static int
dpaa2_eventdev_destroy(const char *name)
{
	struct rte_eventdev *eventdev;
	struct dpaa2_eventdev *priv;
	int i;

	eventdev = rte_event_pmd_get_named_dev(name);
	if (eventdev == NULL) {
		RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name);
		return -1;
	}

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return 0;

	priv = eventdev->data->dev_private;
	for (i = 0; i < priv->max_event_queues; i++) {
		if (priv->evq_info[i].dpcon)
			rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon);

		if (priv->evq_info[i].dpci)
			rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci);
	}
	priv->max_event_queues = 0;

	RTE_LOG(INFO, PMD, "%s eventdev cleaned\n", name);
	return 0;
}

static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Initializing %s", name);
	return dpaa2_eventdev_create(name);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	DPAA2_EVENTDEV_INFO("Closing %s", name);

	dpaa2_eventdev_destroy(name);

	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
	.probe = dpaa2_eventdev_probe,
	.remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
RTE_LOG_REGISTER_DEFAULT(dpaa2_logtype_event, NOTICE);