/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017,2019 NXP
 */

#include <assert.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/epoll.h>

#include <rte_atomic.h>
#include <rte_byteorder.h>
#include <rte_common.h>
#include <rte_debug.h>
#include <rte_dev.h>
#include <rte_eal.h>
#include <rte_fslmc.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_pci.h>
#include <rte_bus_vdev.h>
#include <ethdev_driver.h>
#include <rte_cryptodev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>

#include <fslmc_vfio.h>
#include <dpaa2_hw_pvt.h>
#include <dpaa2_hw_mempool.h>
#include <dpaa2_hw_dpio.h>
#include <dpaa2_ethdev.h>
#include <dpaa2_sec_event.h>
#include "dpaa2_eventdev.h"
#include "dpaa2_eventdev_logs.h"
#include <portal/dpaa2_hw_pvt.h>
#include <mc/fsl_dpci.h>

/* Clarifications
 * Eventdev = SoC Instance
 * Eventport = DPIO Instance
 * Eventqueue = DPCON Instance
 * 1 Eventdev can have N Eventqueues
 * Soft Event Flow is DPCI Instance
 */

#define DPAA2_EV_TX_RETRY_COUNT 10000

static uint16_t
dpaa2_eventdev_enqueue_burst(void *port, const struct rte_event ev[],
                             uint16_t nb_events)
{

        struct dpaa2_port *dpaa2_portal = port;
        struct dpaa2_dpio_dev *dpio_dev;
        uint32_t queue_id = ev[0].queue_id;
        struct dpaa2_eventq *evq_info;
        uint32_t fqid, retry_count;
        struct qbman_swp *swp;
        struct qbman_fd fd_arr[MAX_TX_RING_SLOTS];
        uint32_t loop, frames_to_send;
        struct qbman_eq_desc eqdesc[MAX_TX_RING_SLOTS];
        uint16_t num_tx = 0;
        int i, n, ret;
        uint8_t channel_index;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                /* Affine current thread context to a qman portal */
                ret = dpaa2_affine_qbman_swp();
                if (ret < 0) {
                        DPAA2_EVENTDEV_ERR(
                                "Failed to allocate IO portal, tid: %d\n",
                                rte_gettid());
                        return 0;
                }
        }
        /* todo - dpaa2_portal shall have dpio_dev - no per thread variable */
        dpio_dev = DPAA2_PER_LCORE_DPIO;
        swp = DPAA2_PER_LCORE_PORTAL;

        if (likely(dpaa2_portal->is_port_linked))
                goto skip_linking;

        /* Create mapping between portal and channel to receive packets */
        for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
                evq_info = &dpaa2_portal->evq_info[i];
                if (!evq_info->event_port)
                        continue;

                ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
                                                      CMD_PRI_LOW,
                                                      dpio_dev->token,
                                                      evq_info->dpcon->dpcon_id,
                                                      &channel_index);
                if (ret < 0) {
                        DPAA2_EVENTDEV_ERR(
                                "Static dequeue config failed: err(%d)", ret);
                        goto err;
                }

                qbman_swp_push_set(swp, channel_index, 1);
                evq_info->dpcon->channel_index = channel_index;
        }
        dpaa2_portal->is_port_linked = true;

skip_linking:
        evq_info = &dpaa2_portal->evq_info[queue_id];
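
        /*
         * Transmit in bursts of at most dpaa2_eqcr_size frames. Each event is
         * copied into a temporary rte_event object whose address is carried
         * in the frame descriptor handed to the QBMAN enqueue ring.
         */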
        while (nb_events) {
                frames_to_send = (nb_events > dpaa2_eqcr_size) ?
                        dpaa2_eqcr_size : nb_events;

                for (loop = 0; loop < frames_to_send; loop++) {
                        const struct rte_event *event = &ev[num_tx + loop];

                        if (event->sched_type != RTE_SCHED_TYPE_ATOMIC)
                                fqid = evq_info->dpci->rx_queue[
                                        DPAA2_EVENT_DPCI_PARALLEL_QUEUE].fqid;
                        else
                                fqid = evq_info->dpci->rx_queue[
                                        DPAA2_EVENT_DPCI_ATOMIC_QUEUE].fqid;

                        /* Prepare enqueue descriptor */
                        qbman_eq_desc_clear(&eqdesc[loop]);
                        qbman_eq_desc_set_fq(&eqdesc[loop], fqid);
                        qbman_eq_desc_set_no_orp(&eqdesc[loop], 0);
                        qbman_eq_desc_set_response(&eqdesc[loop], 0, 0);

                        if (event->sched_type == RTE_SCHED_TYPE_ATOMIC
                                && *dpaa2_seqn(event->mbuf)) {
                                uint8_t dqrr_index =
                                        *dpaa2_seqn(event->mbuf) - 1;

                                qbman_eq_desc_set_dca(&eqdesc[loop], 1,
                                                      dqrr_index, 0);
                                DPAA2_PER_LCORE_DQRR_SIZE--;
                                DPAA2_PER_LCORE_DQRR_HELD &= ~(1 << dqrr_index);
                        }

                        memset(&fd_arr[loop], 0, sizeof(struct qbman_fd));

                        /*
                         * todo - need to align with hw context data
                         * to avoid copy
                         */
                        struct rte_event *ev_temp = rte_malloc(NULL,
                                sizeof(struct rte_event), 0);

                        if (!ev_temp) {
                                if (!loop)
                                        return num_tx;
                                frames_to_send = loop;
                                DPAA2_EVENTDEV_ERR(
                                        "Unable to allocate event object");
                                goto send_partial;
                        }
                        rte_memcpy(ev_temp, event, sizeof(struct rte_event));
                        DPAA2_SET_FD_ADDR((&fd_arr[loop]), (size_t)ev_temp);
                        DPAA2_SET_FD_LEN((&fd_arr[loop]),
                                         sizeof(struct rte_event));
                }
send_partial:
                loop = 0;
                retry_count = 0;
                while (loop < frames_to_send) {
                        ret = qbman_swp_enqueue_multiple_desc(swp,
                                &eqdesc[loop], &fd_arr[loop],
                                frames_to_send - loop);
                        if (unlikely(ret < 0)) {
                                retry_count++;
                                if (retry_count > DPAA2_EV_TX_RETRY_COUNT) {
                                        num_tx += loop;
                                        nb_events -= loop;
                                        return num_tx;
                                }
                        } else {
                                loop += ret;
                                retry_count = 0;
                        }
                }
                num_tx += loop;
                nb_events -= loop;
        }

        return num_tx;
err:
        for (n = 0; n < i; n++) {
                evq_info = &dpaa2_portal->evq_info[n];
                if (!evq_info->event_port)
                        continue;
                qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
                dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
                                                   dpio_dev->token,
                                                   evq_info->dpcon->dpcon_id);
        }
        return 0;

}

static uint16_t
dpaa2_eventdev_enqueue(void *port, const struct rte_event *ev)
{
        return dpaa2_eventdev_enqueue_burst(port, ev, 1);
}

static void dpaa2_eventdev_dequeue_wait(uint64_t timeout_ticks)
{
        struct epoll_event epoll_ev;

        qbman_swp_interrupt_clear_status(DPAA2_PER_LCORE_PORTAL,
                                         QBMAN_SWP_INTERRUPT_DQRI);

        epoll_wait(DPAA2_PER_LCORE_DPIO->epoll_fd,
                   &epoll_ev, 1, timeout_ticks);
}

static void dpaa2_eventdev_process_parallel(struct qbman_swp *swp,
                                            const struct qbman_fd *fd,
                                            const struct qbman_result *dq,
                                            struct dpaa2_queue *rxq,
                                            struct rte_event *ev)
{
        struct rte_event *ev_temp =
                (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);

        RTE_SET_USED(rxq);

        rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
        rte_free(ev_temp);

        qbman_swp_dqrr_consume(swp, dq);
}

static void dpaa2_eventdev_process_atomic(struct qbman_swp *swp,
                                          const struct qbman_fd *fd,
                                          const struct qbman_result *dq,
                                          struct dpaa2_queue *rxq,
                                          struct rte_event *ev)
{
        struct rte_event *ev_temp =
                (struct rte_event *)(size_t)DPAA2_GET_FD_ADDR(fd);
        uint8_t dqrr_index = qbman_get_dqrr_idx(dq);

        RTE_SET_USED(swp);
        RTE_SET_USED(rxq);

        rte_memcpy(ev, ev_temp, sizeof(struct rte_event));
        rte_free(ev_temp);
        *dpaa2_seqn(ev->mbuf) = dqrr_index + 1;
        DPAA2_PER_LCORE_DQRR_SIZE++;
        DPAA2_PER_LCORE_DQRR_HELD |= 1 << dqrr_index;
        DPAA2_PER_LCORE_DQRR_MBUF(dqrr_index) = ev->mbuf;
}
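
/*
 * Dequeue a burst of events from the DPCON channels statically mapped to this
 * portal. Held atomic DQRR entries are consumed first; frames are then pulled
 * from the software portal and converted back into events by the per-queue
 * callbacks (parallel or atomic).
 */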
static uint16_t
dpaa2_eventdev_dequeue_burst(void *port, struct rte_event ev[],
                             uint16_t nb_events, uint64_t timeout_ticks)
{
        const struct qbman_result *dq;
        struct dpaa2_dpio_dev *dpio_dev = NULL;
        struct dpaa2_port *dpaa2_portal = port;
        struct dpaa2_eventq *evq_info;
        struct qbman_swp *swp;
        const struct qbman_fd *fd;
        struct dpaa2_queue *rxq;
        int num_pkts = 0, ret, i = 0, n;
        uint8_t channel_index;

        if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
                /* Affine current thread context to a qman portal */
                ret = dpaa2_affine_qbman_swp();
                if (ret < 0) {
                        DPAA2_EVENTDEV_ERR(
                                "Failed to allocate IO portal, tid: %d\n",
                                rte_gettid());
                        return 0;
                }
        }

        dpio_dev = DPAA2_PER_LCORE_DPIO;
        swp = DPAA2_PER_LCORE_PORTAL;

        if (likely(dpaa2_portal->is_port_linked))
                goto skip_linking;

        /* Create mapping between portal and channel to receive packets */
        for (i = 0; i < DPAA2_EVENT_MAX_QUEUES; i++) {
                evq_info = &dpaa2_portal->evq_info[i];
                if (!evq_info->event_port)
                        continue;

                ret = dpio_add_static_dequeue_channel(dpio_dev->dpio,
                                                      CMD_PRI_LOW,
                                                      dpio_dev->token,
                                                      evq_info->dpcon->dpcon_id,
                                                      &channel_index);
                if (ret < 0) {
                        DPAA2_EVENTDEV_ERR(
                                "Static dequeue config failed: err(%d)", ret);
                        goto err;
                }

                qbman_swp_push_set(swp, channel_index, 1);
                evq_info->dpcon->channel_index = channel_index;
        }
        dpaa2_portal->is_port_linked = true;

skip_linking:
        /* Check if there are atomic contexts to be released */
        while (DPAA2_PER_LCORE_DQRR_SIZE) {
                if (DPAA2_PER_LCORE_DQRR_HELD & (1 << i)) {
                        qbman_swp_dqrr_idx_consume(swp, i);
                        DPAA2_PER_LCORE_DQRR_SIZE--;
                        *dpaa2_seqn(DPAA2_PER_LCORE_DQRR_MBUF(i)) =
                                DPAA2_INVALID_MBUF_SEQN;
                }
                i++;
        }
        DPAA2_PER_LCORE_DQRR_HELD = 0;

        do {
                dq = qbman_swp_dqrr_next(swp);
                if (!dq) {
                        if (!num_pkts && timeout_ticks) {
                                dpaa2_eventdev_dequeue_wait(timeout_ticks);
                                timeout_ticks = 0;
                                continue;
                        }
                        return num_pkts;
                }
                qbman_swp_prefetch_dqrr_next(swp);

                fd = qbman_result_DQ_fd(dq);
                rxq = (struct dpaa2_queue *)(size_t)qbman_result_DQ_fqd_ctx(dq);
                if (rxq) {
                        rxq->cb(swp, fd, dq, rxq, &ev[num_pkts]);
                } else {
                        qbman_swp_dqrr_consume(swp, dq);
                        DPAA2_EVENTDEV_ERR("Null Return VQ received");
                        return 0;
                }

                num_pkts++;
        } while (num_pkts < nb_events);

        return num_pkts;
err:
        for (n = 0; n < i; n++) {
                evq_info = &dpaa2_portal->evq_info[n];
                if (!evq_info->event_port)
                        continue;

                qbman_swp_push_set(swp, evq_info->dpcon->channel_index, 0);
                dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
                                                   dpio_dev->token,
                                                   evq_info->dpcon->dpcon_id);
        }
        return 0;
}

static uint16_t
dpaa2_eventdev_dequeue(void *port, struct rte_event *ev,
                       uint64_t timeout_ticks)
{
        return dpaa2_eventdev_dequeue_burst(port, ev, 1, timeout_ticks);
}

static void
dpaa2_eventdev_info_get(struct rte_eventdev *dev,
                        struct rte_event_dev_info *dev_info)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);

        memset(dev_info, 0, sizeof(struct rte_event_dev_info));
        dev_info->min_dequeue_timeout_ns =
                DPAA2_EVENT_MIN_DEQUEUE_TIMEOUT;
        dev_info->max_dequeue_timeout_ns =
                DPAA2_EVENT_MAX_DEQUEUE_TIMEOUT;
        dev_info->dequeue_timeout_ns =
                DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
        dev_info->max_event_queues = priv->max_event_queues;
        dev_info->max_event_queue_flows =
                DPAA2_EVENT_MAX_QUEUE_FLOWS;
        dev_info->max_event_queue_priority_levels =
                DPAA2_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
        dev_info->max_event_priority_levels =
                DPAA2_EVENT_MAX_EVENT_PRIORITY_LEVELS;
        dev_info->max_event_ports = rte_fslmc_get_device_count(DPAA2_IO);
        /* we only support dpio up to number of cores */
        if (dev_info->max_event_ports > rte_lcore_count())
                dev_info->max_event_ports = rte_lcore_count();
        dev_info->max_event_port_dequeue_depth =
                DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
        dev_info->max_event_port_enqueue_depth =
                DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
        dev_info->max_num_events = DPAA2_EVENT_MAX_NUM_EVENTS;
        dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
                RTE_EVENT_DEV_CAP_BURST_MODE |
                RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
                RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
                RTE_EVENT_DEV_CAP_NONSEQ_MODE |
                RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
                RTE_EVENT_DEV_CAP_CARRY_FLOW_ID;

}

static int
dpaa2_eventdev_configure(const struct rte_eventdev *dev)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        struct rte_event_dev_config *conf = &dev->data->dev_conf;

        EVENTDEV_INIT_FUNC_TRACE();

        priv->nb_event_queues = conf->nb_event_queues;
        priv->nb_event_ports = conf->nb_event_ports;
        priv->nb_event_queue_flows = conf->nb_event_queue_flows;
        priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
        priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
        priv->event_dev_cfg = conf->event_dev_cfg;

        /* Check whether the dequeue timeout method is per-dequeue or global */
        if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
                /*
                 * Use the timeout value given in the dequeue operation,
                 * so invalidate this (global) timeout value.
                 */
                priv->dequeue_timeout_ns = 0;

        } else if (conf->dequeue_timeout_ns == 0) {
                priv->dequeue_timeout_ns = DPAA2_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
        } else {
                priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
        }

        DPAA2_EVENTDEV_DEBUG("Configured eventdev devid=%d",
                             dev->data->dev_id);
        return 0;
}

static int
dpaa2_eventdev_start(struct rte_eventdev *dev)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);

        return 0;
}

static void
dpaa2_eventdev_stop(struct rte_eventdev *dev)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
}

static int
dpaa2_eventdev_close(struct rte_eventdev *dev)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);

        return 0;
}

static void
dpaa2_eventdev_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
                              struct rte_event_queue_conf *queue_conf)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(queue_id);

        queue_conf->nb_atomic_flows = DPAA2_EVENT_QUEUE_ATOMIC_FLOWS;
        queue_conf->nb_atomic_order_sequences =
                DPAA2_EVENT_QUEUE_ORDER_SEQUENCES;
        queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
        queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static int
dpaa2_eventdev_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
                           const struct rte_event_queue_conf *queue_conf)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        struct dpaa2_eventq *evq_info = &priv->evq_info[queue_id];

        EVENTDEV_INIT_FUNC_TRACE();

        switch (queue_conf->schedule_type) {
        case RTE_SCHED_TYPE_PARALLEL:
        case RTE_SCHED_TYPE_ATOMIC:
        case RTE_SCHED_TYPE_ORDERED:
                break;
        default:
                DPAA2_EVENTDEV_ERR("Schedule type is not supported.");
                return -1;
        }
        evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
        evq_info->event_queue_id = queue_id;

        return 0;
}

static void
dpaa2_eventdev_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(queue_id);
}

static void
dpaa2_eventdev_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
                             struct rte_event_port_conf *port_conf)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(port_id);

        port_conf->new_event_threshold =
                DPAA2_EVENT_MAX_NUM_EVENTS;
        port_conf->dequeue_depth =
                DPAA2_EVENT_MAX_PORT_DEQUEUE_DEPTH;
        port_conf->enqueue_depth =
                DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH;
        port_conf->event_port_cfg = 0;
}

static int
dpaa2_eventdev_port_setup(struct rte_eventdev *dev, uint8_t port_id,
                          const struct rte_event_port_conf *port_conf)
{
        char event_port_name[32];
        struct dpaa2_port *portal;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(port_conf);

        sprintf(event_port_name, "event-port-%d", port_id);
        portal = rte_malloc(event_port_name, sizeof(struct dpaa2_port), 0);
        if (!portal) {
                DPAA2_EVENTDEV_ERR("Memory allocation failure");
                return -ENOMEM;
        }

        memset(portal, 0, sizeof(struct dpaa2_port));
        dev->data->ports[port_id] = portal;
        return 0;
}

static void
dpaa2_eventdev_port_release(void *port)
{
        struct dpaa2_port *portal = port;

        EVENTDEV_INIT_FUNC_TRACE();

        if (portal == NULL)
                return;

        /* TODO: Cleanup is required when ports are in linked state. */
        if (portal->is_port_linked)
                DPAA2_EVENTDEV_WARN("Event port must be unlinked before release");

        rte_free(portal);
}
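
/*
 * Linking only records the per-queue configuration in the port's private
 * data; the actual portal-to-DPCON channel mapping is programmed lazily on
 * the first enqueue/dequeue from this port (see is_port_linked above).
 */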
static int
dpaa2_eventdev_port_link(struct rte_eventdev *dev, void *port,
                         const uint8_t queues[], const uint8_t priorities[],
                         uint16_t nb_links)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        struct dpaa2_port *dpaa2_portal = port;
        struct dpaa2_eventq *evq_info;
        uint16_t i;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(priorities);

        for (i = 0; i < nb_links; i++) {
                evq_info = &priv->evq_info[queues[i]];
                memcpy(&dpaa2_portal->evq_info[queues[i]], evq_info,
                       sizeof(struct dpaa2_eventq));
                dpaa2_portal->evq_info[queues[i]].event_port = port;
                dpaa2_portal->num_linked_evq++;
        }

        return (int)nb_links;
}

static int
dpaa2_eventdev_port_unlink(struct rte_eventdev *dev, void *port,
                           uint8_t queues[], uint16_t nb_unlinks)
{
        struct dpaa2_port *dpaa2_portal = port;
        int i;
        struct dpaa2_dpio_dev *dpio_dev = NULL;
        struct dpaa2_eventq *evq_info;
        struct qbman_swp *swp;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(queues);

        for (i = 0; i < nb_unlinks; i++) {
                evq_info = &dpaa2_portal->evq_info[queues[i]];

                if (DPAA2_PER_LCORE_DPIO && evq_info->dpcon) {
                        /* todo dpaa2_portal shall have dpio_dev-no per lcore*/
                        dpio_dev = DPAA2_PER_LCORE_DPIO;
                        swp = DPAA2_PER_LCORE_PORTAL;

                        qbman_swp_push_set(swp,
                                           evq_info->dpcon->channel_index, 0);
                        dpio_remove_static_dequeue_channel(dpio_dev->dpio, 0,
                                                dpio_dev->token,
                                                evq_info->dpcon->dpcon_id);
                }
                memset(evq_info, 0, sizeof(struct dpaa2_eventq));
                if (dpaa2_portal->num_linked_evq)
                        dpaa2_portal->num_linked_evq--;
        }

        if (!dpaa2_portal->num_linked_evq)
                dpaa2_portal->is_port_linked = false;

        return (int)nb_unlinks;
}


static int
dpaa2_eventdev_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
                             uint64_t *timeout_ticks)
{
        uint32_t scale = 1000 * 1000;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        *timeout_ticks = ns / scale;

        return 0;
}

static void
dpaa2_eventdev_dump(struct rte_eventdev *dev, FILE *f)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(f);
}

static int
dpaa2_eventdev_eth_caps_get(const struct rte_eventdev *dev,
                            const struct rte_eth_dev *eth_dev,
                            uint32_t *caps)
{
        const char *ethdev_driver = eth_dev->device->driver->name;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);

        if (!strcmp(ethdev_driver, "net_dpaa2"))
                *caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA2_CAP;
        else
                *caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;

        return 0;
}

static int
dpaa2_eventdev_eth_queue_add_all(const struct rte_eventdev *dev,
                const struct rte_eth_dev *eth_dev,
                const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        uint8_t ev_qid = queue_conf->ev.queue_id;
        struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
        int i, ret;

        EVENTDEV_INIT_FUNC_TRACE();

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                ret = dpaa2_eth_eventq_attach(eth_dev, i,
                                              dpcon, queue_conf);
                if (ret) {
                        DPAA2_EVENTDEV_ERR(
                                "Event queue attach failed: err(%d)", ret);
                        goto fail;
                }
        }
        return 0;
fail:
        for (i = (i - 1); i >= 0; i--)
                dpaa2_eth_eventq_detach(eth_dev, i);

        return ret;
}

static int
dpaa2_eventdev_eth_queue_add(const struct rte_eventdev *dev,
                const struct rte_eth_dev *eth_dev,
                int32_t rx_queue_id,
                const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        uint8_t ev_qid = queue_conf->ev.queue_id;
        struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
        int ret;

        EVENTDEV_INIT_FUNC_TRACE();

        if (rx_queue_id == -1)
                return dpaa2_eventdev_eth_queue_add_all(dev,
                                                        eth_dev, queue_conf);

        ret = dpaa2_eth_eventq_attach(eth_dev, rx_queue_id,
                                      dpcon, queue_conf);
        if (ret) {
                DPAA2_EVENTDEV_ERR(
                        "Event queue attach failed: err(%d)", ret);
                return ret;
        }
        return 0;
}

static int
dpaa2_eventdev_eth_queue_del_all(const struct rte_eventdev *dev,
                                 const struct rte_eth_dev *eth_dev)
{
        int i, ret;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);

        for (i = 0; i < eth_dev->data->nb_rx_queues; i++) {
                ret = dpaa2_eth_eventq_detach(eth_dev, i);
                if (ret) {
                        DPAA2_EVENTDEV_ERR(
                                "Event queue detach failed: err(%d)", ret);
                        return ret;
                }
        }

        return 0;
}

static int
dpaa2_eventdev_eth_queue_del(const struct rte_eventdev *dev,
                             const struct rte_eth_dev *eth_dev,
                             int32_t rx_queue_id)
{
        int ret;

        EVENTDEV_INIT_FUNC_TRACE();

        if (rx_queue_id == -1)
                return dpaa2_eventdev_eth_queue_del_all(dev, eth_dev);

        ret = dpaa2_eth_eventq_detach(eth_dev, rx_queue_id);
        if (ret) {
                DPAA2_EVENTDEV_ERR(
                        "Event queue detach failed: err(%d)", ret);
                return ret;
        }

        return 0;
}

static int
dpaa2_eventdev_eth_start(const struct rte_eventdev *dev,
                         const struct rte_eth_dev *eth_dev)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(eth_dev);

        return 0;
}

static int
dpaa2_eventdev_eth_stop(const struct rte_eventdev *dev,
                        const struct rte_eth_dev *eth_dev)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(eth_dev);

        return 0;
}

static int
dpaa2_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
                               const struct rte_cryptodev *cdev,
                               uint32_t *caps)
{
        const char *name = cdev->data->name;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);

        if (!strncmp(name, "dpsec-", 6))
                *caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA2_CAP;
        else
                return -1;

        return 0;
}

static int
dpaa2_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
                const struct rte_cryptodev *cryptodev,
                const struct rte_event *ev)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        uint8_t ev_qid = ev->queue_id;
        struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
        int i, ret;

        EVENTDEV_INIT_FUNC_TRACE();

        for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
                ret = dpaa2_sec_eventq_attach(cryptodev, i, dpcon, ev);
                if (ret) {
                        DPAA2_EVENTDEV_ERR("dpaa2_sec_eventq_attach failed: ret %d\n",
                                           ret);
                        goto fail;
                }
        }
        return 0;
fail:
        for (i = (i - 1); i >= 0; i--)
                dpaa2_sec_eventq_detach(cryptodev, i);

        return ret;
}

static int
dpaa2_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
                                const struct rte_cryptodev *cryptodev,
                                int32_t rx_queue_id,
                                const struct rte_event *ev)
{
        struct dpaa2_eventdev *priv = dev->data->dev_private;
        uint8_t ev_qid = ev->queue_id;
        struct dpaa2_dpcon_dev *dpcon = priv->evq_info[ev_qid].dpcon;
        int ret;

        EVENTDEV_INIT_FUNC_TRACE();

        if (rx_queue_id == -1)
                return dpaa2_eventdev_crypto_queue_add_all(dev,
                                                           cryptodev, ev);

        ret = dpaa2_sec_eventq_attach(cryptodev, rx_queue_id,
                                      dpcon, ev);
        if (ret) {
                DPAA2_EVENTDEV_ERR(
                        "dpaa2_sec_eventq_attach failed: ret: %d\n", ret);
                return ret;
        }
        return 0;
}

static int
dpaa2_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
                                    const struct rte_cryptodev *cdev)
{
        int i, ret;

        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);

        for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
                ret = dpaa2_sec_eventq_detach(cdev, i);
                if (ret) {
                        DPAA2_EVENTDEV_ERR(
                                "dpaa2_sec_eventq_detach failed:ret %d\n", ret);
                        return ret;
                }
        }

        return 0;
}

static int
dpaa2_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
                                const struct rte_cryptodev *cryptodev,
                                int32_t rx_queue_id)
{
        int ret;

        EVENTDEV_INIT_FUNC_TRACE();

        if (rx_queue_id == -1)
                return dpaa2_eventdev_crypto_queue_del_all(dev, cryptodev);

        ret = dpaa2_sec_eventq_detach(cryptodev, rx_queue_id);
        if (ret) {
                DPAA2_EVENTDEV_ERR(
                        "dpaa2_sec_eventq_detach failed: ret: %d\n", ret);
                return ret;
        }

        return 0;
}

static int
dpaa2_eventdev_crypto_start(const struct rte_eventdev *dev,
                            const struct rte_cryptodev *cryptodev)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(cryptodev);

        return 0;
}

static int
dpaa2_eventdev_crypto_stop(const struct rte_eventdev *dev,
                           const struct rte_cryptodev *cryptodev)
{
        EVENTDEV_INIT_FUNC_TRACE();

        RTE_SET_USED(dev);
        RTE_SET_USED(cryptodev);

        return 0;
}

static int
dpaa2_eventdev_tx_adapter_create(uint8_t id,
                                 const struct rte_eventdev *dev)
{
        RTE_SET_USED(id);
        RTE_SET_USED(dev);

        /* Nothing to do. Simply return. */
        return 0;
}

static int
dpaa2_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
                               const struct rte_eth_dev *eth_dev,
                               uint32_t *caps)
{
        RTE_SET_USED(dev);
        RTE_SET_USED(eth_dev);

        *caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
        return 0;
}
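
/*
 * Tx adapter enqueue helpers: each event carries an mbuf that is transmitted
 * directly on its Ethernet port. The "same_dest" variant assumes every mbuf
 * in the burst targets the port and Tx queue derived from the first event.
 */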
static uint16_t
dpaa2_eventdev_txa_enqueue_same_dest(void *port,
                                     struct rte_event ev[],
                                     uint16_t nb_events)
{
        struct rte_mbuf *m[DPAA2_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
        uint8_t qid, i;

        RTE_SET_USED(port);

        m0 = (struct rte_mbuf *)ev[0].mbuf;
        qid = rte_event_eth_tx_adapter_txq_get(m0);

        for (i = 0; i < nb_events; i++)
                m[i] = (struct rte_mbuf *)ev[i].mbuf;

        return rte_eth_tx_burst(m0->port, qid, m, nb_events);
}

static uint16_t
dpaa2_eventdev_txa_enqueue(void *port,
                           struct rte_event ev[],
                           uint16_t nb_events)
{
        struct rte_mbuf *m;
        uint8_t qid, i;

        RTE_SET_USED(port);

        for (i = 0; i < nb_events; i++) {
                m = (struct rte_mbuf *)ev[i].mbuf;
                qid = rte_event_eth_tx_adapter_txq_get(m);
                rte_eth_tx_burst(m->port, qid, &m, 1);
        }

        return nb_events;
}

static struct rte_eventdev_ops dpaa2_eventdev_ops = {
        .dev_infos_get = dpaa2_eventdev_info_get,
        .dev_configure = dpaa2_eventdev_configure,
        .dev_start = dpaa2_eventdev_start,
        .dev_stop = dpaa2_eventdev_stop,
        .dev_close = dpaa2_eventdev_close,
        .queue_def_conf = dpaa2_eventdev_queue_def_conf,
        .queue_setup = dpaa2_eventdev_queue_setup,
        .queue_release = dpaa2_eventdev_queue_release,
        .port_def_conf = dpaa2_eventdev_port_def_conf,
        .port_setup = dpaa2_eventdev_port_setup,
        .port_release = dpaa2_eventdev_port_release,
        .port_link = dpaa2_eventdev_port_link,
        .port_unlink = dpaa2_eventdev_port_unlink,
        .timeout_ticks = dpaa2_eventdev_timeout_ticks,
        .dump = dpaa2_eventdev_dump,
        .dev_selftest = test_eventdev_dpaa2,
        .eth_rx_adapter_caps_get = dpaa2_eventdev_eth_caps_get,
        .eth_rx_adapter_queue_add = dpaa2_eventdev_eth_queue_add,
        .eth_rx_adapter_queue_del = dpaa2_eventdev_eth_queue_del,
        .eth_rx_adapter_start = dpaa2_eventdev_eth_start,
        .eth_rx_adapter_stop = dpaa2_eventdev_eth_stop,
        .eth_tx_adapter_caps_get = dpaa2_eventdev_tx_adapter_caps,
        .eth_tx_adapter_create = dpaa2_eventdev_tx_adapter_create,
        .crypto_adapter_caps_get = dpaa2_eventdev_crypto_caps_get,
        .crypto_adapter_queue_pair_add = dpaa2_eventdev_crypto_queue_add,
        .crypto_adapter_queue_pair_del = dpaa2_eventdev_crypto_queue_del,
        .crypto_adapter_start = dpaa2_eventdev_crypto_start,
        .crypto_adapter_stop = dpaa2_eventdev_crypto_stop,
};
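
/*
 * Configure the DPCI Rx queues (parallel and atomic) to deliver received
 * frames to the given DPCON and register the callbacks that convert those
 * frames back into events.
 */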
static int
dpaa2_eventdev_setup_dpci(struct dpaa2_dpci_dev *dpci_dev,
                          struct dpaa2_dpcon_dev *dpcon_dev)
{
        struct dpci_rx_queue_cfg rx_queue_cfg;
        int ret, i;

        /* Do settings to get the frame on a DPCON object */
        rx_queue_cfg.options = DPCI_QUEUE_OPT_DEST |
                DPCI_QUEUE_OPT_USER_CTX;
        rx_queue_cfg.dest_cfg.dest_type = DPCI_DEST_DPCON;
        rx_queue_cfg.dest_cfg.dest_id = dpcon_dev->dpcon_id;
        rx_queue_cfg.dest_cfg.priority = DPAA2_EVENT_DEFAULT_DPCI_PRIO;

        dpci_dev->rx_queue[DPAA2_EVENT_DPCI_PARALLEL_QUEUE].cb =
                dpaa2_eventdev_process_parallel;
        dpci_dev->rx_queue[DPAA2_EVENT_DPCI_ATOMIC_QUEUE].cb =
                dpaa2_eventdev_process_atomic;

        for (i = 0; i < DPAA2_EVENT_DPCI_MAX_QUEUES; i++) {
                rx_queue_cfg.user_ctx = (size_t)(&dpci_dev->rx_queue[i]);
                ret = dpci_set_rx_queue(&dpci_dev->dpci,
                                        CMD_PRI_LOW,
                                        dpci_dev->token, i,
                                        &rx_queue_cfg);
                if (ret) {
                        DPAA2_EVENTDEV_ERR(
                                "DPCI Rx queue setup failed: err(%d)",
                                ret);
                        return ret;
                }
        }
        return 0;
}

static int
dpaa2_eventdev_create(const char *name)
{
        struct rte_eventdev *eventdev;
        struct dpaa2_eventdev *priv;
        struct dpaa2_dpcon_dev *dpcon_dev = NULL;
        struct dpaa2_dpci_dev *dpci_dev = NULL;
        int ret;

        eventdev = rte_event_pmd_vdev_init(name,
                                           sizeof(struct dpaa2_eventdev),
                                           rte_socket_id());
        if (eventdev == NULL) {
                DPAA2_EVENTDEV_ERR("Failed to create Event device %s", name);
                goto fail;
        }

        eventdev->dev_ops = &dpaa2_eventdev_ops;
        eventdev->enqueue = dpaa2_eventdev_enqueue;
        eventdev->enqueue_burst = dpaa2_eventdev_enqueue_burst;
        eventdev->enqueue_new_burst = dpaa2_eventdev_enqueue_burst;
        eventdev->enqueue_forward_burst = dpaa2_eventdev_enqueue_burst;
        eventdev->dequeue = dpaa2_eventdev_dequeue;
        eventdev->dequeue_burst = dpaa2_eventdev_dequeue_burst;
        eventdev->txa_enqueue = dpaa2_eventdev_txa_enqueue;
        eventdev->txa_enqueue_same_dest = dpaa2_eventdev_txa_enqueue_same_dest;

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        priv = eventdev->data->dev_private;
        priv->max_event_queues = 0;

        do {
                dpcon_dev = rte_dpaa2_alloc_dpcon_dev();
                if (!dpcon_dev)
                        break;
                priv->evq_info[priv->max_event_queues].dpcon = dpcon_dev;

                dpci_dev = rte_dpaa2_alloc_dpci_dev();
                if (!dpci_dev) {
                        rte_dpaa2_free_dpcon_dev(dpcon_dev);
                        break;
                }
                priv->evq_info[priv->max_event_queues].dpci = dpci_dev;

                ret = dpaa2_eventdev_setup_dpci(dpci_dev, dpcon_dev);
                if (ret) {
                        DPAA2_EVENTDEV_ERR(
                                "DPCI setup failed: err(%d)", ret);
                        return ret;
                }
                priv->max_event_queues++;
        } while (dpcon_dev && dpci_dev);

        RTE_LOG(INFO, PMD, "%s eventdev created\n", name);

        return 0;
fail:
        return -EFAULT;
}

static int
dpaa2_eventdev_destroy(const char *name)
{
        struct rte_eventdev *eventdev;
        struct dpaa2_eventdev *priv;
        int i;

        eventdev = rte_event_pmd_get_named_dev(name);
        if (eventdev == NULL) {
                RTE_EDEV_LOG_ERR("eventdev with name %s not allocated", name);
                return -1;
        }

        /* For secondary processes, the primary has done all the work */
        if (rte_eal_process_type() != RTE_PROC_PRIMARY)
                return 0;

        priv = eventdev->data->dev_private;
        for (i = 0; i < priv->max_event_queues; i++) {
                if (priv->evq_info[i].dpcon)
                        rte_dpaa2_free_dpcon_dev(priv->evq_info[i].dpcon);

                if (priv->evq_info[i].dpci)
                        rte_dpaa2_free_dpci_dev(priv->evq_info[i].dpci);

        }
        priv->max_event_queues = 0;

        RTE_LOG(INFO, PMD, "%s eventdev cleaned\n", name);
        return 0;
}


static int
dpaa2_eventdev_probe(struct rte_vdev_device *vdev)
{
        const char *name;

        name = rte_vdev_device_name(vdev);
        DPAA2_EVENTDEV_INFO("Initializing %s", name);
        return dpaa2_eventdev_create(name);
}

static int
dpaa2_eventdev_remove(struct rte_vdev_device *vdev)
{
        const char *name;

        name = rte_vdev_device_name(vdev);
        DPAA2_EVENTDEV_INFO("Closing %s", name);

        dpaa2_eventdev_destroy(name);

        return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_eventdev_dpaa2_pmd = {
        .probe = dpaa2_eventdev_probe,
        .remove = dpaa2_eventdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA2_PMD, vdev_eventdev_dpaa2_pmd);
RTE_LOG_REGISTER(dpaa2_logtype_event, pmd.event.dpaa2, NOTICE);