/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <rte_bus_vdev.h>
#include <rte_lcore.h>
#include <rte_memzone.h>
#include <rte_kvargs.h>
#include <rte_errno.h>
#include <rte_cycles.h>
#include <rte_string_fns.h>

#include "opdl_evdev.h"
#include "opdl_ring.h"
#include "opdl_log.h"

#define EVENTDEV_NAME_OPDL_PMD event_opdl
#define NUMA_NODE_ARG "numa_node"
#define DO_VALIDATION_ARG "do_validation"
#define DO_TEST_ARG "self_test"

static void
opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info);

uint16_t
opdl_event_enqueue_burst(void *port,
			 const struct rte_event ev[],
			 uint16_t num)
{
	struct opdl_port *p = port;

	if (unlikely(!p->opdl->data->dev_started))
		return 0;

	/* Either rx_enqueue or disclaim */
	return p->enq(p, ev, num);
}

uint16_t
opdl_event_enqueue(void *port, const struct rte_event *ev)
{
	struct opdl_port *p = port;

	if (unlikely(!p->opdl->data->dev_started))
		return 0;

	return p->enq(p, ev, 1);
}

uint16_t
opdl_event_dequeue_burst(void *port,
			 struct rte_event *ev,
			 uint16_t num,
			 uint64_t wait)
{
	struct opdl_port *p = port;

	RTE_SET_USED(wait);

	if (unlikely(!p->opdl->data->dev_started))
		return 0;

	/* This function pointer can point to tx_dequeue or claim */
	return p->deq(p, ev, num);
}

uint16_t
opdl_event_dequeue(void *port,
		   struct rte_event *ev,
		   uint64_t wait)
{
	struct opdl_port *p = port;

	RTE_SET_USED(wait);

	if (unlikely(!p->opdl->data->dev_started))
		return 0;

	return p->deq(p, ev, 1);
}

static int
opdl_port_link(struct rte_eventdev *dev,
	       void *port,
	       const uint8_t queues[],
	       const uint8_t priorities[],
	       uint16_t num)
{
	struct opdl_port *p = port;

	RTE_SET_USED(priorities);

	if (unlikely(dev->data->dev_started)) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			    "Attempt to link queue (%u) to port %d while device started\n",
			    dev->data->dev_id,
			    queues[0],
			    p->id);
		rte_errno = EINVAL;
		return 0;
	}

	/* Max of 1 queue per port */
	if (num > 1) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			    "Attempt to link more than one queue (%u) to port %d requested\n",
			    dev->data->dev_id,
			    num,
			    p->id);
		rte_errno = EDQUOT;
		return 0;
	}

	if (!p->configured) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			    "port %d not configured, cannot link to %u\n",
			    dev->data->dev_id,
			    p->id,
			    queues[0]);
		rte_errno = EINVAL;
		return 0;
	}

	if (p->external_qid != OPDL_INVALID_QID) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			    "port %d already linked to queue %u, cannot link to %u\n",
			    dev->data->dev_id,
			    p->id,
			    p->external_qid,
			    queues[0]);
		rte_errno = EINVAL;
		return 0;
	}

	p->external_qid = queues[0];

	return 1;
}

static int
opdl_port_unlink(struct rte_eventdev *dev,
		 void *port,
		 uint8_t queues[],
		 uint16_t nb_unlinks)
{
	struct opdl_port *p = port;

	RTE_SET_USED(queues);
	RTE_SET_USED(nb_unlinks);

	if (unlikely(dev->data->dev_started)) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			    "Attempt to unlink queue (%u) from port %d while device started\n",
			    dev->data->dev_id,
			    queues[0],
			    p->id);
		rte_errno = EINVAL;
		return 0;
	}
	/* Reset the port state so it can be relinked before the next start */
	p->queue_id = OPDL_INVALID_QID;
	p->p_type = OPDL_INVALID_PORT;
	p->external_qid = OPDL_INVALID_QID;

	/* Always report 0 queues unlinked due to the static pipeline */
	return 0;
}

static int
opdl_port_setup(struct rte_eventdev *dev,
		uint8_t port_id,
		const struct rte_event_port_conf *conf)
{
	struct opdl_evdev *device = opdl_pmd_priv(dev);
	struct opdl_port *p = &device->ports[port_id];

	RTE_SET_USED(conf);

	/* Check if port already configured */
	if (p->configured) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			    "Attempt to setup port %d which is already setup\n",
			    dev->data->dev_id,
			    p->id);
		return -EDQUOT;
	}

	*p = (struct opdl_port){0}; /* zero entire structure */
	p->id = port_id;
	p->opdl = device;
	p->queue_id = OPDL_INVALID_QID;
	p->external_qid = OPDL_INVALID_QID;
	dev->data->ports[port_id] = p;
	rte_smp_wmb();
	p->configured = 1;
	device->nb_ports++;
	return 0;
}

static void
opdl_port_release(void *port)
{
	struct opdl_port *p = port;

	if (p == NULL ||
	    p->opdl->data->dev_started) {
		return;
	}

	p->configured = 0;
	p->initialized = 0;
}

static void
opdl_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
		   struct rte_event_port_conf *port_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(port_id);

	port_conf->new_event_threshold = MAX_OPDL_CONS_Q_DEPTH;
	port_conf->dequeue_depth = MAX_OPDL_CONS_Q_DEPTH;
	port_conf->enqueue_depth = MAX_OPDL_CONS_Q_DEPTH;
}

static int
opdl_queue_setup(struct rte_eventdev *dev,
		 uint8_t queue_id,
		 const struct rte_event_queue_conf *conf)
{
	enum queue_type type;

	struct opdl_evdev *device = opdl_pmd_priv(dev);

	/* Extra sanity check, probably not needed */
	if (queue_id == OPDL_INVALID_QID) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			    "Invalid queue id %u requested\n",
			    dev->data->dev_id,
			    queue_id);
		return -EINVAL;
	}

	if (device->nb_q_md > device->max_queue_nb) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			    "Max number of queues %u exceeded by request %u\n",
			    dev->data->dev_id,
			    device->max_queue_nb,
			    device->nb_q_md);
		return -EINVAL;
	}

	if (RTE_EVENT_QUEUE_CFG_ALL_TYPES
	    & conf->event_queue_cfg) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			    "QUEUE_CFG_ALL_TYPES not supported\n",
			    dev->data->dev_id);
		return -ENOTSUP;
	} else if (RTE_EVENT_QUEUE_CFG_SINGLE_LINK
		   & conf->event_queue_cfg) {
		type = OPDL_Q_TYPE_SINGLE_LINK;
	} else {
		switch (conf->schedule_type) {
		case RTE_SCHED_TYPE_ORDERED:
			type = OPDL_Q_TYPE_ORDERED;
			break;
		case RTE_SCHED_TYPE_ATOMIC:
			type = OPDL_Q_TYPE_ATOMIC;
			break;
		case RTE_SCHED_TYPE_PARALLEL:
			/* Parallel is handled as ordered in this PMD */
			type = OPDL_Q_TYPE_ORDERED;
			break;
		default:
			PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
				    "Unknown schedule type %d requested\n",
				    dev->data->dev_id,
				    conf->schedule_type);
			return -EINVAL;
		}
	}

	/* Check if queue id has been setup already */
	uint32_t i;
	for (i = 0; i < device->nb_q_md; i++) {
		if (device->q_md[i].ext_id == queue_id) {
			PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
				    "queue id %u already setup\n",
				    dev->data->dev_id,
				    queue_id);
			return -EINVAL;
		}
	}

	device->q_md[device->nb_q_md].ext_id = queue_id;
	device->q_md[device->nb_q_md].type = type;
	device->q_md[device->nb_q_md].setup = 1;
	device->nb_q_md++;
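	/*
	 * Only queue metadata is recorded here; the backing opdl rings are
	 * built later, at dev_start, by create_queues_and_rings().
	 */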

	return 0;
}

static void
opdl_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	struct opdl_evdev *device = opdl_pmd_priv(dev);

	RTE_SET_USED(queue_id);

	if (device->data->dev_started)
		return;
}

static void
opdl_queue_def_conf(struct rte_eventdev *dev,
		    uint8_t queue_id,
		    struct rte_event_queue_conf *conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	static const struct rte_event_queue_conf default_conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1,
		.event_queue_cfg = 0,
		.schedule_type = RTE_SCHED_TYPE_ORDERED,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
	};

	*conf = default_conf;
}

static int
opdl_dev_configure(const struct rte_eventdev *dev)
{
	struct opdl_evdev *opdl = opdl_pmd_priv(dev);
	const struct rte_eventdev_data *data = dev->data;
	const struct rte_event_dev_config *conf = &data->dev_conf;

	opdl->max_queue_nb = conf->nb_event_queues;
	opdl->max_port_nb = conf->nb_event_ports;
	opdl->nb_events_limit = conf->nb_events_limit;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		PMD_DRV_LOG(ERR, "DEV_ID:[%02d] : "
			    "DEQUEUE_TIMEOUT not supported\n",
			    dev->data->dev_id);
		return -ENOTSUP;
	}

	return 0;
}

static void
opdl_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *info)
{
	RTE_SET_USED(dev);

	static const struct rte_event_dev_info evdev_opdl_info = {
		.driver_name = OPDL_PMD_NAME,
		.max_event_queues = RTE_EVENT_MAX_QUEUES_PER_DEV,
		.max_event_queue_flows = OPDL_QID_NUM_FIDS,
		.max_event_queue_priority_levels = OPDL_Q_PRIORITY_MAX,
		.max_event_priority_levels = OPDL_IQS_MAX,
		.max_event_ports = OPDL_PORTS_MAX,
		.max_event_port_dequeue_depth = MAX_OPDL_CONS_Q_DEPTH,
		.max_event_port_enqueue_depth = MAX_OPDL_CONS_Q_DEPTH,
		.max_num_events = OPDL_INFLIGHT_EVENTS_TOTAL,
		.event_dev_cap = RTE_EVENT_DEV_CAP_BURST_MODE |
				 RTE_EVENT_DEV_CAP_CARRY_FLOW_ID,
	};

	*info = evdev_opdl_info;
}

static void
opdl_dump(struct rte_eventdev *dev, FILE *f)
{
	struct opdl_evdev *device = opdl_pmd_priv(dev);

	if (!device->do_validation)
		return;

	fprintf(f,
		"\n\n -- RING STATISTICS --\n");
	uint32_t i;
	for (i = 0; i < device->nb_opdls; i++)
		opdl_ring_dump(device->opdl[i], f);

	fprintf(f,
		"\n\n -- PORT STATISTICS --\n"
		"Type Port Index Port Id Queue Id Av. Req Size "
		"Av. Grant Size Av. Cycles PP"
		" Empty DEQs Non Empty DEQs Pkts Processed\n");

	for (i = 0; i < device->max_port_nb; i++) {
		char queue_id[64];
		char total_cyc[64];
		const char *p_type;

		uint64_t cne, cpg;
		struct opdl_port *port = &device->ports[i];

		if (port->initialized) {
			cne = port->port_stat[claim_non_empty];
			cpg = port->port_stat[claim_pkts_granted];
			if (port->p_type == OPDL_REGULAR_PORT)
				p_type = "REG";
			else if (port->p_type == OPDL_PURE_RX_PORT)
				p_type = " RX";
			else if (port->p_type == OPDL_PURE_TX_PORT)
				p_type = " TX";
			else if (port->p_type == OPDL_ASYNC_PORT)
				p_type = "SYNC";
			else
				p_type = "????";

			snprintf(queue_id, sizeof(queue_id), "%02u",
				 port->external_qid);
			if (port->p_type == OPDL_REGULAR_PORT ||
			    port->p_type == OPDL_ASYNC_PORT)
				snprintf(total_cyc, sizeof(total_cyc),
					 " %'16"PRIu64"",
					 (cpg != 0 ?
					  port->port_stat[total_cycles] / cpg
					  : 0));
			else
				snprintf(total_cyc, sizeof(total_cyc),
					 " ----");
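			/*
			 * The request/grant sizes and cycle counts printed
			 * below are averages per non-empty claim.
			 */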
			fprintf(f,
				"%4s %10u %8u %9s %'16"PRIu64" %'16"PRIu64" %s "
				"%'16"PRIu64" %'16"PRIu64" %'16"PRIu64"\n",
				p_type,
				i,
				port->id,
				(port->external_qid == OPDL_INVALID_QID ? "---"
				 : queue_id),
				(cne != 0 ?
				 port->port_stat[claim_pkts_requested] / cne
				 : 0),
				(cne != 0 ?
				 port->port_stat[claim_pkts_granted] / cne
				 : 0),
				total_cyc,
				port->port_stat[claim_empty],
				port->port_stat[claim_non_empty],
				port->port_stat[claim_pkts_granted]);
		}
	}
	fprintf(f, "\n");
}

static void
opdl_stop(struct rte_eventdev *dev)
{
	struct opdl_evdev *device = opdl_pmd_priv(dev);

	opdl_xstats_uninit(dev);

	destroy_queues_and_rings(dev);

	device->started = 0;

	rte_smp_wmb();
}

static int
opdl_start(struct rte_eventdev *dev)
{
	int err;

	err = create_queues_and_rings(dev);

	if (!err)
		err = assign_internal_queue_ids(dev);

	if (!err)
		err = initialise_queue_zero_ports(dev);

	if (!err)
		err = initialise_all_other_ports(dev);

	if (!err)
		err = check_queues_linked(dev);

	if (!err)
		err = opdl_add_event_handlers(dev);

	if (!err)
		err = build_all_dependencies(dev);

	if (!err) {
		opdl_xstats_init(dev);

		struct opdl_evdev *device = opdl_pmd_priv(dev);

		PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
			    "SUCCESS : Created %u total queues (%u ex, %u in),"
			    " %u opdls, %u event_dev ports, %u input ports",
			    opdl_pmd_dev_id(device),
			    device->nb_queues,
			    (device->nb_queues - device->nb_opdls),
			    device->nb_opdls,
			    device->nb_opdls,
			    device->nb_ports,
			    device->queue[0].nb_ports);
	} else
		opdl_stop(dev);

	return err;
}

static int
opdl_close(struct rte_eventdev *dev)
{
	struct opdl_evdev *device = opdl_pmd_priv(dev);
	uint32_t i;

	for (i = 0; i < device->max_port_nb; i++) {
		memset(&device->ports[i],
		       0,
		       sizeof(struct opdl_port));
	}

	memset(&device->s_md,
	       0x0,
	       sizeof(struct opdl_stage_meta_data)*OPDL_PORTS_MAX);

	memset(&device->q_md,
	       0xFF,
	       sizeof(struct opdl_queue_meta_data)*OPDL_MAX_QUEUES);

	memset(device->q_map_ex_to_in,
	       0,
	       sizeof(uint8_t)*OPDL_INVALID_QID);

	opdl_xstats_uninit(dev);

	device->max_port_nb = 0;
	device->max_queue_nb = 0;
	device->nb_opdls = 0;
	device->nb_queues = 0;
	device->nb_ports = 0;
	device->nb_q_md = 0;

	dev->data->nb_queues = 0;
	dev->data->nb_ports = 0;

	return 0;
}

static int
assign_numa_node(const char *key __rte_unused, const char *value, void *opaque)
{
	int *socket_id = opaque;

	*socket_id = atoi(value);
	if (*socket_id >= RTE_MAX_NUMA_NODES)
		return -1;
	return 0;
}

static int
set_do_validation(const char *key __rte_unused, const char *value, void *opaque)
{
	int *do_val = opaque;

	*do_val = atoi(value);
	if (*do_val != 0)
		*do_val = 1;

	return 0;
}

static int
set_do_test(const char *key __rte_unused, const char *value, void *opaque)
{
	int *do_test = opaque;

	*do_test = atoi(value);

	if (*do_test != 0)
		*do_test = 1;
	return 0;
}

static int
opdl_probe(struct rte_vdev_device *vdev)
{
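	/*
	 * A single ops table is shared by every opdl vdev instance; it must
	 * outlive probe(), hence the static storage duration.
	 */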
	static struct rte_eventdev_ops evdev_opdl_ops = {
		.dev_configure = opdl_dev_configure,
		.dev_infos_get = opdl_info_get,
		.dev_close = opdl_close,
		.dev_start = opdl_start,
		.dev_stop = opdl_stop,
		.dump = opdl_dump,

		.queue_def_conf = opdl_queue_def_conf,
		.queue_setup = opdl_queue_setup,
		.queue_release = opdl_queue_release,
		.port_def_conf = opdl_port_def_conf,
		.port_setup = opdl_port_setup,
		.port_release = opdl_port_release,
		.port_link = opdl_port_link,
		.port_unlink = opdl_port_unlink,

		.xstats_get = opdl_xstats_get,
		.xstats_get_names = opdl_xstats_get_names,
		.xstats_get_by_name = opdl_xstats_get_by_name,
		.xstats_reset = opdl_xstats_reset,
	};

	static const char *const args[] = {
		NUMA_NODE_ARG,
		DO_VALIDATION_ARG,
		DO_TEST_ARG,
		NULL
	};
	const char *name;
	const char *params;
	struct rte_eventdev *dev;
	struct opdl_evdev *opdl;
	int socket_id = rte_socket_id();
	int do_validation = 0;
	int do_test = 0;
	int test_result = 0;

	name = rte_vdev_device_name(vdev);
	params = rte_vdev_device_args(vdev);
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			PMD_DRV_LOG(INFO,
				    "Ignoring unsupported parameters when creating device '%s'\n",
				    name);
		} else {
			int ret = rte_kvargs_process(kvlist, NUMA_NODE_ARG,
						     assign_numa_node,
						     &socket_id);
			if (ret != 0) {
				PMD_DRV_LOG(ERR,
					    "%s: Error parsing numa node parameter",
					    name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, DO_VALIDATION_ARG,
						 set_do_validation,
						 &do_validation);
			if (ret != 0) {
				PMD_DRV_LOG(ERR,
					    "%s: Error parsing do validation parameter",
					    name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			ret = rte_kvargs_process(kvlist, DO_TEST_ARG,
						 set_do_test, &do_test);
			if (ret != 0) {
				PMD_DRV_LOG(ERR,
					    "%s: Error parsing do test parameter",
					    name);
				rte_kvargs_free(kvlist);
				return ret;
			}

			rte_kvargs_free(kvlist);
		}
	}

	dev = rte_event_pmd_vdev_init(name,
				      sizeof(struct opdl_evdev), socket_id);
	if (dev == NULL) {
		PMD_DRV_LOG(ERR, "eventdev vdev init() failed");
		return -EFAULT;
	}

	PMD_DRV_LOG(INFO, "DEV_ID:[%02d] : "
		    "Success - creating eventdev device %s, numa_node:[%d],"
		    " do_validation:[%s], self_test:[%s]\n",
		    dev->data->dev_id,
		    name,
		    socket_id,
		    (do_validation ? "true" : "false"),
		    (do_test ? "true" : "false"));
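	/*
	 * Both the single-event and burst fast-path handlers resolve to the
	 * per-port enq/deq function pointers, which are assigned when the
	 * ports are initialised at dev_start.
	 */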
"true" : "false")); 712 713 dev->dev_ops = &evdev_opdl_ops; 714 715 dev->enqueue = opdl_event_enqueue; 716 dev->enqueue_burst = opdl_event_enqueue_burst; 717 dev->enqueue_new_burst = opdl_event_enqueue_burst; 718 dev->enqueue_forward_burst = opdl_event_enqueue_burst; 719 dev->dequeue = opdl_event_dequeue; 720 dev->dequeue_burst = opdl_event_dequeue_burst; 721 722 if (rte_eal_process_type() != RTE_PROC_PRIMARY) 723 return 0; 724 725 opdl = dev->data->dev_private; 726 opdl->data = dev->data; 727 opdl->socket = socket_id; 728 opdl->do_validation = do_validation; 729 opdl->do_test = do_test; 730 str_len = strlen(name); 731 memcpy(opdl->service_name, name, str_len); 732 733 if (do_test == 1) 734 test_result = opdl_selftest(); 735 736 return test_result; 737 } 738 739 static int 740 opdl_remove(struct rte_vdev_device *vdev) 741 { 742 const char *name; 743 744 name = rte_vdev_device_name(vdev); 745 if (name == NULL) 746 return -EINVAL; 747 748 PMD_DRV_LOG(INFO, "Closing eventdev opdl device %s\n", name); 749 750 return rte_event_pmd_vdev_uninit(name); 751 } 752 753 static struct rte_vdev_driver evdev_opdl_pmd_drv = { 754 .probe = opdl_probe, 755 .remove = opdl_remove 756 }; 757 758 RTE_LOG_REGISTER(opdl_logtype_driver, pmd.event.opdl.driver, INFO); 759 760 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OPDL_PMD, evdev_opdl_pmd_drv); 761 RTE_PMD_REGISTER_PARAM_STRING(event_opdl, NUMA_NODE_ARG "=<int>" 762 DO_VALIDATION_ARG "=<int>" DO_TEST_ARG "=<int>"); 763