/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Intel Corporation
 */

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <errno.h>
#include <unistd.h>
#include <sys/queue.h>

#include <rte_memory.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_debug.h>
#include <rte_ethdev.h>
#include <rte_cycles.h>
#include <rte_eventdev.h>
#include <bus_vdev_driver.h>
#include <rte_pause.h>

#include "opdl_evdev.h"
#include "opdl_log.h"


#define MAX_PORTS 16
#define MAX_QIDS 16
#define NUM_PACKETS (1<<18)
#define NUM_EVENTS 256
#define BURST_SIZE 32

static int evdev;

struct test {
	struct rte_mempool *mbuf_pool;
	uint8_t port[MAX_PORTS];
	uint8_t qid[MAX_QIDS];
	int nb_qids;
};

static struct rte_mempool *eventdev_func_mempool;

static __rte_always_inline struct rte_mbuf *
rte_gen_arp(int portid, struct rte_mempool *mp)
{
	/*
	 * len = 14 + 46
	 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46
	 */
	static const uint8_t arp_request[] = {
		/*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8,
		0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01,
		/*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8,
		0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01,
		/*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00,
		0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		/*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x00, 0x00, 0x00, 0x00
	};
	struct rte_mbuf *m;
	/* Frame is 14 bytes of Ethernet header + 46 bytes of ARP payload,
	 * i.e. the full size of the array above (60 bytes).
	 */
	int pkt_len = sizeof(arp_request);

	m = rte_pktmbuf_alloc(mp);
	if (!m)
		return NULL;

	memcpy((void *)((uintptr_t)m->buf_addr + m->data_off),
		arp_request, pkt_len);
	rte_pktmbuf_pkt_len(m) = pkt_len;
	rte_pktmbuf_data_len(m) = pkt_len;

	RTE_SET_USED(portid);

	return m;
}

/* initialization and config */
static __rte_always_inline int
init(struct test *t, int nb_queues, int nb_ports)
{
	struct rte_event_dev_config config = {
		.nb_event_queues = nb_queues,
		.nb_event_ports = nb_ports,
		.nb_event_queue_flows = 1024,
		.nb_events_limit = 4096,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128,
	};
	int ret;

	void *temp = t->mbuf_pool; /* save and restore mbuf pool */

	memset(t, 0, sizeof(*t));
	t->mbuf_pool = temp;

	ret = rte_event_dev_configure(evdev, &config);
	if (ret < 0)
		PMD_DRV_LOG(ERR, "%d: Error configuring device", __LINE__);
	return ret;
}

static __rte_always_inline int
create_ports(struct test *t, int num_ports)
{
	int i;
	static const struct rte_event_port_conf conf = {
		.new_event_threshold = 1024,
		.dequeue_depth = 32,
		.enqueue_depth = 32,
	};
	if (num_ports > MAX_PORTS)
		return -1;

	for (i = 0; i < num_ports; i++) {
		if (rte_event_port_setup(evdev, i, &conf) < 0) {
			PMD_DRV_LOG(ERR, "Error setting up port %d", i);
			return -1;
		}
		t->port[i] = i;
	}

	return 0;
}
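/*
 * OPDL (Ordered Packet Distribution Library) models a pipeline as a chain
 * of queues. The selftest exercises the three queue flavours declared in
 * opdl_evdev.h: ORDERED (workers may process events in parallel but they
 * leave the stage in arrival order), ATOMIC (all events of one flow are
 * funnelled through a single port at a time) and SINGLE_LINK (a strict
 * 1:1 producer/consumer link). The helper below maps those flavours onto
 * the generic eventdev schedule types.
 */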
static __rte_always_inline int
create_queues_type(struct test *t, int num_qids, enum queue_type flags)
{
	int i;
	uint8_t type;

	switch (flags) {
	case OPDL_Q_TYPE_ORDERED:
		type = RTE_SCHED_TYPE_ORDERED;
		break;
	case OPDL_Q_TYPE_ATOMIC:
		type = RTE_SCHED_TYPE_ATOMIC;
		break;
	default:
		type = 0;
	}

	/* Q creation */
	const struct rte_event_queue_conf conf = {
		.event_queue_cfg =
			(flags == OPDL_Q_TYPE_SINGLE_LINK ?
			 RTE_EVENT_QUEUE_CFG_SINGLE_LINK : 0),
		.schedule_type = type,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
	};

	/* Check the bound before writing into t->qid[] */
	if (t->nb_qids + num_qids > MAX_QIDS)
		return -1;

	for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) {
		if (rte_event_queue_setup(evdev, i, &conf) < 0) {
			PMD_DRV_LOG(ERR, "%d: error creating qid %d",
					__LINE__, i);
			return -1;
		}
		t->qid[i] = i;
	}

	t->nb_qids += num_qids;

	return 0;
}


/* destruction */
static __rte_always_inline int
cleanup(struct test *t __rte_unused)
{
	rte_event_dev_stop(evdev);
	rte_event_dev_close(evdev);
	PMD_DRV_LOG(ERR, "clean up for test done");
	return 0;
}
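/*
 * The basic tests stamp each generated mbuf with a sequence number via
 * rte_event_pmd_selftest_seqn(), the per-mbuf dynamic field reserved for
 * eventdev PMD selftests, and read it back after each pipeline stage to
 * verify that ordering was preserved.
 */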
static int
ordered_basic(struct test *t)
{
	const uint8_t rx_port = 0;
	const uint8_t w1_port = 1;
	const uint8_t w3_port = 3;
	const uint8_t tx_port = 4;
	int err;
	uint32_t i;
	uint32_t deq_pkts;
	struct rte_mbuf *mbufs[3];

	const uint32_t MAGIC_SEQN = 1234;

	/* Create instance with 5 ports */
	if (init(t, 2, tx_port+1) < 0 ||
			create_ports(t, tx_port+1) < 0 ||
			create_queues_type(t, 2, OPDL_Q_TYPE_ORDERED)) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device", __LINE__);
		return -1;
	}

	/*
	 * CQ mapping to QID
	 * We need three ports, all mapped to the same ordered qid0. Then we'll
	 * take a packet out to each port, re-enqueue in reverse order,
	 * then make sure the reordering has taken place properly when we
	 * dequeue from the tx_port.
	 *
	 * Simplified test setup diagram:
	 *
	 * rx_port          w1_port
	 *         \       /       \
	 *          qid0 -- w2_port -- qid1
	 *                 \       /      \
	 *                  w3_port        tx_port
	 */
	/* CQ mapping to QID for LB ports (directed mapped on create) */
	for (i = w1_port; i <= w3_port; i++) {
		err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
				1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: error mapping lb qid",
					__LINE__);
			cleanup(t);
			return -1;
		}
	}

	err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error mapping TX qid", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		PMD_DRV_LOG(ERR, "%d: Error with start call", __LINE__);
		return -1;
	}

	/* Enqueue 3 packets to the rx port */
	for (i = 0; i < 3; i++) {
		struct rte_event ev;
		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
		if (!mbufs[i]) {
			PMD_DRV_LOG(ERR, "%d: gen of pkt failed", __LINE__);
			return -1;
		}

		ev.queue_id = t->qid[0];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = mbufs[i];
		*rte_event_pmd_selftest_seqn(mbufs[i]) = MAGIC_SEQN + i;

		/* generate pkt and enqueue */
		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u",
					__LINE__, i, err);
			return -1;
		}
	}

	/* use extra slot to make logic in loops easier */
	struct rte_event deq_ev[w3_port + 1];

	uint32_t seq = 0;

	/* Dequeue the 3 packets, one from each worker port */
	for (i = w1_port; i <= w3_port; i++) {
		deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
				&deq_ev[i], 1, 0);
		if (deq_pkts != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to deq", __LINE__);
			rte_event_dev_dump(evdev, stdout);
			return -1;
		}
		seq = *rte_event_pmd_selftest_seqn(deq_ev[i].mbuf) - MAGIC_SEQN;

		/* Worker port i should have received the i-th packet */
		if (seq != (i-1)) {
			PMD_DRV_LOG(ERR, "seq test failed! seq is %d, "
					"port number is %u", seq, i);
			return -1;
		}
	}

	/* Enqueue each packet in reverse order, flushing after each one */
	for (i = w3_port; i >= w1_port; i--) {

		deq_ev[i].op = RTE_EVENT_OP_FORWARD;
		deq_ev[i].queue_id = t->qid[1];
		err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to enqueue", __LINE__);
			return -1;
		}
	}

	/* dequeue from the tx ports, we should get 3 packets */
	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
			3, 0);

	/* Check to see if we've got all 3 packets */
	if (deq_pkts != 3) {
		PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d",
				__LINE__, deq_pkts, tx_port);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}

	/* Destroy the instance */
	cleanup(t);

	return 0;
}
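/*
 * With an ATOMIC queue every event below carries the same flow_id, so the
 * scheduler must deliver the whole flow to one worker port at a time.
 * atomic_basic() relies on this: all three events are expected to surface
 * on a single worker (port 2 with this device's distribution) while the
 * other workers dequeue nothing.
 */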
static int
atomic_basic(struct test *t)
{
	const uint8_t rx_port = 0;
	const uint8_t w1_port = 1;
	const uint8_t w3_port = 3;
	const uint8_t tx_port = 4;
	int err;
	int i;
	uint32_t deq_pkts;
	struct rte_mbuf *mbufs[3];
	const uint32_t MAGIC_SEQN = 1234;

	/* Create instance with 5 ports */
	if (init(t, 2, tx_port+1) < 0 ||
			create_ports(t, tx_port+1) < 0 ||
			create_queues_type(t, 2, OPDL_Q_TYPE_ATOMIC)) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device", __LINE__);
		return -1;
	}

	/*
	 * CQ mapping to QID
	 * We need three ports, all mapped to the same atomic qid0. Then we'll
	 * take a packet out to each port, re-enqueue in reverse order,
	 * then make sure the reordering has taken place properly when we
	 * dequeue from the tx_port.
	 *
	 * Simplified test setup diagram:
	 *
	 * rx_port          w1_port
	 *         \       /       \
	 *          qid0 -- w2_port -- qid1
	 *                 \       /      \
	 *                  w3_port        tx_port
	 */
	/* CQ mapping to QID for Atomic ports (directed mapped on create) */
	for (i = w1_port; i <= w3_port; i++) {
		err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL,
				1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: error mapping lb qid",
					__LINE__);
			cleanup(t);
			return -1;
		}
	}

	err = rte_event_port_link(evdev, t->port[tx_port], &t->qid[1], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error mapping TX qid", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) < 0) {
		PMD_DRV_LOG(ERR, "%d: Error with start call", __LINE__);
		return -1;
	}

	/* Enqueue 3 packets to the rx port, all on the same flow */
	for (i = 0; i < 3; i++) {
		struct rte_event ev;
		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
		if (!mbufs[i]) {
			PMD_DRV_LOG(ERR, "%d: gen of pkt failed", __LINE__);
			return -1;
		}

		ev.queue_id = t->qid[0];
		ev.op = RTE_EVENT_OP_NEW;
		ev.flow_id = 1;
		ev.mbuf = mbufs[i];
		*rte_event_pmd_selftest_seqn(mbufs[i]) = MAGIC_SEQN + i;

		/* generate pkt and enqueue */
		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u, retval = %u",
					__LINE__, i, err);
			return -1;
		}
	}

	/* use extra slot to make logic in loops easier */
	struct rte_event deq_ev[w3_port + 1];

	/* Dequeue the 3 packets: the whole atomic flow should arrive on one
	 * worker port, the other workers should see nothing.
	 */
	for (i = w1_port; i <= w3_port; i++) {

		deq_pkts = rte_event_dequeue_burst(evdev, t->port[i],
				deq_ev, 3, 0);

		if (t->port[i] != 2) {
			if (deq_pkts != 0) {
				PMD_DRV_LOG(ERR, "%d: dequeued non-zero!",
						__LINE__);
				rte_event_dev_dump(evdev, stdout);
				return -1;
			}
		} else {

			if (deq_pkts != 3) {
				PMD_DRV_LOG(ERR, "%d: deq not equal to 3: %u!",
						__LINE__, deq_pkts);
				rte_event_dev_dump(evdev, stdout);
				return -1;
			}

			int j;
			for (j = 0; j < 3; j++) {
				deq_ev[j].op = RTE_EVENT_OP_FORWARD;
				deq_ev[j].queue_id = t->qid[1];
			}

			err = rte_event_enqueue_burst(evdev, t->port[i],
					deq_ev, 3);

			if (err != 3) {
				PMD_DRV_LOG(ERR, "port %d: Failed to enqueue pkt %u, "
						"retval = %u",
						t->port[i], 3, err);
				return -1;
			}

		}

	}


	/* dequeue from the tx ports, we should get 3 packets */
	deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev,
			3, 0);

	/* Check to see if we've got all 3 packets */
	if (deq_pkts != 3) {
		PMD_DRV_LOG(ERR, "%d: expected 3 pkts at tx port got %d from port %d",
				__LINE__, deq_pkts, tx_port);
		rte_event_dev_dump(evdev, stdout);
		return -1;
	}

	cleanup(t);

	return 0;
}

static __rte_always_inline int
check_qid_stats(uint64_t id[], int index)
{
	/* Expected per-port xstats IDs, hard-coded to match the 3-port
	 * single_link_w_stats pipeline.
	 */
	if (index == 0) {
		if (id[0] != 3 || id[1] != 3
				|| id[2] != 3)
			return -1;
	} else if (index == 1) {
		if (id[0] != 5 || id[1] != 5
				|| id[2] != 2)
			return -1;
	} else if (index == 2) {
		if (id[0] != 3 || id[1] != 1
				|| id[2] != 1)
			return -1;
	}

	return 0;
}
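/*
 * Eventdev xstats are read with a two-step protocol: first call
 * rte_event_dev_xstats_names_get() with a NULL buffer to learn how many
 * statistics the port exposes, then fetch the names and IDs, and finally
 * read the values by ID with rte_event_dev_xstats_get(). The function
 * below follows exactly that sequence for each of the three ports.
 */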
static int
check_statistics(void)
{
	int num_ports = 3; /* Hard-coded for this app */
	int i;

	for (i = 0; i < num_ports; i++) {
		int num_stats, num_stats_returned;

		/* Size the arrays with a NULL first call */
		num_stats = rte_event_dev_xstats_names_get(0,
				RTE_EVENT_DEV_XSTATS_PORT,
				i,
				NULL,
				NULL,
				0);
		if (num_stats <= 0)
			return -1;

		uint64_t id[num_stats];
		struct rte_event_dev_xstats_name names[num_stats];
		uint64_t values[num_stats];

		num_stats_returned = rte_event_dev_xstats_names_get(0,
				RTE_EVENT_DEV_XSTATS_PORT,
				i,
				names,
				id,
				num_stats);
		if (num_stats != num_stats_returned)
			return -1;

		num_stats_returned = rte_event_dev_xstats_get(0,
				RTE_EVENT_DEV_XSTATS_PORT,
				i,
				id,
				values,
				num_stats);
		if (num_stats != num_stats_returned)
			return -1;

		int err = check_qid_stats(id, i);
		if (err)
			return err;
	}
	return 0;
}

#define OLD_NUM_PACKETS 3
#define NEW_NUM_PACKETS 2
static int
single_link_w_stats(struct test *t)
{
	const uint8_t rx_port = 0;
	const uint8_t w1_port = 1;
	const uint8_t tx_port = 2;
	int err;
	int i;
	uint32_t deq_pkts;
	struct rte_mbuf *mbufs[3];
	RTE_SET_USED(mbufs);

	/* Create instance with 3 ports */
	if (init(t, 2, tx_port + 1) < 0 ||
			create_ports(t, 3) < 0 || /* 0,1,2 */
			create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
			create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device", __LINE__);
		return -1;
	}

	/*
	 * Simplified test setup diagram:
	 *
	 * rx_port(0)
	 *           \
	 *            qid0 - w1_port(1) - qid1
	 *                                    \
	 *                                     tx_port(2)
	 */

	err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]",
				__LINE__,
				t->port[1],
				t->qid[0]);
		cleanup(t);
		return -1;
	}

	err = rte_event_port_link(evdev, t->port[2], &t->qid[1], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error linking port:[%u] to queue:[%u]",
				__LINE__,
				t->port[2],
				t->qid[1]);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) != 0) {
		PMD_DRV_LOG(ERR, "%d: failed to start device", __LINE__);
		cleanup(t);
		return -1;
	}

	/*
	 * Enqueue 3 packets to the rx port
	 */
	for (i = 0; i < OLD_NUM_PACKETS; i++) {
		struct rte_event ev;
		mbufs[i] = rte_gen_arp(0, t->mbuf_pool);
		if (!mbufs[i]) {
			PMD_DRV_LOG(ERR, "%d: gen of pkt failed", __LINE__);
			return -1;
		}

		ev.queue_id = t->qid[0];
		ev.op = RTE_EVENT_OP_NEW;
		ev.mbuf = mbufs[i];
		*rte_event_pmd_selftest_seqn(mbufs[i]) = 1234 + i;

		/* generate pkt and enqueue */
		err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1);
		if (err != 1) {
			PMD_DRV_LOG(ERR, "%d: Failed to enqueue pkt %u on port %u, retval = %u",
					__LINE__,
					i,
					t->port[rx_port],
					err);
			return -1;
		}
	}

	/* Dequeue the 3 packets, from SINGLE_LINK worker port */
	struct rte_event deq_ev[3];

	deq_pkts = rte_event_dequeue_burst(evdev,
			t->port[w1_port],
			deq_ev, 3, 0);

	if (deq_pkts != 3) {
		PMD_DRV_LOG(ERR, "%d: deq not 3!", __LINE__);
		cleanup(t);
		return -1;
	}

	/* Just enqueue 2 onto new ring */
	for (i = 0; i < NEW_NUM_PACKETS; i++)
		deq_ev[i].queue_id = t->qid[1];

	deq_pkts = rte_event_enqueue_burst(evdev,
			t->port[w1_port],
			deq_ev,
			NEW_NUM_PACKETS);

	if (deq_pkts != 2) {
		PMD_DRV_LOG(ERR, "%d: enq not 2 but %u!", __LINE__, deq_pkts);
		cleanup(t);
		return -1;
	}

	/* dequeue from the tx port, we should get 2 packets */
	deq_pkts = rte_event_dequeue_burst(evdev,
			t->port[tx_port],
			deq_ev,
			3,
			0);

	/* Check to see if we've got both packets */
	if (deq_pkts != 2) {
		PMD_DRV_LOG(ERR, "%d: expected 2 pkts at tx port got %d from port %d",
				__LINE__, deq_pkts, tx_port);
		cleanup(t);
		return -1;
	}

	/* check_statistics() returns 0 on success */
	if (check_statistics() != 0) {
		PMD_DRV_LOG(ERR, "xstats check failed");
		cleanup(t);
		return -1;
	}

	cleanup(t);

	return 0;
}

static int
single_link(struct test *t)
{
	const uint8_t tx_port = 2;
	int err;
	struct rte_mbuf *mbufs[3];
	RTE_SET_USED(mbufs);

	/* Create instance with 3 ports */
	if (init(t, 2, tx_port+1) < 0 ||
			create_ports(t, 3) < 0 || /* 0,1,2 */
			create_queues_type(t, 1, OPDL_Q_TYPE_SINGLE_LINK) < 0 ||
			create_queues_type(t, 1, OPDL_Q_TYPE_ORDERED) < 0) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device", __LINE__);
		return -1;
	}

	/*
	 * Simplified test setup diagram:
	 *
	 * rx_port(0)
	 *           \
	 *            qid0 - w1_port(1) - qid1
	 *                                    \
	 *                                     tx_port(2)
	 */

	err = rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error mapping lb qid", __LINE__);
		cleanup(t);
		return -1;
	}

	/* Link a second port to the SINGLE_LINK queue; the device must
	 * reject this configuration, at the latest on start.
	 */
	err = rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL,
			1);
	if (err != 1) {
		PMD_DRV_LOG(ERR, "%d: error mapping lb qid", __LINE__);
		cleanup(t);
		return -1;
	}

	if (rte_event_dev_start(evdev) == 0) {
		PMD_DRV_LOG(ERR, "%d: start DIDN'T FAIL with more than 1 "
				"SINGLE_LINK PORT", __LINE__);
		cleanup(t);
		return -1;
	}

	cleanup(t);

	return 0;
}
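/*
 * Fill a burst of NEW events all targeting the given queue. The mbuf
 * pointer is a dummy sentinel (0xdead0000): qid_basic() only checks event
 * routing and never touches packet data, so no real mbufs are needed.
 */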
static __rte_always_inline void
populate_event_burst(struct rte_event ev[],
		uint8_t qid,
		uint16_t num_events)
{
	uint16_t i;
	for (i = 0; i < num_events; i++) {
		ev[i].flow_id = 1;
		ev[i].op = RTE_EVENT_OP_NEW;
		ev[i].sched_type = RTE_SCHED_TYPE_ORDERED;
		ev[i].queue_id = qid;
		ev[i].event_type = RTE_EVENT_TYPE_ETHDEV;
		ev[i].sub_event_type = 0;
		ev[i].priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
		ev[i].mbuf = (struct rte_mbuf *)0xdead0000;
	}
}

#define NUM_QUEUES 3
#define BATCH_SIZE 32

static int
qid_basic(struct test *t)
{
	int err = 0;

	uint8_t q_id = 0;
	uint8_t p_id = 0;

	uint32_t num_events;
	uint32_t i;

	struct rte_event ev[BATCH_SIZE];

	/* Create instance with 4 ports */
	if (init(t, NUM_QUEUES, NUM_QUEUES+1) < 0 ||
			create_ports(t, NUM_QUEUES+1) < 0 ||
			create_queues_type(t, NUM_QUEUES, OPDL_Q_TYPE_ORDERED)) {
		PMD_DRV_LOG(ERR, "%d: Error initializing device", __LINE__);
		return -1;
	}

	/* Link port n+1 to queue n, forming a linear pipeline */
	for (i = 0; i < NUM_QUEUES; i++) {
		int nb_linked;
		q_id = i;

		nb_linked = rte_event_port_link(evdev,
				i+1, /* port = q_id + 1 */
				&q_id,
				NULL,
				1);

		if (nb_linked != 1) {

			PMD_DRV_LOG(ERR, "%s:%d: error mapping port:%u to queue:%u",
					__FILE__,
					__LINE__,
					i + 1,
					q_id);

			err = -1;
			break;
		}

	}


	/* Try and link to the same port again */
	if (!err) {
		uint8_t t_qid = 0;
		if (rte_event_port_link(evdev,
				1,
				&t_qid,
				NULL,
				1) > 0) {
			PMD_DRV_LOG(ERR, "%s:%d: Second call to port link on same port DID NOT fail",
					__FILE__,
					__LINE__);
			err = -1;
		}

		uint32_t test_num_events;

		/* Dequeue on a stopped device must return no events */
		if (!err) {
			test_num_events = rte_event_dequeue_burst(evdev,
					p_id,
					ev,
					BATCH_SIZE,
					0);
			if (test_num_events != 0) {
				PMD_DRV_LOG(ERR, "%s:%d: Dequeue from port %u on stopped device returned events",
						__FILE__,
						__LINE__,
						p_id);
				err = -1;
			}
		}

		/* Enqueue on a stopped device must accept no events */
		if (!err) {
			test_num_events = rte_event_enqueue_burst(evdev,
					p_id,
					ev,
					BATCH_SIZE);
			if (test_num_events != 0) {
				PMD_DRV_LOG(ERR, "%s:%d: Enqueue to port %u on stopped device accepted events",
						__FILE__,
						__LINE__,
						p_id);
				err = -1;
			}
		}
	}


	/* Start the device */
	if (!err) {
		if (rte_event_dev_start(evdev) < 0) {
			PMD_DRV_LOG(ERR, "%s:%d: Error with start call",
					__FILE__,
					__LINE__);
			err = -1;
		}
	}


	/* Check we can't do any more links now that device is started. */
	if (!err) {
		uint8_t t_qid = 0;
		if (rte_event_port_link(evdev,
				1,
				&t_qid,
				NULL,
				1) > 0) {
			PMD_DRV_LOG(ERR, "%s:%d: Call to port link on started device DID NOT fail",
					__FILE__,
					__LINE__);
			err = -1;
		}
	}

	/* Inject a burst at the head of the pipeline */
	if (!err) {

		q_id = 0;

		populate_event_burst(ev,
				q_id,
				BATCH_SIZE);

		num_events = rte_event_enqueue_burst(evdev,
				p_id,
				ev,
				BATCH_SIZE);
		if (num_events != BATCH_SIZE) {
			PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing rx packets",
					__FILE__,
					__LINE__);
			err = -1;
		}
	}

	/* Walk the burst through each intermediate stage */
	if (!err) {
		while (++p_id < NUM_QUEUES) {

			num_events = rte_event_dequeue_burst(evdev,
					p_id,
					ev,
					BATCH_SIZE,
					0);

			if (num_events != BATCH_SIZE) {
				PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from port %u",
						__FILE__,
						__LINE__,
						p_id);
				err = -1;
				break;
			}

			if (ev[0].queue_id != q_id) {
				PMD_DRV_LOG(ERR, "%s:%d: Event at port[%u] has q_id:[%u], expected:[%u]",
						__FILE__,
						__LINE__,
						p_id,
						ev[0].queue_id,
						q_id);
				err = -1;
				break;
			}

			populate_event_burst(ev,
					++q_id,
					BATCH_SIZE);

			num_events = rte_event_enqueue_burst(evdev,
					p_id,
					ev,
					BATCH_SIZE);
			if (num_events != BATCH_SIZE) {
				PMD_DRV_LOG(ERR, "%s:%d: Error enqueuing packets from port:%u to queue:%u",
						__FILE__,
						__LINE__,
						p_id,
						q_id);
				err = -1;
				break;
			}
		}
	}

	/* Drain the burst at the tail of the pipeline */
	if (!err) {
		num_events = rte_event_dequeue_burst(evdev,
				p_id,
				ev,
				BATCH_SIZE,
				0);
		if (num_events != BATCH_SIZE) {
			PMD_DRV_LOG(ERR, "%s:%d: Error dequeuing packets from tx port %u",
					__FILE__,
					__LINE__,
					p_id);
			err = -1;
		}
	}

	cleanup(t);

	return err;
}
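/*
 * Entry point for the OPDL eventdev selftest. If the "event_opdl0" vdev
 * is not present it is created here with validation statistics enabled
 * ("do_validation=1"). A minimal sketch of driving this from a test
 * application (assuming the PMD wires this function up as its eventdev
 * selftest op) would be:
 *
 *	int dev_id = rte_event_dev_get_dev_id("event_opdl0");
 *	if (dev_id >= 0 && rte_event_dev_selftest(dev_id) == 0)
 *		printf("opdl selftest passed\n");
 */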
int
opdl_selftest(void)
{
	struct test *t = malloc(sizeof(struct test));
	int ret;

	const char *eventdev_name = "event_opdl0";

	if (!t) {
		PMD_DRV_LOG(ERR, "Failed to allocate test structure");
		return -1;
	}

	evdev = rte_event_dev_get_dev_id(eventdev_name);

	if (evdev < 0) {
		PMD_DRV_LOG(ERR, "%d: Eventdev %s not found - creating.",
				__LINE__, eventdev_name);
		/* turn on stats by default */
		if (rte_vdev_init(eventdev_name, "do_validation=1") < 0) {
			PMD_DRV_LOG(ERR, "Error creating eventdev");
			free(t);
			return -1;
		}
		evdev = rte_event_dev_get_dev_id(eventdev_name);
		if (evdev < 0) {
			PMD_DRV_LOG(ERR, "Error finding newly created eventdev");
			free(t);
			return -1;
		}
	}

	/* Only create mbuf pool once, reuse for each test run */
	if (!eventdev_func_mempool) {
		eventdev_func_mempool = rte_pktmbuf_pool_create(
				"EVENTDEV_SW_SA_MBUF_POOL",
				(1<<12), /* 4k buffers */
				32 /*MBUF_CACHE_SIZE*/,
				0,
				512, /* use very small mbufs */
				rte_socket_id());
		if (!eventdev_func_mempool) {
			PMD_DRV_LOG(ERR, "ERROR creating mempool");
			free(t);
			return -1;
		}
	}
	t->mbuf_pool = eventdev_func_mempool;

	/* Accumulate the results so an early failure is not masked by a
	 * later pass.
	 */
	PMD_DRV_LOG(ERR, "*** Running Ordered Basic test...");
	ret = ordered_basic(t);

	PMD_DRV_LOG(ERR, "*** Running Atomic Basic test...");
	ret |= atomic_basic(t);

	PMD_DRV_LOG(ERR, "*** Running QID Basic test...");
	ret |= qid_basic(t);

	PMD_DRV_LOG(ERR, "*** Running SINGLE LINK failure test...");
	ret |= single_link(t);

	PMD_DRV_LOG(ERR, "*** Running SINGLE LINK w stats test...");
	ret |= single_link_w_stats(t);

	/*
	 * Free test instance, free mempool
	 */
	rte_mempool_free(t->mbuf_pool);
	eventdev_func_mempool = NULL; /* don't leave a dangling pointer */
	free(t);

	return ret;
}