1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2016-2017 Intel Corporation 3 */ 4 5 #include <stdio.h> 6 #include <string.h> 7 #include <stdint.h> 8 #include <errno.h> 9 #include <unistd.h> 10 #include <sys/queue.h> 11 12 #include <rte_memory.h> 13 #include <rte_launch.h> 14 #include <rte_eal.h> 15 #include <rte_per_lcore.h> 16 #include <rte_lcore.h> 17 #include <rte_debug.h> 18 #include <rte_ethdev.h> 19 #include <rte_cycles.h> 20 #include <rte_eventdev.h> 21 #include <rte_pause.h> 22 #include <rte_service.h> 23 #include <rte_service_component.h> 24 #include <rte_bus_vdev.h> 25 26 #include "sw_evdev.h" 27 28 #define MAX_PORTS 16 29 #define MAX_QIDS 16 30 #define NUM_PACKETS (1<<18) 31 32 static int evdev; 33 34 struct test { 35 struct rte_mempool *mbuf_pool; 36 uint8_t port[MAX_PORTS]; 37 uint8_t qid[MAX_QIDS]; 38 int nb_qids; 39 uint32_t service_id; 40 }; 41 42 static struct rte_event release_ev; 43 44 static inline struct rte_mbuf * 45 rte_gen_arp(int portid, struct rte_mempool *mp) 46 { 47 /* 48 * len = 14 + 46 49 * ARP, Request who-has 10.0.0.1 tell 10.0.0.2, length 46 50 */ 51 static const uint8_t arp_request[] = { 52 /*0x0000:*/ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xec, 0xa8, 53 0x6b, 0xfd, 0x02, 0x29, 0x08, 0x06, 0x00, 0x01, 54 /*0x0010:*/ 0x08, 0x00, 0x06, 0x04, 0x00, 0x01, 0xec, 0xa8, 55 0x6b, 0xfd, 0x02, 0x29, 0x0a, 0x00, 0x00, 0x01, 56 /*0x0020:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x00, 57 0x00, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 58 /*0x0030:*/ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 59 0x00, 0x00, 0x00, 0x00 60 }; 61 struct rte_mbuf *m; 62 int pkt_len = sizeof(arp_request) - 1; 63 64 m = rte_pktmbuf_alloc(mp); 65 if (!m) 66 return 0; 67 68 memcpy((void *)((uintptr_t)m->buf_addr + m->data_off), 69 arp_request, pkt_len); 70 rte_pktmbuf_pkt_len(m) = pkt_len; 71 rte_pktmbuf_data_len(m) = pkt_len; 72 73 RTE_SET_USED(portid); 74 75 return m; 76 } 77 78 static void 79 xstats_print(void) 80 { 81 const uint32_t XSTATS_MAX = 1024; 82 uint32_t i; 83 uint32_t ids[XSTATS_MAX]; 84 uint64_t values[XSTATS_MAX]; 85 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX]; 86 87 for (i = 0; i < XSTATS_MAX; i++) 88 ids[i] = i; 89 90 /* Device names / values */ 91 int ret = rte_event_dev_xstats_names_get(evdev, 92 RTE_EVENT_DEV_XSTATS_DEVICE, 0, 93 xstats_names, ids, XSTATS_MAX); 94 if (ret < 0) { 95 printf("%d: xstats names get() returned error\n", 96 __LINE__); 97 return; 98 } 99 ret = rte_event_dev_xstats_get(evdev, 100 RTE_EVENT_DEV_XSTATS_DEVICE, 101 0, ids, values, ret); 102 if (ret > (signed int)XSTATS_MAX) 103 printf("%s %d: more xstats available than space\n", 104 __func__, __LINE__); 105 for (i = 0; (signed int)i < ret; i++) { 106 printf("%d : %s : %"PRIu64"\n", 107 i, xstats_names[i].name, values[i]); 108 } 109 110 /* Port names / values */ 111 ret = rte_event_dev_xstats_names_get(evdev, 112 RTE_EVENT_DEV_XSTATS_PORT, 0, 113 xstats_names, ids, XSTATS_MAX); 114 ret = rte_event_dev_xstats_get(evdev, 115 RTE_EVENT_DEV_XSTATS_PORT, 1, 116 ids, values, ret); 117 if (ret > (signed int)XSTATS_MAX) 118 printf("%s %d: more xstats available than space\n", 119 __func__, __LINE__); 120 for (i = 0; (signed int)i < ret; i++) { 121 printf("%d : %s : %"PRIu64"\n", 122 i, xstats_names[i].name, values[i]); 123 } 124 125 /* Queue names / values */ 126 ret = rte_event_dev_xstats_names_get(evdev, 127 RTE_EVENT_DEV_XSTATS_QUEUE, 0, 128 xstats_names, ids, XSTATS_MAX); 129 ret = rte_event_dev_xstats_get(evdev, 130 RTE_EVENT_DEV_XSTATS_QUEUE, 131 1, ids, values, ret); 132 if (ret > (signed int)XSTATS_MAX) 133 printf("%s %d: more xstats available than space\n", 134 __func__, __LINE__); 135 for (i = 0; (signed int)i < ret; i++) { 136 printf("%d : %s : %"PRIu64"\n", 137 i, xstats_names[i].name, values[i]); 138 } 139 } 140 141 /* initialization and config */ 142 static inline int 143 init(struct test *t, int nb_queues, int nb_ports) 144 { 145 struct rte_event_dev_config config = { 146 .nb_event_queues = nb_queues, 147 .nb_event_ports = nb_ports, 148 .nb_event_queue_flows = 1024, 149 .nb_events_limit = 4096, 150 .nb_event_port_dequeue_depth = 128, 151 .nb_event_port_enqueue_depth = 128, 152 }; 153 int ret; 154 155 void *temp = t->mbuf_pool; /* save and restore mbuf pool */ 156 157 memset(t, 0, sizeof(*t)); 158 t->mbuf_pool = temp; 159 160 ret = rte_event_dev_configure(evdev, &config); 161 if (ret < 0) 162 printf("%d: Error configuring device\n", __LINE__); 163 return ret; 164 }; 165 166 static inline int 167 create_ports(struct test *t, int num_ports) 168 { 169 int i; 170 static const struct rte_event_port_conf conf = { 171 .new_event_threshold = 1024, 172 .dequeue_depth = 32, 173 .enqueue_depth = 64, 174 .disable_implicit_release = 0, 175 }; 176 if (num_ports > MAX_PORTS) 177 return -1; 178 179 for (i = 0; i < num_ports; i++) { 180 if (rte_event_port_setup(evdev, i, &conf) < 0) { 181 printf("Error setting up port %d\n", i); 182 return -1; 183 } 184 t->port[i] = i; 185 } 186 187 return 0; 188 } 189 190 static inline int 191 create_lb_qids(struct test *t, int num_qids, uint32_t flags) 192 { 193 int i; 194 195 /* Q creation */ 196 const struct rte_event_queue_conf conf = { 197 .schedule_type = flags, 198 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 199 .nb_atomic_flows = 1024, 200 .nb_atomic_order_sequences = 1024, 201 }; 202 203 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) { 204 if (rte_event_queue_setup(evdev, i, &conf) < 0) { 205 printf("%d: error creating qid %d\n", __LINE__, i); 206 return -1; 207 } 208 t->qid[i] = i; 209 } 210 t->nb_qids += num_qids; 211 if (t->nb_qids > MAX_QIDS) 212 return -1; 213 214 return 0; 215 } 216 217 static inline int 218 create_atomic_qids(struct test *t, int num_qids) 219 { 220 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ATOMIC); 221 } 222 223 static inline int 224 create_ordered_qids(struct test *t, int num_qids) 225 { 226 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_ORDERED); 227 } 228 229 230 static inline int 231 create_unordered_qids(struct test *t, int num_qids) 232 { 233 return create_lb_qids(t, num_qids, RTE_SCHED_TYPE_PARALLEL); 234 } 235 236 static inline int 237 create_directed_qids(struct test *t, int num_qids, const uint8_t ports[]) 238 { 239 int i; 240 241 /* Q creation */ 242 static const struct rte_event_queue_conf conf = { 243 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 244 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK, 245 }; 246 247 for (i = t->nb_qids; i < t->nb_qids + num_qids; i++) { 248 if (rte_event_queue_setup(evdev, i, &conf) < 0) { 249 printf("%d: error creating qid %d\n", __LINE__, i); 250 return -1; 251 } 252 t->qid[i] = i; 253 254 if (rte_event_port_link(evdev, ports[i - t->nb_qids], 255 &t->qid[i], NULL, 1) != 1) { 256 printf("%d: error creating link for qid %d\n", 257 __LINE__, i); 258 return -1; 259 } 260 } 261 t->nb_qids += num_qids; 262 if (t->nb_qids > MAX_QIDS) 263 return -1; 264 265 return 0; 266 } 267 268 /* destruction */ 269 static inline int 270 cleanup(struct test *t __rte_unused) 271 { 272 rte_event_dev_stop(evdev); 273 rte_event_dev_close(evdev); 274 return 0; 275 }; 276 277 struct test_event_dev_stats { 278 uint64_t rx_pkts; /**< Total packets received */ 279 uint64_t rx_dropped; /**< Total packets dropped (Eg Invalid QID) */ 280 uint64_t tx_pkts; /**< Total packets transmitted */ 281 282 /** Packets received on this port */ 283 uint64_t port_rx_pkts[MAX_PORTS]; 284 /** Packets dropped on this port */ 285 uint64_t port_rx_dropped[MAX_PORTS]; 286 /** Packets inflight on this port */ 287 uint64_t port_inflight[MAX_PORTS]; 288 /** Packets transmitted on this port */ 289 uint64_t port_tx_pkts[MAX_PORTS]; 290 /** Packets received on this qid */ 291 uint64_t qid_rx_pkts[MAX_QIDS]; 292 /** Packets dropped on this qid */ 293 uint64_t qid_rx_dropped[MAX_QIDS]; 294 /** Packets transmitted on this qid */ 295 uint64_t qid_tx_pkts[MAX_QIDS]; 296 }; 297 298 static inline int 299 test_event_dev_stats_get(int dev_id, struct test_event_dev_stats *stats) 300 { 301 static uint32_t i; 302 static uint32_t total_ids[3]; /* rx, tx and drop */ 303 static uint32_t port_rx_pkts_ids[MAX_PORTS]; 304 static uint32_t port_rx_dropped_ids[MAX_PORTS]; 305 static uint32_t port_inflight_ids[MAX_PORTS]; 306 static uint32_t port_tx_pkts_ids[MAX_PORTS]; 307 static uint32_t qid_rx_pkts_ids[MAX_QIDS]; 308 static uint32_t qid_rx_dropped_ids[MAX_QIDS]; 309 static uint32_t qid_tx_pkts_ids[MAX_QIDS]; 310 311 312 stats->rx_pkts = rte_event_dev_xstats_by_name_get(dev_id, 313 "dev_rx", &total_ids[0]); 314 stats->rx_dropped = rte_event_dev_xstats_by_name_get(dev_id, 315 "dev_drop", &total_ids[1]); 316 stats->tx_pkts = rte_event_dev_xstats_by_name_get(dev_id, 317 "dev_tx", &total_ids[2]); 318 for (i = 0; i < MAX_PORTS; i++) { 319 char name[32]; 320 snprintf(name, sizeof(name), "port_%u_rx", i); 321 stats->port_rx_pkts[i] = rte_event_dev_xstats_by_name_get( 322 dev_id, name, &port_rx_pkts_ids[i]); 323 snprintf(name, sizeof(name), "port_%u_drop", i); 324 stats->port_rx_dropped[i] = rte_event_dev_xstats_by_name_get( 325 dev_id, name, &port_rx_dropped_ids[i]); 326 snprintf(name, sizeof(name), "port_%u_inflight", i); 327 stats->port_inflight[i] = rte_event_dev_xstats_by_name_get( 328 dev_id, name, &port_inflight_ids[i]); 329 snprintf(name, sizeof(name), "port_%u_tx", i); 330 stats->port_tx_pkts[i] = rte_event_dev_xstats_by_name_get( 331 dev_id, name, &port_tx_pkts_ids[i]); 332 } 333 for (i = 0; i < MAX_QIDS; i++) { 334 char name[32]; 335 snprintf(name, sizeof(name), "qid_%u_rx", i); 336 stats->qid_rx_pkts[i] = rte_event_dev_xstats_by_name_get( 337 dev_id, name, &qid_rx_pkts_ids[i]); 338 snprintf(name, sizeof(name), "qid_%u_drop", i); 339 stats->qid_rx_dropped[i] = rte_event_dev_xstats_by_name_get( 340 dev_id, name, &qid_rx_dropped_ids[i]); 341 snprintf(name, sizeof(name), "qid_%u_tx", i); 342 stats->qid_tx_pkts[i] = rte_event_dev_xstats_by_name_get( 343 dev_id, name, &qid_tx_pkts_ids[i]); 344 } 345 346 return 0; 347 } 348 349 /* run_prio_packet_test 350 * This performs a basic packet priority check on the test instance passed in. 351 * It is factored out of the main priority tests as the same tests must be 352 * performed to ensure prioritization of each type of QID. 353 * 354 * Requirements: 355 * - An initialized test structure, including mempool 356 * - t->port[0] is initialized for both Enq / Deq of packets to the QID 357 * - t->qid[0] is the QID to be tested 358 * - if LB QID, the CQ must be mapped to the QID. 359 */ 360 static int 361 run_prio_packet_test(struct test *t) 362 { 363 int err; 364 const uint32_t MAGIC_SEQN[] = {4711, 1234}; 365 const uint32_t PRIORITY[] = { 366 RTE_EVENT_DEV_PRIORITY_NORMAL, 367 RTE_EVENT_DEV_PRIORITY_HIGHEST 368 }; 369 unsigned int i; 370 for (i = 0; i < RTE_DIM(MAGIC_SEQN); i++) { 371 /* generate pkt and enqueue */ 372 struct rte_event ev; 373 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); 374 if (!arp) { 375 printf("%d: gen of pkt failed\n", __LINE__); 376 return -1; 377 } 378 arp->seqn = MAGIC_SEQN[i]; 379 380 ev = (struct rte_event){ 381 .priority = PRIORITY[i], 382 .op = RTE_EVENT_OP_NEW, 383 .queue_id = t->qid[0], 384 .mbuf = arp 385 }; 386 err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1); 387 if (err < 0) { 388 printf("%d: error failed to enqueue\n", __LINE__); 389 return -1; 390 } 391 } 392 393 rte_service_run_iter_on_app_lcore(t->service_id, 1); 394 395 struct test_event_dev_stats stats; 396 err = test_event_dev_stats_get(evdev, &stats); 397 if (err) { 398 printf("%d: error failed to get stats\n", __LINE__); 399 return -1; 400 } 401 402 if (stats.port_rx_pkts[t->port[0]] != 2) { 403 printf("%d: error stats incorrect for directed port\n", 404 __LINE__); 405 rte_event_dev_dump(evdev, stdout); 406 return -1; 407 } 408 409 struct rte_event ev, ev2; 410 uint32_t deq_pkts; 411 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0); 412 if (deq_pkts != 1) { 413 printf("%d: error failed to deq\n", __LINE__); 414 rte_event_dev_dump(evdev, stdout); 415 return -1; 416 } 417 if (ev.mbuf->seqn != MAGIC_SEQN[1]) { 418 printf("%d: first packet out not highest priority\n", 419 __LINE__); 420 rte_event_dev_dump(evdev, stdout); 421 return -1; 422 } 423 rte_pktmbuf_free(ev.mbuf); 424 425 deq_pkts = rte_event_dequeue_burst(evdev, t->port[0], &ev2, 1, 0); 426 if (deq_pkts != 1) { 427 printf("%d: error failed to deq\n", __LINE__); 428 rte_event_dev_dump(evdev, stdout); 429 return -1; 430 } 431 if (ev2.mbuf->seqn != MAGIC_SEQN[0]) { 432 printf("%d: second packet out not lower priority\n", 433 __LINE__); 434 rte_event_dev_dump(evdev, stdout); 435 return -1; 436 } 437 rte_pktmbuf_free(ev2.mbuf); 438 439 cleanup(t); 440 return 0; 441 } 442 443 static int 444 test_single_directed_packet(struct test *t) 445 { 446 const int rx_enq = 0; 447 const int wrk_enq = 2; 448 int err; 449 450 /* Create instance with 3 directed QIDs going to 3 ports */ 451 if (init(t, 3, 3) < 0 || 452 create_ports(t, 3) < 0 || 453 create_directed_qids(t, 3, t->port) < 0) 454 return -1; 455 456 if (rte_event_dev_start(evdev) < 0) { 457 printf("%d: Error with start call\n", __LINE__); 458 return -1; 459 } 460 461 /************** FORWARD ****************/ 462 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); 463 struct rte_event ev = { 464 .op = RTE_EVENT_OP_NEW, 465 .queue_id = wrk_enq, 466 .mbuf = arp, 467 }; 468 469 if (!arp) { 470 printf("%d: gen of pkt failed\n", __LINE__); 471 return -1; 472 } 473 474 const uint32_t MAGIC_SEQN = 4711; 475 arp->seqn = MAGIC_SEQN; 476 477 /* generate pkt and enqueue */ 478 err = rte_event_enqueue_burst(evdev, rx_enq, &ev, 1); 479 if (err < 0) { 480 printf("%d: error failed to enqueue\n", __LINE__); 481 return -1; 482 } 483 484 /* Run schedule() as dir packets may need to be re-ordered */ 485 rte_service_run_iter_on_app_lcore(t->service_id, 1); 486 487 struct test_event_dev_stats stats; 488 err = test_event_dev_stats_get(evdev, &stats); 489 if (err) { 490 printf("%d: error failed to get stats\n", __LINE__); 491 return -1; 492 } 493 494 if (stats.port_rx_pkts[rx_enq] != 1) { 495 printf("%d: error stats incorrect for directed port\n", 496 __LINE__); 497 return -1; 498 } 499 500 uint32_t deq_pkts; 501 deq_pkts = rte_event_dequeue_burst(evdev, wrk_enq, &ev, 1, 0); 502 if (deq_pkts != 1) { 503 printf("%d: error failed to deq\n", __LINE__); 504 return -1; 505 } 506 507 err = test_event_dev_stats_get(evdev, &stats); 508 if (stats.port_rx_pkts[wrk_enq] != 0 && 509 stats.port_rx_pkts[wrk_enq] != 1) { 510 printf("%d: error directed stats post-dequeue\n", __LINE__); 511 return -1; 512 } 513 514 if (ev.mbuf->seqn != MAGIC_SEQN) { 515 printf("%d: error magic sequence number not dequeued\n", 516 __LINE__); 517 return -1; 518 } 519 520 rte_pktmbuf_free(ev.mbuf); 521 cleanup(t); 522 return 0; 523 } 524 525 static int 526 test_directed_forward_credits(struct test *t) 527 { 528 uint32_t i; 529 int32_t err; 530 531 if (init(t, 1, 1) < 0 || 532 create_ports(t, 1) < 0 || 533 create_directed_qids(t, 1, t->port) < 0) 534 return -1; 535 536 if (rte_event_dev_start(evdev) < 0) { 537 printf("%d: Error with start call\n", __LINE__); 538 return -1; 539 } 540 541 struct rte_event ev = { 542 .op = RTE_EVENT_OP_NEW, 543 .queue_id = 0, 544 }; 545 546 for (i = 0; i < 1000; i++) { 547 err = rte_event_enqueue_burst(evdev, 0, &ev, 1); 548 if (err < 0) { 549 printf("%d: error failed to enqueue\n", __LINE__); 550 return -1; 551 } 552 rte_service_run_iter_on_app_lcore(t->service_id, 1); 553 554 uint32_t deq_pkts; 555 deq_pkts = rte_event_dequeue_burst(evdev, 0, &ev, 1, 0); 556 if (deq_pkts != 1) { 557 printf("%d: error failed to deq\n", __LINE__); 558 return -1; 559 } 560 561 /* re-write event to be a forward, and continue looping it */ 562 ev.op = RTE_EVENT_OP_FORWARD; 563 } 564 565 cleanup(t); 566 return 0; 567 } 568 569 570 static int 571 test_priority_directed(struct test *t) 572 { 573 if (init(t, 1, 1) < 0 || 574 create_ports(t, 1) < 0 || 575 create_directed_qids(t, 1, t->port) < 0) { 576 printf("%d: Error initializing device\n", __LINE__); 577 return -1; 578 } 579 580 if (rte_event_dev_start(evdev) < 0) { 581 printf("%d: Error with start call\n", __LINE__); 582 return -1; 583 } 584 585 return run_prio_packet_test(t); 586 } 587 588 static int 589 test_priority_atomic(struct test *t) 590 { 591 if (init(t, 1, 1) < 0 || 592 create_ports(t, 1) < 0 || 593 create_atomic_qids(t, 1) < 0) { 594 printf("%d: Error initializing device\n", __LINE__); 595 return -1; 596 } 597 598 /* map the QID */ 599 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) { 600 printf("%d: error mapping qid to port\n", __LINE__); 601 return -1; 602 } 603 if (rte_event_dev_start(evdev) < 0) { 604 printf("%d: Error with start call\n", __LINE__); 605 return -1; 606 } 607 608 return run_prio_packet_test(t); 609 } 610 611 static int 612 test_priority_ordered(struct test *t) 613 { 614 if (init(t, 1, 1) < 0 || 615 create_ports(t, 1) < 0 || 616 create_ordered_qids(t, 1) < 0) { 617 printf("%d: Error initializing device\n", __LINE__); 618 return -1; 619 } 620 621 /* map the QID */ 622 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) { 623 printf("%d: error mapping qid to port\n", __LINE__); 624 return -1; 625 } 626 if (rte_event_dev_start(evdev) < 0) { 627 printf("%d: Error with start call\n", __LINE__); 628 return -1; 629 } 630 631 return run_prio_packet_test(t); 632 } 633 634 static int 635 test_priority_unordered(struct test *t) 636 { 637 if (init(t, 1, 1) < 0 || 638 create_ports(t, 1) < 0 || 639 create_unordered_qids(t, 1) < 0) { 640 printf("%d: Error initializing device\n", __LINE__); 641 return -1; 642 } 643 644 /* map the QID */ 645 if (rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1) != 1) { 646 printf("%d: error mapping qid to port\n", __LINE__); 647 return -1; 648 } 649 if (rte_event_dev_start(evdev) < 0) { 650 printf("%d: Error with start call\n", __LINE__); 651 return -1; 652 } 653 654 return run_prio_packet_test(t); 655 } 656 657 static int 658 burst_packets(struct test *t) 659 { 660 /************** CONFIG ****************/ 661 uint32_t i; 662 int err; 663 int ret; 664 665 /* Create instance with 2 ports and 2 queues */ 666 if (init(t, 2, 2) < 0 || 667 create_ports(t, 2) < 0 || 668 create_atomic_qids(t, 2) < 0) { 669 printf("%d: Error initializing device\n", __LINE__); 670 return -1; 671 } 672 673 /* CQ mapping to QID */ 674 ret = rte_event_port_link(evdev, t->port[0], &t->qid[0], NULL, 1); 675 if (ret != 1) { 676 printf("%d: error mapping lb qid0\n", __LINE__); 677 return -1; 678 } 679 ret = rte_event_port_link(evdev, t->port[1], &t->qid[1], NULL, 1); 680 if (ret != 1) { 681 printf("%d: error mapping lb qid1\n", __LINE__); 682 return -1; 683 } 684 685 if (rte_event_dev_start(evdev) < 0) { 686 printf("%d: Error with start call\n", __LINE__); 687 return -1; 688 } 689 690 /************** FORWARD ****************/ 691 const uint32_t rx_port = 0; 692 const uint32_t NUM_PKTS = 2; 693 694 for (i = 0; i < NUM_PKTS; i++) { 695 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); 696 if (!arp) { 697 printf("%d: error generating pkt\n", __LINE__); 698 return -1; 699 } 700 701 struct rte_event ev = { 702 .op = RTE_EVENT_OP_NEW, 703 .queue_id = i % 2, 704 .flow_id = i % 3, 705 .mbuf = arp, 706 }; 707 /* generate pkt and enqueue */ 708 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1); 709 if (err < 0) { 710 printf("%d: Failed to enqueue\n", __LINE__); 711 return -1; 712 } 713 } 714 rte_service_run_iter_on_app_lcore(t->service_id, 1); 715 716 /* Check stats for all NUM_PKTS arrived to sched core */ 717 struct test_event_dev_stats stats; 718 719 err = test_event_dev_stats_get(evdev, &stats); 720 if (err) { 721 printf("%d: failed to get stats\n", __LINE__); 722 return -1; 723 } 724 if (stats.rx_pkts != NUM_PKTS || stats.tx_pkts != NUM_PKTS) { 725 printf("%d: Sched core didn't receive all %d pkts\n", 726 __LINE__, NUM_PKTS); 727 rte_event_dev_dump(evdev, stdout); 728 return -1; 729 } 730 731 uint32_t deq_pkts; 732 int p; 733 734 deq_pkts = 0; 735 /******** DEQ QID 1 *******/ 736 do { 737 struct rte_event ev; 738 p = rte_event_dequeue_burst(evdev, t->port[0], &ev, 1, 0); 739 deq_pkts += p; 740 rte_pktmbuf_free(ev.mbuf); 741 } while (p); 742 743 if (deq_pkts != NUM_PKTS/2) { 744 printf("%d: Half of NUM_PKTS didn't arrive at port 1\n", 745 __LINE__); 746 return -1; 747 } 748 749 /******** DEQ QID 2 *******/ 750 deq_pkts = 0; 751 do { 752 struct rte_event ev; 753 p = rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0); 754 deq_pkts += p; 755 rte_pktmbuf_free(ev.mbuf); 756 } while (p); 757 if (deq_pkts != NUM_PKTS/2) { 758 printf("%d: Half of NUM_PKTS didn't arrive at port 2\n", 759 __LINE__); 760 return -1; 761 } 762 763 cleanup(t); 764 return 0; 765 } 766 767 static int 768 abuse_inflights(struct test *t) 769 { 770 const int rx_enq = 0; 771 const int wrk_enq = 2; 772 int err; 773 774 /* Create instance with 4 ports */ 775 if (init(t, 1, 4) < 0 || 776 create_ports(t, 4) < 0 || 777 create_atomic_qids(t, 1) < 0) { 778 printf("%d: Error initializing device\n", __LINE__); 779 return -1; 780 } 781 782 /* CQ mapping to QID */ 783 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0); 784 if (err != 1) { 785 printf("%d: error mapping lb qid\n", __LINE__); 786 cleanup(t); 787 return -1; 788 } 789 790 if (rte_event_dev_start(evdev) < 0) { 791 printf("%d: Error with start call\n", __LINE__); 792 return -1; 793 } 794 795 /* Enqueue op only */ 796 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &release_ev, 1); 797 if (err < 0) { 798 printf("%d: Failed to enqueue\n", __LINE__); 799 return -1; 800 } 801 802 /* schedule */ 803 rte_service_run_iter_on_app_lcore(t->service_id, 1); 804 805 struct test_event_dev_stats stats; 806 807 err = test_event_dev_stats_get(evdev, &stats); 808 if (err) { 809 printf("%d: failed to get stats\n", __LINE__); 810 return -1; 811 } 812 813 if (stats.rx_pkts != 0 || 814 stats.tx_pkts != 0 || 815 stats.port_inflight[wrk_enq] != 0) { 816 printf("%d: Sched core didn't handle pkt as expected\n", 817 __LINE__); 818 return -1; 819 } 820 821 cleanup(t); 822 return 0; 823 } 824 825 static int 826 xstats_tests(struct test *t) 827 { 828 const int wrk_enq = 2; 829 int err; 830 831 /* Create instance with 4 ports */ 832 if (init(t, 1, 4) < 0 || 833 create_ports(t, 4) < 0 || 834 create_atomic_qids(t, 1) < 0) { 835 printf("%d: Error initializing device\n", __LINE__); 836 return -1; 837 } 838 839 /* CQ mapping to QID */ 840 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0); 841 if (err != 1) { 842 printf("%d: error mapping lb qid\n", __LINE__); 843 cleanup(t); 844 return -1; 845 } 846 847 if (rte_event_dev_start(evdev) < 0) { 848 printf("%d: Error with start call\n", __LINE__); 849 return -1; 850 } 851 852 const uint32_t XSTATS_MAX = 1024; 853 854 uint32_t i; 855 uint32_t ids[XSTATS_MAX]; 856 uint64_t values[XSTATS_MAX]; 857 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX]; 858 859 for (i = 0; i < XSTATS_MAX; i++) 860 ids[i] = i; 861 862 /* Device names / values */ 863 int ret = rte_event_dev_xstats_names_get(evdev, 864 RTE_EVENT_DEV_XSTATS_DEVICE, 865 0, xstats_names, ids, XSTATS_MAX); 866 if (ret != 6) { 867 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret); 868 return -1; 869 } 870 ret = rte_event_dev_xstats_get(evdev, 871 RTE_EVENT_DEV_XSTATS_DEVICE, 872 0, ids, values, ret); 873 if (ret != 6) { 874 printf("%d: expected 6 stats, got return %d\n", __LINE__, ret); 875 return -1; 876 } 877 878 /* Port names / values */ 879 ret = rte_event_dev_xstats_names_get(evdev, 880 RTE_EVENT_DEV_XSTATS_PORT, 0, 881 xstats_names, ids, XSTATS_MAX); 882 if (ret != 21) { 883 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret); 884 return -1; 885 } 886 ret = rte_event_dev_xstats_get(evdev, 887 RTE_EVENT_DEV_XSTATS_PORT, 0, 888 ids, values, ret); 889 if (ret != 21) { 890 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret); 891 return -1; 892 } 893 894 /* Queue names / values */ 895 ret = rte_event_dev_xstats_names_get(evdev, 896 RTE_EVENT_DEV_XSTATS_QUEUE, 897 0, xstats_names, ids, XSTATS_MAX); 898 if (ret != 16) { 899 printf("%d: expected 16 stats, got return %d\n", __LINE__, ret); 900 return -1; 901 } 902 903 /* NEGATIVE TEST: with wrong queue passed, 0 stats should be returned */ 904 ret = rte_event_dev_xstats_get(evdev, 905 RTE_EVENT_DEV_XSTATS_QUEUE, 906 1, ids, values, ret); 907 if (ret != -EINVAL) { 908 printf("%d: expected 0 stats, got return %d\n", __LINE__, ret); 909 return -1; 910 } 911 912 ret = rte_event_dev_xstats_get(evdev, 913 RTE_EVENT_DEV_XSTATS_QUEUE, 914 0, ids, values, ret); 915 if (ret != 16) { 916 printf("%d: expected 16 stats, got return %d\n", __LINE__, ret); 917 return -1; 918 } 919 920 /* enqueue packets to check values */ 921 for (i = 0; i < 3; i++) { 922 struct rte_event ev; 923 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); 924 if (!arp) { 925 printf("%d: gen of pkt failed\n", __LINE__); 926 return -1; 927 } 928 ev.queue_id = t->qid[i]; 929 ev.op = RTE_EVENT_OP_NEW; 930 ev.mbuf = arp; 931 ev.flow_id = 7; 932 arp->seqn = i; 933 934 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1); 935 if (err != 1) { 936 printf("%d: Failed to enqueue\n", __LINE__); 937 return -1; 938 } 939 } 940 941 rte_service_run_iter_on_app_lcore(t->service_id, 1); 942 943 /* Device names / values */ 944 int num_stats = rte_event_dev_xstats_names_get(evdev, 945 RTE_EVENT_DEV_XSTATS_DEVICE, 0, 946 xstats_names, ids, XSTATS_MAX); 947 if (num_stats < 0) 948 goto fail; 949 ret = rte_event_dev_xstats_get(evdev, 950 RTE_EVENT_DEV_XSTATS_DEVICE, 951 0, ids, values, num_stats); 952 static const uint64_t expected[] = {3, 3, 0, 1, 0, 0}; 953 for (i = 0; (signed int)i < ret; i++) { 954 if (expected[i] != values[i]) { 955 printf( 956 "%d Error xstat %d (id %d) %s : %"PRIu64 957 ", expect %"PRIu64"\n", 958 __LINE__, i, ids[i], xstats_names[i].name, 959 values[i], expected[i]); 960 goto fail; 961 } 962 } 963 964 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_DEVICE, 965 0, NULL, 0); 966 967 /* ensure reset statistics are zero-ed */ 968 static const uint64_t expected_zero[] = {0, 0, 0, 0, 0, 0}; 969 ret = rte_event_dev_xstats_get(evdev, 970 RTE_EVENT_DEV_XSTATS_DEVICE, 971 0, ids, values, num_stats); 972 for (i = 0; (signed int)i < ret; i++) { 973 if (expected_zero[i] != values[i]) { 974 printf( 975 "%d Error, xstat %d (id %d) %s : %"PRIu64 976 ", expect %"PRIu64"\n", 977 __LINE__, i, ids[i], xstats_names[i].name, 978 values[i], expected_zero[i]); 979 goto fail; 980 } 981 } 982 983 /* port reset checks */ 984 num_stats = rte_event_dev_xstats_names_get(evdev, 985 RTE_EVENT_DEV_XSTATS_PORT, 0, 986 xstats_names, ids, XSTATS_MAX); 987 if (num_stats < 0) 988 goto fail; 989 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, 990 0, ids, values, num_stats); 991 992 static const uint64_t port_expected[] = { 993 3 /* rx */, 994 0 /* tx */, 995 0 /* drop */, 996 0 /* inflights */, 997 0 /* avg pkt cycles */, 998 29 /* credits */, 999 0 /* rx ring used */, 1000 4096 /* rx ring free */, 1001 0 /* cq ring used */, 1002 32 /* cq ring free */, 1003 0 /* dequeue calls */, 1004 /* 10 dequeue burst buckets */ 1005 0, 0, 0, 0, 0, 1006 0, 0, 0, 0, 0, 1007 }; 1008 if (ret != RTE_DIM(port_expected)) { 1009 printf( 1010 "%s %d: wrong number of port stats (%d), expected %zu\n", 1011 __func__, __LINE__, ret, RTE_DIM(port_expected)); 1012 } 1013 1014 for (i = 0; (signed int)i < ret; i++) { 1015 if (port_expected[i] != values[i]) { 1016 printf( 1017 "%s : %d: Error stat %s is %"PRIu64 1018 ", expected %"PRIu64"\n", 1019 __func__, __LINE__, xstats_names[i].name, 1020 values[i], port_expected[i]); 1021 goto fail; 1022 } 1023 } 1024 1025 ret = rte_event_dev_xstats_reset(evdev, RTE_EVENT_DEV_XSTATS_PORT, 1026 0, NULL, 0); 1027 1028 /* ensure reset statistics are zero-ed */ 1029 static const uint64_t port_expected_zero[] = { 1030 0 /* rx */, 1031 0 /* tx */, 1032 0 /* drop */, 1033 0 /* inflights */, 1034 0 /* avg pkt cycles */, 1035 29 /* credits */, 1036 0 /* rx ring used */, 1037 4096 /* rx ring free */, 1038 0 /* cq ring used */, 1039 32 /* cq ring free */, 1040 0 /* dequeue calls */, 1041 /* 10 dequeue burst buckets */ 1042 0, 0, 0, 0, 0, 1043 0, 0, 0, 0, 0, 1044 }; 1045 ret = rte_event_dev_xstats_get(evdev, 1046 RTE_EVENT_DEV_XSTATS_PORT, 1047 0, ids, values, num_stats); 1048 for (i = 0; (signed int)i < ret; i++) { 1049 if (port_expected_zero[i] != values[i]) { 1050 printf( 1051 "%d, Error, xstat %d (id %d) %s : %"PRIu64 1052 ", expect %"PRIu64"\n", 1053 __LINE__, i, ids[i], xstats_names[i].name, 1054 values[i], port_expected_zero[i]); 1055 goto fail; 1056 } 1057 } 1058 1059 /* QUEUE STATS TESTS */ 1060 num_stats = rte_event_dev_xstats_names_get(evdev, 1061 RTE_EVENT_DEV_XSTATS_QUEUE, 0, 1062 xstats_names, ids, XSTATS_MAX); 1063 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 1064 0, ids, values, num_stats); 1065 if (ret < 0) { 1066 printf("xstats get returned %d\n", ret); 1067 goto fail; 1068 } 1069 if ((unsigned int)ret > XSTATS_MAX) 1070 printf("%s %d: more xstats available than space\n", 1071 __func__, __LINE__); 1072 1073 static const uint64_t queue_expected[] = { 1074 3 /* rx */, 1075 3 /* tx */, 1076 0 /* drop */, 1077 3 /* inflights */, 1078 0, 0, 0, 0, /* iq 0, 1, 2, 3 used */ 1079 /* QID-to-Port: pinned_flows, packets */ 1080 0, 0, 1081 0, 0, 1082 1, 3, 1083 0, 0, 1084 }; 1085 for (i = 0; (signed int)i < ret; i++) { 1086 if (queue_expected[i] != values[i]) { 1087 printf( 1088 "%d, Error, xstat %d (id %d) %s : %"PRIu64 1089 ", expect %"PRIu64"\n", 1090 __LINE__, i, ids[i], xstats_names[i].name, 1091 values[i], queue_expected[i]); 1092 goto fail; 1093 } 1094 } 1095 1096 /* Reset the queue stats here */ 1097 ret = rte_event_dev_xstats_reset(evdev, 1098 RTE_EVENT_DEV_XSTATS_QUEUE, 0, 1099 NULL, 1100 0); 1101 1102 /* Verify that the resetable stats are reset, and others are not */ 1103 static const uint64_t queue_expected_zero[] = { 1104 0 /* rx */, 1105 0 /* tx */, 1106 0 /* drop */, 1107 3 /* inflight */, 1108 0, 0, 0, 0, /* 4 iq used */ 1109 /* QID-to-Port: pinned_flows, packets */ 1110 0, 0, 1111 0, 0, 1112 1, 0, 1113 0, 0, 1114 }; 1115 1116 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 0, 1117 ids, values, num_stats); 1118 int fails = 0; 1119 for (i = 0; (signed int)i < ret; i++) { 1120 if (queue_expected_zero[i] != values[i]) { 1121 printf( 1122 "%d, Error, xstat %d (id %d) %s : %"PRIu64 1123 ", expect %"PRIu64"\n", 1124 __LINE__, i, ids[i], xstats_names[i].name, 1125 values[i], queue_expected_zero[i]); 1126 fails++; 1127 } 1128 } 1129 if (fails) { 1130 printf("%d : %d of values were not as expected above\n", 1131 __LINE__, fails); 1132 goto fail; 1133 } 1134 1135 cleanup(t); 1136 return 0; 1137 1138 fail: 1139 rte_event_dev_dump(0, stdout); 1140 cleanup(t); 1141 return -1; 1142 } 1143 1144 1145 static int 1146 xstats_id_abuse_tests(struct test *t) 1147 { 1148 int err; 1149 const uint32_t XSTATS_MAX = 1024; 1150 const uint32_t link_port = 2; 1151 1152 uint32_t ids[XSTATS_MAX]; 1153 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX]; 1154 1155 /* Create instance with 4 ports */ 1156 if (init(t, 1, 4) < 0 || 1157 create_ports(t, 4) < 0 || 1158 create_atomic_qids(t, 1) < 0) { 1159 printf("%d: Error initializing device\n", __LINE__); 1160 goto fail; 1161 } 1162 1163 err = rte_event_port_link(evdev, t->port[link_port], NULL, NULL, 0); 1164 if (err != 1) { 1165 printf("%d: error mapping lb qid\n", __LINE__); 1166 goto fail; 1167 } 1168 1169 if (rte_event_dev_start(evdev) < 0) { 1170 printf("%d: Error with start call\n", __LINE__); 1171 goto fail; 1172 } 1173 1174 /* no test for device, as it ignores the port/q number */ 1175 int num_stats = rte_event_dev_xstats_names_get(evdev, 1176 RTE_EVENT_DEV_XSTATS_PORT, 1177 UINT8_MAX-1, xstats_names, ids, 1178 XSTATS_MAX); 1179 if (num_stats != 0) { 1180 printf("%d: expected %d stats, got return %d\n", __LINE__, 1181 0, num_stats); 1182 goto fail; 1183 } 1184 1185 num_stats = rte_event_dev_xstats_names_get(evdev, 1186 RTE_EVENT_DEV_XSTATS_QUEUE, 1187 UINT8_MAX-1, xstats_names, ids, 1188 XSTATS_MAX); 1189 if (num_stats != 0) { 1190 printf("%d: expected %d stats, got return %d\n", __LINE__, 1191 0, num_stats); 1192 goto fail; 1193 } 1194 1195 cleanup(t); 1196 return 0; 1197 fail: 1198 cleanup(t); 1199 return -1; 1200 } 1201 1202 static int 1203 port_reconfig_credits(struct test *t) 1204 { 1205 if (init(t, 1, 1) < 0) { 1206 printf("%d: Error initializing device\n", __LINE__); 1207 return -1; 1208 } 1209 1210 uint32_t i; 1211 const uint32_t NUM_ITERS = 32; 1212 for (i = 0; i < NUM_ITERS; i++) { 1213 const struct rte_event_queue_conf conf = { 1214 .schedule_type = RTE_SCHED_TYPE_ATOMIC, 1215 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1216 .nb_atomic_flows = 1024, 1217 .nb_atomic_order_sequences = 1024, 1218 }; 1219 if (rte_event_queue_setup(evdev, 0, &conf) < 0) { 1220 printf("%d: error creating qid\n", __LINE__); 1221 return -1; 1222 } 1223 t->qid[0] = 0; 1224 1225 static const struct rte_event_port_conf port_conf = { 1226 .new_event_threshold = 128, 1227 .dequeue_depth = 32, 1228 .enqueue_depth = 64, 1229 .disable_implicit_release = 0, 1230 }; 1231 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) { 1232 printf("%d Error setting up port\n", __LINE__); 1233 return -1; 1234 } 1235 1236 int links = rte_event_port_link(evdev, 0, NULL, NULL, 0); 1237 if (links != 1) { 1238 printf("%d: error mapping lb qid\n", __LINE__); 1239 goto fail; 1240 } 1241 1242 if (rte_event_dev_start(evdev) < 0) { 1243 printf("%d: Error with start call\n", __LINE__); 1244 goto fail; 1245 } 1246 1247 const uint32_t NPKTS = 1; 1248 uint32_t j; 1249 for (j = 0; j < NPKTS; j++) { 1250 struct rte_event ev; 1251 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); 1252 if (!arp) { 1253 printf("%d: gen of pkt failed\n", __LINE__); 1254 goto fail; 1255 } 1256 ev.queue_id = t->qid[0]; 1257 ev.op = RTE_EVENT_OP_NEW; 1258 ev.mbuf = arp; 1259 int err = rte_event_enqueue_burst(evdev, 0, &ev, 1); 1260 if (err != 1) { 1261 printf("%d: Failed to enqueue\n", __LINE__); 1262 rte_event_dev_dump(0, stdout); 1263 goto fail; 1264 } 1265 } 1266 1267 rte_service_run_iter_on_app_lcore(t->service_id, 1); 1268 1269 struct rte_event ev[NPKTS]; 1270 int deq = rte_event_dequeue_burst(evdev, t->port[0], ev, 1271 NPKTS, 0); 1272 if (deq != 1) 1273 printf("%d error; no packet dequeued\n", __LINE__); 1274 1275 /* let cleanup below stop the device on last iter */ 1276 if (i != NUM_ITERS-1) 1277 rte_event_dev_stop(evdev); 1278 } 1279 1280 cleanup(t); 1281 return 0; 1282 fail: 1283 cleanup(t); 1284 return -1; 1285 } 1286 1287 static int 1288 port_single_lb_reconfig(struct test *t) 1289 { 1290 if (init(t, 2, 2) < 0) { 1291 printf("%d: Error initializing device\n", __LINE__); 1292 goto fail; 1293 } 1294 1295 static const struct rte_event_queue_conf conf_lb_atomic = { 1296 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1297 .schedule_type = RTE_SCHED_TYPE_ATOMIC, 1298 .nb_atomic_flows = 1024, 1299 .nb_atomic_order_sequences = 1024, 1300 }; 1301 if (rte_event_queue_setup(evdev, 0, &conf_lb_atomic) < 0) { 1302 printf("%d: error creating qid\n", __LINE__); 1303 goto fail; 1304 } 1305 1306 static const struct rte_event_queue_conf conf_single_link = { 1307 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1308 .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK, 1309 }; 1310 if (rte_event_queue_setup(evdev, 1, &conf_single_link) < 0) { 1311 printf("%d: error creating qid\n", __LINE__); 1312 goto fail; 1313 } 1314 1315 struct rte_event_port_conf port_conf = { 1316 .new_event_threshold = 128, 1317 .dequeue_depth = 32, 1318 .enqueue_depth = 64, 1319 .disable_implicit_release = 0, 1320 }; 1321 if (rte_event_port_setup(evdev, 0, &port_conf) < 0) { 1322 printf("%d Error setting up port\n", __LINE__); 1323 goto fail; 1324 } 1325 if (rte_event_port_setup(evdev, 1, &port_conf) < 0) { 1326 printf("%d Error setting up port\n", __LINE__); 1327 goto fail; 1328 } 1329 1330 /* link port to lb queue */ 1331 uint8_t queue_id = 0; 1332 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) { 1333 printf("%d: error creating link for qid\n", __LINE__); 1334 goto fail; 1335 } 1336 1337 int ret = rte_event_port_unlink(evdev, 0, &queue_id, 1); 1338 if (ret != 1) { 1339 printf("%d: Error unlinking lb port\n", __LINE__); 1340 goto fail; 1341 } 1342 1343 queue_id = 1; 1344 if (rte_event_port_link(evdev, 0, &queue_id, NULL, 1) != 1) { 1345 printf("%d: error creating link for qid\n", __LINE__); 1346 goto fail; 1347 } 1348 1349 queue_id = 0; 1350 int err = rte_event_port_link(evdev, 1, &queue_id, NULL, 1); 1351 if (err != 1) { 1352 printf("%d: error mapping lb qid\n", __LINE__); 1353 goto fail; 1354 } 1355 1356 if (rte_event_dev_start(evdev) < 0) { 1357 printf("%d: Error with start call\n", __LINE__); 1358 goto fail; 1359 } 1360 1361 cleanup(t); 1362 return 0; 1363 fail: 1364 cleanup(t); 1365 return -1; 1366 } 1367 1368 static int 1369 xstats_brute_force(struct test *t) 1370 { 1371 uint32_t i; 1372 const uint32_t XSTATS_MAX = 1024; 1373 uint32_t ids[XSTATS_MAX]; 1374 uint64_t values[XSTATS_MAX]; 1375 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX]; 1376 1377 1378 /* Create instance with 4 ports */ 1379 if (init(t, 1, 4) < 0 || 1380 create_ports(t, 4) < 0 || 1381 create_atomic_qids(t, 1) < 0) { 1382 printf("%d: Error initializing device\n", __LINE__); 1383 return -1; 1384 } 1385 1386 int err = rte_event_port_link(evdev, t->port[0], NULL, NULL, 0); 1387 if (err != 1) { 1388 printf("%d: error mapping lb qid\n", __LINE__); 1389 goto fail; 1390 } 1391 1392 if (rte_event_dev_start(evdev) < 0) { 1393 printf("%d: Error with start call\n", __LINE__); 1394 goto fail; 1395 } 1396 1397 for (i = 0; i < XSTATS_MAX; i++) 1398 ids[i] = i; 1399 1400 for (i = 0; i < 3; i++) { 1401 uint32_t mode = RTE_EVENT_DEV_XSTATS_DEVICE + i; 1402 uint32_t j; 1403 for (j = 0; j < UINT8_MAX; j++) { 1404 rte_event_dev_xstats_names_get(evdev, mode, 1405 j, xstats_names, ids, XSTATS_MAX); 1406 1407 rte_event_dev_xstats_get(evdev, mode, j, ids, 1408 values, XSTATS_MAX); 1409 } 1410 } 1411 1412 cleanup(t); 1413 return 0; 1414 fail: 1415 cleanup(t); 1416 return -1; 1417 } 1418 1419 static int 1420 xstats_id_reset_tests(struct test *t) 1421 { 1422 const int wrk_enq = 2; 1423 int err; 1424 1425 /* Create instance with 4 ports */ 1426 if (init(t, 1, 4) < 0 || 1427 create_ports(t, 4) < 0 || 1428 create_atomic_qids(t, 1) < 0) { 1429 printf("%d: Error initializing device\n", __LINE__); 1430 return -1; 1431 } 1432 1433 /* CQ mapping to QID */ 1434 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0); 1435 if (err != 1) { 1436 printf("%d: error mapping lb qid\n", __LINE__); 1437 goto fail; 1438 } 1439 1440 if (rte_event_dev_start(evdev) < 0) { 1441 printf("%d: Error with start call\n", __LINE__); 1442 goto fail; 1443 } 1444 1445 #define XSTATS_MAX 1024 1446 int ret; 1447 uint32_t i; 1448 uint32_t ids[XSTATS_MAX]; 1449 uint64_t values[XSTATS_MAX]; 1450 struct rte_event_dev_xstats_name xstats_names[XSTATS_MAX]; 1451 1452 for (i = 0; i < XSTATS_MAX; i++) 1453 ids[i] = i; 1454 1455 #define NUM_DEV_STATS 6 1456 /* Device names / values */ 1457 int num_stats = rte_event_dev_xstats_names_get(evdev, 1458 RTE_EVENT_DEV_XSTATS_DEVICE, 1459 0, xstats_names, ids, XSTATS_MAX); 1460 if (num_stats != NUM_DEV_STATS) { 1461 printf("%d: expected %d stats, got return %d\n", __LINE__, 1462 NUM_DEV_STATS, num_stats); 1463 goto fail; 1464 } 1465 ret = rte_event_dev_xstats_get(evdev, 1466 RTE_EVENT_DEV_XSTATS_DEVICE, 1467 0, ids, values, num_stats); 1468 if (ret != NUM_DEV_STATS) { 1469 printf("%d: expected %d stats, got return %d\n", __LINE__, 1470 NUM_DEV_STATS, ret); 1471 goto fail; 1472 } 1473 1474 #define NPKTS 7 1475 for (i = 0; i < NPKTS; i++) { 1476 struct rte_event ev; 1477 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); 1478 if (!arp) { 1479 printf("%d: gen of pkt failed\n", __LINE__); 1480 goto fail; 1481 } 1482 ev.queue_id = t->qid[i]; 1483 ev.op = RTE_EVENT_OP_NEW; 1484 ev.mbuf = arp; 1485 arp->seqn = i; 1486 1487 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1); 1488 if (err != 1) { 1489 printf("%d: Failed to enqueue\n", __LINE__); 1490 goto fail; 1491 } 1492 } 1493 1494 rte_service_run_iter_on_app_lcore(t->service_id, 1); 1495 1496 static const char * const dev_names[] = { 1497 "dev_rx", "dev_tx", "dev_drop", "dev_sched_calls", 1498 "dev_sched_no_iq_enq", "dev_sched_no_cq_enq", 1499 }; 1500 uint64_t dev_expected[] = {NPKTS, NPKTS, 0, 1, 0, 0}; 1501 for (i = 0; (int)i < ret; i++) { 1502 unsigned int id; 1503 uint64_t val = rte_event_dev_xstats_by_name_get(evdev, 1504 dev_names[i], 1505 &id); 1506 if (id != i) { 1507 printf("%d: %s id incorrect, expected %d got %d\n", 1508 __LINE__, dev_names[i], i, id); 1509 goto fail; 1510 } 1511 if (val != dev_expected[i]) { 1512 printf("%d: %s value incorrect, expected %" 1513 PRIu64" got %d\n", __LINE__, dev_names[i], 1514 dev_expected[i], id); 1515 goto fail; 1516 } 1517 /* reset to zero */ 1518 int reset_ret = rte_event_dev_xstats_reset(evdev, 1519 RTE_EVENT_DEV_XSTATS_DEVICE, 0, 1520 &id, 1521 1); 1522 if (reset_ret) { 1523 printf("%d: failed to reset successfully\n", __LINE__); 1524 goto fail; 1525 } 1526 dev_expected[i] = 0; 1527 /* check value again */ 1528 val = rte_event_dev_xstats_by_name_get(evdev, dev_names[i], 0); 1529 if (val != dev_expected[i]) { 1530 printf("%d: %s value incorrect, expected %"PRIu64 1531 " got %"PRIu64"\n", __LINE__, dev_names[i], 1532 dev_expected[i], val); 1533 goto fail; 1534 } 1535 }; 1536 1537 /* 48 is stat offset from start of the devices whole xstats. 1538 * This WILL break every time we add a statistic to a port 1539 * or the device, but there is no other way to test 1540 */ 1541 #define PORT_OFF 48 1542 /* num stats for the tested port. CQ size adds more stats to a port */ 1543 #define NUM_PORT_STATS 21 1544 /* the port to test. */ 1545 #define PORT 2 1546 num_stats = rte_event_dev_xstats_names_get(evdev, 1547 RTE_EVENT_DEV_XSTATS_PORT, PORT, 1548 xstats_names, ids, XSTATS_MAX); 1549 if (num_stats != NUM_PORT_STATS) { 1550 printf("%d: expected %d stats, got return %d\n", 1551 __LINE__, NUM_PORT_STATS, num_stats); 1552 goto fail; 1553 } 1554 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_PORT, PORT, 1555 ids, values, num_stats); 1556 1557 if (ret != NUM_PORT_STATS) { 1558 printf("%d: expected %d stats, got return %d\n", 1559 __LINE__, NUM_PORT_STATS, ret); 1560 goto fail; 1561 } 1562 static const char * const port_names[] = { 1563 "port_2_rx", 1564 "port_2_tx", 1565 "port_2_drop", 1566 "port_2_inflight", 1567 "port_2_avg_pkt_cycles", 1568 "port_2_credits", 1569 "port_2_rx_ring_used", 1570 "port_2_rx_ring_free", 1571 "port_2_cq_ring_used", 1572 "port_2_cq_ring_free", 1573 "port_2_dequeue_calls", 1574 "port_2_dequeues_returning_0", 1575 "port_2_dequeues_returning_1-4", 1576 "port_2_dequeues_returning_5-8", 1577 "port_2_dequeues_returning_9-12", 1578 "port_2_dequeues_returning_13-16", 1579 "port_2_dequeues_returning_17-20", 1580 "port_2_dequeues_returning_21-24", 1581 "port_2_dequeues_returning_25-28", 1582 "port_2_dequeues_returning_29-32", 1583 "port_2_dequeues_returning_33-36", 1584 }; 1585 uint64_t port_expected[] = { 1586 0, /* rx */ 1587 NPKTS, /* tx */ 1588 0, /* drop */ 1589 NPKTS, /* inflight */ 1590 0, /* avg pkt cycles */ 1591 0, /* credits */ 1592 0, /* rx ring used */ 1593 4096, /* rx ring free */ 1594 NPKTS, /* cq ring used */ 1595 25, /* cq ring free */ 1596 0, /* dequeue zero calls */ 1597 0, 0, 0, 0, 0, /* 10 dequeue buckets */ 1598 0, 0, 0, 0, 0, 1599 }; 1600 uint64_t port_expected_zero[] = { 1601 0, /* rx */ 1602 0, /* tx */ 1603 0, /* drop */ 1604 NPKTS, /* inflight */ 1605 0, /* avg pkt cycles */ 1606 0, /* credits */ 1607 0, /* rx ring used */ 1608 4096, /* rx ring free */ 1609 NPKTS, /* cq ring used */ 1610 25, /* cq ring free */ 1611 0, /* dequeue zero calls */ 1612 0, 0, 0, 0, 0, /* 10 dequeue buckets */ 1613 0, 0, 0, 0, 0, 1614 }; 1615 if (RTE_DIM(port_expected) != NUM_PORT_STATS || 1616 RTE_DIM(port_names) != NUM_PORT_STATS) { 1617 printf("%d: port array of wrong size\n", __LINE__); 1618 goto fail; 1619 } 1620 1621 int failed = 0; 1622 for (i = 0; (int)i < ret; i++) { 1623 unsigned int id; 1624 uint64_t val = rte_event_dev_xstats_by_name_get(evdev, 1625 port_names[i], 1626 &id); 1627 if (id != i + PORT_OFF) { 1628 printf("%d: %s id incorrect, expected %d got %d\n", 1629 __LINE__, port_names[i], i+PORT_OFF, 1630 id); 1631 failed = 1; 1632 } 1633 if (val != port_expected[i]) { 1634 printf("%d: %s value incorrect, expected %"PRIu64 1635 " got %d\n", __LINE__, port_names[i], 1636 port_expected[i], id); 1637 failed = 1; 1638 } 1639 /* reset to zero */ 1640 int reset_ret = rte_event_dev_xstats_reset(evdev, 1641 RTE_EVENT_DEV_XSTATS_PORT, PORT, 1642 &id, 1643 1); 1644 if (reset_ret) { 1645 printf("%d: failed to reset successfully\n", __LINE__); 1646 failed = 1; 1647 } 1648 /* check value again */ 1649 val = rte_event_dev_xstats_by_name_get(evdev, port_names[i], 0); 1650 if (val != port_expected_zero[i]) { 1651 printf("%d: %s value incorrect, expected %"PRIu64 1652 " got %"PRIu64"\n", __LINE__, port_names[i], 1653 port_expected_zero[i], val); 1654 failed = 1; 1655 } 1656 }; 1657 if (failed) 1658 goto fail; 1659 1660 /* num queue stats */ 1661 #define NUM_Q_STATS 16 1662 /* queue offset from start of the devices whole xstats. 1663 * This will break every time we add a statistic to a device/port/queue 1664 */ 1665 #define QUEUE_OFF 90 1666 const uint32_t queue = 0; 1667 num_stats = rte_event_dev_xstats_names_get(evdev, 1668 RTE_EVENT_DEV_XSTATS_QUEUE, queue, 1669 xstats_names, ids, XSTATS_MAX); 1670 if (num_stats != NUM_Q_STATS) { 1671 printf("%d: expected %d stats, got return %d\n", 1672 __LINE__, NUM_Q_STATS, num_stats); 1673 goto fail; 1674 } 1675 ret = rte_event_dev_xstats_get(evdev, RTE_EVENT_DEV_XSTATS_QUEUE, 1676 queue, ids, values, num_stats); 1677 if (ret != NUM_Q_STATS) { 1678 printf("%d: expected 21 stats, got return %d\n", __LINE__, ret); 1679 goto fail; 1680 } 1681 static const char * const queue_names[] = { 1682 "qid_0_rx", 1683 "qid_0_tx", 1684 "qid_0_drop", 1685 "qid_0_inflight", 1686 "qid_0_iq_0_used", 1687 "qid_0_iq_1_used", 1688 "qid_0_iq_2_used", 1689 "qid_0_iq_3_used", 1690 "qid_0_port_0_pinned_flows", 1691 "qid_0_port_0_packets", 1692 "qid_0_port_1_pinned_flows", 1693 "qid_0_port_1_packets", 1694 "qid_0_port_2_pinned_flows", 1695 "qid_0_port_2_packets", 1696 "qid_0_port_3_pinned_flows", 1697 "qid_0_port_3_packets", 1698 }; 1699 uint64_t queue_expected[] = { 1700 7, /* rx */ 1701 7, /* tx */ 1702 0, /* drop */ 1703 7, /* inflight */ 1704 0, /* iq 0 used */ 1705 0, /* iq 1 used */ 1706 0, /* iq 2 used */ 1707 0, /* iq 3 used */ 1708 /* QID-to-Port: pinned_flows, packets */ 1709 0, 0, 1710 0, 0, 1711 1, 7, 1712 0, 0, 1713 }; 1714 uint64_t queue_expected_zero[] = { 1715 0, /* rx */ 1716 0, /* tx */ 1717 0, /* drop */ 1718 7, /* inflight */ 1719 0, /* iq 0 used */ 1720 0, /* iq 1 used */ 1721 0, /* iq 2 used */ 1722 0, /* iq 3 used */ 1723 /* QID-to-Port: pinned_flows, packets */ 1724 0, 0, 1725 0, 0, 1726 1, 0, 1727 0, 0, 1728 }; 1729 if (RTE_DIM(queue_expected) != NUM_Q_STATS || 1730 RTE_DIM(queue_expected_zero) != NUM_Q_STATS || 1731 RTE_DIM(queue_names) != NUM_Q_STATS) { 1732 printf("%d : queue array of wrong size\n", __LINE__); 1733 goto fail; 1734 } 1735 1736 failed = 0; 1737 for (i = 0; (int)i < ret; i++) { 1738 unsigned int id; 1739 uint64_t val = rte_event_dev_xstats_by_name_get(evdev, 1740 queue_names[i], 1741 &id); 1742 if (id != i + QUEUE_OFF) { 1743 printf("%d: %s id incorrect, expected %d got %d\n", 1744 __LINE__, queue_names[i], i+QUEUE_OFF, 1745 id); 1746 failed = 1; 1747 } 1748 if (val != queue_expected[i]) { 1749 printf("%d: %d: %s value , expected %"PRIu64 1750 " got %"PRIu64"\n", i, __LINE__, 1751 queue_names[i], queue_expected[i], val); 1752 failed = 1; 1753 } 1754 /* reset to zero */ 1755 int reset_ret = rte_event_dev_xstats_reset(evdev, 1756 RTE_EVENT_DEV_XSTATS_QUEUE, 1757 queue, &id, 1); 1758 if (reset_ret) { 1759 printf("%d: failed to reset successfully\n", __LINE__); 1760 failed = 1; 1761 } 1762 /* check value again */ 1763 val = rte_event_dev_xstats_by_name_get(evdev, queue_names[i], 1764 0); 1765 if (val != queue_expected_zero[i]) { 1766 printf("%d: %s value incorrect, expected %"PRIu64 1767 " got %"PRIu64"\n", __LINE__, queue_names[i], 1768 queue_expected_zero[i], val); 1769 failed = 1; 1770 } 1771 }; 1772 1773 if (failed) 1774 goto fail; 1775 1776 cleanup(t); 1777 return 0; 1778 fail: 1779 cleanup(t); 1780 return -1; 1781 } 1782 1783 static int 1784 ordered_reconfigure(struct test *t) 1785 { 1786 if (init(t, 1, 1) < 0 || 1787 create_ports(t, 1) < 0) { 1788 printf("%d: Error initializing device\n", __LINE__); 1789 return -1; 1790 } 1791 1792 const struct rte_event_queue_conf conf = { 1793 .schedule_type = RTE_SCHED_TYPE_ORDERED, 1794 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 1795 .nb_atomic_flows = 1024, 1796 .nb_atomic_order_sequences = 1024, 1797 }; 1798 1799 if (rte_event_queue_setup(evdev, 0, &conf) < 0) { 1800 printf("%d: error creating qid\n", __LINE__); 1801 goto failed; 1802 } 1803 1804 if (rte_event_queue_setup(evdev, 0, &conf) < 0) { 1805 printf("%d: error creating qid, for 2nd time\n", __LINE__); 1806 goto failed; 1807 } 1808 1809 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0); 1810 if (rte_event_dev_start(evdev) < 0) { 1811 printf("%d: Error with start call\n", __LINE__); 1812 return -1; 1813 } 1814 1815 cleanup(t); 1816 return 0; 1817 failed: 1818 cleanup(t); 1819 return -1; 1820 } 1821 1822 static int 1823 qid_priorities(struct test *t) 1824 { 1825 /* Test works by having a CQ with enough empty space for all packets, 1826 * and enqueueing 3 packets to 3 QIDs. They must return based on the 1827 * priority of the QID, not the ingress order, to pass the test 1828 */ 1829 unsigned int i; 1830 /* Create instance with 1 ports, and 3 qids */ 1831 if (init(t, 3, 1) < 0 || 1832 create_ports(t, 1) < 0) { 1833 printf("%d: Error initializing device\n", __LINE__); 1834 return -1; 1835 } 1836 1837 for (i = 0; i < 3; i++) { 1838 /* Create QID */ 1839 const struct rte_event_queue_conf conf = { 1840 .schedule_type = RTE_SCHED_TYPE_ATOMIC, 1841 /* increase priority (0 == highest), as we go */ 1842 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL - i, 1843 .nb_atomic_flows = 1024, 1844 .nb_atomic_order_sequences = 1024, 1845 }; 1846 1847 if (rte_event_queue_setup(evdev, i, &conf) < 0) { 1848 printf("%d: error creating qid %d\n", __LINE__, i); 1849 return -1; 1850 } 1851 t->qid[i] = i; 1852 } 1853 t->nb_qids = i; 1854 /* map all QIDs to port */ 1855 rte_event_port_link(evdev, t->port[0], NULL, NULL, 0); 1856 1857 if (rte_event_dev_start(evdev) < 0) { 1858 printf("%d: Error with start call\n", __LINE__); 1859 return -1; 1860 } 1861 1862 /* enqueue 3 packets, setting seqn and QID to check priority */ 1863 for (i = 0; i < 3; i++) { 1864 struct rte_event ev; 1865 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); 1866 if (!arp) { 1867 printf("%d: gen of pkt failed\n", __LINE__); 1868 return -1; 1869 } 1870 ev.queue_id = t->qid[i]; 1871 ev.op = RTE_EVENT_OP_NEW; 1872 ev.mbuf = arp; 1873 arp->seqn = i; 1874 1875 int err = rte_event_enqueue_burst(evdev, t->port[0], &ev, 1); 1876 if (err != 1) { 1877 printf("%d: Failed to enqueue\n", __LINE__); 1878 return -1; 1879 } 1880 } 1881 1882 rte_service_run_iter_on_app_lcore(t->service_id, 1); 1883 1884 /* dequeue packets, verify priority was upheld */ 1885 struct rte_event ev[32]; 1886 uint32_t deq_pkts = 1887 rte_event_dequeue_burst(evdev, t->port[0], ev, 32, 0); 1888 if (deq_pkts != 3) { 1889 printf("%d: failed to deq packets\n", __LINE__); 1890 rte_event_dev_dump(evdev, stdout); 1891 return -1; 1892 } 1893 for (i = 0; i < 3; i++) { 1894 if (ev[i].mbuf->seqn != 2-i) { 1895 printf( 1896 "%d: qid priority test: seqn %d incorrectly prioritized\n", 1897 __LINE__, i); 1898 } 1899 } 1900 1901 cleanup(t); 1902 return 0; 1903 } 1904 1905 static int 1906 load_balancing(struct test *t) 1907 { 1908 const int rx_enq = 0; 1909 int err; 1910 uint32_t i; 1911 1912 if (init(t, 1, 4) < 0 || 1913 create_ports(t, 4) < 0 || 1914 create_atomic_qids(t, 1) < 0) { 1915 printf("%d: Error initializing device\n", __LINE__); 1916 return -1; 1917 } 1918 1919 for (i = 0; i < 3; i++) { 1920 /* map port 1 - 3 inclusive */ 1921 if (rte_event_port_link(evdev, t->port[i+1], &t->qid[0], 1922 NULL, 1) != 1) { 1923 printf("%d: error mapping qid to port %d\n", 1924 __LINE__, i); 1925 return -1; 1926 } 1927 } 1928 1929 if (rte_event_dev_start(evdev) < 0) { 1930 printf("%d: Error with start call\n", __LINE__); 1931 return -1; 1932 } 1933 1934 /************** FORWARD ****************/ 1935 /* 1936 * Create a set of flows that test the load-balancing operation of the 1937 * implementation. Fill CQ 0 and 1 with flows 0 and 1, and test 1938 * with a new flow, which should be sent to the 3rd mapped CQ 1939 */ 1940 static uint32_t flows[] = {0, 1, 1, 0, 0, 2, 2, 0, 2}; 1941 1942 for (i = 0; i < RTE_DIM(flows); i++) { 1943 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); 1944 if (!arp) { 1945 printf("%d: gen of pkt failed\n", __LINE__); 1946 return -1; 1947 } 1948 1949 struct rte_event ev = { 1950 .op = RTE_EVENT_OP_NEW, 1951 .queue_id = t->qid[0], 1952 .flow_id = flows[i], 1953 .mbuf = arp, 1954 }; 1955 /* generate pkt and enqueue */ 1956 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); 1957 if (err < 0) { 1958 printf("%d: Failed to enqueue\n", __LINE__); 1959 return -1; 1960 } 1961 } 1962 1963 rte_service_run_iter_on_app_lcore(t->service_id, 1); 1964 1965 struct test_event_dev_stats stats; 1966 err = test_event_dev_stats_get(evdev, &stats); 1967 if (err) { 1968 printf("%d: failed to get stats\n", __LINE__); 1969 return -1; 1970 } 1971 1972 if (stats.port_inflight[1] != 4) { 1973 printf("%d:%s: port 1 inflight not correct\n", __LINE__, 1974 __func__); 1975 return -1; 1976 } 1977 if (stats.port_inflight[2] != 2) { 1978 printf("%d:%s: port 2 inflight not correct\n", __LINE__, 1979 __func__); 1980 return -1; 1981 } 1982 if (stats.port_inflight[3] != 3) { 1983 printf("%d:%s: port 3 inflight not correct\n", __LINE__, 1984 __func__); 1985 return -1; 1986 } 1987 1988 cleanup(t); 1989 return 0; 1990 } 1991 1992 static int 1993 load_balancing_history(struct test *t) 1994 { 1995 struct test_event_dev_stats stats = {0}; 1996 const int rx_enq = 0; 1997 int err; 1998 uint32_t i; 1999 2000 /* Create instance with 1 atomic QID going to 3 ports + 1 prod port */ 2001 if (init(t, 1, 4) < 0 || 2002 create_ports(t, 4) < 0 || 2003 create_atomic_qids(t, 1) < 0) 2004 return -1; 2005 2006 /* CQ mapping to QID */ 2007 if (rte_event_port_link(evdev, t->port[1], &t->qid[0], NULL, 1) != 1) { 2008 printf("%d: error mapping port 1 qid\n", __LINE__); 2009 return -1; 2010 } 2011 if (rte_event_port_link(evdev, t->port[2], &t->qid[0], NULL, 1) != 1) { 2012 printf("%d: error mapping port 2 qid\n", __LINE__); 2013 return -1; 2014 } 2015 if (rte_event_port_link(evdev, t->port[3], &t->qid[0], NULL, 1) != 1) { 2016 printf("%d: error mapping port 3 qid\n", __LINE__); 2017 return -1; 2018 } 2019 if (rte_event_dev_start(evdev) < 0) { 2020 printf("%d: Error with start call\n", __LINE__); 2021 return -1; 2022 } 2023 2024 /* 2025 * Create a set of flows that test the load-balancing operation of the 2026 * implementation. Fill CQ 0, 1 and 2 with flows 0, 1 and 2, drop 2027 * the packet from CQ 0, send in a new set of flows. Ensure that: 2028 * 1. The new flow 3 gets into the empty CQ0 2029 * 2. packets for existing flow gets added into CQ1 2030 * 3. Next flow 0 pkt is now onto CQ2, since CQ0 and CQ1 now contain 2031 * more outstanding pkts 2032 * 2033 * This test makes sure that when a flow ends (i.e. all packets 2034 * have been completed for that flow), that the flow can be moved 2035 * to a different CQ when new packets come in for that flow. 2036 */ 2037 static uint32_t flows1[] = {0, 1, 1, 2}; 2038 2039 for (i = 0; i < RTE_DIM(flows1); i++) { 2040 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); 2041 struct rte_event ev = { 2042 .flow_id = flows1[i], 2043 .op = RTE_EVENT_OP_NEW, 2044 .queue_id = t->qid[0], 2045 .event_type = RTE_EVENT_TYPE_CPU, 2046 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 2047 .mbuf = arp 2048 }; 2049 2050 if (!arp) { 2051 printf("%d: gen of pkt failed\n", __LINE__); 2052 return -1; 2053 } 2054 arp->hash.rss = flows1[i]; 2055 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); 2056 if (err < 0) { 2057 printf("%d: Failed to enqueue\n", __LINE__); 2058 return -1; 2059 } 2060 } 2061 2062 /* call the scheduler */ 2063 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2064 2065 /* Dequeue the flow 0 packet from port 1, so that we can then drop */ 2066 struct rte_event ev; 2067 if (!rte_event_dequeue_burst(evdev, t->port[1], &ev, 1, 0)) { 2068 printf("%d: failed to dequeue\n", __LINE__); 2069 return -1; 2070 } 2071 if (ev.mbuf->hash.rss != flows1[0]) { 2072 printf("%d: unexpected flow received\n", __LINE__); 2073 return -1; 2074 } 2075 2076 /* drop the flow 0 packet from port 1 */ 2077 rte_event_enqueue_burst(evdev, t->port[1], &release_ev, 1); 2078 2079 /* call the scheduler */ 2080 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2081 2082 /* 2083 * Set up the next set of flows, first a new flow to fill up 2084 * CQ 0, so that the next flow 0 packet should go to CQ2 2085 */ 2086 static uint32_t flows2[] = { 3, 3, 3, 1, 1, 0 }; 2087 2088 for (i = 0; i < RTE_DIM(flows2); i++) { 2089 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); 2090 struct rte_event ev = { 2091 .flow_id = flows2[i], 2092 .op = RTE_EVENT_OP_NEW, 2093 .queue_id = t->qid[0], 2094 .event_type = RTE_EVENT_TYPE_CPU, 2095 .priority = RTE_EVENT_DEV_PRIORITY_NORMAL, 2096 .mbuf = arp 2097 }; 2098 2099 if (!arp) { 2100 printf("%d: gen of pkt failed\n", __LINE__); 2101 return -1; 2102 } 2103 arp->hash.rss = flows2[i]; 2104 2105 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); 2106 if (err < 0) { 2107 printf("%d: Failed to enqueue\n", __LINE__); 2108 return -1; 2109 } 2110 } 2111 2112 /* schedule */ 2113 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2114 2115 err = test_event_dev_stats_get(evdev, &stats); 2116 if (err) { 2117 printf("%d:failed to get stats\n", __LINE__); 2118 return -1; 2119 } 2120 2121 /* 2122 * Now check the resulting inflights on each port. 2123 */ 2124 if (stats.port_inflight[1] != 3) { 2125 printf("%d:%s: port 1 inflight not correct\n", __LINE__, 2126 __func__); 2127 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n", 2128 (unsigned int)stats.port_inflight[1], 2129 (unsigned int)stats.port_inflight[2], 2130 (unsigned int)stats.port_inflight[3]); 2131 return -1; 2132 } 2133 if (stats.port_inflight[2] != 4) { 2134 printf("%d:%s: port 2 inflight not correct\n", __LINE__, 2135 __func__); 2136 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n", 2137 (unsigned int)stats.port_inflight[1], 2138 (unsigned int)stats.port_inflight[2], 2139 (unsigned int)stats.port_inflight[3]); 2140 return -1; 2141 } 2142 if (stats.port_inflight[3] != 2) { 2143 printf("%d:%s: port 3 inflight not correct\n", __LINE__, 2144 __func__); 2145 printf("Inflights, ports 1, 2, 3: %u, %u, %u\n", 2146 (unsigned int)stats.port_inflight[1], 2147 (unsigned int)stats.port_inflight[2], 2148 (unsigned int)stats.port_inflight[3]); 2149 return -1; 2150 } 2151 2152 for (i = 1; i <= 3; i++) { 2153 struct rte_event ev; 2154 while (rte_event_dequeue_burst(evdev, i, &ev, 1, 0)) 2155 rte_event_enqueue_burst(evdev, i, &release_ev, 1); 2156 } 2157 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2158 2159 cleanup(t); 2160 return 0; 2161 } 2162 2163 static int 2164 invalid_qid(struct test *t) 2165 { 2166 struct test_event_dev_stats stats; 2167 const int rx_enq = 0; 2168 int err; 2169 uint32_t i; 2170 2171 if (init(t, 1, 4) < 0 || 2172 create_ports(t, 4) < 0 || 2173 create_atomic_qids(t, 1) < 0) { 2174 printf("%d: Error initializing device\n", __LINE__); 2175 return -1; 2176 } 2177 2178 /* CQ mapping to QID */ 2179 for (i = 0; i < 4; i++) { 2180 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], 2181 NULL, 1); 2182 if (err != 1) { 2183 printf("%d: error mapping port 1 qid\n", __LINE__); 2184 return -1; 2185 } 2186 } 2187 2188 if (rte_event_dev_start(evdev) < 0) { 2189 printf("%d: Error with start call\n", __LINE__); 2190 return -1; 2191 } 2192 2193 /* 2194 * Send in a packet with an invalid qid to the scheduler. 2195 * We should see the packed enqueued OK, but the inflights for 2196 * that packet should not be incremented, and the rx_dropped 2197 * should be incremented. 2198 */ 2199 static uint32_t flows1[] = {20}; 2200 2201 for (i = 0; i < RTE_DIM(flows1); i++) { 2202 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); 2203 if (!arp) { 2204 printf("%d: gen of pkt failed\n", __LINE__); 2205 return -1; 2206 } 2207 2208 struct rte_event ev = { 2209 .op = RTE_EVENT_OP_NEW, 2210 .queue_id = t->qid[0] + flows1[i], 2211 .flow_id = i, 2212 .mbuf = arp, 2213 }; 2214 /* generate pkt and enqueue */ 2215 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); 2216 if (err < 0) { 2217 printf("%d: Failed to enqueue\n", __LINE__); 2218 return -1; 2219 } 2220 } 2221 2222 /* call the scheduler */ 2223 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2224 2225 err = test_event_dev_stats_get(evdev, &stats); 2226 if (err) { 2227 printf("%d: failed to get stats\n", __LINE__); 2228 return -1; 2229 } 2230 2231 /* 2232 * Now check the resulting inflights on the port, and the rx_dropped. 2233 */ 2234 if (stats.port_inflight[0] != 0) { 2235 printf("%d:%s: port 1 inflight count not correct\n", __LINE__, 2236 __func__); 2237 rte_event_dev_dump(evdev, stdout); 2238 return -1; 2239 } 2240 if (stats.port_rx_dropped[0] != 1) { 2241 printf("%d:%s: port 1 drops\n", __LINE__, __func__); 2242 rte_event_dev_dump(evdev, stdout); 2243 return -1; 2244 } 2245 /* each packet drop should only be counted in one place - port or dev */ 2246 if (stats.rx_dropped != 0) { 2247 printf("%d:%s: port 1 dropped count not correct\n", __LINE__, 2248 __func__); 2249 rte_event_dev_dump(evdev, stdout); 2250 return -1; 2251 } 2252 2253 cleanup(t); 2254 return 0; 2255 } 2256 2257 static int 2258 single_packet(struct test *t) 2259 { 2260 const uint32_t MAGIC_SEQN = 7321; 2261 struct rte_event ev; 2262 struct test_event_dev_stats stats; 2263 const int rx_enq = 0; 2264 const int wrk_enq = 2; 2265 int err; 2266 2267 /* Create instance with 4 ports */ 2268 if (init(t, 1, 4) < 0 || 2269 create_ports(t, 4) < 0 || 2270 create_atomic_qids(t, 1) < 0) { 2271 printf("%d: Error initializing device\n", __LINE__); 2272 return -1; 2273 } 2274 2275 /* CQ mapping to QID */ 2276 err = rte_event_port_link(evdev, t->port[wrk_enq], NULL, NULL, 0); 2277 if (err != 1) { 2278 printf("%d: error mapping lb qid\n", __LINE__); 2279 cleanup(t); 2280 return -1; 2281 } 2282 2283 if (rte_event_dev_start(evdev) < 0) { 2284 printf("%d: Error with start call\n", __LINE__); 2285 return -1; 2286 } 2287 2288 /************** Gen pkt and enqueue ****************/ 2289 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); 2290 if (!arp) { 2291 printf("%d: gen of pkt failed\n", __LINE__); 2292 return -1; 2293 } 2294 2295 ev.op = RTE_EVENT_OP_NEW; 2296 ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL; 2297 ev.mbuf = arp; 2298 ev.queue_id = 0; 2299 ev.flow_id = 3; 2300 arp->seqn = MAGIC_SEQN; 2301 2302 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); 2303 if (err < 0) { 2304 printf("%d: Failed to enqueue\n", __LINE__); 2305 return -1; 2306 } 2307 2308 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2309 2310 err = test_event_dev_stats_get(evdev, &stats); 2311 if (err) { 2312 printf("%d: failed to get stats\n", __LINE__); 2313 return -1; 2314 } 2315 2316 if (stats.rx_pkts != 1 || 2317 stats.tx_pkts != 1 || 2318 stats.port_inflight[wrk_enq] != 1) { 2319 printf("%d: Sched core didn't handle pkt as expected\n", 2320 __LINE__); 2321 rte_event_dev_dump(evdev, stdout); 2322 return -1; 2323 } 2324 2325 uint32_t deq_pkts; 2326 2327 deq_pkts = rte_event_dequeue_burst(evdev, t->port[wrk_enq], &ev, 1, 0); 2328 if (deq_pkts < 1) { 2329 printf("%d: Failed to deq\n", __LINE__); 2330 return -1; 2331 } 2332 2333 err = test_event_dev_stats_get(evdev, &stats); 2334 if (err) { 2335 printf("%d: failed to get stats\n", __LINE__); 2336 return -1; 2337 } 2338 2339 err = test_event_dev_stats_get(evdev, &stats); 2340 if (ev.mbuf->seqn != MAGIC_SEQN) { 2341 printf("%d: magic sequence number not dequeued\n", __LINE__); 2342 return -1; 2343 } 2344 2345 rte_pktmbuf_free(ev.mbuf); 2346 err = rte_event_enqueue_burst(evdev, t->port[wrk_enq], &release_ev, 1); 2347 if (err < 0) { 2348 printf("%d: Failed to enqueue\n", __LINE__); 2349 return -1; 2350 } 2351 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2352 2353 err = test_event_dev_stats_get(evdev, &stats); 2354 if (stats.port_inflight[wrk_enq] != 0) { 2355 printf("%d: port inflight not correct\n", __LINE__); 2356 return -1; 2357 } 2358 2359 cleanup(t); 2360 return 0; 2361 } 2362 2363 static int 2364 inflight_counts(struct test *t) 2365 { 2366 struct rte_event ev; 2367 struct test_event_dev_stats stats; 2368 const int rx_enq = 0; 2369 const int p1 = 1; 2370 const int p2 = 2; 2371 int err; 2372 int i; 2373 2374 /* Create instance with 4 ports */ 2375 if (init(t, 2, 3) < 0 || 2376 create_ports(t, 3) < 0 || 2377 create_atomic_qids(t, 2) < 0) { 2378 printf("%d: Error initializing device\n", __LINE__); 2379 return -1; 2380 } 2381 2382 /* CQ mapping to QID */ 2383 err = rte_event_port_link(evdev, t->port[p1], &t->qid[0], NULL, 1); 2384 if (err != 1) { 2385 printf("%d: error mapping lb qid\n", __LINE__); 2386 cleanup(t); 2387 return -1; 2388 } 2389 err = rte_event_port_link(evdev, t->port[p2], &t->qid[1], NULL, 1); 2390 if (err != 1) { 2391 printf("%d: error mapping lb qid\n", __LINE__); 2392 cleanup(t); 2393 return -1; 2394 } 2395 2396 if (rte_event_dev_start(evdev) < 0) { 2397 printf("%d: Error with start call\n", __LINE__); 2398 return -1; 2399 } 2400 2401 /************** FORWARD ****************/ 2402 #define QID1_NUM 5 2403 for (i = 0; i < QID1_NUM; i++) { 2404 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); 2405 2406 if (!arp) { 2407 printf("%d: gen of pkt failed\n", __LINE__); 2408 goto err; 2409 } 2410 2411 ev.queue_id = t->qid[0]; 2412 ev.op = RTE_EVENT_OP_NEW; 2413 ev.mbuf = arp; 2414 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); 2415 if (err != 1) { 2416 printf("%d: Failed to enqueue\n", __LINE__); 2417 goto err; 2418 } 2419 } 2420 #define QID2_NUM 3 2421 for (i = 0; i < QID2_NUM; i++) { 2422 struct rte_mbuf *arp = rte_gen_arp(0, t->mbuf_pool); 2423 2424 if (!arp) { 2425 printf("%d: gen of pkt failed\n", __LINE__); 2426 goto err; 2427 } 2428 ev.queue_id = t->qid[1]; 2429 ev.op = RTE_EVENT_OP_NEW; 2430 ev.mbuf = arp; 2431 err = rte_event_enqueue_burst(evdev, t->port[rx_enq], &ev, 1); 2432 if (err != 1) { 2433 printf("%d: Failed to enqueue\n", __LINE__); 2434 goto err; 2435 } 2436 } 2437 2438 /* schedule */ 2439 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2440 2441 err = test_event_dev_stats_get(evdev, &stats); 2442 if (err) { 2443 printf("%d: failed to get stats\n", __LINE__); 2444 goto err; 2445 } 2446 2447 if (stats.rx_pkts != QID1_NUM + QID2_NUM || 2448 stats.tx_pkts != QID1_NUM + QID2_NUM) { 2449 printf("%d: Sched core didn't handle pkt as expected\n", 2450 __LINE__); 2451 goto err; 2452 } 2453 2454 if (stats.port_inflight[p1] != QID1_NUM) { 2455 printf("%d: %s port 1 inflight not correct\n", __LINE__, 2456 __func__); 2457 goto err; 2458 } 2459 if (stats.port_inflight[p2] != QID2_NUM) { 2460 printf("%d: %s port 2 inflight not correct\n", __LINE__, 2461 __func__); 2462 goto err; 2463 } 2464 2465 /************** DEQUEUE INFLIGHT COUNT CHECKS ****************/ 2466 /* port 1 */ 2467 struct rte_event events[QID1_NUM + QID2_NUM]; 2468 uint32_t deq_pkts = rte_event_dequeue_burst(evdev, t->port[p1], events, 2469 RTE_DIM(events), 0); 2470 2471 if (deq_pkts != QID1_NUM) { 2472 printf("%d: Port 1: DEQUEUE inflight failed\n", __LINE__); 2473 goto err; 2474 } 2475 err = test_event_dev_stats_get(evdev, &stats); 2476 if (stats.port_inflight[p1] != QID1_NUM) { 2477 printf("%d: port 1 inflight decrement after DEQ != 0\n", 2478 __LINE__); 2479 goto err; 2480 } 2481 for (i = 0; i < QID1_NUM; i++) { 2482 err = rte_event_enqueue_burst(evdev, t->port[p1], &release_ev, 2483 1); 2484 if (err != 1) { 2485 printf("%d: %s rte enqueue of inf release failed\n", 2486 __LINE__, __func__); 2487 goto err; 2488 } 2489 } 2490 2491 /* 2492 * As the scheduler core decrements inflights, it needs to run to 2493 * process packets to act on the drop messages 2494 */ 2495 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2496 2497 err = test_event_dev_stats_get(evdev, &stats); 2498 if (stats.port_inflight[p1] != 0) { 2499 printf("%d: port 1 inflight NON NULL after DROP\n", __LINE__); 2500 goto err; 2501 } 2502 2503 /* port2 */ 2504 deq_pkts = rte_event_dequeue_burst(evdev, t->port[p2], events, 2505 RTE_DIM(events), 0); 2506 if (deq_pkts != QID2_NUM) { 2507 printf("%d: Port 2: DEQUEUE inflight failed\n", __LINE__); 2508 goto err; 2509 } 2510 err = test_event_dev_stats_get(evdev, &stats); 2511 if (stats.port_inflight[p2] != QID2_NUM) { 2512 printf("%d: port 1 inflight decrement after DEQ != 0\n", 2513 __LINE__); 2514 goto err; 2515 } 2516 for (i = 0; i < QID2_NUM; i++) { 2517 err = rte_event_enqueue_burst(evdev, t->port[p2], &release_ev, 2518 1); 2519 if (err != 1) { 2520 printf("%d: %s rte enqueue of inf release failed\n", 2521 __LINE__, __func__); 2522 goto err; 2523 } 2524 } 2525 2526 /* 2527 * As the scheduler core decrements inflights, it needs to run to 2528 * process packets to act on the drop messages 2529 */ 2530 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2531 2532 err = test_event_dev_stats_get(evdev, &stats); 2533 if (stats.port_inflight[p2] != 0) { 2534 printf("%d: port 2 inflight NON NULL after DROP\n", __LINE__); 2535 goto err; 2536 } 2537 cleanup(t); 2538 return 0; 2539 2540 err: 2541 rte_event_dev_dump(evdev, stdout); 2542 cleanup(t); 2543 return -1; 2544 } 2545 2546 static int 2547 parallel_basic(struct test *t, int check_order) 2548 { 2549 const uint8_t rx_port = 0; 2550 const uint8_t w1_port = 1; 2551 const uint8_t w3_port = 3; 2552 const uint8_t tx_port = 4; 2553 int err; 2554 int i; 2555 uint32_t deq_pkts, j; 2556 struct rte_mbuf *mbufs[3]; 2557 struct rte_mbuf *mbufs_out[3] = { 0 }; 2558 const uint32_t MAGIC_SEQN = 1234; 2559 2560 /* Create instance with 4 ports */ 2561 if (init(t, 2, tx_port + 1) < 0 || 2562 create_ports(t, tx_port + 1) < 0 || 2563 (check_order ? create_ordered_qids(t, 1) : 2564 create_unordered_qids(t, 1)) < 0 || 2565 create_directed_qids(t, 1, &tx_port)) { 2566 printf("%d: Error initializing device\n", __LINE__); 2567 return -1; 2568 } 2569 2570 /* 2571 * CQ mapping to QID 2572 * We need three ports, all mapped to the same ordered qid0. Then we'll 2573 * take a packet out to each port, re-enqueue in reverse order, 2574 * then make sure the reordering has taken place properly when we 2575 * dequeue from the tx_port. 2576 * 2577 * Simplified test setup diagram: 2578 * 2579 * rx_port w1_port 2580 * \ / \ 2581 * qid0 - w2_port - qid1 2582 * \ / \ 2583 * w3_port tx_port 2584 */ 2585 /* CQ mapping to QID for LB ports (directed mapped on create) */ 2586 for (i = w1_port; i <= w3_port; i++) { 2587 err = rte_event_port_link(evdev, t->port[i], &t->qid[0], NULL, 2588 1); 2589 if (err != 1) { 2590 printf("%d: error mapping lb qid\n", __LINE__); 2591 cleanup(t); 2592 return -1; 2593 } 2594 } 2595 2596 if (rte_event_dev_start(evdev) < 0) { 2597 printf("%d: Error with start call\n", __LINE__); 2598 return -1; 2599 } 2600 2601 /* Enqueue 3 packets to the rx port */ 2602 for (i = 0; i < 3; i++) { 2603 struct rte_event ev; 2604 mbufs[i] = rte_gen_arp(0, t->mbuf_pool); 2605 if (!mbufs[i]) { 2606 printf("%d: gen of pkt failed\n", __LINE__); 2607 return -1; 2608 } 2609 2610 ev.queue_id = t->qid[0]; 2611 ev.op = RTE_EVENT_OP_NEW; 2612 ev.mbuf = mbufs[i]; 2613 mbufs[i]->seqn = MAGIC_SEQN + i; 2614 2615 /* generate pkt and enqueue */ 2616 err = rte_event_enqueue_burst(evdev, t->port[rx_port], &ev, 1); 2617 if (err != 1) { 2618 printf("%d: Failed to enqueue pkt %u, retval = %u\n", 2619 __LINE__, i, err); 2620 return -1; 2621 } 2622 } 2623 2624 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2625 2626 /* use extra slot to make logic in loops easier */ 2627 struct rte_event deq_ev[w3_port + 1]; 2628 2629 /* Dequeue the 3 packets, one from each worker port */ 2630 for (i = w1_port; i <= w3_port; i++) { 2631 deq_pkts = rte_event_dequeue_burst(evdev, t->port[i], 2632 &deq_ev[i], 1, 0); 2633 if (deq_pkts != 1) { 2634 printf("%d: Failed to deq\n", __LINE__); 2635 rte_event_dev_dump(evdev, stdout); 2636 return -1; 2637 } 2638 } 2639 2640 /* Enqueue each packet in reverse order, flushing after each one */ 2641 for (i = w3_port; i >= w1_port; i--) { 2642 2643 deq_ev[i].op = RTE_EVENT_OP_FORWARD; 2644 deq_ev[i].queue_id = t->qid[1]; 2645 err = rte_event_enqueue_burst(evdev, t->port[i], &deq_ev[i], 1); 2646 if (err != 1) { 2647 printf("%d: Failed to enqueue\n", __LINE__); 2648 return -1; 2649 } 2650 } 2651 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2652 2653 /* dequeue from the tx ports, we should get 3 packets */ 2654 deq_pkts = rte_event_dequeue_burst(evdev, t->port[tx_port], deq_ev, 2655 3, 0); 2656 2657 /* Check to see if we've got all 3 packets */ 2658 if (deq_pkts != 3) { 2659 printf("%d: expected 3 pkts at tx port got %d from port %d\n", 2660 __LINE__, deq_pkts, tx_port); 2661 rte_event_dev_dump(evdev, stdout); 2662 return 1; 2663 } 2664 2665 /* Check to see if the sequence numbers are in expected order */ 2666 if (check_order) { 2667 for (j = 0 ; j < deq_pkts ; j++) { 2668 if (deq_ev[j].mbuf->seqn != MAGIC_SEQN + j) { 2669 printf( 2670 "%d: Incorrect sequence number(%d) from port %d\n", 2671 __LINE__, mbufs_out[j]->seqn, tx_port); 2672 return -1; 2673 } 2674 } 2675 } 2676 2677 /* Destroy the instance */ 2678 cleanup(t); 2679 return 0; 2680 } 2681 2682 static int 2683 ordered_basic(struct test *t) 2684 { 2685 return parallel_basic(t, 1); 2686 } 2687 2688 static int 2689 unordered_basic(struct test *t) 2690 { 2691 return parallel_basic(t, 0); 2692 } 2693 2694 static int 2695 holb(struct test *t) /* test to check we avoid basic head-of-line blocking */ 2696 { 2697 const struct rte_event new_ev = { 2698 .op = RTE_EVENT_OP_NEW 2699 /* all other fields zero */ 2700 }; 2701 struct rte_event ev = new_ev; 2702 unsigned int rx_port = 0; /* port we get the first flow on */ 2703 char rx_port_used_stat[64]; 2704 char rx_port_free_stat[64]; 2705 char other_port_used_stat[64]; 2706 2707 if (init(t, 1, 2) < 0 || 2708 create_ports(t, 2) < 0 || 2709 create_atomic_qids(t, 1) < 0) { 2710 printf("%d: Error initializing device\n", __LINE__); 2711 return -1; 2712 } 2713 int nb_links = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0); 2714 if (rte_event_port_link(evdev, t->port[0], NULL, NULL, 0) != 1 || 2715 nb_links != 1) { 2716 printf("%d: Error links queue to ports\n", __LINE__); 2717 goto err; 2718 } 2719 if (rte_event_dev_start(evdev) < 0) { 2720 printf("%d: Error with start call\n", __LINE__); 2721 goto err; 2722 } 2723 2724 /* send one packet and see where it goes, port 0 or 1 */ 2725 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) { 2726 printf("%d: Error doing first enqueue\n", __LINE__); 2727 goto err; 2728 } 2729 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2730 2731 if (rte_event_dev_xstats_by_name_get(evdev, "port_0_cq_ring_used", NULL) 2732 != 1) 2733 rx_port = 1; 2734 2735 snprintf(rx_port_used_stat, sizeof(rx_port_used_stat), 2736 "port_%u_cq_ring_used", rx_port); 2737 snprintf(rx_port_free_stat, sizeof(rx_port_free_stat), 2738 "port_%u_cq_ring_free", rx_port); 2739 snprintf(other_port_used_stat, sizeof(other_port_used_stat), 2740 "port_%u_cq_ring_used", rx_port ^ 1); 2741 if (rte_event_dev_xstats_by_name_get(evdev, rx_port_used_stat, NULL) 2742 != 1) { 2743 printf("%d: Error, first event not scheduled\n", __LINE__); 2744 goto err; 2745 } 2746 2747 /* now fill up the rx port's queue with one flow to cause HOLB */ 2748 do { 2749 ev = new_ev; 2750 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) { 2751 printf("%d: Error with enqueue\n", __LINE__); 2752 goto err; 2753 } 2754 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2755 } while (rte_event_dev_xstats_by_name_get(evdev, 2756 rx_port_free_stat, NULL) != 0); 2757 2758 /* one more packet, which needs to stay in IQ - i.e. HOLB */ 2759 ev = new_ev; 2760 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) { 2761 printf("%d: Error with enqueue\n", __LINE__); 2762 goto err; 2763 } 2764 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2765 2766 /* check that the other port still has an empty CQ */ 2767 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL) 2768 != 0) { 2769 printf("%d: Error, second port CQ is not empty\n", __LINE__); 2770 goto err; 2771 } 2772 /* check IQ now has one packet */ 2773 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL) 2774 != 1) { 2775 printf("%d: Error, QID does not have exactly 1 packet\n", 2776 __LINE__); 2777 goto err; 2778 } 2779 2780 /* send another flow, which should pass the other IQ entry */ 2781 ev = new_ev; 2782 ev.flow_id = 1; 2783 if (rte_event_enqueue_burst(evdev, t->port[0], &ev, 1) != 1) { 2784 printf("%d: Error with enqueue\n", __LINE__); 2785 goto err; 2786 } 2787 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2788 2789 if (rte_event_dev_xstats_by_name_get(evdev, other_port_used_stat, NULL) 2790 != 1) { 2791 printf("%d: Error, second flow did not pass out first\n", 2792 __LINE__); 2793 goto err; 2794 } 2795 2796 if (rte_event_dev_xstats_by_name_get(evdev, "qid_0_iq_0_used", NULL) 2797 != 1) { 2798 printf("%d: Error, QID does not have exactly 1 packet\n", 2799 __LINE__); 2800 goto err; 2801 } 2802 cleanup(t); 2803 return 0; 2804 err: 2805 rte_event_dev_dump(evdev, stdout); 2806 cleanup(t); 2807 return -1; 2808 } 2809 2810 static int 2811 worker_loopback_worker_fn(void *arg) 2812 { 2813 struct test *t = arg; 2814 uint8_t port = t->port[1]; 2815 int count = 0; 2816 int enqd; 2817 2818 /* 2819 * Takes packets from the input port and then loops them back through 2820 * the Eventdev. Each packet gets looped through QIDs 0-8, 16 times 2821 * so each packet goes through 8*16 = 128 times. 2822 */ 2823 printf("%d: \tWorker function started\n", __LINE__); 2824 while (count < NUM_PACKETS) { 2825 #define BURST_SIZE 32 2826 struct rte_event ev[BURST_SIZE]; 2827 uint16_t i, nb_rx = rte_event_dequeue_burst(evdev, port, ev, 2828 BURST_SIZE, 0); 2829 if (nb_rx == 0) { 2830 rte_pause(); 2831 continue; 2832 } 2833 2834 for (i = 0; i < nb_rx; i++) { 2835 ev[i].queue_id++; 2836 if (ev[i].queue_id != 8) { 2837 ev[i].op = RTE_EVENT_OP_FORWARD; 2838 enqd = rte_event_enqueue_burst(evdev, port, 2839 &ev[i], 1); 2840 if (enqd != 1) { 2841 printf("%d: Can't enqueue FWD!!\n", 2842 __LINE__); 2843 return -1; 2844 } 2845 continue; 2846 } 2847 2848 ev[i].queue_id = 0; 2849 ev[i].mbuf->udata64++; 2850 if (ev[i].mbuf->udata64 != 16) { 2851 ev[i].op = RTE_EVENT_OP_FORWARD; 2852 enqd = rte_event_enqueue_burst(evdev, port, 2853 &ev[i], 1); 2854 if (enqd != 1) { 2855 printf("%d: Can't enqueue FWD!!\n", 2856 __LINE__); 2857 return -1; 2858 } 2859 continue; 2860 } 2861 /* we have hit 16 iterations through system - drop */ 2862 rte_pktmbuf_free(ev[i].mbuf); 2863 count++; 2864 ev[i].op = RTE_EVENT_OP_RELEASE; 2865 enqd = rte_event_enqueue_burst(evdev, port, &ev[i], 1); 2866 if (enqd != 1) { 2867 printf("%d drop enqueue failed\n", __LINE__); 2868 return -1; 2869 } 2870 } 2871 } 2872 2873 return 0; 2874 } 2875 2876 static int 2877 worker_loopback_producer_fn(void *arg) 2878 { 2879 struct test *t = arg; 2880 uint8_t port = t->port[0]; 2881 uint64_t count = 0; 2882 2883 printf("%d: \tProducer function started\n", __LINE__); 2884 while (count < NUM_PACKETS) { 2885 struct rte_mbuf *m = 0; 2886 do { 2887 m = rte_pktmbuf_alloc(t->mbuf_pool); 2888 } while (m == NULL); 2889 2890 m->udata64 = 0; 2891 2892 struct rte_event ev = { 2893 .op = RTE_EVENT_OP_NEW, 2894 .queue_id = t->qid[0], 2895 .flow_id = (uintptr_t)m & 0xFFFF, 2896 .mbuf = m, 2897 }; 2898 2899 if (rte_event_enqueue_burst(evdev, port, &ev, 1) != 1) { 2900 while (rte_event_enqueue_burst(evdev, port, &ev, 1) != 2901 1) 2902 rte_pause(); 2903 } 2904 2905 count++; 2906 } 2907 2908 return 0; 2909 } 2910 2911 static int 2912 worker_loopback(struct test *t, uint8_t disable_implicit_release) 2913 { 2914 /* use a single producer core, and a worker core to see what happens 2915 * if the worker loops packets back multiple times 2916 */ 2917 struct test_event_dev_stats stats; 2918 uint64_t print_cycles = 0, cycles = 0; 2919 uint64_t tx_pkts = 0; 2920 int err; 2921 int w_lcore, p_lcore; 2922 2923 if (init(t, 8, 2) < 0 || 2924 create_atomic_qids(t, 8) < 0) { 2925 printf("%d: Error initializing device\n", __LINE__); 2926 return -1; 2927 } 2928 2929 /* RX with low max events */ 2930 static struct rte_event_port_conf conf = { 2931 .dequeue_depth = 32, 2932 .enqueue_depth = 64, 2933 }; 2934 /* beware: this cannot be initialized in the static above as it would 2935 * only be initialized once - and this needs to be set for multiple runs 2936 */ 2937 conf.new_event_threshold = 512; 2938 conf.disable_implicit_release = disable_implicit_release; 2939 2940 if (rte_event_port_setup(evdev, 0, &conf) < 0) { 2941 printf("Error setting up RX port\n"); 2942 return -1; 2943 } 2944 t->port[0] = 0; 2945 /* TX with higher max events */ 2946 conf.new_event_threshold = 4096; 2947 if (rte_event_port_setup(evdev, 1, &conf) < 0) { 2948 printf("Error setting up TX port\n"); 2949 return -1; 2950 } 2951 t->port[1] = 1; 2952 2953 /* CQ mapping to QID */ 2954 err = rte_event_port_link(evdev, t->port[1], NULL, NULL, 0); 2955 if (err != 8) { /* should have mapped all queues*/ 2956 printf("%d: error mapping port 2 to all qids\n", __LINE__); 2957 return -1; 2958 } 2959 2960 if (rte_event_dev_start(evdev) < 0) { 2961 printf("%d: Error with start call\n", __LINE__); 2962 return -1; 2963 } 2964 2965 p_lcore = rte_get_next_lcore( 2966 /* start core */ -1, 2967 /* skip master */ 1, 2968 /* wrap */ 0); 2969 w_lcore = rte_get_next_lcore(p_lcore, 1, 0); 2970 2971 rte_eal_remote_launch(worker_loopback_producer_fn, t, p_lcore); 2972 rte_eal_remote_launch(worker_loopback_worker_fn, t, w_lcore); 2973 2974 print_cycles = cycles = rte_get_timer_cycles(); 2975 while (rte_eal_get_lcore_state(p_lcore) != FINISHED || 2976 rte_eal_get_lcore_state(w_lcore) != FINISHED) { 2977 2978 rte_service_run_iter_on_app_lcore(t->service_id, 1); 2979 2980 uint64_t new_cycles = rte_get_timer_cycles(); 2981 2982 if (new_cycles - print_cycles > rte_get_timer_hz()) { 2983 test_event_dev_stats_get(evdev, &stats); 2984 printf( 2985 "%d: \tSched Rx = %"PRIu64", Tx = %"PRIu64"\n", 2986 __LINE__, stats.rx_pkts, stats.tx_pkts); 2987 2988 print_cycles = new_cycles; 2989 } 2990 if (new_cycles - cycles > rte_get_timer_hz() * 3) { 2991 test_event_dev_stats_get(evdev, &stats); 2992 if (stats.tx_pkts == tx_pkts) { 2993 rte_event_dev_dump(evdev, stdout); 2994 printf("Dumping xstats:\n"); 2995 xstats_print(); 2996 printf( 2997 "%d: No schedules for seconds, deadlock\n", 2998 __LINE__); 2999 return -1; 3000 } 3001 tx_pkts = stats.tx_pkts; 3002 cycles = new_cycles; 3003 } 3004 } 3005 rte_service_run_iter_on_app_lcore(t->service_id, 1); 3006 /* ensure all completions are flushed */ 3007 3008 rte_eal_mp_wait_lcore(); 3009 3010 cleanup(t); 3011 return 0; 3012 } 3013 3014 static struct rte_mempool *eventdev_func_mempool; 3015 3016 int 3017 test_sw_eventdev(void) 3018 { 3019 struct test *t; 3020 int ret; 3021 3022 t = malloc(sizeof(struct test)); 3023 if (t == NULL) 3024 return -1; 3025 /* manually initialize the op, older gcc's complain on static 3026 * initialization of struct elements that are a bitfield. 3027 */ 3028 release_ev.op = RTE_EVENT_OP_RELEASE; 3029 3030 const char *eventdev_name = "event_sw"; 3031 evdev = rte_event_dev_get_dev_id(eventdev_name); 3032 if (evdev < 0) { 3033 printf("%d: Eventdev %s not found - creating.\n", 3034 __LINE__, eventdev_name); 3035 if (rte_vdev_init(eventdev_name, NULL) < 0) { 3036 printf("Error creating eventdev\n"); 3037 goto test_fail; 3038 } 3039 evdev = rte_event_dev_get_dev_id(eventdev_name); 3040 if (evdev < 0) { 3041 printf("Error finding newly created eventdev\n"); 3042 goto test_fail; 3043 } 3044 } 3045 3046 if (rte_event_dev_service_id_get(evdev, &t->service_id) < 0) { 3047 printf("Failed to get service ID for software event dev\n"); 3048 goto test_fail; 3049 } 3050 3051 rte_service_runstate_set(t->service_id, 1); 3052 rte_service_set_runstate_mapped_check(t->service_id, 0); 3053 3054 /* Only create mbuf pool once, reuse for each test run */ 3055 if (!eventdev_func_mempool) { 3056 eventdev_func_mempool = rte_pktmbuf_pool_create( 3057 "EVENTDEV_SW_SA_MBUF_POOL", 3058 (1<<12), /* 4k buffers */ 3059 32 /*MBUF_CACHE_SIZE*/, 3060 0, 3061 512, /* use very small mbufs */ 3062 rte_socket_id()); 3063 if (!eventdev_func_mempool) { 3064 printf("ERROR creating mempool\n"); 3065 goto test_fail; 3066 } 3067 } 3068 t->mbuf_pool = eventdev_func_mempool; 3069 printf("*** Running Single Directed Packet test...\n"); 3070 ret = test_single_directed_packet(t); 3071 if (ret != 0) { 3072 printf("ERROR - Single Directed Packet test FAILED.\n"); 3073 goto test_fail; 3074 } 3075 printf("*** Running Directed Forward Credit test...\n"); 3076 ret = test_directed_forward_credits(t); 3077 if (ret != 0) { 3078 printf("ERROR - Directed Forward Credit test FAILED.\n"); 3079 goto test_fail; 3080 } 3081 printf("*** Running Single Load Balanced Packet test...\n"); 3082 ret = single_packet(t); 3083 if (ret != 0) { 3084 printf("ERROR - Single Packet test FAILED.\n"); 3085 goto test_fail; 3086 } 3087 printf("*** Running Unordered Basic test...\n"); 3088 ret = unordered_basic(t); 3089 if (ret != 0) { 3090 printf("ERROR - Unordered Basic test FAILED.\n"); 3091 goto test_fail; 3092 } 3093 printf("*** Running Ordered Basic test...\n"); 3094 ret = ordered_basic(t); 3095 if (ret != 0) { 3096 printf("ERROR - Ordered Basic test FAILED.\n"); 3097 goto test_fail; 3098 } 3099 printf("*** Running Burst Packets test...\n"); 3100 ret = burst_packets(t); 3101 if (ret != 0) { 3102 printf("ERROR - Burst Packets test FAILED.\n"); 3103 goto test_fail; 3104 } 3105 printf("*** Running Load Balancing test...\n"); 3106 ret = load_balancing(t); 3107 if (ret != 0) { 3108 printf("ERROR - Load Balancing test FAILED.\n"); 3109 goto test_fail; 3110 } 3111 printf("*** Running Prioritized Directed test...\n"); 3112 ret = test_priority_directed(t); 3113 if (ret != 0) { 3114 printf("ERROR - Prioritized Directed test FAILED.\n"); 3115 goto test_fail; 3116 } 3117 printf("*** Running Prioritized Atomic test...\n"); 3118 ret = test_priority_atomic(t); 3119 if (ret != 0) { 3120 printf("ERROR - Prioritized Atomic test FAILED.\n"); 3121 goto test_fail; 3122 } 3123 3124 printf("*** Running Prioritized Ordered test...\n"); 3125 ret = test_priority_ordered(t); 3126 if (ret != 0) { 3127 printf("ERROR - Prioritized Ordered test FAILED.\n"); 3128 goto test_fail; 3129 } 3130 printf("*** Running Prioritized Unordered test...\n"); 3131 ret = test_priority_unordered(t); 3132 if (ret != 0) { 3133 printf("ERROR - Prioritized Unordered test FAILED.\n"); 3134 goto test_fail; 3135 } 3136 printf("*** Running Invalid QID test...\n"); 3137 ret = invalid_qid(t); 3138 if (ret != 0) { 3139 printf("ERROR - Invalid QID test FAILED.\n"); 3140 goto test_fail; 3141 } 3142 printf("*** Running Load Balancing History test...\n"); 3143 ret = load_balancing_history(t); 3144 if (ret != 0) { 3145 printf("ERROR - Load Balancing History test FAILED.\n"); 3146 goto test_fail; 3147 } 3148 printf("*** Running Inflight Count test...\n"); 3149 ret = inflight_counts(t); 3150 if (ret != 0) { 3151 printf("ERROR - Inflight Count test FAILED.\n"); 3152 goto test_fail; 3153 } 3154 printf("*** Running Abuse Inflights test...\n"); 3155 ret = abuse_inflights(t); 3156 if (ret != 0) { 3157 printf("ERROR - Abuse Inflights test FAILED.\n"); 3158 goto test_fail; 3159 } 3160 printf("*** Running XStats test...\n"); 3161 ret = xstats_tests(t); 3162 if (ret != 0) { 3163 printf("ERROR - XStats test FAILED.\n"); 3164 goto test_fail; 3165 } 3166 printf("*** Running XStats ID Reset test...\n"); 3167 ret = xstats_id_reset_tests(t); 3168 if (ret != 0) { 3169 printf("ERROR - XStats ID Reset test FAILED.\n"); 3170 goto test_fail; 3171 } 3172 printf("*** Running XStats Brute Force test...\n"); 3173 ret = xstats_brute_force(t); 3174 if (ret != 0) { 3175 printf("ERROR - XStats Brute Force test FAILED.\n"); 3176 goto test_fail; 3177 } 3178 printf("*** Running XStats ID Abuse test...\n"); 3179 ret = xstats_id_abuse_tests(t); 3180 if (ret != 0) { 3181 printf("ERROR - XStats ID Abuse test FAILED.\n"); 3182 goto test_fail; 3183 } 3184 printf("*** Running QID Priority test...\n"); 3185 ret = qid_priorities(t); 3186 if (ret != 0) { 3187 printf("ERROR - QID Priority test FAILED.\n"); 3188 goto test_fail; 3189 } 3190 printf("*** Running Ordered Reconfigure test...\n"); 3191 ret = ordered_reconfigure(t); 3192 if (ret != 0) { 3193 printf("ERROR - Ordered Reconfigure test FAILED.\n"); 3194 goto test_fail; 3195 } 3196 printf("*** Running Port LB Single Reconfig test...\n"); 3197 ret = port_single_lb_reconfig(t); 3198 if (ret != 0) { 3199 printf("ERROR - Port LB Single Reconfig test FAILED.\n"); 3200 goto test_fail; 3201 } 3202 printf("*** Running Port Reconfig Credits test...\n"); 3203 ret = port_reconfig_credits(t); 3204 if (ret != 0) { 3205 printf("ERROR - Port Reconfig Credits Reset test FAILED.\n"); 3206 goto test_fail; 3207 } 3208 printf("*** Running Head-of-line-blocking test...\n"); 3209 ret = holb(t); 3210 if (ret != 0) { 3211 printf("ERROR - Head-of-line-blocking test FAILED.\n"); 3212 goto test_fail; 3213 } 3214 if (rte_lcore_count() >= 3) { 3215 printf("*** Running Worker loopback test...\n"); 3216 ret = worker_loopback(t, 0); 3217 if (ret != 0) { 3218 printf("ERROR - Worker loopback test FAILED.\n"); 3219 return ret; 3220 } 3221 3222 printf("*** Running Worker loopback test (implicit release disabled)...\n"); 3223 ret = worker_loopback(t, 1); 3224 if (ret != 0) { 3225 printf("ERROR - Worker loopback test FAILED.\n"); 3226 goto test_fail; 3227 } 3228 } else { 3229 printf("### Not enough cores for worker loopback tests.\n"); 3230 printf("### Need at least 3 cores for the tests.\n"); 3231 } 3232 3233 /* 3234 * Free test instance, leaving mempool initialized, and a pointer to it 3235 * in static eventdev_func_mempool, as it is re-used on re-runs 3236 */ 3237 free(t); 3238 3239 printf("SW Eventdev Selftest Successful.\n"); 3240 return 0; 3241 test_fail: 3242 free(t); 3243 printf("SW Eventdev Selftest Failed.\n"); 3244 return -1; 3245 } 3246