1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright 2020 Mellanox Technologies, Ltd 3 * 4 * This file contain the application main file 5 * This application provides the user the ability to test the 6 * insertion rate for specific rte_flow rule under stress state ~4M rule/ 7 * 8 * Then it will also provide packet per second measurement after installing 9 * all rules, the user may send traffic to test the PPS that match the rules 10 * after all rules are installed, to check performance or functionality after 11 * the stress. 12 * 13 * The flows insertion will go for all ports first, then it will print the 14 * results, after that the application will go into forwarding packets mode 15 * it will start receiving traffic if any and then forwarding it back and 16 * gives packet per second measurement. 17 */ 18 19 #include <locale.h> 20 #include <stdio.h> 21 #include <stdlib.h> 22 #include <string.h> 23 #include <stdint.h> 24 #include <inttypes.h> 25 #include <stdarg.h> 26 #include <errno.h> 27 #include <getopt.h> 28 #include <stdbool.h> 29 #include <sys/time.h> 30 #include <signal.h> 31 #include <unistd.h> 32 33 #include <rte_malloc.h> 34 #include <rte_mempool.h> 35 #include <rte_mbuf.h> 36 #include <rte_ethdev.h> 37 #include <rte_flow.h> 38 #include <rte_mtr.h> 39 40 #include "config.h" 41 #include "actions_gen.h" 42 #include "flow_gen.h" 43 44 #define MAX_BATCHES_COUNT 100 45 #define DEFAULT_RULES_COUNT 4000000 46 #define DEFAULT_RULES_BATCH 100000 47 #define DEFAULT_GROUP 0 48 49 #define HAIRPIN_RX_CONF_FORCE_MEMORY (0x0001) 50 #define HAIRPIN_TX_CONF_FORCE_MEMORY (0x0002) 51 52 #define HAIRPIN_RX_CONF_LOCKED_MEMORY (0x0010) 53 #define HAIRPIN_RX_CONF_RTE_MEMORY (0x0020) 54 55 #define HAIRPIN_TX_CONF_LOCKED_MEMORY (0x0100) 56 #define HAIRPIN_TX_CONF_RTE_MEMORY (0x0200) 57 58 struct rte_flow *flow; 59 static uint8_t flow_group; 60 61 static uint64_t encap_data; 62 static uint64_t decap_data; 63 static uint64_t all_actions[RTE_COLORS][MAX_ACTIONS_NUM]; 64 static 
char *actions_str[RTE_COLORS]; 65 66 static uint64_t flow_items[MAX_ITEMS_NUM]; 67 static uint64_t flow_actions[MAX_ACTIONS_NUM]; 68 static uint64_t flow_attrs[MAX_ATTRS_NUM]; 69 static uint32_t policy_id[MAX_PORTS]; 70 static uint8_t items_idx, actions_idx, attrs_idx; 71 72 static uint64_t ports_mask; 73 static uint64_t hairpin_conf_mask; 74 static uint16_t dst_ports[RTE_MAX_ETHPORTS]; 75 static volatile bool force_quit; 76 static bool dump_iterations; 77 static bool delete_flag; 78 static bool dump_socket_mem_flag; 79 static bool enable_fwd; 80 static bool unique_data; 81 static bool policy_mtr; 82 static bool packet_mode; 83 84 static uint8_t rx_queues_count; 85 static uint8_t tx_queues_count; 86 static uint8_t rxd_count; 87 static uint8_t txd_count; 88 static uint32_t mbuf_size; 89 static uint32_t mbuf_cache_size; 90 static uint32_t total_mbuf_num; 91 92 static struct rte_mempool *mbuf_mp; 93 static uint32_t nb_lcores; 94 static uint32_t rules_count; 95 static uint32_t rules_batch; 96 static uint32_t hairpin_queues_num; /* total hairpin q number - default: 0 */ 97 static uint32_t nb_lcores; 98 static uint8_t max_priority; 99 static uint32_t rand_seed; 100 static uint64_t meter_profile_values[3]; /* CIR CBS EBS values. 
*/ 101 102 #define MAX_PKT_BURST 32 103 #define LCORE_MODE_PKT 1 104 #define LCORE_MODE_STATS 2 105 #define MAX_STREAMS 64 106 #define METER_CREATE 1 107 #define METER_DELETE 2 108 109 struct stream { 110 int tx_port; 111 int tx_queue; 112 int rx_port; 113 int rx_queue; 114 }; 115 116 struct lcore_info { 117 int mode; 118 int streams_nb; 119 struct stream streams[MAX_STREAMS]; 120 /* stats */ 121 uint64_t tx_pkts; 122 uint64_t tx_drops; 123 uint64_t rx_pkts; 124 struct rte_mbuf *pkts[MAX_PKT_BURST]; 125 } __rte_cache_aligned; 126 127 static struct lcore_info lcore_infos[RTE_MAX_LCORE]; 128 129 struct used_cpu_time { 130 double insertion[MAX_PORTS][RTE_MAX_LCORE]; 131 double deletion[MAX_PORTS][RTE_MAX_LCORE]; 132 }; 133 134 struct multi_cores_pool { 135 uint32_t cores_count; 136 uint32_t rules_count; 137 struct used_cpu_time meters_record; 138 struct used_cpu_time flows_record; 139 int64_t last_alloc[RTE_MAX_LCORE]; 140 int64_t current_alloc[RTE_MAX_LCORE]; 141 } __rte_cache_aligned; 142 143 static struct multi_cores_pool mc_pool = { 144 .cores_count = 1, 145 }; 146 147 static const struct option_dict { 148 const char *str; 149 const uint64_t mask; 150 uint64_t *map; 151 uint8_t *map_idx; 152 153 } flow_options[] = { 154 { 155 .str = "ether", 156 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH), 157 .map = &flow_items[0], 158 .map_idx = &items_idx 159 }, 160 { 161 .str = "ipv4", 162 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4), 163 .map = &flow_items[0], 164 .map_idx = &items_idx 165 }, 166 { 167 .str = "ipv6", 168 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6), 169 .map = &flow_items[0], 170 .map_idx = &items_idx 171 }, 172 { 173 .str = "vlan", 174 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN), 175 .map = &flow_items[0], 176 .map_idx = &items_idx 177 }, 178 { 179 .str = "tcp", 180 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_TCP), 181 .map = &flow_items[0], 182 .map_idx = &items_idx 183 }, 184 { 185 .str = "udp", 186 .mask = 
FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP), 187 .map = &flow_items[0], 188 .map_idx = &items_idx 189 }, 190 { 191 .str = "vxlan", 192 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN), 193 .map = &flow_items[0], 194 .map_idx = &items_idx 195 }, 196 { 197 .str = "vxlan-gpe", 198 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE), 199 .map = &flow_items[0], 200 .map_idx = &items_idx 201 }, 202 { 203 .str = "gre", 204 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE), 205 .map = &flow_items[0], 206 .map_idx = &items_idx 207 }, 208 { 209 .str = "geneve", 210 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE), 211 .map = &flow_items[0], 212 .map_idx = &items_idx 213 }, 214 { 215 .str = "gtp", 216 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP), 217 .map = &flow_items[0], 218 .map_idx = &items_idx 219 }, 220 { 221 .str = "meta", 222 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_META), 223 .map = &flow_items[0], 224 .map_idx = &items_idx 225 }, 226 { 227 .str = "tag", 228 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_TAG), 229 .map = &flow_items[0], 230 .map_idx = &items_idx 231 }, 232 { 233 .str = "icmpv4", 234 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ICMP), 235 .map = &flow_items[0], 236 .map_idx = &items_idx 237 }, 238 { 239 .str = "icmpv6", 240 .mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ICMP6), 241 .map = &flow_items[0], 242 .map_idx = &items_idx 243 }, 244 { 245 .str = "ingress", 246 .mask = INGRESS, 247 .map = &flow_attrs[0], 248 .map_idx = &attrs_idx 249 }, 250 { 251 .str = "egress", 252 .mask = EGRESS, 253 .map = &flow_attrs[0], 254 .map_idx = &attrs_idx 255 }, 256 { 257 .str = "transfer", 258 .mask = TRANSFER, 259 .map = &flow_attrs[0], 260 .map_idx = &attrs_idx 261 }, 262 { 263 .str = "port-id", 264 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID), 265 .map = &flow_actions[0], 266 .map_idx = &actions_idx 267 }, 268 { 269 .str = "rss", 270 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS), 271 .map = &flow_actions[0], 272 .map_idx = &actions_idx 273 }, 274 { 275 .str = 
"queue", 276 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE), 277 .map = &flow_actions[0], 278 .map_idx = &actions_idx 279 }, 280 { 281 .str = "jump", 282 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP), 283 .map = &flow_actions[0], 284 .map_idx = &actions_idx 285 }, 286 { 287 .str = "mark", 288 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK), 289 .map = &flow_actions[0], 290 .map_idx = &actions_idx 291 }, 292 { 293 .str = "count", 294 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT), 295 .map = &flow_actions[0], 296 .map_idx = &actions_idx 297 }, 298 { 299 .str = "set-meta", 300 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META), 301 .map = &flow_actions[0], 302 .map_idx = &actions_idx 303 }, 304 { 305 .str = "set-tag", 306 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG), 307 .map = &flow_actions[0], 308 .map_idx = &actions_idx 309 }, 310 { 311 .str = "drop", 312 .mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP), 313 .map = &flow_actions[0], 314 .map_idx = &actions_idx 315 }, 316 { 317 .str = "set-src-mac", 318 .mask = FLOW_ACTION_MASK( 319 RTE_FLOW_ACTION_TYPE_SET_MAC_SRC 320 ), 321 .map = &flow_actions[0], 322 .map_idx = &actions_idx 323 }, 324 { 325 .str = "set-dst-mac", 326 .mask = FLOW_ACTION_MASK( 327 RTE_FLOW_ACTION_TYPE_SET_MAC_DST 328 ), 329 .map = &flow_actions[0], 330 .map_idx = &actions_idx 331 }, 332 { 333 .str = "set-src-ipv4", 334 .mask = FLOW_ACTION_MASK( 335 RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC 336 ), 337 .map = &flow_actions[0], 338 .map_idx = &actions_idx 339 }, 340 { 341 .str = "set-dst-ipv4", 342 .mask = FLOW_ACTION_MASK( 343 RTE_FLOW_ACTION_TYPE_SET_IPV4_DST 344 ), 345 .map = &flow_actions[0], 346 .map_idx = &actions_idx 347 }, 348 { 349 .str = "set-src-ipv6", 350 .mask = FLOW_ACTION_MASK( 351 RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC 352 ), 353 .map = &flow_actions[0], 354 .map_idx = &actions_idx 355 }, 356 { 357 .str = "set-dst-ipv6", 358 .mask = FLOW_ACTION_MASK( 359 RTE_FLOW_ACTION_TYPE_SET_IPV6_DST 360 ), 361 .map = 
&flow_actions[0], 362 .map_idx = &actions_idx 363 }, 364 { 365 .str = "set-src-tp", 366 .mask = FLOW_ACTION_MASK( 367 RTE_FLOW_ACTION_TYPE_SET_TP_SRC 368 ), 369 .map = &flow_actions[0], 370 .map_idx = &actions_idx 371 }, 372 { 373 .str = "set-dst-tp", 374 .mask = FLOW_ACTION_MASK( 375 RTE_FLOW_ACTION_TYPE_SET_TP_DST 376 ), 377 .map = &flow_actions[0], 378 .map_idx = &actions_idx 379 }, 380 { 381 .str = "inc-tcp-ack", 382 .mask = FLOW_ACTION_MASK( 383 RTE_FLOW_ACTION_TYPE_INC_TCP_ACK 384 ), 385 .map = &flow_actions[0], 386 .map_idx = &actions_idx 387 }, 388 { 389 .str = "dec-tcp-ack", 390 .mask = FLOW_ACTION_MASK( 391 RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK 392 ), 393 .map = &flow_actions[0], 394 .map_idx = &actions_idx 395 }, 396 { 397 .str = "inc-tcp-seq", 398 .mask = FLOW_ACTION_MASK( 399 RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ 400 ), 401 .map = &flow_actions[0], 402 .map_idx = &actions_idx 403 }, 404 { 405 .str = "dec-tcp-seq", 406 .mask = FLOW_ACTION_MASK( 407 RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ 408 ), 409 .map = &flow_actions[0], 410 .map_idx = &actions_idx 411 }, 412 { 413 .str = "set-ttl", 414 .mask = FLOW_ACTION_MASK( 415 RTE_FLOW_ACTION_TYPE_SET_TTL 416 ), 417 .map = &flow_actions[0], 418 .map_idx = &actions_idx 419 }, 420 { 421 .str = "dec-ttl", 422 .mask = FLOW_ACTION_MASK( 423 RTE_FLOW_ACTION_TYPE_DEC_TTL 424 ), 425 .map = &flow_actions[0], 426 .map_idx = &actions_idx 427 }, 428 { 429 .str = "set-ipv4-dscp", 430 .mask = FLOW_ACTION_MASK( 431 RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP 432 ), 433 .map = &flow_actions[0], 434 .map_idx = &actions_idx 435 }, 436 { 437 .str = "set-ipv6-dscp", 438 .mask = FLOW_ACTION_MASK( 439 RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP 440 ), 441 .map = &flow_actions[0], 442 .map_idx = &actions_idx 443 }, 444 { 445 .str = "flag", 446 .mask = FLOW_ACTION_MASK( 447 RTE_FLOW_ACTION_TYPE_FLAG 448 ), 449 .map = &flow_actions[0], 450 .map_idx = &actions_idx 451 }, 452 { 453 .str = "meter", 454 .mask = FLOW_ACTION_MASK( 455 RTE_FLOW_ACTION_TYPE_METER 456 ), 457 .map 
= &flow_actions[0], 458 .map_idx = &actions_idx 459 }, 460 { 461 .str = "vxlan-encap", 462 .mask = FLOW_ACTION_MASK( 463 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP 464 ), 465 .map = &flow_actions[0], 466 .map_idx = &actions_idx 467 }, 468 { 469 .str = "vxlan-decap", 470 .mask = FLOW_ACTION_MASK( 471 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP 472 ), 473 .map = &flow_actions[0], 474 .map_idx = &actions_idx 475 }, 476 }; 477 478 static void 479 usage(char *progname) 480 { 481 printf("\nusage: %s\n", progname); 482 printf("\nControl configurations:\n"); 483 printf(" --rules-count=N: to set the number of needed" 484 " rules to insert, default is %d\n", DEFAULT_RULES_COUNT); 485 printf(" --rules-batch=N: set number of batched rules," 486 " default is %d\n", DEFAULT_RULES_BATCH); 487 printf(" --dump-iterations: To print rates for each" 488 " iteration\n"); 489 printf(" --deletion-rate: Enable deletion rate" 490 " calculations\n"); 491 printf(" --dump-socket-mem: To dump all socket memory\n"); 492 printf(" --enable-fwd: To enable packets forwarding" 493 " after insertion\n"); 494 printf(" --portmask=N: hexadecimal bitmask of ports used\n"); 495 printf(" --hairpin-conf=0xXXXX: hexadecimal bitmask of hairpin queue configuration\n"); 496 printf(" --random-priority=N,S: use random priority levels " 497 "from 0 to (N - 1) for flows " 498 "and S as seed for pseudo-random number generator\n"); 499 printf(" --unique-data: flag to set using unique data for all" 500 " actions that support data, such as header modify and encap actions\n"); 501 printf(" --meter-profile=cir,cbs,ebs: set CIR CBS EBS parameters in meter" 502 " profile, default values are %d,%d,%d\n", METER_CIR, 503 METER_CIR / 8, 0); 504 printf(" --packet-mode: to enable packet mode for meter profile\n"); 505 506 printf("To set flow attributes:\n"); 507 printf(" --ingress: set ingress attribute in flows\n"); 508 printf(" --egress: set egress attribute in flows\n"); 509 printf(" --transfer: set transfer attribute in flows\n"); 510 printf(" 
--group=N: set group for all flows," 511 " default is %d\n", DEFAULT_GROUP); 512 printf(" --cores=N: to set the number of needed " 513 "cores to insert rte_flow rules, default is 1\n"); 514 printf(" --rxq=N: to set the count of receive queues\n"); 515 printf(" --txq=N: to set the count of send queues\n"); 516 printf(" --rxd=N: to set the count of rxd\n"); 517 printf(" --txd=N: to set the count of txd\n"); 518 printf(" --mbuf-size=N: to set the size of mbuf\n"); 519 printf(" --mbuf-cache-size=N: to set the size of mbuf cache\n"); 520 printf(" --total-mbuf-count=N: to set the count of total mbuf count\n"); 521 522 523 printf("To set flow items:\n"); 524 printf(" --ether: add ether layer in flow items\n"); 525 printf(" --vlan: add vlan layer in flow items\n"); 526 printf(" --ipv4: add ipv4 layer in flow items\n"); 527 printf(" --ipv6: add ipv6 layer in flow items\n"); 528 printf(" --tcp: add tcp layer in flow items\n"); 529 printf(" --udp: add udp layer in flow items\n"); 530 printf(" --vxlan: add vxlan layer in flow items\n"); 531 printf(" --vxlan-gpe: add vxlan-gpe layer in flow items\n"); 532 printf(" --gre: add gre layer in flow items\n"); 533 printf(" --geneve: add geneve layer in flow items\n"); 534 printf(" --gtp: add gtp layer in flow items\n"); 535 printf(" --meta: add meta layer in flow items\n"); 536 printf(" --tag: add tag layer in flow items\n"); 537 printf(" --icmpv4: add icmpv4 layer in flow items\n"); 538 printf(" --icmpv6: add icmpv6 layer in flow items\n"); 539 540 printf("To set flow actions:\n"); 541 printf(" --port-id: add port-id action in flow actions\n"); 542 printf(" --rss: add rss action in flow actions\n"); 543 printf(" --queue: add queue action in flow actions\n"); 544 printf(" --jump: add jump action in flow actions\n"); 545 printf(" --mark: add mark action in flow actions\n"); 546 printf(" --count: add count action in flow actions\n"); 547 printf(" --set-meta: add set meta action in flow actions\n"); 548 printf(" --set-tag: add set tag 
action in flow actions\n"); 549 printf(" --drop: add drop action in flow actions\n"); 550 printf(" --hairpin-queue=N: add hairpin-queue action in flow actions\n"); 551 printf(" --hairpin-rss=N: add hairpin-rss action in flow actions\n"); 552 printf(" --set-src-mac: add set src mac action to flow actions\n" 553 "Src mac to be set is random each flow\n"); 554 printf(" --set-dst-mac: add set dst mac action to flow actions\n" 555 "Dst mac to be set is random each flow\n"); 556 printf(" --set-src-ipv4: add set src ipv4 action to flow actions\n" 557 "Src ipv4 to be set is random each flow\n"); 558 printf(" --set-dst-ipv4 add set dst ipv4 action to flow actions\n" 559 "Dst ipv4 to be set is random each flow\n"); 560 printf(" --set-src-ipv6: add set src ipv6 action to flow actions\n" 561 "Src ipv6 to be set is random each flow\n"); 562 printf(" --set-dst-ipv6: add set dst ipv6 action to flow actions\n" 563 "Dst ipv6 to be set is random each flow\n"); 564 printf(" --set-src-tp: add set src tp action to flow actions\n" 565 "Src tp to be set is random each flow\n"); 566 printf(" --set-dst-tp: add set dst tp action to flow actions\n" 567 "Dst tp to be set is random each flow\n"); 568 printf(" --inc-tcp-ack: add inc tcp ack action to flow actions\n" 569 "tcp ack will be increments by 1\n"); 570 printf(" --dec-tcp-ack: add dec tcp ack action to flow actions\n" 571 "tcp ack will be decrements by 1\n"); 572 printf(" --inc-tcp-seq: add inc tcp seq action to flow actions\n" 573 "tcp seq will be increments by 1\n"); 574 printf(" --dec-tcp-seq: add dec tcp seq action to flow actions\n" 575 "tcp seq will be decrements by 1\n"); 576 printf(" --set-ttl: add set ttl action to flow actions\n" 577 "L3 ttl to be set is random each flow\n"); 578 printf(" --dec-ttl: add dec ttl action to flow actions\n" 579 "L3 ttl will be decrements by 1\n"); 580 printf(" --set-ipv4-dscp: add set ipv4 dscp action to flow actions\n" 581 "ipv4 dscp value to be set is random each flow\n"); 582 printf(" 
--set-ipv6-dscp: add set ipv6 dscp action to flow actions\n" 583 "ipv6 dscp value to be set is random each flow\n"); 584 printf(" --flag: add flag action to flow actions\n"); 585 printf(" --meter: add meter action to flow actions\n"); 586 printf(" --policy-mtr=\"g1,g2:y1:r1\": to create meter with specified " 587 "colored actions\n"); 588 printf(" --raw-encap=<data>: add raw encap action to flow actions\n" 589 "Data is the data needed to be encaped\n" 590 "Example: raw-encap=ether,ipv4,udp,vxlan\n"); 591 printf(" --raw-decap=<data>: add raw decap action to flow actions\n" 592 "Data is the data needed to be decaped\n" 593 "Example: raw-decap=ether,ipv4,udp,vxlan\n"); 594 printf(" --vxlan-encap: add vxlan-encap action to flow actions\n" 595 "Encapped data is fixed with pattern: ether,ipv4,udp,vxlan\n" 596 "With fixed values\n"); 597 printf(" --vxlan-decap: add vxlan_decap action to flow actions\n"); 598 } 599 600 static void 601 read_meter_policy(char *prog, char *arg) 602 { 603 char *token; 604 size_t i, j, k; 605 606 j = 0; 607 k = 0; 608 policy_mtr = true; 609 token = strsep(&arg, ":\0"); 610 while (token != NULL && j < RTE_COLORS) { 611 actions_str[j++] = token; 612 token = strsep(&arg, ":\0"); 613 } 614 j = 0; 615 token = strtok(actions_str[0], ",\0"); 616 while (token == NULL && j < RTE_COLORS - 1) 617 token = strtok(actions_str[++j], ",\0"); 618 while (j < RTE_COLORS && token != NULL) { 619 for (i = 0; i < RTE_DIM(flow_options); i++) { 620 if (!strcmp(token, flow_options[i].str)) { 621 all_actions[j][k++] = flow_options[i].mask; 622 break; 623 } 624 } 625 /* Reached last action with no match */ 626 if (i >= RTE_DIM(flow_options)) { 627 fprintf(stderr, "Invalid colored actions: %s\n", token); 628 usage(prog); 629 rte_exit(EXIT_SUCCESS, "Invalid colored actions\n"); 630 } 631 token = strtok(NULL, ",\0"); 632 while (!token && j < RTE_COLORS - 1) { 633 token = strtok(actions_str[++j], ",\0"); 634 k = 0; 635 } 636 } 637 } 638 639 static void 640 args_parse(int 
argc, char **argv) 641 { 642 uint64_t pm, seed; 643 uint64_t hp_conf; 644 char **argvopt; 645 uint32_t prio; 646 char *token; 647 char *end; 648 int n, opt; 649 int opt_idx; 650 size_t i; 651 652 static const struct option lgopts[] = { 653 /* Control */ 654 { "help", 0, 0, 0 }, 655 { "rules-count", 1, 0, 0 }, 656 { "rules-batch", 1, 0, 0 }, 657 { "dump-iterations", 0, 0, 0 }, 658 { "deletion-rate", 0, 0, 0 }, 659 { "dump-socket-mem", 0, 0, 0 }, 660 { "enable-fwd", 0, 0, 0 }, 661 { "unique-data", 0, 0, 0 }, 662 { "portmask", 1, 0, 0 }, 663 { "hairpin-conf", 1, 0, 0 }, 664 { "cores", 1, 0, 0 }, 665 { "random-priority", 1, 0, 0 }, 666 { "meter-profile-alg", 1, 0, 0 }, 667 { "rxq", 1, 0, 0 }, 668 { "txq", 1, 0, 0 }, 669 { "rxd", 1, 0, 0 }, 670 { "txd", 1, 0, 0 }, 671 { "mbuf-size", 1, 0, 0 }, 672 { "mbuf-cache-size", 1, 0, 0 }, 673 { "total-mbuf-count", 1, 0, 0 }, 674 /* Attributes */ 675 { "ingress", 0, 0, 0 }, 676 { "egress", 0, 0, 0 }, 677 { "transfer", 0, 0, 0 }, 678 { "group", 1, 0, 0 }, 679 /* Items */ 680 { "ether", 0, 0, 0 }, 681 { "vlan", 0, 0, 0 }, 682 { "ipv4", 0, 0, 0 }, 683 { "ipv6", 0, 0, 0 }, 684 { "tcp", 0, 0, 0 }, 685 { "udp", 0, 0, 0 }, 686 { "vxlan", 0, 0, 0 }, 687 { "vxlan-gpe", 0, 0, 0 }, 688 { "gre", 0, 0, 0 }, 689 { "geneve", 0, 0, 0 }, 690 { "gtp", 0, 0, 0 }, 691 { "meta", 0, 0, 0 }, 692 { "tag", 0, 0, 0 }, 693 { "icmpv4", 0, 0, 0 }, 694 { "icmpv6", 0, 0, 0 }, 695 /* Actions */ 696 { "port-id", 2, 0, 0 }, 697 { "rss", 0, 0, 0 }, 698 { "queue", 0, 0, 0 }, 699 { "jump", 0, 0, 0 }, 700 { "mark", 0, 0, 0 }, 701 { "count", 0, 0, 0 }, 702 { "set-meta", 0, 0, 0 }, 703 { "set-tag", 0, 0, 0 }, 704 { "drop", 0, 0, 0 }, 705 { "hairpin-queue", 1, 0, 0 }, 706 { "hairpin-rss", 1, 0, 0 }, 707 { "set-src-mac", 0, 0, 0 }, 708 { "set-dst-mac", 0, 0, 0 }, 709 { "set-src-ipv4", 0, 0, 0 }, 710 { "set-dst-ipv4", 0, 0, 0 }, 711 { "set-src-ipv6", 0, 0, 0 }, 712 { "set-dst-ipv6", 0, 0, 0 }, 713 { "set-src-tp", 0, 0, 0 }, 714 { "set-dst-tp", 0, 0, 0 }, 715 { 
"inc-tcp-ack", 0, 0, 0 }, 716 { "dec-tcp-ack", 0, 0, 0 }, 717 { "inc-tcp-seq", 0, 0, 0 }, 718 { "dec-tcp-seq", 0, 0, 0 }, 719 { "set-ttl", 0, 0, 0 }, 720 { "dec-ttl", 0, 0, 0 }, 721 { "set-ipv4-dscp", 0, 0, 0 }, 722 { "set-ipv6-dscp", 0, 0, 0 }, 723 { "flag", 0, 0, 0 }, 724 { "meter", 0, 0, 0 }, 725 { "raw-encap", 1, 0, 0 }, 726 { "raw-decap", 1, 0, 0 }, 727 { "vxlan-encap", 0, 0, 0 }, 728 { "vxlan-decap", 0, 0, 0 }, 729 { "policy-mtr", 1, 0, 0 }, 730 { "meter-profile", 1, 0, 0 }, 731 { "packet-mode", 0, 0, 0 }, 732 { 0, 0, 0, 0 }, 733 }; 734 735 RTE_ETH_FOREACH_DEV(i) 736 ports_mask |= 1 << i; 737 738 for (i = 0; i < RTE_MAX_ETHPORTS; i++) 739 dst_ports[i] = PORT_ID_DST; 740 741 hairpin_queues_num = 0; 742 argvopt = argv; 743 744 printf(":: Flow -> "); 745 while ((opt = getopt_long(argc, argvopt, "", 746 lgopts, &opt_idx)) != EOF) { 747 switch (opt) { 748 case 0: 749 if (strcmp(lgopts[opt_idx].name, "help") == 0) { 750 usage(argv[0]); 751 exit(EXIT_SUCCESS); 752 } 753 754 if (strcmp(lgopts[opt_idx].name, "group") == 0) { 755 n = atoi(optarg); 756 if (n >= 0) 757 flow_group = n; 758 else 759 rte_exit(EXIT_FAILURE, 760 "flow group should be >= 0\n"); 761 printf("group %d / ", flow_group); 762 } 763 764 for (i = 0; i < RTE_DIM(flow_options); i++) 765 if (strcmp(lgopts[opt_idx].name, 766 flow_options[i].str) == 0) { 767 flow_options[i].map[ 768 (*flow_options[i].map_idx)++] = 769 flow_options[i].mask; 770 printf("%s / ", flow_options[i].str); 771 } 772 773 if (strcmp(lgopts[opt_idx].name, 774 "hairpin-rss") == 0) { 775 n = atoi(optarg); 776 if (n > 0) 777 hairpin_queues_num = n; 778 else 779 rte_exit(EXIT_FAILURE, 780 "Hairpin queues should be > 0\n"); 781 782 flow_actions[actions_idx++] = 783 HAIRPIN_RSS_ACTION; 784 printf("hairpin-rss / "); 785 } 786 if (strcmp(lgopts[opt_idx].name, 787 "hairpin-queue") == 0) { 788 n = atoi(optarg); 789 if (n > 0) 790 hairpin_queues_num = n; 791 else 792 rte_exit(EXIT_FAILURE, 793 "Hairpin queues should be > 0\n"); 794 795 
flow_actions[actions_idx++] = 796 HAIRPIN_QUEUE_ACTION; 797 printf("hairpin-queue / "); 798 } 799 800 if (strcmp(lgopts[opt_idx].name, "raw-encap") == 0) { 801 printf("raw-encap "); 802 flow_actions[actions_idx++] = 803 FLOW_ITEM_MASK( 804 RTE_FLOW_ACTION_TYPE_RAW_ENCAP 805 ); 806 807 token = strtok(optarg, ","); 808 while (token != NULL) { 809 for (i = 0; i < RTE_DIM(flow_options); i++) { 810 if (strcmp(flow_options[i].str, token) == 0) { 811 printf("%s,", token); 812 encap_data |= flow_options[i].mask; 813 break; 814 } 815 /* Reached last item with no match */ 816 if (i == (RTE_DIM(flow_options) - 1)) 817 rte_exit(EXIT_FAILURE, 818 "Invalid encap item: %s\n", token); 819 } 820 token = strtok(NULL, ","); 821 } 822 printf(" / "); 823 } 824 if (strcmp(lgopts[opt_idx].name, "raw-decap") == 0) { 825 printf("raw-decap "); 826 flow_actions[actions_idx++] = 827 FLOW_ITEM_MASK( 828 RTE_FLOW_ACTION_TYPE_RAW_DECAP 829 ); 830 831 token = strtok(optarg, ","); 832 while (token != NULL) { 833 for (i = 0; i < RTE_DIM(flow_options); i++) { 834 if (strcmp(flow_options[i].str, token) == 0) { 835 printf("%s,", token); 836 decap_data |= flow_options[i].mask; 837 break; 838 } 839 /* Reached last item with no match */ 840 if (i == (RTE_DIM(flow_options) - 1)) 841 rte_exit(EXIT_FAILURE, 842 "Invalid decap item %s\n", token); 843 } 844 token = strtok(NULL, ","); 845 } 846 printf(" / "); 847 } 848 /* Control */ 849 if (strcmp(lgopts[opt_idx].name, 850 "rules-batch") == 0) { 851 rules_batch = atoi(optarg); 852 } 853 if (strcmp(lgopts[opt_idx].name, 854 "rules-count") == 0) { 855 rules_count = atoi(optarg); 856 } 857 if (strcmp(lgopts[opt_idx].name, "random-priority") == 858 0) { 859 end = NULL; 860 prio = strtol(optarg, &end, 10); 861 if ((optarg[0] == '\0') || (end == NULL)) 862 rte_exit(EXIT_FAILURE, 863 "Invalid value for random-priority\n"); 864 max_priority = prio; 865 token = end + 1; 866 seed = strtoll(token, &end, 10); 867 if ((token[0] == '\0') || (*end != '\0')) 868 
rte_exit(EXIT_FAILURE, 869 "Invalid value for random-priority\n"); 870 rand_seed = seed; 871 } 872 if (strcmp(lgopts[opt_idx].name, 873 "dump-iterations") == 0) 874 dump_iterations = true; 875 if (strcmp(lgopts[opt_idx].name, 876 "unique-data") == 0) 877 unique_data = true; 878 if (strcmp(lgopts[opt_idx].name, 879 "deletion-rate") == 0) 880 delete_flag = true; 881 if (strcmp(lgopts[opt_idx].name, 882 "dump-socket-mem") == 0) 883 dump_socket_mem_flag = true; 884 if (strcmp(lgopts[opt_idx].name, 885 "enable-fwd") == 0) 886 enable_fwd = true; 887 if (strcmp(lgopts[opt_idx].name, 888 "portmask") == 0) { 889 /* parse hexadecimal string */ 890 end = NULL; 891 pm = strtoull(optarg, &end, 16); 892 if ((optarg[0] == '\0') || (end == NULL) || (*end != '\0')) 893 rte_exit(EXIT_FAILURE, "Invalid fwd port mask\n"); 894 ports_mask = pm; 895 } 896 if (strcmp(lgopts[opt_idx].name, "hairpin-conf") == 0) { 897 end = NULL; 898 hp_conf = strtoull(optarg, &end, 16); 899 if ((optarg[0] == '\0') || (end == NULL) || (*end != '\0')) 900 rte_exit(EXIT_FAILURE, "Invalid hairpin config mask\n"); 901 hairpin_conf_mask = hp_conf; 902 } 903 if (strcmp(lgopts[opt_idx].name, 904 "port-id") == 0) { 905 uint16_t port_idx = 0; 906 char *token; 907 908 token = strtok(optarg, ","); 909 while (token != NULL) { 910 dst_ports[port_idx++] = atoi(token); 911 token = strtok(NULL, ","); 912 } 913 } 914 if (strcmp(lgopts[opt_idx].name, "rxq") == 0) { 915 n = atoi(optarg); 916 rx_queues_count = (uint8_t) n; 917 } 918 if (strcmp(lgopts[opt_idx].name, "txq") == 0) { 919 n = atoi(optarg); 920 tx_queues_count = (uint8_t) n; 921 } 922 if (strcmp(lgopts[opt_idx].name, "rxd") == 0) { 923 n = atoi(optarg); 924 rxd_count = (uint8_t) n; 925 } 926 if (strcmp(lgopts[opt_idx].name, "txd") == 0) { 927 n = atoi(optarg); 928 txd_count = (uint8_t) n; 929 } 930 if (strcmp(lgopts[opt_idx].name, "mbuf-size") == 0) { 931 n = atoi(optarg); 932 mbuf_size = (uint32_t) n; 933 } 934 if (strcmp(lgopts[opt_idx].name, "mbuf-cache-size") == 
0) { 935 n = atoi(optarg); 936 mbuf_cache_size = (uint32_t) n; 937 } 938 if (strcmp(lgopts[opt_idx].name, "total-mbuf-count") == 0) { 939 n = atoi(optarg); 940 total_mbuf_num = (uint32_t) n; 941 } 942 if (strcmp(lgopts[opt_idx].name, "cores") == 0) { 943 n = atoi(optarg); 944 if ((int) rte_lcore_count() <= n) { 945 rte_exit(EXIT_FAILURE, 946 "Error: you need %d cores to run on multi-cores\n" 947 "Existing cores are: %d\n", n, rte_lcore_count()); 948 } 949 if (n <= RTE_MAX_LCORE && n > 0) 950 mc_pool.cores_count = n; 951 else { 952 rte_exit(EXIT_FAILURE, 953 "Error: cores count must be > 0 and < %d\n", 954 RTE_MAX_LCORE); 955 } 956 } 957 if (strcmp(lgopts[opt_idx].name, "policy-mtr") == 0) 958 read_meter_policy(argv[0], optarg); 959 if (strcmp(lgopts[opt_idx].name, 960 "meter-profile") == 0) { 961 i = 0; 962 token = strsep(&optarg, ",\0"); 963 while (token != NULL && i < sizeof( 964 meter_profile_values) / 965 sizeof(uint64_t)) { 966 meter_profile_values[i++] = atol(token); 967 token = strsep(&optarg, ",\0"); 968 } 969 } 970 if (strcmp(lgopts[opt_idx].name, "packet-mode") == 0) 971 packet_mode = true; 972 break; 973 default: 974 usage(argv[0]); 975 rte_exit(EXIT_FAILURE, "Invalid option: %s\n", 976 argv[optind - 1]); 977 break; 978 } 979 } 980 if (rules_count % rules_batch != 0) { 981 rte_exit(EXIT_FAILURE, 982 "rules_count %% rules_batch should be 0\n"); 983 } 984 if (rules_count / rules_batch > MAX_BATCHES_COUNT) { 985 rte_exit(EXIT_FAILURE, 986 "rules_count / rules_batch should be <= %d\n", 987 MAX_BATCHES_COUNT); 988 } 989 990 printf("end_flow\n"); 991 } 992 993 /* Dump the socket memory statistics on console */ 994 static size_t 995 dump_socket_mem(FILE *f) 996 { 997 struct rte_malloc_socket_stats socket_stats; 998 unsigned int i = 0; 999 size_t total = 0; 1000 size_t alloc = 0; 1001 size_t free = 0; 1002 unsigned int n_alloc = 0; 1003 unsigned int n_free = 0; 1004 bool active_nodes = false; 1005 1006 1007 for (i = 0; i < RTE_MAX_NUMA_NODES; i++) { 1008 if 
(rte_malloc_get_socket_stats(i, &socket_stats) || 1009 !socket_stats.heap_totalsz_bytes) 1010 continue; 1011 active_nodes = true; 1012 total += socket_stats.heap_totalsz_bytes; 1013 alloc += socket_stats.heap_allocsz_bytes; 1014 free += socket_stats.heap_freesz_bytes; 1015 n_alloc += socket_stats.alloc_count; 1016 n_free += socket_stats.free_count; 1017 if (dump_socket_mem_flag) { 1018 fprintf(f, "::::::::::::::::::::::::::::::::::::::::"); 1019 fprintf(f, 1020 "\nSocket %u:\nsize(M) total: %.6lf\nalloc:" 1021 " %.6lf(%.3lf%%)\nfree: %.6lf" 1022 "\nmax: %.6lf" 1023 "\ncount alloc: %u\nfree: %u\n", 1024 i, 1025 socket_stats.heap_totalsz_bytes / 1.0e6, 1026 socket_stats.heap_allocsz_bytes / 1.0e6, 1027 (double)socket_stats.heap_allocsz_bytes * 100 / 1028 (double)socket_stats.heap_totalsz_bytes, 1029 socket_stats.heap_freesz_bytes / 1.0e6, 1030 socket_stats.greatest_free_size / 1.0e6, 1031 socket_stats.alloc_count, 1032 socket_stats.free_count); 1033 fprintf(f, "::::::::::::::::::::::::::::::::::::::::"); 1034 } 1035 } 1036 if (dump_socket_mem_flag && active_nodes) { 1037 fprintf(f, 1038 "\nTotal: size(M)\ntotal: %.6lf" 1039 "\nalloc: %.6lf(%.3lf%%)\nfree: %.6lf" 1040 "\ncount alloc: %u\nfree: %u\n", 1041 total / 1.0e6, alloc / 1.0e6, 1042 (double)alloc * 100 / (double)total, free / 1.0e6, 1043 n_alloc, n_free); 1044 fprintf(f, "::::::::::::::::::::::::::::::::::::::::\n"); 1045 } 1046 return alloc; 1047 } 1048 1049 static void 1050 print_flow_error(struct rte_flow_error error) 1051 { 1052 printf("Flow can't be created %d message: %s\n", 1053 error.type, 1054 error.message ? 
error.message : "(no stated reason)"); 1055 } 1056 1057 static inline void 1058 print_rules_batches(double *cpu_time_per_batch) 1059 { 1060 uint8_t idx; 1061 double delta; 1062 double rate; 1063 1064 for (idx = 0; idx < MAX_BATCHES_COUNT; idx++) { 1065 if (!cpu_time_per_batch[idx]) 1066 break; 1067 delta = (double)(rules_batch / cpu_time_per_batch[idx]); 1068 rate = delta / 1000; /* Save rate in K unit. */ 1069 printf(":: Rules batch #%d: %d rules " 1070 "in %f sec[ Rate = %f K Rule/Sec ]\n", 1071 idx, rules_batch, 1072 cpu_time_per_batch[idx], rate); 1073 } 1074 } 1075 1076 static inline int 1077 has_meter(void) 1078 { 1079 int i; 1080 1081 for (i = 0; i < MAX_ACTIONS_NUM; i++) { 1082 if (flow_actions[i] == 0) 1083 break; 1084 if (flow_actions[i] 1085 & FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_METER)) 1086 return 1; 1087 } 1088 return 0; 1089 } 1090 1091 static void 1092 create_meter_policy(void) 1093 { 1094 struct rte_mtr_error error; 1095 int ret, port_id; 1096 struct rte_mtr_meter_policy_params policy; 1097 uint16_t nr_ports; 1098 struct rte_flow_action actions[RTE_COLORS][MAX_ACTIONS_NUM]; 1099 int i; 1100 1101 memset(actions, 0, sizeof(actions)); 1102 memset(&policy, 0, sizeof(policy)); 1103 nr_ports = rte_eth_dev_count_avail(); 1104 for (port_id = 0; port_id < nr_ports; port_id++) { 1105 for (i = 0; i < RTE_COLORS; i++) 1106 fill_actions(actions[i], all_actions[i], 0, 0, 0, 1107 0, 0, 0, unique_data, rx_queues_count, 1108 dst_ports[port_id]); 1109 policy.actions[RTE_COLOR_GREEN] = actions[RTE_COLOR_GREEN]; 1110 policy.actions[RTE_COLOR_YELLOW] = actions[RTE_COLOR_YELLOW]; 1111 policy.actions[RTE_COLOR_RED] = actions[RTE_COLOR_RED]; 1112 policy_id[port_id] = port_id + 10; 1113 ret = rte_mtr_meter_policy_add(port_id, policy_id[port_id], 1114 &policy, &error); 1115 if (ret) { 1116 fprintf(stderr, "port %d: failed to create meter policy\n", 1117 port_id); 1118 policy_id[port_id] = UINT32_MAX; 1119 } 1120 memset(actions, 0, sizeof(actions)); 1121 } 1122 } 1123 1124 
/*
 * Delete the per-port meter policies created by create_meter_policy().
 * Exits the application on any deletion failure.
 */
static void
destroy_meter_policy(void)
{
	struct rte_mtr_error error;
	uint16_t nr_ports;
	int port_id;

	nr_ports = rte_eth_dev_count_avail();
	for (port_id = 0; port_id < nr_ports; port_id++) {
		/* If port outside portmask */
		if (!((ports_mask >> port_id) & 0x1))
			continue;

		if (rte_mtr_meter_policy_delete
			(port_id, policy_id[port_id], &error)) {
			fprintf(stderr, "port %u: failed to delete meter policy\n",
				port_id);
			rte_exit(EXIT_FAILURE, "Error: Failed to delete meter policy.\n");
		}
	}
}

/*
 * Create one meter object with id @counter on @port_id, bound to the
 * default meter profile.  When policy_mtr is set, the meter is attached
 * to the port's pre-created policy (shared == 0); otherwise it is
 * created as a shared meter (shared == 1).  Exits on failure.
 */
static void
create_meter_rule(int port_id, uint32_t counter)
{
	int ret;
	struct rte_mtr_params params;
	struct rte_mtr_error error;

	memset(&params, 0, sizeof(struct rte_mtr_params));
	params.meter_enable = 1;
	params.stats_mask = 0xffff;
	params.use_prev_mtr_color = 0;
	params.dscp_table = NULL;

	/* Create meter with the default profile. */
	params.meter_profile_id = DEFAULT_METER_PROF_ID;

	if (!policy_mtr) {
		ret = rte_mtr_create(port_id, counter, &params, 1, &error);
	} else {
		params.meter_policy_id = policy_id[port_id];
		ret = rte_mtr_create(port_id, counter, &params, 0, &error);
	}

	if (ret != 0) {
		printf("Port %u create meter idx(%d) error(%d) message: %s\n",
			port_id, counter, error.type,
			error.message ?
			error.message : "(no stated reason)");
		rte_exit(EXIT_FAILURE, "Error in creating meter\n");
	}
}

/*
 * Destroy meter @counter on @port_id.  When policy metering is enabled,
 * also delete the port's meter policy the first time through (slot is
 * marked UINT32_MAX afterwards so it is deleted only once).
 */
static void
destroy_meter_rule(int port_id, uint32_t counter)
{
	struct rte_mtr_error error;

	if (policy_mtr && policy_id[port_id] != UINT32_MAX) {
		if (rte_mtr_meter_policy_delete(port_id, policy_id[port_id],
			&error))
			fprintf(stderr, "Error: Failed to delete meter policy\n");
		policy_id[port_id] = UINT32_MAX;
	}
	if (rte_mtr_destroy(port_id, counter, &error)) {
		fprintf(stderr, "Port %d: Failed to delete meter.\n",
			port_id);
		rte_exit(EXIT_FAILURE, "Error in deleting meter rule");
	}
}

/*
 * Create or destroy (per @ops) this core's share of the meters on
 * @port_id, timing each batch of rules_batch operations and recording
 * the total time into mc_pool for the end-of-run summary.
 */
static void
meters_handler(int port_id, uint8_t core_id, uint8_t ops)
{
	uint64_t start_batch;
	double cpu_time_used, insertion_rate;
	int rules_count_per_core, rules_batch_idx;
	uint32_t counter, start_counter = 0, end_counter;
	double cpu_time_per_batch[MAX_BATCHES_COUNT] = { 0 };

	rules_count_per_core = rules_count / mc_pool.cores_count;

	/* Each core handles a contiguous [start, end) range of meter ids. */
	if (core_id)
		start_counter = core_id * rules_count_per_core;
	end_counter = (core_id + 1) * rules_count_per_core;

	cpu_time_used = 0;
	start_batch = rte_get_timer_cycles();
	for (counter = start_counter; counter < end_counter; counter++) {
		if (ops == METER_CREATE)
			create_meter_rule(port_id, counter);
		else
			destroy_meter_rule(port_id, counter);
		/*
		 * Save the insertion rate for rules batch.
		 * Check if the insertion reached the rules
		 * patch counter, then save the insertion rate
		 * for this batch.
		 */
		if (!((counter + 1) % rules_batch)) {
			rules_batch_idx = ((counter + 1) / rules_batch) - 1;
			cpu_time_per_batch[rules_batch_idx] =
				((double)(rte_get_timer_cycles() - start_batch))
				/ rte_get_timer_hz();
			cpu_time_used += cpu_time_per_batch[rules_batch_idx];
			start_batch = rte_get_timer_cycles();
		}
	}

	/* Print insertion rates for all batches */
	if (dump_iterations)
		print_rules_batches(cpu_time_per_batch);

	/* NOTE(review): cpu_time_used stays 0 if no full batch completed,
	 * which makes this division produce inf — confirm acceptable. */
	insertion_rate =
		((double) (rules_count_per_core / cpu_time_used) / 1000);

	/* Insertion rate for all rules in one core */
	printf(":: Port %d :: Core %d Meter %s :: start @[%d] - end @[%d],"
		" use:%.02fs, rate:%.02fk Rule/Sec\n",
		port_id, core_id, ops == METER_CREATE ? "create" : "delete",
		start_counter, end_counter - 1,
		cpu_time_used, insertion_rate);

	if (ops == METER_CREATE)
		mc_pool.meters_record.insertion[port_id][core_id]
			= cpu_time_used;
	else
		mc_pool.meters_record.deletion[port_id][core_id]
			= cpu_time_used;
}

/*
 * Delete the default meter profile from every port selected by
 * ports_mask.  Exits the application on failure.
 */
static void
destroy_meter_profile(void)
{
	struct rte_mtr_error error;
	uint16_t nr_ports;
	int port_id;

	nr_ports = rte_eth_dev_count_avail();
	for (port_id = 0; port_id < nr_ports; port_id++) {
		/* If port outside portmask */
		if (!((ports_mask >> port_id) & 0x1))
			continue;

		if (rte_mtr_meter_profile_delete
			(port_id, DEFAULT_METER_PROF_ID, &error)) {
			printf("Port %u del profile error(%d) message: %s\n",
				port_id, error.type,
				error.message ?
				error.message : "(no stated reason)");
			rte_exit(EXIT_FAILURE, "Error: Destroy meter profile Failed!\n");
		}
	}
}

/*
 * Add one srTCM (RFC 2697) meter profile per selected port.
 * CIR/CBS/EBS come from the command line (meter_profile_values[]),
 * falling back to build-time defaults when unset.
 */
static void
create_meter_profile(void)
{
	uint16_t nr_ports;
	int ret, port_id;
	struct rte_mtr_meter_profile mp;
	struct rte_mtr_error error;

	/*
	 * Currently, only create one meter profile for one port:
	 * 1 meter profile -> N meter rules -> N rte flows.
	 */
	memset(&mp, 0, sizeof(struct rte_mtr_meter_profile));
	nr_ports = rte_eth_dev_count_avail();
	for (port_id = 0; port_id < nr_ports; port_id++) {
		/* If port outside portmask */
		if (!((ports_mask >> port_id) & 0x1))
			continue;
		mp.alg = RTE_MTR_SRTCM_RFC2697;
		mp.srtcm_rfc2697.cir = meter_profile_values[0] ?
			meter_profile_values[0] : METER_CIR;
		mp.srtcm_rfc2697.cbs = meter_profile_values[1] ?
			meter_profile_values[1] : METER_CIR / 8;
		mp.srtcm_rfc2697.ebs = meter_profile_values[2];
		mp.packet_mode = packet_mode;
		ret = rte_mtr_meter_profile_add
			(port_id, DEFAULT_METER_PROF_ID, &mp, &error);
		if (ret != 0) {
			printf("Port %u create Profile error(%d) message: %s\n",
				port_id, error.type,
				error.message ?
				error.message : "(no stated reason)");
			rte_exit(EXIT_FAILURE, "Error: Creation meter profile Failed!\n");
		}
	}
}

/*
 * Destroy this core's share of the flows on @port_id, timing each batch
 * of rules_batch deletions.  @flows_list is the NULL-terminated pointer
 * array returned by insert_flows(); iteration stops at the first NULL
 * entry.  Records total deletion time into mc_pool.
 */
static inline void
destroy_flows(int port_id, uint8_t core_id, struct rte_flow **flows_list)
{
	struct rte_flow_error error;
	/* NOTE(review): these hold rte_get_timer_cycles() values (uint64_t)
	 * in a clock_t — confirm clock_t is wide enough on target ABIs. */
	clock_t start_batch, end_batch;
	double cpu_time_used = 0;
	double deletion_rate;
	double cpu_time_per_batch[MAX_BATCHES_COUNT] = { 0 };
	double delta;
	uint32_t i;
	int rules_batch_idx;
	int rules_count_per_core;

	rules_count_per_core = rules_count / mc_pool.cores_count;
	/* If group > 0 , should add 1 flow which created in group 0 */
	if (flow_group > 0 && core_id == 0)
		rules_count_per_core++;

	start_batch = rte_get_timer_cycles();
	for (i = 0; i < (uint32_t) rules_count_per_core; i++) {
		if (flows_list[i] == 0)
			break;

		/* Poison the error struct so stale fields are detectable. */
		memset(&error, 0x33, sizeof(error));
		if (rte_flow_destroy(port_id, flows_list[i], &error)) {
			print_flow_error(error);
			rte_exit(EXIT_FAILURE, "Error in deleting flow\n");
		}

		/*
		 * Save the deletion rate for rules batch.
		 * Check if the deletion reached the rules
		 * patch counter, then save the deletion rate
		 * for this batch.
		 */
		if (!((i + 1) % rules_batch)) {
			end_batch = rte_get_timer_cycles();
			delta = (double) (end_batch - start_batch);
			rules_batch_idx = ((i + 1) / rules_batch) - 1;
			cpu_time_per_batch[rules_batch_idx] = delta / rte_get_timer_hz();
			cpu_time_used += cpu_time_per_batch[rules_batch_idx];
			start_batch = rte_get_timer_cycles();
		}
	}

	/* Print deletion rates for all batches */
	if (dump_iterations)
		print_rules_batches(cpu_time_per_batch);

	/* Deletion rate for all rules */
	deletion_rate = ((double) (rules_count_per_core / cpu_time_used) / 1000);
	printf(":: Port %d :: Core %d :: Rules deletion rate -> %f K Rule/Sec\n",
		port_id, core_id, deletion_rate);
	printf(":: Port %d :: Core %d :: The time for deleting %d rules is %f seconds\n",
		port_id, core_id, rules_count_per_core, cpu_time_used);

	mc_pool.flows_record.deletion[port_id][core_id] = cpu_time_used;
}

/*
 * Insert this core's share of the flows on @port_id and return the list
 * of created flow handles (caller owns the rte_zmalloc'd array).
 * Core 0 additionally installs a single group-0 jump rule when
 * flow_group > 0.  Per-batch insertion times are recorded and the
 * total is stored into mc_pool.
 */
static struct rte_flow **
insert_flows(int port_id, uint8_t core_id, uint16_t dst_port_id)
{
	struct rte_flow **flows_list;
	struct rte_flow_error error;
	clock_t start_batch, end_batch;
	double first_flow_latency;
	double cpu_time_used;
	double insertion_rate;
	double cpu_time_per_batch[MAX_BATCHES_COUNT] = { 0 };
	double delta;
	uint32_t flow_index;
	uint32_t counter, start_counter = 0, end_counter;
	uint64_t global_items[MAX_ITEMS_NUM] = { 0 };
	uint64_t global_actions[MAX_ACTIONS_NUM] = { 0 };
	int rules_batch_idx;
	int rules_count_per_core;

	rules_count_per_core = rules_count / mc_pool.cores_count;

	/* Set boundaries of rules for each core.
*/ 1394 if (core_id) 1395 start_counter = core_id * rules_count_per_core; 1396 end_counter = (core_id + 1) * rules_count_per_core; 1397 1398 global_items[0] = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH); 1399 global_actions[0] = FLOW_ITEM_MASK(RTE_FLOW_ACTION_TYPE_JUMP); 1400 1401 flows_list = rte_zmalloc("flows_list", 1402 (sizeof(struct rte_flow *) * rules_count_per_core) + 1, 0); 1403 if (flows_list == NULL) 1404 rte_exit(EXIT_FAILURE, "No Memory available!\n"); 1405 1406 cpu_time_used = 0; 1407 flow_index = 0; 1408 if (flow_group > 0 && core_id == 0) { 1409 /* 1410 * Create global rule to jump into flow_group, 1411 * this way the app will avoid the default rules. 1412 * 1413 * This rule will be created only once. 1414 * 1415 * Global rule: 1416 * group 0 eth / end actions jump group <flow_group> 1417 */ 1418 flow = generate_flow(port_id, 0, flow_attrs, 1419 global_items, global_actions, 1420 flow_group, 0, 0, 0, 0, dst_port_id, core_id, 1421 rx_queues_count, unique_data, max_priority, &error); 1422 1423 if (flow == NULL) { 1424 print_flow_error(error); 1425 rte_exit(EXIT_FAILURE, "Error in creating flow\n"); 1426 } 1427 flows_list[flow_index++] = flow; 1428 } 1429 1430 start_batch = rte_get_timer_cycles(); 1431 for (counter = start_counter; counter < end_counter; counter++) { 1432 flow = generate_flow(port_id, flow_group, 1433 flow_attrs, flow_items, flow_actions, 1434 JUMP_ACTION_TABLE, counter, 1435 hairpin_queues_num, encap_data, 1436 decap_data, dst_port_id, 1437 core_id, rx_queues_count, 1438 unique_data, max_priority, &error); 1439 1440 if (!counter) { 1441 first_flow_latency = (double) (rte_get_timer_cycles() - start_batch); 1442 first_flow_latency /= rte_get_timer_hz(); 1443 /* In millisecond */ 1444 first_flow_latency *= 1000; 1445 printf(":: First Flow Latency :: Port %d :: First flow " 1446 "installed in %f milliseconds\n", 1447 port_id, first_flow_latency); 1448 } 1449 1450 if (force_quit) 1451 counter = end_counter; 1452 1453 if (!flow) { 1454 
print_flow_error(error); 1455 rte_exit(EXIT_FAILURE, "Error in creating flow\n"); 1456 } 1457 1458 flows_list[flow_index++] = flow; 1459 1460 /* 1461 * Save the insertion rate for rules batch. 1462 * Check if the insertion reached the rules 1463 * patch counter, then save the insertion rate 1464 * for this batch. 1465 */ 1466 if (!((counter + 1) % rules_batch)) { 1467 end_batch = rte_get_timer_cycles(); 1468 delta = (double) (end_batch - start_batch); 1469 rules_batch_idx = ((counter + 1) / rules_batch) - 1; 1470 cpu_time_per_batch[rules_batch_idx] = delta / rte_get_timer_hz(); 1471 cpu_time_used += cpu_time_per_batch[rules_batch_idx]; 1472 start_batch = rte_get_timer_cycles(); 1473 } 1474 } 1475 1476 /* Print insertion rates for all batches */ 1477 if (dump_iterations) 1478 print_rules_batches(cpu_time_per_batch); 1479 1480 printf(":: Port %d :: Core %d boundaries :: start @[%d] - end @[%d]\n", 1481 port_id, core_id, start_counter, end_counter - 1); 1482 1483 /* Insertion rate for all rules in one core */ 1484 insertion_rate = ((double) (rules_count_per_core / cpu_time_used) / 1000); 1485 printf(":: Port %d :: Core %d :: Rules insertion rate -> %f K Rule/Sec\n", 1486 port_id, core_id, insertion_rate); 1487 printf(":: Port %d :: Core %d :: The time for creating %d in rules %f seconds\n", 1488 port_id, core_id, rules_count_per_core, cpu_time_used); 1489 1490 mc_pool.flows_record.insertion[port_id][core_id] = cpu_time_used; 1491 return flows_list; 1492 } 1493 1494 static void 1495 flows_handler(uint8_t core_id) 1496 { 1497 struct rte_flow **flows_list; 1498 uint16_t port_idx = 0; 1499 uint16_t nr_ports; 1500 int port_id; 1501 1502 nr_ports = rte_eth_dev_count_avail(); 1503 1504 if (rules_batch > rules_count) 1505 rules_batch = rules_count; 1506 1507 printf(":: Rules Count per port: %d\n\n", rules_count); 1508 1509 for (port_id = 0; port_id < nr_ports; port_id++) { 1510 /* If port outside portmask */ 1511 if (!((ports_mask >> port_id) & 0x1)) 1512 continue; 1513 1514 
		/* Insertion part. */
		mc_pool.last_alloc[core_id] = (int64_t)dump_socket_mem(stdout);
		if (has_meter())
			meters_handler(port_id, core_id, METER_CREATE);
		flows_list = insert_flows(port_id, core_id,
			dst_ports[port_idx++]);
		if (flows_list == NULL)
			rte_exit(EXIT_FAILURE, "Error: Insertion Failed!\n");
		mc_pool.current_alloc[core_id] = (int64_t)dump_socket_mem(stdout);

		/* Deletion part. */
		if (delete_flag) {
			destroy_flows(port_id, core_id, flows_list);
			if (has_meter())
				meters_handler(port_id, core_id, METER_DELETE);
		}
		/* NOTE(review): flows_list itself is never rte_free'd here —
		 * confirm the leak is acceptable for this benchmark app. */
	}
}

/*
 * Aggregate per-core timings for one port and print latency and
 * throughput summaries for insertion (and deletion when enabled).
 *
 * @item: label printed in the header ("Meters:" / "Flows:").
 * @port: port the record belongs to.
 * @used_time: per-port, per-core insertion/deletion times in seconds.
 */
static void
dump_used_cpu_time(const char *item,
	uint16_t port, struct used_cpu_time *used_time)
{
	uint32_t i;
	/* Latency: total count of rte rules divided
	 * over max time used by thread between all
	 * threads time.
	 *
	 * Throughput: total count of rte rules divided
	 * over the average of the time consumed by all
	 * threads time.
	 */
	double insertion_latency_time;
	double insertion_throughput_time;
	double deletion_latency_time;
	double deletion_throughput_time;
	double insertion_latency, insertion_throughput;
	double deletion_latency, deletion_throughput;

	/* Save first insertion/deletion rates from first thread.
	 * Start comparing with all threads, if any thread used
	 * time more than current saved, replace it.
	 *
	 * Thus in the end we will have the max time used for
	 * insertion/deletion by one thread.
	 *
	 * As for memory consumption, save the min of all threads
	 * of last alloc, and save the max for all threads for
	 * current alloc.
	 */

	insertion_latency_time = used_time->insertion[port][0];
	deletion_latency_time = used_time->deletion[port][0];
	insertion_throughput_time = used_time->insertion[port][0];
	deletion_throughput_time = used_time->deletion[port][0];

	i = mc_pool.cores_count;
	while (i-- > 1) {
		insertion_throughput_time += used_time->insertion[port][i];
		deletion_throughput_time += used_time->deletion[port][i];
		if (insertion_latency_time < used_time->insertion[port][i])
			insertion_latency_time = used_time->insertion[port][i];
		if (deletion_latency_time < used_time->deletion[port][i])
			deletion_latency_time = used_time->deletion[port][i];
	}

	insertion_latency = ((double) (mc_pool.rules_count
		/ insertion_latency_time) / 1000);
	deletion_latency = ((double) (mc_pool.rules_count
		/ deletion_latency_time) / 1000);

	insertion_throughput_time /= mc_pool.cores_count;
	deletion_throughput_time /= mc_pool.cores_count;
	insertion_throughput = ((double) (mc_pool.rules_count
		/ insertion_throughput_time) / 1000);
	deletion_throughput = ((double) (mc_pool.rules_count
		/ deletion_throughput_time) / 1000);

	/* Latency stats */
	printf("\n%s\n:: [Latency | Insertion] All Cores :: Port %d :: ",
		item, port);
	printf("Total flows insertion rate -> %f K Rules/Sec\n",
		insertion_latency);
	printf(":: [Latency | Insertion] All Cores :: Port %d :: ", port);
	printf("The time for creating %d rules is %f seconds\n",
		mc_pool.rules_count, insertion_latency_time);

	/* Throughput stats */
	printf(":: [Throughput | Insertion] All Cores :: Port %d :: ", port);
	printf("Total flows insertion rate -> %f K Rules/Sec\n",
		insertion_throughput);
	printf(":: [Throughput | Insertion] All Cores :: Port %d :: ", port);
	printf("The average time for creating %d rules is %f seconds\n",
		mc_pool.rules_count, insertion_throughput_time);

	if (delete_flag) {
		/* Latency stats */
		printf(":: [Latency | Deletion] All Cores :: Port %d :: Total "
			"deletion rate -> %f K Rules/Sec\n",
			port, deletion_latency);
		printf(":: [Latency | Deletion] All Cores :: Port %d :: ",
			port);
		printf("The time for deleting %d rules is %f seconds\n",
			mc_pool.rules_count, deletion_latency_time);

		/* Throughput stats */
		printf(":: [Throughput | Deletion] All Cores :: Port %d :: Total "
			"deletion rate -> %f K Rules/Sec\n",
			port, deletion_throughput);
		printf(":: [Throughput | Deletion] All Cores :: Port %d :: ",
			port);
		printf("The average time for deleting %d rules is %f seconds\n",
			mc_pool.rules_count, deletion_throughput_time);
	}
}

/*
 * Print the approximate per-flow memory cost on @port: the difference
 * between the max post-insertion allocation and the min pre-insertion
 * allocation across all cores, divided by the total rule count.
 */
static void
dump_used_mem(uint16_t port)
{
	uint32_t i;
	int64_t last_alloc, current_alloc;
	int flow_size_in_bytes;

	last_alloc = mc_pool.last_alloc[0];
	current_alloc = mc_pool.current_alloc[0];

	i = mc_pool.cores_count;
	while (i-- > 1) {
		if (last_alloc > mc_pool.last_alloc[i])
			last_alloc = mc_pool.last_alloc[i];
		if (current_alloc < mc_pool.current_alloc[i])
			current_alloc = mc_pool.current_alloc[i];
	}

	flow_size_in_bytes = (current_alloc - last_alloc) / mc_pool.rules_count;
	printf("\n:: Port %d :: rte_flow size in DPDK layer: %d Bytes\n",
		port, flow_size_in_bytes);
}

/*
 * Per-lcore entry point launched on all cores: map this lcore to a
 * dense 0..cores_count-1 index, run the flow/meter workload, then have
 * core 0 wait for the others and print the aggregated results.
 */
static int
run_rte_flow_handler_cores(void *data __rte_unused)
{
	uint16_t port;
	int lcore_counter = 0;
	int lcore_id = rte_lcore_id();
	int i;

	RTE_LCORE_FOREACH(i) {
		/* If core not needed return.
		 */
		if (lcore_id == i) {
			printf(":: lcore %d mapped with index %d\n", lcore_id, lcore_counter);
			if (lcore_counter >= (int) mc_pool.cores_count)
				return 0;
			break;
		}
		lcore_counter++;
	}
	/* From here on, lcore_id is the dense 0-based worker index. */
	lcore_id = lcore_counter;

	if (lcore_id >= (int) mc_pool.cores_count)
		return 0;

	mc_pool.rules_count = rules_count;

	flows_handler(lcore_id);

	/* Only main core to print total results. */
	if (lcore_id != 0)
		return 0;

	/* Make sure all cores finished insertion/deletion process. */
	rte_eal_mp_wait_lcore();

	RTE_ETH_FOREACH_DEV(port) {
		/* If port outside portmask */
		if (!((ports_mask >> port) & 0x1))
			continue;
		if (has_meter())
			dump_used_cpu_time("Meters:",
				port, &mc_pool.meters_record);
		dump_used_cpu_time("Flows:",
			port, &mc_pool.flows_record);
		dump_used_mem(port);
	}

	return 0;
}

/*
 * SIGINT/SIGTERM handler: request a graceful stop by raising the
 * force_quit flag polled by the worker and stats loops.
 */
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\n\nSignal %d received, preparing to exit...\n",
			signum);
		printf("Error: Stats are wrong due to sudden signal!\n\n");
		force_quit = true;
	}
}

/*
 * Receive one burst on (rx_port, rx_queue) into li->pkts, update the
 * lcore rx counter and return the number of packets received.
 */
static inline uint16_t
do_rx(struct lcore_info *li, uint16_t rx_port, uint16_t rx_queue)
{
	uint16_t cnt = 0;
	cnt = rte_eth_rx_burst(rx_port, rx_queue, li->pkts, MAX_PKT_BURST);
	li->rx_pkts += cnt;
	return cnt;
}

/*
 * Transmit @cnt packets from li->pkts on (tx_port, tx_queue); packets
 * the device did not accept are counted as drops and freed.
 */
static inline void
do_tx(struct lcore_info *li, uint16_t cnt, uint16_t tx_port,
	uint16_t tx_queue)
{
	uint16_t nr_tx = 0;
	uint16_t i;

	nr_tx = rte_eth_tx_burst(tx_port, tx_queue, li->pkts, cnt);
	li->tx_pkts += nr_tx;
	li->tx_drops += cnt - nr_tx;

	for (i = nr_tx; i < cnt; i++)
		rte_pktmbuf_free(li->pkts[i]);
}

/*
 * Stats-core loop: once per second, print per-core tx/rx/drop deltas
 * (rewriting the previous table in place via ANSI cursor-up) until
 * force_quit is raised.
 */
static void
packet_per_second_stats(void)
{
	struct lcore_info *old;
	struct lcore_info *li,
*oli; 1742 int nr_lines = 0; 1743 int i; 1744 1745 old = rte_zmalloc("old", 1746 sizeof(struct lcore_info) * RTE_MAX_LCORE, 0); 1747 if (old == NULL) 1748 rte_exit(EXIT_FAILURE, "No Memory available!\n"); 1749 1750 memcpy(old, lcore_infos, 1751 sizeof(struct lcore_info) * RTE_MAX_LCORE); 1752 1753 while (!force_quit) { 1754 uint64_t total_tx_pkts = 0; 1755 uint64_t total_rx_pkts = 0; 1756 uint64_t total_tx_drops = 0; 1757 uint64_t tx_delta, rx_delta, drops_delta; 1758 int nr_valid_core = 0; 1759 1760 sleep(1); 1761 1762 if (nr_lines) { 1763 char go_up_nr_lines[16]; 1764 1765 sprintf(go_up_nr_lines, "%c[%dA\r", 27, nr_lines); 1766 printf("%s\r", go_up_nr_lines); 1767 } 1768 1769 printf("\n%6s %16s %16s %16s\n", "core", "tx", "tx drops", "rx"); 1770 printf("%6s %16s %16s %16s\n", "------", "----------------", 1771 "----------------", "----------------"); 1772 nr_lines = 3; 1773 for (i = 0; i < RTE_MAX_LCORE; i++) { 1774 li = &lcore_infos[i]; 1775 oli = &old[i]; 1776 if (li->mode != LCORE_MODE_PKT) 1777 continue; 1778 1779 tx_delta = li->tx_pkts - oli->tx_pkts; 1780 rx_delta = li->rx_pkts - oli->rx_pkts; 1781 drops_delta = li->tx_drops - oli->tx_drops; 1782 printf("%6d %'16"PRId64" %'16"PRId64" %'16"PRId64"\n", 1783 i, tx_delta, drops_delta, rx_delta); 1784 1785 total_tx_pkts += tx_delta; 1786 total_rx_pkts += rx_delta; 1787 total_tx_drops += drops_delta; 1788 1789 nr_valid_core++; 1790 nr_lines += 1; 1791 } 1792 1793 if (nr_valid_core > 1) { 1794 printf("%6s %'16"PRId64" %'16"PRId64" %'16"PRId64"\n", 1795 "total", total_tx_pkts, total_tx_drops, 1796 total_rx_pkts); 1797 nr_lines += 1; 1798 } 1799 1800 memcpy(old, lcore_infos, 1801 sizeof(struct lcore_info) * RTE_MAX_LCORE); 1802 } 1803 } 1804 1805 static int 1806 start_forwarding(void *data __rte_unused) 1807 { 1808 int lcore = rte_lcore_id(); 1809 int stream_id; 1810 uint16_t cnt; 1811 struct lcore_info *li = &lcore_infos[lcore]; 1812 1813 if (!li->mode) 1814 return 0; 1815 1816 if (li->mode == LCORE_MODE_STATS) { 
		printf(":: started stats on lcore %u\n", lcore);
		packet_per_second_stats();
		return 0;
	}

	/* Packet core: poll every assigned stream until asked to quit. */
	while (!force_quit)
		for (stream_id = 0; stream_id < MAX_STREAMS; stream_id++) {
			if (li->streams[stream_id].rx_port == -1)
				continue;

			cnt = do_rx(li,
				li->streams[stream_id].rx_port,
				li->streams[stream_id].rx_queue);
			if (cnt)
				do_tx(li, cnt,
					li->streams[stream_id].tx_port,
					li->streams[stream_id].tx_queue);
		}
	return 0;
}

/*
 * Build the lcore -> stream mapping used by the forwarding stage:
 * reserve the first lcore for stats, spread nr_port * rx_queues_count
 * streams across the remaining lcores, and assign each stream so that
 * packets are sent back out of the same port/queue they arrived on.
 */
static void
init_lcore_info(void)
{
	int i, j;
	unsigned int lcore;
	uint16_t nr_port;
	uint16_t queue;
	int port;
	int stream_id = 0;
	int streams_per_core;
	int unassigned_streams;
	int nb_fwd_streams;
	nr_port = rte_eth_dev_count_avail();

	/* First logical core is reserved for stats printing */
	lcore = rte_get_next_lcore(-1, 0, 0);
	lcore_infos[lcore].mode = LCORE_MODE_STATS;

	/*
	 * Initialize all cores
	 * All cores at first must have -1 value in all streams
	 * This means that this stream is not used, or not set
	 * yet.
	 */
	for (i = 0; i < RTE_MAX_LCORE; i++)
		for (j = 0; j < MAX_STREAMS; j++) {
			lcore_infos[i].streams[j].tx_port = -1;
			lcore_infos[i].streams[j].rx_port = -1;
			lcore_infos[i].streams[j].tx_queue = -1;
			lcore_infos[i].streams[j].rx_queue = -1;
			lcore_infos[i].streams_nb = 0;
		}

	/*
	 * Calculate the total streams count.
	 * Also distribute those streams count between the available
	 * logical cores except first core, since it's reserved for
	 * stats prints.
	 */
	nb_fwd_streams = nr_port * rx_queues_count;
	if ((int)(nb_lcores - 1) >= nb_fwd_streams)
		/* More workers than streams: one stream per worker. */
		for (i = 0; i < (int)(nb_lcores - 1); i++) {
			lcore = rte_get_next_lcore(lcore, 0, 0);
			lcore_infos[lcore].streams_nb = 1;
		}
	else {
		/* Spread streams evenly; remainder goes one per core. */
		streams_per_core = nb_fwd_streams / (nb_lcores - 1);
		unassigned_streams = nb_fwd_streams % (nb_lcores - 1);
		for (i = 0; i < (int)(nb_lcores - 1); i++) {
			lcore = rte_get_next_lcore(lcore, 0, 0);
			lcore_infos[lcore].streams_nb = streams_per_core;
			if (unassigned_streams) {
				lcore_infos[lcore].streams_nb++;
				unassigned_streams--;
			}
		}
	}

	/*
	 * Set the streams for the cores according to each logical
	 * core stream count.
	 * The streams is built on the design of what received should
	 * forward as well, this means that if you received packets on
	 * port 0 queue 0 then the same queue should forward the
	 * packets, using the same logical core.
	 */
	lcore = rte_get_next_lcore(-1, 0, 0);
	for (port = 0; port < nr_port; port++) {
		/* Create FWD stream */
		for (queue = 0; queue < rx_queues_count; queue++) {
			if (!lcore_infos[lcore].streams_nb ||
				!(stream_id % lcore_infos[lcore].streams_nb)) {
				lcore = rte_get_next_lcore(lcore, 0, 0);
				lcore_infos[lcore].mode = LCORE_MODE_PKT;
				stream_id = 0;
			}
			lcore_infos[lcore].streams[stream_id].rx_queue = queue;
			lcore_infos[lcore].streams[stream_id].tx_queue = queue;
			lcore_infos[lcore].streams[stream_id].rx_port = port;
			lcore_infos[lcore].streams[stream_id].tx_port = port;
			stream_id++;
		}
	}

	/* Print all streams */
	printf(":: Stream -> core id[N]: (rx_port, rx_queue)->(tx_port, tx_queue)\n");
	for (i = 0; i < RTE_MAX_LCORE; i++)
		for (j = 0; j < MAX_STREAMS; j++) {
			/* No streams for this core */
			if (lcore_infos[i].streams[j].tx_port == -1)
				break;
			printf("Stream -> core id[%d]: (%d,%d)->(%d,%d)\n",
				i,
				lcore_infos[i].streams[j].rx_port,
				lcore_infos[i].streams[j].rx_queue,
				lcore_infos[i].streams[j].tx_port,
				lcore_infos[i].streams[j].tx_queue);
		}
}

/*
 * Create the mbuf pool and bring up every available port: negotiate Rx
 * metadata delivery, configure Rx/Tx (plus optional hairpin) queues,
 * enable promiscuous mode and start the device.  Exits on any failure.
 */
static void
init_port(void)
{
	int ret;
	uint16_t std_queue;
	uint16_t hairpin_queue;
	uint16_t port_id;
	uint16_t nr_ports;
	uint16_t nr_queues;
	struct rte_eth_hairpin_conf hairpin_conf = {
		.peer_count = 1,
	};
	struct rte_eth_conf port_conf = {
		.rx_adv_conf = {
			.rss_conf.rss_hf =
				GET_RSS_HF(),
		}
	};
	struct rte_eth_txconf txq_conf;
	struct rte_eth_rxconf rxq_conf;
	struct rte_eth_dev_info dev_info;

	/* Hairpin queues are appended after the standard Rx queues. */
	nr_queues = rx_queues_count;
	if (hairpin_queues_num != 0)
		nr_queues = rx_queues_count + hairpin_queues_num;

	nr_ports = rte_eth_dev_count_avail();
	if (nr_ports == 0)
		rte_exit(EXIT_FAILURE, "Error: no port detected\n");

	mbuf_mp = rte_pktmbuf_pool_create("mbuf_pool",
		total_mbuf_num, mbuf_cache_size,
		0, mbuf_size,
		rte_socket_id());
	if (mbuf_mp == NULL)
		rte_exit(EXIT_FAILURE, "Error: can't init mbuf pool\n");

	for (port_id = 0; port_id < nr_ports; port_id++) {
		uint64_t rx_metadata = 0;

		/* Ask the PMD to deliver FLAG/MARK metadata with Rx mbufs. */
		rx_metadata |= RTE_ETH_RX_METADATA_USER_FLAG;
		rx_metadata |= RTE_ETH_RX_METADATA_USER_MARK;

		ret = rte_eth_rx_metadata_negotiate(port_id, &rx_metadata);
		if (ret == 0) {
			if (!(rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG)) {
				printf(":: flow action FLAG will not affect Rx mbufs on port=%u\n",
					port_id);
			}

			if (!(rx_metadata & RTE_ETH_RX_METADATA_USER_MARK)) {
				printf(":: flow action MARK will not affect Rx mbufs on port=%u\n",
					port_id);
			}
		} else if (ret != -ENOTSUP) {
			rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port=%u: %s\n",
				port_id, rte_strerror(-ret));
		}

		ret =
rte_eth_dev_info_get(port_id, &dev_info);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				"Error during getting device"
				" (port %u) info: %s\n",
				port_id, strerror(-ret));

		/* Only request offloads the device actually supports. */
		port_conf.txmode.offloads &= dev_info.tx_offload_capa;
		port_conf.rxmode.offloads &= dev_info.rx_offload_capa;

		printf(":: initializing port: %d\n", port_id);

		ret = rte_eth_dev_configure(port_id, nr_queues,
				nr_queues, &port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				":: cannot configure device: err=%d, port=%u\n",
				ret, port_id);

		rxq_conf = dev_info.default_rxconf;
		for (std_queue = 0; std_queue < rx_queues_count; std_queue++) {
			ret = rte_eth_rx_queue_setup(port_id, std_queue, rxd_count,
					rte_eth_dev_socket_id(port_id),
					&rxq_conf,
					mbuf_mp);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					":: Rx queue setup failed: err=%d, port=%u\n",
					ret, port_id);
		}

		txq_conf = dev_info.default_txconf;
		for (std_queue = 0; std_queue < tx_queues_count; std_queue++) {
			ret = rte_eth_tx_queue_setup(port_id, std_queue, txd_count,
					rte_eth_dev_socket_id(port_id),
					&txq_conf);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					":: Tx queue setup failed: err=%d, port=%u\n",
					ret, port_id);
		}

		/* Catch all packets from traffic generator. */
		ret = rte_eth_promiscuous_enable(port_id);
		if (ret != 0)
			rte_exit(EXIT_FAILURE,
				":: promiscuous mode enable failed: err=%s, port=%u\n",
				rte_strerror(-ret), port_id);

		if (hairpin_queues_num != 0) {
			/*
			 * Configure peer which represents hairpin Tx.
			 * Hairpin queue numbers start after standard queues
			 * (rx_queues_count and tx_queues_count).
			 */
			for (hairpin_queue = rx_queues_count, std_queue = 0;
					hairpin_queue < nr_queues;
					hairpin_queue++, std_queue++) {
				hairpin_conf.peers[0].port = port_id;
				hairpin_conf.peers[0].queue =
					std_queue + tx_queues_count;
				hairpin_conf.use_locked_device_memory =
					!!(hairpin_conf_mask & HAIRPIN_RX_CONF_LOCKED_MEMORY);
				hairpin_conf.use_rte_memory =
					!!(hairpin_conf_mask & HAIRPIN_RX_CONF_RTE_MEMORY);
				hairpin_conf.force_memory =
					!!(hairpin_conf_mask & HAIRPIN_RX_CONF_FORCE_MEMORY);
				ret = rte_eth_rx_hairpin_queue_setup(
						port_id, hairpin_queue,
						rxd_count, &hairpin_conf);
				if (ret != 0)
					rte_exit(EXIT_FAILURE,
						":: Hairpin rx queue setup failed: err=%d, port=%u\n",
						ret, port_id);
			}

			for (hairpin_queue = tx_queues_count, std_queue = 0;
					hairpin_queue < nr_queues;
					hairpin_queue++, std_queue++) {
				hairpin_conf.peers[0].port = port_id;
				hairpin_conf.peers[0].queue =
					std_queue + rx_queues_count;
				hairpin_conf.use_locked_device_memory =
					!!(hairpin_conf_mask & HAIRPIN_TX_CONF_LOCKED_MEMORY);
				hairpin_conf.use_rte_memory =
					!!(hairpin_conf_mask & HAIRPIN_TX_CONF_RTE_MEMORY);
				hairpin_conf.force_memory =
					!!(hairpin_conf_mask & HAIRPIN_TX_CONF_FORCE_MEMORY);
				ret = rte_eth_tx_hairpin_queue_setup(
						port_id, hairpin_queue,
						txd_count, &hairpin_conf);
				if (ret != 0)
					rte_exit(EXIT_FAILURE,
						":: Hairpin tx queue setup failed: err=%d, port=%u\n",
						ret, port_id);
			}
		}

		ret = rte_eth_dev_start(port_id);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				"rte_eth_dev_start:err=%d, port=%u\n",
				ret, port_id);

		printf(":: initializing port: %d done\n", port_id);
	}
}

/*
 * Entry point: initialize EAL, apply defaults and CLI overrides, bring
 * up the ports, launch the insertion/deletion workload on all lcores,
 * optionally start forwarding, then flush flows and shut the ports down.
 */
int
main(int argc, char **argv)
{
	int ret;
	uint16_t port;
	struct rte_flow_error error;

	ret = rte_eal_init(argc, argv);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "EAL init failed\n");

	force_quit = false;
	dump_iterations = false;
	rules_count = DEFAULT_RULES_COUNT;
	rules_batch = DEFAULT_RULES_BATCH;
	delete_flag = false;
	dump_socket_mem_flag = false;
	flow_group = DEFAULT_GROUP;
	unique_data = false;

	rx_queues_count = (uint8_t) RXQ_NUM;
	tx_queues_count = (uint8_t) TXQ_NUM;
	rxd_count = (uint8_t) NR_RXD;
	txd_count = (uint8_t) NR_TXD;
	mbuf_size = (uint32_t) MBUF_SIZE;
	mbuf_cache_size = (uint32_t) MBUF_CACHE_SIZE;
	total_mbuf_num = (uint32_t) TOTAL_MBUF_NUM;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	/* Skip the EAL arguments already consumed by rte_eal_init(). */
	argc -= ret;
	argv += ret;
	if (argc > 1)
		args_parse(argc, argv);

	/* For more fancy, localised integer formatting. */
	setlocale(LC_NUMERIC, "");

	init_port();

	nb_lcores = rte_lcore_count();
	if (nb_lcores <= 1)
		rte_exit(EXIT_FAILURE, "This app needs at least two cores\n");

	printf(":: Flows Count per port: %d\n\n", rules_count);

	rte_srand(rand_seed);

	if (has_meter()) {
		create_meter_profile();
		if (policy_mtr)
			create_meter_policy();
	}
	rte_eal_mp_remote_launch(run_rte_flow_handler_cores, NULL, CALL_MAIN);

	if (enable_fwd) {
		init_lcore_info();
		rte_eal_mp_remote_launch(start_forwarding, NULL, CALL_MAIN);
	}
	if (has_meter() && delete_flag) {
		destroy_meter_profile();
		if (policy_mtr)
			destroy_meter_policy();
	}

	RTE_ETH_FOREACH_DEV(port) {
		rte_flow_flush(port, &error);
		if (rte_eth_dev_stop(port) != 0)
			printf("Failed to stop device on port %u\n", port);
		rte_eth_dev_close(port);
	}
	printf("\nBye ...\n");
	return 0;
}