1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2020 Mellanox Technologies, Ltd
3  *
4  * This file contains the application's main entry point.
5  * The application lets the user measure the insertion rate of
6  * specific rte_flow rules under a stress state of ~4M rules.
7  *
8  * It also provides a packets-per-second (PPS) measurement after all
9  * rules are installed: the user may send traffic that matches the
10  * rules to check performance or functionality after the stress.
11  *
12  * Flow insertion runs on all ports first, and the results are
13  * printed once it completes. The application then enters packet
14  * forwarding mode: it starts receiving traffic, if any, forwards
15  * it back, and reports a packets-per-second measurement for the
16  * forwarded traffic.
17  */
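/*
 * Illustrative invocation (not part of this file; the binary name, EAL
 * options and PCI address are assumptions that depend on the local
 * build and setup):
 *
 *   dpdk-test-flow-perf -n 4 -a 0000:03:00.0 -- \
 *       --ingress --ether --ipv4 --queue \
 *       --rules-count=1000000 --rules-batch=100000
 *
 * This would insert 1M ingress rules matching eth/ipv4 with a queue
 * action, in batches of 100K, and report the measured insertion rate.
 */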
18 
19 #include <locale.h>
20 #include <stdio.h>
21 #include <stdlib.h>
22 #include <string.h>
23 #include <stdint.h>
24 #include <inttypes.h>
25 #include <stdarg.h>
26 #include <errno.h>
27 #include <getopt.h>
28 #include <stdbool.h>
29 #include <sys/time.h>
30 #include <signal.h>
31 #include <unistd.h>
32 
33 #include <rte_malloc.h>
34 #include <rte_mempool.h>
35 #include <rte_mbuf.h>
36 #include <rte_ethdev.h>
37 #include <rte_flow.h>
38 #include <rte_mtr.h>
39 
40 #include "config.h"
41 #include "actions_gen.h"
42 #include "flow_gen.h"
43 
44 #define MAX_BATCHES_COUNT          100
45 #define DEFAULT_RULES_COUNT    4000000
46 #define DEFAULT_RULES_BATCH     100000
47 #define DEFAULT_GROUP                0
48 
49 #define HAIRPIN_RX_CONF_FORCE_MEMORY  (0x0001)
50 #define HAIRPIN_TX_CONF_FORCE_MEMORY  (0x0002)
51 
52 #define HAIRPIN_RX_CONF_LOCKED_MEMORY (0x0010)
53 #define HAIRPIN_RX_CONF_RTE_MEMORY    (0x0020)
54 
55 #define HAIRPIN_TX_CONF_LOCKED_MEMORY (0x0100)
56 #define HAIRPIN_TX_CONF_RTE_MEMORY    (0x0200)
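/*
 * Illustrative note: the --hairpin-conf bitmask is a bitwise OR of the
 * flags above. For example, 0x0011 combines
 * HAIRPIN_RX_CONF_FORCE_MEMORY (0x0001) with
 * HAIRPIN_RX_CONF_LOCKED_MEMORY (0x0010), i.e. it forces the Rx hairpin
 * memory configuration and requests locked memory for the Rx side.
 */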
57 
58 struct rte_flow *flow;
59 static uint8_t flow_group;
60 
61 static uint64_t encap_data;
62 static uint64_t decap_data;
63 static uint64_t all_actions[RTE_COLORS][MAX_ACTIONS_NUM];
64 static char *actions_str[RTE_COLORS];
65 
66 static uint64_t flow_items[MAX_ITEMS_NUM];
67 static uint64_t flow_actions[MAX_ACTIONS_NUM];
68 static uint64_t flow_attrs[MAX_ATTRS_NUM];
69 static uint32_t policy_id[MAX_PORTS];
70 static uint8_t items_idx, actions_idx, attrs_idx;
71 
72 static uint64_t ports_mask;
73 static uint64_t hairpin_conf_mask;
74 static uint16_t dst_ports[RTE_MAX_ETHPORTS];
75 static volatile bool force_quit;
76 static bool dump_iterations;
77 static bool delete_flag;
78 static bool dump_socket_mem_flag;
79 static bool enable_fwd;
80 static bool unique_data;
81 static bool policy_mtr;
82 static bool packet_mode;
83 
84 static uint8_t rx_queues_count;
85 static uint8_t tx_queues_count;
86 static uint8_t rxd_count;
87 static uint8_t txd_count;
88 static uint32_t mbuf_size;
89 static uint32_t mbuf_cache_size;
90 static uint32_t total_mbuf_num;
91 
92 static struct rte_mempool *mbuf_mp;
93 static uint32_t nb_lcores;
94 static uint32_t rules_count;
95 static uint32_t rules_batch;
96 static uint32_t hairpin_queues_num; /* total hairpin q number - default: 0 */
98 static uint8_t max_priority;
99 static uint32_t rand_seed;
100 static uint64_t meter_profile_values[3]; /* CIR CBS EBS values. */
101 
102 #define MAX_PKT_BURST    32
103 #define LCORE_MODE_PKT    1
104 #define LCORE_MODE_STATS  2
105 #define MAX_STREAMS      64
106 #define METER_CREATE	  1
107 #define METER_DELETE	  2
108 
109 struct stream {
110 	int tx_port;
111 	int tx_queue;
112 	int rx_port;
113 	int rx_queue;
114 };
115 
116 struct __rte_cache_aligned lcore_info {
117 	int mode;
118 	int streams_nb;
119 	struct stream streams[MAX_STREAMS];
120 	/* stats */
121 	uint64_t tx_pkts;
122 	uint64_t tx_drops;
123 	uint64_t rx_pkts;
124 	struct rte_mbuf *pkts[MAX_PKT_BURST];
125 };
126 
127 static struct lcore_info lcore_infos[RTE_MAX_LCORE];
128 
129 struct used_cpu_time {
130 	double insertion[MAX_PORTS][RTE_MAX_LCORE];
131 	double deletion[MAX_PORTS][RTE_MAX_LCORE];
132 };
133 
134 struct __rte_cache_aligned multi_cores_pool {
135 	uint32_t cores_count;
136 	uint32_t rules_count;
137 	struct used_cpu_time meters_record;
138 	struct used_cpu_time flows_record;
139 	int64_t last_alloc[RTE_MAX_LCORE];
140 	int64_t current_alloc[RTE_MAX_LCORE];
141 };
142 
143 static struct multi_cores_pool mc_pool = {
144 	.cores_count = 1,
145 };
146 
147 static const struct option_dict {
148 	const char *str;
149 	const uint64_t mask;
150 	uint64_t *map;
151 	uint8_t *map_idx;
152 
153 } flow_options[] = {
154 	{
155 		.str = "ether",
156 		.mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH),
157 		.map = &flow_items[0],
158 		.map_idx = &items_idx
159 	},
160 	{
161 		.str = "ipv4",
162 		.mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV4),
163 		.map = &flow_items[0],
164 		.map_idx = &items_idx
165 	},
166 	{
167 		.str = "ipv6",
168 		.mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_IPV6),
169 		.map = &flow_items[0],
170 		.map_idx = &items_idx
171 	},
172 	{
173 		.str = "vlan",
174 		.mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VLAN),
175 		.map = &flow_items[0],
176 		.map_idx = &items_idx
177 	},
178 	{
179 		.str = "tcp",
180 		.mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_TCP),
181 		.map = &flow_items[0],
182 		.map_idx = &items_idx
183 	},
184 	{
185 		.str = "udp",
186 		.mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_UDP),
187 		.map = &flow_items[0],
188 		.map_idx = &items_idx
189 	},
190 	{
191 		.str = "vxlan",
192 		.mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN),
193 		.map = &flow_items[0],
194 		.map_idx = &items_idx
195 	},
196 	{
197 		.str = "vxlan-gpe",
198 		.mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_VXLAN_GPE),
199 		.map = &flow_items[0],
200 		.map_idx = &items_idx
201 	},
202 	{
203 		.str = "gre",
204 		.mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GRE),
205 		.map = &flow_items[0],
206 		.map_idx = &items_idx
207 	},
208 	{
209 		.str = "geneve",
210 		.mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GENEVE),
211 		.map = &flow_items[0],
212 		.map_idx = &items_idx
213 	},
214 	{
215 		.str = "gtp",
216 		.mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_GTP),
217 		.map = &flow_items[0],
218 		.map_idx = &items_idx
219 	},
220 	{
221 		.str = "meta",
222 		.mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_META),
223 		.map = &flow_items[0],
224 		.map_idx = &items_idx
225 	},
226 	{
227 		.str = "tag",
228 		.mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_TAG),
229 		.map = &flow_items[0],
230 		.map_idx = &items_idx
231 	},
232 	{
233 		.str = "icmpv4",
234 		.mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ICMP),
235 		.map = &flow_items[0],
236 		.map_idx = &items_idx
237 	},
238 	{
239 		.str = "icmpv6",
240 		.mask = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ICMP6),
241 		.map = &flow_items[0],
242 		.map_idx = &items_idx
243 	},
244 	{
245 		.str = "ingress",
246 		.mask = INGRESS,
247 		.map = &flow_attrs[0],
248 		.map_idx = &attrs_idx
249 	},
250 	{
251 		.str = "egress",
252 		.mask = EGRESS,
253 		.map = &flow_attrs[0],
254 		.map_idx = &attrs_idx
255 	},
256 	{
257 		.str = "transfer",
258 		.mask = TRANSFER,
259 		.map = &flow_attrs[0],
260 		.map_idx = &attrs_idx
261 	},
262 	{
263 		.str = "port-id",
264 		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_PORT_ID),
265 		.map = &flow_actions[0],
266 		.map_idx = &actions_idx
267 	},
268 	{
269 		.str = "rss",
270 		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_RSS),
271 		.map = &flow_actions[0],
272 		.map_idx = &actions_idx
273 	},
274 	{
275 		.str = "queue",
276 		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_QUEUE),
277 		.map = &flow_actions[0],
278 		.map_idx = &actions_idx
279 	},
280 	{
281 		.str = "jump",
282 		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_JUMP),
283 		.map = &flow_actions[0],
284 		.map_idx = &actions_idx
285 	},
286 	{
287 		.str = "mark",
288 		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_MARK),
289 		.map = &flow_actions[0],
290 		.map_idx = &actions_idx
291 	},
292 	{
293 		.str = "count",
294 		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_COUNT),
295 		.map = &flow_actions[0],
296 		.map_idx = &actions_idx
297 	},
298 	{
299 		.str = "set-meta",
300 		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_META),
301 		.map = &flow_actions[0],
302 		.map_idx = &actions_idx
303 	},
304 	{
305 		.str = "set-tag",
306 		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_SET_TAG),
307 		.map = &flow_actions[0],
308 		.map_idx = &actions_idx
309 	},
310 	{
311 		.str = "drop",
312 		.mask = FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_DROP),
313 		.map = &flow_actions[0],
314 		.map_idx = &actions_idx
315 	},
316 	{
317 		.str = "set-src-mac",
318 		.mask = FLOW_ACTION_MASK(
319 			RTE_FLOW_ACTION_TYPE_SET_MAC_SRC
320 		),
321 		.map = &flow_actions[0],
322 		.map_idx = &actions_idx
323 	},
324 	{
325 		.str = "set-dst-mac",
326 		.mask = FLOW_ACTION_MASK(
327 			RTE_FLOW_ACTION_TYPE_SET_MAC_DST
328 		),
329 		.map = &flow_actions[0],
330 		.map_idx = &actions_idx
331 	},
332 	{
333 		.str = "set-src-ipv4",
334 		.mask = FLOW_ACTION_MASK(
335 			RTE_FLOW_ACTION_TYPE_SET_IPV4_SRC
336 		),
337 		.map = &flow_actions[0],
338 		.map_idx = &actions_idx
339 	},
340 	{
341 		.str = "set-dst-ipv4",
342 		.mask = FLOW_ACTION_MASK(
343 			RTE_FLOW_ACTION_TYPE_SET_IPV4_DST
344 		),
345 		.map = &flow_actions[0],
346 		.map_idx = &actions_idx
347 	},
348 	{
349 		.str = "set-src-ipv6",
350 		.mask = FLOW_ACTION_MASK(
351 			RTE_FLOW_ACTION_TYPE_SET_IPV6_SRC
352 		),
353 		.map = &flow_actions[0],
354 		.map_idx = &actions_idx
355 	},
356 	{
357 		.str = "set-dst-ipv6",
358 		.mask = FLOW_ACTION_MASK(
359 			RTE_FLOW_ACTION_TYPE_SET_IPV6_DST
360 		),
361 		.map = &flow_actions[0],
362 		.map_idx = &actions_idx
363 	},
364 	{
365 		.str = "set-src-tp",
366 		.mask = FLOW_ACTION_MASK(
367 			RTE_FLOW_ACTION_TYPE_SET_TP_SRC
368 		),
369 		.map = &flow_actions[0],
370 		.map_idx = &actions_idx
371 	},
372 	{
373 		.str = "set-dst-tp",
374 		.mask = FLOW_ACTION_MASK(
375 			RTE_FLOW_ACTION_TYPE_SET_TP_DST
376 		),
377 		.map = &flow_actions[0],
378 		.map_idx = &actions_idx
379 	},
380 	{
381 		.str = "inc-tcp-ack",
382 		.mask = FLOW_ACTION_MASK(
383 			RTE_FLOW_ACTION_TYPE_INC_TCP_ACK
384 		),
385 		.map = &flow_actions[0],
386 		.map_idx = &actions_idx
387 	},
388 	{
389 		.str = "dec-tcp-ack",
390 		.mask = FLOW_ACTION_MASK(
391 			RTE_FLOW_ACTION_TYPE_DEC_TCP_ACK
392 		),
393 		.map = &flow_actions[0],
394 		.map_idx = &actions_idx
395 	},
396 	{
397 		.str = "inc-tcp-seq",
398 		.mask = FLOW_ACTION_MASK(
399 			RTE_FLOW_ACTION_TYPE_INC_TCP_SEQ
400 		),
401 		.map = &flow_actions[0],
402 		.map_idx = &actions_idx
403 	},
404 	{
405 		.str = "dec-tcp-seq",
406 		.mask = FLOW_ACTION_MASK(
407 			RTE_FLOW_ACTION_TYPE_DEC_TCP_SEQ
408 		),
409 		.map = &flow_actions[0],
410 		.map_idx = &actions_idx
411 	},
412 	{
413 		.str = "set-ttl",
414 		.mask = FLOW_ACTION_MASK(
415 			RTE_FLOW_ACTION_TYPE_SET_TTL
416 		),
417 		.map = &flow_actions[0],
418 		.map_idx = &actions_idx
419 	},
420 	{
421 		.str = "dec-ttl",
422 		.mask = FLOW_ACTION_MASK(
423 			RTE_FLOW_ACTION_TYPE_DEC_TTL
424 		),
425 		.map = &flow_actions[0],
426 		.map_idx = &actions_idx
427 	},
428 	{
429 		.str = "set-ipv4-dscp",
430 		.mask = FLOW_ACTION_MASK(
431 			RTE_FLOW_ACTION_TYPE_SET_IPV4_DSCP
432 		),
433 		.map = &flow_actions[0],
434 		.map_idx = &actions_idx
435 	},
436 	{
437 		.str = "set-ipv6-dscp",
438 		.mask = FLOW_ACTION_MASK(
439 			RTE_FLOW_ACTION_TYPE_SET_IPV6_DSCP
440 		),
441 		.map = &flow_actions[0],
442 		.map_idx = &actions_idx
443 	},
444 	{
445 		.str = "flag",
446 		.mask = FLOW_ACTION_MASK(
447 			RTE_FLOW_ACTION_TYPE_FLAG
448 		),
449 		.map = &flow_actions[0],
450 		.map_idx = &actions_idx
451 	},
452 	{
453 		.str = "meter",
454 		.mask = FLOW_ACTION_MASK(
455 			RTE_FLOW_ACTION_TYPE_METER
456 		),
457 		.map = &flow_actions[0],
458 		.map_idx = &actions_idx
459 	},
460 	{
461 		.str = "vxlan-encap",
462 		.mask = FLOW_ACTION_MASK(
463 			RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP
464 		),
465 		.map = &flow_actions[0],
466 		.map_idx = &actions_idx
467 	},
468 	{
469 		.str = "vxlan-decap",
470 		.mask = FLOW_ACTION_MASK(
471 			RTE_FLOW_ACTION_TYPE_VXLAN_DECAP
472 		),
473 		.map = &flow_actions[0],
474 		.map_idx = &actions_idx
475 	},
476 };
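/*
 * Illustrative note: each option string above is translated by
 * args_parse() into a bitmask appended to flow_items[], flow_attrs[]
 * or flow_actions[]. For example, "--ingress --ether --ipv4 --queue"
 * fills flow_attrs with the INGRESS mask, flow_items with the ETH and
 * IPV4 masks, and flow_actions with the QUEUE mask, which are later
 * consumed by generate_flow().
 */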
477 
478 static void
479 usage(char *progname)
480 {
481 	printf("\nusage: %s\n", progname);
482 	printf("\nControl configurations:\n");
483 	printf("  --rules-count=N: to set the number of needed"
484 		" rules to insert, default is %d\n", DEFAULT_RULES_COUNT);
485 	printf("  --rules-batch=N: set number of batched rules,"
486 		" default is %d\n", DEFAULT_RULES_BATCH);
487 	printf("  --dump-iterations: To print rates for each"
488 		" iteration\n");
489 	printf("  --deletion-rate: Enable deletion rate"
490 		" calculations\n");
491 	printf("  --dump-socket-mem: To dump all socket memory\n");
492 	printf("  --enable-fwd: To enable packets forwarding"
493 		" after insertion\n");
494 	printf("  --portmask=N: hexadecimal bitmask of ports used\n");
495 	printf("  --hairpin-conf=0xXXXX: hexadecimal bitmask of hairpin queue configuration\n");
496 	printf("  --random-priority=N,S: use random priority levels "
497 		"from 0 to (N - 1) for flows "
498 		"and S as seed for pseudo-random number generator\n");
499 	printf("  --unique-data: flag to set using unique data for all"
500 		" actions that support data, such as header modify and encap actions\n");
501 	printf("  --meter-profile=cir,cbs,ebs: set CIR CBS EBS parameters in meter"
502 		" profile, default values are %d,%d,%d\n", METER_CIR,
503 		METER_CIR / 8, 0);
504 	printf("  --packet-mode: to enable packet mode for meter profile\n");
505 
506 	printf("To set flow attributes:\n");
507 	printf("  --ingress: set ingress attribute in flows\n");
508 	printf("  --egress: set egress attribute in flows\n");
509 	printf("  --transfer: set transfer attribute in flows\n");
510 	printf("  --group=N: set group for all flows,"
511 		" default is %d\n", DEFAULT_GROUP);
512 	printf("  --cores=N: to set the number of needed "
513 		"cores to insert rte_flow rules, default is 1\n");
514 	printf("  --rxq=N: to set the count of receive queues\n");
515 	printf("  --txq=N: to set the count of send queues\n");
516 	printf("  --rxd=N: to set the count of rxd\n");
517 	printf("  --txd=N: to set the count of txd\n");
518 	printf("  --mbuf-size=N: to set the size of mbuf\n");
519 	printf("  --mbuf-cache-size=N: to set the size of mbuf cache\n");
520 	printf("  --total-mbuf-count=N: to set the total count of mbufs\n");
521 
522 
523 	printf("To set flow items:\n");
524 	printf("  --ether: add ether layer in flow items\n");
525 	printf("  --vlan: add vlan layer in flow items\n");
526 	printf("  --ipv4: add ipv4 layer in flow items\n");
527 	printf("  --ipv6: add ipv6 layer in flow items\n");
528 	printf("  --tcp: add tcp layer in flow items\n");
529 	printf("  --udp: add udp layer in flow items\n");
530 	printf("  --vxlan: add vxlan layer in flow items\n");
531 	printf("  --vxlan-gpe: add vxlan-gpe layer in flow items\n");
532 	printf("  --gre: add gre layer in flow items\n");
533 	printf("  --geneve: add geneve layer in flow items\n");
534 	printf("  --gtp: add gtp layer in flow items\n");
535 	printf("  --meta: add meta layer in flow items\n");
536 	printf("  --tag: add tag layer in flow items\n");
537 	printf("  --icmpv4: add icmpv4 layer in flow items\n");
538 	printf("  --icmpv6: add icmpv6 layer in flow items\n");
539 
540 	printf("To set flow actions:\n");
541 	printf("  --port-id: add port-id action in flow actions\n");
542 	printf("  --rss: add rss action in flow actions\n");
543 	printf("  --queue: add queue action in flow actions\n");
544 	printf("  --jump: add jump action in flow actions\n");
545 	printf("  --mark: add mark action in flow actions\n");
546 	printf("  --count: add count action in flow actions\n");
547 	printf("  --set-meta: add set meta action in flow actions\n");
548 	printf("  --set-tag: add set tag action in flow actions\n");
549 	printf("  --drop: add drop action in flow actions\n");
550 	printf("  --hairpin-queue=N: add hairpin-queue action in flow actions\n");
551 	printf("  --hairpin-rss=N: add hairpin-rss action in flow actions\n");
552 	printf("  --set-src-mac: add set src mac action to flow actions\n"
553 		"Src mac to be set is random each flow\n");
554 	printf("  --set-dst-mac: add set dst mac action to flow actions\n"
555 		 "Dst mac to be set is random each flow\n");
556 	printf("  --set-src-ipv4: add set src ipv4 action to flow actions\n"
557 		"Src ipv4 to be set is random each flow\n");
558 	printf("  --set-dst-ipv4 add set dst ipv4 action to flow actions\n"
559 		"Dst ipv4 to be set is random each flow\n");
560 	printf("  --set-src-ipv6: add set src ipv6 action to flow actions\n"
561 		"Src ipv6 to be set is random each flow\n");
562 	printf("  --set-dst-ipv6: add set dst ipv6 action to flow actions\n"
563 		"Dst ipv6 to be set is random each flow\n");
564 	printf("  --set-src-tp: add set src tp action to flow actions\n"
565 		"Src tp to be set is random each flow\n");
566 	printf("  --set-dst-tp: add set dst tp action to flow actions\n"
567 		"Dst tp to be set is random each flow\n");
568 	printf("  --inc-tcp-ack: add inc tcp ack action to flow actions\n"
569 		"tcp ack will be incremented by 1\n");
570 	printf("  --dec-tcp-ack: add dec tcp ack action to flow actions\n"
571 		"tcp ack will be decremented by 1\n");
572 	printf("  --inc-tcp-seq: add inc tcp seq action to flow actions\n"
573 		"tcp seq will be incremented by 1\n");
574 	printf("  --dec-tcp-seq: add dec tcp seq action to flow actions\n"
575 		"tcp seq will be decremented by 1\n");
576 	printf("  --set-ttl: add set ttl action to flow actions\n"
577 		"L3 ttl to be set is random each flow\n");
578 	printf("  --dec-ttl: add dec ttl action to flow actions\n"
579 		"L3 ttl will be decremented by 1\n");
580 	printf("  --set-ipv4-dscp: add set ipv4 dscp action to flow actions\n"
581 		"ipv4 dscp value to be set is random each flow\n");
582 	printf("  --set-ipv6-dscp: add set ipv6 dscp action to flow actions\n"
583 		"ipv6 dscp value to be set is random each flow\n");
584 	printf("  --flag: add flag action to flow actions\n");
585 	printf("  --meter: add meter action to flow actions\n");
586 	printf("  --policy-mtr=\"g1,g2:y1:r1\": to create meter with specified "
587 		"colored actions\n");
588 	printf("  --raw-encap=<data>: add raw encap action to flow actions\n"
589 		"Data is the data needed to be encaped\n"
590 		"Example: raw-encap=ether,ipv4,udp,vxlan\n");
591 	printf("  --raw-decap=<data>: add raw decap action to flow actions\n"
592 		"Data is the data needed to be decaped\n"
593 		"Example: raw-decap=ether,ipv4,udp,vxlan\n");
594 	printf("  --vxlan-encap: add vxlan-encap action to flow actions\n"
595 		"Encapped data is fixed with pattern: ether,ipv4,udp,vxlan\n"
596 		"With fixed values\n");
597 	printf("  --vxlan-decap: add vxlan_decap action to flow actions\n");
598 }
599 
600 static void
601 read_meter_policy(char *prog, char *arg)
602 {
603 	char *token;
604 	size_t i, j, k;
605 
606 	j = 0;
607 	k = 0;
608 	policy_mtr = true;
609 	token = strsep(&arg, ":\0");
610 	while (token != NULL && j < RTE_COLORS) {
611 		actions_str[j++] = token;
612 		token = strsep(&arg, ":\0");
613 	}
614 	j = 0;
615 	token = strtok(actions_str[0], ",\0");
616 	while (token == NULL && j < RTE_COLORS - 1)
617 		token = strtok(actions_str[++j], ",\0");
618 	while (j < RTE_COLORS && token != NULL) {
619 		for (i = 0; i < RTE_DIM(flow_options); i++) {
620 			if (!strcmp(token, flow_options[i].str)) {
621 				all_actions[j][k++] = flow_options[i].mask;
622 				break;
623 			}
624 		}
625 		/* Reached last action with no match */
626 		if (i >= RTE_DIM(flow_options)) {
627 			fprintf(stderr, "Invalid colored actions: %s\n", token);
628 			usage(prog);
629 			rte_exit(EXIT_FAILURE, "Invalid colored actions\n");
630 		}
631 		token = strtok(NULL, ",\0");
632 		while (!token && j < RTE_COLORS - 1) {
633 			token = strtok(actions_str[++j], ",\0");
634 			k = 0;
635 		}
636 	}
637 }
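/*
 * Illustrative example for read_meter_policy() above: the argument
 * "--policy-mtr=queue:mark:drop" is first split on ':' into the green,
 * yellow and red action lists, then each list is split on ',' and
 * matched against flow_options[], so green maps to a queue action,
 * yellow to mark, and red to drop.
 */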
638 
639 static void
640 args_parse(int argc, char **argv)
641 {
642 	uint64_t pm, seed;
643 	uint64_t hp_conf;
644 	char **argvopt;
645 	uint32_t prio;
646 	char *token;
647 	char *end;
648 	int n, opt;
649 	int opt_idx;
650 	size_t i;
651 
652 	static const struct option lgopts[] = {
653 		/* Control */
654 		{ "help",                       0, 0, 0 },
655 		{ "rules-count",                1, 0, 0 },
656 		{ "rules-batch",                1, 0, 0 },
657 		{ "dump-iterations",            0, 0, 0 },
658 		{ "deletion-rate",              0, 0, 0 },
659 		{ "dump-socket-mem",            0, 0, 0 },
660 		{ "enable-fwd",                 0, 0, 0 },
661 		{ "unique-data",                0, 0, 0 },
662 		{ "portmask",                   1, 0, 0 },
663 		{ "hairpin-conf",               1, 0, 0 },
664 		{ "cores",                      1, 0, 0 },
665 		{ "random-priority",            1, 0, 0 },
666 		{ "meter-profile-alg",          1, 0, 0 },
667 		{ "rxq",                        1, 0, 0 },
668 		{ "txq",                        1, 0, 0 },
669 		{ "rxd",                        1, 0, 0 },
670 		{ "txd",                        1, 0, 0 },
671 		{ "mbuf-size",                  1, 0, 0 },
672 		{ "mbuf-cache-size",            1, 0, 0 },
673 		{ "total-mbuf-count",           1, 0, 0 },
674 		/* Attributes */
675 		{ "ingress",                    0, 0, 0 },
676 		{ "egress",                     0, 0, 0 },
677 		{ "transfer",                   0, 0, 0 },
678 		{ "group",                      1, 0, 0 },
679 		/* Items */
680 		{ "ether",                      0, 0, 0 },
681 		{ "vlan",                       0, 0, 0 },
682 		{ "ipv4",                       0, 0, 0 },
683 		{ "ipv6",                       0, 0, 0 },
684 		{ "tcp",                        0, 0, 0 },
685 		{ "udp",                        0, 0, 0 },
686 		{ "vxlan",                      0, 0, 0 },
687 		{ "vxlan-gpe",                  0, 0, 0 },
688 		{ "gre",                        0, 0, 0 },
689 		{ "geneve",                     0, 0, 0 },
690 		{ "gtp",                        0, 0, 0 },
691 		{ "meta",                       0, 0, 0 },
692 		{ "tag",                        0, 0, 0 },
693 		{ "icmpv4",                     0, 0, 0 },
694 		{ "icmpv6",                     0, 0, 0 },
695 		/* Actions */
696 		{ "port-id",                    2, 0, 0 },
697 		{ "rss",                        0, 0, 0 },
698 		{ "queue",                      0, 0, 0 },
699 		{ "jump",                       0, 0, 0 },
700 		{ "mark",                       0, 0, 0 },
701 		{ "count",                      0, 0, 0 },
702 		{ "set-meta",                   0, 0, 0 },
703 		{ "set-tag",                    0, 0, 0 },
704 		{ "drop",                       0, 0, 0 },
705 		{ "hairpin-queue",              1, 0, 0 },
706 		{ "hairpin-rss",                1, 0, 0 },
707 		{ "set-src-mac",                0, 0, 0 },
708 		{ "set-dst-mac",                0, 0, 0 },
709 		{ "set-src-ipv4",               0, 0, 0 },
710 		{ "set-dst-ipv4",               0, 0, 0 },
711 		{ "set-src-ipv6",               0, 0, 0 },
712 		{ "set-dst-ipv6",               0, 0, 0 },
713 		{ "set-src-tp",                 0, 0, 0 },
714 		{ "set-dst-tp",                 0, 0, 0 },
715 		{ "inc-tcp-ack",                0, 0, 0 },
716 		{ "dec-tcp-ack",                0, 0, 0 },
717 		{ "inc-tcp-seq",                0, 0, 0 },
718 		{ "dec-tcp-seq",                0, 0, 0 },
719 		{ "set-ttl",                    0, 0, 0 },
720 		{ "dec-ttl",                    0, 0, 0 },
721 		{ "set-ipv4-dscp",              0, 0, 0 },
722 		{ "set-ipv6-dscp",              0, 0, 0 },
723 		{ "flag",                       0, 0, 0 },
724 		{ "meter",                      0, 0, 0 },
725 		{ "raw-encap",                  1, 0, 0 },
726 		{ "raw-decap",                  1, 0, 0 },
727 		{ "vxlan-encap",                0, 0, 0 },
728 		{ "vxlan-decap",                0, 0, 0 },
729 		{ "policy-mtr",                 1, 0, 0 },
730 		{ "meter-profile",              1, 0, 0 },
731 		{ "packet-mode",                0, 0, 0 },
732 		{ 0, 0, 0, 0 },
733 	};
734 
735 	RTE_ETH_FOREACH_DEV(i)
736 		ports_mask |= 1 << i;
737 
738 	for (i = 0; i < RTE_MAX_ETHPORTS; i++)
739 		dst_ports[i] = PORT_ID_DST;
740 
741 	hairpin_queues_num = 0;
742 	argvopt = argv;
743 
744 	printf(":: Flow -> ");
745 	while ((opt = getopt_long(argc, argvopt, "",
746 				lgopts, &opt_idx)) != EOF) {
747 		switch (opt) {
748 		case 0:
749 			if (strcmp(lgopts[opt_idx].name, "help") == 0) {
750 				usage(argv[0]);
751 				exit(EXIT_SUCCESS);
752 			}
753 
754 			if (strcmp(lgopts[opt_idx].name, "group") == 0) {
755 				n = atoi(optarg);
756 				if (n >= 0)
757 					flow_group = n;
758 				else
759 					rte_exit(EXIT_FAILURE,
760 						"flow group should be >= 0\n");
761 				printf("group %d / ", flow_group);
762 			}
763 
764 			for (i = 0; i < RTE_DIM(flow_options); i++)
765 				if (strcmp(lgopts[opt_idx].name,
766 						flow_options[i].str) == 0) {
767 					flow_options[i].map[
768 					(*flow_options[i].map_idx)++] =
769 						flow_options[i].mask;
770 					printf("%s / ", flow_options[i].str);
771 				}
772 
773 			if (strcmp(lgopts[opt_idx].name,
774 					"hairpin-rss") == 0) {
775 				n = atoi(optarg);
776 				if (n > 0)
777 					hairpin_queues_num = n;
778 				else
779 					rte_exit(EXIT_FAILURE,
780 						"Hairpin queues should be > 0\n");
781 
782 				flow_actions[actions_idx++] =
783 					HAIRPIN_RSS_ACTION;
784 				printf("hairpin-rss / ");
785 			}
786 			if (strcmp(lgopts[opt_idx].name,
787 					"hairpin-queue") == 0) {
788 				n = atoi(optarg);
789 				if (n > 0)
790 					hairpin_queues_num = n;
791 				else
792 					rte_exit(EXIT_FAILURE,
793 						"Hairpin queues should be > 0\n");
794 
795 				flow_actions[actions_idx++] =
796 					HAIRPIN_QUEUE_ACTION;
797 				printf("hairpin-queue / ");
798 			}
799 
800 			if (strcmp(lgopts[opt_idx].name, "raw-encap") == 0) {
801 				printf("raw-encap ");
802 				flow_actions[actions_idx++] =
803 					FLOW_ITEM_MASK(
804 						RTE_FLOW_ACTION_TYPE_RAW_ENCAP
805 					);
806 
807 				token = strtok(optarg, ",");
808 				while (token != NULL) {
809 					for (i = 0; i < RTE_DIM(flow_options); i++) {
810 						if (strcmp(flow_options[i].str, token) == 0) {
811 							printf("%s,", token);
812 							encap_data |= flow_options[i].mask;
813 							break;
814 						}
815 						/* Reached last item with no match */
816 						if (i == (RTE_DIM(flow_options) - 1))
817 							rte_exit(EXIT_FAILURE,
818 								"Invalid encap item: %s\n", token);
819 					}
820 					token = strtok(NULL, ",");
821 				}
822 				printf(" / ");
823 			}
824 			if (strcmp(lgopts[opt_idx].name, "raw-decap") == 0) {
825 				printf("raw-decap ");
826 				flow_actions[actions_idx++] =
827 					FLOW_ITEM_MASK(
828 						RTE_FLOW_ACTION_TYPE_RAW_DECAP
829 					);
830 
831 				token = strtok(optarg, ",");
832 				while (token != NULL) {
833 					for (i = 0; i < RTE_DIM(flow_options); i++) {
834 						if (strcmp(flow_options[i].str, token) == 0) {
835 							printf("%s,", token);
836 							decap_data |= flow_options[i].mask;
837 							break;
838 						}
839 						/* Reached last item with no match */
840 						if (i == (RTE_DIM(flow_options) - 1))
841 							rte_exit(EXIT_FAILURE,
842 								"Invalid decap item %s\n", token);
843 					}
844 					token = strtok(NULL, ",");
845 				}
846 				printf(" / ");
847 			}
848 			/* Control */
849 			if (strcmp(lgopts[opt_idx].name,
850 					"rules-batch") == 0) {
851 				n = atoi(optarg);
852 				if (n > 0)
853 					rules_batch = n;
854 				else
855 					rte_exit(EXIT_FAILURE,
856 							"flow rules-batch should be > 0\n");
857 			}
858 			if (strcmp(lgopts[opt_idx].name,
859 					"rules-count") == 0) {
860 				rules_count = atoi(optarg);
861 			}
862 			if (strcmp(lgopts[opt_idx].name, "random-priority") ==
863 			    0) {
864 				end = NULL;
865 				prio = strtol(optarg, &end, 10);
866 				if ((optarg[0] == '\0') || (end == NULL))
867 					rte_exit(EXIT_FAILURE,
868 						 "Invalid value for random-priority\n");
869 				max_priority = prio;
870 				token = end + 1;
871 				seed = strtoll(token, &end, 10);
872 				if ((token[0] == '\0') || (*end != '\0'))
873 					rte_exit(EXIT_FAILURE,
874 						 "Invalid value for random-priority\n");
875 				rand_seed = seed;
876 			}
877 			if (strcmp(lgopts[opt_idx].name,
878 					"dump-iterations") == 0)
879 				dump_iterations = true;
880 			if (strcmp(lgopts[opt_idx].name,
881 					"unique-data") == 0)
882 				unique_data = true;
883 			if (strcmp(lgopts[opt_idx].name,
884 					"deletion-rate") == 0)
885 				delete_flag = true;
886 			if (strcmp(lgopts[opt_idx].name,
887 					"dump-socket-mem") == 0)
888 				dump_socket_mem_flag = true;
889 			if (strcmp(lgopts[opt_idx].name,
890 					"enable-fwd") == 0)
891 				enable_fwd = true;
892 			if (strcmp(lgopts[opt_idx].name,
893 					"portmask") == 0) {
894 				/* parse hexadecimal string */
895 				end = NULL;
896 				pm = strtoull(optarg, &end, 16);
897 				if ((optarg[0] == '\0') || (end == NULL) || (*end != '\0'))
898 					rte_exit(EXIT_FAILURE, "Invalid fwd port mask\n");
899 				ports_mask = pm;
900 			}
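			/*
			 * Illustrative example: --portmask=0x5 (binary 101)
			 * restricts the run to ports 0 and 2 only.
			 */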
901 			if (strcmp(lgopts[opt_idx].name, "hairpin-conf") == 0) {
902 				end = NULL;
903 				hp_conf = strtoull(optarg, &end, 16);
904 				if ((optarg[0] == '\0') || (end == NULL) || (*end != '\0'))
905 					rte_exit(EXIT_FAILURE, "Invalid hairpin config mask\n");
906 				hairpin_conf_mask = hp_conf;
907 			}
908 			if (strcmp(lgopts[opt_idx].name,
909 					"port-id") == 0) {
910 				uint16_t port_idx = 0;
911 				char *token;
912 
913 				token = strtok(optarg, ",");
914 				while (token != NULL) {
915 					dst_ports[port_idx++] = atoi(token);
916 					token = strtok(NULL, ",");
917 				}
918 			}
919 			if (strcmp(lgopts[opt_idx].name, "rxq") == 0) {
920 				n = atoi(optarg);
921 				rx_queues_count = (uint8_t) n;
922 			}
923 			if (strcmp(lgopts[opt_idx].name, "txq") == 0) {
924 				n = atoi(optarg);
925 				tx_queues_count = (uint8_t) n;
926 			}
927 			if (strcmp(lgopts[opt_idx].name, "rxd") == 0) {
928 				n = atoi(optarg);
929 				rxd_count = (uint8_t) n;
930 			}
931 			if (strcmp(lgopts[opt_idx].name, "txd") == 0) {
932 				n = atoi(optarg);
933 				txd_count = (uint8_t) n;
934 			}
935 			if (strcmp(lgopts[opt_idx].name, "mbuf-size") == 0) {
936 				n = atoi(optarg);
937 				mbuf_size = (uint32_t) n;
938 			}
939 			if (strcmp(lgopts[opt_idx].name, "mbuf-cache-size") == 0) {
940 				n = atoi(optarg);
941 				mbuf_cache_size = (uint32_t) n;
942 			}
943 			if (strcmp(lgopts[opt_idx].name, "total-mbuf-count") == 0) {
944 				n = atoi(optarg);
945 				total_mbuf_num = (uint32_t) n;
946 			}
947 			if (strcmp(lgopts[opt_idx].name, "cores") == 0) {
948 				n = atoi(optarg);
949 				if ((int) rte_lcore_count() <= n) {
950 					rte_exit(EXIT_FAILURE,
951 						"Error: you need more than %d cores to run multi-core insertion\n"
952 						"Available cores: %d\n", n, rte_lcore_count());
953 				}
954 				if (n <= RTE_MAX_LCORE && n > 0)
955 					mc_pool.cores_count = n;
956 				else {
957 					rte_exit(EXIT_FAILURE,
958 						"Error: cores count must be > 0 and <= %d\n",
959 						RTE_MAX_LCORE);
960 				}
961 			}
962 			if (strcmp(lgopts[opt_idx].name, "policy-mtr") == 0)
963 				read_meter_policy(argv[0], optarg);
964 			if (strcmp(lgopts[opt_idx].name,
965 						"meter-profile") == 0) {
966 				i = 0;
967 				token = strsep(&optarg, ",\0");
968 				while (token != NULL && i < sizeof(
969 						meter_profile_values) /
970 						sizeof(uint64_t)) {
971 					meter_profile_values[i++] = atol(token);
972 					token = strsep(&optarg, ",\0");
973 				}
974 			}
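			/*
			 * Illustrative example: --meter-profile=1250000,156250,0
			 * fills meter_profile_values[] with CIR=1250000,
			 * CBS=156250 and EBS=0 (these numbers are arbitrary
			 * examples, not defaults from this file).
			 */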
975 			if (strcmp(lgopts[opt_idx].name, "packet-mode") == 0)
976 				packet_mode = true;
977 			break;
978 		default:
979 			usage(argv[0]);
980 			rte_exit(EXIT_FAILURE, "Invalid option: %s\n",
981 					argv[optind - 1]);
982 			break;
983 		}
984 	}
985 	if (rules_count % rules_batch != 0) {
986 		rte_exit(EXIT_FAILURE,
987 			 "rules_count %% rules_batch should be 0\n");
988 	}
989 	if (rules_count / rules_batch > MAX_BATCHES_COUNT) {
990 		rte_exit(EXIT_FAILURE,
991 			 "rules_count / rules_batch should be <= %d\n",
992 			 MAX_BATCHES_COUNT);
993 	}
994 
995 	printf("end_flow\n");
996 }
997 
998 /* Dump the socket memory statistics on console */
999 static size_t
1000 dump_socket_mem(FILE *f)
1001 {
1002 	struct rte_malloc_socket_stats socket_stats;
1003 	unsigned int i = 0;
1004 	size_t total = 0;
1005 	size_t alloc = 0;
1006 	size_t free = 0;
1007 	unsigned int n_alloc = 0;
1008 	unsigned int n_free = 0;
1009 	bool active_nodes = false;
1010 
1011 
1012 	for (i = 0; i < RTE_MAX_NUMA_NODES; i++) {
1013 		if (rte_malloc_get_socket_stats(i, &socket_stats) ||
1014 		    !socket_stats.heap_totalsz_bytes)
1015 			continue;
1016 		active_nodes = true;
1017 		total += socket_stats.heap_totalsz_bytes;
1018 		alloc += socket_stats.heap_allocsz_bytes;
1019 		free += socket_stats.heap_freesz_bytes;
1020 		n_alloc += socket_stats.alloc_count;
1021 		n_free += socket_stats.free_count;
1022 		if (dump_socket_mem_flag) {
1023 			fprintf(f, "::::::::::::::::::::::::::::::::::::::::");
1024 			fprintf(f,
1025 				"\nSocket %u:\nsize(M) total: %.6lf\nalloc:"
1026 				" %.6lf(%.3lf%%)\nfree: %.6lf"
1027 				"\nmax: %.6lf"
1028 				"\ncount alloc: %u\nfree: %u\n",
1029 				i,
1030 				socket_stats.heap_totalsz_bytes / 1.0e6,
1031 				socket_stats.heap_allocsz_bytes / 1.0e6,
1032 				(double)socket_stats.heap_allocsz_bytes * 100 /
1033 				(double)socket_stats.heap_totalsz_bytes,
1034 				socket_stats.heap_freesz_bytes / 1.0e6,
1035 				socket_stats.greatest_free_size / 1.0e6,
1036 				socket_stats.alloc_count,
1037 				socket_stats.free_count);
1038 				fprintf(f, "::::::::::::::::::::::::::::::::::::::::");
1039 		}
1040 	}
1041 	if (dump_socket_mem_flag && active_nodes) {
1042 		fprintf(f,
1043 			"\nTotal: size(M)\ntotal: %.6lf"
1044 			"\nalloc: %.6lf(%.3lf%%)\nfree: %.6lf"
1045 			"\ncount alloc: %u\nfree: %u\n",
1046 			total / 1.0e6, alloc / 1.0e6,
1047 			(double)alloc * 100 / (double)total, free / 1.0e6,
1048 			n_alloc, n_free);
1049 		fprintf(f, "::::::::::::::::::::::::::::::::::::::::\n");
1050 	}
1051 	return alloc;
1052 }
1053 
1054 static void
1055 print_flow_error(struct rte_flow_error error)
1056 {
1057 	printf("Flow can't be created %d message: %s\n",
1058 		error.type,
1059 		error.message ? error.message : "(no stated reason)");
1060 }
1061 
1062 static inline void
1063 print_rules_batches(double *cpu_time_per_batch)
1064 {
1065 	uint8_t idx;
1066 	double delta;
1067 	double rate;
1068 
1069 	for (idx = 0; idx < MAX_BATCHES_COUNT; idx++) {
1070 		if (!cpu_time_per_batch[idx])
1071 			break;
1072 		delta = (double)(rules_batch / cpu_time_per_batch[idx]);
1073 		rate = delta / 1000; /* Save rate in K unit. */
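		/*
		 * Illustrative example: rules_batch = 100000 rules completed
		 * in 0.25 sec gives delta = 400000 Rules/Sec, printed below
		 * as a rate of 400 K Rule/Sec for this batch.
		 */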
1074 		printf(":: Rules batch #%d: %d rules "
1075 			"in %f sec[ Rate = %f K Rule/Sec ]\n",
1076 			idx, rules_batch,
1077 			cpu_time_per_batch[idx], rate);
1078 	}
1079 }
1080 
1081 static inline int
1082 has_meter(void)
1083 {
1084 	int i;
1085 
1086 	for (i = 0; i < MAX_ACTIONS_NUM; i++) {
1087 		if (flow_actions[i] == 0)
1088 			break;
1089 		if (flow_actions[i]
1090 				& FLOW_ACTION_MASK(RTE_FLOW_ACTION_TYPE_METER))
1091 			return 1;
1092 	}
1093 	return 0;
1094 }
1095 
1096 static void
1097 create_meter_policy(void)
1098 {
1099 	struct rte_mtr_error error;
1100 	int ret, port_id;
1101 	struct rte_mtr_meter_policy_params policy;
1102 	uint16_t nr_ports;
1103 	struct rte_flow_action actions[RTE_COLORS][MAX_ACTIONS_NUM];
1104 	int i;
1105 
1106 	memset(actions, 0, sizeof(actions));
1107 	memset(&policy, 0, sizeof(policy));
1108 	nr_ports = rte_eth_dev_count_avail();
1109 	for (port_id = 0; port_id < nr_ports; port_id++) {
1110 		for (i = 0; i < RTE_COLORS; i++)
1111 			fill_actions(actions[i], all_actions[i], 0, 0, 0,
1112 				     0, 0, 0, unique_data, rx_queues_count,
1113 				     dst_ports[port_id]);
1114 		policy.actions[RTE_COLOR_GREEN] = actions[RTE_COLOR_GREEN];
1115 		policy.actions[RTE_COLOR_YELLOW] = actions[RTE_COLOR_YELLOW];
1116 		policy.actions[RTE_COLOR_RED] = actions[RTE_COLOR_RED];
1117 		policy_id[port_id] = port_id + 10;
1118 		ret = rte_mtr_meter_policy_add(port_id, policy_id[port_id],
1119 					       &policy, &error);
1120 		if (ret) {
1121 			fprintf(stderr, "port %d: failed to create meter policy\n",
1122 				port_id);
1123 			policy_id[port_id] = UINT32_MAX;
1124 		}
1125 		memset(actions, 0, sizeof(actions));
1126 	}
1127 }
1128 
1129 static void
1130 destroy_meter_policy(void)
1131 {
1132 	struct rte_mtr_error error;
1133 	uint16_t nr_ports;
1134 	int port_id;
1135 
1136 	nr_ports = rte_eth_dev_count_avail();
1137 	for (port_id = 0; port_id < nr_ports; port_id++) {
1138 		/* If port outside portmask */
1139 		if (!((ports_mask >> port_id) & 0x1))
1140 			continue;
1141 
1142 		if (rte_mtr_meter_policy_delete
1143 			(port_id, policy_id[port_id], &error)) {
1144 			fprintf(stderr, "port %u: failed to delete meter policy\n",
1145 				port_id);
1146 			rte_exit(EXIT_FAILURE, "Error: Failed to delete meter policy.\n");
1147 		}
1148 	}
1149 }
1150 
1151 static void
1152 create_meter_rule(int port_id, uint32_t counter)
1153 {
1154 	int ret;
1155 	struct rte_mtr_params params;
1156 	struct rte_mtr_error error;
1157 
1158 	memset(&params, 0, sizeof(struct rte_mtr_params));
1159 	params.meter_enable = 1;
1160 	params.stats_mask = 0xffff;
1161 	params.use_prev_mtr_color = 0;
1162 	params.dscp_table = NULL;
1163 
1164 	/*create meter*/
1165 	params.meter_profile_id = DEFAULT_METER_PROF_ID;
1166 
1167 	if (!policy_mtr) {
1168 		ret = rte_mtr_create(port_id, counter, &params, 1, &error);
1169 	} else {
1170 		params.meter_policy_id = policy_id[port_id];
1171 		ret = rte_mtr_create(port_id, counter, &params, 0, &error);
1172 	}
1173 
1174 	if (ret != 0) {
1175 		printf("Port %u create meter idx(%d) error(%d) message: %s\n",
1176 			port_id, counter, error.type,
1177 			error.message ? error.message : "(no stated reason)");
1178 		rte_exit(EXIT_FAILURE, "Error in creating meter\n");
1179 	}
1180 }
1181 
1182 static void
1183 destroy_meter_rule(int port_id, uint32_t counter)
1184 {
1185 	struct rte_mtr_error error;
1186 
1187 	if (policy_mtr && policy_id[port_id] != UINT32_MAX) {
1188 		if (rte_mtr_meter_policy_delete(port_id, policy_id[port_id],
1189 					&error))
1190 			fprintf(stderr, "Error: Failed to delete meter policy\n");
1191 		policy_id[port_id] = UINT32_MAX;
1192 	}
1193 	if (rte_mtr_destroy(port_id, counter, &error)) {
1194 		fprintf(stderr, "Port %d: Failed to delete meter.\n",
1195 				port_id);
1196 		rte_exit(EXIT_FAILURE, "Error in deleting meter rule\n");
1197 	}
1198 }
1199 
1200 static void
1201 meters_handler(int port_id, uint8_t core_id, uint8_t ops)
1202 {
1203 	uint64_t start_batch;
1204 	double cpu_time_used, insertion_rate;
1205 	int rules_count_per_core, rules_batch_idx;
1206 	uint32_t counter, start_counter = 0, end_counter;
1207 	double cpu_time_per_batch[MAX_BATCHES_COUNT] = { 0 };
1208 
1209 	rules_count_per_core = rules_count / mc_pool.cores_count;
1210 
1211 	if (core_id)
1212 		start_counter = core_id * rules_count_per_core;
1213 	end_counter = (core_id + 1) * rules_count_per_core;
1214 
1215 	cpu_time_used = 0;
1216 	start_batch = rte_get_timer_cycles();
1217 	for (counter = start_counter; counter < end_counter; counter++) {
1218 		if (ops == METER_CREATE)
1219 			create_meter_rule(port_id, counter);
1220 		else
1221 			destroy_meter_rule(port_id, counter);
1222 		/*
1223 		 * Save the insertion rate for rules batch.
1224 		 * Check if the insertion reached the rules
1225 		 * batch counter, then save the insertion rate
1226 		 * for this batch.
1227 		 */
1228 		if (!((counter + 1) % rules_batch)) {
1229 			rules_batch_idx = ((counter + 1) / rules_batch) - 1;
1230 			cpu_time_per_batch[rules_batch_idx] =
1231 				((double)(rte_get_timer_cycles() - start_batch))
1232 				/ rte_get_timer_hz();
1233 			cpu_time_used += cpu_time_per_batch[rules_batch_idx];
1234 			start_batch = rte_get_timer_cycles();
1235 		}
1236 	}
1237 
1238 	/* Print insertion rates for all batches */
1239 	if (dump_iterations)
1240 		print_rules_batches(cpu_time_per_batch);
1241 
1242 	insertion_rate =
1243 		((double) (rules_count_per_core / cpu_time_used) / 1000);
1244 
1245 	/* Insertion rate for all rules in one core */
1246 	printf(":: Port %d :: Core %d Meter %s :: start @[%d] - end @[%d],"
1247 		" use:%.02fs, rate:%.02fk Rule/Sec\n",
1248 		port_id, core_id, ops == METER_CREATE ? "create" : "delete",
1249 		start_counter, end_counter - 1,
1250 		cpu_time_used, insertion_rate);
1251 
1252 	if (ops == METER_CREATE)
1253 		mc_pool.meters_record.insertion[port_id][core_id]
1254 			= cpu_time_used;
1255 	else
1256 		mc_pool.meters_record.deletion[port_id][core_id]
1257 			= cpu_time_used;
1258 }
1259 
1260 static void
1261 destroy_meter_profile(void)
1262 {
1263 	struct rte_mtr_error error;
1264 	uint16_t nr_ports;
1265 	int port_id;
1266 
1267 	nr_ports = rte_eth_dev_count_avail();
1268 	for (port_id = 0; port_id < nr_ports; port_id++) {
1269 		/* If port outside portmask */
1270 		if (!((ports_mask >> port_id) & 0x1))
1271 			continue;
1272 
1273 		if (rte_mtr_meter_profile_delete
1274 			(port_id, DEFAULT_METER_PROF_ID, &error)) {
1275 			printf("Port %u del profile error(%d) message: %s\n",
1276 				port_id, error.type,
1277 				error.message ? error.message : "(no stated reason)");
1278 			rte_exit(EXIT_FAILURE, "Error: Destroy meter profile Failed!\n");
1279 		}
1280 	}
1281 }
1282 
1283 static void
1284 create_meter_profile(void)
1285 {
1286 	uint16_t nr_ports;
1287 	int ret, port_id;
1288 	struct rte_mtr_meter_profile mp;
1289 	struct rte_mtr_error error;
1290 
1291 	/*
1292 	 * Currently, only one meter profile is created per port:
1293 	 * 1 meter profile -> N meter rules -> N rte flows
1294 	 */
1295 	memset(&mp, 0, sizeof(struct rte_mtr_meter_profile));
1296 	nr_ports = rte_eth_dev_count_avail();
1297 	for (port_id = 0; port_id < nr_ports; port_id++) {
1298 		/* If port outside portmask */
1299 		if (!((ports_mask >> port_id) & 0x1))
1300 			continue;
1301 		mp.alg = RTE_MTR_SRTCM_RFC2697;
1302 		mp.srtcm_rfc2697.cir = meter_profile_values[0] ?
1303 			meter_profile_values[0] : METER_CIR;
1304 		mp.srtcm_rfc2697.cbs = meter_profile_values[1] ?
1305 			meter_profile_values[1] : METER_CIR / 8;
1306 		mp.srtcm_rfc2697.ebs = meter_profile_values[2];
1307 		mp.packet_mode = packet_mode;
1308 		ret = rte_mtr_meter_profile_add
1309 			(port_id, DEFAULT_METER_PROF_ID, &mp, &error);
1310 		if (ret != 0) {
1311 			printf("Port %u create Profile error(%d) message: %s\n",
1312 				port_id, error.type,
1313 				error.message ? error.message : "(no stated reason)");
1314 			rte_exit(EXIT_FAILURE, "Error: Creation meter profile Failed!\n");
1315 		}
1316 	}
1317 }
1318 
1319 static inline void
1320 destroy_flows(int port_id, uint8_t core_id, struct rte_flow **flows_list)
1321 {
1322 	struct rte_flow_error error;
1323 	clock_t start_batch, end_batch;
1324 	double cpu_time_used = 0;
1325 	double deletion_rate;
1326 	double cpu_time_per_batch[MAX_BATCHES_COUNT] = { 0 };
1327 	double delta;
1328 	uint32_t i;
1329 	int rules_batch_idx;
1330 	int rules_count_per_core;
1331 
1332 	rules_count_per_core = rules_count / mc_pool.cores_count;
1333 	/* If group > 0, add the 1 extra flow that was created in group 0 */
1334 	if (flow_group > 0 && core_id == 0)
1335 		rules_count_per_core++;
1336 
1337 	start_batch = rte_get_timer_cycles();
1338 	for (i = 0; i < (uint32_t) rules_count_per_core; i++) {
1339 		if (flows_list[i] == 0)
1340 			break;
1341 
1342 		memset(&error, 0x33, sizeof(error));
1343 		if (rte_flow_destroy(port_id, flows_list[i], &error)) {
1344 			print_flow_error(error);
1345 			rte_exit(EXIT_FAILURE, "Error in deleting flow\n");
1346 		}
1347 
1348 		/*
1349 		 * Save the deletion rate for rules batch.
1350 		 * Check if the deletion reached the rules
1351 		 * batch counter, then save the deletion rate
1352 		 * for this batch.
1353 		 */
1354 		if (!((i + 1) % rules_batch)) {
1355 			end_batch = rte_get_timer_cycles();
1356 			delta = (double) (end_batch - start_batch);
1357 			rules_batch_idx = ((i + 1) / rules_batch) - 1;
1358 			cpu_time_per_batch[rules_batch_idx] = delta / rte_get_timer_hz();
1359 			cpu_time_used += cpu_time_per_batch[rules_batch_idx];
1360 			start_batch = rte_get_timer_cycles();
1361 		}
1362 	}
1363 
1364 	/* Print deletion rates for all batches */
1365 	if (dump_iterations)
1366 		print_rules_batches(cpu_time_per_batch);
1367 
1368 	/* Deletion rate for all rules */
1369 	deletion_rate = ((double) (rules_count_per_core / cpu_time_used) / 1000);
1370 	printf(":: Port %d :: Core %d :: Rules deletion rate -> %f K Rule/Sec\n",
1371 		port_id, core_id, deletion_rate);
1372 	printf(":: Port %d :: Core %d :: The time for deleting %d rules is %f seconds\n",
1373 		port_id, core_id, rules_count_per_core, cpu_time_used);
1374 
1375 	mc_pool.flows_record.deletion[port_id][core_id] = cpu_time_used;
1376 }
1377 
1378 static struct rte_flow **
1379 insert_flows(int port_id, uint8_t core_id, uint16_t dst_port_id)
1380 {
1381 	struct rte_flow **flows_list;
1382 	struct rte_flow_error error;
1383 	clock_t start_batch, end_batch;
1384 	double first_flow_latency;
1385 	double cpu_time_used;
1386 	double insertion_rate;
1387 	double cpu_time_per_batch[MAX_BATCHES_COUNT] = { 0 };
1388 	double delta;
1389 	uint32_t flow_index;
1390 	uint32_t counter, start_counter = 0, end_counter;
1391 	uint64_t global_items[MAX_ITEMS_NUM] = { 0 };
1392 	uint64_t global_actions[MAX_ACTIONS_NUM] = { 0 };
1393 	int rules_batch_idx;
1394 	int rules_count_per_core;
1395 
1396 	rules_count_per_core = rules_count / mc_pool.cores_count;
1397 
1398 	/* Set boundaries of rules for each core. */
1399 	if (core_id)
1400 		start_counter = core_id * rules_count_per_core;
1401 	end_counter = (core_id + 1) * rules_count_per_core;
1402 
1403 	global_items[0] = FLOW_ITEM_MASK(RTE_FLOW_ITEM_TYPE_ETH);
1404 	global_actions[0] = FLOW_ITEM_MASK(RTE_FLOW_ACTION_TYPE_JUMP);
1405 
1406 	flows_list = rte_zmalloc("flows_list",
1407 		(sizeof(struct rte_flow *) * rules_count_per_core) + 1, 0);
1408 	if (flows_list == NULL)
1409 		rte_exit(EXIT_FAILURE, "No Memory available!\n");
1410 
1411 	cpu_time_used = 0;
1412 	flow_index = 0;
1413 	if (flow_group > 0 && core_id == 0) {
1414 		/*
1415 		 * Create global rule to jump into flow_group,
1416 		 * this way the app will avoid the default rules.
1417 		 *
1418 		 * This rule will be created only once.
1419 		 *
1420 		 * Global rule:
1421 		 * group 0 eth / end actions jump group <flow_group>
1422 		 */
1423 		flow = generate_flow(port_id, 0, flow_attrs,
1424 			global_items, global_actions,
1425 			flow_group, 0, 0, 0, 0, dst_port_id, core_id,
1426 			rx_queues_count, unique_data, max_priority, &error);
1427 
1428 		if (flow == NULL) {
1429 			print_flow_error(error);
1430 			rte_exit(EXIT_FAILURE, "Error in creating flow\n");
1431 		}
1432 		flows_list[flow_index++] = flow;
1433 	}
1434 
1435 	start_batch = rte_get_timer_cycles();
1436 	for (counter = start_counter; counter < end_counter; counter++) {
1437 		flow = generate_flow(port_id, flow_group,
1438 			flow_attrs, flow_items, flow_actions,
1439 			JUMP_ACTION_TABLE, counter,
1440 			hairpin_queues_num, encap_data,
1441 			decap_data, dst_port_id,
1442 			core_id, rx_queues_count,
1443 			unique_data, max_priority, &error);
1444 
1445 		if (!counter) {
1446 			first_flow_latency = (double) (rte_get_timer_cycles() - start_batch);
1447 			first_flow_latency /= rte_get_timer_hz();
1448 			/* In millisecond */
1449 			first_flow_latency *= 1000;
1450 			printf(":: First Flow Latency :: Port %d :: First flow "
1451 				"installed in %f milliseconds\n",
1452 				port_id, first_flow_latency);
1453 		}
1454 
1455 		if (force_quit)
1456 			counter = end_counter;
1457 
1458 		if (!flow) {
1459 			print_flow_error(error);
1460 			rte_exit(EXIT_FAILURE, "Error in creating flow\n");
1461 		}
1462 
1463 		flows_list[flow_index++] = flow;
1464 
1465 		/*
1466 		 * Save the insertion rate for rules batch.
1467 		 * Check if the insertion reached the rules
1468 		 * batch counter, then save the insertion rate
1469 		 * for this batch.
1470 		 */
1471 		if (!((counter + 1) % rules_batch)) {
1472 			end_batch = rte_get_timer_cycles();
1473 			delta = (double) (end_batch - start_batch);
1474 			rules_batch_idx = ((counter + 1) / rules_batch) - 1;
1475 			cpu_time_per_batch[rules_batch_idx] = delta / rte_get_timer_hz();
1476 			cpu_time_used += cpu_time_per_batch[rules_batch_idx];
1477 			start_batch = rte_get_timer_cycles();
1478 		}
1479 	}
1480 
1481 	/* Print insertion rates for all batches */
1482 	if (dump_iterations)
1483 		print_rules_batches(cpu_time_per_batch);
1484 
1485 	printf(":: Port %d :: Core %d boundaries :: start @[%d] - end @[%d]\n",
1486 		port_id, core_id, start_counter, end_counter - 1);
1487 
1488 	/* Insertion rate for all rules in one core */
1489 	insertion_rate = ((double) (rules_count_per_core / cpu_time_used) / 1000);
1490 	printf(":: Port %d :: Core %d :: Rules insertion rate -> %f K Rule/Sec\n",
1491 		port_id, core_id, insertion_rate);
1492 	printf(":: Port %d :: Core %d :: The time for creating %d rules is %f seconds\n",
1493 		port_id, core_id, rules_count_per_core, cpu_time_used);
1494 
1495 	mc_pool.flows_record.insertion[port_id][core_id] = cpu_time_used;
1496 	return flows_list;
1497 }
1498 
1499 static void
1500 flows_handler(uint8_t core_id)
1501 {
1502 	struct rte_flow **flows_list;
1503 	uint16_t port_idx = 0;
1504 	uint16_t nr_ports;
1505 	int port_id;
1506 
1507 	nr_ports = rte_eth_dev_count_avail();
1508 
1509 	if (rules_batch > rules_count)
1510 		rules_batch = rules_count;
1511 
1512 	printf(":: Rules Count per port: %d\n\n", rules_count);
1513 
1514 	for (port_id = 0; port_id < nr_ports; port_id++) {
1515 		/* If port outside portmask */
1516 		if (!((ports_mask >> port_id) & 0x1))
1517 			continue;
1518 
1519 		/* Insertion part. */
1520 		mc_pool.last_alloc[core_id] = (int64_t)dump_socket_mem(stdout);
1521 		if (has_meter())
1522 			meters_handler(port_id, core_id, METER_CREATE);
1523 		flows_list = insert_flows(port_id, core_id,
1524 						dst_ports[port_idx++]);
1525 		if (flows_list == NULL)
1526 			rte_exit(EXIT_FAILURE, "Error: Insertion Failed!\n");
1527 		mc_pool.current_alloc[core_id] = (int64_t)dump_socket_mem(stdout);
1528 
1529 		/* Deletion part. */
1530 		if (delete_flag) {
1531 			destroy_flows(port_id, core_id, flows_list);
1532 			if (has_meter())
1533 				meters_handler(port_id, core_id, METER_DELETE);
1534 		}
1535 	}
1536 }
1537 
1538 static void
1539 dump_used_cpu_time(const char *item,
1540 		uint16_t port, struct used_cpu_time *used_time)
1541 {
1542 	uint32_t i;
1543 	/* Latency: total count of rte rules divided
1544 	 * by the maximum time used by any single
1545 	 * thread.
1546 	 *
1547 	 * Throughput: total count of rte rules divided
1548 	 * by the average time consumed across all
1549 	 * threads.
1550 	 */
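	/*
	 * Illustrative example: with 4M rules and two cores finishing in
	 * 2.0 s and 4.0 s, latency uses the maximum time
	 * (4M / 4.0 s = 1000 K Rules/Sec) while throughput uses the
	 * average ((2.0 + 4.0) / 2 = 3.0 s, i.e. ~1333 K Rules/Sec).
	 */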
1551 	double insertion_latency_time;
1552 	double insertion_throughput_time;
1553 	double deletion_latency_time;
1554 	double deletion_throughput_time;
1555 	double insertion_latency, insertion_throughput;
1556 	double deletion_latency, deletion_throughput;
1557 
1558 	/* Start from the first thread's insertion/deletion times,
1559 	 * then compare against every other thread and keep the
1560 	 * larger value whenever a thread used more time.
1561 	 *
1562 	 * In the end this yields the maximum time spent on
1563 	 * insertion/deletion by any single thread.
1564 	 *
1565 	 * For memory consumption, keep the minimum of all threads'
1566 	 * last alloc and the maximum of all threads' current
1567 	 * alloc.
1568 	 */
1569 
1570 	insertion_latency_time = used_time->insertion[port][0];
1571 	deletion_latency_time = used_time->deletion[port][0];
1572 	insertion_throughput_time = used_time->insertion[port][0];
1573 	deletion_throughput_time = used_time->deletion[port][0];
1574 
1575 	i = mc_pool.cores_count;
1576 	while (i-- > 1) {
1577 		insertion_throughput_time += used_time->insertion[port][i];
1578 		deletion_throughput_time += used_time->deletion[port][i];
1579 		if (insertion_latency_time < used_time->insertion[port][i])
1580 			insertion_latency_time = used_time->insertion[port][i];
1581 		if (deletion_latency_time < used_time->deletion[port][i])
1582 			deletion_latency_time = used_time->deletion[port][i];
1583 	}
1584 
1585 	insertion_latency = ((double) (mc_pool.rules_count
1586 				/ insertion_latency_time) / 1000);
1587 	deletion_latency = ((double) (mc_pool.rules_count
1588 				/ deletion_latency_time) / 1000);
1589 
1590 	insertion_throughput_time /= mc_pool.cores_count;
1591 	deletion_throughput_time /= mc_pool.cores_count;
1592 	insertion_throughput = ((double) (mc_pool.rules_count
1593 				/ insertion_throughput_time) / 1000);
1594 	deletion_throughput = ((double) (mc_pool.rules_count
1595 				/ deletion_throughput_time) / 1000);
1596 
1597 	/* Latency stats */
1598 	printf("\n%s\n:: [Latency | Insertion] All Cores :: Port %d :: ",
1599 		item, port);
1600 	printf("Total flows insertion rate -> %f K Rules/Sec\n",
1601 		insertion_latency);
1602 	printf(":: [Latency | Insertion] All Cores :: Port %d :: ", port);
1603 	printf("The time for creating %d rules is %f seconds\n",
1604 		mc_pool.rules_count, insertion_latency_time);
1605 
1606 	/* Throughput stats */
1607 	printf(":: [Throughput | Insertion] All Cores :: Port %d :: ", port);
1608 	printf("Total flows insertion rate -> %f K Rules/Sec\n",
1609 		insertion_throughput);
1610 	printf(":: [Throughput | Insertion] All Cores :: Port %d :: ", port);
1611 	printf("The average time for creating %d rules is %f seconds\n",
1612 		mc_pool.rules_count, insertion_throughput_time);
1613 
1614 	if (delete_flag) {
1615 	/* Latency stats */
1616 		printf(":: [Latency | Deletion] All Cores :: Port %d :: Total "
1617 			"deletion rate -> %f K Rules/Sec\n",
1618 			port, deletion_latency);
1619 		printf(":: [Latency | Deletion] All Cores :: Port %d :: ",
1620 			port);
1621 		printf("The time for deleting %d rules is %f seconds\n",
1622 			mc_pool.rules_count, deletion_latency_time);
1623 
1624 		/* Throughput stats */
1625 		printf(":: [Throughput | Deletion] All Cores :: Port %d :: Total "
1626 			"deletion rate -> %f K Rules/Sec\n",
1627 			port, deletion_throughput);
1628 		printf(":: [Throughput | Deletion] All Cores :: Port %d :: ",
1629 			port);
1630 		printf("The average time for deleting %d rules is %f seconds\n",
1631 			mc_pool.rules_count, deletion_throughput_time);
1632 	}
1633 }
1634 
1635 static void
1636 dump_used_mem(uint16_t port)
1637 {
1638 	uint32_t i;
1639 	int64_t last_alloc, current_alloc;
1640 	int flow_size_in_bytes;
1641 
1642 	last_alloc = mc_pool.last_alloc[0];
1643 	current_alloc = mc_pool.current_alloc[0];
1644 
1645 	i = mc_pool.cores_count;
1646 	while (i-- > 1) {
1647 		if (last_alloc > mc_pool.last_alloc[i])
1648 			last_alloc = mc_pool.last_alloc[i];
1649 		if (current_alloc < mc_pool.current_alloc[i])
1650 			current_alloc = mc_pool.current_alloc[i];
1651 	}
1652 
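	/*
	 * Illustrative example: if the minimum "last alloc" was 100 MB and
	 * the maximum "current alloc" is 500 MB after inserting 4M rules,
	 * the reported size is (500 - 100) MB / 4M = ~100 Bytes per
	 * rte_flow.
	 */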
1653 	flow_size_in_bytes = (current_alloc - last_alloc) / mc_pool.rules_count;
1654 	printf("\n:: Port %d :: rte_flow size in DPDK layer: %d Bytes\n",
1655 		port, flow_size_in_bytes);
1656 }
1657 
1658 static int
1659 run_rte_flow_handler_cores(void *data __rte_unused)
1660 {
1661 	uint16_t port;
1662 	int lcore_counter = 0;
1663 	int lcore_id = rte_lcore_id();
1664 	int i;
1665 
1666 	RTE_LCORE_FOREACH(i) {
1667 		/* If this core is not needed, return. */
1668 		if (lcore_id == i) {
1669 			printf(":: lcore %d mapped with index %d\n", lcore_id, lcore_counter);
1670 			if (lcore_counter >= (int) mc_pool.cores_count)
1671 				return 0;
1672 			break;
1673 		}
1674 		lcore_counter++;
1675 	}
1676 	lcore_id = lcore_counter;
1677 
1678 	if (lcore_id >= (int) mc_pool.cores_count)
1679 		return 0;
1680 
1681 	mc_pool.rules_count = rules_count;
1682 
1683 	flows_handler(lcore_id);
1684 
1685 	/* Only the main core prints the total results. */
1686 	if (lcore_id != 0)
1687 		return 0;
1688 
1689 	/* Make sure all cores have finished the insertion/deletion process. */
1690 	rte_eal_mp_wait_lcore();
1691 
1692 	RTE_ETH_FOREACH_DEV(port) {
1693 		/* Skip ports outside the port mask. */
1694 		if (!((ports_mask >> port) & 0x1))
1695 			continue;
1696 		if (has_meter())
1697 			dump_used_cpu_time("Meters:",
1698 				port, &mc_pool.meters_record);
1699 		dump_used_cpu_time("Flows:",
1700 			port, &mc_pool.flows_record);
1701 		dump_used_mem(port);
1702 	}
1703 
1704 	return 0;
1705 }
1706 
1707 static void
1708 signal_handler(int signum)
1709 {
1710 	if (signum == SIGINT || signum == SIGTERM) {
1711 		force_quit = true;
1712 	}
1713 }
1714 
1715 static inline uint16_t
1716 do_rx(struct lcore_info *li, uint16_t rx_port, uint16_t rx_queue)
1717 {
1718 	uint16_t cnt = 0;
1719 	cnt = rte_eth_rx_burst(rx_port, rx_queue, li->pkts, MAX_PKT_BURST);
1720 	li->rx_pkts += cnt;
1721 	return cnt;
1722 }
1723 
1724 static inline void
1725 do_tx(struct lcore_info *li, uint16_t cnt, uint16_t tx_port,
1726 			uint16_t tx_queue)
1727 {
1728 	uint16_t nr_tx = 0;
1729 	uint16_t i;
1730 
1731 	nr_tx = rte_eth_tx_burst(tx_port, tx_queue, li->pkts, cnt);
1732 	li->tx_pkts  += nr_tx;
1733 	li->tx_drops += cnt - nr_tx;
1734 
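	/* Free any mbufs the Tx burst could not send. */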
1735 	for (i = nr_tx; i < cnt; i++)
1736 		rte_pktmbuf_free(li->pkts[i]);
1737 }
1738 
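/*
 * Print a per-second statistics table: for every lcore in packet mode,
 * show the tx, tx-drop and rx packet deltas since the previous second,
 * plus a total row when more than one core is forwarding.
 */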
1739 static void
1740 packet_per_second_stats(void)
1741 {
1742 	struct lcore_info *old;
1743 	struct lcore_info *li, *oli;
1744 	int nr_lines = 0;
1745 	int i;
1746 
1747 	old = rte_zmalloc("old",
1748 		sizeof(struct lcore_info) * RTE_MAX_LCORE, 0);
1749 	if (old == NULL)
1750 		rte_exit(EXIT_FAILURE, "No Memory available!\n");
1751 
1752 	memcpy(old, lcore_infos,
1753 		sizeof(struct lcore_info) * RTE_MAX_LCORE);
1754 
1755 	while (!force_quit) {
1756 		uint64_t total_tx_pkts = 0;
1757 		uint64_t total_rx_pkts = 0;
1758 		uint64_t total_tx_drops = 0;
1759 		uint64_t tx_delta, rx_delta, drops_delta;
1760 		int nr_valid_core = 0;
1761 
1762 		sleep(1);
1763 
1764 		if (nr_lines) {
1765 			char go_up_nr_lines[16];
1766 
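			/*
			 * ESC (27) + "[<n>A" is the ANSI escape sequence that
			 * moves the cursor up n lines, so the stats table is
			 * redrawn in place every second.
			 */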
1767 			sprintf(go_up_nr_lines, "%c[%dA\r", 27, nr_lines);
1768 			printf("%s\r", go_up_nr_lines);
1769 		}
1770 
1771 		printf("\n%6s %16s %16s %16s\n", "core", "tx", "tx drops", "rx");
1772 		printf("%6s %16s %16s %16s\n", "------", "----------------",
1773 			"----------------", "----------------");
1774 		nr_lines = 3;
1775 		for (i = 0; i < RTE_MAX_LCORE; i++) {
1776 			li  = &lcore_infos[i];
1777 			oli = &old[i];
1778 			if (li->mode != LCORE_MODE_PKT)
1779 				continue;
1780 
1781 			tx_delta    = li->tx_pkts  - oli->tx_pkts;
1782 			rx_delta    = li->rx_pkts  - oli->rx_pkts;
1783 			drops_delta = li->tx_drops - oli->tx_drops;
1784 			printf("%6d %'16"PRId64" %'16"PRId64" %'16"PRId64"\n",
1785 				i, tx_delta, drops_delta, rx_delta);
1786 
1787 			total_tx_pkts  += tx_delta;
1788 			total_rx_pkts  += rx_delta;
1789 			total_tx_drops += drops_delta;
1790 
1791 			nr_valid_core++;
1792 			nr_lines += 1;
1793 		}
1794 
1795 		if (nr_valid_core > 1) {
1796 			printf("%6s %'16"PRId64" %'16"PRId64" %'16"PRId64"\n",
1797 				"total", total_tx_pkts, total_tx_drops,
1798 				total_rx_pkts);
1799 			nr_lines += 1;
1800 		}
1801 
1802 		memcpy(old, lcore_infos,
1803 			sizeof(struct lcore_info) * RTE_MAX_LCORE);
1804 	}
1805 }
1806 
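/*
 * Forwarding entry point for each lcore: the stats lcore loops in
 * packet_per_second_stats(), while packet-mode lcores poll their assigned
 * streams, receiving on the stream's Rx queue and transmitting the burst
 * back on its Tx queue until the application is told to quit.
 */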
1807 static int
1808 start_forwarding(void *data __rte_unused)
1809 {
1810 	int lcore = rte_lcore_id();
1811 	int stream_id;
1812 	uint16_t cnt;
1813 	struct lcore_info *li = &lcore_infos[lcore];
1814 
1815 	if (!li->mode)
1816 		return 0;
1817 
1818 	if (li->mode == LCORE_MODE_STATS) {
1819 		printf(":: started stats on lcore %u\n", lcore);
1820 		packet_per_second_stats();
1821 		return 0;
1822 	}
1823 
1824 	while (!force_quit)
1825 		for (stream_id = 0; stream_id < MAX_STREAMS; stream_id++) {
1826 			if (li->streams[stream_id].rx_port == -1)
1827 				continue;
1828 
1829 			cnt = do_rx(li,
1830 					li->streams[stream_id].rx_port,
1831 					li->streams[stream_id].rx_queue);
1832 			if (cnt)
1833 				do_tx(li, cnt,
1834 					li->streams[stream_id].tx_port,
1835 					li->streams[stream_id].tx_queue);
1836 		}
1837 	return 0;
1838 }
1839 
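/*
 * Build the lcore/stream mapping used by forwarding mode: reserve the
 * first lcore for stats printing, spread nr_port * rx_queues_count streams
 * over the remaining lcores, and record the (port, queue) pair that each
 * stream receives on and forwards back to.
 */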
1840 static void
1841 init_lcore_info(void)
1842 {
1843 	int i, j;
1844 	unsigned int lcore;
1845 	uint16_t nr_port;
1846 	uint16_t queue;
1847 	int port;
1848 	int stream_id = 0;
1849 	int streams_per_core;
1850 	int unassigned_streams;
1851 	int nb_fwd_streams;
1852 	nr_port = rte_eth_dev_count_avail();
1853 
1854 	/* First logical core is reserved for stats printing */
1855 	lcore = rte_get_next_lcore(-1, 0, 0);
1856 	lcore_infos[lcore].mode = LCORE_MODE_STATS;
1857 
1858 	/*
1859 	 * Initialize all cores.
1860 	 * Every stream on every core starts out with -1 values,
1861 	 * which means the stream is unused or has not been
1862 	 * assigned yet.
1863 	 */
1864 	for (i = 0; i < RTE_MAX_LCORE; i++)
1865 		for (j = 0; j < MAX_STREAMS; j++) {
1866 			lcore_infos[i].streams[j].tx_port = -1;
1867 			lcore_infos[i].streams[j].rx_port = -1;
1868 			lcore_infos[i].streams[j].tx_queue = -1;
1869 			lcore_infos[i].streams[j].rx_queue = -1;
1870 			lcore_infos[i].streams_nb = 0;
1871 		}
1872 
1873 	/*
1874 	 * Calculate the total number of streams.
1875 	 * Also distribute those streams among the available
1876 	 * logical cores, except the first core, since it is
1877 	 * reserved for stats printing.
1878 	 */
1879 	nb_fwd_streams = nr_port * rx_queues_count;
1880 	if ((int)(nb_lcores - 1) >= nb_fwd_streams)
1881 		for (i = 0; i < (int)(nb_lcores - 1); i++) {
1882 			lcore = rte_get_next_lcore(lcore, 0, 0);
1883 			lcore_infos[lcore].streams_nb = 1;
1884 		}
1885 	else {
1886 		streams_per_core = nb_fwd_streams / (nb_lcores - 1);
1887 		unassigned_streams = nb_fwd_streams % (nb_lcores - 1);
1888 		for (i = 0; i < (int)(nb_lcores - 1); i++) {
1889 			lcore = rte_get_next_lcore(lcore, 0, 0);
1890 			lcore_infos[lcore].streams_nb = streams_per_core;
1891 			if (unassigned_streams) {
1892 				lcore_infos[lcore].streams_nb++;
1893 				unassigned_streams--;
1894 			}
1895 		}
1896 	}
1897 
1898 	/*
1899 	 * Assign streams to the cores according to each logical
1900 	 * core's stream count.
1901 	 * Streams follow a receive-then-forward-back design: packets
1902 	 * received on port 0 queue 0 are forwarded back on the same
1903 	 * port and queue, by the same logical core that received
1904 	 * them.
1905 	 */
1906 	lcore = rte_get_next_lcore(-1, 0, 0);
1907 	for (port = 0; port < nr_port; port++) {
1908 		/* Create FWD stream */
1909 		for (queue = 0; queue < rx_queues_count; queue++) {
1910 			if (!lcore_infos[lcore].streams_nb ||
1911 				!(stream_id % lcore_infos[lcore].streams_nb)) {
1912 				lcore = rte_get_next_lcore(lcore, 0, 0);
1913 				lcore_infos[lcore].mode = LCORE_MODE_PKT;
1914 				stream_id = 0;
1915 			}
1916 			lcore_infos[lcore].streams[stream_id].rx_queue = queue;
1917 			lcore_infos[lcore].streams[stream_id].tx_queue = queue;
1918 			lcore_infos[lcore].streams[stream_id].rx_port = port;
1919 			lcore_infos[lcore].streams[stream_id].tx_port = port;
1920 			stream_id++;
1921 		}
1922 	}
1923 
1924 	/* Print all streams */
1925 	printf(":: Stream -> core id[N]: (rx_port, rx_queue)->(tx_port, tx_queue)\n");
1926 	for (i = 0; i < RTE_MAX_LCORE; i++)
1927 		for (j = 0; j < MAX_STREAMS; j++) {
1928 			/* No streams for this core */
1929 			/* No more streams configured on this core. */
1930 				break;
1931 			printf("Stream -> core id[%d]: (%d,%d)->(%d,%d)\n",
1932 				i,
1933 				lcore_infos[i].streams[j].rx_port,
1934 				lcore_infos[i].streams[j].rx_queue,
1935 				lcore_infos[i].streams[j].tx_port,
1936 				lcore_infos[i].streams[j].tx_queue);
1937 		}
1938 }
1939 
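/*
 * Create the mbuf pool and bring up every available port: negotiate Rx
 * metadata delivery, configure the Rx/Tx queues (plus hairpin queues when
 * requested), enable promiscuous mode and start the port.
 */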
1940 static void
1941 init_port(void)
1942 {
1943 	int ret;
1944 	uint16_t std_queue;
1945 	uint16_t hairpin_queue;
1946 	uint16_t port_id;
1947 	uint16_t nr_ports;
1948 	uint16_t nr_queues;
1949 	struct rte_eth_hairpin_conf hairpin_conf = {
1950 		.peer_count = 1,
1951 	};
1952 	struct rte_eth_conf port_conf = {
1953 		.rx_adv_conf = {
1954 			.rss_conf.rss_hf =
1955 				GET_RSS_HF(),
1956 		}
1957 	};
1958 	struct rte_eth_txconf txq_conf;
1959 	struct rte_eth_rxconf rxq_conf;
1960 	struct rte_eth_dev_info dev_info;
1961 
1962 	nr_queues = rx_queues_count;
1963 	if (hairpin_queues_num != 0)
1964 		nr_queues = rx_queues_count + hairpin_queues_num;
1965 
1966 	nr_ports = rte_eth_dev_count_avail();
1967 	if (nr_ports == 0)
1968 		rte_exit(EXIT_FAILURE, "Error: no port detected\n");
1969 
1970 	mbuf_mp = rte_pktmbuf_pool_create("mbuf_pool",
1971 					total_mbuf_num, mbuf_cache_size,
1972 					0, mbuf_size,
1973 					rte_socket_id());
1974 	if (mbuf_mp == NULL)
1975 		rte_exit(EXIT_FAILURE, "Error: can't init mbuf pool\n");
1976 
1977 	for (port_id = 0; port_id < nr_ports; port_id++) {
1978 		uint64_t rx_metadata = 0;
1979 
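		/* Request delivery of FLAG and MARK metadata with received
		 * mbufs; the warnings below note when the PMD cannot honour
		 * the request.
		 */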
1980 		rx_metadata |= RTE_ETH_RX_METADATA_USER_FLAG;
1981 		rx_metadata |= RTE_ETH_RX_METADATA_USER_MARK;
1982 
1983 		ret = rte_eth_rx_metadata_negotiate(port_id, &rx_metadata);
1984 		if (ret == 0) {
1985 			if (!(rx_metadata & RTE_ETH_RX_METADATA_USER_FLAG)) {
1986 				printf(":: flow action FLAG will not affect Rx mbufs on port=%u\n",
1987 				       port_id);
1988 			}
1989 
1990 			if (!(rx_metadata & RTE_ETH_RX_METADATA_USER_MARK)) {
1991 				printf(":: flow action MARK will not affect Rx mbufs on port=%u\n",
1992 				       port_id);
1993 			}
1994 		} else if (ret != -ENOTSUP) {
1995 			rte_exit(EXIT_FAILURE, "Error when negotiating Rx meta features on port=%u: %s\n",
1996 				 port_id, rte_strerror(-ret));
1997 		}
1998 
1999 		ret = rte_eth_dev_info_get(port_id, &dev_info);
2000 		if (ret != 0)
2001 			rte_exit(EXIT_FAILURE,
2002 				"Error during getting device"
2003 				" (port %u) info: %s\n",
2004 				port_id, strerror(-ret));
2005 
2006 		port_conf.txmode.offloads &= dev_info.tx_offload_capa;
2007 		port_conf.rxmode.offloads &= dev_info.rx_offload_capa;
2008 
2009 		printf(":: initializing port: %d\n", port_id);
2010 
2011 		ret = rte_eth_dev_configure(port_id, nr_queues,
2012 				nr_queues, &port_conf);
2013 		if (ret < 0)
2014 			rte_exit(EXIT_FAILURE,
2015 				":: cannot configure device: err=%d, port=%u\n",
2016 				ret, port_id);
2017 
2018 		rxq_conf = dev_info.default_rxconf;
2019 		for (std_queue = 0; std_queue < rx_queues_count; std_queue++) {
2020 			ret = rte_eth_rx_queue_setup(port_id, std_queue, rxd_count,
2021 					rte_eth_dev_socket_id(port_id),
2022 					&rxq_conf,
2023 					mbuf_mp);
2024 			if (ret < 0)
2025 				rte_exit(EXIT_FAILURE,
2026 					":: Rx queue setup failed: err=%d, port=%u\n",
2027 					ret, port_id);
2028 		}
2029 
2030 		txq_conf = dev_info.default_txconf;
2031 		for (std_queue = 0; std_queue < tx_queues_count; std_queue++) {
2032 			ret = rte_eth_tx_queue_setup(port_id, std_queue, txd_count,
2033 					rte_eth_dev_socket_id(port_id),
2034 					&txq_conf);
2035 			if (ret < 0)
2036 				rte_exit(EXIT_FAILURE,
2037 					":: Tx queue setup failed: err=%d, port=%u\n",
2038 					ret, port_id);
2039 		}
2040 
2041 		/* Catch all packets from traffic generator. */
2042 		ret = rte_eth_promiscuous_enable(port_id);
2043 		if (ret != 0)
2044 			rte_exit(EXIT_FAILURE,
2045 				":: promiscuous mode enable failed: err=%s, port=%u\n",
2046 				rte_strerror(-ret), port_id);
2047 
2048 		if (hairpin_queues_num != 0) {
2049 			/*
2050 			 * Configure the peer, which represents the hairpin Tx queue.
2051 			 * Hairpin queue numbers start after standard queues
2052 			 * (rx_queues_count and tx_queues_count).
2053 			 */
2054 			for (hairpin_queue = rx_queues_count, std_queue = 0;
2055 					hairpin_queue < nr_queues;
2056 					hairpin_queue++, std_queue++) {
2057 				hairpin_conf.peers[0].port = port_id;
2058 				hairpin_conf.peers[0].queue =
2059 					std_queue + tx_queues_count;
2060 				hairpin_conf.use_locked_device_memory =
2061 					!!(hairpin_conf_mask & HAIRPIN_RX_CONF_LOCKED_MEMORY);
2062 				hairpin_conf.use_rte_memory =
2063 					!!(hairpin_conf_mask & HAIRPIN_RX_CONF_RTE_MEMORY);
2064 				hairpin_conf.force_memory =
2065 					!!(hairpin_conf_mask & HAIRPIN_RX_CONF_FORCE_MEMORY);
2066 				ret = rte_eth_rx_hairpin_queue_setup(
2067 						port_id, hairpin_queue,
2068 						rxd_count, &hairpin_conf);
2069 				if (ret != 0)
2070 					rte_exit(EXIT_FAILURE,
2071 						":: Hairpin rx queue setup failed: err=%d, port=%u\n",
2072 						ret, port_id);
2073 			}
2074 
2075 			for (hairpin_queue = tx_queues_count, std_queue = 0;
2076 					hairpin_queue < nr_queues;
2077 					hairpin_queue++, std_queue++) {
2078 				hairpin_conf.peers[0].port = port_id;
2079 				hairpin_conf.peers[0].queue =
2080 					std_queue + rx_queues_count;
2081 				hairpin_conf.use_locked_device_memory =
2082 					!!(hairpin_conf_mask & HAIRPIN_TX_CONF_LOCKED_MEMORY);
2083 				hairpin_conf.use_rte_memory =
2084 					!!(hairpin_conf_mask & HAIRPIN_TX_CONF_RTE_MEMORY);
2085 				hairpin_conf.force_memory =
2086 					!!(hairpin_conf_mask & HAIRPIN_TX_CONF_FORCE_MEMORY);
2087 				ret = rte_eth_tx_hairpin_queue_setup(
2088 						port_id, hairpin_queue,
2089 						txd_count, &hairpin_conf);
2090 				if (ret != 0)
2091 					rte_exit(EXIT_FAILURE,
2092 						":: Hairpin tx queue setup failed: err=%d, port=%u\n",
2093 						ret, port_id);
2094 			}
2095 		}
2096 
2097 		ret = rte_eth_dev_start(port_id);
2098 		if (ret < 0)
2099 			rte_exit(EXIT_FAILURE,
2100 				"rte_eth_dev_start:err=%d, port=%u\n",
2101 				ret, port_id);
2102 
2103 		printf(":: initializing port: %d done\n", port_id);
2104 	}
2105 }
2106 
2107 int
2108 main(int argc, char **argv)
2109 {
2110 	int ret;
2111 	uint16_t port;
2112 	struct rte_flow_error error;
2113 
2114 	ret = rte_eal_init(argc, argv);
2115 	if (ret < 0)
2116 		rte_exit(EXIT_FAILURE, "EAL init failed\n");
2117 
2118 	force_quit = false;
2119 	dump_iterations = false;
2120 	rules_count = DEFAULT_RULES_COUNT;
2121 	rules_batch = DEFAULT_RULES_BATCH;
2122 	delete_flag = false;
2123 	dump_socket_mem_flag = false;
2124 	flow_group = DEFAULT_GROUP;
2125 	unique_data = false;
2126 
2127 	rx_queues_count = (uint8_t) RXQ_NUM;
2128 	tx_queues_count = (uint8_t) TXQ_NUM;
2129 	rxd_count = (uint8_t) NR_RXD;
2130 	txd_count = (uint8_t) NR_TXD;
2131 	mbuf_size = (uint32_t) MBUF_SIZE;
2132 	mbuf_cache_size = (uint32_t) MBUF_CACHE_SIZE;
2133 	total_mbuf_num = (uint32_t) TOTAL_MBUF_NUM;
2134 
2135 	signal(SIGINT, signal_handler);
2136 	signal(SIGTERM, signal_handler);
2137 
2138 	argc -= ret;
2139 	argv += ret;
2140 	if (argc > 1)
2141 		args_parse(argc, argv);
2142 
2143 	/* Enable locale-aware (thousands-separated) integer formatting. */
2144 	setlocale(LC_NUMERIC, "");
2145 
2146 	init_port();
2147 
2148 	nb_lcores = rte_lcore_count();
2149 	if (nb_lcores <= 1)
2150 		rte_exit(EXIT_FAILURE, "This app needs at least two cores\n");
2151 
2152 	printf(":: Flows Count per port: %d\n\n", rules_count);
2153 
2154 	rte_srand(rand_seed);
2155 
2156 	if (has_meter()) {
2157 		create_meter_profile();
2158 		if (policy_mtr)
2159 			create_meter_policy();
2160 	}
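	/* Run rule insertion (and optional deletion) on all lcores, including the main core. */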
2161 	rte_eal_mp_remote_launch(run_rte_flow_handler_cores, NULL, CALL_MAIN);
2162 
2163 	if (enable_fwd) {
2164 		init_lcore_info();
2165 		rte_eal_mp_remote_launch(start_forwarding, NULL, CALL_MAIN);
2166 	}
2167 	if (has_meter() && delete_flag) {
2168 		destroy_meter_profile();
2169 		if (policy_mtr)
2170 			destroy_meter_policy();
2171 	}
2172 
2173 	RTE_ETH_FOREACH_DEV(port) {
2174 		rte_flow_flush(port, &error);
2175 		if (rte_eth_dev_stop(port) != 0)
2176 			printf("Failed to stop device on port %u\n", port);
2177 		rte_eth_dev_close(port);
2178 	}
2179 	printf("\nBye ...\n");
2180 	return 0;
2181 }
2182