xref: /dpdk/app/test/test_lpm.c (revision ceb1ccd5d50c1a89ba8bdd97cc199e7f07422b98)
1 /*-
2  *   BSD LICENSE
3  *
4  *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
5  *   All rights reserved.
6  *
7  *   Redistribution and use in source and binary forms, with or without
8  *   modification, are permitted provided that the following conditions
9  *   are met:
10  *
11  *     * Redistributions of source code must retain the above copyright
12  *       notice, this list of conditions and the following disclaimer.
13  *     * Redistributions in binary form must reproduce the above copyright
14  *       notice, this list of conditions and the following disclaimer in
15  *       the documentation and/or other materials provided with the
16  *       distribution.
17  *     * Neither the name of Intel Corporation nor the names of its
18  *       contributors may be used to endorse or promote products derived
19  *       from this software without specific prior written permission.
20  *
21  *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24  *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25  *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26  *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27  *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28  *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29  *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30  *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31  *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32  */
33 
34 #include <stdio.h>
35 #include <stdint.h>
36 #include <stdlib.h>
37 #include <errno.h>
38 #include <sys/queue.h>
39 
40 #include <rte_common.h>
41 #include <rte_cycles.h>
42 #include <rte_memory.h>
43 #include <rte_random.h>
44 #include <rte_branch_prediction.h>
45 #include <rte_ip.h>
46 #include <time.h>
47 
48 #include "test.h"
49 
50 #include "rte_lpm.h"
51 #include "test_lpm_routes.h"
52 #include "test_xmmt_ops.h"
53 
54 #define TEST_LPM_ASSERT(cond) do {                                            \
55 	if (!(cond)) {                                                        \
56 		printf("Error at line %d:\n", __LINE__);                      \
57 		return -1;                                                    \
58 	}                                                                     \
59 } while(0)
60 
61 typedef int32_t (*rte_lpm_test)(void);
62 
63 static int32_t test0(void);
64 static int32_t test1(void);
65 static int32_t test2(void);
66 static int32_t test3(void);
67 static int32_t test4(void);
68 static int32_t test5(void);
69 static int32_t test6(void);
70 static int32_t test7(void);
71 static int32_t test8(void);
72 static int32_t test9(void);
73 static int32_t test10(void);
74 static int32_t test11(void);
75 static int32_t test12(void);
76 static int32_t test13(void);
77 static int32_t test14(void);
78 static int32_t test15(void);
79 static int32_t test16(void);
80 static int32_t test17(void);
81 static int32_t perf_test(void);
82 
83 rte_lpm_test tests[] = {
84 /* Test Cases */
85 	test0,
86 	test1,
87 	test2,
88 	test3,
89 	test4,
90 	test5,
91 	test6,
92 	test7,
93 	test8,
94 	test9,
95 	test10,
96 	test11,
97 	test12,
98 	test13,
99 	test14,
100 	test15,
101 	test16,
102 	test17,
103 	perf_test,
104 };
105 
106 #define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
107 #define MAX_DEPTH 32
108 #define MAX_RULES 256
109 #define NUMBER_TBL8S 256
110 #define PASS 0
111 
112 /*
113  * Check that rte_lpm_create fails gracefully for incorrect user input
114  * arguments
115  */
116 int32_t
117 test0(void)
118 {
119 	struct rte_lpm *lpm = NULL;
120 	struct rte_lpm_config config;
121 
122 	config.max_rules = MAX_RULES;
123 	config.number_tbl8s = NUMBER_TBL8S;
124 	config.flags = 0;
125 
126 	/* rte_lpm_create: lpm name == NULL */
127 	lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, &config);
128 	TEST_LPM_ASSERT(lpm == NULL);
129 
130 	/* rte_lpm_create: max_rules = 0 */
131 	/* Note: __func__ inserts the function name, in this case "test0". */
132 	config.max_rules = 0;
133 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
134 	TEST_LPM_ASSERT(lpm == NULL);
135 
136 	/* socket_id < -1 is invalid */
137 	config.max_rules = MAX_RULES;
138 	lpm = rte_lpm_create(__func__, -2, &config);
139 	TEST_LPM_ASSERT(lpm == NULL);
140 
141 	return PASS;
142 }
143 
144 /*
145  * Create an lpm table, then free it, 100 times.
146  * Use a slightly different number of max rules each time.
147  */
148 int32_t
149 test1(void)
150 {
151 	struct rte_lpm *lpm = NULL;
152 	struct rte_lpm_config config;
153 
154 	config.number_tbl8s = NUMBER_TBL8S;
155 	config.flags = 0;
156 	int32_t i;
157 
158 	/* Create and free the table repeatedly with varying max_rules. */
159 	for (i = 0; i < 100; i++) {
160 		config.max_rules = MAX_RULES - i;
161 		lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
162 		TEST_LPM_ASSERT(lpm != NULL);
163 
164 		rte_lpm_free(lpm);
165 	}
166 
167 	/* Cannot verify the result of free, so just return success. */
168 	return PASS;
169 }
170 
171 /*
172  * Call rte_lpm_free for NULL pointer user input. Note: free has no return
173  * value and therefore it is impossible to check for failure, but this test
174  * is added to increase function coverage metrics and to validate that
175  * freeing NULL does not crash.
176  */
177 int32_t
178 test2(void)
179 {
180 	struct rte_lpm *lpm = NULL;
181 	struct rte_lpm_config config;
182 
183 	config.max_rules = MAX_RULES;
184 	config.number_tbl8s = NUMBER_TBL8S;
185 	config.flags = 0;
186 
187 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
188 	TEST_LPM_ASSERT(lpm != NULL);
189 
190 	rte_lpm_free(lpm);
191 	rte_lpm_free(NULL);
192 	return PASS;
193 }
194 
195 /*
196  * Check that rte_lpm_add fails gracefully for incorrect user input arguments
197  */
198 int32_t
199 test3(void)
200 {
201 	struct rte_lpm *lpm = NULL;
202 	struct rte_lpm_config config;
203 
204 	config.max_rules = MAX_RULES;
205 	config.number_tbl8s = NUMBER_TBL8S;
206 	config.flags = 0;
207 	uint32_t ip = IPv4(0, 0, 0, 0), next_hop = 100;
208 	uint8_t depth = 24;
209 	int32_t status = 0;
210 
211 	/* rte_lpm_add: lpm == NULL */
212 	status = rte_lpm_add(NULL, ip, depth, next_hop);
213 	TEST_LPM_ASSERT(status < 0);
214 
215 	/* Create valid lpm to use in rest of test. */
216 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
217 	TEST_LPM_ASSERT(lpm != NULL);
218 
219 	/* rte_lpm_add: depth < 1 */
220 	status = rte_lpm_add(lpm, ip, 0, next_hop);
221 	TEST_LPM_ASSERT(status < 0);
222 
223 	/* rte_lpm_add: depth > MAX_DEPTH */
224 	status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
225 	TEST_LPM_ASSERT(status < 0);
226 
227 	rte_lpm_free(lpm);
228 
229 	return PASS;
230 }
231 
232 /*
233  * Check that rte_lpm_delete fails gracefully for incorrect user input
234  * arguments
235  */
236 int32_t
237 test4(void)
238 {
239 	struct rte_lpm *lpm = NULL;
240 	struct rte_lpm_config config;
241 
242 	config.max_rules = MAX_RULES;
243 	config.number_tbl8s = NUMBER_TBL8S;
244 	config.flags = 0;
245 	uint32_t ip = IPv4(0, 0, 0, 0);
246 	uint8_t depth = 24;
247 	int32_t status = 0;
248 
249 	/* rte_lpm_delete: lpm == NULL */
250 	status = rte_lpm_delete(NULL, ip, depth);
251 	TEST_LPM_ASSERT(status < 0);
252 
253 	/* Create valid lpm to use in rest of test. */
254 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
255 	TEST_LPM_ASSERT(lpm != NULL);
256 
257 	/* rte_lpm_delete: depth < 1 */
258 	status = rte_lpm_delete(lpm, ip, 0);
259 	TEST_LPM_ASSERT(status < 0);
260 
261 	/* rte_lpm_delete: depth > MAX_DEPTH */
262 	status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
263 	TEST_LPM_ASSERT(status < 0);
264 
265 	rte_lpm_free(lpm);
266 
267 	return PASS;
268 }
269 
270 /*
271  * Check that rte_lpm_lookup fails gracefully for incorrect user input
272  * arguments
273  */
274 int32_t
275 test5(void)
276 {
277 #if defined(RTE_LIBRTE_LPM_DEBUG)
278 	struct rte_lpm *lpm = NULL;
279 	struct rte_lpm_config config;
280 
281 	config.max_rules = MAX_RULES;
282 	config.number_tbl8s = NUMBER_TBL8S;
283 	config.flags = 0;
284 	uint32_t ip = IPv4(0, 0, 0, 0), next_hop_return = 0;
285 	int32_t status = 0;
286 
287 	/* rte_lpm_lookup: lpm == NULL */
288 	status = rte_lpm_lookup(NULL, ip, &next_hop_return);
289 	TEST_LPM_ASSERT(status < 0);
290 
291 	/* Create valid lpm to use in rest of test. */
292 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
293 	TEST_LPM_ASSERT(lpm != NULL);
294 
295 	/* rte_lpm_lookup: next_hop = NULL */
296 	status = rte_lpm_lookup(lpm, ip, NULL);
297 	TEST_LPM_ASSERT(status < 0);
298 
299 	rte_lpm_free(lpm);
300 #endif
301 	return PASS;
302 }
303 
304 
305 
306 /*
307  * Call add, lookup and delete for a single rule with depth <= 24
308  */
309 int32_t
310 test6(void)
311 {
312 	struct rte_lpm *lpm = NULL;
313 	struct rte_lpm_config config;
314 
315 	config.max_rules = MAX_RULES;
316 	config.number_tbl8s = NUMBER_TBL8S;
317 	config.flags = 0;
318 	uint32_t ip = IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
319 	uint8_t depth = 24;
320 	int32_t status = 0;
321 
322 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
323 	TEST_LPM_ASSERT(lpm != NULL);
324 
325 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
326 	TEST_LPM_ASSERT(status == 0);
327 
328 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
329 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
330 
331 	status = rte_lpm_delete(lpm, ip, depth);
332 	TEST_LPM_ASSERT(status == 0);
333 
334 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
335 	TEST_LPM_ASSERT(status == -ENOENT);
336 
337 	rte_lpm_free(lpm);
338 
339 	return PASS;
340 }
341 
342 /*
343  * Call add, lookup and delete for a single rule with depth > 24
344  */
345 
346 int32_t
347 test7(void)
348 {
349 	xmm_t ipx4;
350 	uint32_t hop[4];
351 	struct rte_lpm *lpm = NULL;
352 	struct rte_lpm_config config;
353 
354 	config.max_rules = MAX_RULES;
355 	config.number_tbl8s = NUMBER_TBL8S;
356 	config.flags = 0;
357 	uint32_t ip = IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
358 	uint8_t depth = 32;
359 	int32_t status = 0;
360 
361 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
362 	TEST_LPM_ASSERT(lpm != NULL);
363 
364 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
365 	TEST_LPM_ASSERT(status == 0);
366 
367 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
368 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
369 
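	/* Look up four addresses at once: the lanes equal to ip match the /32
	 * route, while the ip +/- 0x100 lanes miss and return the default
	 * value (UINT32_MAX) passed to rte_lpm_lookupx4(). */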
370 	ipx4 = vect_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
371 	rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
372 	TEST_LPM_ASSERT(hop[0] == next_hop_add);
373 	TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
374 	TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
375 	TEST_LPM_ASSERT(hop[3] == next_hop_add);
376 
377 	status = rte_lpm_delete(lpm, ip, depth);
378 	TEST_LPM_ASSERT(status == 0);
379 
380 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
381 	TEST_LPM_ASSERT(status == -ENOENT);
382 
383 	rte_lpm_free(lpm);
384 
385 	return PASS;
386 }
387 
388 /*
389  * Use rte_lpm_add to add rules which affect only the second half of the lpm
390  * table. Use all possible depths ranging from 1..32. Set the next hop equal
391  * to the depth. Check for a lookup hit on every add and for a lookup miss on
392  * the first half of the lpm table after each add. Finally delete all rules
393  * going backwards (i.e. from depth = 32..1) and carry out a lookup after each
394  * delete. The lookup should return the next_hop_add value related to the
395  * previous depth value (i.e. depth - 1).
396  */
397 int32_t
398 test8(void)
399 {
400 	xmm_t ipx4;
401 	uint32_t hop[4];
402 	struct rte_lpm *lpm = NULL;
403 	struct rte_lpm_config config;
404 
405 	config.max_rules = MAX_RULES;
406 	config.number_tbl8s = NUMBER_TBL8S;
407 	config.flags = 0;
408 	uint32_t ip1 = IPv4(127, 255, 255, 255), ip2 = IPv4(128, 0, 0, 0);
409 	uint32_t next_hop_add, next_hop_return;
410 	uint8_t depth;
411 	int32_t status = 0;
412 
413 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
414 	TEST_LPM_ASSERT(lpm != NULL);
415 
416 	/* Loop with rte_lpm_add. */
417 	for (depth = 1; depth <= 32; depth++) {
418 		/* Let the next_hop_add value = depth, just for variety. */
419 		next_hop_add = depth;
420 
421 		status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
422 		TEST_LPM_ASSERT(status == 0);
423 
424 		/* Check IP in first half of tbl24 which should be empty. */
425 		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
426 		TEST_LPM_ASSERT(status == -ENOENT);
427 
428 		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
429 		TEST_LPM_ASSERT((status == 0) &&
430 			(next_hop_return == next_hop_add));
431 
432 		ipx4 = vect_set_epi32(ip2, ip1, ip2, ip1);
433 		rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
434 		TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
435 		TEST_LPM_ASSERT(hop[1] == next_hop_add);
436 		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
437 		TEST_LPM_ASSERT(hop[3] == next_hop_add);
438 	}
439 
440 	/* Loop with rte_lpm_delete. */
441 	for (depth = 32; depth >= 1; depth--) {
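		/* For depth > 1, the longest remaining match after the delete is
		 * the depth - 1 rule, whose next hop equals its own depth;
		 * deleting the final /1 rule leaves a lookup miss. */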
442 		next_hop_add = (uint8_t) (depth - 1);
443 
444 		status = rte_lpm_delete(lpm, ip2, depth);
445 		TEST_LPM_ASSERT(status == 0);
446 
447 		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
448 
449 		if (depth != 1) {
450 			TEST_LPM_ASSERT((status == 0) &&
451 				(next_hop_return == next_hop_add));
452 		} else {
453 			TEST_LPM_ASSERT(status == -ENOENT);
454 		}
455 
456 		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
457 		TEST_LPM_ASSERT(status == -ENOENT);
458 
459 		ipx4 = vect_set_epi32(ip1, ip1, ip2, ip2);
460 		rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
461 		if (depth != 1) {
462 			TEST_LPM_ASSERT(hop[0] == next_hop_add);
463 			TEST_LPM_ASSERT(hop[1] == next_hop_add);
464 		} else {
465 			TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
466 			TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
467 		}
468 		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
469 		TEST_LPM_ASSERT(hop[3] == UINT32_MAX);
470 	}
471 
472 	rte_lpm_free(lpm);
473 
474 	return PASS;
475 }
476 
477 /*
478  * - Add & lookup to hit invalid TBL24 entry
479  * - Add & lookup to hit valid TBL24 entry not extended
480  * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
481  * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
482  *
483  */
484 int32_t
485 test9(void)
486 {
487 	struct rte_lpm *lpm = NULL;
488 	struct rte_lpm_config config;
489 
490 	config.max_rules = MAX_RULES;
491 	config.number_tbl8s = NUMBER_TBL8S;
492 	config.flags = 0;
493 	uint32_t ip, ip_1, ip_2;
494 	uint8_t depth, depth_1, depth_2;
495 	uint32_t next_hop_add, next_hop_add_1, next_hop_add_2, next_hop_return;
496 	int32_t status = 0;
497 
498 	/* Add & lookup to hit invalid TBL24 entry */
499 	ip = IPv4(128, 0, 0, 0);
500 	depth = 24;
501 	next_hop_add = 100;
502 
503 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
504 	TEST_LPM_ASSERT(lpm != NULL);
505 
506 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
507 	TEST_LPM_ASSERT(status == 0);
508 
509 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
510 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
511 
512 	status = rte_lpm_delete(lpm, ip, depth);
513 	TEST_LPM_ASSERT(status == 0);
514 
515 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
516 	TEST_LPM_ASSERT(status == -ENOENT);
517 
518 	rte_lpm_delete_all(lpm);
519 
520 	/* Add & lookup to hit valid TBL24 entry not extended */
521 	ip = IPv4(128, 0, 0, 0);
522 	depth = 23;
523 	next_hop_add = 100;
524 
525 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
526 	TEST_LPM_ASSERT(status == 0);
527 
528 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
529 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
530 
531 	depth = 24;
532 	next_hop_add = 101;
533 
534 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
535 	TEST_LPM_ASSERT(status == 0);
536 
537 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
538 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
539 
540 	depth = 24;
541 
542 	status = rte_lpm_delete(lpm, ip, depth);
543 	TEST_LPM_ASSERT(status == 0);
544 
545 	depth = 23;
546 
547 	status = rte_lpm_delete(lpm, ip, depth);
548 	TEST_LPM_ASSERT(status == 0);
549 
550 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
551 	TEST_LPM_ASSERT(status == -ENOENT);
552 
553 	rte_lpm_delete_all(lpm);
554 
555 	/* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
556 	 * entry */
557 	ip = IPv4(128, 0, 0, 0);
558 	depth = 32;
559 	next_hop_add = 100;
560 
561 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
562 	TEST_LPM_ASSERT(status == 0);
563 
564 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
565 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
566 
567 	ip = IPv4(128, 0, 0, 5);
568 	depth = 32;
569 	next_hop_add = 101;
570 
571 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
572 	TEST_LPM_ASSERT(status == 0);
573 
574 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
575 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
576 
577 	status = rte_lpm_delete(lpm, ip, depth);
578 	TEST_LPM_ASSERT(status == 0);
579 
580 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
581 	TEST_LPM_ASSERT(status == -ENOENT);
582 
583 	ip = IPv4(128, 0, 0, 0);
584 	depth = 32;
585 	next_hop_add = 100;
586 
587 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
588 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
589 
590 	status = rte_lpm_delete(lpm, ip, depth);
591 	TEST_LPM_ASSERT(status == 0);
592 
593 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
594 	TEST_LPM_ASSERT(status == -ENOENT);
595 
596 	rte_lpm_delete_all(lpm);
597 
598 	/* Add & lookup to hit valid extended TBL24 entry with valid TBL8
599 	 * entry */
600 	ip_1 = IPv4(128, 0, 0, 0);
601 	depth_1 = 25;
602 	next_hop_add_1 = 101;
603 
604 	ip_2 = IPv4(128, 0, 0, 5);
605 	depth_2 = 32;
606 	next_hop_add_2 = 102;
607 
608 	next_hop_return = 0;
609 
610 	status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
611 	TEST_LPM_ASSERT(status == 0);
612 
613 	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
614 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
615 
616 	status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
617 	TEST_LPM_ASSERT(status == 0);
618 
619 	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
620 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));
621 
622 	status = rte_lpm_delete(lpm, ip_2, depth_2);
623 	TEST_LPM_ASSERT(status == 0);
624 
625 	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
626 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
627 
628 	status = rte_lpm_delete(lpm, ip_1, depth_1);
629 	TEST_LPM_ASSERT(status == 0);
630 
631 	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
632 	TEST_LPM_ASSERT(status == -ENOENT);
633 
634 	rte_lpm_free(lpm);
635 
636 	return PASS;
637 }
638 
639 
640 /*
641  * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
642  *   lookup)
643  * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
644  * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
645  *   delete & lookup)
646  * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
647  * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
648  * - Delete a rule that is not present in the TBL24 & lookup
649  * - Delete a rule that is not present in the TBL8 & lookup
650  *
651  */
652 int32_t
653 test10(void)
654 {
655 
656 	struct rte_lpm *lpm = NULL;
657 	struct rte_lpm_config config;
658 
659 	config.max_rules = MAX_RULES;
660 	config.number_tbl8s = NUMBER_TBL8S;
661 	config.flags = 0;
662 	uint32_t ip, next_hop_add, next_hop_return;
663 	uint8_t depth;
664 	int32_t status = 0;
665 
666 	/* Add rule that covers a TBL24 range previously invalid & lookup
667 	 * (& delete & lookup) */
668 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
669 	TEST_LPM_ASSERT(lpm != NULL);
670 
671 	ip = IPv4(128, 0, 0, 0);
672 	depth = 16;
673 	next_hop_add = 100;
674 
675 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
676 	TEST_LPM_ASSERT(status == 0);
677 
678 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
679 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
680 
681 	status = rte_lpm_delete(lpm, ip, depth);
682 	TEST_LPM_ASSERT(status == 0);
683 
684 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
685 	TEST_LPM_ASSERT(status == -ENOENT);
686 
687 	rte_lpm_delete_all(lpm);
688 
689 	ip = IPv4(128, 0, 0, 0);
690 	depth = 25;
691 	next_hop_add = 100;
692 
693 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
694 	TEST_LPM_ASSERT(status == 0);
695 
696 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
697 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
698 
699 	status = rte_lpm_delete(lpm, ip, depth);
700 	TEST_LPM_ASSERT(status == 0);
701 
702 	rte_lpm_delete_all(lpm);
703 
704 	/* Add rule that extends a TBL24 valid entry & lookup for both rules
705 	 * (& delete & lookup) */
706 
707 	ip = IPv4(128, 0, 0, 0);
708 	depth = 24;
709 	next_hop_add = 100;
710 
711 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
712 	TEST_LPM_ASSERT(status == 0);
713 
714 	ip = IPv4(128, 0, 0, 10);
715 	depth = 32;
716 	next_hop_add = 101;
717 
718 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
719 	TEST_LPM_ASSERT(status == 0);
720 
721 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
722 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
723 
724 	ip = IPv4(128, 0, 0, 0);
725 	next_hop_add = 100;
726 
727 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
728 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
729 
730 	ip = IPv4(128, 0, 0, 0);
731 	depth = 24;
732 
733 	status = rte_lpm_delete(lpm, ip, depth);
734 	TEST_LPM_ASSERT(status == 0);
735 
736 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
737 	TEST_LPM_ASSERT(status == -ENOENT);
738 
739 	ip = IPv4(128, 0, 0, 10);
740 	depth = 32;
741 
742 	status = rte_lpm_delete(lpm, ip, depth);
743 	TEST_LPM_ASSERT(status == 0);
744 
745 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
746 	TEST_LPM_ASSERT(status == -ENOENT);
747 
748 	rte_lpm_delete_all(lpm);
749 
750 	/* Add rule that updates the next hop in TBL24 & lookup
751 	 * (& delete & lookup) */
752 
753 	ip = IPv4(128, 0, 0, 0);
754 	depth = 24;
755 	next_hop_add = 100;
756 
757 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
758 	TEST_LPM_ASSERT(status == 0);
759 
760 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
761 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
762 
763 	next_hop_add = 101;
764 
765 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
766 	TEST_LPM_ASSERT(status == 0);
767 
768 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
769 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
770 
771 	status = rte_lpm_delete(lpm, ip, depth);
772 	TEST_LPM_ASSERT(status == 0);
773 
774 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
775 	TEST_LPM_ASSERT(status == -ENOENT);
776 
777 	rte_lpm_delete_all(lpm);
778 
779 	/* Add rule that updates the next hop in TBL8 & lookup
780 	 * (& delete & lookup) */
781 
782 	ip = IPv4(128, 0, 0, 0);
783 	depth = 32;
784 	next_hop_add = 100;
785 
786 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
787 	TEST_LPM_ASSERT(status == 0);
788 
789 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
790 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
791 
792 	next_hop_add = 101;
793 
794 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
795 	TEST_LPM_ASSERT(status == 0);
796 
797 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
798 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
799 
800 	status = rte_lpm_delete(lpm, ip, depth);
801 	TEST_LPM_ASSERT(status == 0);
802 
803 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
804 	TEST_LPM_ASSERT(status == -ENOENT);
805 
806 	rte_lpm_delete_all(lpm);
807 
808 	/* Delete a rule that is not present in the TBL24 & lookup */
809 
810 	ip = IPv4(128, 0, 0, 0);
811 	depth = 24;
812 
813 	status = rte_lpm_delete(lpm, ip, depth);
814 	TEST_LPM_ASSERT(status < 0);
815 
816 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
817 	TEST_LPM_ASSERT(status == -ENOENT);
818 
819 	rte_lpm_delete_all(lpm);
820 
821 	/* Delete a rule that is not present in the TBL8 & lookup */
822 
823 	ip = IPv4(128, 0, 0, 0);
824 	depth = 32;
825 
826 	status = rte_lpm_delete(lpm, ip, depth);
827 	TEST_LPM_ASSERT(status < 0);
828 
829 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
830 	TEST_LPM_ASSERT(status == -ENOENT);
831 
832 	rte_lpm_free(lpm);
833 
834 	return PASS;
835 }
836 
837 /*
838  * Add two rules, lookup to hit the more specific one, lookup to hit the less
839  * specific one, delete the less specific rule and lookup previous values
840  * again; add a more specific rule than the existing rule, lookup again.
841  *
842  */
843 int32_t
844 test11(void)
845 {
846 
847 	struct rte_lpm *lpm = NULL;
848 	struct rte_lpm_config config;
849 
850 	config.max_rules = MAX_RULES;
851 	config.number_tbl8s = NUMBER_TBL8S;
852 	config.flags = 0;
853 	uint32_t ip, next_hop_add, next_hop_return;
854 	uint8_t depth;
855 	int32_t status = 0;
856 
857 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
858 	TEST_LPM_ASSERT(lpm != NULL);
859 
860 	ip = IPv4(128, 0, 0, 0);
861 	depth = 24;
862 	next_hop_add = 100;
863 
864 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
865 	TEST_LPM_ASSERT(status == 0);
866 
867 	ip = IPv4(128, 0, 0, 10);
868 	depth = 32;
869 	next_hop_add = 101;
870 
871 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
872 	TEST_LPM_ASSERT(status == 0);
873 
874 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
875 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
876 
877 	ip = IPv4(128, 0, 0, 0);
878 	next_hop_add = 100;
879 
880 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
881 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));
882 
883 	ip = IPv4(128, 0, 0, 0);
884 	depth = 24;
885 
886 	status = rte_lpm_delete(lpm, ip, depth);
887 	TEST_LPM_ASSERT(status == 0);
888 
889 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
890 	TEST_LPM_ASSERT(status == -ENOENT);
891 
892 	ip = IPv4(128, 0, 0, 10);
893 	depth = 32;
894 
895 	status = rte_lpm_delete(lpm, ip, depth);
896 	TEST_LPM_ASSERT(status == 0);
897 
898 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
899 	TEST_LPM_ASSERT(status == -ENOENT);
900 
901 	rte_lpm_free(lpm);
902 
903 	return PASS;
904 }
905 
906 /*
907  * Add an extended rule (i.e. depth greater than 24), lookup (hit), delete,
908  * lookup (miss), in a for loop of 1000 times. This will check tbl8 extension
909  * and contraction.
910  *
911  */
912 
913 int32_t
914 test12(void)
915 {
916 	xmm_t ipx4;
917 	uint32_t hop[4];
918 	struct rte_lpm *lpm = NULL;
919 	struct rte_lpm_config config;
920 
921 	config.max_rules = MAX_RULES;
922 	config.number_tbl8s = NUMBER_TBL8S;
923 	config.flags = 0;
924 	uint32_t ip, i, next_hop_add, next_hop_return;
925 	uint8_t depth;
926 	int32_t status = 0;
927 
928 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
929 	TEST_LPM_ASSERT(lpm != NULL);
930 
931 	ip = IPv4(128, 0, 0, 0);
932 	depth = 32;
933 	next_hop_add = 100;
934 
935 	for (i = 0; i < 1000; i++) {
936 		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
937 		TEST_LPM_ASSERT(status == 0);
938 
939 		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
940 		TEST_LPM_ASSERT((status == 0) &&
941 				(next_hop_return == next_hop_add));
942 
943 		ipx4 = vect_set_epi32(ip, ip + 1, ip, ip - 1);
944 		rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
945 		TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
946 		TEST_LPM_ASSERT(hop[1] == next_hop_add);
947 		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
948 		TEST_LPM_ASSERT(hop[3] == next_hop_add);
949 
950 		status = rte_lpm_delete(lpm, ip, depth);
951 		TEST_LPM_ASSERT(status == 0);
952 
953 		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
954 		TEST_LPM_ASSERT(status == -ENOENT);
955 	}
956 
957 	rte_lpm_free(lpm);
958 
959 	return PASS;
960 }
961 
962 /*
963  * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
964  * tbl24 entry, lookup (hit), delete the rule that caused the tbl24 extension,
965  * lookup (miss) and repeat in a for loop of 1000 times. This will check tbl8
966  * extension and contraction.
967  *
968  */
969 
970 int32_t
971 test13(void)
972 {
973 	struct rte_lpm *lpm = NULL;
974 	struct rte_lpm_config config;
975 
976 	config.max_rules = MAX_RULES;
977 	config.number_tbl8s = NUMBER_TBL8S;
978 	config.flags = 0;
979 	uint32_t ip, i, next_hop_add_1, next_hop_add_2, next_hop_return;
980 	uint8_t depth;
981 	int32_t status = 0;
982 
983 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
984 	TEST_LPM_ASSERT(lpm != NULL);
985 
986 	ip = IPv4(128, 0, 0, 0);
987 	depth = 24;
988 	next_hop_add_1 = 100;
989 
990 	status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
991 	TEST_LPM_ASSERT(status == 0);
992 
993 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
994 	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));
995 
996 	depth = 32;
997 	next_hop_add_2 = 101;
998 
999 	for (i = 0; i < 1000; i++) {
1000 		status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
1001 		TEST_LPM_ASSERT(status == 0);
1002 
1003 		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1004 		TEST_LPM_ASSERT((status == 0) &&
1005 				(next_hop_return == next_hop_add_2));
1006 
1007 		status = rte_lpm_delete(lpm, ip, depth);
1008 		TEST_LPM_ASSERT(status == 0);
1009 
1010 		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1011 		TEST_LPM_ASSERT((status == 0) &&
1012 				(next_hop_return == next_hop_add_1));
1013 	}
1014 
1015 	depth = 24;
1016 
1017 	status = rte_lpm_delete(lpm, ip, depth);
1018 	TEST_LPM_ASSERT(status == 0);
1019 
1020 	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1021 	TEST_LPM_ASSERT(status == -ENOENT);
1022 
1023 	rte_lpm_free(lpm);
1024 
1025 	return PASS;
1026 }
1027 
1028 /*
1029  * Force TBL8 extension exhaustion. Add 256 rules that require a tbl8
1030  * extension. No more tbl8 extensions will be allowed. Now add one more rule
1031  * that requires a tbl8 extension and check that it fails.
1032  */
1033 int32_t
1034 test14(void)
1035 {
1036 
1037 	/* We only use depth = 32 in the loop below so we must make sure
1038 	 * that we have enough storage for all rules at that depth. */
1039 
1040 	struct rte_lpm *lpm = NULL;
1041 	struct rte_lpm_config config;
1042 
1043 	config.max_rules = 256 * 32;
1044 	config.number_tbl8s = NUMBER_TBL8S;
1045 	config.flags = 0;
1046 	uint32_t ip, next_hop_add, next_hop_return;
1047 	uint8_t depth;
1048 	int32_t status = 0;
1049 
1050 	/* Add enough space for 256 rules for every depth */
1051 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1052 	TEST_LPM_ASSERT(lpm != NULL);
1053 
1054 	depth = 32;
1055 	next_hop_add = 100;
1056 	ip = IPv4(0, 0, 0, 0);
1057 
1058 	/* Add 256 rules that require a tbl8 extension */
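	/* Each /32 rule falls in a different /24, so every add allocates a new
	 * tbl8 group, consuming all NUMBER_TBL8S (256) groups. */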
1059 	for (; ip <= IPv4(0, 0, 255, 0); ip += 256) {
1060 		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
1061 		TEST_LPM_ASSERT(status == 0);
1062 
1063 		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
1064 		TEST_LPM_ASSERT((status == 0) &&
1065 				(next_hop_return == next_hop_add));
1066 	}
1067 
1068 	/* All tbl8 extensions have been used above. Try to add one more and
1069 	 * check that it fails. */
1070 	ip = IPv4(1, 0, 0, 0);
1071 	depth = 32;
1072 
1073 	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
1074 	TEST_LPM_ASSERT(status < 0);
1075 
1076 	rte_lpm_free(lpm);
1077 
1078 	return PASS;
1079 }
1080 
1081 /*
1082  * Sequence of operations to find an existing lpm table
1083  *
1084  *  - create table
1085  *  - find existing table: hit
1086  *  - find non-existing table: miss
1087  *
1088  */
1089 int32_t
1090 test15(void)
1091 {
1092 	struct rte_lpm *lpm = NULL, *result = NULL;
1093 	struct rte_lpm_config config;
1094 
1095 	config.max_rules = 256 * 32;
1096 	config.number_tbl8s = NUMBER_TBL8S;
1097 	config.flags = 0;
1098 
1099 	/* Create lpm  */
1100 	lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, &config);
1101 	TEST_LPM_ASSERT(lpm != NULL);
1102 
1103 	/* Try to find existing lpm */
1104 	result = rte_lpm_find_existing("lpm_find_existing");
1105 	TEST_LPM_ASSERT(result == lpm);
1106 
1107 	/* Try to find non-existing lpm */
1108 	result = rte_lpm_find_existing("lpm_find_non_existing");
1109 	TEST_LPM_ASSERT(result == NULL);
1110 
1111 	/* Cleanup. */
1112 	rte_lpm_delete_all(lpm);
1113 	rte_lpm_free(lpm);
1114 
1115 	return PASS;
1116 }
1117 
1118 /*
1119  * Test the failure condition of overloading the tbl8s so that no more will
1120  * fit. Check that we get an error return value in that case.
1121  */
1122 int32_t
1123 test16(void)
1124 {
1125 	uint32_t ip;
1126 	struct rte_lpm_config config;
1127 
1128 	config.max_rules = 256 * 32;
1129 	config.number_tbl8s = NUMBER_TBL8S;
1130 	config.flags = 0;
1131 	struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
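	TEST_LPM_ASSERT(lpm != NULL);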
1132 
1133 	/* ip loops through all possibilities for top 24 bits of address */
1134 	for (ip = 0; ip < 0xFFFFFF; ip++) {
1135 		/* add an entry within a different tbl8 each time, since
1136 		 * depth >24 and the top 24 bits are different */
1137 		if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
1138 			break;
1139 	}
1140 
1141 	if (ip != NUMBER_TBL8S) {
1142 		printf("Error, unexpected failure with filling tbl8 groups\n");
1143 		printf("Failed after %u additions, expected after %u\n",
1144 				(unsigned)ip, (unsigned)NUMBER_TBL8S);
1145 	}
1146 
1147 	rte_lpm_free(lpm);
1148 	return 0;
1149 }
1150 
1151 /*
1152  * Test for overwriting of tbl8:
1153  *  - add a /32 rule and lookup
1154  *  - add a new /24 rule and lookup
1155  *  - add a third /25 rule and lookup
1156  *  - lookup the /32 and /24 rules to ensure the table has not been overwritten
1157  */
1158 int32_t
1159 test17(void)
1160 {
1161 	struct rte_lpm *lpm = NULL;
1162 	struct rte_lpm_config config;
1163 
1164 	config.max_rules = MAX_RULES;
1165 	config.number_tbl8s = NUMBER_TBL8S;
1166 	config.flags = 0;
1167 	const uint32_t ip_10_32 = IPv4(10, 10, 10, 2);
1168 	const uint32_t ip_10_24 = IPv4(10, 10, 10, 0);
1169 	const uint32_t ip_20_25 = IPv4(10, 10, 20, 2);
1170 	const uint8_t d_ip_10_32 = 32,
1171 			d_ip_10_24 = 24,
1172 			d_ip_20_25 = 25;
1173 	const uint32_t next_hop_ip_10_32 = 100,
1174 			next_hop_ip_10_24 = 105,
1175 			next_hop_ip_20_25 = 111;
1176 	uint32_t next_hop_return = 0;
1177 	int32_t status = 0;
1178 
1179 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1180 	TEST_LPM_ASSERT(lpm != NULL);
1181 
1182 	if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
1183 			next_hop_ip_10_32)) < 0)
1184 		return -1;
1185 
1186 	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1187 	uint32_t test_hop_10_32 = next_hop_return;
1188 	TEST_LPM_ASSERT(status == 0);
1189 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1190 
1191 	if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
1192 			next_hop_ip_10_24)) < 0)
1193 			return -1;
1194 
1195 	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1196 	uint32_t test_hop_10_24 = next_hop_return;
1197 	TEST_LPM_ASSERT(status == 0);
1198 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1199 
1200 	if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
1201 			next_hop_ip_20_25)) < 0)
1202 		return -1;
1203 
1204 	status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
1205 	uint32_t test_hop_20_25 = next_hop_return;
1206 	TEST_LPM_ASSERT(status == 0);
1207 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);
1208 
1209 	if (test_hop_10_32 == test_hop_10_24) {
1210 		printf("Next hop return equal\n");
1211 		return -1;
1212 	}
1213 
1214 	if (test_hop_10_24 == test_hop_20_25) {
1215 		printf("Next hop return equal\n");
1216 		return -1;
1217 	}
1218 
1219 	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
1220 	TEST_LPM_ASSERT(status == 0);
1221 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);
1222 
1223 	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
1224 	TEST_LPM_ASSERT(status == 0);
1225 	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);
1226 
1227 	rte_lpm_free(lpm);
1228 
1229 	return PASS;
1230 }
1231 
1232 /*
1233  * Lookup performance test
1234  */
1235 
1236 #define ITERATIONS (1 << 10)
1237 #define BATCH_SIZE (1 << 12)
1238 #define BULK_SIZE 32
1239 
1240 static void
1241 print_route_distribution(const struct route_rule *table, uint32_t n)
1242 {
1243 	unsigned i, j;
1244 
1245 	printf("Route distribution per prefix width: \n");
1246 	printf("DEPTH    QUANTITY (PERCENT)\n");
1247 	printf("--------------------------- \n");
1248 
1249 	/* Count depths. */
1250 	for (i = 1; i <= 32; i++) {
1251 		unsigned depth_counter = 0;
1252 		double percent_hits;
1253 
1254 		for (j = 0; j < n; j++)
1255 			if (table[j].depth == (uint8_t) i)
1256 				depth_counter++;
1257 
1258 		percent_hits = ((double)depth_counter)/((double)n) * 100;
1259 		printf("%.2u%15u (%.2f)\n", i, depth_counter, percent_hits);
1260 	}
1261 	printf("\n");
1262 }
1263 
1264 int32_t
1265 perf_test(void)
1266 {
1267 	struct rte_lpm *lpm = NULL;
1268 	struct rte_lpm_config config;
1269 
1270 	config.max_rules = 1000000;
1271 	config.number_tbl8s = NUMBER_TBL8S;
1272 	config.flags = 0;
1273 	uint64_t begin, total_time, lpm_used_entries = 0;
1274 	unsigned i, j;
1275 	uint32_t next_hop_add = 0xAA, next_hop_return = 0;
1276 	int status = 0;
1277 	uint64_t cache_line_counter = 0;
1278 	int64_t count = 0;
1279 
1280 	rte_srand(rte_rdtsc());
1281 
1282 	printf("No. routes = %u\n", (unsigned) NUM_ROUTE_ENTRIES);
1283 
1284 	print_route_distribution(large_route_table, (uint32_t) NUM_ROUTE_ENTRIES);
1285 
1286 	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
1287 	TEST_LPM_ASSERT(lpm != NULL);
1288 
1289 	/* Measure add. */
1290 	begin = rte_rdtsc();
1291 
1292 	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1293 		if (rte_lpm_add(lpm, large_route_table[i].ip,
1294 				large_route_table[i].depth, next_hop_add) == 0)
1295 			status++;
1296 	}
1297 	/* End Timer. */
1298 	total_time = rte_rdtsc() - begin;
1299 
1300 	printf("Unique added entries = %d\n", status);
1301 	/* Obtain add statistics. */
1302 	for (i = 0; i < RTE_LPM_TBL24_NUM_ENTRIES; i++) {
1303 		if (lpm->tbl24[i].valid)
1304 			lpm_used_entries++;
1305 
1306 		if (i % 32 == 0) {
1307 			if ((uint64_t)count < lpm_used_entries) {
1308 				cache_line_counter++;
1309 				count = lpm_used_entries;
1310 			}
1311 		}
1312 	}
1313 
1314 	printf("Used table 24 entries = %u (%g%%)\n",
1315 			(unsigned) lpm_used_entries,
1316 			(lpm_used_entries * 100.0) / RTE_LPM_TBL24_NUM_ENTRIES);
1317 	printf("64 byte Cache entries used = %u (%u bytes)\n",
1318 			(unsigned) cache_line_counter, (unsigned) cache_line_counter * 64);
1319 
1320 	printf("Average LPM Add: %g cycles\n",
1321 			(double)total_time / NUM_ROUTE_ENTRIES);
1322 
1323 	/* Measure single Lookup */
1324 	total_time = 0;
1325 	count = 0;
1326 
1327 	for (i = 0; i < ITERATIONS; i++) {
1328 		static uint32_t ip_batch[BATCH_SIZE];
1329 
1330 		for (j = 0; j < BATCH_SIZE; j++)
1331 			ip_batch[j] = rte_rand();
1332 
1333 		/* Lookup per batch */
1334 		begin = rte_rdtsc();
1335 
1336 		for (j = 0; j < BATCH_SIZE; j++) {
1337 			if (rte_lpm_lookup(lpm, ip_batch[j], &next_hop_return) != 0)
1338 				count++;
1339 		}
1340 
1341 		total_time += rte_rdtsc() - begin;
1342 
1343 	}
1344 	printf("Average LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
1345 			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
1346 			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
1347 
1348 	/* Measure bulk Lookup */
1349 	total_time = 0;
1350 	count = 0;
1351 	for (i = 0; i < ITERATIONS; i++) {
1352 		static uint32_t ip_batch[BATCH_SIZE];
1353 		uint32_t next_hops[BULK_SIZE];
1354 
1355 		/* Create array of random IP addresses */
1356 		for (j = 0; j < BATCH_SIZE; j++)
1357 			ip_batch[j] = rte_rand();
1358 
1359 		/* Lookup per batch */
1360 		begin = rte_rdtsc();
1361 		for (j = 0; j < BATCH_SIZE; j += BULK_SIZE) {
1362 			unsigned k;
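			/* rte_lpm_lookup_bulk() sets RTE_LPM_LOOKUP_SUCCESS in
			 * each next_hops entry that hit; count the misses. */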
1363 			rte_lpm_lookup_bulk(lpm, &ip_batch[j], next_hops, BULK_SIZE);
1364 			for (k = 0; k < BULK_SIZE; k++)
1365 				if (unlikely(!(next_hops[k] & RTE_LPM_LOOKUP_SUCCESS)))
1366 					count++;
1367 		}
1368 
1369 		total_time += rte_rdtsc() - begin;
1370 	}
1371 	printf("BULK LPM Lookup: %.1f cycles (fails = %.1f%%)\n",
1372 			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
1373 			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
1374 
1375 	/* Measure LookupX4 */
1376 	total_time = 0;
1377 	count = 0;
1378 	for (i = 0; i < ITERATIONS; i++) {
1379 		static uint32_t ip_batch[BATCH_SIZE];
1380 		uint32_t next_hops[4];
1381 
1382 		/* Create array of random IP addresses */
1383 		for (j = 0; j < BATCH_SIZE; j++)
1384 			ip_batch[j] = rte_rand();
1385 
1386 		/* Lookup per batch */
1387 		begin = rte_rdtsc();
1388 		for (j = 0; j < BATCH_SIZE; j += RTE_DIM(next_hops)) {
1389 			unsigned k;
1390 			xmm_t ipx4;
1391 
1392 			ipx4 = vect_loadu_sil128((xmm_t *)(ip_batch + j));
1394 			rte_lpm_lookupx4(lpm, ipx4, next_hops, UINT32_MAX);
1395 			for (k = 0; k < RTE_DIM(next_hops); k++)
1396 				if (unlikely(next_hops[k] == UINT32_MAX))
1397 					count++;
1398 		}
1399 
1400 		total_time += rte_rdtsc() - begin;
1401 	}
1402 	printf("LPM LookupX4: %.1f cycles (fails = %.1f%%)\n",
1403 			(double)total_time / ((double)ITERATIONS * BATCH_SIZE),
1404 			(count * 100.0) / (double)(ITERATIONS * BATCH_SIZE));
1405 
1406 	/* Delete */
1407 	status = 0;
1408 	begin = rte_rdtsc();
1409 
1410 	for (i = 0; i < NUM_ROUTE_ENTRIES; i++) {
1411 		/* rte_lpm_delete(lpm, ip, depth) */
1412 		status += rte_lpm_delete(lpm, large_route_table[i].ip,
1413 				large_route_table[i].depth);
1414 	}
1415 
1416 	total_time = rte_rdtsc() - begin;
1417 
1418 	printf("Average LPM Delete: %g cycles\n",
1419 			(double)total_time / NUM_ROUTE_ENTRIES);
1420 
1421 	rte_lpm_delete_all(lpm);
1422 	rte_lpm_free(lpm);
1423 
1424 	return PASS;
1425 }
1426 
1427 /*
1428  * Do all unit and performance tests.
1429  */
1430 
1431 static int
1432 test_lpm(void)
1433 {
1434 	unsigned i;
1435 	int status, global_status = 0;
1436 
1437 	for (i = 0; i < NUM_LPM_TESTS; i++) {
1438 		status = tests[i]();
1439 		if (status < 0) {
1440 			printf("ERROR: LPM Test %u: FAIL\n", i);
1441 			global_status = status;
1442 		}
1443 	}
1444 
1445 	return global_status;
1446 }
1447 
1448 static struct test_command lpm_cmd = {
1449 	.command = "lpm_autotest",
1450 	.callback = test_lpm,
1451 };
1452 REGISTER_TEST_COMMAND(lpm_cmd);
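
/* The test is registered as "lpm_autotest" and is typically run from the DPDK
 * test application's interactive prompt, e.g.: RTE>>lpm_autotest */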
1453