/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#include <rte_ip.h>
#include <rte_lpm.h>

#include "test.h"
#include "test_xmmt_ops.h"

#define TEST_LPM_ASSERT(cond) do { \
	if (!(cond)) { \
		printf("Error at line %d:\n", __LINE__); \
		return -1; \
	} \
} while (0)

typedef int32_t (*rte_lpm_test)(void);

static int32_t test0(void);
static int32_t test1(void);
static int32_t test2(void);
static int32_t test3(void);
static int32_t test4(void);
static int32_t test5(void);
static int32_t test6(void);
static int32_t test7(void);
static int32_t test8(void);
static int32_t test9(void);
static int32_t test10(void);
static int32_t test11(void);
static int32_t test12(void);
static int32_t test13(void);
static int32_t test14(void);
static int32_t test15(void);
static int32_t test16(void);
static int32_t test17(void);
static int32_t test18(void);

rte_lpm_test tests[] = {
/* Test Cases */
	test0,
	test1,
	test2,
	test3,
	test4,
	test5,
	test6,
	test7,
	test8,
	test9,
	test10,
	test11,
	test12,
	test13,
	test14,
	test15,
	test16,
	test17,
	test18
};

#define NUM_LPM_TESTS (sizeof(tests)/sizeof(tests[0]))
#define MAX_DEPTH 32
#define MAX_RULES 256
#define NUMBER_TBL8S 256
#define PASS 0

/*
 * Check that rte_lpm_create fails gracefully for incorrect user input
 * arguments
 */
int32_t
test0(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;

	/* rte_lpm_create: lpm name == NULL */
	lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm == NULL);

	/* rte_lpm_create: max_rules = 0 */
	/* Note: __func__ inserts the function name, in this case "test0". */
	config.max_rules = 0;
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm == NULL);

	/* socket_id < -1 is invalid */
	config.max_rules = MAX_RULES;
	lpm = rte_lpm_create(__func__, -2, &config);
	TEST_LPM_ASSERT(lpm == NULL);

	return PASS;
}

/*
 * Create an lpm table, then delete it, 100 times.
 * Use a slightly different max_rules value each time.
 */
int32_t
test1(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	int32_t i;

	/* rte_lpm_free: Free NULL */
	for (i = 0; i < 100; i++) {
		config.max_rules = MAX_RULES - i;
		lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
		TEST_LPM_ASSERT(lpm != NULL);

		rte_lpm_free(lpm);
	}

	/* Cannot check the result of free, so just return success. */
	return PASS;
}

/*
 * Call rte_lpm_free for a NULL pointer. Note: free has no return value, so
 * failure cannot be detected directly; this test exists to increase function
 * coverage and to validate that freeing NULL does not crash.
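 *
 * rte_lpm_free(NULL) is expected to be a safe no-op, so the call below should
 * simply return without touching any state.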
 */
int32_t
test2(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	rte_lpm_free(lpm);
	rte_lpm_free(NULL);
	return PASS;
}

/*
 * Check that rte_lpm_add fails gracefully for incorrect user input arguments
 */
int32_t
test3(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip = RTE_IPv4(0, 0, 0, 0), next_hop = 100;
	uint8_t depth = 24;
	int32_t status = 0;

	/* rte_lpm_add: lpm == NULL */
	status = rte_lpm_add(NULL, ip, depth, next_hop);
	TEST_LPM_ASSERT(status < 0);

	/* Create valid lpm to use in rest of test. */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	/* rte_lpm_add: depth < 1 */
	status = rte_lpm_add(lpm, ip, 0, next_hop);
	TEST_LPM_ASSERT(status < 0);

	/* rte_lpm_add: depth > MAX_DEPTH */
	status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
	TEST_LPM_ASSERT(status < 0);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Check that rte_lpm_delete fails gracefully for incorrect user input
 * arguments
 */
int32_t
test4(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip = RTE_IPv4(0, 0, 0, 0);
	uint8_t depth = 24;
	int32_t status = 0;

	/* rte_lpm_delete: lpm == NULL */
	status = rte_lpm_delete(NULL, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	/* Create valid lpm to use in rest of test. */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	/* rte_lpm_delete: depth < 1 */
	status = rte_lpm_delete(lpm, ip, 0);
	TEST_LPM_ASSERT(status < 0);

	/* rte_lpm_delete: depth > MAX_DEPTH */
	status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
	TEST_LPM_ASSERT(status < 0);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Check that rte_lpm_lookup fails gracefully for incorrect user input
 * arguments
 */
int32_t
test5(void)
{
#if defined(RTE_LIBRTE_LPM_DEBUG)
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip = RTE_IPv4(0, 0, 0, 0), next_hop_return = 0;
	int32_t status = 0;

	/* rte_lpm_lookup: lpm == NULL */
	status = rte_lpm_lookup(NULL, ip, &next_hop_return);
	TEST_LPM_ASSERT(status < 0);

	/* Create valid lpm to use in rest of test. */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	/* rte_lpm_lookup: next_hop = NULL */
	status = rte_lpm_lookup(lpm, ip, NULL);
	TEST_LPM_ASSERT(status < 0);

	rte_lpm_free(lpm);
#endif
	return PASS;
}

/*
 * Call add, lookup and delete for a single rule with depth <= 24
 */
int32_t
test6(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip = RTE_IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
	uint8_t depth = 24;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Call add, lookup and delete for a single rule with depth > 24
 */

int32_t
test7(void)
{
	xmm_t ipx4;
	uint32_t hop[4];
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip = RTE_IPv4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
	uint8_t depth = 32;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ipx4 = vect_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
	rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
	TEST_LPM_ASSERT(hop[0] == next_hop_add);
	TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
	TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
	TEST_LPM_ASSERT(hop[3] == next_hop_add);

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Use rte_lpm_add to add rules which affect only the second half of the lpm
 * table. Use all possible depths ranging from 1..32. Set the next hop equal to
 * the depth. Check for a lookup hit on every add, and check for a lookup miss
 * on the first half of the lpm table after each add. Finally, delete all rules
 * going backwards (i.e. from depth = 32..1) and carry out a lookup after each
 * delete. The lookup should return the next_hop_add value related to the
 * previous depth value (i.e. depth - 1).
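 *
 * For example, after the depth 32 rule is deleted, a lookup of 128.0.0.0
 * should return next hop 31 (the value installed with the depth 31 rule);
 * only after the final depth 1 rule is removed should the lookup return
 * -ENOENT.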
 */
int32_t
test8(void)
{
	xmm_t ipx4;
	uint32_t hop[4];
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip1 = RTE_IPv4(127, 255, 255, 255), ip2 = RTE_IPv4(128, 0, 0, 0);
	uint32_t next_hop_add, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Loop with rte_lpm_add. */
	for (depth = 1; depth <= 32; depth++) {
		/* Let the next_hop_add value = depth. Just for variety. */
		next_hop_add = depth;

		status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		/* Check IP in first half of tbl24 which should be empty. */
		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);

		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add));

		ipx4 = vect_set_epi32(ip2, ip1, ip2, ip1);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
		TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
		TEST_LPM_ASSERT(hop[1] == next_hop_add);
		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
		TEST_LPM_ASSERT(hop[3] == next_hop_add);
	}

	/* Loop with rte_lpm_delete. */
	for (depth = 32; depth >= 1; depth--) {
		next_hop_add = (uint8_t) (depth - 1);

		status = rte_lpm_delete(lpm, ip2, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);

		if (depth != 1) {
			TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add));
		} else {
			TEST_LPM_ASSERT(status == -ENOENT);
		}

		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);

		ipx4 = vect_set_epi32(ip1, ip1, ip2, ip2);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
		if (depth != 1) {
			TEST_LPM_ASSERT(hop[0] == next_hop_add);
			TEST_LPM_ASSERT(hop[1] == next_hop_add);
		} else {
			TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
			TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
		}
		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
		TEST_LPM_ASSERT(hop[3] == UINT32_MAX);
	}

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * - Add & lookup to hit invalid TBL24 entry
 * - Add & lookup to hit valid TBL24 entry not extended
 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
 */
int32_t
test9(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip, ip_1, ip_2;
	uint8_t depth, depth_1, depth_2;
	uint32_t next_hop_add, next_hop_add_1, next_hop_add_2, next_hop_return;
	int32_t status = 0;

	/* Add & lookup to hit invalid TBL24 entry */
	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

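	/* The only covering rule was just deleted, so the lookup below should
	 * miss. */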
	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add & lookup to hit valid TBL24 entry not extended */
	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 23;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	depth = 24;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	depth = 23;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
	 * entry */
	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = RTE_IPv4(128, 0, 0, 5);
	depth = 32;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add & lookup to hit valid extended TBL24 entry with valid TBL8
	 * entry */
	ip_1 = RTE_IPv4(128, 0, 0, 0);
	depth_1 = 25;
	next_hop_add_1 = 101;

	ip_2 = RTE_IPv4(128, 0, 0, 5);
	depth_2 = 32;
	next_hop_add_2 = 102;

	next_hop_return = 0;

	status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));

	status = rte_lpm_delete(lpm, ip_2, depth_2);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	status = rte_lpm_delete(lpm, ip_1, depth_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
 *   lookup)
 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
 * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
 *   delete & lookup)
 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
 * - Delete a rule that is not present in the TBL24 & lookup
 * - Delete a rule that is not present in the TBL8 & lookup
 */
int32_t
test10(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip, next_hop_add, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	/* Add rule that covers a TBL24 range previously invalid & lookup
	 * (& delete & lookup) */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 16;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add rule that extends a TBL24 invalid entry & lookup
	 * (& delete & lookup) */
	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 25;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	rte_lpm_delete_all(lpm);

	/* Add rule that extends a TBL24 valid entry & lookup for both rules
	 * (& delete & lookup) */
	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	ip = RTE_IPv4(128, 0, 0, 10);
	depth = 32;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = RTE_IPv4(128, 0, 0, 0);
	next_hop_add = 100;

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	ip = RTE_IPv4(128, 0, 0, 10);
	depth = 32;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add rule that updates the next hop in TBL24 & lookup
	 * (& delete & lookup) */
	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add rule that updates the next hop in TBL8 & lookup
	 * (& delete & lookup) */
	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Delete a rule that is not present in the TBL24 & lookup */
	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Delete a rule that is not present in the TBL8 & lookup */
	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 32;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Add two rules, lookup to hit the more specific rule, lookup to hit the less
 * specific rule, delete the less specific rule and lookup previous values
 * again; add a more specific rule than the existing rule, lookup again.
 */
int32_t
test11(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip, next_hop_add, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	ip = RTE_IPv4(128, 0, 0, 10);
	depth = 32;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = RTE_IPv4(128, 0, 0, 0);
	next_hop_add = 100;

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	ip = RTE_IPv4(128, 0, 0, 10);
	depth = 32;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Add an extended rule (i.e. depth greater than 24), lookup (hit), delete,
 * lookup (miss), in a for loop of 1000 times. This will check tbl8 extension
 * and contraction.
 */

int32_t
test12(void)
{
	xmm_t ipx4;
	uint32_t hop[4];
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip, i, next_hop_add, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	for (i = 0; i < 1000; i++) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add));

		ipx4 = vect_set_epi32(ip, ip + 1, ip, ip - 1);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
		TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
		TEST_LPM_ASSERT(hop[1] == next_hop_add);
		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
		TEST_LPM_ASSERT(hop[3] == next_hop_add);

		status = rte_lpm_delete(lpm, ip, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);
	}

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
 * tbl24 entry, lookup (hit). Delete the rule that caused the tbl24 extension,
 * lookup (which now hits the original tbl24 rule), and repeat in a for loop
 * of 1000 times. This will check tbl8 extension and contraction.
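 *
 * The tbl8 group is expected to be allocated when the depth 32 rule is added
 * and released again when it is deleted, since the depth 24 rule alone does
 * not need a tbl8.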
 */

int32_t
test13(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip, i, next_hop_add_1, next_hop_add_2, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = RTE_IPv4(128, 0, 0, 0);
	depth = 24;
	next_hop_add_1 = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	depth = 32;
	next_hop_add_2 = 101;

	for (i = 0; i < 1000; i++) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add_2));

		status = rte_lpm_delete(lpm, ip, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add_1));
	}

	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Force TBL8 extension exhaustion. Add 256 rules that each require a tbl8
 * extension, so that no more tbl8 extensions are available. Then add one more
 * rule that requires a tbl8 extension and check that it fails.
 */
int32_t
test14(void)
{
	/* We only use depth = 32 in the loop below, so we must make sure
	 * that we have enough storage for all rules at that depth. */
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = 256 * 32;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip, next_hop_add, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	/* Add enough space for 256 rules for every depth */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	depth = 32;
	next_hop_add = 100;
	ip = RTE_IPv4(0, 0, 0, 0);

	/* Add 256 rules that require a tbl8 extension */
	for (; ip <= RTE_IPv4(0, 0, 255, 0); ip += 256) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add));
	}
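
	/* Each iteration above used a distinct 24-bit prefix (0.0.0.0/24 up to
	 * 0.0.255.0/24), so the 256 depth 32 rules consumed one tbl8 group
	 * each, i.e. all NUMBER_TBL8S groups. */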

	/* All tbl8 extensions have been used above. Try to add one more and
	 * it should fail. */
	ip = RTE_IPv4(1, 0, 0, 0);
	depth = 32;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status < 0);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Sequence of operations for find existing lpm table
 *
 * - create table
 * - find existing table: hit
 * - find non-existing table: miss
 */
int32_t
test15(void)
{
	struct rte_lpm *lpm = NULL, *result = NULL;
	struct rte_lpm_config config;

	config.max_rules = 256 * 32;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;

	/* Create lpm */
	lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Try to find existing lpm */
	result = rte_lpm_find_existing("lpm_find_existing");
	TEST_LPM_ASSERT(result == lpm);

	/* Try to find non-existing lpm */
	result = rte_lpm_find_existing("lpm_find_non_existing");
	TEST_LPM_ASSERT(result == NULL);

	/* Cleanup. */
	rte_lpm_delete_all(lpm);
	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Test the failure condition of overloading the tbl8 so that no more groups
 * will fit. Check that an error return value is given in that case.
 */
int32_t
test16(void)
{
	uint32_t ip;
	struct rte_lpm_config config;

	config.max_rules = 256 * 32;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);

	TEST_LPM_ASSERT(lpm != NULL);

	/* ip loops through all possibilities for top 24 bits of address */
	for (ip = 0; ip < 0xFFFFFF; ip++) {
		/* add an entry within a different tbl8 each time, since
		 * depth >24 and the top 24 bits are different */
		if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
			break;
	}

	if (ip != NUMBER_TBL8S) {
		printf("Error, unexpected failure with filling tbl8 groups\n");
		printf("Failed after %u additions, expected after %u\n",
				(unsigned)ip, (unsigned)NUMBER_TBL8S);
	}

	rte_lpm_free(lpm);
	return 0;
}

/*
 * Test for overwriting of tbl8:
 * - add rule /32 and lookup
 * - add new rule /24 and lookup
 * - add third rule /25 and lookup
 * - lookup /32 and /24 rules to ensure the table has not been overwritten.
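 *
 * The /32 and /24 rules share the 10.10.10.0/24 prefix and therefore the same
 * tbl8 group, while the /25 rule lives under 10.10.20.0/24 and allocates a
 * second group; the final lookups confirm that the second allocation did not
 * clobber the first.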
 */
int32_t
test17(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	const uint32_t ip_10_32 = RTE_IPv4(10, 10, 10, 2);
	const uint32_t ip_10_24 = RTE_IPv4(10, 10, 10, 0);
	const uint32_t ip_20_25 = RTE_IPv4(10, 10, 20, 2);
	const uint8_t d_ip_10_32 = 32,
		d_ip_10_24 = 24,
		d_ip_20_25 = 25;
	const uint32_t next_hop_ip_10_32 = 100,
		next_hop_ip_10_24 = 105,
		next_hop_ip_20_25 = 111;
	uint32_t next_hop_return = 0;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
			next_hop_ip_10_32)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
	uint32_t test_hop_10_32 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);

	if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
			next_hop_ip_10_24)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
	uint32_t test_hop_10_24 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);

	if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
			next_hop_ip_20_25)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
	uint32_t test_hop_20_25 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);

	if (test_hop_10_32 == test_hop_10_24) {
		printf("Next hop return equal\n");
		return -1;
	}

	if (test_hop_10_24 == test_hop_20_25) {
		printf("Next hop return equal\n");
		return -1;
	}

	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);

	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Test for recycle of tbl8
 * - step 1: add a rule with depth = 28 (> 24)
 * - step 2: add a rule with the same 24-bit prefix and depth = 23 (< 24)
 * - step 3: delete the first rule
 * - step 4: check that the tbl8 is freed
 * - step 5: add a rule the same as the first one (depth = 28)
 * - step 6: check that the same tbl8 is allocated
 * - step 7: add a rule with the same 24-bit prefix and depth = 24
 * - step 8: delete the rule (depth = 28) added in step 5
 * - step 9: check that the tbl8 is freed
 * - step 10: add a rule with the same 24-bit prefix and depth = 28
 * - step 11: check that the same tbl8 is allocated again
 */
int32_t
test18(void)
{
	/* When valid_group is set, the tbl24 entry's next_hop field holds the
	 * tbl8 group index, so alias it for readability. */
#define group_idx next_hop
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;
	uint32_t ip, next_hop;
	uint8_t depth;
	uint32_t tbl8_group_index;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = RTE_IPv4(192, 168, 100, 100);
	depth = 28;
	next_hop = 1;
	rte_lpm_add(lpm, ip, depth, next_hop);

	TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
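	/* Remember which tbl8 group was allocated so that later additions can
	 * be checked to reuse the same group after it is freed. */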
	tbl8_group_index = lpm->tbl24[ip>>8].group_idx;

	depth = 23;
	next_hop = 2;
	rte_lpm_add(lpm, ip, depth, next_hop);
	TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);

	depth = 28;
	rte_lpm_delete(lpm, ip, depth);

	TEST_LPM_ASSERT(!lpm->tbl24[ip>>8].valid_group);

	next_hop = 3;
	rte_lpm_add(lpm, ip, depth, next_hop);

	TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
	TEST_LPM_ASSERT(tbl8_group_index == lpm->tbl24[ip>>8].group_idx);

	depth = 24;
	next_hop = 4;
	rte_lpm_add(lpm, ip, depth, next_hop);
	TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);

	depth = 28;
	rte_lpm_delete(lpm, ip, depth);

	TEST_LPM_ASSERT(!lpm->tbl24[ip>>8].valid_group);

	next_hop = 5;
	rte_lpm_add(lpm, ip, depth, next_hop);

	TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
	TEST_LPM_ASSERT(tbl8_group_index == lpm->tbl24[ip>>8].group_idx);

	rte_lpm_free(lpm);
#undef group_idx
	return PASS;
}

/*
 * Do all unit tests.
 */
static int
test_lpm(void)
{
	unsigned i;
	int status, global_status = 0;

	for (i = 0; i < NUM_LPM_TESTS; i++) {
		status = tests[i]();
		if (status < 0) {
			printf("ERROR: LPM Test %u: FAIL\n", i);
			global_status = status;
		}
	}

	return global_status;
}

REGISTER_TEST_COMMAND(lpm_autotest, test_lpm);