/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#include <rte_ip.h>
#include <rte_lpm.h>

#include "test.h"
#include "test_xmmt_ops.h"

#define TEST_LPM_ASSERT(cond) do { \
	if (!(cond)) { \
		printf("Error at line %d:\n", __LINE__); \
		return -1; \
	} \
} while (0)

typedef int32_t (*rte_lpm_test)(void);

static int32_t test0(void);
static int32_t test1(void);
static int32_t test2(void);
static int32_t test3(void);
static int32_t test4(void);
static int32_t test5(void);
static int32_t test6(void);
static int32_t test7(void);
static int32_t test8(void);
static int32_t test9(void);
static int32_t test10(void);
static int32_t test11(void);
static int32_t test12(void);
static int32_t test13(void);
static int32_t test14(void);
static int32_t test15(void);
static int32_t test16(void);
static int32_t test17(void);
static int32_t test18(void);

rte_lpm_test tests[] = {
/* Test Cases */
	test0,
	test1,
	test2,
	test3,
	test4,
	test5,
	test6,
	test7,
	test8,
	test9,
	test10,
	test11,
	test12,
	test13,
	test14,
	test15,
	test16,
	test17,
	test18
};

#define MAX_DEPTH 32
#define MAX_RULES 256
#define NUMBER_TBL8S 256
#define PASS 0

/*
 * Check that rte_lpm_create fails gracefully for incorrect user input
 * arguments
 */
int32_t
test0(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;

	/* rte_lpm_create: lpm name == NULL */
	lpm = rte_lpm_create(NULL, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm == NULL);

	/* rte_lpm_create: max_rules = 0 */
	/* Note: __func__ inserts the function name, in this case "test0". */
	config.max_rules = 0;
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm == NULL);

	/* socket_id < -1 is invalid */
	config.max_rules = MAX_RULES;
	lpm = rte_lpm_create(__func__, -2, &config);
	TEST_LPM_ASSERT(lpm == NULL);

	return PASS;
}

/*
 * Create an LPM table, then delete it, 100 times.
 * Use a slightly different rule count each time.
 */
int32_t
test1(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	int32_t i;

	/* Create and free the table repeatedly, varying max_rules. */
	for (i = 0; i < 100; i++) {
		config.max_rules = MAX_RULES - i;
		lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
		TEST_LPM_ASSERT(lpm != NULL);

		rte_lpm_free(lpm);
	}

	/* Cannot verify the free itself, so just return success. */
	return PASS;
}

/*
 * Call rte_lpm_free for NULL pointer user input. Note: free has no return
 * value, so it is impossible to check for failure; this test is added to
 * increase function coverage metrics and to validate that freeing NULL does
 * not crash.
 */
int32_t
test2(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	rte_lpm_free(lpm);
	rte_lpm_free(NULL);
	return PASS;
}

/*
 * Check that rte_lpm_add fails gracefully for incorrect user input arguments
 */
int32_t
test3(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop = 100;
	uint8_t depth = 24;
	int32_t status = 0;

	/* rte_lpm_add: lpm == NULL */
	status = rte_lpm_add(NULL, ip, depth, next_hop);
	TEST_LPM_ASSERT(status < 0);

	/* Create valid lpm to use in rest of test. */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	/* rte_lpm_add: depth < 1 */
	status = rte_lpm_add(lpm, ip, 0, next_hop);
	TEST_LPM_ASSERT(status < 0);

	/* rte_lpm_add: depth > MAX_DEPTH */
	status = rte_lpm_add(lpm, ip, (MAX_DEPTH + 1), next_hop);
	TEST_LPM_ASSERT(status < 0);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Check that rte_lpm_delete fails gracefully for incorrect user input
 * arguments
 */
int32_t
test4(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip = RTE_IPV4(0, 0, 0, 0);
	uint8_t depth = 24;
	int32_t status = 0;

	/* rte_lpm_delete: lpm == NULL */
	status = rte_lpm_delete(NULL, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	/* Create valid lpm to use in rest of test. */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	/* rte_lpm_delete: depth < 1 */
	status = rte_lpm_delete(lpm, ip, 0);
	TEST_LPM_ASSERT(status < 0);

	/* rte_lpm_delete: depth > MAX_DEPTH */
	status = rte_lpm_delete(lpm, ip, (MAX_DEPTH + 1));
	TEST_LPM_ASSERT(status < 0);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Check that rte_lpm_lookup fails gracefully for incorrect user input
 * arguments
 */
int32_t
test5(void)
{
#if defined(RTE_LIBRTE_LPM_DEBUG)
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop_return = 0;
	int32_t status = 0;

	/* rte_lpm_lookup: lpm == NULL */
	status = rte_lpm_lookup(NULL, ip, &next_hop_return);
	TEST_LPM_ASSERT(status < 0);

	/* Create valid lpm to use in rest of test. */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	/* rte_lpm_lookup: next_hop = NULL */
	status = rte_lpm_lookup(lpm, ip, NULL);
	TEST_LPM_ASSERT(status < 0);

	rte_lpm_free(lpm);
#endif
	return PASS;
}

/*
 * Call add, lookup and delete for a single rule with depth <= 24
 */
int32_t
test6(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
	uint8_t depth = 24;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Call add, lookup and delete for a single rule with depth > 24
 */

int32_t
test7(void)
{
	xmm_t ipx4;
	uint32_t hop[4];
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip = RTE_IPV4(0, 0, 0, 0), next_hop_add = 100, next_hop_return = 0;
	uint8_t depth = 32;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

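	/*
	 * Bulk lookup of four addresses at once. As the assertions below
	 * assume, the lane order is reversed with respect to the
	 * vect_set_epi32() arguments: the last argument lands in hop[0].
	 * Lanes that miss return the default value given as the last
	 * parameter of rte_lpm_lookupx4() (UINT32_MAX here).
	 */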
	ipx4 = vect_set_epi32(ip, ip + 0x100, ip - 0x100, ip);
	rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
	TEST_LPM_ASSERT(hop[0] == next_hop_add);
	TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
	TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
	TEST_LPM_ASSERT(hop[3] == next_hop_add);

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Use rte_lpm_add to add rules which affect only the second half of the lpm
 * table. Use all possible depths ranging from 1..32. Set the next hop equal
 * to the depth. Check for a lookup hit after every add and for a lookup miss
 * on the first half of the lpm table after each add. Finally delete all rules
 * going backwards (i.e. from depth = 32..1) and carry out a lookup after each
 * delete. The lookup should return the next_hop_add value related to the
 * previous depth value (i.e. depth - 1).
 */
int32_t
test8(void)
{
	xmm_t ipx4;
	uint32_t hop[4];
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip1 = RTE_IPV4(127, 255, 255, 255), ip2 = RTE_IPV4(128, 0, 0, 0);
	uint32_t next_hop_add, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Loop with rte_lpm_add. */
	for (depth = 1; depth <= 32; depth++) {
		/* Set next_hop_add equal to the depth, just for variety. */
		next_hop_add = depth;

		status = rte_lpm_add(lpm, ip2, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		/* Check IP in first half of tbl24 which should be empty. */
		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);

		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add));

		ipx4 = vect_set_epi32(ip2, ip1, ip2, ip1);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
		TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
		TEST_LPM_ASSERT(hop[1] == next_hop_add);
		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
		TEST_LPM_ASSERT(hop[3] == next_hop_add);
	}

	/* Loop with rte_lpm_delete. */
	for (depth = 32; depth >= 1; depth--) {
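		/*
		 * After the depth d rule is removed, the longest remaining
		 * match for ip2 is the depth d - 1 rule, whose next hop was
		 * set to d - 1 in the add loop above.
		 */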
		next_hop_add = (uint8_t) (depth - 1);

		status = rte_lpm_delete(lpm, ip2, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip2, &next_hop_return);

		if (depth != 1) {
			TEST_LPM_ASSERT((status == 0) &&
				(next_hop_return == next_hop_add));
		} else {
			TEST_LPM_ASSERT(status == -ENOENT);
		}

		status = rte_lpm_lookup(lpm, ip1, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);

		ipx4 = vect_set_epi32(ip1, ip1, ip2, ip2);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
		if (depth != 1) {
			TEST_LPM_ASSERT(hop[0] == next_hop_add);
			TEST_LPM_ASSERT(hop[1] == next_hop_add);
		} else {
			TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
			TEST_LPM_ASSERT(hop[1] == UINT32_MAX);
		}
		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
		TEST_LPM_ASSERT(hop[3] == UINT32_MAX);
	}

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * - Add & lookup to hit invalid TBL24 entry
 * - Add & lookup to hit valid TBL24 entry not extended
 * - Add & lookup to hit valid extended TBL24 entry with invalid TBL8 entry
 * - Add & lookup to hit valid extended TBL24 entry with valid TBL8 entry
 *
 */
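/*
 * Background (DIR-24-8 layout): prefixes of depth <= 24 are resolved entirely
 * in the tbl24 array, while deeper prefixes mark the tbl24 entry as extended
 * and resolve the last byte of the address in an attached tbl8 group.
 */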
int32_t
test9(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip, ip_1, ip_2;
	uint8_t depth, depth_1, depth_2;
	uint32_t next_hop_add, next_hop_add_1, next_hop_add_2, next_hop_return;
	int32_t status = 0;

	/* Add & lookup to hit invalid TBL24 entry */
	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add & lookup to hit valid TBL24 entry not extended */
	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 23;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	depth = 24;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	depth = 23;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add & lookup to hit valid extended TBL24 entry with invalid TBL8
	 * entry */
	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = RTE_IPV4(128, 0, 0, 5);
	depth = 32;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add & lookup to hit valid extended TBL24 entry with valid TBL8
	 * entry */
	ip_1 = RTE_IPV4(128, 0, 0, 0);
	depth_1 = 25;
	next_hop_add_1 = 101;

	ip_2 = RTE_IPV4(128, 0, 0, 5);
	depth_2 = 32;
	next_hop_add_2 = 102;

	next_hop_return = 0;

	status = rte_lpm_add(lpm, ip_1, depth_1, next_hop_add_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	status = rte_lpm_add(lpm, ip_2, depth_2, next_hop_add_2);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_2));

	status = rte_lpm_delete(lpm, ip_2, depth_2);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_2, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	status = rte_lpm_delete(lpm, ip_1, depth_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip_1, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * - Add rule that covers a TBL24 range previously invalid & lookup (& delete &
 *   lookup)
 * - Add rule that extends a TBL24 invalid entry & lookup (& delete & lookup)
 * - Add rule that extends a TBL24 valid entry & lookup for both rules (&
 *   delete & lookup)
 * - Add rule that updates the next hop in TBL24 & lookup (& delete & lookup)
 * - Add rule that updates the next hop in TBL8 & lookup (& delete & lookup)
 * - Delete a rule that is not present in the TBL24 & lookup
 * - Delete a rule that is not present in the TBL8 & lookup
 *
 */
int32_t
test10(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip, next_hop_add, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	/* Add rule that covers a TBL24 range previously invalid & lookup
	 * (& delete & lookup) */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 16;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

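	/* Add rule that extends a TBL24 invalid entry & lookup
	 * (& delete & lookup) */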
	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 25;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	rte_lpm_delete_all(lpm);

	/* Add rule that extends a TBL24 valid entry & lookup for both rules
	 * (& delete & lookup) */

	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	ip = RTE_IPV4(128, 0, 0, 10);
	depth = 32;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = RTE_IPV4(128, 0, 0, 0);
	next_hop_add = 100;

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	ip = RTE_IPV4(128, 0, 0, 10);
	depth = 32;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add rule that updates the next hop in TBL24 & lookup
	 * (& delete & lookup) */

	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Add rule that updates the next hop in TBL8 & lookup
	 * (& delete & lookup) */

	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Delete a rule that is not present in the TBL24 & lookup */

	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_delete_all(lpm);

	/* Delete a rule that is not present in the TBL8 & lookup */

	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 32;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status < 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Add two rules, lookup to hit the more specific one, lookup to hit the less
 * specific one, delete the less specific rule and lookup previous values
 * again; then delete the remaining more specific rule and check that both
 * lookups now miss.
 */
int32_t
test11(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip, next_hop_add, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 24;
	next_hop_add = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	ip = RTE_IPV4(128, 0, 0, 10);
	depth = 32;
	next_hop_add = 101;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = RTE_IPV4(128, 0, 0, 0);
	next_hop_add = 100;

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add));

	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	ip = RTE_IPV4(128, 0, 0, 10);
	depth = 32;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Add an extended rule (i.e. depth greater than 24), lookup (hit), delete,
 * lookup (miss), in a loop of 1000 iterations. This checks tbl8 extension
 * and contraction.
 */

int32_t
test12(void)
{
	xmm_t ipx4;
	uint32_t hop[4];
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip, i, next_hop_add, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 32;
	next_hop_add = 100;

	for (i = 0; i < 1000; i++) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add));

		ipx4 = vect_set_epi32(ip, ip + 1, ip, ip - 1);
		rte_lpm_lookupx4(lpm, ipx4, hop, UINT32_MAX);
		TEST_LPM_ASSERT(hop[0] == UINT32_MAX);
		TEST_LPM_ASSERT(hop[1] == next_hop_add);
		TEST_LPM_ASSERT(hop[2] == UINT32_MAX);
		TEST_LPM_ASSERT(hop[3] == next_hop_add);

		status = rte_lpm_delete(lpm, ip, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT(status == -ENOENT);
	}

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Add a rule to tbl24, lookup (hit), then add a rule that will extend this
 * tbl24 entry, lookup (hit), delete the extending rule and check that the
 * lookup falls back to the original tbl24 rule. Repeat in a loop of 1000
 * iterations. This checks tbl8 extension and contraction.
 */

int32_t
test13(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip, i, next_hop_add_1, next_hop_add_2, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = RTE_IPV4(128, 0, 0, 0);
	depth = 24;
	next_hop_add_1 = 100;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add_1);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT((status == 0) && (next_hop_return == next_hop_add_1));

	depth = 32;
	next_hop_add_2 = 101;

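	/*
	 * Each iteration allocates a tbl8 group for the /32 rule and should
	 * release it again on delete, with the lookup falling back to the
	 * /24 next hop once the /32 rule is gone.
	 */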
	for (i = 0; i < 1000; i++) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add_2);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add_2));

		status = rte_lpm_delete(lpm, ip, depth);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add_1));
	}

	depth = 24;

	status = rte_lpm_delete(lpm, ip, depth);
	TEST_LPM_ASSERT(status == 0);

	status = rte_lpm_lookup(lpm, ip, &next_hop_return);
	TEST_LPM_ASSERT(status == -ENOENT);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Force tbl8 extension exhaustion. Add 256 rules that each require a tbl8
 * extension until no more tbl8 groups are available. Then add one more rule
 * that requires a tbl8 extension and check that it fails.
 */
int32_t
test14(void)
{
	/* We only use depth = 32 in the loop below, so we must make sure
	 * that we have enough storage for all rules at that depth. */

	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = 256 * 32;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	uint32_t ip, next_hop_add, next_hop_return;
	uint8_t depth;
	int32_t status = 0;

	/* Add enough space for 256 rules for every depth */
	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	depth = 32;
	next_hop_add = 100;
	ip = RTE_IPV4(0, 0, 0, 0);

	/* Add 256 rules that require a tbl8 extension */
	for (; ip <= RTE_IPV4(0, 0, 255, 0); ip += 256) {
		status = rte_lpm_add(lpm, ip, depth, next_hop_add);
		TEST_LPM_ASSERT(status == 0);

		status = rte_lpm_lookup(lpm, ip, &next_hop_return);
		TEST_LPM_ASSERT((status == 0) &&
			(next_hop_return == next_hop_add));
	}

	/* All tbl8 groups have been consumed above. Adding one more rule
	 * that needs a tbl8 extension must fail. */
	ip = RTE_IPV4(1, 0, 0, 0);
	depth = 32;

	status = rte_lpm_add(lpm, ip, depth, next_hop_add);
	TEST_LPM_ASSERT(status < 0);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Sequence of operations for find existing lpm table
 *
 * - create table
 * - find existing table: hit
 * - find non-existing table: miss
 *
 */
int32_t
test15(void)
{
	struct rte_lpm *lpm = NULL, *result = NULL;
	struct rte_lpm_config config;

	config.max_rules = 256 * 32;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;

	/* Create lpm */
	lpm = rte_lpm_create("lpm_find_existing", SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	/* Try to find existing lpm */
	result = rte_lpm_find_existing("lpm_find_existing");
	TEST_LPM_ASSERT(result == lpm);

	/* Try to find non-existing lpm */
	result = rte_lpm_find_existing("lpm_find_non_existing");
	TEST_LPM_ASSERT(result == NULL);

	/* Cleanup. */
	rte_lpm_delete_all(lpm);
	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Test the failure condition of overloading the tbl8 so that no more groups
 * will fit. Check that we get an error return value in that case.
 */
int32_t
test16(void)
{
	uint32_t ip;
	struct rte_lpm_config config;

	config.max_rules = 256 * 32;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	struct rte_lpm *lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	/* ip loops through all possibilities for top 24 bits of address */
	for (ip = 0; ip < 0xFFFFFF; ip++) {
		/* add an entry within a different tbl8 each time, since
		 * depth >24 and the top 24 bits are different */
		if (rte_lpm_add(lpm, (ip << 8) + 0xF0, 30, 0) < 0)
			break;
	}

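	/*
	 * Each successful addition above consumed one tbl8 group, so the
	 * loop should have broken out after exactly NUMBER_TBL8S additions.
	 */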
	if (ip != NUMBER_TBL8S) {
		printf("Error, unexpected failure with filling tbl8 groups\n");
		printf("Failed after %u additions, expected after %u\n",
				(unsigned)ip, (unsigned)NUMBER_TBL8S);
	}

	rte_lpm_free(lpm);
	return 0;
}

/*
 * Test for overwriting of tbl8:
 * - add rule /32 and lookup
 * - add new rule /24 and lookup
 * - add third rule /25 and lookup
 * - lookup /32 and /24 rules to ensure the table has not been overwritten.
 */
int32_t
test17(void)
{
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;
	const uint32_t ip_10_32 = RTE_IPV4(10, 10, 10, 2);
	const uint32_t ip_10_24 = RTE_IPV4(10, 10, 10, 0);
	const uint32_t ip_20_25 = RTE_IPV4(10, 10, 20, 2);
	const uint8_t d_ip_10_32 = 32,
		d_ip_10_24 = 24,
		d_ip_20_25 = 25;
	const uint32_t next_hop_ip_10_32 = 100,
		next_hop_ip_10_24 = 105,
		next_hop_ip_20_25 = 111;
	uint32_t next_hop_return = 0;
	int32_t status = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	if ((status = rte_lpm_add(lpm, ip_10_32, d_ip_10_32,
			next_hop_ip_10_32)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
	uint32_t test_hop_10_32 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);

	if ((status = rte_lpm_add(lpm, ip_10_24, d_ip_10_24,
			next_hop_ip_10_24)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
	uint32_t test_hop_10_24 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);

	if ((status = rte_lpm_add(lpm, ip_20_25, d_ip_20_25,
			next_hop_ip_20_25)) < 0)
		return -1;

	status = rte_lpm_lookup(lpm, ip_20_25, &next_hop_return);
	uint32_t test_hop_20_25 = next_hop_return;
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_20_25);

	if (test_hop_10_32 == test_hop_10_24) {
		printf("Next hop return equal\n");
		return -1;
	}

	if (test_hop_10_24 == test_hop_20_25) {
		printf("Next hop return equal\n");
		return -1;
	}

	status = rte_lpm_lookup(lpm, ip_10_32, &next_hop_return);
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_32);

	status = rte_lpm_lookup(lpm, ip_10_24, &next_hop_return);
	TEST_LPM_ASSERT(status == 0);
	TEST_LPM_ASSERT(next_hop_return == next_hop_ip_10_24);

	rte_lpm_free(lpm);

	return PASS;
}

/*
 * Test for recycle of tbl8
 * - step 1: add a rule with depth = 28 (> 24)
 * - step 2: add a rule with the same 24-bit prefix and depth = 23 (< 24)
 * - step 3: delete the first rule
 * - step 4: check that the tbl8 is freed
 * - step 5: add a rule same as the first one (depth = 28)
 * - step 6: check that the same tbl8 is allocated
 * - step 7: add a rule with the same 24-bit prefix and depth = 24
 * - step 8: delete the rule (depth = 28) added in step 5
 * - step 9: check that the tbl8 is freed
 * - step 10: add a rule with the same 24-bit prefix and depth = 28
 * - step 11: check that the same tbl8 is allocated again
 */
int32_t
test18(void)
{
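	/*
	 * A tbl24 entry stores either a next hop or, when valid_group is
	 * set, the index of the tbl8 group it extends into; both live in
	 * the same field, so alias it as group_idx for the checks below.
	 */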
#define group_idx next_hop
	struct rte_lpm *lpm = NULL;
	struct rte_lpm_config config;
	uint32_t ip, next_hop;
	uint8_t depth;
	uint32_t tbl8_group_index;

	config.max_rules = MAX_RULES;
	config.number_tbl8s = NUMBER_TBL8S;
	config.flags = 0;

	lpm = rte_lpm_create(__func__, SOCKET_ID_ANY, &config);
	TEST_LPM_ASSERT(lpm != NULL);

	ip = RTE_IPV4(192, 168, 100, 100);
	depth = 28;
	next_hop = 1;
	rte_lpm_add(lpm, ip, depth, next_hop);

	TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
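	/* Remember which tbl8 group the depth 28 rule was given so that the
	 * recycling checks below can verify the same group is re-used. */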
	tbl8_group_index = lpm->tbl24[ip>>8].group_idx;

	depth = 23;
	next_hop = 2;
	rte_lpm_add(lpm, ip, depth, next_hop);
	TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);

	depth = 28;
	rte_lpm_delete(lpm, ip, depth);

	TEST_LPM_ASSERT(!lpm->tbl24[ip>>8].valid_group);

	next_hop = 3;
	rte_lpm_add(lpm, ip, depth, next_hop);

	TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
	TEST_LPM_ASSERT(tbl8_group_index == lpm->tbl24[ip>>8].group_idx);

	depth = 24;
	next_hop = 4;
	rte_lpm_add(lpm, ip, depth, next_hop);
	TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);

	depth = 28;
	rte_lpm_delete(lpm, ip, depth);

	TEST_LPM_ASSERT(!lpm->tbl24[ip>>8].valid_group);

	next_hop = 5;
	rte_lpm_add(lpm, ip, depth, next_hop);

	TEST_LPM_ASSERT(lpm->tbl24[ip>>8].valid_group);
	TEST_LPM_ASSERT(tbl8_group_index == lpm->tbl24[ip>>8].group_idx);

	rte_lpm_free(lpm);
#undef group_idx
	return PASS;
}

/*
 * Do all unit tests.
 */

static int
test_lpm(void)
{
	unsigned i;
	int status, global_status = 0;

	for (i = 0; i < RTE_DIM(tests); i++) {
		status = tests[i]();
		if (status < 0) {
			printf("ERROR: LPM Test %u: FAIL\n", i);
			global_status = status;
		}
	}

	return global_status;
}

REGISTER_TEST_COMMAND(lpm_autotest, test_lpm);