1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2014 Intel Corporation 3 */ 4 #include <string.h> 5 #include <stdint.h> 6 #include <errno.h> 7 #include <stdio.h> 8 #include <sys/queue.h> 9 10 #include <rte_log.h> 11 #include <rte_common.h> 12 #include <rte_malloc.h> 13 #include <rte_memcpy.h> 14 #include <rte_eal_memconfig.h> 15 #include <rte_string_fns.h> 16 #include <rte_errno.h> 17 #include <rte_hash.h> 18 #include <assert.h> 19 #include <rte_jhash.h> 20 #include <rte_tailq.h> 21 22 #include "rte_lpm6.h" 23 24 #define RTE_LPM6_TBL24_NUM_ENTRIES (1 << 24) 25 #define RTE_LPM6_TBL8_GROUP_NUM_ENTRIES 256 26 #define RTE_LPM6_TBL8_MAX_NUM_GROUPS (1 << 21) 27 28 #define RTE_LPM6_VALID_EXT_ENTRY_BITMASK 0xA0000000 29 #define RTE_LPM6_LOOKUP_SUCCESS 0x20000000 30 #define RTE_LPM6_TBL8_BITMASK 0x001FFFFF 31 32 #define ADD_FIRST_BYTE 3 33 #define LOOKUP_FIRST_BYTE 4 34 #define BYTE_SIZE 8 35 #define BYTES2_SIZE 16 36 37 #define RULE_HASH_TABLE_EXTRA_SPACE 64 38 #define TBL24_IND UINT32_MAX 39 40 #define lpm6_tbl8_gindex next_hop 41 42 /** Flags for setting an entry as valid/invalid. */ 43 enum valid_flag { 44 INVALID = 0, 45 VALID 46 }; 47 48 TAILQ_HEAD(rte_lpm6_list, rte_tailq_entry); 49 50 static struct rte_tailq_elem rte_lpm6_tailq = { 51 .name = "RTE_LPM6", 52 }; 53 EAL_REGISTER_TAILQ(rte_lpm6_tailq) 54 55 /** Tbl entry structure. It is the same for both tbl24 and tbl8 */ 56 struct rte_lpm6_tbl_entry { 57 uint32_t next_hop: 21; /**< Next hop / next table to be checked. */ 58 uint32_t depth :8; /**< Rule depth. */ 59 60 /* Flags. */ 61 uint32_t valid :1; /**< Validation flag. */ 62 uint32_t valid_group :1; /**< Group validation flag. */ 63 uint32_t ext_entry :1; /**< External entry. */ 64 }; 65 66 /** Rules tbl entry structure. */ 67 struct rte_lpm6_rule { 68 uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */ 69 uint32_t next_hop; /**< Rule next hop. */ 70 uint8_t depth; /**< Rule depth. */ 71 }; 72 73 /** Rules tbl entry key. */ 74 struct rte_lpm6_rule_key { 75 uint8_t ip[RTE_LPM6_IPV6_ADDR_SIZE]; /**< Rule IP address. */ 76 uint32_t depth; /**< Rule depth. */ 77 }; 78 79 /* Header of tbl8 */ 80 struct rte_lpm_tbl8_hdr { 81 uint32_t owner_tbl_ind; /**< owner table: TBL24_IND if owner is tbl24, 82 * otherwise index of tbl8 83 */ 84 uint32_t owner_entry_ind; /**< index of the owner table entry where 85 * pointer to the tbl8 is stored 86 */ 87 uint32_t ref_cnt; /**< table reference counter */ 88 }; 89 90 /** LPM6 structure. */ 91 struct rte_lpm6 { 92 /* LPM metadata. */ 93 char name[RTE_LPM6_NAMESIZE]; /**< Name of the lpm. */ 94 uint32_t max_rules; /**< Max number of rules. */ 95 uint32_t used_rules; /**< Used rules so far. */ 96 uint32_t number_tbl8s; /**< Number of tbl8s to allocate. */ 97 98 /* LPM Tables. */ 99 struct rte_hash *rules_tbl; /**< LPM rules. */ 100 struct rte_lpm6_tbl_entry tbl24[RTE_LPM6_TBL24_NUM_ENTRIES] 101 __rte_cache_aligned; /**< LPM tbl24 table. */ 102 103 uint32_t *tbl8_pool; /**< pool of indexes of free tbl8s */ 104 uint32_t tbl8_pool_pos; /**< current position in the tbl8 pool */ 105 106 struct rte_lpm_tbl8_hdr *tbl8_hdrs; /* array of tbl8 headers */ 107 108 struct rte_lpm6_tbl_entry tbl8[0] 109 __rte_cache_aligned; /**< LPM tbl8 table. */ 110 }; 111 112 /* 113 * Takes an array of uint8_t (IPv6 address) and masks it using the depth. 114 * It leaves untouched one bit per unit in the depth variable 115 * and set the rest to 0. 116 */ 117 static inline void 118 ip6_mask_addr(uint8_t *ip, uint8_t depth) 119 { 120 int16_t part_depth, mask; 121 int i; 122 123 part_depth = depth; 124 125 for (i = 0; i < RTE_LPM6_IPV6_ADDR_SIZE; i++) { 126 if (part_depth < BYTE_SIZE && part_depth >= 0) { 127 mask = (uint16_t)(~(UINT8_MAX >> part_depth)); 128 ip[i] = (uint8_t)(ip[i] & mask); 129 } else if (part_depth < 0) 130 ip[i] = 0; 131 132 part_depth -= BYTE_SIZE; 133 } 134 } 135 136 /* copy ipv6 address */ 137 static inline void 138 ip6_copy_addr(uint8_t *dst, const uint8_t *src) 139 { 140 rte_memcpy(dst, src, RTE_LPM6_IPV6_ADDR_SIZE); 141 } 142 143 /* 144 * LPM6 rule hash function 145 * 146 * It's used as a hash function for the rte_hash 147 * containing rules 148 */ 149 static inline uint32_t 150 rule_hash(const void *data, __rte_unused uint32_t data_len, 151 uint32_t init_val) 152 { 153 return rte_jhash(data, sizeof(struct rte_lpm6_rule_key), init_val); 154 } 155 156 /* 157 * Init pool of free tbl8 indexes 158 */ 159 static void 160 tbl8_pool_init(struct rte_lpm6 *lpm) 161 { 162 uint32_t i; 163 164 /* put entire range of indexes to the tbl8 pool */ 165 for (i = 0; i < lpm->number_tbl8s; i++) 166 lpm->tbl8_pool[i] = i; 167 168 lpm->tbl8_pool_pos = 0; 169 } 170 171 /* 172 * Get an index of a free tbl8 from the pool 173 */ 174 static inline uint32_t 175 tbl8_get(struct rte_lpm6 *lpm, uint32_t *tbl8_ind) 176 { 177 if (lpm->tbl8_pool_pos == lpm->number_tbl8s) 178 /* no more free tbl8 */ 179 return -ENOSPC; 180 181 /* next index */ 182 *tbl8_ind = lpm->tbl8_pool[lpm->tbl8_pool_pos++]; 183 return 0; 184 } 185 186 /* 187 * Put an index of a free tbl8 back to the pool 188 */ 189 static inline uint32_t 190 tbl8_put(struct rte_lpm6 *lpm, uint32_t tbl8_ind) 191 { 192 if (lpm->tbl8_pool_pos == 0) 193 /* pool is full */ 194 return -ENOSPC; 195 196 lpm->tbl8_pool[--lpm->tbl8_pool_pos] = tbl8_ind; 197 return 0; 198 } 199 200 /* 201 * Returns number of tbl8s available in the pool 202 */ 203 static inline uint32_t 204 tbl8_available(struct rte_lpm6 *lpm) 205 { 206 return lpm->number_tbl8s - lpm->tbl8_pool_pos; 207 } 208 209 /* 210 * Init a rule key. 211 * note that ip must be already masked 212 */ 213 static inline void 214 rule_key_init(struct rte_lpm6_rule_key *key, uint8_t *ip, uint8_t depth) 215 { 216 ip6_copy_addr(key->ip, ip); 217 key->depth = depth; 218 } 219 220 /* 221 * Rebuild the entire LPM tree by reinserting all rules 222 */ 223 static void 224 rebuild_lpm(struct rte_lpm6 *lpm) 225 { 226 uint64_t next_hop; 227 struct rte_lpm6_rule_key *rule_key; 228 uint32_t iter = 0; 229 230 while (rte_hash_iterate(lpm->rules_tbl, (void *) &rule_key, 231 (void **) &next_hop, &iter) >= 0) 232 rte_lpm6_add(lpm, rule_key->ip, rule_key->depth, 233 (uint32_t) next_hop); 234 } 235 236 /* 237 * Allocates memory for LPM object 238 */ 239 struct rte_lpm6 * 240 rte_lpm6_create(const char *name, int socket_id, 241 const struct rte_lpm6_config *config) 242 { 243 char mem_name[RTE_LPM6_NAMESIZE]; 244 struct rte_lpm6 *lpm = NULL; 245 struct rte_tailq_entry *te; 246 uint64_t mem_size; 247 struct rte_lpm6_list *lpm_list; 248 struct rte_hash *rules_tbl = NULL; 249 uint32_t *tbl8_pool = NULL; 250 struct rte_lpm_tbl8_hdr *tbl8_hdrs = NULL; 251 252 lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list); 253 254 RTE_BUILD_BUG_ON(sizeof(struct rte_lpm6_tbl_entry) != sizeof(uint32_t)); 255 RTE_BUILD_BUG_ON(sizeof(struct rte_lpm6_rule_key) % 256 sizeof(uint32_t) != 0); 257 258 /* Check user arguments. */ 259 if ((name == NULL) || (socket_id < -1) || (config == NULL) || 260 (config->max_rules == 0) || 261 config->number_tbl8s > RTE_LPM6_TBL8_MAX_NUM_GROUPS) { 262 rte_errno = EINVAL; 263 return NULL; 264 } 265 266 /* create rules hash table */ 267 snprintf(mem_name, sizeof(mem_name), "LRH_%s", name); 268 struct rte_hash_parameters rule_hash_tbl_params = { 269 .entries = config->max_rules * 1.2 + 270 RULE_HASH_TABLE_EXTRA_SPACE, 271 .key_len = sizeof(struct rte_lpm6_rule_key), 272 .hash_func = rule_hash, 273 .hash_func_init_val = 0, 274 .name = mem_name, 275 .reserved = 0, 276 .socket_id = socket_id, 277 .extra_flag = 0 278 }; 279 280 rules_tbl = rte_hash_create(&rule_hash_tbl_params); 281 if (rules_tbl == NULL) { 282 RTE_LOG(ERR, LPM, "LPM rules hash table allocation failed: %s (%d)", 283 rte_strerror(rte_errno), rte_errno); 284 goto fail_wo_unlock; 285 } 286 287 /* allocate tbl8 indexes pool */ 288 tbl8_pool = rte_malloc(NULL, 289 sizeof(uint32_t) * config->number_tbl8s, 290 RTE_CACHE_LINE_SIZE); 291 if (tbl8_pool == NULL) { 292 RTE_LOG(ERR, LPM, "LPM tbl8 pool allocation failed: %s (%d)", 293 rte_strerror(rte_errno), rte_errno); 294 rte_errno = ENOMEM; 295 goto fail_wo_unlock; 296 } 297 298 /* allocate tbl8 headers */ 299 tbl8_hdrs = rte_malloc(NULL, 300 sizeof(struct rte_lpm_tbl8_hdr) * config->number_tbl8s, 301 RTE_CACHE_LINE_SIZE); 302 if (tbl8_hdrs == NULL) { 303 RTE_LOG(ERR, LPM, "LPM tbl8 headers allocation failed: %s (%d)", 304 rte_strerror(rte_errno), rte_errno); 305 rte_errno = ENOMEM; 306 goto fail_wo_unlock; 307 } 308 309 snprintf(mem_name, sizeof(mem_name), "LPM_%s", name); 310 311 /* Determine the amount of memory to allocate. */ 312 mem_size = sizeof(*lpm) + (sizeof(lpm->tbl8[0]) * 313 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s); 314 315 rte_mcfg_tailq_write_lock(); 316 317 /* Guarantee there's no existing */ 318 TAILQ_FOREACH(te, lpm_list, next) { 319 lpm = (struct rte_lpm6 *) te->data; 320 if (strncmp(name, lpm->name, RTE_LPM6_NAMESIZE) == 0) 321 break; 322 } 323 lpm = NULL; 324 if (te != NULL) { 325 rte_errno = EEXIST; 326 goto fail; 327 } 328 329 /* allocate tailq entry */ 330 te = rte_zmalloc("LPM6_TAILQ_ENTRY", sizeof(*te), 0); 331 if (te == NULL) { 332 RTE_LOG(ERR, LPM, "Failed to allocate tailq entry!\n"); 333 rte_errno = ENOMEM; 334 goto fail; 335 } 336 337 /* Allocate memory to store the LPM data structures. */ 338 lpm = rte_zmalloc_socket(mem_name, (size_t)mem_size, 339 RTE_CACHE_LINE_SIZE, socket_id); 340 341 if (lpm == NULL) { 342 RTE_LOG(ERR, LPM, "LPM memory allocation failed\n"); 343 rte_free(te); 344 rte_errno = ENOMEM; 345 goto fail; 346 } 347 348 /* Save user arguments. */ 349 lpm->max_rules = config->max_rules; 350 lpm->number_tbl8s = config->number_tbl8s; 351 strlcpy(lpm->name, name, sizeof(lpm->name)); 352 lpm->rules_tbl = rules_tbl; 353 lpm->tbl8_pool = tbl8_pool; 354 lpm->tbl8_hdrs = tbl8_hdrs; 355 356 /* init the stack */ 357 tbl8_pool_init(lpm); 358 359 te->data = (void *) lpm; 360 361 TAILQ_INSERT_TAIL(lpm_list, te, next); 362 rte_mcfg_tailq_write_unlock(); 363 return lpm; 364 365 fail: 366 rte_mcfg_tailq_write_unlock(); 367 368 fail_wo_unlock: 369 rte_free(tbl8_hdrs); 370 rte_free(tbl8_pool); 371 rte_hash_free(rules_tbl); 372 373 return NULL; 374 } 375 376 /* 377 * Find an existing lpm table and return a pointer to it. 378 */ 379 struct rte_lpm6 * 380 rte_lpm6_find_existing(const char *name) 381 { 382 struct rte_lpm6 *l = NULL; 383 struct rte_tailq_entry *te; 384 struct rte_lpm6_list *lpm_list; 385 386 lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list); 387 388 rte_mcfg_tailq_read_lock(); 389 TAILQ_FOREACH(te, lpm_list, next) { 390 l = (struct rte_lpm6 *) te->data; 391 if (strncmp(name, l->name, RTE_LPM6_NAMESIZE) == 0) 392 break; 393 } 394 rte_mcfg_tailq_read_unlock(); 395 396 if (te == NULL) { 397 rte_errno = ENOENT; 398 return NULL; 399 } 400 401 return l; 402 } 403 404 /* 405 * Deallocates memory for given LPM table. 406 */ 407 void 408 rte_lpm6_free(struct rte_lpm6 *lpm) 409 { 410 struct rte_lpm6_list *lpm_list; 411 struct rte_tailq_entry *te; 412 413 /* Check user arguments. */ 414 if (lpm == NULL) 415 return; 416 417 lpm_list = RTE_TAILQ_CAST(rte_lpm6_tailq.head, rte_lpm6_list); 418 419 rte_mcfg_tailq_write_lock(); 420 421 /* find our tailq entry */ 422 TAILQ_FOREACH(te, lpm_list, next) { 423 if (te->data == (void *) lpm) 424 break; 425 } 426 427 if (te != NULL) 428 TAILQ_REMOVE(lpm_list, te, next); 429 430 rte_mcfg_tailq_write_unlock(); 431 432 rte_free(lpm->tbl8_hdrs); 433 rte_free(lpm->tbl8_pool); 434 rte_hash_free(lpm->rules_tbl); 435 rte_free(lpm); 436 rte_free(te); 437 } 438 439 /* Find a rule */ 440 static inline int 441 rule_find_with_key(struct rte_lpm6 *lpm, 442 const struct rte_lpm6_rule_key *rule_key, 443 uint32_t *next_hop) 444 { 445 uint64_t hash_val; 446 int ret; 447 448 /* lookup for a rule */ 449 ret = rte_hash_lookup_data(lpm->rules_tbl, (const void *) rule_key, 450 (void **) &hash_val); 451 if (ret >= 0) { 452 *next_hop = (uint32_t) hash_val; 453 return 1; 454 } 455 456 return 0; 457 } 458 459 /* Find a rule */ 460 static int 461 rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, 462 uint32_t *next_hop) 463 { 464 struct rte_lpm6_rule_key rule_key; 465 466 /* init a rule key */ 467 rule_key_init(&rule_key, ip, depth); 468 469 return rule_find_with_key(lpm, &rule_key, next_hop); 470 } 471 472 /* 473 * Checks if a rule already exists in the rules table and updates 474 * the nexthop if so. Otherwise it adds a new rule if enough space is available. 475 * 476 * Returns: 477 * 0 - next hop of existed rule is updated 478 * 1 - new rule successfully added 479 * <0 - error 480 */ 481 static inline int 482 rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, uint32_t next_hop) 483 { 484 int ret, rule_exist; 485 struct rte_lpm6_rule_key rule_key; 486 uint32_t unused; 487 488 /* init a rule key */ 489 rule_key_init(&rule_key, ip, depth); 490 491 /* Scan through rule list to see if rule already exists. */ 492 rule_exist = rule_find_with_key(lpm, &rule_key, &unused); 493 494 /* 495 * If rule does not exist check if there is space to add a new rule to 496 * this rule group. If there is no space return error. 497 */ 498 if (!rule_exist && lpm->used_rules == lpm->max_rules) 499 return -ENOSPC; 500 501 /* add the rule or update rules next hop */ 502 ret = rte_hash_add_key_data(lpm->rules_tbl, &rule_key, 503 (void *)(uintptr_t) next_hop); 504 if (ret < 0) 505 return ret; 506 507 /* Increment the used rules counter for this rule group. */ 508 if (!rule_exist) { 509 lpm->used_rules++; 510 return 1; 511 } 512 513 return 0; 514 } 515 516 /* 517 * Function that expands a rule across the data structure when a less-generic 518 * one has been added before. It assures that every possible combination of bits 519 * in the IP address returns a match. 520 */ 521 static void 522 expand_rule(struct rte_lpm6 *lpm, uint32_t tbl8_gindex, uint8_t old_depth, 523 uint8_t new_depth, uint32_t next_hop, uint8_t valid) 524 { 525 uint32_t tbl8_group_end, tbl8_gindex_next, j; 526 527 tbl8_group_end = tbl8_gindex + RTE_LPM6_TBL8_GROUP_NUM_ENTRIES; 528 529 struct rte_lpm6_tbl_entry new_tbl8_entry = { 530 .valid = valid, 531 .valid_group = valid, 532 .depth = new_depth, 533 .next_hop = next_hop, 534 .ext_entry = 0, 535 }; 536 537 for (j = tbl8_gindex; j < tbl8_group_end; j++) { 538 if (!lpm->tbl8[j].valid || (lpm->tbl8[j].ext_entry == 0 539 && lpm->tbl8[j].depth <= old_depth)) { 540 541 lpm->tbl8[j] = new_tbl8_entry; 542 543 } else if (lpm->tbl8[j].ext_entry == 1) { 544 545 tbl8_gindex_next = lpm->tbl8[j].lpm6_tbl8_gindex 546 * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES; 547 expand_rule(lpm, tbl8_gindex_next, old_depth, new_depth, 548 next_hop, valid); 549 } 550 } 551 } 552 553 /* 554 * Init a tbl8 header 555 */ 556 static inline void 557 init_tbl8_header(struct rte_lpm6 *lpm, uint32_t tbl_ind, 558 uint32_t owner_tbl_ind, uint32_t owner_entry_ind) 559 { 560 struct rte_lpm_tbl8_hdr *tbl_hdr = &lpm->tbl8_hdrs[tbl_ind]; 561 tbl_hdr->owner_tbl_ind = owner_tbl_ind; 562 tbl_hdr->owner_entry_ind = owner_entry_ind; 563 tbl_hdr->ref_cnt = 0; 564 } 565 566 /* 567 * Calculate index to the table based on the number and position 568 * of the bytes being inspected in this step. 569 */ 570 static uint32_t 571 get_bitshift(const uint8_t *ip, uint8_t first_byte, uint8_t bytes) 572 { 573 uint32_t entry_ind, i; 574 int8_t bitshift; 575 576 entry_ind = 0; 577 for (i = first_byte; i < (uint32_t)(first_byte + bytes); i++) { 578 bitshift = (int8_t)((bytes - i)*BYTE_SIZE); 579 580 if (bitshift < 0) 581 bitshift = 0; 582 entry_ind = entry_ind | ip[i-1] << bitshift; 583 } 584 585 return entry_ind; 586 } 587 588 /* 589 * Simulate adding a new route to the LPM counting number 590 * of new tables that will be needed 591 * 592 * It returns 0 on success, or 1 if 593 * the process needs to be continued by calling the function again. 594 */ 595 static inline int 596 simulate_add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl, 597 struct rte_lpm6_tbl_entry **next_tbl, const uint8_t *ip, 598 uint8_t bytes, uint8_t first_byte, uint8_t depth, 599 uint32_t *need_tbl_nb) 600 { 601 uint32_t entry_ind; 602 uint8_t bits_covered; 603 uint32_t next_tbl_ind; 604 605 /* 606 * Calculate index to the table based on the number and position 607 * of the bytes being inspected in this step. 608 */ 609 entry_ind = get_bitshift(ip, first_byte, bytes); 610 611 /* Number of bits covered in this step */ 612 bits_covered = (uint8_t)((bytes+first_byte-1)*BYTE_SIZE); 613 614 if (depth <= bits_covered) { 615 *need_tbl_nb = 0; 616 return 0; 617 } 618 619 if (tbl[entry_ind].valid == 0 || tbl[entry_ind].ext_entry == 0) { 620 /* from this point on a new table is needed on each level 621 * that is not covered yet 622 */ 623 depth -= bits_covered; 624 uint32_t cnt = depth >> 3; /* depth / BYTE_SIZE */ 625 if (depth & 7) /* 0b00000111 */ 626 /* if depth % 8 > 0 then one more table is needed 627 * for those last bits 628 */ 629 cnt++; 630 631 *need_tbl_nb = cnt; 632 return 0; 633 } 634 635 next_tbl_ind = tbl[entry_ind].lpm6_tbl8_gindex; 636 *next_tbl = &(lpm->tbl8[next_tbl_ind * 637 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES]); 638 *need_tbl_nb = 0; 639 return 1; 640 } 641 642 /* 643 * Partially adds a new route to the data structure (tbl24+tbl8s). 644 * It returns 0 on success, a negative number on failure, or 1 if 645 * the process needs to be continued by calling the function again. 646 */ 647 static inline int 648 add_step(struct rte_lpm6 *lpm, struct rte_lpm6_tbl_entry *tbl, 649 uint32_t tbl_ind, struct rte_lpm6_tbl_entry **next_tbl, 650 uint32_t *next_tbl_ind, uint8_t *ip, uint8_t bytes, 651 uint8_t first_byte, uint8_t depth, uint32_t next_hop, 652 uint8_t is_new_rule) 653 { 654 uint32_t entry_ind, tbl_range, tbl8_group_start, tbl8_group_end, i; 655 uint32_t tbl8_gindex; 656 uint8_t bits_covered; 657 int ret; 658 659 /* 660 * Calculate index to the table based on the number and position 661 * of the bytes being inspected in this step. 662 */ 663 entry_ind = get_bitshift(ip, first_byte, bytes); 664 665 /* Number of bits covered in this step */ 666 bits_covered = (uint8_t)((bytes+first_byte-1)*BYTE_SIZE); 667 668 /* 669 * If depth if smaller than this number (ie this is the last step) 670 * expand the rule across the relevant positions in the table. 671 */ 672 if (depth <= bits_covered) { 673 tbl_range = 1 << (bits_covered - depth); 674 675 for (i = entry_ind; i < (entry_ind + tbl_range); i++) { 676 if (!tbl[i].valid || (tbl[i].ext_entry == 0 && 677 tbl[i].depth <= depth)) { 678 679 struct rte_lpm6_tbl_entry new_tbl_entry = { 680 .next_hop = next_hop, 681 .depth = depth, 682 .valid = VALID, 683 .valid_group = VALID, 684 .ext_entry = 0, 685 }; 686 687 tbl[i] = new_tbl_entry; 688 689 } else if (tbl[i].ext_entry == 1) { 690 691 /* 692 * If tbl entry is valid and extended calculate the index 693 * into next tbl8 and expand the rule across the data structure. 694 */ 695 tbl8_gindex = tbl[i].lpm6_tbl8_gindex * 696 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES; 697 expand_rule(lpm, tbl8_gindex, depth, depth, 698 next_hop, VALID); 699 } 700 } 701 702 /* update tbl8 rule reference counter */ 703 if (tbl_ind != TBL24_IND && is_new_rule) 704 lpm->tbl8_hdrs[tbl_ind].ref_cnt++; 705 706 return 0; 707 } 708 /* 709 * If this is not the last step just fill one position 710 * and calculate the index to the next table. 711 */ 712 else { 713 /* If it's invalid a new tbl8 is needed */ 714 if (!tbl[entry_ind].valid) { 715 /* get a new table */ 716 ret = tbl8_get(lpm, &tbl8_gindex); 717 if (ret != 0) 718 return -ENOSPC; 719 720 /* invalidate all new tbl8 entries */ 721 tbl8_group_start = tbl8_gindex * 722 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES; 723 memset(&lpm->tbl8[tbl8_group_start], 0, 724 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * 725 sizeof(struct rte_lpm6_tbl_entry)); 726 727 /* init the new table's header: 728 * save the reference to the owner table 729 */ 730 init_tbl8_header(lpm, tbl8_gindex, tbl_ind, entry_ind); 731 732 /* reference to a new tbl8 */ 733 struct rte_lpm6_tbl_entry new_tbl_entry = { 734 .lpm6_tbl8_gindex = tbl8_gindex, 735 .depth = 0, 736 .valid = VALID, 737 .valid_group = VALID, 738 .ext_entry = 1, 739 }; 740 741 tbl[entry_ind] = new_tbl_entry; 742 743 /* update the current table's reference counter */ 744 if (tbl_ind != TBL24_IND) 745 lpm->tbl8_hdrs[tbl_ind].ref_cnt++; 746 } 747 /* 748 * If it's valid but not extended the rule that was stored 749 * here needs to be moved to the next table. 750 */ 751 else if (tbl[entry_ind].ext_entry == 0) { 752 /* get a new tbl8 index */ 753 ret = tbl8_get(lpm, &tbl8_gindex); 754 if (ret != 0) 755 return -ENOSPC; 756 757 tbl8_group_start = tbl8_gindex * 758 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES; 759 tbl8_group_end = tbl8_group_start + 760 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES; 761 762 struct rte_lpm6_tbl_entry tbl_entry = { 763 .next_hop = tbl[entry_ind].next_hop, 764 .depth = tbl[entry_ind].depth, 765 .valid = VALID, 766 .valid_group = VALID, 767 .ext_entry = 0 768 }; 769 770 /* Populate new tbl8 with tbl value. */ 771 for (i = tbl8_group_start; i < tbl8_group_end; i++) 772 lpm->tbl8[i] = tbl_entry; 773 774 /* init the new table's header: 775 * save the reference to the owner table 776 */ 777 init_tbl8_header(lpm, tbl8_gindex, tbl_ind, entry_ind); 778 779 /* 780 * Update tbl entry to point to new tbl8 entry. Note: The 781 * ext_flag and tbl8_index need to be updated simultaneously, 782 * so assign whole structure in one go. 783 */ 784 struct rte_lpm6_tbl_entry new_tbl_entry = { 785 .lpm6_tbl8_gindex = tbl8_gindex, 786 .depth = 0, 787 .valid = VALID, 788 .valid_group = VALID, 789 .ext_entry = 1, 790 }; 791 792 tbl[entry_ind] = new_tbl_entry; 793 794 /* update the current table's reference counter */ 795 if (tbl_ind != TBL24_IND) 796 lpm->tbl8_hdrs[tbl_ind].ref_cnt++; 797 } 798 799 *next_tbl_ind = tbl[entry_ind].lpm6_tbl8_gindex; 800 *next_tbl = &(lpm->tbl8[*next_tbl_ind * 801 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES]); 802 } 803 804 return 1; 805 } 806 807 /* 808 * Simulate adding a route to LPM 809 * 810 * Returns: 811 * 0 on success 812 * -ENOSPC not enough tbl8 left 813 */ 814 static int 815 simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth) 816 { 817 struct rte_lpm6_tbl_entry *tbl; 818 struct rte_lpm6_tbl_entry *tbl_next = NULL; 819 int ret, i; 820 821 /* number of new tables needed for a step */ 822 uint32_t need_tbl_nb; 823 /* total number of new tables needed */ 824 uint32_t total_need_tbl_nb; 825 826 /* Inspect the first three bytes through tbl24 on the first step. */ 827 ret = simulate_add_step(lpm, lpm->tbl24, &tbl_next, masked_ip, 828 ADD_FIRST_BYTE, 1, depth, &need_tbl_nb); 829 total_need_tbl_nb = need_tbl_nb; 830 /* 831 * Inspect one by one the rest of the bytes until 832 * the process is completed. 833 */ 834 for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && ret == 1; i++) { 835 tbl = tbl_next; 836 ret = simulate_add_step(lpm, tbl, &tbl_next, masked_ip, 1, 837 (uint8_t)(i + 1), depth, &need_tbl_nb); 838 total_need_tbl_nb += need_tbl_nb; 839 } 840 841 if (tbl8_available(lpm) < total_need_tbl_nb) 842 /* not enough tbl8 to add a rule */ 843 return -ENOSPC; 844 845 return 0; 846 } 847 848 /* 849 * Add a route 850 */ 851 int 852 rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth, 853 uint32_t next_hop) 854 { 855 struct rte_lpm6_tbl_entry *tbl; 856 struct rte_lpm6_tbl_entry *tbl_next = NULL; 857 /* init to avoid compiler warning */ 858 uint32_t tbl_next_num = 123456; 859 int status; 860 uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE]; 861 int i; 862 863 /* Check user arguments. */ 864 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH)) 865 return -EINVAL; 866 867 /* Copy the IP and mask it to avoid modifying user's input data. */ 868 ip6_copy_addr(masked_ip, ip); 869 ip6_mask_addr(masked_ip, depth); 870 871 /* Simulate adding a new route */ 872 int ret = simulate_add(lpm, masked_ip, depth); 873 if (ret < 0) 874 return ret; 875 876 /* Add the rule to the rule table. */ 877 int is_new_rule = rule_add(lpm, masked_ip, depth, next_hop); 878 /* If there is no space available for new rule return error. */ 879 if (is_new_rule < 0) 880 return is_new_rule; 881 882 /* Inspect the first three bytes through tbl24 on the first step. */ 883 tbl = lpm->tbl24; 884 status = add_step(lpm, tbl, TBL24_IND, &tbl_next, &tbl_next_num, 885 masked_ip, ADD_FIRST_BYTE, 1, depth, next_hop, 886 is_new_rule); 887 assert(status >= 0); 888 889 /* 890 * Inspect one by one the rest of the bytes until 891 * the process is completed. 892 */ 893 for (i = ADD_FIRST_BYTE; i < RTE_LPM6_IPV6_ADDR_SIZE && status == 1; i++) { 894 tbl = tbl_next; 895 status = add_step(lpm, tbl, tbl_next_num, &tbl_next, 896 &tbl_next_num, masked_ip, 1, (uint8_t)(i + 1), 897 depth, next_hop, is_new_rule); 898 assert(status >= 0); 899 } 900 901 return status; 902 } 903 904 /* 905 * Takes a pointer to a table entry and inspect one level. 906 * The function returns 0 on lookup success, ENOENT if no match was found 907 * or 1 if the process needs to be continued by calling the function again. 908 */ 909 static inline int 910 lookup_step(const struct rte_lpm6 *lpm, const struct rte_lpm6_tbl_entry *tbl, 911 const struct rte_lpm6_tbl_entry **tbl_next, const uint8_t *ip, 912 uint8_t first_byte, uint32_t *next_hop) 913 { 914 uint32_t tbl8_index, tbl_entry; 915 916 /* Take the integer value from the pointer. */ 917 tbl_entry = *(const uint32_t *)tbl; 918 919 /* If it is valid and extended we calculate the new pointer to return. */ 920 if ((tbl_entry & RTE_LPM6_VALID_EXT_ENTRY_BITMASK) == 921 RTE_LPM6_VALID_EXT_ENTRY_BITMASK) { 922 923 tbl8_index = ip[first_byte-1] + 924 ((tbl_entry & RTE_LPM6_TBL8_BITMASK) * 925 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES); 926 927 *tbl_next = &lpm->tbl8[tbl8_index]; 928 929 return 1; 930 } else { 931 /* If not extended then we can have a match. */ 932 *next_hop = ((uint32_t)tbl_entry & RTE_LPM6_TBL8_BITMASK); 933 return (tbl_entry & RTE_LPM6_LOOKUP_SUCCESS) ? 0 : -ENOENT; 934 } 935 } 936 937 /* 938 * Looks up an IP 939 */ 940 int 941 rte_lpm6_lookup(const struct rte_lpm6 *lpm, const uint8_t *ip, 942 uint32_t *next_hop) 943 { 944 const struct rte_lpm6_tbl_entry *tbl; 945 const struct rte_lpm6_tbl_entry *tbl_next = NULL; 946 int status; 947 uint8_t first_byte; 948 uint32_t tbl24_index; 949 950 /* DEBUG: Check user input arguments. */ 951 if ((lpm == NULL) || (ip == NULL) || (next_hop == NULL)) 952 return -EINVAL; 953 954 first_byte = LOOKUP_FIRST_BYTE; 955 tbl24_index = (ip[0] << BYTES2_SIZE) | (ip[1] << BYTE_SIZE) | ip[2]; 956 957 /* Calculate pointer to the first entry to be inspected */ 958 tbl = &lpm->tbl24[tbl24_index]; 959 960 do { 961 /* Continue inspecting following levels until success or failure */ 962 status = lookup_step(lpm, tbl, &tbl_next, ip, first_byte++, next_hop); 963 tbl = tbl_next; 964 } while (status == 1); 965 966 return status; 967 } 968 969 /* 970 * Looks up a group of IP addresses 971 */ 972 int 973 rte_lpm6_lookup_bulk_func(const struct rte_lpm6 *lpm, 974 uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], 975 int32_t *next_hops, unsigned int n) 976 { 977 unsigned int i; 978 const struct rte_lpm6_tbl_entry *tbl; 979 const struct rte_lpm6_tbl_entry *tbl_next = NULL; 980 uint32_t tbl24_index, next_hop; 981 uint8_t first_byte; 982 int status; 983 984 /* DEBUG: Check user input arguments. */ 985 if ((lpm == NULL) || (ips == NULL) || (next_hops == NULL)) 986 return -EINVAL; 987 988 for (i = 0; i < n; i++) { 989 first_byte = LOOKUP_FIRST_BYTE; 990 tbl24_index = (ips[i][0] << BYTES2_SIZE) | 991 (ips[i][1] << BYTE_SIZE) | ips[i][2]; 992 993 /* Calculate pointer to the first entry to be inspected */ 994 tbl = &lpm->tbl24[tbl24_index]; 995 996 do { 997 /* Continue inspecting following levels 998 * until success or failure 999 */ 1000 status = lookup_step(lpm, tbl, &tbl_next, ips[i], 1001 first_byte++, &next_hop); 1002 tbl = tbl_next; 1003 } while (status == 1); 1004 1005 if (status < 0) 1006 next_hops[i] = -1; 1007 else 1008 next_hops[i] = (int32_t)next_hop; 1009 } 1010 1011 return 0; 1012 } 1013 1014 /* 1015 * Look for a rule in the high-level rules table 1016 */ 1017 int 1018 rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth, 1019 uint32_t *next_hop) 1020 { 1021 uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE]; 1022 1023 /* Check user arguments. */ 1024 if ((lpm == NULL) || next_hop == NULL || ip == NULL || 1025 (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH)) 1026 return -EINVAL; 1027 1028 /* Copy the IP and mask it to avoid modifying user's input data. */ 1029 ip6_copy_addr(masked_ip, ip); 1030 ip6_mask_addr(masked_ip, depth); 1031 1032 return rule_find(lpm, masked_ip, depth, next_hop); 1033 } 1034 1035 /* 1036 * Delete a rule from the rule table. 1037 * NOTE: Valid range for depth parameter is 1 .. 128 inclusive. 1038 * return 1039 * 0 on success 1040 * <0 on failure 1041 */ 1042 static inline int 1043 rule_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth) 1044 { 1045 int ret; 1046 struct rte_lpm6_rule_key rule_key; 1047 1048 /* init rule key */ 1049 rule_key_init(&rule_key, ip, depth); 1050 1051 /* delete the rule */ 1052 ret = rte_hash_del_key(lpm->rules_tbl, (void *) &rule_key); 1053 if (ret >= 0) 1054 lpm->used_rules--; 1055 1056 return ret; 1057 } 1058 1059 /* 1060 * Deletes a group of rules 1061 * 1062 * Note that the function rebuilds the lpm table, 1063 * rather than doing incremental updates like 1064 * the regular delete function 1065 */ 1066 int 1067 rte_lpm6_delete_bulk_func(struct rte_lpm6 *lpm, 1068 uint8_t ips[][RTE_LPM6_IPV6_ADDR_SIZE], uint8_t *depths, 1069 unsigned n) 1070 { 1071 uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE]; 1072 unsigned i; 1073 1074 /* Check input arguments. */ 1075 if ((lpm == NULL) || (ips == NULL) || (depths == NULL)) 1076 return -EINVAL; 1077 1078 for (i = 0; i < n; i++) { 1079 ip6_copy_addr(masked_ip, ips[i]); 1080 ip6_mask_addr(masked_ip, depths[i]); 1081 rule_delete(lpm, masked_ip, depths[i]); 1082 } 1083 1084 /* 1085 * Set all the table entries to 0 (ie delete every rule 1086 * from the data structure. 1087 */ 1088 memset(lpm->tbl24, 0, sizeof(lpm->tbl24)); 1089 memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0]) 1090 * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s); 1091 tbl8_pool_init(lpm); 1092 1093 /* 1094 * Add every rule again (except for the ones that were removed from 1095 * the rules table). 1096 */ 1097 rebuild_lpm(lpm); 1098 1099 return 0; 1100 } 1101 1102 /* 1103 * Delete all rules from the LPM table. 1104 */ 1105 void 1106 rte_lpm6_delete_all(struct rte_lpm6 *lpm) 1107 { 1108 /* Zero used rules counter. */ 1109 lpm->used_rules = 0; 1110 1111 /* Zero tbl24. */ 1112 memset(lpm->tbl24, 0, sizeof(lpm->tbl24)); 1113 1114 /* Zero tbl8. */ 1115 memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0]) * 1116 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s); 1117 1118 /* init pool of free tbl8 indexes */ 1119 tbl8_pool_init(lpm); 1120 1121 /* Delete all rules form the rules table. */ 1122 rte_hash_reset(lpm->rules_tbl); 1123 } 1124 1125 /* 1126 * Convert a depth to a one byte long mask 1127 * Example: 4 will be converted to 0xF0 1128 */ 1129 static uint8_t __attribute__((pure)) 1130 depth_to_mask_1b(uint8_t depth) 1131 { 1132 /* To calculate a mask start with a 1 on the left hand side and right 1133 * shift while populating the left hand side with 1's 1134 */ 1135 return (signed char)0x80 >> (depth - 1); 1136 } 1137 1138 /* 1139 * Find a less specific rule 1140 */ 1141 static int 1142 rule_find_less_specific(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, 1143 struct rte_lpm6_rule *rule) 1144 { 1145 int ret; 1146 uint32_t next_hop; 1147 uint8_t mask; 1148 struct rte_lpm6_rule_key rule_key; 1149 1150 if (depth == 1) 1151 return 0; 1152 1153 rule_key_init(&rule_key, ip, depth); 1154 1155 while (depth > 1) { 1156 depth--; 1157 1158 /* each iteration zero one more bit of the key */ 1159 mask = depth & 7; /* depth % BYTE_SIZE */ 1160 if (mask > 0) 1161 mask = depth_to_mask_1b(mask); 1162 1163 rule_key.depth = depth; 1164 rule_key.ip[depth >> 3] &= mask; 1165 1166 ret = rule_find_with_key(lpm, &rule_key, &next_hop); 1167 if (ret) { 1168 rule->depth = depth; 1169 ip6_copy_addr(rule->ip, rule_key.ip); 1170 rule->next_hop = next_hop; 1171 return 1; 1172 } 1173 } 1174 1175 return 0; 1176 } 1177 1178 /* 1179 * Find range of tbl8 cells occupied by a rule 1180 */ 1181 static void 1182 rule_find_range(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth, 1183 struct rte_lpm6_tbl_entry **from, 1184 struct rte_lpm6_tbl_entry **to, 1185 uint32_t *out_tbl_ind) 1186 { 1187 uint32_t ind; 1188 uint32_t first_3bytes = (uint32_t)ip[0] << 16 | ip[1] << 8 | ip[2]; 1189 1190 if (depth <= 24) { 1191 /* rule is within the top level */ 1192 ind = first_3bytes; 1193 *from = &lpm->tbl24[ind]; 1194 ind += (1 << (24 - depth)) - 1; 1195 *to = &lpm->tbl24[ind]; 1196 *out_tbl_ind = TBL24_IND; 1197 } else { 1198 /* top level entry */ 1199 struct rte_lpm6_tbl_entry *tbl = &lpm->tbl24[first_3bytes]; 1200 assert(tbl->ext_entry == 1); 1201 /* first tbl8 */ 1202 uint32_t tbl_ind = tbl->lpm6_tbl8_gindex; 1203 tbl = &lpm->tbl8[tbl_ind * 1204 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES]; 1205 /* current ip byte, the top level is already behind */ 1206 uint8_t byte = 3; 1207 /* minus top level */ 1208 depth -= 24; 1209 1210 /* iterate through levels (tbl8s) 1211 * until we reach the last one 1212 */ 1213 while (depth > 8) { 1214 tbl += ip[byte]; 1215 assert(tbl->ext_entry == 1); 1216 /* go to the next level/tbl8 */ 1217 tbl_ind = tbl->lpm6_tbl8_gindex; 1218 tbl = &lpm->tbl8[tbl_ind * 1219 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES]; 1220 byte += 1; 1221 depth -= 8; 1222 } 1223 1224 /* last level/tbl8 */ 1225 ind = ip[byte] & depth_to_mask_1b(depth); 1226 *from = &tbl[ind]; 1227 ind += (1 << (8 - depth)) - 1; 1228 *to = &tbl[ind]; 1229 *out_tbl_ind = tbl_ind; 1230 } 1231 } 1232 1233 /* 1234 * Remove a table from the LPM tree 1235 */ 1236 static void 1237 remove_tbl(struct rte_lpm6 *lpm, struct rte_lpm_tbl8_hdr *tbl_hdr, 1238 uint32_t tbl_ind, struct rte_lpm6_rule *lsp_rule) 1239 { 1240 struct rte_lpm6_tbl_entry *owner_entry; 1241 1242 if (tbl_hdr->owner_tbl_ind == TBL24_IND) 1243 owner_entry = &lpm->tbl24[tbl_hdr->owner_entry_ind]; 1244 else { 1245 uint32_t owner_tbl_ind = tbl_hdr->owner_tbl_ind; 1246 owner_entry = &lpm->tbl8[ 1247 owner_tbl_ind * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES + 1248 tbl_hdr->owner_entry_ind]; 1249 1250 struct rte_lpm_tbl8_hdr *owner_tbl_hdr = 1251 &lpm->tbl8_hdrs[owner_tbl_ind]; 1252 if (--owner_tbl_hdr->ref_cnt == 0) 1253 remove_tbl(lpm, owner_tbl_hdr, owner_tbl_ind, lsp_rule); 1254 } 1255 1256 assert(owner_entry->ext_entry == 1); 1257 1258 /* unlink the table */ 1259 if (lsp_rule != NULL) { 1260 struct rte_lpm6_tbl_entry new_tbl_entry = { 1261 .next_hop = lsp_rule->next_hop, 1262 .depth = lsp_rule->depth, 1263 .valid = VALID, 1264 .valid_group = VALID, 1265 .ext_entry = 0 1266 }; 1267 1268 *owner_entry = new_tbl_entry; 1269 } else { 1270 struct rte_lpm6_tbl_entry new_tbl_entry = { 1271 .next_hop = 0, 1272 .depth = 0, 1273 .valid = INVALID, 1274 .valid_group = INVALID, 1275 .ext_entry = 0 1276 }; 1277 1278 *owner_entry = new_tbl_entry; 1279 } 1280 1281 /* return the table to the pool */ 1282 tbl8_put(lpm, tbl_ind); 1283 } 1284 1285 /* 1286 * Deletes a rule 1287 */ 1288 int 1289 rte_lpm6_delete(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth) 1290 { 1291 uint8_t masked_ip[RTE_LPM6_IPV6_ADDR_SIZE]; 1292 struct rte_lpm6_rule lsp_rule_obj; 1293 struct rte_lpm6_rule *lsp_rule; 1294 int ret; 1295 uint32_t tbl_ind; 1296 struct rte_lpm6_tbl_entry *from, *to; 1297 1298 /* Check input arguments. */ 1299 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH)) 1300 return -EINVAL; 1301 1302 /* Copy the IP and mask it to avoid modifying user's input data. */ 1303 ip6_copy_addr(masked_ip, ip); 1304 ip6_mask_addr(masked_ip, depth); 1305 1306 /* Delete the rule from the rule table. */ 1307 ret = rule_delete(lpm, masked_ip, depth); 1308 if (ret < 0) 1309 return -ENOENT; 1310 1311 /* find rule cells */ 1312 rule_find_range(lpm, masked_ip, depth, &from, &to, &tbl_ind); 1313 1314 /* find a less specific rule (a rule with smaller depth) 1315 * note: masked_ip will be modified, don't use it anymore 1316 */ 1317 ret = rule_find_less_specific(lpm, masked_ip, depth, 1318 &lsp_rule_obj); 1319 lsp_rule = ret ? &lsp_rule_obj : NULL; 1320 1321 /* decrement the table rule counter, 1322 * note that tbl24 doesn't have a header 1323 */ 1324 if (tbl_ind != TBL24_IND) { 1325 struct rte_lpm_tbl8_hdr *tbl_hdr = &lpm->tbl8_hdrs[tbl_ind]; 1326 if (--tbl_hdr->ref_cnt == 0) { 1327 /* remove the table */ 1328 remove_tbl(lpm, tbl_hdr, tbl_ind, lsp_rule); 1329 return 0; 1330 } 1331 } 1332 1333 /* iterate rule cells */ 1334 for (; from <= to; from++) 1335 if (from->ext_entry == 1) { 1336 /* reference to a more specific space 1337 * of the prefix/rule. Entries in a more 1338 * specific space that are not used by 1339 * a more specific prefix must be occupied 1340 * by the prefix 1341 */ 1342 if (lsp_rule != NULL) 1343 expand_rule(lpm, 1344 from->lpm6_tbl8_gindex * 1345 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES, 1346 depth, lsp_rule->depth, 1347 lsp_rule->next_hop, VALID); 1348 else 1349 /* since the prefix has no less specific prefix, 1350 * its more specific space must be invalidated 1351 */ 1352 expand_rule(lpm, 1353 from->lpm6_tbl8_gindex * 1354 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES, 1355 depth, 0, 0, INVALID); 1356 } else if (from->depth == depth) { 1357 /* entry is not a reference and belongs to the prefix */ 1358 if (lsp_rule != NULL) { 1359 struct rte_lpm6_tbl_entry new_tbl_entry = { 1360 .next_hop = lsp_rule->next_hop, 1361 .depth = lsp_rule->depth, 1362 .valid = VALID, 1363 .valid_group = VALID, 1364 .ext_entry = 0 1365 }; 1366 1367 *from = new_tbl_entry; 1368 } else { 1369 struct rte_lpm6_tbl_entry new_tbl_entry = { 1370 .next_hop = 0, 1371 .depth = 0, 1372 .valid = INVALID, 1373 .valid_group = INVALID, 1374 .ext_entry = 0 1375 }; 1376 1377 *from = new_tbl_entry; 1378 } 1379 } 1380 1381 return 0; 1382 } 1383