Lines Matching +full:depth

1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
57 uint32_t depth :8; /**< Rule depth. */ member
69 uint8_t depth; /**< Rule depth. */ member
75 uint8_t depth; /**< Rule depth. */ member
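
The three matched `depth` members come from different structs: the packed table entry (line 57) and the rule / rule-key structs (lines 69, 75). For orientation, a reconstruction of how the 32-bit table entry packs its fields (widths as in DPDK 18.05+; a sketch for context, not authoritative):

struct rte_lpm6_tbl_entry {
	uint32_t next_hop:   21; /**< Next hop / next table to be checked. */
	uint32_t depth:       8; /**< Rule depth. */
	/* Flags. */
	uint32_t valid:       1; /**< Validation flag. */
	uint32_t valid_group: 1; /**< Group validation flag. */
	uint32_t ext_entry:   1; /**< Extended entry: next_hop is a tbl8 index. */
};
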
113 * Takes an array of uint8_t (IPv6 address) and masks it using the depth.
114  * It leaves the first 'depth' bits untouched and sets the rest to 0
118 ip6_mask_addr(uint8_t *ip, uint8_t depth) in ip6_mask_addr() argument
123 part_depth = depth; in ip6_mask_addr()
132 part_depth -= BYTE_SIZE; in ip6_mask_addr()
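
Only the matched lines of ip6_mask_addr() are shown above. A minimal standalone sketch of the same masking idea, with hypothetical names rather than the DPDK API:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BYTE_SIZE 8

/* Keep the first 'depth' bits of a 16-byte IPv6 address, zero the rest. */
static void
mask_ipv6(uint8_t ip[16], uint8_t depth)
{
	int full_bytes = depth / BYTE_SIZE; /* bytes left untouched */
	int rem_bits = depth % BYTE_SIZE;   /* bits kept in the next byte */
	int i = full_bytes;

	if (rem_bits) {
		ip[i] &= (uint8_t)(0xff << (BYTE_SIZE - rem_bits));
		i++;
	}
	for (; i < 16; i++)
		ip[i] = 0;
}

int main(void)
{
	uint8_t ip[16];

	memset(ip, 0xff, sizeof(ip));
	mask_ipv6(ip, 52); /* /52: 6 full bytes kept, plus 4 bits of byte 6 */
	for (int i = 0; i < 16; i++)
		printf("%02x", ip[i]); /* prints ff x6, then f0, then zeros */
	putchar('\n');
	return 0;
}
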
167 for (i = 0; i < lpm->number_tbl8s; i++) in tbl8_pool_init()
168 lpm->tbl8_pool[i] = i; in tbl8_pool_init()
170 lpm->tbl8_pool_pos = 0; in tbl8_pool_init()
179 if (lpm->tbl8_pool_pos == lpm->number_tbl8s) in tbl8_get()
181 return -ENOSPC; in tbl8_get()
184 *tbl8_ind = lpm->tbl8_pool[lpm->tbl8_pool_pos++]; in tbl8_get()
194 if (lpm->tbl8_pool_pos == 0) in tbl8_put()
196 return -ENOSPC; in tbl8_put()
198 lpm->tbl8_pool[--lpm->tbl8_pool_pos] = tbl8_ind; in tbl8_put()
208 return lpm->number_tbl8s - lpm->tbl8_pool_pos; in tbl8_available()
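
The tbl8 pool helpers (lines 167-208) implement a plain stack of spare group indexes: init fills the pool, tbl8_get() pops from the current position, tbl8_put() pushes back. A self-contained sketch of that pattern, with illustrative (non-DPDK) names:

#include <errno.h>
#include <stdint.h>

struct idx_pool {
	uint32_t *slots; /* slots[pos..n-1] free; slots[0..pos-1] handed out */
	uint32_t n;
	uint32_t pos;
};

static void
idx_pool_init(struct idx_pool *p, uint32_t *storage, uint32_t n)
{
	uint32_t i;

	for (i = 0; i < n; i++)
		storage[i] = i;
	p->slots = storage;
	p->n = n;
	p->pos = 0;
}

static int
idx_pool_get(struct idx_pool *p, uint32_t *idx)
{
	if (p->pos == p->n)
		return -ENOSPC; /* pool exhausted */
	*idx = p->slots[p->pos++];
	return 0;
}

static int
idx_pool_put(struct idx_pool *p, uint32_t idx)
{
	if (p->pos == 0)
		return -ENOSPC; /* nothing is out on loan */
	p->slots[--p->pos] = idx;
	return 0;
}

static uint32_t
idx_pool_available(const struct idx_pool *p)
{
	return p->n - p->pos;
}

int main(void)
{
	uint32_t storage[4], idx;
	struct idx_pool p;

	idx_pool_init(&p, storage, 4);
	idx_pool_get(&p, &idx); /* idx == 0 */
	idx_pool_put(&p, idx);
	return idx_pool_available(&p) == 4 ? 0 : 1;
}
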
217 rule_key_init(struct rte_lpm6_rule_key *key, uint8_t *ip, uint8_t depth)
219 ip6_copy_addr(key->ip, ip);
220 key->depth = depth;
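
rule_key_init() fills the composite (ip, depth) key used for the rules hash. A hypothetical mirror of that key and its initialization; the memset is a defensive detail for byte-wise hashing of a padded struct (an assumption here, not something the matched lines show):

#include <stdint.h>
#include <string.h>

/* Illustrative layout only. */
struct rule_key_sketch {
	uint8_t ip[16];
	uint32_t depth;
};

static void
rule_key_init_sketch(struct rule_key_sketch *key, const uint8_t *ip,
		     uint8_t depth)
{
	/* zero first: a hash reads the struct as raw bytes, so any
	 * padding must be deterministic */
	memset(key, 0, sizeof(*key));
	memcpy(key->ip, ip, sizeof(key->ip));
	key->depth = depth;
}
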
233 while (rte_hash_iterate(lpm->rules_tbl, (void *) &rule_key,
235 rte_lpm6_add(lpm, rule_key->ip, rule_key->depth,
261 if ((name == NULL) || (socket_id < -1) || (config == NULL) || in rte_lpm6_create()
262 config->number_tbl8s > RTE_LPM6_TBL8_MAX_NUM_GROUPS) { in rte_lpm6_create()
271 .entries = config->max_rules * 1.2 + in rte_lpm6_create()
292 sizeof(uint32_t) * config->number_tbl8s, in rte_lpm6_create()
303 sizeof(struct rte_lpm_tbl8_hdr) * config->number_tbl8s, in rte_lpm6_create()
315 mem_size = sizeof(*lpm) + (sizeof(lpm->tbl8[0]) * in rte_lpm6_create()
316 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s); in rte_lpm6_create()
323 lpm = (struct rte_lpm6 *) te->data; in rte_lpm6_create()
324 if (strncmp(name, lpm->name, RTE_LPM6_NAMESIZE) == 0) in rte_lpm6_create()
354  	lpm->max_rules = config->max_rules; in rte_lpm6_create()
355 lpm->number_tbl8s = config->number_tbl8s; in rte_lpm6_create()
356 strlcpy(lpm->name, name, sizeof(lpm->name)); in rte_lpm6_create()
357  	lpm->rules_tbl = rules_tbl; in rte_lpm6_create()
358 lpm->tbl8_pool = tbl8_pool; in rte_lpm6_create()
359 lpm->tbl8_hdrs = tbl8_hdrs; in rte_lpm6_create()
364  	te->data = (void *) lpm; in rte_lpm6_create()
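
The allocation at lines 315-316 puts numbers on the memory cost: every tbl8 group is RTE_LPM6_TBL8_GROUP_NUM_ENTRIES = 256 packed 4-byte entries, so table memory grows linearly with config->number_tbl8s. A back-of-the-envelope check (the 256 and 4-byte constants come from the library; the rest is illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const size_t entry_sz = 4;        /* one packed 32-bit tbl entry */
	const size_t group_entries = 256; /* RTE_LPM6_TBL8_GROUP_NUM_ENTRIES */
	const size_t number_tbl8s = 1 << 16;

	/* tbl24 is fixed: one entry per 24-bit prefix */
	size_t tbl24_bytes = ((size_t)1 << 24) * entry_sz;            /* 64 MiB */
	size_t tbl8_bytes = group_entries * entry_sz * number_tbl8s;  /* 64 MiB */

	printf("tbl24: %zu MiB, tbl8: %zu MiB\n",
	       tbl24_bytes >> 20, tbl8_bytes >> 20);
	return 0;
}
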
396 l = (struct rte_lpm6 *) te->data;
397 if (strncmp(name, l->name, RTE_LPM6_NAMESIZE) == 0)
431 if (te->data == (void *) lpm) in rte_lpm6_free()
441 rte_free(lpm->tbl8_hdrs); in rte_lpm6_free()
442 rte_free(lpm->tbl8_pool); in rte_lpm6_free()
443  	rte_hash_free(lpm->rules_tbl); in rte_lpm6_free()
459 ret = rte_hash_lookup_data(lpm->rules_tbl, (const void *) rule_key,
471 rule_find(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
477 rule_key_init(&rule_key, ip, depth);
487  *   0 - next hop of an existing rule is updated
488 * 1 - new rule successfully added
489 * <0 - error
492 rule_add(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth, uint32_t next_hop)
499 rule_key_init(&rule_key, ip, depth);
508 if (!rule_exist && lpm->used_rules == lpm->max_rules)
509 return -ENOSPC;
512 ret = rte_hash_add_key_data(lpm->rules_tbl, &rule_key,
519 lpm->used_rules++;
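
rule_add() follows the contract documented at lines 487-489: 0 when an existing rule's next hop is updated, 1 when a new rule is added, <0 on error, and only brand-new rules count against max_rules. The same contract with a toy linear-scan table standing in for rte_hash:

#include <errno.h>
#include <stdint.h>
#include <string.h>

#define MAX_RULES 4

struct rule {
	uint8_t ip[16];
	uint8_t depth;
	uint32_t next_hop;
};

struct rule_table {
	struct rule rules[MAX_RULES];
	unsigned int used;
};

static int
rule_add_sketch(struct rule_table *t, const uint8_t *ip, uint8_t depth,
		uint32_t next_hop)
{
	unsigned int i;

	for (i = 0; i < t->used; i++) {
		if (t->rules[i].depth == depth &&
		    memcmp(t->rules[i].ip, ip, 16) == 0) {
			t->rules[i].next_hop = next_hop;
			return 0; /* existing rule updated */
		}
	}
	if (t->used == MAX_RULES)
		return -ENOSPC; /* only new rules hit the capacity check */
	memcpy(t->rules[t->used].ip, ip, 16);
	t->rules[t->used].depth = depth;
	t->rules[t->used].next_hop = next_hop;
	t->used++;
	return 1; /* new rule added */
}

int main(void)
{
	struct rule_table t = { .used = 0 };
	uint8_t ip[16] = { 0x20, 0x01 };
	int r1 = rule_add_sketch(&t, ip, 32, 5); /* 1: added */
	int r2 = rule_add_sketch(&t, ip, 32, 9); /* 0: updated */

	return (r1 == 1 && r2 == 0) ? 0 : 1;
}
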
528  * Function that expands a rule across the data structure when a less-generic one was added before it
543 .depth = new_depth, in expand_rule()
549 if (!lpm->tbl8[j].valid || (lpm->tbl8[j].ext_entry == 0 in expand_rule()
550 && lpm->tbl8[j].depth <= old_depth)) { in expand_rule()
552 lpm->tbl8[j] = new_tbl8_entry; in expand_rule()
554 } else if (lpm->tbl8[j].ext_entry == 1) { in expand_rule()
556 tbl8_gindex_next = lpm->tbl8[j].lpm6_tbl8_gindex in expand_rule()
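
Lines 549-556 are the core of expand_rule(): overwrite any slot still owned by a rule no deeper than old_depth, and recurse into extended entries so deeper tables inherit the change too. A compact recursive sketch with toy types (not the packed DPDK entry):

#include <stdint.h>

#define GROUP_ENTRIES 256

struct entry {
	uint32_t nh_or_group; /* next hop, or child tbl8 group when ext is set */
	uint8_t depth;
	uint8_t valid;
	uint8_t ext;
};

void
expand_sketch(struct entry *tbl8, uint32_t g, uint8_t old_depth,
	      struct entry fill)
{
	uint32_t i, start = g * GROUP_ENTRIES;

	for (i = start; i < start + GROUP_ENTRIES; i++) {
		if (!tbl8[i].valid ||
		    (tbl8[i].ext == 0 && tbl8[i].depth <= old_depth))
			tbl8[i] = fill; /* slot still owned by the old rule */
		else if (tbl8[i].ext == 1)
			expand_sketch(tbl8, tbl8[i].nh_or_group,
				      old_depth, fill);
	}
}
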
571 struct rte_lpm_tbl8_hdr *tbl_hdr = &lpm->tbl8_hdrs[tbl_ind]; in init_tbl8_header()
572 tbl_hdr->owner_tbl_ind = owner_tbl_ind; in init_tbl8_header()
573 tbl_hdr->owner_entry_ind = owner_entry_ind; in init_tbl8_header()
574 tbl_hdr->ref_cnt = 0; in init_tbl8_header()
589 bitshift = (int8_t)((bytes - i)*BYTE_SIZE); in get_bitshift()
593 entry_ind = entry_ind | ip[i-1] << bitshift; in get_bitshift()
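
get_bitshift() (lines 589-593) folds consecutive address bytes into one table index, most significant byte first. Equivalent standalone arithmetic:

#include <stdint.h>
#include <stdio.h>

#define BYTE_SIZE 8

static uint32_t
bytes_to_index(const uint8_t *ip, uint8_t bytes)
{
	uint32_t ind = 0;
	uint8_t i;

	for (i = 1; i <= bytes; i++)
		ind |= (uint32_t)ip[i - 1] << ((bytes - i) * BYTE_SIZE);
	return ind;
}

int main(void)
{
	uint8_t ip[3] = { 0x20, 0x01, 0x0d };

	/* the first three bytes select the tbl24 slot: 0x20010d */
	printf("%#x\n", bytes_to_index(ip, 3));
	return 0;
}
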
609 uint8_t bytes, uint8_t first_byte, uint8_t depth, in simulate_add_step() argument
623 bits_covered = (uint8_t)((bytes+first_byte-1)*BYTE_SIZE); in simulate_add_step()
625 if (depth <= bits_covered) { in simulate_add_step()
634 depth -= bits_covered; in simulate_add_step()
635 uint32_t cnt = depth >> 3; /* depth / BYTE_SIZE */ in simulate_add_step()
636 if (depth & 7) /* 0b00000111 */ in simulate_add_step()
637 /* if depth % 8 > 0 then one more table is needed in simulate_add_step()
647 *next_tbl = &(lpm->tbl8[next_tbl_ind * in simulate_add_step()
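
Lines 634-637 count how many more tbl8 groups a rule still needs once the current step is not the last one: one group per remaining full byte, plus one if a partial byte is left over. The same arithmetic, standalone:

#include <stdint.h>
#include <stdio.h>

static uint32_t
tbl8s_needed(uint8_t remaining)
{
	uint32_t cnt = remaining >> 3; /* one group per full byte */

	if (remaining & 7)             /* a partial byte needs one more */
		cnt++;
	return cnt;
}

int main(void)
{
	/* a /52 rule: tbl24 resolves 24 bits, 28 remain -> 4 groups */
	printf("%u\n", tbl8s_needed(52 - 24));
	return 0;
}
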
662 uint8_t first_byte, uint8_t depth, uint32_t next_hop, in add_step() argument
677 bits_covered = (uint8_t)((bytes+first_byte-1)*BYTE_SIZE); in add_step()
680  * If depth is smaller than this number (i.e. this is the last step) in add_step()
683 if (depth <= bits_covered) { in add_step()
684 tbl_range = 1 << (bits_covered - depth); in add_step()
688 tbl[i].depth <= depth)) { in add_step()
692 .depth = depth, in add_step()
708 expand_rule(lpm, tbl8_gindex, depth, depth, in add_step()
715 lpm->tbl8_hdrs[tbl_ind].ref_cnt++; in add_step()
729 return -ENOSPC; in add_step()
734 memset(&lpm->tbl8[tbl8_group_start], 0, in add_step()
746 .depth = 0, in add_step()
756 lpm->tbl8_hdrs[tbl_ind].ref_cnt++; in add_step()
766 return -ENOSPC; in add_step()
775 .depth = tbl[entry_ind].depth, in add_step()
783 lpm->tbl8[i] = tbl_entry; in add_step()
797 .depth = 0, in add_step()
807 lpm->tbl8_hdrs[tbl_ind].ref_cnt++; in add_step()
811 *next_tbl = &(lpm->tbl8[*next_tbl_ind * in add_step()
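
At the last step (lines 683-690), a rule of depth d in a table that resolves bits_covered bits spans 1 << (bits_covered - d) consecutive entries, and only entries holding a rule no deeper than d are overwritten. The span arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t bits_covered = 24; /* bits resolved once tbl24 is indexed */
	uint8_t depth;

	for (depth = 20; depth <= 24; depth++)
		printf("/%u rule spans %u entries\n",
		       depth, 1u << (bits_covered - depth));
	return 0;
}
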
823  *    -ENOSPC not enough tbl8s left
826 simulate_add(struct rte_lpm6 *lpm, const uint8_t *masked_ip, uint8_t depth) in simulate_add() argument
838 ret = simulate_add_step(lpm, lpm->tbl24, &tbl_next, masked_ip, in simulate_add()
839 ADD_FIRST_BYTE, 1, depth, &need_tbl_nb); in simulate_add()
848 (uint8_t)(i + 1), depth, &need_tbl_nb); in simulate_add()
854 return -ENOSPC; in simulate_add()
863 rte_lpm6_add(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth, in rte_lpm6_add() argument
875 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH)) in rte_lpm6_add()
876 return -EINVAL; in rte_lpm6_add()
880 ip6_mask_addr(masked_ip, depth); in rte_lpm6_add()
883 int ret = simulate_add(lpm, masked_ip, depth); in rte_lpm6_add()
889 int is_new_rule = rule_add(lpm, masked_ip, depth, next_hop); in rte_lpm6_add()
896 tbl = lpm->tbl24; in rte_lpm6_add()
898 masked_ip, ADD_FIRST_BYTE, 1, depth, next_hop, in rte_lpm6_add()
910 depth, next_hop, is_new_rule); in rte_lpm6_add()
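
The matched lines of rte_lpm6_add() show the shape of an insert: mask the address, dry-run the walk with simulate_add() so the operation can fail up front with -ENOSPC, record the rule, then commit step by step. A hedged usage sketch of the public entry points implemented here, assuming an initialized EAL and the byte-array signatures these matched lines use (newer DPDK releases changed the address type):

#include <stdint.h>
#include <stdio.h>
#include <rte_lpm6.h>

static int
lpm6_example(void)
{
	struct rte_lpm6_config cfg = {
		.max_rules = 1024,
		.number_tbl8s = 1 << 16,
		.flags = 0,
	};
	struct rte_lpm6 *lpm = rte_lpm6_create("example", SOCKET_ID_ANY, &cfg);
	uint8_t prefix[16] = { 0x20, 0x01, 0x0d, 0xb8 }; /* 2001:db8::/32 */
	uint8_t addr[16] = { 0x20, 0x01, 0x0d, 0xb8,
			     0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 };
	uint32_t nh;

	if (lpm == NULL)
		return -1;
	if (rte_lpm6_add(lpm, prefix, 32, 7) == 0 &&
	    rte_lpm6_lookup(lpm, addr, &nh) == 0)
		printf("next hop %u\n", nh); /* 7 */
	rte_lpm6_free(lpm);
	return 0;
}
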
936 tbl8_index = ip[first_byte-1] + in lookup_step()
940 *tbl_next = &lpm->tbl8[tbl8_index]; in lookup_step()
946 return (tbl_entry & RTE_LPM6_LOOKUP_SUCCESS) ? 0 : -ENOENT; in lookup_step()
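
lookup_step() (lines 936-946) either descends one byte deeper, when the entry is valid and extended, or terminates with the stored next hop. The same control flow with an unpacked toy entry (the real entry is a packed 32-bit bit-field):

#include <errno.h>
#include <stdint.h>

#define GROUP_ENTRIES 256

struct entry {
	uint32_t next_hop; /* next hop, or tbl8 group index when ext is set */
	uint8_t valid;
	uint8_t ext;
};

/* Returns 1 to keep walking, 0 on a match, -ENOENT on a miss,
 * matching the convention above. */
static int
lookup_step_sketch(const struct entry *tbl8, const struct entry *cur,
		   uint8_t ip_byte, const struct entry **next,
		   uint32_t *next_hop)
{
	if (cur->valid && cur->ext) {
		*next = &tbl8[cur->next_hop * GROUP_ENTRIES + ip_byte];
		return 1;
	}
	if (!cur->valid)
		return -ENOENT;
	*next_hop = cur->next_hop;
	return 0;
}
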
965 return -EINVAL; in rte_lpm6_lookup()
971 tbl = &lpm->tbl24[tbl24_index]; in rte_lpm6_lookup()
999 return -EINVAL; in rte_lpm6_lookup_bulk_func()
1007 tbl = &lpm->tbl24[tbl24_index]; in rte_lpm6_lookup_bulk_func()
1019 next_hops[i] = -1; in rte_lpm6_lookup_bulk_func()
1028 fill_rule6(char *buffer, const uint8_t *ip, uint8_t depth, uint32_t next_hop) in fill_rule6() argument
1032 ip6_copy_addr((uint8_t *)&rule->ip, ip); in fill_rule6()
1033 rule->depth = depth; in fill_rule6()
1034 rule->next_hop = next_hop; in fill_rule6()
1041 * Look for a rule in the high-level rules table
1044 rte_lpm6_is_rule_present(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth,
1051 (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH))
1052 return -EINVAL;
1056 ip6_mask_addr(masked_ip, depth);
1058 return rule_find(lpm, masked_ip, depth, next_hop);
1063 * NOTE: Valid range for depth parameter is 1 .. 128 inclusive.
1069 rule_delete(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth)
1075 rule_key_init(&rule_key, ip, depth);
1078 ret = rte_hash_del_key(lpm->rules_tbl, (void *) &rule_key);
1080 lpm->used_rules--;
1102 return -EINVAL;
1114 memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1115 memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
1116 * RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
1135 lpm->used_rules = 0;
1138 memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1141 memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0]) *
1142 RTE_LPM6_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
1148 rte_hash_reset(lpm->rules_tbl);
1153  * Convert a depth to a one-byte mask
1157 depth_to_mask_1b(uint8_t depth) in depth_to_mask_1b() argument
1162 return (signed char)0x80 >> (depth - 1); in depth_to_mask_1b()
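
The cast-and-shift at line 1162 works because an arithmetic right shift of a negative value drags the sign bit along, leaving `depth` leading ones. That behavior is implementation-defined in C but holds on the platforms DPDK targets; a quick standalone check:

#include <stdint.h>
#include <stdio.h>

static uint8_t
depth_to_mask_1b(uint8_t depth)
{
	/* relies on arithmetic right shift of a negative value */
	return (uint8_t)((signed char)0x80 >> (depth - 1));
}

int main(void)
{
	for (uint8_t d = 1; d <= 8; d++)
		printf("depth %u -> 0x%02x\n", d, depth_to_mask_1b(d));
	/* depth 1 -> 0x80, depth 4 -> 0xf0, depth 8 -> 0xff */
	return 0;
}
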
1170 rule_find_less_specific(struct rte_lpm6 *lpm, uint8_t *ip, uint8_t depth,
1178 if (depth == 1)
1181 rule_key_init(&rule_key, ip, depth);
1183 while (depth > 1) {
1184 depth--;
1187 mask = depth & 7; /* depth % BYTE_SIZE */
1191 rule_key.depth = depth;
1192 rule_key.ip[depth >> 3] &= mask;
1196 rule->depth = depth;
1197 ip6_copy_addr(rule->ip, rule_key.ip);
1198 rule->next_hop = next_hop;
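
rule_find_less_specific() (lines 1170-1198) backtracks one bit at a time: decrement depth, zero the bit that just fell off the key, and probe the rules table at each shorter prefix. A sketch with a stub probe (find_rule() here is a placeholder, not DPDK's API):

#include <stdbool.h>
#include <stdint.h>

/* stand-in probe; the real code queries the rules hash */
static bool
find_rule(const uint8_t ip[16], uint8_t depth)
{
	(void)ip;
	(void)depth;
	return false;
}

static bool
find_less_specific(uint8_t ip[16], uint8_t depth, uint8_t *found_depth)
{
	while (depth > 1) {
		depth--;

		/* zero the bit that just fell off the prefix */
		uint8_t rem = depth & 7; /* depth % 8 */
		uint8_t mask = rem ?
		    (uint8_t)((signed char)0x80 >> (rem - 1)) : 0;
		ip[depth >> 3] &= mask;

		if (find_rule(ip, depth)) {
			*found_depth = depth;
			return true;
		}
	}
	return false;
}

int main(void)
{
	uint8_t ip[16] = { 0x20, 0x01, 0x0d, 0xb8 };
	uint8_t d;

	return find_less_specific(ip, 32, &d) ? 0 : 1;
}
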
1211 rule_find_range(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth, in rule_find_range() argument
1219 if (depth <= 24) { in rule_find_range()
1222 *from = &lpm->tbl24[ind]; in rule_find_range()
1223 ind += (1 << (24 - depth)) - 1; in rule_find_range()
1224 *to = &lpm->tbl24[ind]; in rule_find_range()
1228 struct rte_lpm6_tbl_entry *tbl = &lpm->tbl24[first_3bytes]; in rule_find_range()
1229 assert(tbl->ext_entry == 1); in rule_find_range()
1231 uint32_t tbl_ind = tbl->lpm6_tbl8_gindex; in rule_find_range()
1232 tbl = &lpm->tbl8[tbl_ind * in rule_find_range()
1237 depth -= 24; in rule_find_range()
1242 while (depth > 8) { in rule_find_range()
1244 assert(tbl->ext_entry == 1); in rule_find_range()
1246 tbl_ind = tbl->lpm6_tbl8_gindex; in rule_find_range()
1247 tbl = &lpm->tbl8[tbl_ind * in rule_find_range()
1250 depth -= 8; in rule_find_range()
1254 ind = ip[byte] & depth_to_mask_1b(depth); in rule_find_range()
1256 ind += (1 << (8 - depth)) - 1; in rule_find_range()
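
For depth <= 24 (lines 1219-1224), the slots owned by a rule form one dense tbl24 interval of 1 << (24 - depth) entries; deeper rules walk tbl8 levels first and apply the same idea to the final byte. The interval arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t ip[3] = { 0x20, 0x01, 0x00 };
	uint8_t depth = 20;
	uint32_t span = 1u << (24 - depth); /* 16 slots for a /20 */
	uint32_t first = ((((uint32_t)ip[0] << 16) |
			  ((uint32_t)ip[1] << 8) | ip[2]))
			 & ~(span - 1); /* the real code pre-masks via ip6_mask_addr() */

	printf("tbl24[%u..%u]\n", first, first + span - 1);
	return 0;
}
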
1271 if (tbl_hdr->owner_tbl_ind == TBL24_IND) in remove_tbl()
1272 owner_entry = &lpm->tbl24[tbl_hdr->owner_entry_ind]; in remove_tbl()
1274 uint32_t owner_tbl_ind = tbl_hdr->owner_tbl_ind; in remove_tbl()
1275 owner_entry = &lpm->tbl8[ in remove_tbl()
1277 tbl_hdr->owner_entry_ind]; in remove_tbl()
1280 &lpm->tbl8_hdrs[owner_tbl_ind]; in remove_tbl()
1281 if (--owner_tbl_hdr->ref_cnt == 0) in remove_tbl()
1285 assert(owner_entry->ext_entry == 1); in remove_tbl()
1290 .next_hop = lsp_rule->next_hop, in remove_tbl()
1291 .depth = lsp_rule->depth, in remove_tbl()
1301 .depth = 0, in remove_tbl()
1318 rte_lpm6_delete(struct rte_lpm6 *lpm, const uint8_t *ip, uint8_t depth, in rte_lpm6_delete() argument
1329 if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM6_MAX_DEPTH)) in rte_lpm6_delete()
1330 return -EINVAL; in rte_lpm6_delete()
1334 ip6_mask_addr(masked_ip, depth); in rte_lpm6_delete()
1338 ret = rule_delete(lpm, masked_ip, depth); in rte_lpm6_delete()
1340 return -ENOENT; in rte_lpm6_delete()
1344 rule_find_range(lpm, masked_ip, depth, &from, &to, &tbl_ind); in rte_lpm6_delete()
1347 /* find a less specific rule (a rule with smaller depth) in rte_lpm6_delete()
1350 ret = rule_find_less_specific(lpm, masked_ip, depth, in rte_lpm6_delete()
1358 struct rte_lpm_tbl8_hdr *tbl_hdr = &lpm->tbl8_hdrs[tbl_ind]; in rte_lpm6_delete()
1359 if (--tbl_hdr->ref_cnt == 0) { in rte_lpm6_delete()
1368 if (from->ext_entry == 1) { in rte_lpm6_delete()
1377 from->lpm6_tbl8_gindex * in rte_lpm6_delete()
1379 depth, lsp_rule->depth, in rte_lpm6_delete()
1380 lsp_rule->next_hop, VALID); in rte_lpm6_delete()
1386 from->lpm6_tbl8_gindex * in rte_lpm6_delete()
1388 depth, 0, 0, INVALID); in rte_lpm6_delete()
1389 } else if (from->depth == depth) { in rte_lpm6_delete()
1393 .next_hop = lsp_rule->next_hop, in rte_lpm6_delete()
1394 .depth = lsp_rule->depth, in rte_lpm6_delete()
1404 .depth = 0, in rte_lpm6_delete()
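
The tail of rte_lpm6_delete() (line 1389 onward) handles the non-extended case: every slot the deleted rule owned either falls back to the closest less-specific rule or is invalidated. A minimal sketch of that refill, with toy types:

#include <stdint.h>

struct entry {
	uint32_t next_hop;
	uint8_t depth;
	uint8_t valid;
};

/* lsp is the less-specific parent rule, or NULL if none covers the range */
static void
refill_range(struct entry *from, struct entry *to, const struct entry *lsp)
{
	struct entry *e;

	for (e = from; e <= to; e++) {
		if (lsp != NULL)
			*e = *lsp; /* fall back to the parent rule */
		else
			*e = (struct entry){ 0 }; /* no cover: invalidate */
	}
}

int main(void)
{
	struct entry tbl[4] = { { 0 } };
	struct entry parent = { .next_hop = 9, .depth = 16, .valid = 1 };

	refill_range(&tbl[0], &tbl[3], &parent); /* all four inherit the /16 */
	return tbl[2].next_hop == 9 ? 0 : 1;
}
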