/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>

#include <rte_debug.h>
#include <rte_malloc.h>
#include <rte_errno.h>

#include <rte_rib6.h>
#include <rte_fib6.h>
#include "trie.h"

#ifdef CC_TRIE_AVX512_SUPPORT

#include "trie_avx512.h"

#endif /* CC_TRIE_AVX512_SUPPORT */

#define TRIE_NAMESIZE		64

enum edge {
	LEDGE,
	REDGE
};

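/*
 * Select the scalar bulk lookup function matching the next hop entry size
 */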
static inline rte_fib6_lookup_fn_t
get_scalar_fn(enum rte_fib_trie_nh_sz nh_sz)
{
	switch (nh_sz) {
	case RTE_FIB6_TRIE_2B:
		return rte_trie_lookup_bulk_2b;
	case RTE_FIB6_TRIE_4B:
		return rte_trie_lookup_bulk_4b;
	case RTE_FIB6_TRIE_8B:
		return rte_trie_lookup_bulk_8b;
	default:
		return NULL;
	}
}

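/*
 * Select the AVX512 bulk lookup function matching the next hop entry size.
 * Returns NULL if the required CPU flags or SIMD bitwidth are unavailable,
 * or if the library was built without AVX512 support.
 */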
static inline rte_fib6_lookup_fn_t
get_vector_fn(enum rte_fib_trie_nh_sz nh_sz)
{
#ifdef CC_TRIE_AVX512_SUPPORT
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) <= 0 ||
			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512DQ) <= 0 ||
			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) <= 0 ||
			rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)
		return NULL;
	switch (nh_sz) {
	case RTE_FIB6_TRIE_2B:
		return rte_trie_vec_lookup_bulk_2b;
	case RTE_FIB6_TRIE_4B:
		return rte_trie_vec_lookup_bulk_4b;
	case RTE_FIB6_TRIE_8B:
		return rte_trie_vec_lookup_bulk_8b;
	default:
		return NULL;
	}
#else
	RTE_SET_USED(nh_sz);
#endif
	return NULL;
}

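/*
 * Return the lookup function for the requested lookup type,
 * falling back from vector to scalar for RTE_FIB6_LOOKUP_DEFAULT
 */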
rte_fib6_lookup_fn_t
trie_get_lookup_fn(void *p, enum rte_fib6_lookup_type type)
{
	enum rte_fib_trie_nh_sz nh_sz;
	rte_fib6_lookup_fn_t ret_fn;
	struct rte_trie_tbl *dp = p;

	if (dp == NULL)
		return NULL;

	nh_sz = dp->nh_sz;

	switch (type) {
	case RTE_FIB6_LOOKUP_TRIE_SCALAR:
		return get_scalar_fn(nh_sz);
	case RTE_FIB6_LOOKUP_TRIE_VECTOR_AVX512:
		return get_vector_fn(nh_sz);
	case RTE_FIB6_LOOKUP_DEFAULT:
		ret_fn = get_vector_fn(nh_sz);
		return (ret_fn != NULL) ? ret_fn : get_scalar_fn(nh_sz);
	default:
		return NULL;
	}
}

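/*
 * Write val into n consecutive dataplane entries of the given width at ptr
 */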
static void
write_to_dp(void *ptr, uint64_t val, enum rte_fib_trie_nh_sz size, int n)
{
	int i;
	uint16_t *ptr16 = (uint16_t *)ptr;
	uint32_t *ptr32 = (uint32_t *)ptr;
	uint64_t *ptr64 = (uint64_t *)ptr;

	switch (size) {
	case RTE_FIB6_TRIE_2B:
		for (i = 0; i < n; i++)
			ptr16[i] = (uint16_t)val;
		break;
	case RTE_FIB6_TRIE_4B:
		for (i = 0; i < n; i++)
			ptr32[i] = (uint32_t)val;
		break;
	case RTE_FIB6_TRIE_8B:
		for (i = 0; i < n; i++)
			ptr64[i] = val;
		break;
	}
}

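/*
 * Fill the tbl8 pool with all available group indexes
 */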
static void
tbl8_pool_init(struct rte_trie_tbl *dp)
{
	uint32_t i;

	/* put entire range of indexes to the tbl8 pool */
	for (i = 0; i < dp->number_tbl8s; i++)
		dp->tbl8_pool[i] = i;

	dp->tbl8_pool_pos = 0;
}

/*
 * Get an index of a free tbl8 from the pool
 */
static inline int32_t
tbl8_get(struct rte_trie_tbl *dp)
{
	if (dp->tbl8_pool_pos == dp->number_tbl8s)
		/* no more free tbl8 */
		return -ENOSPC;

	/* next index */
	return dp->tbl8_pool[dp->tbl8_pool_pos++];
}

/*
 * Put an index of a free tbl8 back to the pool
 */
static inline void
tbl8_put(struct rte_trie_tbl *dp, uint32_t tbl8_ind)
{
	dp->tbl8_pool[--dp->tbl8_pool_pos] = tbl8_ind;
}

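/*
 * Allocate a tbl8 group and initialize all of its entries
 * with the covering next hop inherited from the parent table
 */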
static int
tbl8_alloc(struct rte_trie_tbl *dp, uint64_t nh)
{
	int64_t		tbl8_idx;
	uint8_t		*tbl8_ptr;

	tbl8_idx = tbl8_get(dp);
	if (tbl8_idx < 0)
		return tbl8_idx;
	tbl8_ptr = get_tbl_p_by_idx(dp->tbl8,
		tbl8_idx * TRIE_TBL8_GRP_NUM_ENT, dp->nh_sz);
	/* init tbl8 entries with nexthop from tbl24 */
	write_to_dp((void *)tbl8_ptr, nh, dp->nh_sz,
		TRIE_TBL8_GRP_NUM_ENT);
	return tbl8_idx;
}

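/*
 * If every entry in a tbl8 group holds the same non-extended next hop,
 * collapse the group into its parent entry and return it to the pool
 */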
static void
tbl8_recycle(struct rte_trie_tbl *dp, void *par, uint64_t tbl8_idx)
{
	uint32_t i;
	uint64_t nh;
	uint16_t *ptr16;
	uint32_t *ptr32;
	uint64_t *ptr64;

	switch (dp->nh_sz) {
	case RTE_FIB6_TRIE_2B:
		ptr16 = &((uint16_t *)dp->tbl8)[tbl8_idx *
				TRIE_TBL8_GRP_NUM_ENT];
		nh = *ptr16;
		if (nh & TRIE_EXT_ENT)
			return;
		for (i = 1; i < TRIE_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr16[i])
				return;
		}
		write_to_dp(par, nh, dp->nh_sz, 1);
		for (i = 0; i < TRIE_TBL8_GRP_NUM_ENT; i++)
			ptr16[i] = 0;
		break;
	case RTE_FIB6_TRIE_4B:
		ptr32 = &((uint32_t *)dp->tbl8)[tbl8_idx *
				TRIE_TBL8_GRP_NUM_ENT];
		nh = *ptr32;
		if (nh & TRIE_EXT_ENT)
			return;
		for (i = 1; i < TRIE_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr32[i])
				return;
		}
		write_to_dp(par, nh, dp->nh_sz, 1);
		for (i = 0; i < TRIE_TBL8_GRP_NUM_ENT; i++)
			ptr32[i] = 0;
		break;
	case RTE_FIB6_TRIE_8B:
		ptr64 = &((uint64_t *)dp->tbl8)[tbl8_idx *
				TRIE_TBL8_GRP_NUM_ENT];
		nh = *ptr64;
		if (nh & TRIE_EXT_ENT)
			return;
		for (i = 1; i < TRIE_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr64[i])
				return;
		}
		write_to_dp(par, nh, dp->nh_sz, 1);
		for (i = 0; i < TRIE_TBL8_GRP_NUM_ENT; i++)
			ptr64[i] = 0;
		break;
	}
	tbl8_put(dp, tbl8_idx);
}

#define BYTE_SIZE	8
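/*
 * Build a table index from "bytes" address bytes starting at "first_byte",
 * offset into the tbl8 group selected by prev_idx
 */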
static inline uint32_t
get_idx(const uint8_t *ip, uint32_t prev_idx, int bytes, int first_byte)
{
	int i;
	uint32_t idx = 0;
	uint8_t bitshift;

	for (i = first_byte; i < (first_byte + bytes); i++) {
		bitshift = (uint8_t)(((first_byte + bytes - 1) - i)*BYTE_SIZE);
		idx |= ip[i] << bitshift;
	}
	return (prev_idx * TRIE_TBL8_GRP_NUM_ENT) + idx;
}

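/*
 * Read a single dataplane entry of the given next hop entry size
 */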
static inline uint64_t
get_val_by_p(void *p, uint8_t nh_sz)
{
	uint64_t val = 0;

	switch (nh_sz) {
	case RTE_FIB6_TRIE_2B:
		val = *(uint16_t *)p;
		break;
	case RTE_FIB6_TRIE_4B:
		val = *(uint32_t *)p;
		break;
	case RTE_FIB6_TRIE_8B:
		val = *(uint64_t *)p;
		break;
	}
	return val;
}

/*
 * recursively recycle tbl8's
 */
static void
recycle_root_path(struct rte_trie_tbl *dp, const uint8_t *ip_part,
	uint8_t common_tbl8, void *prev)
{
	void *p;
	uint64_t val;

	val = get_val_by_p(prev, dp->nh_sz);
	if (unlikely((val & TRIE_EXT_ENT) != TRIE_EXT_ENT))
		return;

	if (common_tbl8 != 0) {
		p = get_tbl_p_by_idx(dp->tbl8, (val >> 1) *
			TRIE_TBL8_GRP_NUM_ENT + *ip_part, dp->nh_sz);
		recycle_root_path(dp, ip_part + 1, common_tbl8 - 1, p);
	}
	tbl8_recycle(dp, prev, val >> 1);
}

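/*
 * Walk (allocating tbl8 groups as needed) the chain of tables covering
 * the bytes shared by both ends of the range being installed; on success
 * *tbl points to the first table in which the range ends diverge
 */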
static inline int
build_common_root(struct rte_trie_tbl *dp, const uint8_t *ip,
	int common_bytes, void **tbl)
{
	void *tbl_ptr = NULL;
	uint64_t *cur_tbl;
	uint64_t val;
	int i, j, idx, prev_idx = 0;

	cur_tbl = dp->tbl24;
	for (i = 3, j = 0; i <= common_bytes; i++) {
		idx = get_idx(ip, prev_idx, i - j, j);
		val = get_tbl_val_by_idx(cur_tbl, idx, dp->nh_sz);
		tbl_ptr = get_tbl_p_by_idx(cur_tbl, idx, dp->nh_sz);
		if ((val & TRIE_EXT_ENT) != TRIE_EXT_ENT) {
			idx = tbl8_alloc(dp, val);
			if (unlikely(idx < 0))
				return idx;
			write_to_dp(tbl_ptr, (idx << 1) |
				TRIE_EXT_ENT, dp->nh_sz, 1);
			prev_idx = idx;
		} else
			prev_idx = val >> 1;

		j = i;
		cur_tbl = dp->tbl8;
	}
	*tbl = get_tbl_p_by_idx(cur_tbl, prev_idx * TRIE_TBL8_GRP_NUM_ENT,
		dp->nh_sz);
	return 0;
}

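/*
 * Recursively write next_hop along the left (LEDGE) or right (REDGE)
 * edge of the range, descending len more bytes into tbl8 groups
 */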
static int
write_edge(struct rte_trie_tbl *dp, const uint8_t *ip_part, uint64_t next_hop,
	int len, enum edge edge, void *ent)
{
	uint64_t val = next_hop << 1;
	int tbl8_idx;
	int ret = 0;
	void *p;

	if (len != 0) {
		val = get_val_by_p(ent, dp->nh_sz);
		if ((val & TRIE_EXT_ENT) == TRIE_EXT_ENT)
			tbl8_idx = val >> 1;
		else {
			tbl8_idx = tbl8_alloc(dp, val);
			if (tbl8_idx < 0)
				return tbl8_idx;
			val = (tbl8_idx << 1) | TRIE_EXT_ENT;
		}
		p = get_tbl_p_by_idx(dp->tbl8, (tbl8_idx *
			TRIE_TBL8_GRP_NUM_ENT) + *ip_part, dp->nh_sz);
		ret = write_edge(dp, ip_part + 1, next_hop, len - 1, edge, p);
		if (ret < 0)
			return ret;
		if (edge == LEDGE) {
			write_to_dp((uint8_t *)p + (1 << dp->nh_sz),
				next_hop << 1, dp->nh_sz, UINT8_MAX - *ip_part);
		} else {
			write_to_dp(get_tbl_p_by_idx(dp->tbl8, tbl8_idx *
				TRIE_TBL8_GRP_NUM_ENT, dp->nh_sz),
				next_hop << 1, dp->nh_sz, *ip_part);
		}
		tbl8_recycle(dp, &val, tbl8_idx);
	}

	write_to_dp(ent, val, dp->nh_sz, 1);
	return ret;
}

#define IPV6_MAX_IDX	(RTE_FIB6_IPV6_ADDR_SIZE - 1)
#define TBL24_BYTES	3
#define TBL8_LEN	(RTE_FIB6_IPV6_ADDR_SIZE - TBL24_BYTES)

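/*
 * Install next_hop for the address range [ledge, r): write both range
 * edges down the trie and fill the entries between them in the common root
 */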
static int
install_to_dp(struct rte_trie_tbl *dp, const uint8_t *ledge, const uint8_t *r,
	uint64_t next_hop)
{
	void *common_root_tbl;
	void *ent;
	int ret;
	int i;
	int common_bytes;
	int llen, rlen;
	uint8_t redge[16];

	/* decrement redge by 1 */
	rte_rib6_copy_addr(redge, r);
	for (i = 15; i >= 0; i--) {
		redge[i]--;
		if (redge[i] != 0xff)
			break;
	}

	for (common_bytes = 0; common_bytes < 15; common_bytes++) {
		if (ledge[common_bytes] != redge[common_bytes])
			break;
	}

	ret = build_common_root(dp, ledge, common_bytes, &common_root_tbl);
	if (unlikely(ret != 0))
		return ret;
	/* first uncommon tbl8 byte idx */
	uint8_t first_tbl8_byte = RTE_MAX(common_bytes, TBL24_BYTES);

	for (i = IPV6_MAX_IDX; i > first_tbl8_byte; i--) {
		if (ledge[i] != 0)
			break;
	}

	llen = i - first_tbl8_byte + (common_bytes < 3);

	for (i = IPV6_MAX_IDX; i > first_tbl8_byte; i--) {
		if (redge[i] != UINT8_MAX)
			break;
	}
	rlen = i - first_tbl8_byte + (common_bytes < 3);

	/* first noncommon byte */
	uint8_t first_byte_idx = (common_bytes < 3) ? 0 : common_bytes;
	uint8_t first_idx_len = (common_bytes < 3) ? 3 : 1;

	uint32_t left_idx = get_idx(ledge, 0, first_idx_len, first_byte_idx);
	uint32_t right_idx = get_idx(redge, 0, first_idx_len, first_byte_idx);

	ent = get_tbl_p_by_idx(common_root_tbl, left_idx, dp->nh_sz);
	ret = write_edge(dp, &ledge[first_tbl8_byte + !(common_bytes < 3)],
		next_hop, llen, LEDGE, ent);
	if (ret < 0)
		return ret;

	if (right_idx > left_idx + 1) {
		ent = get_tbl_p_by_idx(common_root_tbl, left_idx + 1,
			dp->nh_sz);
		write_to_dp(ent, next_hop << 1, dp->nh_sz,
			right_idx - (left_idx + 1));
	}
	ent = get_tbl_p_by_idx(common_root_tbl, right_idx, dp->nh_sz);
	ret = write_edge(dp, &redge[first_tbl8_byte + !(common_bytes < 3)],
		next_hop, rlen, REDGE, ent);
	if (ret < 0)
		return ret;

	uint8_t	common_tbl8 = (common_bytes < TBL24_BYTES) ?
			0 : common_bytes - (TBL24_BYTES - 1);
	ent = get_tbl24_p(dp, ledge, dp->nh_sz);
	recycle_root_path(dp, ledge + TBL24_BYTES, common_tbl8, ent);
	return 0;
}

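/*
 * Advance ip to the first address of the next network of the given depth
 */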
static void
get_nxt_net(uint8_t *ip, uint8_t depth)
{
	int i;
	uint8_t part_depth;
	uint8_t prev_byte;

	for (i = 0, part_depth = depth; part_depth > 8; part_depth -= 8, i++)
		;

	prev_byte = ip[i];
	ip[i] += 1 << (8 - part_depth);
	if (ip[i] < prev_byte) {
		while (i > 0) {
			ip[--i] += 1;
			if (ip[i] != 0)
				break;
		}
	}
}

static int
v6_addr_is_zero(const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE])
{
	uint8_t ip_addr[RTE_FIB6_IPV6_ADDR_SIZE] = {0};

	return rte_rib6_is_equal(ip, ip_addr);
}

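/*
 * Update the dataplane for the given prefix: walk the more specific
 * routes covered by it and install next_hop into the gaps between them
 */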
static int
modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib,
	const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE],
	uint8_t depth, uint64_t next_hop)
{
	struct rte_rib6_node *tmp = NULL;
	uint8_t ledge[RTE_FIB6_IPV6_ADDR_SIZE];
	uint8_t redge[RTE_FIB6_IPV6_ADDR_SIZE];
	int ret;
	uint8_t tmp_depth;

	if (next_hop > get_max_nh(dp->nh_sz))
		return -EINVAL;

	rte_rib6_copy_addr(ledge, ip);
	do {
		tmp = rte_rib6_get_nxt(rib, ip, depth, tmp,
			RTE_RIB6_GET_NXT_COVER);
		if (tmp != NULL) {
			rte_rib6_get_depth(tmp, &tmp_depth);
			if (tmp_depth == depth)
				continue;
			rte_rib6_get_ip(tmp, redge);
			if (rte_rib6_is_equal(ledge, redge)) {
				get_nxt_net(ledge, tmp_depth);
				continue;
			}
			ret = install_to_dp(dp, ledge, redge,
				next_hop);
			if (ret != 0)
				return ret;
			get_nxt_net(redge, tmp_depth);
			rte_rib6_copy_addr(ledge, redge);
			/*
			 * we got to the end of address space
			 * and wrapped around
			 */
			if (v6_addr_is_zero(ledge))
				break;
		} else {
			rte_rib6_copy_addr(redge, ip);
			get_nxt_net(redge, depth);
			if (rte_rib6_is_equal(ledge, redge) &&
					!v6_addr_is_zero(ledge))
				break;

			ret = install_to_dp(dp, ledge, redge,
				next_hop);
			if (ret != 0)
				return ret;
		}
	} while (tmp);

	return 0;
}

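/*
 * Add or delete a route (op is RTE_FIB6_ADD or RTE_FIB6_DEL),
 * keeping the RIB and the dataplane tables consistent
 */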
int
trie_modify(struct rte_fib6 *fib, const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE],
	uint8_t depth, uint64_t next_hop, int op)
{
	struct rte_trie_tbl *dp;
	struct rte_rib6 *rib;
	struct rte_rib6_node *tmp = NULL;
	struct rte_rib6_node *node;
	struct rte_rib6_node *parent;
	uint8_t	ip_masked[RTE_FIB6_IPV6_ADDR_SIZE];
	int i, ret = 0;
	uint64_t par_nh, node_nh;
	uint8_t tmp_depth, depth_diff = 0, parent_depth = 24;

	if ((fib == NULL) || (ip == NULL) || (depth > RTE_FIB6_MAXDEPTH))
		return -EINVAL;

	dp = rte_fib6_get_dp(fib);
	RTE_ASSERT(dp);
	rib = rte_fib6_get_rib(fib);
	RTE_ASSERT(rib);

	for (i = 0; i < RTE_FIB6_IPV6_ADDR_SIZE; i++)
		ip_masked[i] = ip[i] & get_msk_part(depth, i);

	if (depth > 24) {
		tmp = rte_rib6_get_nxt(rib, ip_masked,
			RTE_ALIGN_FLOOR(depth, 8), NULL,
			RTE_RIB6_GET_NXT_COVER);
		if (tmp == NULL) {
			tmp = rte_rib6_lookup(rib, ip);
			if (tmp != NULL) {
				rte_rib6_get_depth(tmp, &tmp_depth);
				parent_depth = RTE_MAX(tmp_depth, 24);
			}
			depth_diff = RTE_ALIGN_CEIL(depth, 8) -
				RTE_ALIGN_CEIL(parent_depth, 8);
			depth_diff = depth_diff >> 3;
		}
	}
	node = rte_rib6_lookup_exact(rib, ip_masked, depth);
	switch (op) {
	case RTE_FIB6_ADD:
		if (node != NULL) {
			rte_rib6_get_nh(node, &node_nh);
			if (node_nh == next_hop)
				return 0;
			ret = modify_dp(dp, rib, ip_masked, depth, next_hop);
			if (ret == 0)
				rte_rib6_set_nh(node, next_hop);
			return 0;
		}

		if ((depth > 24) && (dp->rsvd_tbl8s >=
				dp->number_tbl8s - depth_diff))
			return -ENOSPC;

		node = rte_rib6_insert(rib, ip_masked, depth);
		if (node == NULL)
			return -rte_errno;
		rte_rib6_set_nh(node, next_hop);
		parent = rte_rib6_lookup_parent(node);
		if (parent != NULL) {
			rte_rib6_get_nh(parent, &par_nh);
			if (par_nh == next_hop)
				return 0;
		}
		ret = modify_dp(dp, rib, ip_masked, depth, next_hop);
		if (ret != 0) {
			rte_rib6_remove(rib, ip_masked, depth);
			return ret;
		}

		dp->rsvd_tbl8s += depth_diff;
		return 0;
	case RTE_FIB6_DEL:
		if (node == NULL)
			return -ENOENT;

		parent = rte_rib6_lookup_parent(node);
		if (parent != NULL) {
			rte_rib6_get_nh(parent, &par_nh);
			rte_rib6_get_nh(node, &node_nh);
			if (par_nh != node_nh)
				ret = modify_dp(dp, rib, ip_masked, depth,
					par_nh);
		} else
			ret = modify_dp(dp, rib, ip_masked, depth, dp->def_nh);

		if (ret != 0)
			return ret;
		rte_rib6_remove(rib, ip, depth);

		dp->rsvd_tbl8s -= depth_diff;
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

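/*
 * Allocate and initialize a trie dataplane: the tbl24, the tbl8 groups
 * and the pool of free tbl8 indexes; sets rte_errno on failure
 */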
void *
trie_create(const char *name, int socket_id,
	struct rte_fib6_conf *conf)
{
	char mem_name[TRIE_NAMESIZE];
	struct rte_trie_tbl *dp = NULL;
	uint64_t	def_nh;
	uint32_t	num_tbl8;
	enum rte_fib_trie_nh_sz	nh_sz;

	if ((name == NULL) || (conf == NULL) ||
			(conf->trie.nh_sz < RTE_FIB6_TRIE_2B) ||
			(conf->trie.nh_sz > RTE_FIB6_TRIE_8B) ||
			(conf->trie.num_tbl8 >
			get_max_nh(conf->trie.nh_sz)) ||
			(conf->trie.num_tbl8 == 0) ||
			(conf->default_nh >
			get_max_nh(conf->trie.nh_sz))) {

		rte_errno = EINVAL;
		return NULL;
	}

	def_nh = conf->default_nh;
	nh_sz = conf->trie.nh_sz;
	num_tbl8 = conf->trie.num_tbl8;

	snprintf(mem_name, sizeof(mem_name), "DP_%s", name);
	dp = rte_zmalloc_socket(name, sizeof(struct rte_trie_tbl) +
		TRIE_TBL24_NUM_ENT * (1 << nh_sz) + sizeof(uint32_t),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (dp == NULL) {
		rte_errno = ENOMEM;
		return dp;
	}

	write_to_dp(&dp->tbl24, (def_nh << 1), nh_sz, 1 << 24);

	snprintf(mem_name, sizeof(mem_name), "TBL8_%p", dp);
	dp->tbl8 = rte_zmalloc_socket(mem_name, TRIE_TBL8_GRP_NUM_ENT *
			(1ll << nh_sz) * (num_tbl8 + 1),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (dp->tbl8 == NULL) {
		rte_errno = ENOMEM;
		rte_free(dp);
		return NULL;
	}
	dp->def_nh = def_nh;
	dp->nh_sz = nh_sz;
	dp->number_tbl8s = num_tbl8;

	snprintf(mem_name, sizeof(mem_name), "TBL8_idxes_%p", dp);
	dp->tbl8_pool = rte_zmalloc_socket(mem_name,
			sizeof(uint32_t) * dp->number_tbl8s,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (dp->tbl8_pool == NULL) {
		rte_errno = ENOMEM;
		rte_free(dp->tbl8);
		rte_free(dp);
		return NULL;
	}

	tbl8_pool_init(dp);

	return dp;
}

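/*
 * Free the tbl8 pool, the tbl8 groups and the dataplane structure itself
 */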
void
trie_free(void *p)
{
	struct rte_trie_tbl *dp = (struct rte_trie_tbl *)p;

	rte_free(dp->tbl8_pool);
	rte_free(dp->tbl8);
	rte_free(dp);
}