/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>

#include <rte_debug.h>
#include <rte_malloc.h>
#include <rte_errno.h>

#include <rte_rib6.h>
#include <rte_fib6.h>
#include "trie.h"

#ifdef CC_TRIE_AVX512_SUPPORT

#include "trie_avx512.h"

#endif /* CC_TRIE_AVX512_SUPPORT */

#define TRIE_NAMESIZE		64

enum edge {
	LEDGE,
	REDGE
};

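/*
 * Map a next hop entry size to the matching scalar bulk lookup function.
 */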
static inline rte_fib6_lookup_fn_t
get_scalar_fn(enum rte_fib_trie_nh_sz nh_sz)
{
	switch (nh_sz) {
	case RTE_FIB6_TRIE_2B:
		return rte_trie_lookup_bulk_2b;
	case RTE_FIB6_TRIE_4B:
		return rte_trie_lookup_bulk_4b;
	case RTE_FIB6_TRIE_8B:
		return rte_trie_lookup_bulk_8b;
	default:
		return NULL;
	}
}

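/*
 * Map a next hop entry size to the matching AVX512 bulk lookup function.
 * Returns NULL when the binary was built without AVX512 support, when the
 * CPU lacks the AVX512F/DQ/BW flags, or when the configured maximum SIMD
 * bitwidth is below 512 bits.
 */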
static inline rte_fib6_lookup_fn_t
get_vector_fn(enum rte_fib_trie_nh_sz nh_sz)
{
#ifdef CC_TRIE_AVX512_SUPPORT
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) <= 0 ||
			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512DQ) <= 0 ||
			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) <= 0 ||
			rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)
		return NULL;
	switch (nh_sz) {
	case RTE_FIB6_TRIE_2B:
		return rte_trie_vec_lookup_bulk_2b;
	case RTE_FIB6_TRIE_4B:
		return rte_trie_vec_lookup_bulk_4b;
	case RTE_FIB6_TRIE_8B:
		return rte_trie_vec_lookup_bulk_8b;
	default:
		return NULL;
	}
#else
	RTE_SET_USED(nh_sz);
#endif
	return NULL;
}

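/*
 * Select a bulk lookup function for the given datapath and lookup type.
 * RTE_FIB6_LOOKUP_DEFAULT prefers the vector implementation and falls back
 * to the scalar one when AVX512 is unavailable.
 *
 * A minimal usage sketch (the "fib" handle, addresses and counts are
 * hypothetical; applications normally go through rte_fib6_lookup_bulk()
 * instead of calling this directly):
 *
 *	void *dp = rte_fib6_get_dp(fib);
 *	rte_fib6_lookup_fn_t fn =
 *		trie_get_lookup_fn(dp, RTE_FIB6_LOOKUP_DEFAULT);
 *	if (fn != NULL)
 *		fn(dp, ips, next_hops, n);
 */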
rte_fib6_lookup_fn_t
trie_get_lookup_fn(void *p, enum rte_fib6_lookup_type type)
{
	enum rte_fib_trie_nh_sz nh_sz;
	rte_fib6_lookup_fn_t ret_fn;
	struct rte_trie_tbl *dp = p;

	if (dp == NULL)
		return NULL;

	nh_sz = dp->nh_sz;

	switch (type) {
	case RTE_FIB6_LOOKUP_TRIE_SCALAR:
		return get_scalar_fn(nh_sz);
	case RTE_FIB6_LOOKUP_TRIE_VECTOR_AVX512:
		return get_vector_fn(nh_sz);
	case RTE_FIB6_LOOKUP_DEFAULT:
		ret_fn = get_vector_fn(nh_sz);
		return (ret_fn != NULL) ? ret_fn : get_scalar_fn(nh_sz);
	default:
		return NULL;
	}
	return NULL;
}

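/*
 * Write "val" into "n" consecutive table entries, using the entry width
 * selected by the configured next hop size.
 */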
static void
write_to_dp(void *ptr, uint64_t val, enum rte_fib_trie_nh_sz size, int n)
{
	int i;
	uint16_t *ptr16 = (uint16_t *)ptr;
	uint32_t *ptr32 = (uint32_t *)ptr;
	uint64_t *ptr64 = (uint64_t *)ptr;

	switch (size) {
	case RTE_FIB6_TRIE_2B:
		for (i = 0; i < n; i++)
			ptr16[i] = (uint16_t)val;
		break;
	case RTE_FIB6_TRIE_4B:
		for (i = 0; i < n; i++)
			ptr32[i] = (uint32_t)val;
		break;
	case RTE_FIB6_TRIE_8B:
		for (i = 0; i < n; i++)
			ptr64[i] = (uint64_t)val;
		break;
	}
}

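/*
 * Fill the pool with every tbl8 group index and reset the allocation
 * position to the start of the pool.
 */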
static void
tbl8_pool_init(struct rte_trie_tbl *dp)
{
	uint32_t i;

	/* put the entire range of indexes into the tbl8 pool */
	for (i = 0; i < dp->number_tbl8s; i++)
		dp->tbl8_pool[i] = i;

	dp->tbl8_pool_pos = 0;
}

/*
 * Get an index of a free tbl8 from the pool
 */
static inline int32_t
tbl8_get(struct rte_trie_tbl *dp)
{
	if (dp->tbl8_pool_pos == dp->number_tbl8s)
		/* no more free tbl8 */
		return -ENOSPC;

	/* next index */
	return dp->tbl8_pool[dp->tbl8_pool_pos++];
}

/*
 * Put an index of a free tbl8 back to the pool
 */
static inline void
tbl8_put(struct rte_trie_tbl *dp, uint32_t tbl8_ind)
{
	dp->tbl8_pool[--dp->tbl8_pool_pos] = tbl8_ind;
}

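/*
 * Allocate a tbl8 group from the pool and initialize all of its entries
 * with the next hop inherited from the parent entry. Returns the group
 * index on success, negative errno when the pool is exhausted.
 */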
static int
tbl8_alloc(struct rte_trie_tbl *dp, uint64_t nh)
{
	int64_t		tbl8_idx;
	uint8_t		*tbl8_ptr;

	tbl8_idx = tbl8_get(dp);
	if (tbl8_idx < 0)
		return tbl8_idx;
	tbl8_ptr = get_tbl_p_by_idx(dp->tbl8,
		tbl8_idx * TRIE_TBL8_GRP_NUM_ENT, dp->nh_sz);
	/* init tbl8 entries with the nexthop from the parent entry */
	write_to_dp((void *)tbl8_ptr, nh, dp->nh_sz,
		TRIE_TBL8_GRP_NUM_ENT);
	return tbl8_idx;
}

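/*
 * Collapse a tbl8 group that has become redundant: if every entry holds
 * the same non-extended next hop, write that next hop into the parent
 * entry "par", zero the group and return its index to the pool.
 */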
static void
tbl8_recycle(struct rte_trie_tbl *dp, void *par, uint64_t tbl8_idx)
{
	uint32_t i;
	uint64_t nh;
	uint16_t *ptr16;
	uint32_t *ptr32;
	uint64_t *ptr64;

	switch (dp->nh_sz) {
	case RTE_FIB6_TRIE_2B:
		ptr16 = &((uint16_t *)dp->tbl8)[tbl8_idx *
				TRIE_TBL8_GRP_NUM_ENT];
		nh = *ptr16;
		if (nh & TRIE_EXT_ENT)
			return;
		for (i = 1; i < TRIE_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr16[i])
				return;
		}
		write_to_dp(par, nh, dp->nh_sz, 1);
		for (i = 0; i < TRIE_TBL8_GRP_NUM_ENT; i++)
			ptr16[i] = 0;
		break;
	case RTE_FIB6_TRIE_4B:
		ptr32 = &((uint32_t *)dp->tbl8)[tbl8_idx *
				TRIE_TBL8_GRP_NUM_ENT];
		nh = *ptr32;
		if (nh & TRIE_EXT_ENT)
			return;
		for (i = 1; i < TRIE_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr32[i])
				return;
		}
		write_to_dp(par, nh, dp->nh_sz, 1);
		for (i = 0; i < TRIE_TBL8_GRP_NUM_ENT; i++)
			ptr32[i] = 0;
		break;
	case RTE_FIB6_TRIE_8B:
		ptr64 = &((uint64_t *)dp->tbl8)[tbl8_idx *
				TRIE_TBL8_GRP_NUM_ENT];
		nh = *ptr64;
		if (nh & TRIE_EXT_ENT)
			return;
		for (i = 1; i < TRIE_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr64[i])
				return;
		}
		write_to_dp(par, nh, dp->nh_sz, 1);
		for (i = 0; i < TRIE_TBL8_GRP_NUM_ENT; i++)
			ptr64[i] = 0;
		break;
	}
	tbl8_put(dp, tbl8_idx);
}

#define BYTE_SIZE	8
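/*
 * Build a table index from "bytes" consecutive address bytes starting at
 * byte "first_byte", offset into the tbl8 group selected by "prev_idx".
 */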
static inline uint32_t
get_idx(const struct rte_ipv6_addr *ip, uint32_t prev_idx, int bytes, int first_byte)
{
	int i;
	uint32_t idx = 0;
	uint8_t bitshift;

	for (i = first_byte; i < (first_byte + bytes); i++) {
		bitshift = (uint8_t)(((first_byte + bytes - 1) - i) * BYTE_SIZE);
		idx |= ip->a[i] << bitshift;
	}
	return (prev_idx * TRIE_TBL8_GRP_NUM_ENT) + idx;
}

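/* Read a single table entry of the given next hop size as a uint64_t. */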
static inline uint64_t
get_val_by_p(void *p, uint8_t nh_sz)
{
	uint64_t val = 0;

	switch (nh_sz) {
	case RTE_FIB6_TRIE_2B:
		val = *(uint16_t *)p;
		break;
	case RTE_FIB6_TRIE_4B:
		val = *(uint32_t *)p;
		break;
	case RTE_FIB6_TRIE_8B:
		val = *(uint64_t *)p;
		break;
	}
	return val;
}

/*
 * Recursively recycle tbl8s
 */
static void
recycle_root_path(struct rte_trie_tbl *dp, const uint8_t *ip_part,
	uint8_t common_tbl8, void *prev)
{
	void *p;
	uint64_t val;

	val = get_val_by_p(prev, dp->nh_sz);
	if (unlikely((val & TRIE_EXT_ENT) != TRIE_EXT_ENT))
		return;

	if (common_tbl8 != 0) {
		p = get_tbl_p_by_idx(dp->tbl8, (val >> 1) *
			TRIE_TBL8_GRP_NUM_ENT + *ip_part, dp->nh_sz);
		recycle_root_path(dp, ip_part + 1, common_tbl8 - 1, p);
	}
	tbl8_recycle(dp, prev, val >> 1);
}

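/*
 * Descend from tbl24 through the address bytes shared by both edges of
 * the range, allocating extended (tbl8) groups where none exist yet, and
 * return in *tbl a pointer to the table covering the first non-common byte.
 */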
static inline int
build_common_root(struct rte_trie_tbl *dp, const struct rte_ipv6_addr *ip,
	int common_bytes, void **tbl)
{
	void *tbl_ptr = NULL;
	uint64_t *cur_tbl;
	uint64_t val;
	int i, j, idx, prev_idx = 0;

	cur_tbl = dp->tbl24;
	for (i = 3, j = 0; i <= common_bytes; i++) {
		idx = get_idx(ip, prev_idx, i - j, j);
		val = get_tbl_val_by_idx(cur_tbl, idx, dp->nh_sz);
		tbl_ptr = get_tbl_p_by_idx(cur_tbl, idx, dp->nh_sz);
		if ((val & TRIE_EXT_ENT) != TRIE_EXT_ENT) {
			idx = tbl8_alloc(dp, val);
			if (unlikely(idx < 0))
				return idx;
			write_to_dp(tbl_ptr, (idx << 1) |
				TRIE_EXT_ENT, dp->nh_sz, 1);
			prev_idx = idx;
		} else
			prev_idx = val >> 1;

		j = i;
		cur_tbl = dp->tbl8;
	}
	*tbl = get_tbl_p_by_idx(cur_tbl, prev_idx * TRIE_TBL8_GRP_NUM_ENT,
		dp->nh_sz);
	return 0;
}

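/*
 * Recursively write "next_hop" along one edge of the range, descending
 * "len" more address bytes. On the way back up, the left edge fills every
 * entry above *ip_part in the child group, the right edge every entry
 * below it, and redundant groups are collapsed via tbl8_recycle().
 */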
static int
write_edge(struct rte_trie_tbl *dp, const uint8_t *ip_part, uint64_t next_hop,
	int len, enum edge edge, void *ent)
{
	uint64_t val = next_hop << 1;
	int tbl8_idx;
	int ret = 0;
	void *p;

	if (len != 0) {
		val = get_val_by_p(ent, dp->nh_sz);
		if ((val & TRIE_EXT_ENT) == TRIE_EXT_ENT)
			tbl8_idx = val >> 1;
		else {
			tbl8_idx = tbl8_alloc(dp, val);
			if (tbl8_idx < 0)
				return tbl8_idx;
			val = (tbl8_idx << 1) | TRIE_EXT_ENT;
		}
		p = get_tbl_p_by_idx(dp->tbl8, (tbl8_idx *
			TRIE_TBL8_GRP_NUM_ENT) + *ip_part, dp->nh_sz);
		ret = write_edge(dp, ip_part + 1, next_hop, len - 1, edge, p);
		if (ret < 0)
			return ret;
		if (edge == LEDGE) {
			write_to_dp((uint8_t *)p + (1 << dp->nh_sz),
				next_hop << 1, dp->nh_sz, UINT8_MAX - *ip_part);
		} else {
			write_to_dp(get_tbl_p_by_idx(dp->tbl8, tbl8_idx *
				TRIE_TBL8_GRP_NUM_ENT, dp->nh_sz),
				next_hop << 1, dp->nh_sz, *ip_part);
		}
		tbl8_recycle(dp, &val, tbl8_idx);
	}

	write_to_dp(ent, val, dp->nh_sz, 1);
	return ret;
}

#define IPV6_MAX_IDX	(RTE_IPV6_ADDR_SIZE - 1)
#define TBL24_BYTES	3
#define TBL8_LEN	(RTE_IPV6_ADDR_SIZE - TBL24_BYTES)

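/*
 * Install "next_hop" for the half-open address range [ledge, r): the right
 * edge is first decremented to make the range inclusive, the common root
 * covering both edges is built, both edge paths are written, and the span
 * of root entries strictly between the edges is filled directly.
 */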
static int
install_to_dp(struct rte_trie_tbl *dp, const struct rte_ipv6_addr *ledge,
	const struct rte_ipv6_addr *r, uint64_t next_hop)
{
	void *common_root_tbl;
	void *ent;
	int ret;
	int i;
	int common_bytes;
	int llen, rlen;
	struct rte_ipv6_addr redge;

	/* decrement redge by 1 */
	redge = *r;
	for (i = 15; i >= 0; i--) {
		redge.a[i]--;
		if (redge.a[i] != 0xff)
			break;
	}

	for (common_bytes = 0; common_bytes < 15; common_bytes++) {
		if (ledge->a[common_bytes] != redge.a[common_bytes])
			break;
	}

	ret = build_common_root(dp, ledge, common_bytes, &common_root_tbl);
	if (unlikely(ret != 0))
		return ret;
	/* first uncommon tbl8 byte idx */
	uint8_t first_tbl8_byte = RTE_MAX(common_bytes, TBL24_BYTES);

	for (i = IPV6_MAX_IDX; i > first_tbl8_byte; i--) {
		if (ledge->a[i] != 0)
			break;
	}

	llen = i - first_tbl8_byte + (common_bytes < 3);

	for (i = IPV6_MAX_IDX; i > first_tbl8_byte; i--) {
		if (redge.a[i] != UINT8_MAX)
			break;
	}
	rlen = i - first_tbl8_byte + (common_bytes < 3);

	/* first noncommon byte */
	uint8_t first_byte_idx = (common_bytes < 3) ? 0 : common_bytes;
	uint8_t first_idx_len = (common_bytes < 3) ? 3 : 1;

	uint32_t left_idx = get_idx(ledge, 0, first_idx_len, first_byte_idx);
	uint32_t right_idx = get_idx(&redge, 0, first_idx_len, first_byte_idx);

	ent = get_tbl_p_by_idx(common_root_tbl, left_idx, dp->nh_sz);
	ret = write_edge(dp, &ledge->a[first_tbl8_byte + !(common_bytes < 3)],
		next_hop, llen, LEDGE, ent);
	if (ret < 0)
		return ret;

	if (right_idx > left_idx + 1) {
		ent = get_tbl_p_by_idx(common_root_tbl, left_idx + 1,
			dp->nh_sz);
		write_to_dp(ent, next_hop << 1, dp->nh_sz,
			right_idx - (left_idx + 1));
	}
	ent = get_tbl_p_by_idx(common_root_tbl, right_idx, dp->nh_sz);
	ret = write_edge(dp, &redge.a[first_tbl8_byte + !(common_bytes < 3)],
		next_hop, rlen, REDGE, ent);
	if (ret < 0)
		return ret;

	uint8_t	common_tbl8 = (common_bytes < TBL24_BYTES) ?
			0 : common_bytes - (TBL24_BYTES - 1);
	ent = get_tbl24_p(dp, ledge, dp->nh_sz);
	recycle_root_path(dp, ledge->a + TBL24_BYTES, common_tbl8, ent);
	return 0;
}

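/*
 * Advance "ip" to the first address of the next network of the given
 * depth, propagating the carry into more significant bytes on overflow.
 */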
static void
get_nxt_net(struct rte_ipv6_addr *ip, uint8_t depth)
{
	int i;
	uint8_t part_depth;
	uint8_t prev_byte;

	for (i = 0, part_depth = depth; part_depth > 8; part_depth -= 8, i++)
		;

	prev_byte = ip->a[i];
	ip->a[i] += 1 << (8 - part_depth);
	if (ip->a[i] < prev_byte) {
		while (i > 0) {
			ip->a[--i] += 1;
			if (ip->a[i] != 0)
				break;
		}
	}
}

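/*
 * Write "next_hop" into the dataplane for every part of ip/depth that is
 * not covered by a more specific prefix: covered routes are enumerated
 * with rte_rib6_get_nxt() and the ranges between them are installed with
 * install_to_dp().
 */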
static int
modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib,
	const struct rte_ipv6_addr *ip,
	uint8_t depth, uint64_t next_hop)
{
	struct rte_rib6_node *tmp = NULL;
	struct rte_ipv6_addr ledge, redge;
	int ret;
	uint8_t tmp_depth;

	if (next_hop > get_max_nh(dp->nh_sz))
		return -EINVAL;

	ledge = *ip;
	do {
		tmp = rte_rib6_get_nxt(rib, ip, depth, tmp,
			RTE_RIB6_GET_NXT_COVER);
		if (tmp != NULL) {
			rte_rib6_get_depth(tmp, &tmp_depth);
			if (tmp_depth == depth)
				continue;
			rte_rib6_get_ip(tmp, &redge);
			if (rte_ipv6_addr_eq(&ledge, &redge)) {
				get_nxt_net(&ledge, tmp_depth);
				continue;
			}
			ret = install_to_dp(dp, &ledge, &redge, next_hop);
			if (ret != 0)
				return ret;
			get_nxt_net(&redge, tmp_depth);
			ledge = redge;
			/*
			 * we reached the end of the address space
			 * and wrapped around
			 */
			if (rte_ipv6_addr_is_unspec(&ledge))
				break;
		} else {
			redge = *ip;
			get_nxt_net(&redge, depth);
			if (rte_ipv6_addr_eq(&ledge, &redge) &&
					!rte_ipv6_addr_is_unspec(&ledge))
				break;

			ret = install_to_dp(dp, &ledge, &redge, next_hop);
			if (ret != 0)
				return ret;
		}
	} while (tmp);

	return 0;
}

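/*
 * Add or delete a route, keeping the RIB and the dataplane in sync.
 * For prefixes longer than 24 bits, depth_diff counts the tbl8 levels the
 * new route introduces below its covering route; they are accounted in
 * rsvd_tbl8s on add and released on delete, which keeps enough free
 * groups around for a later route deletion to succeed.
 */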
int
trie_modify(struct rte_fib6 *fib, const struct rte_ipv6_addr *ip,
	uint8_t depth, uint64_t next_hop, int op)
{
	struct rte_trie_tbl *dp;
	struct rte_rib6 *rib;
	struct rte_rib6_node *tmp = NULL;
	struct rte_rib6_node *node;
	struct rte_rib6_node *parent;
	struct rte_ipv6_addr ip_masked;
	int ret = 0;
	uint64_t par_nh, node_nh;
	uint8_t tmp_depth, depth_diff = 0, parent_depth = 24;

	if ((fib == NULL) || (ip == NULL) || (depth > RTE_IPV6_MAX_DEPTH))
		return -EINVAL;

	dp = rte_fib6_get_dp(fib);
	RTE_ASSERT(dp);
	rib = rte_fib6_get_rib(fib);
	RTE_ASSERT(rib);

	ip_masked = *ip;
	rte_ipv6_addr_mask(&ip_masked, depth);

	if (depth > 24) {
		tmp = rte_rib6_get_nxt(rib, &ip_masked,
			RTE_ALIGN_FLOOR(depth, 8), NULL,
			RTE_RIB6_GET_NXT_COVER);
		if (tmp == NULL) {
			tmp = rte_rib6_lookup(rib, ip);
			if (tmp != NULL) {
				rte_rib6_get_depth(tmp, &tmp_depth);
				parent_depth = RTE_MAX(tmp_depth, 24);
			}
			depth_diff = RTE_ALIGN_CEIL(depth, 8) -
				RTE_ALIGN_CEIL(parent_depth, 8);
			depth_diff = depth_diff >> 3;
		}
	}
	node = rte_rib6_lookup_exact(rib, &ip_masked, depth);
	switch (op) {
	case RTE_FIB6_ADD:
		if (node != NULL) {
			rte_rib6_get_nh(node, &node_nh);
			if (node_nh == next_hop)
				return 0;
			ret = modify_dp(dp, rib, &ip_masked, depth, next_hop);
			if (ret == 0)
				rte_rib6_set_nh(node, next_hop);
			return ret;
		}

		if ((depth > 24) && (dp->rsvd_tbl8s >=
				dp->number_tbl8s - depth_diff))
			return -ENOSPC;

		node = rte_rib6_insert(rib, &ip_masked, depth);
		if (node == NULL)
			return -rte_errno;
		rte_rib6_set_nh(node, next_hop);
		parent = rte_rib6_lookup_parent(node);
		if (parent != NULL) {
			rte_rib6_get_nh(parent, &par_nh);
			if (par_nh == next_hop)
				return 0;
		}
		ret = modify_dp(dp, rib, &ip_masked, depth, next_hop);
		if (ret != 0) {
			rte_rib6_remove(rib, &ip_masked, depth);
			return ret;
		}

		dp->rsvd_tbl8s += depth_diff;
		return 0;
	case RTE_FIB6_DEL:
		if (node == NULL)
			return -ENOENT;

		parent = rte_rib6_lookup_parent(node);
		if (parent != NULL) {
			rte_rib6_get_nh(parent, &par_nh);
			rte_rib6_get_nh(node, &node_nh);
			if (par_nh != node_nh)
				ret = modify_dp(dp, rib, &ip_masked, depth,
					par_nh);
		} else
			ret = modify_dp(dp, rib, &ip_masked, depth, dp->def_nh);

		if (ret != 0)
			return ret;
		rte_rib6_remove(rib, ip, depth);

		dp->rsvd_tbl8s -= depth_diff;
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

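/*
 * Allocate and initialize a trie dataplane on the given socket: validate
 * the configuration, allocate tbl24 (filled with the default next hop),
 * the tbl8 area of num_tbl8 + 1 groups and the pool of free tbl8 indexes.
 *
 * A hypothetical configuration sketch (only the fields read here are
 * shown; rte_fib6_conf has more):
 *
 *	struct rte_fib6_conf conf = {
 *		.default_nh = 0,
 *		.trie = {
 *			.nh_sz = RTE_FIB6_TRIE_4B,
 *			.num_tbl8 = 1 << 15,
 *		},
 *	};
 */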
void *
trie_create(const char *name, int socket_id,
	struct rte_fib6_conf *conf)
{
	char mem_name[TRIE_NAMESIZE];
	struct rte_trie_tbl *dp = NULL;
	uint64_t	def_nh;
	uint32_t	num_tbl8;
	enum rte_fib_trie_nh_sz	nh_sz;

	if ((name == NULL) || (conf == NULL) ||
			(conf->trie.nh_sz < RTE_FIB6_TRIE_2B) ||
			(conf->trie.nh_sz > RTE_FIB6_TRIE_8B) ||
			(conf->trie.num_tbl8 >
			get_max_nh(conf->trie.nh_sz)) ||
			(conf->trie.num_tbl8 == 0) ||
			(conf->default_nh >
			get_max_nh(conf->trie.nh_sz))) {

		rte_errno = EINVAL;
		return NULL;
	}

	def_nh = conf->default_nh;
	nh_sz = conf->trie.nh_sz;
	num_tbl8 = conf->trie.num_tbl8;

	snprintf(mem_name, sizeof(mem_name), "DP_%s", name);
	dp = rte_zmalloc_socket(name, sizeof(struct rte_trie_tbl) +
		TRIE_TBL24_NUM_ENT * (1 << nh_sz) + sizeof(uint32_t),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (dp == NULL) {
		rte_errno = ENOMEM;
		return dp;
	}

	write_to_dp(&dp->tbl24, (def_nh << 1), nh_sz, 1 << 24);

	snprintf(mem_name, sizeof(mem_name), "TBL8_%p", dp);
	dp->tbl8 = rte_zmalloc_socket(mem_name, TRIE_TBL8_GRP_NUM_ENT *
			(1ll << nh_sz) * (num_tbl8 + 1),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (dp->tbl8 == NULL) {
		rte_errno = ENOMEM;
		rte_free(dp);
		return NULL;
	}
	dp->def_nh = def_nh;
	dp->nh_sz = nh_sz;
	dp->number_tbl8s = num_tbl8;

	snprintf(mem_name, sizeof(mem_name), "TBL8_idxes_%p", dp);
	dp->tbl8_pool = rte_zmalloc_socket(mem_name,
			sizeof(uint32_t) * dp->number_tbl8s,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (dp->tbl8_pool == NULL) {
		rte_errno = ENOMEM;
		rte_free(dp->tbl8);
		rte_free(dp);
		return NULL;
	}

	tbl8_pool_init(dp);

	return dp;
}

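/* Release the tbl8 pool, the tbl8 area and the dataplane itself. */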
void
trie_free(void *p)
{
	struct rte_trie_tbl *dp = (struct rte_trie_tbl *)p;

	rte_free(dp->tbl8_pool);
	rte_free(dp->tbl8);
	rte_free(dp);
}