/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>

#include <rte_debug.h>
#include <rte_malloc.h>
#include <rte_errno.h>

#include <rte_rib6.h>
#include <rte_fib6.h>
#include "trie.h"

#ifdef CC_TRIE_AVX512_SUPPORT

#include "trie_avx512.h"

#endif /* CC_TRIE_AVX512_SUPPORT */

#define TRIE_NAMESIZE		64

enum edge {
	LEDGE,
	REDGE
};

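/*
 * Select the scalar bulk lookup function
 * corresponding to the next hop entry size
 */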
static inline rte_fib6_lookup_fn_t
get_scalar_fn(enum rte_fib_trie_nh_sz nh_sz)
{
	switch (nh_sz) {
	case RTE_FIB6_TRIE_2B:
		return rte_trie_lookup_bulk_2b;
	case RTE_FIB6_TRIE_4B:
		return rte_trie_lookup_bulk_4b;
	case RTE_FIB6_TRIE_8B:
		return rte_trie_lookup_bulk_8b;
	default:
		return NULL;
	}
}

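/*
 * Select the AVX512 bulk lookup function corresponding to the next hop
 * entry size; returns NULL if AVX512 is unavailable at build or run time
 */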
static inline rte_fib6_lookup_fn_t
get_vector_fn(enum rte_fib_trie_nh_sz nh_sz)
{
#ifdef CC_TRIE_AVX512_SUPPORT
	if ((rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) <= 0) ||
			(rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512))
		return NULL;
	switch (nh_sz) {
	case RTE_FIB6_TRIE_2B:
		return rte_trie_vec_lookup_bulk_2b;
	case RTE_FIB6_TRIE_4B:
		return rte_trie_vec_lookup_bulk_4b;
	case RTE_FIB6_TRIE_8B:
		return rte_trie_vec_lookup_bulk_8b;
	default:
		return NULL;
	}
#else
	RTE_SET_USED(nh_sz);
#endif
	return NULL;
}

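/*
 * Return the lookup function of the requested type for the given dataplane.
 * For RTE_FIB6_LOOKUP_DEFAULT the vector path is preferred when usable.
 */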
rte_fib6_lookup_fn_t
trie_get_lookup_fn(void *p, enum rte_fib6_lookup_type type)
{
	enum rte_fib_trie_nh_sz nh_sz;
	rte_fib6_lookup_fn_t ret_fn;
	struct rte_trie_tbl *dp = p;

	if (dp == NULL)
		return NULL;

	nh_sz = dp->nh_sz;

	switch (type) {
	case RTE_FIB6_LOOKUP_TRIE_SCALAR:
		return get_scalar_fn(nh_sz);
	case RTE_FIB6_LOOKUP_TRIE_VECTOR_AVX512:
		return get_vector_fn(nh_sz);
	case RTE_FIB6_LOOKUP_DEFAULT:
		ret_fn = get_vector_fn(nh_sz);
		return (ret_fn != NULL) ? ret_fn : get_scalar_fn(nh_sz);
	default:
		return NULL;
	}
	return NULL;
}

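/*
 * Write value 'val' into 'n' consecutive dataplane entries of size 'size'
 */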
static void
write_to_dp(void *ptr, uint64_t val, enum rte_fib_trie_nh_sz size, int n)
{
	int i;
	uint16_t *ptr16 = (uint16_t *)ptr;
	uint32_t *ptr32 = (uint32_t *)ptr;
	uint64_t *ptr64 = (uint64_t *)ptr;

	switch (size) {
	case RTE_FIB6_TRIE_2B:
		for (i = 0; i < n; i++)
			ptr16[i] = (uint16_t)val;
		break;
	case RTE_FIB6_TRIE_4B:
		for (i = 0; i < n; i++)
			ptr32[i] = (uint32_t)val;
		break;
	case RTE_FIB6_TRIE_8B:
		for (i = 0; i < n; i++)
			ptr64[i] = (uint64_t)val;
		break;
	}
}

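/*
 * Fill the pool with all tbl8 group indexes and reset its position
 */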
static void
tbl8_pool_init(struct rte_trie_tbl *dp)
{
	uint32_t i;

	/* put the entire range of indexes into the tbl8 pool */
	for (i = 0; i < dp->number_tbl8s; i++)
		dp->tbl8_pool[i] = i;

	dp->tbl8_pool_pos = 0;
}

/*
 * Get an index of a free tbl8 from the pool
 */
static inline int32_t
tbl8_get(struct rte_trie_tbl *dp)
{
	if (dp->tbl8_pool_pos == dp->number_tbl8s)
		/* no more free tbl8 */
		return -ENOSPC;

	/* next index */
	return dp->tbl8_pool[dp->tbl8_pool_pos++];
}

/*
 * Put an index of a free tbl8 back into the pool
 */
static inline void
tbl8_put(struct rte_trie_tbl *dp, uint32_t tbl8_ind)
{
	dp->tbl8_pool[--dp->tbl8_pool_pos] = tbl8_ind;
}

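/*
 * Allocate a tbl8 group and initialize all of its entries
 * with the next hop inherited from the parent entry
 */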
static int
tbl8_alloc(struct rte_trie_tbl *dp, uint64_t nh)
{
	int64_t		tbl8_idx;
	uint8_t		*tbl8_ptr;

	tbl8_idx = tbl8_get(dp);
	if (tbl8_idx < 0)
		return tbl8_idx;
	tbl8_ptr = get_tbl_p_by_idx(dp->tbl8,
		tbl8_idx * TRIE_TBL8_GRP_NUM_ENT, dp->nh_sz);
	/* init tbl8 entries with next hop from tbl24 */
	write_to_dp((void *)tbl8_ptr, nh, dp->nh_sz,
		TRIE_TBL8_GRP_NUM_ENT);
	return tbl8_idx;
}

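/*
 * If every entry of a tbl8 group holds the same non-extended next hop,
 * collapse the group into its parent entry 'par' and return it to the pool
 */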
static void
tbl8_recycle(struct rte_trie_tbl *dp, void *par, uint64_t tbl8_idx)
{
	uint32_t i;
	uint64_t nh;
	uint16_t *ptr16;
	uint32_t *ptr32;
	uint64_t *ptr64;

	switch (dp->nh_sz) {
	case RTE_FIB6_TRIE_2B:
		ptr16 = &((uint16_t *)dp->tbl8)[tbl8_idx *
				TRIE_TBL8_GRP_NUM_ENT];
		nh = *ptr16;
		if (nh & TRIE_EXT_ENT)
			return;
		for (i = 1; i < TRIE_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr16[i])
				return;
		}
		write_to_dp(par, nh, dp->nh_sz, 1);
		for (i = 0; i < TRIE_TBL8_GRP_NUM_ENT; i++)
			ptr16[i] = 0;
		break;
	case RTE_FIB6_TRIE_4B:
		ptr32 = &((uint32_t *)dp->tbl8)[tbl8_idx *
				TRIE_TBL8_GRP_NUM_ENT];
		nh = *ptr32;
		if (nh & TRIE_EXT_ENT)
			return;
		for (i = 1; i < TRIE_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr32[i])
				return;
		}
		write_to_dp(par, nh, dp->nh_sz, 1);
		for (i = 0; i < TRIE_TBL8_GRP_NUM_ENT; i++)
			ptr32[i] = 0;
		break;
	case RTE_FIB6_TRIE_8B:
		ptr64 = &((uint64_t *)dp->tbl8)[tbl8_idx *
				TRIE_TBL8_GRP_NUM_ENT];
		nh = *ptr64;
		if (nh & TRIE_EXT_ENT)
			return;
		for (i = 1; i < TRIE_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr64[i])
				return;
		}
		write_to_dp(par, nh, dp->nh_sz, 1);
		for (i = 0; i < TRIE_TBL8_GRP_NUM_ENT; i++)
			ptr64[i] = 0;
		break;
	}
	tbl8_put(dp, tbl8_idx);
}

#define BYTE_SIZE	8
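/*
 * Build a table index from 'bytes' bytes of the IP address starting
 * at 'first_byte', offset into the group selected by 'prev_idx'
 */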
static inline uint32_t
get_idx(const uint8_t *ip, uint32_t prev_idx, int bytes, int first_byte)
{
	int i;
	uint32_t idx = 0;
	uint8_t bitshift;

	for (i = first_byte; i < (first_byte + bytes); i++) {
		bitshift = (uint8_t)(((first_byte + bytes - 1) - i) * BYTE_SIZE);
		idx |= ip[i] << bitshift;
	}
	return (prev_idx * TRIE_TBL8_GRP_NUM_ENT) + idx;
}

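/*
 * Read a dataplane entry of size 'nh_sz' as a 64-bit value
 */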
static inline uint64_t
get_val_by_p(void *p, uint8_t nh_sz)
{
	uint64_t val = 0;

	switch (nh_sz) {
	case RTE_FIB6_TRIE_2B:
		val = *(uint16_t *)p;
		break;
	case RTE_FIB6_TRIE_4B:
		val = *(uint32_t *)p;
		break;
	case RTE_FIB6_TRIE_8B:
		val = *(uint64_t *)p;
		break;
	}
	return val;
}

/*
 * Recursively recycle tbl8s along the common root path
 */
static void
recycle_root_path(struct rte_trie_tbl *dp, const uint8_t *ip_part,
	uint8_t common_tbl8, void *prev)
{
	void *p;
	uint64_t val;

	val = get_val_by_p(prev, dp->nh_sz);
	if (unlikely((val & TRIE_EXT_ENT) != TRIE_EXT_ENT))
		return;

	if (common_tbl8 != 0) {
		p = get_tbl_p_by_idx(dp->tbl8, (val >> 1) *
			TRIE_TBL8_GRP_NUM_ENT + *ip_part, dp->nh_sz);
		recycle_root_path(dp, ip_part + 1, common_tbl8 - 1, p);
	}
	tbl8_recycle(dp, prev, val >> 1);
}

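/*
 * Descend from tbl24 along the bytes common to both edges of the range,
 * extending entries with tbl8 groups as needed; on success '*tbl' points
 * to the table covering the first uncommon byte
 */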
static inline int
build_common_root(struct rte_trie_tbl *dp, const uint8_t *ip,
	int common_bytes, void **tbl)
{
	void *tbl_ptr = NULL;
	uint64_t *cur_tbl;
	uint64_t val;
	int i, j, idx, prev_idx = 0;

	cur_tbl = dp->tbl24;
	for (i = 3, j = 0; i <= common_bytes; i++) {
		idx = get_idx(ip, prev_idx, i - j, j);
		val = get_tbl_val_by_idx(cur_tbl, idx, dp->nh_sz);
		tbl_ptr = get_tbl_p_by_idx(cur_tbl, idx, dp->nh_sz);
		if ((val & TRIE_EXT_ENT) != TRIE_EXT_ENT) {
			idx = tbl8_alloc(dp, val);
			if (unlikely(idx < 0))
				return idx;
			write_to_dp(tbl_ptr, (idx << 1) |
				TRIE_EXT_ENT, dp->nh_sz, 1);
			prev_idx = idx;
		} else
			prev_idx = val >> 1;

		j = i;
		cur_tbl = dp->tbl8;
	}
	*tbl = get_tbl_p_by_idx(cur_tbl, prev_idx * TRIE_TBL8_GRP_NUM_ENT,
		dp->nh_sz);
	return 0;
}

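/*
 * Recursively write 'next_hop' along the left or right edge of the range,
 * filling the part of each tbl8 group that lies inside the range
 */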
static int
write_edge(struct rte_trie_tbl *dp, const uint8_t *ip_part, uint64_t next_hop,
	int len, enum edge edge, void *ent)
{
	uint64_t val = next_hop << 1;
	int tbl8_idx;
	int ret = 0;
	void *p;

	if (len != 0) {
		val = get_val_by_p(ent, dp->nh_sz);
		if ((val & TRIE_EXT_ENT) == TRIE_EXT_ENT)
			tbl8_idx = val >> 1;
		else {
			tbl8_idx = tbl8_alloc(dp, val);
			if (tbl8_idx < 0)
				return tbl8_idx;
			val = (tbl8_idx << 1) | TRIE_EXT_ENT;
		}
		p = get_tbl_p_by_idx(dp->tbl8, (tbl8_idx *
			TRIE_TBL8_GRP_NUM_ENT) + *ip_part, dp->nh_sz);
		ret = write_edge(dp, ip_part + 1, next_hop, len - 1, edge, p);
		if (ret < 0)
			return ret;
		if (edge == LEDGE) {
			write_to_dp((uint8_t *)p + (1 << dp->nh_sz),
				next_hop << 1, dp->nh_sz, UINT8_MAX - *ip_part);
		} else {
			write_to_dp(get_tbl_p_by_idx(dp->tbl8, tbl8_idx *
				TRIE_TBL8_GRP_NUM_ENT, dp->nh_sz),
				next_hop << 1, dp->nh_sz, *ip_part);
		}
		tbl8_recycle(dp, &val, tbl8_idx);
	}

	write_to_dp(ent, val, dp->nh_sz, 1);
	return ret;
}

#define IPV6_MAX_IDX	(RTE_FIB6_IPV6_ADDR_SIZE - 1)
#define TBL24_BYTES	3
#define TBL8_LEN	(RTE_FIB6_IPV6_ADDR_SIZE - TBL24_BYTES)

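/*
 * Install 'next_hop' for the address range [ledge, r): build the common
 * root, fill the entries strictly between the two edges, then write each
 * edge and recycle tbl8 groups that became redundant
 */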
static int
install_to_dp(struct rte_trie_tbl *dp, const uint8_t *ledge, const uint8_t *r,
	uint64_t next_hop)
{
	void *common_root_tbl;
	void *ent;
	int ret;
	int i;
	int common_bytes;
	int llen, rlen;
	uint8_t redge[RTE_FIB6_IPV6_ADDR_SIZE];

	/* decrement redge by 1 */
	rte_rib6_copy_addr(redge, r);
	for (i = 15; i >= 0; i--) {
		redge[i]--;
		if (redge[i] != 0xff)
			break;
	}

	for (common_bytes = 0; common_bytes < 15; common_bytes++) {
		if (ledge[common_bytes] != redge[common_bytes])
			break;
	}

	ret = build_common_root(dp, ledge, common_bytes, &common_root_tbl);
	if (unlikely(ret != 0))
		return ret;
	/* first uncommon tbl8 byte idx */
	uint8_t first_tbl8_byte = RTE_MAX(common_bytes, TBL24_BYTES);

	for (i = IPV6_MAX_IDX; i > first_tbl8_byte; i--) {
		if (ledge[i] != 0)
			break;
	}

	llen = i - first_tbl8_byte + (common_bytes < 3);

	for (i = IPV6_MAX_IDX; i > first_tbl8_byte; i--) {
		if (redge[i] != UINT8_MAX)
			break;
	}
	rlen = i - first_tbl8_byte + (common_bytes < 3);

	/* first noncommon byte */
	uint8_t first_byte_idx = (common_bytes < 3) ? 0 : common_bytes;
	uint8_t first_idx_len = (common_bytes < 3) ? 3 : 1;

	uint32_t left_idx = get_idx(ledge, 0, first_idx_len, first_byte_idx);
	uint32_t right_idx = get_idx(redge, 0, first_idx_len, first_byte_idx);

	ent = get_tbl_p_by_idx(common_root_tbl, left_idx, dp->nh_sz);
	ret = write_edge(dp, &ledge[first_tbl8_byte + !(common_bytes < 3)],
		next_hop, llen, LEDGE, ent);
	if (ret < 0)
		return ret;

	if (right_idx > left_idx + 1) {
		ent = get_tbl_p_by_idx(common_root_tbl, left_idx + 1,
			dp->nh_sz);
		write_to_dp(ent, next_hop << 1, dp->nh_sz,
			right_idx - (left_idx + 1));
	}
	ent = get_tbl_p_by_idx(common_root_tbl, right_idx, dp->nh_sz);
	ret = write_edge(dp, &redge[first_tbl8_byte + !(common_bytes < 3)],
		next_hop, rlen, REDGE, ent);
	if (ret < 0)
		return ret;

	uint8_t	common_tbl8 = (common_bytes < TBL24_BYTES) ?
			0 : common_bytes - (TBL24_BYTES - 1);
	ent = get_tbl24_p(dp, ledge, dp->nh_sz);
	recycle_root_path(dp, ledge + TBL24_BYTES, common_tbl8, ent);
	return 0;
}

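/*
 * Advance 'ip' to the first address of the next network of the given depth
 */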
static void
get_nxt_net(uint8_t *ip, uint8_t depth)
{
	int i;
	uint8_t part_depth;
	uint8_t prev_byte;

	for (i = 0, part_depth = depth; part_depth > 8; part_depth -= 8, i++)
		;

	prev_byte = ip[i];
	ip[i] += 1 << (8 - part_depth);
	if (ip[i] < prev_byte) {
		while (i > 0) {
			ip[--i] += 1;
			if (ip[i] != 0)
				break;
		}
	}
}

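/*
 * Write 'next_hop' into every part of ip/depth that is not covered
 * by a more specific prefix present in the RIB
 */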
static int
modify_dp(struct rte_trie_tbl *dp, struct rte_rib6 *rib,
	const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE],
	uint8_t depth, uint64_t next_hop)
{
	struct rte_rib6_node *tmp = NULL;
	uint8_t ledge[RTE_FIB6_IPV6_ADDR_SIZE];
	uint8_t redge[RTE_FIB6_IPV6_ADDR_SIZE];
	int ret;
	uint8_t tmp_depth;

	if (next_hop > get_max_nh(dp->nh_sz))
		return -EINVAL;

	rte_rib6_copy_addr(ledge, ip);
	do {
		tmp = rte_rib6_get_nxt(rib, ip, depth, tmp,
			RTE_RIB6_GET_NXT_COVER);
		if (tmp != NULL) {
			rte_rib6_get_depth(tmp, &tmp_depth);
			if (tmp_depth == depth)
				continue;
			rte_rib6_get_ip(tmp, redge);
			if (rte_rib6_is_equal(ledge, redge)) {
				get_nxt_net(ledge, tmp_depth);
				continue;
			}
			ret = install_to_dp(dp, ledge, redge,
				next_hop);
			if (ret != 0)
				return ret;
			get_nxt_net(redge, tmp_depth);
			rte_rib6_copy_addr(ledge, redge);
		} else {
			rte_rib6_copy_addr(redge, ip);
			get_nxt_net(redge, depth);
			if (rte_rib6_is_equal(ledge, redge))
				break;
			ret = install_to_dp(dp, ledge, redge,
				next_hop);
			if (ret != 0)
				return ret;
		}
	} while (tmp);

	return 0;
}

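/*
 * Add or delete the route ip/depth -> next_hop in the RIB
 * and reflect the change in the dataplane tables
 */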
int
trie_modify(struct rte_fib6 *fib, const uint8_t ip[RTE_FIB6_IPV6_ADDR_SIZE],
	uint8_t depth, uint64_t next_hop, int op)
{
	struct rte_trie_tbl *dp;
	struct rte_rib6 *rib;
	struct rte_rib6_node *tmp = NULL;
	struct rte_rib6_node *node;
	struct rte_rib6_node *parent;
	uint8_t	ip_masked[RTE_FIB6_IPV6_ADDR_SIZE];
	int i, ret = 0;
	uint64_t par_nh, node_nh;
	uint8_t tmp_depth, depth_diff = 0, parent_depth = 24;

	if ((fib == NULL) || (ip == NULL) || (depth > RTE_FIB6_MAXDEPTH))
		return -EINVAL;

	dp = rte_fib6_get_dp(fib);
	RTE_ASSERT(dp);
	rib = rte_fib6_get_rib(fib);
	RTE_ASSERT(rib);

	for (i = 0; i < RTE_FIB6_IPV6_ADDR_SIZE; i++)
		ip_masked[i] = ip[i] & get_msk_part(depth, i);

	if (depth > 24) {
		tmp = rte_rib6_get_nxt(rib, ip_masked,
			RTE_ALIGN_FLOOR(depth, 8), NULL,
			RTE_RIB6_GET_NXT_COVER);
		if (tmp == NULL) {
			tmp = rte_rib6_lookup(rib, ip);
			if (tmp != NULL) {
				rte_rib6_get_depth(tmp, &tmp_depth);
				parent_depth = RTE_MAX(tmp_depth, 24);
			}
			depth_diff = RTE_ALIGN_CEIL(depth, 8) -
				RTE_ALIGN_CEIL(parent_depth, 8);
			depth_diff = depth_diff >> 3;
		}
	}
	node = rte_rib6_lookup_exact(rib, ip_masked, depth);
	switch (op) {
	case RTE_FIB6_ADD:
		if (node != NULL) {
			rte_rib6_get_nh(node, &node_nh);
			if (node_nh == next_hop)
				return 0;
			ret = modify_dp(dp, rib, ip_masked, depth, next_hop);
			if (ret == 0)
				rte_rib6_set_nh(node, next_hop);
			return 0;
		}

		if ((depth > 24) && (dp->rsvd_tbl8s >=
				dp->number_tbl8s - depth_diff))
			return -ENOSPC;

		node = rte_rib6_insert(rib, ip_masked, depth);
		if (node == NULL)
			return -rte_errno;
		rte_rib6_set_nh(node, next_hop);
		parent = rte_rib6_lookup_parent(node);
		if (parent != NULL) {
			rte_rib6_get_nh(parent, &par_nh);
			if (par_nh == next_hop)
				return 0;
		}
		ret = modify_dp(dp, rib, ip_masked, depth, next_hop);
		if (ret != 0) {
			rte_rib6_remove(rib, ip_masked, depth);
			return ret;
		}

		dp->rsvd_tbl8s += depth_diff;
		return 0;
	case RTE_FIB6_DEL:
		if (node == NULL)
			return -ENOENT;

		parent = rte_rib6_lookup_parent(node);
		if (parent != NULL) {
			rte_rib6_get_nh(parent, &par_nh);
			rte_rib6_get_nh(node, &node_nh);
			if (par_nh != node_nh)
				ret = modify_dp(dp, rib, ip_masked, depth,
					par_nh);
		} else
			ret = modify_dp(dp, rib, ip_masked, depth, dp->def_nh);

		if (ret != 0)
			return ret;
		rte_rib6_remove(rib, ip, depth);

		dp->rsvd_tbl8s -= depth_diff;
		return 0;
	default:
		break;
	}
	return -EINVAL;
}

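/*
 * Allocate and initialize a trie dataplane:
 * the tbl24, the tbl8 groups and the pool of free tbl8 indexes
 */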
void *
trie_create(const char *name, int socket_id,
	struct rte_fib6_conf *conf)
{
	char mem_name[TRIE_NAMESIZE];
	struct rte_trie_tbl *dp = NULL;
	uint64_t	def_nh;
	uint32_t	num_tbl8;
	enum rte_fib_trie_nh_sz	nh_sz;

	if ((name == NULL) || (conf == NULL) ||
			(conf->trie.nh_sz < RTE_FIB6_TRIE_2B) ||
			(conf->trie.nh_sz > RTE_FIB6_TRIE_8B) ||
			(conf->trie.num_tbl8 >
			get_max_nh(conf->trie.nh_sz)) ||
			(conf->trie.num_tbl8 == 0) ||
			(conf->default_nh >
			get_max_nh(conf->trie.nh_sz))) {

		rte_errno = EINVAL;
		return NULL;
	}

	def_nh = conf->default_nh;
	nh_sz = conf->trie.nh_sz;
	num_tbl8 = conf->trie.num_tbl8;

	snprintf(mem_name, sizeof(mem_name), "DP_%s", name);
	dp = rte_zmalloc_socket(mem_name, sizeof(struct rte_trie_tbl) +
		TRIE_TBL24_NUM_ENT * (1 << nh_sz), RTE_CACHE_LINE_SIZE,
		socket_id);
	if (dp == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	write_to_dp(&dp->tbl24, (def_nh << 1), nh_sz, 1 << 24);

	snprintf(mem_name, sizeof(mem_name), "TBL8_%p", dp);
	dp->tbl8 = rte_zmalloc_socket(mem_name, TRIE_TBL8_GRP_NUM_ENT *
			(1ll << nh_sz) * (num_tbl8 + 1),
			RTE_CACHE_LINE_SIZE, socket_id);
	if (dp->tbl8 == NULL) {
		rte_errno = ENOMEM;
		rte_free(dp);
		return NULL;
	}
	dp->def_nh = def_nh;
	dp->nh_sz = nh_sz;
	dp->number_tbl8s = num_tbl8;

	snprintf(mem_name, sizeof(mem_name), "TBL8_idxes_%p", dp);
	dp->tbl8_pool = rte_zmalloc_socket(mem_name,
			sizeof(uint32_t) * dp->number_tbl8s,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (dp->tbl8_pool == NULL) {
		rte_errno = ENOMEM;
		rte_free(dp->tbl8);
		rte_free(dp);
		return NULL;
	}

	tbl8_pool_init(dp);

	return dp;
}

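/*
 * Free the dataplane and all memory associated with it
 */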
void
trie_free(void *p)
{
	struct rte_trie_tbl *dp = (struct rte_trie_tbl *)p;

	rte_free(dp->tbl8_pool);
	rte_free(dp->tbl8);
	rte_free(dp);
}
679