/* xref: /dpdk/lib/fib/dir24_8.c (revision c1d145834f287aa8cf53de914618a7312f2c360e) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Vladimir Medvedkin <medvedkinv@gmail.com>
 * Copyright(c) 2019 Intel Corporation
 */

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <rte_debug.h>
#include <rte_malloc.h>
#include <rte_errno.h>
#include <rte_vect.h>

#include <rte_rib.h>
#include <rte_fib.h>
#include "dir24_8.h"
#include "fib_log.h"

#ifdef CC_DIR24_8_AVX512_SUPPORT

#include "dir24_8_avx512.h"

#endif /* CC_DIR24_8_AVX512_SUPPORT */

#define DIR24_8_NAMESIZE	64

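/*
 * Round x up to the boundary of the next depth-y prefix, i.e. to a
 * multiple of 2^(32 - y). For example, ROUNDUP(0x0a0a0a80, 24) aligns
 * up to a multiple of 256 and yields 0x0a0a0b00.
 */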
#define ROUNDUP(x, y)	 RTE_ALIGN_CEIL(x, (1 << (32 - y)))

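/*
 * Select the scalar bulk lookup implementation matching the next hop
 * entry size. When be_addr is set, return the variant that expects
 * addresses in network (big endian) byte order.
 */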
static inline rte_fib_lookup_fn_t
get_scalar_fn(enum rte_fib_dir24_8_nh_sz nh_sz, bool be_addr)
{
	switch (nh_sz) {
	case RTE_FIB_DIR24_8_1B:
		return be_addr ? dir24_8_lookup_bulk_1b_be : dir24_8_lookup_bulk_1b;
	case RTE_FIB_DIR24_8_2B:
		return be_addr ? dir24_8_lookup_bulk_2b_be : dir24_8_lookup_bulk_2b;
	case RTE_FIB_DIR24_8_4B:
		return be_addr ? dir24_8_lookup_bulk_4b_be : dir24_8_lookup_bulk_4b;
	case RTE_FIB_DIR24_8_8B:
		return be_addr ? dir24_8_lookup_bulk_8b_be : dir24_8_lookup_bulk_8b;
	default:
		return NULL;
	}
}

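/*
 * Same selection as get_scalar_fn(), but returns the inlined
 * implementations of the scalar bulk lookup.
 */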
static inline rte_fib_lookup_fn_t
get_scalar_fn_inlined(enum rte_fib_dir24_8_nh_sz nh_sz, bool be_addr)
{
	switch (nh_sz) {
	case RTE_FIB_DIR24_8_1B:
		return be_addr ? dir24_8_lookup_bulk_0_be : dir24_8_lookup_bulk_0;
	case RTE_FIB_DIR24_8_2B:
		return be_addr ? dir24_8_lookup_bulk_1_be : dir24_8_lookup_bulk_1;
	case RTE_FIB_DIR24_8_4B:
		return be_addr ? dir24_8_lookup_bulk_2_be : dir24_8_lookup_bulk_2;
	case RTE_FIB_DIR24_8_8B:
		return be_addr ? dir24_8_lookup_bulk_3_be : dir24_8_lookup_bulk_3;
	default:
		return NULL;
	}
}

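/*
 * Select the AVX512 bulk lookup implementation matching the next hop
 * entry size. Returns NULL if the library was built without AVX512
 * support or the CPU/EAL configuration does not allow 512-bit SIMD;
 * the big endian variants additionally require AVX512BW.
 */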
static inline rte_fib_lookup_fn_t
get_vector_fn(enum rte_fib_dir24_8_nh_sz nh_sz, bool be_addr)
{
#ifdef CC_DIR24_8_AVX512_SUPPORT
	if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) <= 0 ||
			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512DQ) <= 0 ||
			rte_vect_get_max_simd_bitwidth() < RTE_VECT_SIMD_512)
		return NULL;

	if (be_addr && rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW) <= 0)
		return NULL;

	switch (nh_sz) {
	case RTE_FIB_DIR24_8_1B:
		return be_addr ? rte_dir24_8_vec_lookup_bulk_1b_be :
			rte_dir24_8_vec_lookup_bulk_1b;
	case RTE_FIB_DIR24_8_2B:
		return be_addr ? rte_dir24_8_vec_lookup_bulk_2b_be :
			rte_dir24_8_vec_lookup_bulk_2b;
	case RTE_FIB_DIR24_8_4B:
		return be_addr ? rte_dir24_8_vec_lookup_bulk_4b_be :
			rte_dir24_8_vec_lookup_bulk_4b;
	case RTE_FIB_DIR24_8_8B:
		return be_addr ? rte_dir24_8_vec_lookup_bulk_8b_be :
			rte_dir24_8_vec_lookup_bulk_8b;
	default:
		return NULL;
	}
#else
	RTE_SET_USED(nh_sz);
	RTE_SET_USED(be_addr);
#endif
	return NULL;
}

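/*
 * Resolve the datapath lookup function for the given lookup type.
 * RTE_FIB_LOOKUP_DEFAULT prefers the AVX512 implementation and falls
 * back to the scalar one when vector lookup is unavailable.
 *
 * Illustrative use through the public API (a sketch assuming the
 * rte_fib wrappers, not taken from this file):
 *
 *	if (rte_fib_select_lookup(fib, RTE_FIB_LOOKUP_DEFAULT) < 0)
 *		return -1; // no implementation available for this fib
 *	rte_fib_lookup_bulk(fib, ips, next_hops, n);
 */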
rte_fib_lookup_fn_t
dir24_8_get_lookup_fn(void *p, enum rte_fib_lookup_type type, bool be_addr)
{
	enum rte_fib_dir24_8_nh_sz nh_sz;
	rte_fib_lookup_fn_t ret_fn;
	struct dir24_8_tbl *dp = p;

	if (dp == NULL)
		return NULL;

	nh_sz = dp->nh_sz;

	switch (type) {
	case RTE_FIB_LOOKUP_DIR24_8_SCALAR_MACRO:
		return get_scalar_fn(nh_sz, be_addr);
	case RTE_FIB_LOOKUP_DIR24_8_SCALAR_INLINE:
		return get_scalar_fn_inlined(nh_sz, be_addr);
	case RTE_FIB_LOOKUP_DIR24_8_SCALAR_UNI:
		return be_addr ? dir24_8_lookup_bulk_uni_be : dir24_8_lookup_bulk_uni;
	case RTE_FIB_LOOKUP_DIR24_8_VECTOR_AVX512:
		return get_vector_fn(nh_sz, be_addr);
	case RTE_FIB_LOOKUP_DEFAULT:
		ret_fn = get_vector_fn(nh_sz, be_addr);
		return ret_fn != NULL ? ret_fn : get_scalar_fn(nh_sz, be_addr);
	default:
		return NULL;
	}

	return NULL;
}

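/*
 * Write the entry val n times starting at ptr, using the entry width
 * selected by size (1, 2, 4 or 8 bytes).
 */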
static void
write_to_fib(void *ptr, uint64_t val, enum rte_fib_dir24_8_nh_sz size, int n)
{
	int i;
	uint8_t *ptr8 = (uint8_t *)ptr;
	uint16_t *ptr16 = (uint16_t *)ptr;
	uint32_t *ptr32 = (uint32_t *)ptr;
	uint64_t *ptr64 = (uint64_t *)ptr;

	switch (size) {
	case RTE_FIB_DIR24_8_1B:
		for (i = 0; i < n; i++)
			ptr8[i] = (uint8_t)val;
		break;
	case RTE_FIB_DIR24_8_2B:
		for (i = 0; i < n; i++)
			ptr16[i] = (uint16_t)val;
		break;
	case RTE_FIB_DIR24_8_4B:
		for (i = 0; i < n; i++)
			ptr32[i] = (uint32_t)val;
		break;
	case RTE_FIB_DIR24_8_8B:
		for (i = 0; i < n; i++)
			ptr64[i] = val;
		break;
	}
}

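/*
 * Scan the tbl8 bitmap for a free group, mark it as used and return
 * its index, or -ENOSPC when all groups are allocated. Each 64-bit
 * slab of the bitmap tracks 64 tbl8 groups.
 */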
static int
tbl8_get_idx(struct dir24_8_tbl *dp)
{
	uint32_t i;
	int bit_idx;

	for (i = 0; (i < (dp->number_tbl8s >> BITMAP_SLAB_BIT_SIZE_LOG2)) &&
			(dp->tbl8_idxes[i] == UINT64_MAX); i++)
		;
	if (i < (dp->number_tbl8s >> BITMAP_SLAB_BIT_SIZE_LOG2)) {
		bit_idx = rte_ctz64(~dp->tbl8_idxes[i]);
		dp->tbl8_idxes[i] |= (1ULL << bit_idx);
		return (i << BITMAP_SLAB_BIT_SIZE_LOG2) + bit_idx;
	}
	return -ENOSPC;
}

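/* Clear the bitmap bit corresponding to the given tbl8 group. */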
static inline void
tbl8_free_idx(struct dir24_8_tbl *dp, int idx)
{
	dp->tbl8_idxes[idx >> BITMAP_SLAB_BIT_SIZE_LOG2] &=
		~(1ULL << (idx & BITMAP_SLAB_BITMASK));
}

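/*
 * Allocate a tbl8 group and initialize all of its entries with the
 * next hop inherited from the covering tbl24 entry, with the
 * extension bit set. If no group is free and an RCU defer queue is
 * configured, try to reclaim one before giving up.
 */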
static int
tbl8_alloc(struct dir24_8_tbl *dp, uint64_t nh)
{
	int64_t	tbl8_idx;
	uint8_t	*tbl8_ptr;

	tbl8_idx = tbl8_get_idx(dp);

	/* If there are no free tbl8 groups, try to reclaim one. */
	if (unlikely(tbl8_idx == -ENOSPC && dp->dq &&
			!rte_rcu_qsbr_dq_reclaim(dp->dq, 1, NULL, NULL, NULL)))
		tbl8_idx = tbl8_get_idx(dp);

	if (tbl8_idx < 0)
		return tbl8_idx;
	tbl8_ptr = (uint8_t *)dp->tbl8 +
		((tbl8_idx * DIR24_8_TBL8_GRP_NUM_ENT) <<
		dp->nh_sz);
	/* Init tbl8 entries with the next hop from tbl24 */
	write_to_fib((void *)tbl8_ptr, nh | DIR24_8_EXT_ENT,
		dp->nh_sz, DIR24_8_TBL8_GRP_NUM_ENT);
	dp->cur_tbl8s++;
	return tbl8_idx;
}

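/* Zero out a tbl8 group, return it to the free bitmap and update the counter. */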
static void
tbl8_cleanup_and_free(struct dir24_8_tbl *dp, uint64_t tbl8_idx)
{
	uint8_t *ptr = (uint8_t *)dp->tbl8 + (tbl8_idx * DIR24_8_TBL8_GRP_NUM_ENT << dp->nh_sz);

	memset(ptr, 0, DIR24_8_TBL8_GRP_NUM_ENT << dp->nh_sz);
	tbl8_free_idx(dp, tbl8_idx);
	dp->cur_tbl8s--;
}

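/* RCU defer queue callback: free the tbl8 group whose index was enqueued. */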
static void
__rcu_qsbr_free_resource(void *p, void *data, unsigned int n __rte_unused)
{
	struct dir24_8_tbl *dp = p;
	uint64_t tbl8_idx = *(uint64_t *)data;

	tbl8_cleanup_and_free(dp, tbl8_idx);
}

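/*
 * If every entry in a tbl8 group holds the same next hop, fold that
 * next hop back into the covering tbl24 entry (clearing the extension
 * bit) and release the group. Without RCU the group is freed
 * immediately; in sync mode it is freed after rte_rcu_qsbr_synchronize(),
 * and in defer queue mode its index is enqueued for later reclamation.
 */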
static void
tbl8_recycle(struct dir24_8_tbl *dp, uint32_t ip, uint64_t tbl8_idx)
{
	uint32_t i;
	uint64_t nh;
	uint8_t *ptr8;
	uint16_t *ptr16;
	uint32_t *ptr32;
	uint64_t *ptr64;

	switch (dp->nh_sz) {
	case RTE_FIB_DIR24_8_1B:
		ptr8 = &((uint8_t *)dp->tbl8)[tbl8_idx *
				DIR24_8_TBL8_GRP_NUM_ENT];
		nh = *ptr8;
		for (i = 1; i < DIR24_8_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr8[i])
				return;
		}
		((uint8_t *)dp->tbl24)[ip >> 8] =
			nh & ~DIR24_8_EXT_ENT;
		break;
	case RTE_FIB_DIR24_8_2B:
		ptr16 = &((uint16_t *)dp->tbl8)[tbl8_idx *
				DIR24_8_TBL8_GRP_NUM_ENT];
		nh = *ptr16;
		for (i = 1; i < DIR24_8_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr16[i])
				return;
		}
		((uint16_t *)dp->tbl24)[ip >> 8] =
			nh & ~DIR24_8_EXT_ENT;
		break;
	case RTE_FIB_DIR24_8_4B:
		ptr32 = &((uint32_t *)dp->tbl8)[tbl8_idx *
				DIR24_8_TBL8_GRP_NUM_ENT];
		nh = *ptr32;
		for (i = 1; i < DIR24_8_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr32[i])
				return;
		}
		((uint32_t *)dp->tbl24)[ip >> 8] =
			nh & ~DIR24_8_EXT_ENT;
		break;
	case RTE_FIB_DIR24_8_8B:
		ptr64 = &((uint64_t *)dp->tbl8)[tbl8_idx *
				DIR24_8_TBL8_GRP_NUM_ENT];
		nh = *ptr64;
		for (i = 1; i < DIR24_8_TBL8_GRP_NUM_ENT; i++) {
			if (nh != ptr64[i])
				return;
		}
		((uint64_t *)dp->tbl24)[ip >> 8] =
			nh & ~DIR24_8_EXT_ENT;
		break;
	}

	if (dp->v == NULL) {
		tbl8_cleanup_and_free(dp, tbl8_idx);
	} else if (dp->rcu_mode == RTE_FIB_QSBR_MODE_SYNC) {
		rte_rcu_qsbr_synchronize(dp->v, RTE_QSBR_THRID_INVALID);
		tbl8_cleanup_and_free(dp, tbl8_idx);
	} else { /* RTE_FIB_QSBR_MODE_DQ */
		if (rte_rcu_qsbr_dq_enqueue(dp->dq, &tbl8_idx))
			FIB_LOG(ERR, "Failed to push QSBR FIFO");
	}
}

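/*
 * Install next_hop for the address range [ledge, redge). Whole /24
 * blocks are written directly into tbl24; a partial block at either
 * edge goes through a tbl8 group, which is recycled back into tbl24
 * when it ends up uniform. ledge == redge == 0 denotes the full
 * address space.
 */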
static int
install_to_fib(struct dir24_8_tbl *dp, uint32_t ledge, uint32_t redge,
	uint64_t next_hop)
{
	uint64_t	tbl24_tmp;
	int	tbl8_idx;
	int tmp_tbl8_idx;
	uint8_t	*tbl8_ptr;
	uint32_t len;

	len = ((ledge == 0) && (redge == 0)) ? 1 << 24 :
		((redge & DIR24_8_TBL24_MASK) - ROUNDUP(ledge, 24)) >> 8;

	if (((ledge >> 8) != (redge >> 8)) || (len == 1 << 24)) {
		if ((ROUNDUP(ledge, 24) - ledge) != 0) {
			tbl24_tmp = get_tbl24(dp, ledge, dp->nh_sz);
			if ((tbl24_tmp & DIR24_8_EXT_ENT) !=
					DIR24_8_EXT_ENT) {
				/*
				 * Make sure there is space for two tbl8
				 * groups; this is necessary when installing
				 * a range that needs a tbl8 for both ledge
				 * and redge.
				 */
				tbl8_idx = tbl8_alloc(dp, tbl24_tmp);
				tmp_tbl8_idx = tbl8_get_idx(dp);
				if (tbl8_idx < 0)
					return -ENOSPC;
				else if (tmp_tbl8_idx < 0) {
					tbl8_free_idx(dp, tbl8_idx);
					return -ENOSPC;
				}
				tbl8_free_idx(dp, tmp_tbl8_idx);
				/* update tbl24 entry with the tbl8 index */
				write_to_fib(get_tbl24_p(dp, ledge,
					dp->nh_sz), (tbl8_idx << 1) |
					DIR24_8_EXT_ENT,
					dp->nh_sz, 1);
			} else
				tbl8_idx = tbl24_tmp >> 1;
			tbl8_ptr = (uint8_t *)dp->tbl8 +
				(((tbl8_idx * DIR24_8_TBL8_GRP_NUM_ENT) +
				(ledge & ~DIR24_8_TBL24_MASK)) <<
				dp->nh_sz);
			/* update tbl8 with the new next hop */
			write_to_fib((void *)tbl8_ptr, (next_hop << 1) |
				DIR24_8_EXT_ENT,
				dp->nh_sz, ROUNDUP(ledge, 24) - ledge);
			tbl8_recycle(dp, ledge, tbl8_idx);
		}
		write_to_fib(get_tbl24_p(dp, ROUNDUP(ledge, 24), dp->nh_sz),
			next_hop << 1, dp->nh_sz, len);
		if (redge & ~DIR24_8_TBL24_MASK) {
			tbl24_tmp = get_tbl24(dp, redge, dp->nh_sz);
			if ((tbl24_tmp & DIR24_8_EXT_ENT) !=
					DIR24_8_EXT_ENT) {
				tbl8_idx = tbl8_alloc(dp, tbl24_tmp);
				if (tbl8_idx < 0)
					return -ENOSPC;
				/* update tbl24 entry with the tbl8 index */
				write_to_fib(get_tbl24_p(dp, redge,
					dp->nh_sz), (tbl8_idx << 1) |
					DIR24_8_EXT_ENT,
					dp->nh_sz, 1);
			} else
				tbl8_idx = tbl24_tmp >> 1;
			tbl8_ptr = (uint8_t *)dp->tbl8 +
				((tbl8_idx * DIR24_8_TBL8_GRP_NUM_ENT) <<
				dp->nh_sz);
			/* update tbl8 with the new next hop */
			write_to_fib((void *)tbl8_ptr, (next_hop << 1) |
				DIR24_8_EXT_ENT,
				dp->nh_sz, redge & ~DIR24_8_TBL24_MASK);
			tbl8_recycle(dp, redge, tbl8_idx);
		}
	} else if ((redge - ledge) != 0) {
		tbl24_tmp = get_tbl24(dp, ledge, dp->nh_sz);
		if ((tbl24_tmp & DIR24_8_EXT_ENT) !=
				DIR24_8_EXT_ENT) {
			tbl8_idx = tbl8_alloc(dp, tbl24_tmp);
			if (tbl8_idx < 0)
				return -ENOSPC;
			/* update tbl24 entry with the tbl8 index */
			write_to_fib(get_tbl24_p(dp, ledge, dp->nh_sz),
				(tbl8_idx << 1) |
				DIR24_8_EXT_ENT,
				dp->nh_sz, 1);
		} else
			tbl8_idx = tbl24_tmp >> 1;
		tbl8_ptr = (uint8_t *)dp->tbl8 +
			(((tbl8_idx * DIR24_8_TBL8_GRP_NUM_ENT) +
			(ledge & ~DIR24_8_TBL24_MASK)) <<
			dp->nh_sz);
		/* update tbl8 with the new next hop */
		write_to_fib((void *)tbl8_ptr, (next_hop << 1) |
			DIR24_8_EXT_ENT,
			dp->nh_sz, redge - ledge);
		tbl8_recycle(dp, ledge, tbl8_idx);
	}
	return 0;
}

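/*
 * Write next_hop into the parts of ip/depth that are not covered by
 * more specific prefixes. The RIB is walked for covered subprefixes,
 * and install_to_fib() is called for each gap between them.
 */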
static int
modify_fib(struct dir24_8_tbl *dp, struct rte_rib *rib, uint32_t ip,
	uint8_t depth, uint64_t next_hop)
{
	struct rte_rib_node *tmp = NULL;
	uint32_t ledge, redge, tmp_ip;
	int ret;
	uint8_t tmp_depth;

	ledge = ip;
	do {
		tmp = rte_rib_get_nxt(rib, ip, depth, tmp,
			RTE_RIB_GET_NXT_COVER);
		if (tmp != NULL) {
			rte_rib_get_depth(tmp, &tmp_depth);
			if (tmp_depth == depth)
				continue;
			rte_rib_get_ip(tmp, &tmp_ip);
			redge = tmp_ip & rte_rib_depth_to_mask(tmp_depth);
			if (ledge == redge) {
				ledge = redge +
					(uint32_t)(1ULL << (32 - tmp_depth));
				continue;
			}
			ret = install_to_fib(dp, ledge, redge,
				next_hop);
			if (ret != 0)
				return ret;
			ledge = redge +
				(uint32_t)(1ULL << (32 - tmp_depth));
			/*
			 * ledge reached the end of the address
			 * space and wrapped around
			 */
			if (ledge == 0)
				break;
		} else {
			redge = ip + (uint32_t)(1ULL << (32 - depth));
			if (ledge == redge && ledge != 0)
				break;
			ret = install_to_fib(dp, ledge, redge,
				next_hop);
			if (ret != 0)
				return ret;
		}
	} while (tmp);

	return 0;
}

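/*
 * Add or delete a route. The RIB keeps the control plane view; the
 * dataplane tables are updated through modify_fib(). rsvd_tbl8s
 * tracks the tbl8 groups reserved for prefixes longer than 24 bits
 * (one per /24 containing such routes), so inserts can fail early
 * with -ENOSPC. On deletion the range is rewritten with the parent
 * prefix's next hop, or with the default next hop when no parent
 * exists.
 */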
int
dir24_8_modify(struct rte_fib *fib, uint32_t ip, uint8_t depth,
	uint64_t next_hop, int op)
{
	struct dir24_8_tbl *dp;
	struct rte_rib *rib;
	struct rte_rib_node *tmp = NULL;
	struct rte_rib_node *node;
	struct rte_rib_node *parent;
	int ret = 0;
	uint64_t par_nh, node_nh;

	if ((fib == NULL) || (depth > RTE_FIB_MAXDEPTH))
		return -EINVAL;

	dp = rte_fib_get_dp(fib);
	rib = rte_fib_get_rib(fib);
	RTE_ASSERT((dp != NULL) && (rib != NULL));

	if (next_hop > get_max_nh(dp->nh_sz))
		return -EINVAL;

	ip &= rte_rib_depth_to_mask(depth);

	node = rte_rib_lookup_exact(rib, ip, depth);
	switch (op) {
	case RTE_FIB_ADD:
		if (node != NULL) {
			rte_rib_get_nh(node, &node_nh);
			if (node_nh == next_hop)
				return 0;
			ret = modify_fib(dp, rib, ip, depth, next_hop);
			if (ret == 0)
				rte_rib_set_nh(node, next_hop);
			return ret;
		}
		if (depth > 24) {
			tmp = rte_rib_get_nxt(rib, ip, 24, NULL,
				RTE_RIB_GET_NXT_COVER);
			if ((tmp == NULL) &&
				(dp->rsvd_tbl8s >= dp->number_tbl8s))
				return -ENOSPC;
		}
		node = rte_rib_insert(rib, ip, depth);
		if (node == NULL)
			return -rte_errno;
		rte_rib_set_nh(node, next_hop);
		parent = rte_rib_lookup_parent(node);
		if (parent != NULL) {
			rte_rib_get_nh(parent, &par_nh);
			if (par_nh == next_hop)
				return 0;
		}
		ret = modify_fib(dp, rib, ip, depth, next_hop);
		if (ret != 0) {
			rte_rib_remove(rib, ip, depth);
			return ret;
		}
		if ((depth > 24) && (tmp == NULL))
			dp->rsvd_tbl8s++;
		return 0;
	case RTE_FIB_DEL:
		if (node == NULL)
			return -ENOENT;

		parent = rte_rib_lookup_parent(node);
		if (parent != NULL) {
			rte_rib_get_nh(parent, &par_nh);
			rte_rib_get_nh(node, &node_nh);
			if (par_nh != node_nh)
				ret = modify_fib(dp, rib, ip, depth, par_nh);
		} else
			ret = modify_fib(dp, rib, ip, depth, dp->def_nh);
		if (ret == 0) {
			rte_rib_remove(rib, ip, depth);
			if (depth > 24) {
				tmp = rte_rib_get_nxt(rib, ip, 24, NULL,
					RTE_RIB_GET_NXT_COVER);
				if (tmp == NULL)
					dp->rsvd_tbl8s--;
			}
		}
		return ret;
	default:
		break;
	}
	return -EINVAL;
}

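/*
 * Create the dir24_8 dataplane: the tbl24 is allocated inline with
 * the table structure, followed by a separate tbl8 array sized for
 * num_tbl8 + 1 groups, and the allocation bitmap.
 *
 * Illustrative configuration through the public API (a sketch
 * assuming the rte_fib_conf layout, not taken from this file):
 *
 *	struct rte_fib_conf conf = {
 *		.type = RTE_FIB_DIR24_8,
 *		.default_nh = 0,
 *		.max_routes = 1 << 16,
 *		.dir24_8 = {
 *			.nh_sz = RTE_FIB_DIR24_8_4B,
 *			.num_tbl8 = 1 << 12,
 *		},
 *	};
 *	struct rte_fib *fib = rte_fib_create("example", SOCKET_ID_ANY, &conf);
 */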
void *
dir24_8_create(const char *name, int socket_id, struct rte_fib_conf *fib_conf)
{
	char mem_name[DIR24_8_NAMESIZE];
	struct dir24_8_tbl *dp;
	uint64_t	def_nh;
	uint32_t	num_tbl8;
	enum rte_fib_dir24_8_nh_sz	nh_sz;

	if ((name == NULL) || (fib_conf == NULL) ||
			(fib_conf->dir24_8.nh_sz < RTE_FIB_DIR24_8_1B) ||
			(fib_conf->dir24_8.nh_sz > RTE_FIB_DIR24_8_8B) ||
			(fib_conf->dir24_8.num_tbl8 >
			get_max_nh(fib_conf->dir24_8.nh_sz)) ||
			(fib_conf->dir24_8.num_tbl8 == 0) ||
			(fib_conf->default_nh >
			get_max_nh(fib_conf->dir24_8.nh_sz))) {
		rte_errno = EINVAL;
		return NULL;
	}

	def_nh = fib_conf->default_nh;
	nh_sz = fib_conf->dir24_8.nh_sz;
	num_tbl8 = RTE_ALIGN_CEIL(fib_conf->dir24_8.num_tbl8,
			BITMAP_SLAB_BIT_SIZE);

	snprintf(mem_name, sizeof(mem_name), "DP_%s", name);
	dp = rte_zmalloc_socket(mem_name, sizeof(struct dir24_8_tbl) +
		DIR24_8_TBL24_NUM_ENT * (1 << nh_sz) + sizeof(uint32_t),
		RTE_CACHE_LINE_SIZE, socket_id);
	if (dp == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	/* Init tbl24 with the default next hop */
	write_to_fib(dp->tbl24, (def_nh << 1), nh_sz, 1 << 24);

	snprintf(mem_name, sizeof(mem_name), "TBL8_%p", dp);
	uint64_t tbl8_sz = DIR24_8_TBL8_GRP_NUM_ENT * (1ULL << nh_sz) *
			(num_tbl8 + 1);
	dp->tbl8 = rte_zmalloc_socket(mem_name, tbl8_sz,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (dp->tbl8 == NULL) {
		rte_errno = ENOMEM;
		rte_free(dp);
		return NULL;
	}
	dp->def_nh = def_nh;
	dp->nh_sz = nh_sz;
	dp->number_tbl8s = num_tbl8;

	snprintf(mem_name, sizeof(mem_name), "TBL8_idxes_%p", dp);
	dp->tbl8_idxes = rte_zmalloc_socket(mem_name,
			RTE_ALIGN_CEIL(dp->number_tbl8s, 64) >> 3,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (dp->tbl8_idxes == NULL) {
		rte_errno = ENOMEM;
		rte_free(dp->tbl8);
		rte_free(dp);
		return NULL;
	}

	return dp;
}

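/* Release the defer queue (if any) and all dataplane memory. */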
void
dir24_8_free(void *p)
{
	struct dir24_8_tbl *dp = (struct dir24_8_tbl *)p;

	rte_rcu_qsbr_dq_delete(dp->dq);
	rte_free(dp->tbl8_idxes);
	rte_free(dp->tbl8);
	rte_free(dp);
}

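/*
 * Attach an RCU QSBR variable to the dataplane. In sync mode freed
 * tbl8 groups are reclaimed inline after a quiescent state; in defer
 * queue mode they are pushed to a queue and reclaimed later.
 */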
int
dir24_8_rcu_qsbr_add(struct dir24_8_tbl *dp, struct rte_fib_rcu_config *cfg,
	const char *name)
{
	struct rte_rcu_qsbr_dq_parameters params = {0};
	char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];

	if (dp == NULL || cfg == NULL)
		return -EINVAL;

	if (dp->v != NULL)
		return -EEXIST;

	if (cfg->mode == RTE_FIB_QSBR_MODE_SYNC) {
		/* Nothing else to do. */
	} else if (cfg->mode == RTE_FIB_QSBR_MODE_DQ) {
		/* Init QSBR defer queue. */
		snprintf(rcu_dq_name, sizeof(rcu_dq_name),
				"FIB_RCU_%s", name);
		params.name = rcu_dq_name;
		params.size = cfg->dq_size;
		if (params.size == 0)
			params.size = RTE_FIB_RCU_DQ_RECLAIM_SZ;
		params.trigger_reclaim_limit = cfg->reclaim_thd;
		params.max_reclaim_size = cfg->reclaim_max;
		if (params.max_reclaim_size == 0)
			params.max_reclaim_size = RTE_FIB_RCU_DQ_RECLAIM_MAX;
		params.esize = sizeof(uint64_t);
		params.free_fn = __rcu_qsbr_free_resource;
		params.p = dp;
		params.v = cfg->v;
		dp->dq = rte_rcu_qsbr_dq_create(&params);
		if (dp->dq == NULL) {
			FIB_LOG(ERR, "FIB defer queue creation failed");
			return -rte_errno;
		}
	} else {
		return -EINVAL;
	}

	dp->rcu_mode = cfg->mode;
	dp->v = cfg->v;

	return 0;
}
658