xref: /dpdk/lib/lpm/rte_lpm.c (revision 30a1de105a5f40d77b344a891c4a68f79e815c43)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2010-2014 Intel Corporation
3  * Copyright(c) 2020 Arm Limited
4  */
5 
6 #include <string.h>
7 #include <stdint.h>
8 #include <errno.h>
9 #include <stdio.h>
10 #include <sys/queue.h>
11 
12 #include <rte_log.h>
13 #include <rte_common.h>
14 #include <rte_malloc.h>
15 #include <rte_eal_memconfig.h>
16 #include <rte_string_fns.h>
17 #include <rte_errno.h>
18 #include <rte_tailq.h>
19 
20 #include "rte_lpm.h"
21 
22 TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
23 
24 static struct rte_tailq_elem rte_lpm_tailq = {
25 	.name = "RTE_LPM",
26 };
27 EAL_REGISTER_TAILQ(rte_lpm_tailq)
28 
29 #define MAX_DEPTH_TBL24 24
30 
31 enum valid_flag {
32 	INVALID = 0,
33 	VALID
34 };
35 
36 /** @internal Rule structure. */
37 struct rte_lpm_rule {
38 	uint32_t ip; /**< Rule IP address. */
39 	uint32_t next_hop; /**< Rule next hop. */
40 };
41 
42 /** @internal Contains metadata about the rules table. */
43 struct rte_lpm_rule_info {
44 	uint32_t used_rules; /**< Used rules so far. */
45 	uint32_t first_rule; /**< Indexes the first rule of a given depth. */
46 };
47 
48 /** @internal LPM structure. */
49 struct __rte_lpm {
50 	/* Exposed LPM data. */
51 	struct rte_lpm lpm;
52 
53 	/* LPM metadata. */
54 	char name[RTE_LPM_NAMESIZE];        /**< Name of the lpm. */
55 	uint32_t max_rules; /**< Max. number of rules supported. */
56 	uint32_t number_tbl8s; /**< Number of tbl8s. */
57 	/** Rule info table. */
58 	struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH];
59 	struct rte_lpm_rule *rules_tbl; /**< LPM rules. */
60 
61 	/* RCU config. */
62 	struct rte_rcu_qsbr *v;		/* RCU QSBR variable. */
63 	enum rte_lpm_qsbr_mode rcu_mode;/* Blocking, defer queue. */
64 	struct rte_rcu_qsbr_dq *dq;	/* RCU QSBR defer queue. */
65 };
66 
67 /* Macro to enable/disable run-time checks. */
68 #if defined(RTE_LIBRTE_LPM_DEBUG)
69 #include <rte_debug.h>
70 #define VERIFY_DEPTH(depth) do {                                \
71 	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
72 		rte_panic("LPM: Invalid depth (%u) at line %d", \
73 				(unsigned)(depth), __LINE__);   \
74 } while (0)
75 #else
76 #define VERIFY_DEPTH(depth)
77 #endif
78 
79 /*
80  * Converts a given depth value to its corresponding mask value.
81  *
82  * depth  (IN)		: range = 1 - 32
83  * mask   (OUT)		: 32bit mask
84  */
85 static uint32_t __attribute__((pure))
86 depth_to_mask(uint8_t depth)
87 {
88 	VERIFY_DEPTH(depth);
89 
90 	/* To calculate the mask, start with a 1 in the most significant bit
91 	 * and arithmetic-right-shift it so the left hand side fills with 1's.
92 	 */
93 	return (int)0x80000000 >> (depth - 1);
94 }
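
/*
 * Worked example: for depth = 24 the expression above evaluates to
 *     (int)0x80000000 >> 23 == 0xFFFFFF00   (i.e. 255.255.255.0)
 * and for depth = 32 it yields 0xFFFFFFFF. The cast to int makes the right
 * shift arithmetic (sign-propagating) on the compilers DPDK supports, which
 * is what fills the high bits with 1's.
 */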
95 
96 /*
97  * Converts given depth value to its corresponding range value.
98  */
99 static uint32_t __attribute__((pure))
100 depth_to_range(uint8_t depth)
101 {
102 	VERIFY_DEPTH(depth);
103 
104 	/*
105 	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
106 	 */
107 	if (depth <= MAX_DEPTH_TBL24)
108 		return 1 << (MAX_DEPTH_TBL24 - depth);
109 
110 	/* Else if depth is greater than 24 */
111 	return 1 << (RTE_LPM_MAX_DEPTH - depth);
112 }
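
/*
 * Worked example: depth 16 covers 1 << (24 - 16) = 256 tbl24 entries and
 * depth 24 covers exactly one; depth 25 covers 1 << (32 - 25) = 128 tbl8
 * entries and depth 32 covers exactly one.
 */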
113 
114 /*
115  * Find an existing lpm table and return a pointer to it.
116  */
117 struct rte_lpm *
118 rte_lpm_find_existing(const char *name)
119 {
120 	struct __rte_lpm *i_lpm = NULL;
121 	struct rte_tailq_entry *te;
122 	struct rte_lpm_list *lpm_list;
123 
124 	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
125 
126 	rte_mcfg_tailq_read_lock();
127 	TAILQ_FOREACH(te, lpm_list, next) {
128 		i_lpm = te->data;
129 		if (strncmp(name, i_lpm->name, RTE_LPM_NAMESIZE) == 0)
130 			break;
131 	}
132 	rte_mcfg_tailq_read_unlock();
133 
134 	if (te == NULL) {
135 		rte_errno = ENOENT;
136 		return NULL;
137 	}
138 
139 	return &i_lpm->lpm;
140 }
141 
142 /*
143  * Allocates memory for LPM object
144  */
145 struct rte_lpm *
146 rte_lpm_create(const char *name, int socket_id,
147 		const struct rte_lpm_config *config)
148 {
149 	char mem_name[RTE_LPM_NAMESIZE];
150 	struct __rte_lpm *i_lpm;
151 	struct rte_lpm *lpm = NULL;
152 	struct rte_tailq_entry *te;
153 	uint32_t mem_size, rules_size, tbl8s_size;
154 	struct rte_lpm_list *lpm_list;
155 
156 	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
157 
158 	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
159 
160 	/* Check user arguments. */
161 	if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
162 			|| config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
163 		rte_errno = EINVAL;
164 		return NULL;
165 	}
166 
167 	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
168 
169 	rte_mcfg_tailq_write_lock();
170 
171 	/* guarantee there is no existing entry with the same name */
172 	TAILQ_FOREACH(te, lpm_list, next) {
173 		i_lpm = te->data;
174 		if (strncmp(name, i_lpm->name, RTE_LPM_NAMESIZE) == 0)
175 			break;
176 	}
177 
178 	if (te != NULL) {
179 		rte_errno = EEXIST;
180 		goto exit;
181 	}
182 
183 	/* Determine the amount of memory to allocate. */
184 	mem_size = sizeof(*i_lpm);
185 	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
186 	tbl8s_size = sizeof(struct rte_lpm_tbl_entry) *
187 			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s;
188 
189 	/* allocate tailq entry */
190 	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
191 	if (te == NULL) {
192 		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
193 		rte_errno = ENOMEM;
194 		goto exit;
195 	}
196 
197 	/* Allocate memory to store the LPM data structures. */
198 	i_lpm = rte_zmalloc_socket(mem_name, mem_size,
199 			RTE_CACHE_LINE_SIZE, socket_id);
200 	if (i_lpm == NULL) {
201 		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
202 		rte_free(te);
203 		rte_errno = ENOMEM;
204 		goto exit;
205 	}
206 
207 	i_lpm->rules_tbl = rte_zmalloc_socket(NULL,
208 			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
209 
210 	if (i_lpm->rules_tbl == NULL) {
211 		RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
212 		rte_free(i_lpm);
213 		i_lpm = NULL;
214 		rte_free(te);
215 		rte_errno = ENOMEM;
216 		goto exit;
217 	}
218 
219 	i_lpm->lpm.tbl8 = rte_zmalloc_socket(NULL,
220 			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
221 
222 	if (i_lpm->lpm.tbl8 == NULL) {
223 		RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
224 		rte_free(i_lpm->rules_tbl);
225 		rte_free(i_lpm);
226 		i_lpm = NULL;
227 		rte_free(te);
228 		rte_errno = ENOMEM;
229 		goto exit;
230 	}
231 
232 	/* Save user arguments. */
233 	i_lpm->max_rules = config->max_rules;
234 	i_lpm->number_tbl8s = config->number_tbl8s;
235 	strlcpy(i_lpm->name, name, sizeof(i_lpm->name));
236 
237 	te->data = i_lpm;
238 	lpm = &i_lpm->lpm;
239 
240 	TAILQ_INSERT_TAIL(lpm_list, te, next);
241 
242 exit:
243 	rte_mcfg_tailq_write_unlock();
244 
245 	return lpm;
246 }
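
/*
 * Illustrative usage sketch (not part of the library, compiled out): how an
 * application might create a table with this API. The helper name, table
 * name and sizes below are arbitrary example values.
 */
#if 0
static struct rte_lpm *
example_lpm_setup(void)
{
	struct rte_lpm_config config = {
		.max_rules = 1024,
		.number_tbl8s = 256,
		.flags = 0,
	};

	/* SOCKET_ID_ANY (-1) lets the allocator pick any NUMA socket. */
	return rte_lpm_create("example_lpm", SOCKET_ID_ANY, &config);
}
#endif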
247 
248 /*
249  * Deallocates memory for given LPM table.
250  */
251 void
252 rte_lpm_free(struct rte_lpm *lpm)
253 {
254 	struct rte_lpm_list *lpm_list;
255 	struct rte_tailq_entry *te;
256 	struct __rte_lpm *i_lpm;
257 
258 	/* Check user arguments. */
259 	if (lpm == NULL)
260 		return;
261 	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
262 
263 	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
264 
265 	rte_mcfg_tailq_write_lock();
266 
267 	/* find our tailq entry */
268 	TAILQ_FOREACH(te, lpm_list, next) {
269 		if (te->data == (void *)i_lpm)
270 			break;
271 	}
272 	if (te != NULL)
273 		TAILQ_REMOVE(lpm_list, te, next);
274 
275 	rte_mcfg_tailq_write_unlock();
276 
277 	if (i_lpm->dq != NULL)
278 		rte_rcu_qsbr_dq_delete(i_lpm->dq);
279 	rte_free(i_lpm->lpm.tbl8);
280 	rte_free(i_lpm->rules_tbl);
281 	rte_free(i_lpm);
282 	rte_free(te);
283 }
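
/*
 * Illustrative usage sketch (not part of the library, compiled out): looking
 * a table up by name and releasing it. "example_lpm" matches the name used
 * in the creation sketch above; the helper itself is hypothetical.
 */
#if 0
static void
example_lpm_teardown(void)
{
	struct rte_lpm *lpm = rte_lpm_find_existing("example_lpm");

	if (lpm == NULL)
		return;	/* rte_errno is ENOENT when no such table exists */

	rte_lpm_free(lpm);	/* frees rules, tbl8s and the tailq entry */
}
#endif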
284 
285 static void
286 __lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
287 {
288 	struct rte_lpm_tbl_entry *tbl8 = ((struct __rte_lpm *)p)->lpm.tbl8;
289 	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
290 	uint32_t tbl8_group_index = *(uint32_t *)data;
291 
292 	RTE_SET_USED(n);
293 	/* Set tbl8 group invalid */
294 	__atomic_store(&tbl8[tbl8_group_index], &zero_tbl8_entry,
295 		__ATOMIC_RELAXED);
296 }
297 
298 /* Associate QSBR variable with an LPM object.
299  */
300 int
301 rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg)
302 {
303 	struct rte_rcu_qsbr_dq_parameters params = {0};
304 	char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
305 	struct __rte_lpm *i_lpm;
306 
307 	if (lpm == NULL || cfg == NULL) {
308 		rte_errno = EINVAL;
309 		return 1;
310 	}
311 
312 	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
313 	if (i_lpm->v != NULL) {
314 		rte_errno = EEXIST;
315 		return 1;
316 	}
317 
318 	if (cfg->mode == RTE_LPM_QSBR_MODE_SYNC) {
319 		/* Nothing else to do. */
320 	} else if (cfg->mode == RTE_LPM_QSBR_MODE_DQ) {
321 		/* Init QSBR defer queue. */
322 		snprintf(rcu_dq_name, sizeof(rcu_dq_name),
323 				"LPM_RCU_%s", i_lpm->name);
324 		params.name = rcu_dq_name;
325 		params.size = cfg->dq_size;
326 		if (params.size == 0)
327 			params.size = i_lpm->number_tbl8s;
328 		params.trigger_reclaim_limit = cfg->reclaim_thd;
329 		params.max_reclaim_size = cfg->reclaim_max;
330 		if (params.max_reclaim_size == 0)
331 			params.max_reclaim_size = RTE_LPM_RCU_DQ_RECLAIM_MAX;
332 		params.esize = sizeof(uint32_t);	/* tbl8 group index */
333 		params.free_fn = __lpm_rcu_qsbr_free_resource;
334 		params.p = i_lpm;
335 		params.v = cfg->v;
336 		i_lpm->dq = rte_rcu_qsbr_dq_create(&params);
337 		if (i_lpm->dq == NULL) {
338 			RTE_LOG(ERR, LPM, "LPM defer queue creation failed\n");
339 			return 1;
340 		}
341 	} else {
342 		rte_errno = EINVAL;
343 		return 1;
344 	}
345 	i_lpm->rcu_mode = cfg->mode;
346 	i_lpm->v = cfg->v;
347 
348 	return 0;
349 }
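
/*
 * Illustrative usage sketch (not part of the library, compiled out):
 * allocating a QSBR variable with the standard librte_rcu calls and
 * attaching it in defer-queue mode. The helper name and thread count are
 * arbitrary; zero-valued config fields fall back to the defaults shown in
 * rte_lpm_rcu_qsbr_add() above.
 */
#if 0
static int
example_lpm_attach_rcu(struct rte_lpm *lpm, uint32_t max_reader_threads)
{
	struct rte_rcu_qsbr *v;
	size_t sz;

	/* Allocate and initialise the QSBR variable. */
	sz = rte_rcu_qsbr_get_memsize(max_reader_threads);
	v = rte_zmalloc(NULL, sz, RTE_CACHE_LINE_SIZE);
	if (v == NULL)
		return -1;
	if (rte_rcu_qsbr_init(v, max_reader_threads) != 0) {
		rte_free(v);
		return -1;
	}

	/* Reclaim freed tbl8 groups through a defer queue. */
	struct rte_lpm_rcu_config rcu_cfg = {
		.v = v,
		.mode = RTE_LPM_QSBR_MODE_DQ,
		.dq_size = 0,		/* 0: defaults to number_tbl8s */
		.reclaim_thd = 0,
		.reclaim_max = 0,	/* 0: defaults to RTE_LPM_RCU_DQ_RECLAIM_MAX */
	};

	/* Returns 0 on success, 1 with rte_errno set on failure. */
	return rte_lpm_rcu_qsbr_add(lpm, &rcu_cfg);
}
#endif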
350 
351 /*
352  * Adds a rule to the rule table.
353  *
354  * NOTE: The rule table is split into 32 groups. Each group contains rules that
355  * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
356  * prefixes with a depth of 1, etc.). In the following code (depth - 1) is used
357  * as the group index because, even though the depth range is 1 - 32, the
358  * groups are indexed in the rule table from 0 - 31.
359  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
360  */
361 static int32_t
362 rule_add(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
363 	uint32_t next_hop)
364 {
365 	uint32_t rule_gindex, rule_index, last_rule;
366 	int i;
367 
368 	VERIFY_DEPTH(depth);
369 
370 	/* Scan through rule group to see if rule already exists. */
371 	if (i_lpm->rule_info[depth - 1].used_rules > 0) {
372 
373 		/* rule_gindex stands for rule group index. */
374 		rule_gindex = i_lpm->rule_info[depth - 1].first_rule;
375 		/* Initialise rule_index to point to start of rule group. */
376 		rule_index = rule_gindex;
377 		/* Last rule = Last used rule in this rule group. */
378 		last_rule = rule_gindex + i_lpm->rule_info[depth - 1].used_rules;
379 
380 		for (; rule_index < last_rule; rule_index++) {
381 
382 			/* If rule already exists update next hop and return. */
383 			if (i_lpm->rules_tbl[rule_index].ip == ip_masked) {
384 
385 				if (i_lpm->rules_tbl[rule_index].next_hop
386 						== next_hop)
387 					return -EEXIST;
388 				i_lpm->rules_tbl[rule_index].next_hop = next_hop;
389 
390 				return rule_index;
391 			}
392 		}
393 
394 		if (rule_index == i_lpm->max_rules)
395 			return -ENOSPC;
396 	} else {
397 		/* Calculate the position in which the rule will be stored. */
398 		rule_index = 0;
399 
400 		for (i = depth - 1; i > 0; i--) {
401 			if (i_lpm->rule_info[i - 1].used_rules > 0) {
402 				rule_index = i_lpm->rule_info[i - 1].first_rule
403 						+ i_lpm->rule_info[i - 1].used_rules;
404 				break;
405 			}
406 		}
407 		if (rule_index == i_lpm->max_rules)
408 			return -ENOSPC;
409 
410 		i_lpm->rule_info[depth - 1].first_rule = rule_index;
411 	}
412 
413 	/* Make room for the new rule in the array. */
414 	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
415 		if (i_lpm->rule_info[i - 1].first_rule
416 				+ i_lpm->rule_info[i - 1].used_rules == i_lpm->max_rules)
417 			return -ENOSPC;
418 
419 		if (i_lpm->rule_info[i - 1].used_rules > 0) {
420 			i_lpm->rules_tbl[i_lpm->rule_info[i - 1].first_rule
421 				+ i_lpm->rule_info[i - 1].used_rules]
422 					= i_lpm->rules_tbl[i_lpm->rule_info[i - 1].first_rule];
423 			i_lpm->rule_info[i - 1].first_rule++;
424 		}
425 	}
426 
427 	/* Add the new rule. */
428 	i_lpm->rules_tbl[rule_index].ip = ip_masked;
429 	i_lpm->rules_tbl[rule_index].next_hop = next_hop;
430 
431 	/* Increment the used rules counter for this rule group. */
432 	i_lpm->rule_info[depth - 1].used_rules++;
433 
434 	return rule_index;
435 }
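
/*
 * Worked example of the layout rule_add() maintains: with one /16 rule and
 * two /24 rules, rules_tbl[] holds them back to back in increasing depth
 * order and rule_info[] records
 *     rule_info[15] = { .first_rule = 0, .used_rules = 1 }   (depth 16)
 *     rule_info[23] = { .first_rule = 1, .used_rules = 2 }   (depth 24)
 * Adding another /16 shifts each deeper group one slot to the right (the
 * "make room" loop above) before the new rule is written.
 */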
436 
437 /*
438  * Delete a rule from the rule table.
439  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
440  */
441 static void
442 rule_delete(struct __rte_lpm *i_lpm, int32_t rule_index, uint8_t depth)
443 {
444 	int i;
445 
446 	VERIFY_DEPTH(depth);
447 
448 	i_lpm->rules_tbl[rule_index] =
449 			i_lpm->rules_tbl[i_lpm->rule_info[depth - 1].first_rule
450 			+ i_lpm->rule_info[depth - 1].used_rules - 1];
451 
452 	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
453 		if (i_lpm->rule_info[i].used_rules > 0) {
454 			i_lpm->rules_tbl[i_lpm->rule_info[i].first_rule - 1] =
455 					i_lpm->rules_tbl[i_lpm->rule_info[i].first_rule
456 						+ i_lpm->rule_info[i].used_rules - 1];
457 			i_lpm->rule_info[i].first_rule--;
458 		}
459 	}
460 
461 	i_lpm->rule_info[depth - 1].used_rules--;
462 }
463 
464 /*
465  * Finds a rule in rule table.
466  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
467  */
468 static int32_t
469 rule_find(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth)
470 {
471 	uint32_t rule_gindex, last_rule, rule_index;
472 
473 	VERIFY_DEPTH(depth);
474 
475 	rule_gindex = i_lpm->rule_info[depth - 1].first_rule;
476 	last_rule = rule_gindex + i_lpm->rule_info[depth - 1].used_rules;
477 
478 	/* Scan used rules at given depth to find rule. */
479 	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
480 		/* If rule is found return the rule index. */
481 		if (i_lpm->rules_tbl[rule_index].ip == ip_masked)
482 			return rule_index;
483 	}
484 
485 	/* If rule is not found return -EINVAL. */
486 	return -EINVAL;
487 }
488 
489 /*
490  * Find, clean and allocate a tbl8.
491  */
492 static int32_t
493 _tbl8_alloc(struct __rte_lpm *i_lpm)
494 {
495 	uint32_t group_idx; /* tbl8 group index. */
496 	struct rte_lpm_tbl_entry *tbl8_entry;
497 
498 	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
499 	for (group_idx = 0; group_idx < i_lpm->number_tbl8s; group_idx++) {
500 		tbl8_entry = &i_lpm->lpm.tbl8[group_idx *
501 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
502 		/* If a free tbl8 group is found clean it and set as VALID. */
503 		if (!tbl8_entry->valid_group) {
504 			struct rte_lpm_tbl_entry new_tbl8_entry = {
505 				.next_hop = 0,
506 				.valid = INVALID,
507 				.depth = 0,
508 				.valid_group = VALID,
509 			};
510 
511 			memset(&tbl8_entry[0], 0,
512 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
513 					sizeof(tbl8_entry[0]));
514 
515 			__atomic_store(tbl8_entry, &new_tbl8_entry,
516 					__ATOMIC_RELAXED);
517 
518 			/* Return group index for allocated tbl8 group. */
519 			return group_idx;
520 		}
521 	}
522 
523 	/* If there are no tbl8 groups free then return error. */
524 	return -ENOSPC;
525 }
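
/*
 * Note on the allocation scheme used above: each tbl8 group holds
 * RTE_LPM_TBL8_GROUP_NUM_ENTRIES entries (256, one per value of the last
 * octet), and in this implementation only the first entry of a group carries
 * the valid_group flag, so setting it is enough to reserve the whole group.
 */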
526 
527 static int32_t
528 tbl8_alloc(struct __rte_lpm *i_lpm)
529 {
530 	int32_t group_idx; /* tbl8 group index. */
531 
532 	group_idx = _tbl8_alloc(i_lpm);
533 	if (group_idx == -ENOSPC && i_lpm->dq != NULL) {
534 		/* If there are no tbl8 groups try to reclaim one. */
535 		if (rte_rcu_qsbr_dq_reclaim(i_lpm->dq, 1,
536 				NULL, NULL, NULL) == 0)
537 			group_idx = _tbl8_alloc(i_lpm);
538 	}
539 
540 	return group_idx;
541 }
542 
543 static int32_t
544 tbl8_free(struct __rte_lpm *i_lpm, uint32_t tbl8_group_start)
545 {
546 	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
547 	int status;
548 
549 	if (i_lpm->v == NULL) {
550 		/* Set tbl8 group invalid. */
551 		__atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
552 				__ATOMIC_RELAXED);
553 	} else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
554 		/* Wait for quiescent state change. */
555 		rte_rcu_qsbr_synchronize(i_lpm->v,
556 			RTE_QSBR_THRID_INVALID);
557 		/* Set tbl8 group invalid. */
558 		__atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
559 				__ATOMIC_RELAXED);
560 	} else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
561 		/* Push into QSBR defer queue. */
562 		status = rte_rcu_qsbr_dq_enqueue(i_lpm->dq,
563 				(void *)&tbl8_group_start);
564 		if (status == 1) {
565 			RTE_LOG(ERR, LPM, "Failed to push QSBR FIFO\n");
566 			return -rte_errno;
567 		}
568 	}
569 
570 	return 0;
571 }
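
/*
 * The three branches in tbl8_free() above correspond to the three
 * reclamation strategies: no RCU configured (the group is reused
 * immediately), RTE_LPM_QSBR_MODE_SYNC (block until all readers report a
 * quiescent state, then reuse) and RTE_LPM_QSBR_MODE_DQ (enqueue the group
 * index and let the defer queue reclaim it later, as in
 * __lpm_rcu_qsbr_free_resource()).
 */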
572 
573 static __rte_noinline int32_t
574 add_depth_small(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
575 		uint32_t next_hop)
576 {
577 #define group_idx next_hop
578 	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;
579 
580 	/* Calculate the index into Table24. */
581 	tbl24_index = ip >> 8;
582 	tbl24_range = depth_to_range(depth);
583 
584 	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
585 		/*
586 		 * For invalid entries, or for valid but non-extended tbl24
587 		 * entries, set the entry directly.
588 		 */
589 		if (!i_lpm->lpm.tbl24[i].valid || (i_lpm->lpm.tbl24[i].valid_group == 0 &&
590 				i_lpm->lpm.tbl24[i].depth <= depth)) {
591 
592 			struct rte_lpm_tbl_entry new_tbl24_entry = {
593 				.next_hop = next_hop,
594 				.valid = VALID,
595 				.valid_group = 0,
596 				.depth = depth,
597 			};
598 
599 			/* Setting tbl24 entry in one go to avoid race
600 			 * conditions
601 			 */
602 			__atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
603 					__ATOMIC_RELEASE);
604 
605 			continue;
606 		}
607 
608 		if (i_lpm->lpm.tbl24[i].valid_group == 1) {
609 			/* If the tbl24 entry is valid and extended, calculate
610 			 * the index into tbl8.
611 			 */
612 			tbl8_index = i_lpm->lpm.tbl24[i].group_idx *
613 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
614 			tbl8_group_end = tbl8_index +
615 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
616 
617 			for (j = tbl8_index; j < tbl8_group_end; j++) {
618 				if (!i_lpm->lpm.tbl8[j].valid ||
619 						i_lpm->lpm.tbl8[j].depth <= depth) {
620 					struct rte_lpm_tbl_entry
621 						new_tbl8_entry = {
622 						.valid = VALID,
623 						.valid_group = VALID,
624 						.depth = depth,
625 						.next_hop = next_hop,
626 					};
627 
628 					/*
629 					 * Setting tbl8 entry in one go to avoid
630 					 * race conditions
631 					 */
632 					__atomic_store(&i_lpm->lpm.tbl8[j],
633 						&new_tbl8_entry,
634 						__ATOMIC_RELAXED);
635 
636 					continue;
637 				}
638 			}
639 		}
640 	}
641 #undef group_idx
642 	return 0;
643 }
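
/*
 * Worked example: adding 10.0.0.0/8 with next hop 1 gives
 * tbl24_index = 0x0A0000 and tbl24_range = 1 << (24 - 8) = 65536, so the
 * loop above writes the next hop into every tbl24 entry from 0x0A0000 to
 * 0x0AFFFF and, for entries that are already extended, into every tbl8
 * entry currently covered by an equal or shallower prefix.
 */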
644 
645 static __rte_noinline int32_t
646 add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
647 		uint32_t next_hop)
648 {
649 #define group_idx next_hop
650 	uint32_t tbl24_index;
651 	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
652 		tbl8_range, i;
653 
654 	tbl24_index = (ip_masked >> 8);
655 	tbl8_range = depth_to_range(depth);
656 
657 	if (!i_lpm->lpm.tbl24[tbl24_index].valid) {
658 		/* Search for a free tbl8 group. */
659 		tbl8_group_index = tbl8_alloc(i_lpm);
660 
661 		/* Check tbl8 allocation was successful. */
662 		if (tbl8_group_index < 0) {
663 			return tbl8_group_index;
664 		}
665 
666 		/* Find index into tbl8 and range. */
667 		tbl8_index = (tbl8_group_index *
668 				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
669 				(ip_masked & 0xFF);
670 
671 		/* Set tbl8 entry. */
672 		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
673 			struct rte_lpm_tbl_entry new_tbl8_entry = {
674 				.valid = VALID,
675 				.depth = depth,
676 				.valid_group = i_lpm->lpm.tbl8[i].valid_group,
677 				.next_hop = next_hop,
678 			};
679 			__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
680 					__ATOMIC_RELAXED);
681 		}
682 
683 		/*
684 		 * Update tbl24 entry to point to new tbl8 entry. Note: The
685 		 * ext_flag and tbl8_index need to be updated simultaneously,
686 		 * so assign whole structure in one go
687 		 */
688 
689 		struct rte_lpm_tbl_entry new_tbl24_entry = {
690 			.group_idx = tbl8_group_index,
691 			.valid = VALID,
692 			.valid_group = 1,
693 			.depth = 0,
694 		};
695 
696 		/* The tbl24 entry must be written only after the
697 		 * tbl8 entries are written.
698 		 */
699 		__atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
700 				__ATOMIC_RELEASE);
701 
702 	} /* If the entry is valid but not extended, calculate the index into tbl8. */
703 	else if (i_lpm->lpm.tbl24[tbl24_index].valid_group == 0) {
704 		/* Search for free tbl8 group. */
705 		tbl8_group_index = tbl8_alloc(i_lpm);
706 
707 		if (tbl8_group_index < 0) {
708 			return tbl8_group_index;
709 		}
710 
711 		tbl8_group_start = tbl8_group_index *
712 				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
713 		tbl8_group_end = tbl8_group_start +
714 				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
715 
716 		/* Populate new tbl8 with tbl24 value. */
717 		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
718 			struct rte_lpm_tbl_entry new_tbl8_entry = {
719 				.valid = VALID,
720 				.depth = i_lpm->lpm.tbl24[tbl24_index].depth,
721 				.valid_group = i_lpm->lpm.tbl8[i].valid_group,
722 				.next_hop = i_lpm->lpm.tbl24[tbl24_index].next_hop,
723 			};
724 			__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
725 					__ATOMIC_RELAXED);
726 		}
727 
728 		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
729 
730 		/* Insert new rule into the tbl8 entry. */
731 		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
732 			struct rte_lpm_tbl_entry new_tbl8_entry = {
733 				.valid = VALID,
734 				.depth = depth,
735 				.valid_group = i_lpm->lpm.tbl8[i].valid_group,
736 				.next_hop = next_hop,
737 			};
738 			__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
739 					__ATOMIC_RELAXED);
740 		}
741 
742 		/*
743 		 * Update tbl24 entry to point to new tbl8 entry. Note: The
744 		 * ext_flag and tbl8_index need to be updated simultaneously,
745 		 * so assign whole structure in one go.
746 		 */
747 
748 		struct rte_lpm_tbl_entry new_tbl24_entry = {
749 				.group_idx = tbl8_group_index,
750 				.valid = VALID,
751 				.valid_group = 1,
752 				.depth = 0,
753 		};
754 
755 		/* The tbl24 entry must be written only after the
756 		 * tbl8 entries are written.
757 		 */
758 		__atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
759 				__ATOMIC_RELEASE);
760 
761 	} else { /*
762 		* If the entry is valid and extended, calculate the index into tbl8.
763 		*/
764 		tbl8_group_index = i_lpm->lpm.tbl24[tbl24_index].group_idx;
765 		tbl8_group_start = tbl8_group_index *
766 				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
767 		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
768 
769 		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
770 
771 			if (!i_lpm->lpm.tbl8[i].valid ||
772 					i_lpm->lpm.tbl8[i].depth <= depth) {
773 				struct rte_lpm_tbl_entry new_tbl8_entry = {
774 					.valid = VALID,
775 					.depth = depth,
776 					.next_hop = next_hop,
777 					.valid_group = i_lpm->lpm.tbl8[i].valid_group,
778 				};
779 
780 				/*
781 				 * Setting tbl8 entry in one go to avoid race
782 				 * condition
783 				 */
784 				__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
785 						__ATOMIC_RELAXED);
786 
787 				continue;
788 			}
789 		}
790 	}
791 #undef group_idx
792 	return 0;
793 }
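
/*
 * Worked example: adding 10.1.1.192/26 gives tbl24_index = 0x0A0101 and
 * tbl8_range = 1 << (32 - 26) = 64. Depending on the state of that tbl24
 * entry, a tbl8 group is allocated (and pre-filled with the shorter prefix
 * it may already hold) or reused, and the 64 tbl8 entries at offsets
 * 192..255 inside the group receive the new depth and next hop.
 */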
794 
795 /*
796  * Add a route
797  */
798 int
799 rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
800 		uint32_t next_hop)
801 {
802 	int32_t rule_index, status = 0;
803 	struct __rte_lpm *i_lpm;
804 	uint32_t ip_masked;
805 
806 	/* Check user arguments. */
807 	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
808 		return -EINVAL;
809 
810 	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
811 	ip_masked = ip & depth_to_mask(depth);
812 
813 	/* Add the rule to the rule table. */
814 	rule_index = rule_add(i_lpm, ip_masked, depth, next_hop);
815 
816 	/* Skip the table entries update if the rule is the same as
817 	 * the rule already in the rules table.
818 	 */
819 	if (rule_index == -EEXIST)
820 		return 0;
821 
822 	/* If there is no space available for the new rule, return an error. */
823 	if (rule_index < 0) {
824 		return rule_index;
825 	}
826 
827 	if (depth <= MAX_DEPTH_TBL24) {
828 		status = add_depth_small(i_lpm, ip_masked, depth, next_hop);
829 	} else { /* If depth > MAX_DEPTH_TBL24 */
830 		status = add_depth_big(i_lpm, ip_masked, depth, next_hop);
831 
832 		/*
833 		 * If add fails due to exhaustion of tbl8 extensions delete
834 		 * rule that was added to rule table.
835 		 */
836 		if (status < 0) {
837 			rule_delete(i_lpm, rule_index, depth);
838 
839 			return status;
840 		}
841 	}
842 
843 	return 0;
844 }
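
/*
 * Illustrative usage sketch (not part of the library, compiled out): adding
 * a /24 route and looking it up. RTE_IPV4() comes from rte_ip.h, which this
 * file does not include; the addresses and next-hop id are arbitrary
 * example values.
 */
#if 0
#include <rte_ip.h>

static void
example_lpm_add_and_lookup(struct rte_lpm *lpm)
{
	uint32_t next_hop;

	/* Install 192.168.1.0/24 -> next hop 5. */
	if (rte_lpm_add(lpm, RTE_IPV4(192, 168, 1, 0), 24, 5) < 0)
		return;

	/* Any host inside the prefix matches; rte_lpm_lookup() returns 0 on a hit. */
	if (rte_lpm_lookup(lpm, RTE_IPV4(192, 168, 1, 42), &next_hop) == 0)
		printf("next hop = %u\n", next_hop);
}
#endif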
845 
846 /*
847  * Look for a rule in the high-level rules table
848  */
849 int
850 rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
851 uint32_t *next_hop)
852 {
853 	struct __rte_lpm *i_lpm;
854 	uint32_t ip_masked;
855 	int32_t rule_index;
856 
857 	/* Check user arguments. */
858 	if ((lpm == NULL) ||
859 		(next_hop == NULL) ||
860 		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
861 		return -EINVAL;
862 
863 	/* Look for the rule using rule_find. */
864 	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
865 	ip_masked = ip & depth_to_mask(depth);
866 	rule_index = rule_find(i_lpm, ip_masked, depth);
867 
868 	if (rule_index >= 0) {
869 		*next_hop = i_lpm->rules_tbl[rule_index].next_hop;
870 		return 1;
871 	}
872 
873 	/* If rule is not found return 0. */
874 	return 0;
875 }
876 
877 static int32_t
878 find_previous_rule(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
879 		uint8_t *sub_rule_depth)
880 {
881 	int32_t rule_index;
882 	uint32_t ip_masked;
883 	uint8_t prev_depth;
884 
885 	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
886 		ip_masked = ip & depth_to_mask(prev_depth);
887 
888 		rule_index = rule_find(i_lpm, ip_masked, prev_depth);
889 
890 		if (rule_index >= 0) {
891 			*sub_rule_depth = prev_depth;
892 			return rule_index;
893 		}
894 	}
895 
896 	return -1;
897 }
898 
899 static int32_t
900 delete_depth_small(struct __rte_lpm *i_lpm, uint32_t ip_masked,
901 	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
902 {
903 #define group_idx next_hop
904 	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;
905 
906 	/* Calculate the range and index into Table24. */
907 	tbl24_range = depth_to_range(depth);
908 	tbl24_index = (ip_masked >> 8);
909 	struct rte_lpm_tbl_entry zero_tbl24_entry = {0};
910 
911 	/*
912 	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
913 	 * and a positive number indicates a sub_rule_index.
914 	 */
915 	if (sub_rule_index < 0) {
916 		/*
917 		 * If no replacement rule exists then invalidate entries
918 		 * associated with this rule.
919 		 */
920 		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
921 
922 			if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
923 					i_lpm->lpm.tbl24[i].depth <= depth) {
924 				__atomic_store(&i_lpm->lpm.tbl24[i],
925 					&zero_tbl24_entry, __ATOMIC_RELEASE);
926 			} else if (i_lpm->lpm.tbl24[i].valid_group == 1) {
927 				/*
928 				 * If TBL24 entry is extended, then there has
929 				 * to be a rule with depth >= 25 in the
930 				 * associated TBL8 group.
931 				 */
932 
933 				tbl8_group_index = i_lpm->lpm.tbl24[i].group_idx;
934 				tbl8_index = tbl8_group_index *
935 						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
936 
937 				for (j = tbl8_index; j < (tbl8_index +
938 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
939 
940 					if (i_lpm->lpm.tbl8[j].depth <= depth)
941 						i_lpm->lpm.tbl8[j].valid = INVALID;
942 				}
943 			}
944 		}
945 	} else {
946 		/*
947 		 * If a replacement rule exists then modify entries
948 		 * associated with this rule.
949 		 */
950 
951 		struct rte_lpm_tbl_entry new_tbl24_entry = {
952 			.next_hop = i_lpm->rules_tbl[sub_rule_index].next_hop,
953 			.valid = VALID,
954 			.valid_group = 0,
955 			.depth = sub_rule_depth,
956 		};
957 
958 		struct rte_lpm_tbl_entry new_tbl8_entry = {
959 			.valid = VALID,
960 			.valid_group = VALID,
961 			.depth = sub_rule_depth,
962 			.next_hop = i_lpm->rules_tbl
963 			[sub_rule_index].next_hop,
964 		};
965 
966 		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
967 
968 			if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
969 					i_lpm->lpm.tbl24[i].depth <= depth) {
970 				__atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
971 						__ATOMIC_RELEASE);
972 			} else  if (i_lpm->lpm.tbl24[i].valid_group == 1) {
973 				/*
974 				 * If TBL24 entry is extended, then there has
975 				 * to be a rule with depth >= 25 in the
976 				 * associated TBL8 group.
977 				 */
978 
979 				tbl8_group_index = i_lpm->lpm.tbl24[i].group_idx;
980 				tbl8_index = tbl8_group_index *
981 						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
982 
983 				for (j = tbl8_index; j < (tbl8_index +
984 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {
985 
986 					if (i_lpm->lpm.tbl8[j].depth <= depth)
987 						__atomic_store(&i_lpm->lpm.tbl8[j],
988 							&new_tbl8_entry,
989 							__ATOMIC_RELAXED);
990 				}
991 			}
992 		}
993 	}
994 #undef group_idx
995 	return 0;
996 }
997 
998 /*
999  * Checks if table 8 group can be recycled.
1000  *
1001  * Return of -EEXIST means tbl8 is in use and thus cannot be recycled.
1002  * Return of -EINVAL means tbl8 is empty and thus can be recycled.
1003  * Return of a value > -1 means tbl8 is in use but every entry holds the same
1004  * rule, and thus the group can be recycled into a single tbl24 entry.
1005  */
1006 static int32_t
1007 tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8,
1008 		uint32_t tbl8_group_start)
1009 {
1010 	uint32_t tbl8_group_end, i;
1011 	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1012 
1013 	/*
1014 	 * Check the first entry of the given tbl8. If it is invalid we know
1015 	 * this tbl8 does not contain any rule with a depth of MAX_DEPTH_TBL24
1016 	 * or less (such a rule would set every entry in the tbl8 group) and
1017 	 * thus this table cannot be recycled into a single tbl24 entry.
1018 	 */
1019 	if (tbl8[tbl8_group_start].valid) {
1020 		/*
1021 		 * If the first entry is valid, check if its depth is
1022 		 * MAX_DEPTH_TBL24 or less and, if so, check the rest of the
1023 		 * entries to verify that they all have this same depth.
1024 		 */
1025 		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
1026 			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
1027 					i++) {
1028 
1029 				if (tbl8[i].depth !=
1030 						tbl8[tbl8_group_start].depth) {
1031 
1032 					return -EEXIST;
1033 				}
1034 			}
1035 			/* If all entries are the same return the tbl8 index. */
1036 			return tbl8_group_start;
1037 		}
1038 
1039 		return -EEXIST;
1040 	}
1041 	/*
1042 	 * If the first entry is invalid check if the rest of the entries in
1043 	 * the tbl8 are invalid.
1044 	 */
1045 	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
1046 		if (tbl8[i].valid)
1047 			return -EEXIST;
1048 	}
1049 	/* If no valid entries are found then return -EINVAL. */
1050 	return -EINVAL;
1051 }
1052 
1053 static int32_t
1054 delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
1055 	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
1056 {
1057 #define group_idx next_hop
1058 	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
1059 			tbl8_range, i;
1060 	int32_t tbl8_recycle_index, status = 0;
1061 
1062 	/*
1063 	 * Calculate the index into tbl24 and range. Note: All depths larger
1064 	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
1065 	 */
1066 	tbl24_index = ip_masked >> 8;
1067 
1068 	/* Calculate the index into tbl8 and range. */
1069 	tbl8_group_index = i_lpm->lpm.tbl24[tbl24_index].group_idx;
1070 	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
1071 	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
1072 	tbl8_range = depth_to_range(depth);
1073 
1074 	if (sub_rule_index < 0) {
1075 		/*
1076 		 * Loop through the range of entries on tbl8 for which the
1077 		 * rule_to_delete must be removed or modified.
1078 		 */
1079 		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1080 			if (i_lpm->lpm.tbl8[i].depth <= depth)
1081 				i_lpm->lpm.tbl8[i].valid = INVALID;
1082 		}
1083 	} else {
1084 		/* Set new tbl8 entry. */
1085 		struct rte_lpm_tbl_entry new_tbl8_entry = {
1086 			.valid = VALID,
1087 			.depth = sub_rule_depth,
1088 			.valid_group = i_lpm->lpm.tbl8[tbl8_group_start].valid_group,
1089 			.next_hop = i_lpm->rules_tbl[sub_rule_index].next_hop,
1090 		};
1091 
1092 		/*
1093 		 * Loop through the range of entries on tbl8 for which the
1094 		 * rule_to_delete must be modified.
1095 		 */
1096 		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
1097 			if (i_lpm->lpm.tbl8[i].depth <= depth)
1098 				__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
1099 						__ATOMIC_RELAXED);
1100 		}
1101 	}
1102 
1103 	/*
1104 	 * Check if there are any valid entries in this tbl8 group. If all
1105 	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
1106 	 * associated tbl24 entry.
1107 	 */
1108 
1109 	tbl8_recycle_index = tbl8_recycle_check(i_lpm->lpm.tbl8, tbl8_group_start);
1110 
1111 	if (tbl8_recycle_index == -EINVAL) {
1112 		/* Set tbl24 before freeing tbl8 to avoid race condition.
1113 		 * Prevent the free of the tbl8 group from hoisting.
1114 		 */
1115 		i_lpm->lpm.tbl24[tbl24_index].valid = 0;
1116 		__atomic_thread_fence(__ATOMIC_RELEASE);
1117 		status = tbl8_free(i_lpm, tbl8_group_start);
1118 	} else if (tbl8_recycle_index > -1) {
1119 		/* Update tbl24 entry. */
1120 		struct rte_lpm_tbl_entry new_tbl24_entry = {
1121 			.next_hop = i_lpm->lpm.tbl8[tbl8_recycle_index].next_hop,
1122 			.valid = VALID,
1123 			.valid_group = 0,
1124 			.depth = i_lpm->lpm.tbl8[tbl8_recycle_index].depth,
1125 		};
1126 
1127 		/* Set tbl24 before freeing tbl8 to avoid race condition.
1128 		 * Prevent the free of the tbl8 group from hoisting.
1129 		 */
1130 		__atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
1131 				__ATOMIC_RELAXED);
1132 		__atomic_thread_fence(__ATOMIC_RELEASE);
1133 		status = tbl8_free(i_lpm, tbl8_group_start);
1134 	}
1135 #undef group_idx
1136 	return status;
1137 }
1138 
1139 /*
1140  * Deletes a rule
1141  */
1142 int
1143 rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
1144 {
1145 	int32_t rule_to_delete_index, sub_rule_index;
1146 	struct __rte_lpm *i_lpm;
1147 	uint32_t ip_masked;
1148 	uint8_t sub_rule_depth;
1149 	/*
1150 	 * Check input arguments. Note: ip is an unsigned 32-bit integer, so
1151 	 * every value is valid and it need not be checked.
1152 	 */
1153 	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1154 		return -EINVAL;
1155 	}
1156 
1157 	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
1158 	ip_masked = ip & depth_to_mask(depth);
1159 
1160 	/*
1161 	 * Find the index of the input rule, that needs to be deleted, in the
1162 	 * rule table.
1163 	 */
1164 	rule_to_delete_index = rule_find(i_lpm, ip_masked, depth);
1165 
1166 	/*
1167 	 * Check if rule_to_delete_index was found. If no rule was found the
1168 	 * function rule_find returns -EINVAL.
1169 	 */
1170 	if (rule_to_delete_index < 0)
1171 		return -EINVAL;
1172 
1173 	/* Delete the rule from the rule table. */
1174 	rule_delete(i_lpm, rule_to_delete_index, depth);
1175 
1176 	/*
1177 	 * Find rule to replace the rule_to_delete. If there is no rule to
1178 	 * replace the rule_to_delete we return -1 and invalidate the table
1179 	 * entries associated with this rule.
1180 	 */
1181 	sub_rule_depth = 0;
1182 	sub_rule_index = find_previous_rule(i_lpm, ip, depth, &sub_rule_depth);
1183 
1184 	/*
1185 	 * If the input depth value is MAX_DEPTH_TBL24 or less, use
1186 	 * delete_depth_small; otherwise use delete_depth_big.
1187 	 */
1188 	if (depth <= MAX_DEPTH_TBL24) {
1189 		return delete_depth_small(i_lpm, ip_masked, depth,
1190 				sub_rule_index, sub_rule_depth);
1191 	} else { /* If depth > MAX_DEPTH_TBL24 */
1192 		return delete_depth_big(i_lpm, ip_masked, depth, sub_rule_index,
1193 				sub_rule_depth);
1194 	}
1195 }
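
/*
 * Illustrative usage sketch (not part of the library, compiled out):
 * removing the /24 installed in the earlier sketch, or wiping the whole
 * table. RTE_IPV4() again comes from rte_ip.h.
 */
#if 0
static void
example_lpm_remove_routes(struct rte_lpm *lpm)
{
	/* Hosts in 192.168.1.0/24 now fall back to any shorter covering prefix. */
	(void)rte_lpm_delete(lpm, RTE_IPV4(192, 168, 1, 0), 24);

	/* Or drop every rule and table entry in one call. */
	rte_lpm_delete_all(lpm);
}
#endif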
1196 
1197 /*
1198  * Delete all rules from the LPM table.
1199  */
1200 void
1201 rte_lpm_delete_all(struct rte_lpm *lpm)
1202 {
1203 	struct __rte_lpm *i_lpm;
1204 
1205 	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
1206 	/* Zero rule information. */
1207 	memset(i_lpm->rule_info, 0, sizeof(i_lpm->rule_info));
1208 
1209 	/* Zero tbl24. */
1210 	memset(i_lpm->lpm.tbl24, 0, sizeof(i_lpm->lpm.tbl24));
1211 
1212 	/* Zero tbl8. */
1213 	memset(i_lpm->lpm.tbl8, 0, sizeof(i_lpm->lpm.tbl8[0])
1214 			* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * i_lpm->number_tbl8s);
1215 
1216 	/* Delete all rules from the rules table. */
1217 	memset(i_lpm->rules_tbl, 0, sizeof(i_lpm->rules_tbl[0]) * i_lpm->max_rules);
1218 }
1219