/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <sys/param.h>
#include <sys/ctype.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/rwlock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/kernel.h>

int errno = 0, rte_errno = 0;

#if 0
#include <string.h>
#include <stdint.h>
#include <errno.h>
#include <stdarg.h>
#include <stdio.h>
#include <sys/queue.h>

#include <rte_log.h>
#include <rte_branch_prediction.h>
#include <rte_common.h>
#include <rte_memory.h> /* for definition of RTE_CACHE_LINE_SIZE */
#include <rte_malloc.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>
#include <rte_errno.h>
#include <rte_rwlock.h>
#include <rte_spinlock.h>
#include <rte_tailq.h>
#endif

#include "rte_shim.h"
#include "rte_lpm.h"

#if 0
TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);

static struct rte_tailq_elem rte_lpm_tailq = {
	.name = "RTE_LPM",
};
EAL_REGISTER_TAILQ(rte_lpm_tailq)
#endif

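/*
 * Layout note: this is the DIR-24-8 scheme. tbl24 is indexed by the top
 * 24 bits of an address (ip >> 8) and resolves prefixes up to
 * MAX_DEPTH_TBL24 directly; longer prefixes extend a tbl24 entry into a
 * tbl8 group of RTE_LPM_TBL8_GROUP_NUM_ENTRIES (256) entries indexed by
 * the low 8 bits of the address.
 */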
#define MAX_DEPTH_TBL24 24

enum valid_flag {
	INVALID = 0,
	VALID
};

/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
#define VERIFY_DEPTH(depth) do {				\
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))	\
		rte_panic("LPM: Invalid depth (%u) at line %d",	\
				(unsigned)(depth), __LINE__);	\
} while (0)
#else
#define VERIFY_DEPTH(depth)
#endif

/*
 * Converts a given depth value to its corresponding mask value.
 *
 * depth (IN)  : range = 1 - 32
 * mask  (OUT) : 32bit mask
 */
static uint32_t __attribute__((pure))
depth_to_mask(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/* To calculate a mask start with a 1 on the left hand side and right
	 * shift while populating the left hand side with 1's
	 */
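	/*
	 * Worked examples (the expression relies on an arithmetic right
	 * shift of the signed intermediate):
	 *   depth  1 -> 0x80000000
	 *   depth 24 -> 0xFFFFFF00
	 *   depth 32 -> 0xFFFFFFFF
	 */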
	return (int)0x80000000 >> (depth - 1);
}

/*
 * Converts given depth value to its corresponding range value.
 */
static uint32_t __attribute__((pure))
depth_to_range(uint8_t depth)
{
	VERIFY_DEPTH(depth);

	/*
	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
	 */
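	/*
	 * E.g. depth 16 -> 1 << 8 = 256 tbl24 entries covered, depth 24 -> 1;
	 * for depths > 24, depth 28 -> 16 tbl8 entries and depth 32 -> 1.
	 */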
	if (depth <= MAX_DEPTH_TBL24)
		return 1 << (MAX_DEPTH_TBL24 - depth);

	/* Else if depth is greater than 24 */
	return 1 << (RTE_LPM_MAX_DEPTH - depth);
}

#if 0
/*
 * Find an existing lpm table and return a pointer to it.
 */
struct rte_lpm *
rte_lpm_find_existing(const char *name)
{
	struct rte_lpm *l = NULL;
	struct rte_tailq_entry *te;
	struct rte_lpm_list *lpm_list;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, lpm_list, next) {
		l = te->data;
		if (strncmp(name, l->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}
	rte_mcfg_tailq_read_unlock();

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}

	return l;
}
#endif

/*
 * Allocates memory for LPM object
 */
struct rte_lpm *
rte_lpm_create(const char *name, int socket_id,
		const struct rte_lpm_config *config)
{
	char mem_name[RTE_LPM_NAMESIZE];
	struct rte_lpm *lpm = NULL;
	//struct rte_tailq_entry *te;
	uint32_t mem_size, rules_size, tbl8s_size;
	//struct rte_lpm_list *lpm_list;

	//lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);

	/* Check user arguments. */
	if ((name == NULL) || (socket_id < -1)
			|| config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);

	/* Determine the amount of memory to allocate. */
	mem_size = sizeof(*lpm);
	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
	tbl8s_size = (sizeof(struct rte_lpm_tbl_entry) *
			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s);
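	/*
	 * Sizing example (assuming the 4-byte entries asserted above and
	 * 256-entry tbl8 groups): number_tbl8s == 256 gives a tbl8s_size of
	 * 4 * 256 * 256 = 256 KiB, in addition to max_rules rule entries and
	 * the lpm header itself.
	 */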

#if 0
	rte_mcfg_tailq_write_lock();

	/* guarantee there's no existing */
	TAILQ_FOREACH(te, lpm_list, next) {
		lpm = te->data;
		if (strncmp(name, lpm->name, RTE_LPM_NAMESIZE) == 0)
			break;
	}

	if (te != NULL) {
		lpm = NULL;
		rte_errno = EEXIST;
		goto exit;
	}

	/* allocate tailq entry */
	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
	if (te == NULL) {
		RTE_LOG(ERR, LPM, "Failed to allocate tailq entry\n");
		rte_errno = ENOMEM;
		goto exit;
	}
#endif

	/* Allocate memory to store the LPM data structures. */
	lpm = rte_zmalloc_socket(mem_name, mem_size,
			RTE_CACHE_LINE_SIZE, socket_id);
	if (lpm == NULL) {
		RTE_LOG(ERR, LPM, "LPM memory allocation failed\n");
		//rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->rules_tbl = rte_zmalloc_socket(NULL,
			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->rules_tbl == NULL) {
		RTE_LOG(ERR, LPM, "LPM rules_tbl memory allocation failed\n");
		rte_free(lpm);
		lpm = NULL;
		//rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	lpm->tbl8 = rte_zmalloc_socket(NULL,
			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);

	if (lpm->tbl8 == NULL) {
		RTE_LOG(ERR, LPM, "LPM tbl8 memory allocation failed\n");
		rte_free(lpm->rules_tbl);
		rte_free(lpm);
		lpm = NULL;
		//rte_free(te);
		rte_errno = ENOMEM;
		goto exit;
	}

	/* Save user arguments. */
	lpm->max_rules = config->max_rules;
	lpm->number_tbl8s = config->number_tbl8s;
	strlcpy(lpm->name, name, sizeof(lpm->name));

	//te->data = lpm;

	//TAILQ_INSERT_TAIL(lpm_list, te, next);

exit:
	rte_mcfg_tailq_write_unlock();

	return lpm;
}

/*
 * Deallocates memory for given LPM table.
 */
void
rte_lpm_free(struct rte_lpm *lpm)
{
#if 0
	struct rte_lpm_list *lpm_list;
	struct rte_tailq_entry *te;

	/* Check user arguments. */
	if (lpm == NULL)
		return;

	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);

	rte_mcfg_tailq_write_lock();

	/* find our tailq entry */
	TAILQ_FOREACH(te, lpm_list, next) {
		if (te->data == (void *) lpm)
			break;
	}
	if (te != NULL)
		TAILQ_REMOVE(lpm_list, te, next);

	rte_mcfg_tailq_write_unlock();
#endif

	rte_free(lpm->tbl8);
	rte_free(lpm->rules_tbl);
	rte_free(lpm);
	//rte_free(te);
}

#if 0
/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
 * to refer to depth 1 because even though the depth range is 1 - 32, depths
 * are stored in the rule table from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static int32_t
rule_add(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
	uint32_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update next hop and return. */
			if (lpm->rules_tbl[rule_index].ip == ip_masked) {

				if (lpm->rules_tbl[rule_index].next_hop
						== next_hop)
					return -EEXIST;
				lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}

		if (rule_index == lpm->max_rules)
			return -ENOSPC;
	} else {
		/* Calculate the position in which the rule will be stored. */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = lpm->rule_info[i - 1].first_rule
						+ lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == lpm->max_rules)
			return -ENOSPC;

		lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array. */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules == lpm->max_rules)
			return -ENOSPC;

		if (lpm->rule_info[i - 1].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i - 1].first_rule
				+ lpm->rule_info[i - 1].used_rules]
					= lpm->rules_tbl[lpm->rule_info[i - 1].first_rule];
			lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	lpm->rules_tbl[rule_index].ip = ip_masked;
	lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}

/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static void
rule_delete(struct rte_lpm *lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	lpm->rules_tbl[rule_index] =
			lpm->rules_tbl[lpm->rule_info[depth - 1].first_rule
			+ lpm->rule_info[depth - 1].used_rules - 1];

	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (lpm->rule_info[i].used_rules > 0) {
			lpm->rules_tbl[lpm->rule_info[i].first_rule - 1] =
					lpm->rules_tbl[lpm->rule_info[i].first_rule
						+ lpm->rule_info[i].used_rules - 1];
			lpm->rule_info[i].first_rule--;
		}
	}

	lpm->rule_info[depth - 1].used_rules--;
}

/*
 * Finds a rule in rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static int32_t
rule_find(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth)
{
	uint32_t rule_gindex, last_rule, rule_index;

	VERIFY_DEPTH(depth);

	rule_gindex = lpm->rule_info[depth - 1].first_rule;
	last_rule = rule_gindex + lpm->rule_info[depth - 1].used_rules;

	/* Scan used rules at given depth to find rule. */
	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
		/* If rule is found return the rule index. */
		if (lpm->rules_tbl[rule_index].ip == ip_masked)
			return rule_index;
	}

	/* If rule is not found return -EINVAL. */
	return -EINVAL;
}
#endif

/*
 * Find, clean and allocate a tbl8.
 */
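/*
 * A group is treated as free when the valid_group flag of its first entry is
 * clear; on success the group is zeroed, its first entry is marked
 * valid_group = VALID and the group index is returned, otherwise -ENOSPC.
 */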
static int32_t
tbl8_alloc(struct rte_lpm_tbl_entry *tbl8, uint32_t number_tbl8s)
{
	uint32_t group_idx; /* tbl8 group index. */
	struct rte_lpm_tbl_entry *tbl8_entry;

	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
	for (group_idx = 0; group_idx < number_tbl8s; group_idx++) {
		tbl8_entry = &tbl8[group_idx * RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
		/* If a free tbl8 group is found clean it and set as VALID. */
		if (!tbl8_entry->valid_group) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.next_hop = 0,
				.valid = INVALID,
				.depth = 0,
				.valid_group = VALID,
			};

			memset(&tbl8_entry[0], 0,
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
					sizeof(tbl8_entry[0]));

			__atomic_store(tbl8_entry, &new_tbl8_entry,
					__ATOMIC_RELAXED);

			/* Return group index for allocated tbl8 group. */
			return group_idx;
		}
	}

	/* If there are no tbl8 groups free then return error. */
	return -ENOSPC;
}

static void
tbl8_free(struct rte_lpm_tbl_entry *tbl8, uint32_t tbl8_group_start)
{
	/* Set tbl8 group invalid */
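	/*
	 * Clearing valid_group on the first entry of the group is enough for
	 * tbl8_alloc() to treat the whole group as free again.
	 */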
	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};

	__atomic_store(&tbl8[tbl8_group_start], &zero_tbl8_entry,
			__ATOMIC_RELAXED);
}

static __rte_noinline int32_t
add_depth_small(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

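	/*
	 * Every address sharing the top 24 bits shares one tbl24 entry, so a
	 * short prefix spans several entries: e.g. a /16 route updates
	 * 1 << (24 - 16) = 256 consecutive tbl24 slots.
	 */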
	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl 24 entries set
		 * entry.
		 */
		if (!lpm->tbl24[i].valid || (lpm->tbl24[i].valid_group == 0 &&
				lpm->tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry new_tbl24_entry = {
				.next_hop = next_hop,
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};

			/* Setting tbl24 entry in one go to avoid race
			 * conditions
			 */
			__atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
					__ATOMIC_RELEASE);

			continue;
		}

		if (lpm->tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 * index into tbl8.
			 */
			tbl8_index = lpm->tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!lpm->tbl8[j].valid ||
						lpm->tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
						.next_hop = next_hop,
					};

					/*
					 * Setting tbl8 entry in one go to avoid
					 * race conditions
					 */
					__atomic_store(&lpm->tbl8[j],
							&new_tbl8_entry,
							__ATOMIC_RELAXED);

					continue;
				}
			}
		}
	}
#undef group_idx
	return 0;
}

static __rte_noinline int32_t
add_depth_big(struct rte_lpm *lpm, uint32_t ip_masked, uint8_t depth,
		uint32_t next_hop)
{
#define group_idx next_hop
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

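	/*
	 * Only one tbl24 entry is involved here; the prefix is expanded
	 * inside a single tbl8 group instead, e.g. a /28 route fills
	 * 1 << (32 - 28) = 16 consecutive tbl8 entries.
	 */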
	if (!lpm->tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = lpm->tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (lpm->tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc(lpm->tbl8, lpm->number_tbl8s);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value. */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = lpm->tbl24[tbl24_index].depth,
				.valid_group = lpm->tbl8[i].valid_group,
				.next_hop = lpm->tbl24[tbl24_index].next_hop,
			};
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = lpm->tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);

	} else { /*
		 * If the entry is valid and extended, calculate the index
		 * into tbl8.
		 */
		tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!lpm->tbl8[i].valid ||
					lpm->tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group = lpm->tbl8[i].valid_group,
				};

				/*
				 * Setting tbl8 entry in one go to avoid race
				 * condition
				 */
				__atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
						__ATOMIC_RELAXED);

				continue;
			}
		}
	}
#undef group_idx
	return 0;
}

/*
 * Add a route
 */
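/*
 * Illustrative usage sketch (example values only, not taken from this file):
 * create a table, then insert 192.0.2.0/24 with next hop id 5.
 *
 *	struct rte_lpm_config cfg = {
 *		.max_rules = 1024,
 *		.number_tbl8s = 256,
 *	};
 *	struct rte_lpm *lpm = rte_lpm_create("example", -1, &cfg);
 *
 *	if (lpm != NULL)
 *		rte_lpm_add(lpm, 0xC0000200, 24, 5);
 */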
int
rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
	int32_t status = 0;
	uint32_t ip_masked;

	/* Check user arguments. */
	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	ip_masked = ip & depth_to_mask(depth);

#if 0
	/* Add the rule to the rule table. */
	rule_index = rule_add(lpm, ip_masked, depth, next_hop);

	/* Skip table entries update if the rule is the same as
	 * the rule in the rules table.
	 */
	if (rule_index == -EEXIST)
		return 0;

	/* If there is no space available for the new rule return error. */
	if (rule_index < 0) {
		return rule_index;
	}
#endif

	if (depth <= MAX_DEPTH_TBL24) {
		status = add_depth_small(lpm, ip_masked, depth, next_hop);
	} else { /* If depth > MAX_DEPTH_TBL24 */
		status = add_depth_big(lpm, ip_masked, depth, next_hop);

		/*
		 * If add fails due to exhaustion of tbl8 extensions delete
		 * rule that was added to rule table.
		 */
		if (status < 0) {
			//rule_delete(lpm, rule_index, depth);

			return status;
		}
	}

	return 0;
}

#if 0
/*
 * Look for a rule in the high-level rules table
 */
int
rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint32_t *next_hop)
{
	uint32_t ip_masked;
	int32_t rule_index;

	/* Check user arguments. */
	if ((lpm == NULL) ||
		(next_hop == NULL) ||
		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
		return -EINVAL;

	/* Look for the rule using rule_find. */
	ip_masked = ip & depth_to_mask(depth);
	rule_index = rule_find(lpm, ip_masked, depth);

	if (rule_index >= 0) {
		*next_hop = lpm->rules_tbl[rule_index].next_hop;
		return 1;
	}

	/* If rule is not found return 0. */
	return 0;
}

static int32_t
find_previous_rule(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
		uint8_t *sub_rule_depth)
{
	int32_t rule_index;
	uint32_t ip_masked;
	uint8_t prev_depth;

	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
		ip_masked = ip & depth_to_mask(prev_depth);

		rule_index = rule_find(lpm, ip_masked, prev_depth);

		if (rule_index >= 0) {
			*sub_rule_depth = prev_depth;
			return rule_index;
		}
	}

	return -1;
}
#endif

static int32_t
delete_depth_small(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, uint32_t sub_rule_nhop, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);
	struct rte_lpm_tbl_entry zero_tbl24_entry = {0};

	/*
	 * Firstly check sub_rule_nhop. A value of zero indicates that there
	 * is no replacement rule, while a non-zero value carries the next hop
	 * of the replacement rule.
	 */
	if (sub_rule_nhop == 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				__atomic_store(&lpm->tbl24[i],
					&zero_tbl24_entry, __ATOMIC_RELEASE);
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						lpm->tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = sub_rule_nhop,
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = sub_rule_nhop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			if (lpm->tbl24[i].valid_group == 0 &&
					lpm->tbl24[i].depth <= depth) {
				__atomic_store(&lpm->tbl24[i], &new_tbl24_entry,
						__ATOMIC_RELEASE);
			} else if (lpm->tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = lpm->tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (lpm->tbl8[j].depth <= depth)
						__atomic_store(&lpm->tbl8[j],
							&new_tbl8_entry,
							__ATOMIC_RELAXED);
				}
			}
		}
	}
#undef group_idx
	return 0;
}

/*
 * Checks if table 8 group can be recycled.
 *
 * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
 * Return of -EINVAL means tbl8 is empty and thus can be recycled
 * Return of value > -1 means tbl8 is in use but has all the same values and
 * thus can be recycled
 */
static int32_t
tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8,
		uint32_t tbl8_group_start)
{
	uint32_t tbl8_group_end, i;
	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

	/*
	 * Check the first entry of the given tbl8. If it is invalid we know
	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
	 * (As they would affect all entries in a tbl8) and thus this table
	 * can not be recycled.
	 */
	if (tbl8[tbl8_group_start].valid) {
		/*
		 * If first entry is valid check if the depth is less than 24
		 * and if so check the rest of the entries to verify that they
		 * are all of this depth.
		 */
		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
					i++) {

				if (tbl8[i].depth !=
						tbl8[tbl8_group_start].depth) {

					return -EEXIST;
				}
			}
			/* If all entries are the same return the tbl8 index */
			return tbl8_group_start;
		}

		return -EEXIST;
	}
	/*
	 * If the first entry is invalid check if the rest of the entries in
	 * the tbl8 are invalid.
	 */
	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
		if (tbl8[i].valid)
			return -EEXIST;
	}
	/* If no valid entries are found then return -EINVAL. */
	return -EINVAL;
}

static int32_t
delete_depth_big(struct rte_lpm *lpm, uint32_t ip_masked,
	uint8_t depth, uint32_t sub_rule_nhop, uint8_t sub_rule_depth)
{
#define group_idx next_hop
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = lpm->tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

965*537d1343SAlexander V. Chernikov if (sub_rule_nhop == 0) {
966*537d1343SAlexander V. Chernikov /*
967*537d1343SAlexander V. Chernikov * Loop through the range of entries on tbl8 for which the
968*537d1343SAlexander V. Chernikov * rule_to_delete must be removed or modified.
969*537d1343SAlexander V. Chernikov */
970*537d1343SAlexander V. Chernikov for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
971*537d1343SAlexander V. Chernikov if (lpm->tbl8[i].depth <= depth)
972*537d1343SAlexander V. Chernikov lpm->tbl8[i].valid = INVALID;
973*537d1343SAlexander V. Chernikov }
974*537d1343SAlexander V. Chernikov } else {
975*537d1343SAlexander V. Chernikov /* Set new tbl8 entry. */
976*537d1343SAlexander V. Chernikov struct rte_lpm_tbl_entry new_tbl8_entry = {
977*537d1343SAlexander V. Chernikov .valid = VALID,
978*537d1343SAlexander V. Chernikov .depth = sub_rule_depth,
979*537d1343SAlexander V. Chernikov .valid_group = lpm->tbl8[tbl8_group_start].valid_group,
980*537d1343SAlexander V. Chernikov .next_hop = sub_rule_nhop,
981*537d1343SAlexander V. Chernikov };
982*537d1343SAlexander V. Chernikov
983*537d1343SAlexander V. Chernikov /*
984*537d1343SAlexander V. Chernikov * Loop through the range of entries on tbl8 for which the
985*537d1343SAlexander V. Chernikov * rule_to_delete must be modified.
986*537d1343SAlexander V. Chernikov */
987*537d1343SAlexander V. Chernikov for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
988*537d1343SAlexander V. Chernikov if (lpm->tbl8[i].depth <= depth)
989*537d1343SAlexander V. Chernikov __atomic_store(&lpm->tbl8[i], &new_tbl8_entry,
990*537d1343SAlexander V. Chernikov __ATOMIC_RELAXED);
991*537d1343SAlexander V. Chernikov }
992*537d1343SAlexander V. Chernikov }
993*537d1343SAlexander V. Chernikov
994*537d1343SAlexander V. Chernikov /*
995*537d1343SAlexander V. Chernikov * Check if there are any valid entries in this tbl8 group. If all
996*537d1343SAlexander V. Chernikov * tbl8 entries are invalid we can free the tbl8 and invalidate the
997*537d1343SAlexander V. Chernikov * associated tbl24 entry.
998*537d1343SAlexander V. Chernikov */
999*537d1343SAlexander V. Chernikov
1000*537d1343SAlexander V. Chernikov tbl8_recycle_index = tbl8_recycle_check(lpm->tbl8, tbl8_group_start);
1001*537d1343SAlexander V. Chernikov
1002*537d1343SAlexander V. Chernikov if (tbl8_recycle_index == -EINVAL) {
1003*537d1343SAlexander V. Chernikov /* Set tbl24 before freeing tbl8 to avoid race condition.
1004*537d1343SAlexander V. Chernikov * Prevent the free of the tbl8 group from hoisting.
1005*537d1343SAlexander V. Chernikov */
1006*537d1343SAlexander V. Chernikov lpm->tbl24[tbl24_index].valid = 0;
1007*537d1343SAlexander V. Chernikov __atomic_thread_fence(__ATOMIC_RELEASE);
1008*537d1343SAlexander V. Chernikov tbl8_free(lpm->tbl8, tbl8_group_start);
1009*537d1343SAlexander V. Chernikov } else if (tbl8_recycle_index > -1) {
1010*537d1343SAlexander V. Chernikov /* Update tbl24 entry. */
1011*537d1343SAlexander V. Chernikov struct rte_lpm_tbl_entry new_tbl24_entry = {
1012*537d1343SAlexander V. Chernikov .next_hop = lpm->tbl8[tbl8_recycle_index].next_hop,
1013*537d1343SAlexander V. Chernikov .valid = VALID,
1014*537d1343SAlexander V. Chernikov .valid_group = 0,
1015*537d1343SAlexander V. Chernikov .depth = lpm->tbl8[tbl8_recycle_index].depth,
1016*537d1343SAlexander V. Chernikov };
1017*537d1343SAlexander V. Chernikov
1018*537d1343SAlexander V. Chernikov /* Set tbl24 before freeing tbl8 to avoid race condition.
1019*537d1343SAlexander V. Chernikov * Prevent the free of the tbl8 group from hoisting.
1020*537d1343SAlexander V. Chernikov */
1021*537d1343SAlexander V. Chernikov __atomic_store(&lpm->tbl24[tbl24_index], &new_tbl24_entry,
1022*537d1343SAlexander V. Chernikov __ATOMIC_RELAXED);
1023*537d1343SAlexander V. Chernikov __atomic_thread_fence(__ATOMIC_RELEASE);
1024*537d1343SAlexander V. Chernikov tbl8_free(lpm->tbl8, tbl8_group_start);
1025*537d1343SAlexander V. Chernikov }
1026*537d1343SAlexander V. Chernikov #undef group_idx
1027*537d1343SAlexander V. Chernikov return 0;
1028*537d1343SAlexander V. Chernikov }
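/*
 * Reviewer note (illustrative values, not from upstream): deleting
 * 192.0.2.64/26 walks the index arithmetic above as follows, assuming
 * depth_to_range(26) == 1 << (32 - 26) == 64 as in upstream DPDK:
 *   ip_masked   = 0xC0000240 (192.0.2.64, already aligned to /26)
 *   tbl24_index = ip_masked >> 8 = 0xC00002
 *   tbl8_index  = tbl8_group_start + (ip_masked & 0xFF)
 *               = tbl8_group_start + 0x40
 *   tbl8_range  = 64
 * so entries [tbl8_index, tbl8_index + 63] are invalidated (no covering
 * rule) or rewritten with the sub_rule next hop (covering rule given).
 */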
1029*537d1343SAlexander V. Chernikov
1030*537d1343SAlexander V. Chernikov /*
1031*537d1343SAlexander V. Chernikov * Deletes a rule
1032*537d1343SAlexander V. Chernikov */
1033*537d1343SAlexander V. Chernikov int
1034*537d1343SAlexander V. Chernikov rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
1035*537d1343SAlexander V. Chernikov uint8_t sub_rule_depth, uint32_t sub_rule_nhop)
1036*537d1343SAlexander V. Chernikov {
1037*537d1343SAlexander V. Chernikov //int32_t rule_to_delete_index;
1038*537d1343SAlexander V. Chernikov uint32_t ip_masked;
1039*537d1343SAlexander V. Chernikov //uint8_t sub_rule_depth;
1040*537d1343SAlexander V. Chernikov /*
1041*537d1343SAlexander V. Chernikov * Check input arguments. Note: ip is an unsigned 32-bit integer, so
1042*537d1343SAlexander V. Chernikov * its value cannot be out of range and need not be checked.
1043*537d1343SAlexander V. Chernikov */
1044*537d1343SAlexander V. Chernikov if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
1045*537d1343SAlexander V. Chernikov return -EINVAL;
1046*537d1343SAlexander V. Chernikov }
1047*537d1343SAlexander V. Chernikov
1048*537d1343SAlexander V. Chernikov ip_masked = ip & depth_to_mask(depth);
1049*537d1343SAlexander V. Chernikov
1050*537d1343SAlexander V. Chernikov #if 0
1051*537d1343SAlexander V. Chernikov /*
1052*537d1343SAlexander V. Chernikov * Find the index of the input rule (the one to be deleted) in the
1053*537d1343SAlexander V. Chernikov * rule table.
1054*537d1343SAlexander V. Chernikov */
1055*537d1343SAlexander V. Chernikov rule_to_delete_index = rule_find(lpm, ip_masked, depth);
1056*537d1343SAlexander V. Chernikov
1057*537d1343SAlexander V. Chernikov /*
1058*537d1343SAlexander V. Chernikov * Check if rule_to_delete_index was found. If no rule was found the
1059*537d1343SAlexander V. Chernikov * function rule_find returns -EINVAL.
1060*537d1343SAlexander V. Chernikov */
1061*537d1343SAlexander V. Chernikov if (rule_to_delete_index < 0)
1062*537d1343SAlexander V. Chernikov return -EINVAL;
1063*537d1343SAlexander V. Chernikov
1064*537d1343SAlexander V. Chernikov /* Delete the rule from the rule table. */
1065*537d1343SAlexander V. Chernikov rule_delete(lpm, rule_to_delete_index, depth);
1066*537d1343SAlexander V. Chernikov #endif
1067*537d1343SAlexander V. Chernikov
1068*537d1343SAlexander V. Chernikov /*
1069*537d1343SAlexander V. Chernikov * The replacement (covering) rule, if any, is supplied by the caller
1070*537d1343SAlexander V. Chernikov * via sub_rule_nhop/sub_rule_depth. If there is no covering rule, the
1071*537d1343SAlexander V. Chernikov * table entries associated with the deleted rule are invalidated.
1072*537d1343SAlexander V. Chernikov */
1073*537d1343SAlexander V. Chernikov //sub_rule_depth = *psub_rule_depth;
1074*537d1343SAlexander V. Chernikov //sub_rule_index = find_previous_rule(lpm, ip, depth, &sub_rule_depth);
1075*537d1343SAlexander V. Chernikov
1076*537d1343SAlexander V. Chernikov /*
1077*537d1343SAlexander V. Chernikov * If the input depth is no greater than MAX_DEPTH_TBL24, use
1078*537d1343SAlexander V. Chernikov * delete_depth_small(); otherwise use delete_depth_big().
1079*537d1343SAlexander V. Chernikov */
1080*537d1343SAlexander V. Chernikov if (depth <= MAX_DEPTH_TBL24) {
1081*537d1343SAlexander V. Chernikov return delete_depth_small(lpm, ip_masked, depth,
1082*537d1343SAlexander V. Chernikov sub_rule_nhop, sub_rule_depth);
1083*537d1343SAlexander V. Chernikov } else { /* If depth > MAX_DEPTH_TBL24 */
1084*537d1343SAlexander V. Chernikov return delete_depth_big(lpm, ip_masked, depth, sub_rule_nhop,
1085*537d1343SAlexander V. Chernikov sub_rule_depth);
1086*537d1343SAlexander V. Chernikov }
1087*537d1343SAlexander V. Chernikov }
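/*
 * Reviewer note: minimal usage sketch of rte_lpm_delete(), kept under
 * #if 0. It assumes the caller has no shorter covering prefix for the
 * deleted route, so sub_rule_depth and sub_rule_nhop are both 0 and the
 * affected tbl8 entries are simply invalidated (see delete_depth_big()
 * above). The address constant is illustrative only and is given in
 * host byte order, as assumed by upstream DPDK's LPM API.
 */
#if 0
static void
lpm_delete_example(struct rte_lpm *lpm)
{
	uint32_t ip = 0xC6336440;	/* 198.51.100.64 */

	/* Remove 198.51.100.64/26; depth > 24 takes the tbl8 path. */
	(void)rte_lpm_delete(lpm, ip, 26, 0 /* sub_rule_depth */,
	    0 /* sub_rule_nhop */);
}
#endif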
1088*537d1343SAlexander V. Chernikov
1089*537d1343SAlexander V. Chernikov /*
1090*537d1343SAlexander V. Chernikov * Delete all rules from the LPM table.
1091*537d1343SAlexander V. Chernikov */
1092*537d1343SAlexander V. Chernikov void
1093*537d1343SAlexander V. Chernikov rte_lpm_delete_all(struct rte_lpm *lpm)
1094*537d1343SAlexander V. Chernikov {
1095*537d1343SAlexander V. Chernikov /* Zero rule information. */
1096*537d1343SAlexander V. Chernikov memset(lpm->rule_info, 0, sizeof(lpm->rule_info));
1097*537d1343SAlexander V. Chernikov
1098*537d1343SAlexander V. Chernikov /* Zero tbl24. */
1099*537d1343SAlexander V. Chernikov memset(lpm->tbl24, 0, sizeof(lpm->tbl24));
1100*537d1343SAlexander V. Chernikov
1101*537d1343SAlexander V. Chernikov /* Zero tbl8. */
1102*537d1343SAlexander V. Chernikov memset(lpm->tbl8, 0, sizeof(lpm->tbl8[0])
1103*537d1343SAlexander V. Chernikov * RTE_LPM_TBL8_GROUP_NUM_ENTRIES * lpm->number_tbl8s);
1104*537d1343SAlexander V. Chernikov
1105*537d1343SAlexander V. Chernikov /* Delete all rules from the rules table. */
1106*537d1343SAlexander V. Chernikov memset(lpm->rules_tbl, 0, sizeof(lpm->rules_tbl[0]) * lpm->max_rules);
1107*537d1343SAlexander V. Chernikov }
1108