xref: /dpdk/lib/lpm/rte_lpm.c (revision ff933786477febb08fcbd4c2f85cc158a140b46d)
199a2dd95SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
299a2dd95SBruce Richardson  * Copyright(c) 2010-2014 Intel Corporation
399a2dd95SBruce Richardson  * Copyright(c) 2020 Arm Limited
499a2dd95SBruce Richardson  */
599a2dd95SBruce Richardson 
699a2dd95SBruce Richardson #include <string.h>
799a2dd95SBruce Richardson #include <stdint.h>
899a2dd95SBruce Richardson #include <errno.h>
999a2dd95SBruce Richardson #include <stdio.h>
1099a2dd95SBruce Richardson #include <sys/queue.h>
1199a2dd95SBruce Richardson 
1299a2dd95SBruce Richardson #include <rte_log.h>
1399a2dd95SBruce Richardson #include <rte_common.h>
1499a2dd95SBruce Richardson #include <rte_malloc.h>
1599a2dd95SBruce Richardson #include <rte_eal_memconfig.h>
1699a2dd95SBruce Richardson #include <rte_string_fns.h>
1799a2dd95SBruce Richardson #include <rte_errno.h>
1899a2dd95SBruce Richardson #include <rte_tailq.h>
1999a2dd95SBruce Richardson 
2099a2dd95SBruce Richardson #include "rte_lpm.h"
21fdb83ffbSStephen Hemminger #include "lpm_log.h"
22fdb83ffbSStephen Hemminger 
23fdb83ffbSStephen Hemminger RTE_LOG_REGISTER_DEFAULT(lpm_logtype, INFO);
2499a2dd95SBruce Richardson 
2599a2dd95SBruce Richardson TAILQ_HEAD(rte_lpm_list, rte_tailq_entry);
2699a2dd95SBruce Richardson 
2799a2dd95SBruce Richardson static struct rte_tailq_elem rte_lpm_tailq = {
2899a2dd95SBruce Richardson 	.name = "RTE_LPM",
2999a2dd95SBruce Richardson };
3099a2dd95SBruce Richardson EAL_REGISTER_TAILQ(rte_lpm_tailq)
3199a2dd95SBruce Richardson 
3299a2dd95SBruce Richardson #define MAX_DEPTH_TBL24 24
3399a2dd95SBruce Richardson 
/* Validity marker used by tbl24/tbl8 entries and tbl8 group headers. */
enum valid_flag {
	INVALID = 0,
	VALID
};
3899a2dd95SBruce Richardson 
/** @internal Rule structure. One entry of the rules backing table. */
struct rte_lpm_rule {
	uint32_t ip; /**< Rule IP address (already masked to its depth). */
	uint32_t next_hop; /**< Rule next hop. */
};
4499a2dd95SBruce Richardson 
/** @internal Contains metadata about the rules table.
 * Rules are stored contiguously, grouped by depth; one of these records
 * tracks the group for a single depth.
 */
struct rte_lpm_rule_info {
	uint32_t used_rules; /**< Used rules so far. */
	uint32_t first_rule; /**< Indexes the first rule of a given depth. */
};
5099a2dd95SBruce Richardson 
/** @internal LPM structure. Private wrapper around the public rte_lpm. */
struct __rte_lpm {
	/* Exposed LPM data; must stay first so container_of() round-trips. */
	struct rte_lpm lpm;

	/* LPM metadata. */
	char name[RTE_LPM_NAMESIZE];        /**< Name of the lpm. */
	uint32_t max_rules; /**< Max. balanced rules per lpm. */
	uint32_t number_tbl8s; /**< Number of tbl8s. */
	/** Per-depth rule group metadata, indexed by (depth - 1). */
	struct rte_lpm_rule_info rule_info[RTE_LPM_MAX_DEPTH];
	struct rte_lpm_rule *rules_tbl; /**< LPM rules. */

	/* RCU config. */
	struct rte_rcu_qsbr *v;		/* RCU QSBR variable. */
	enum rte_lpm_qsbr_mode rcu_mode;/* Blocking, defer queue. */
	struct rte_rcu_qsbr_dq *dq;	/* RCU QSBR defer queue. */
};
6999a2dd95SBruce Richardson 
/* Macro to enable/disable run-time checks. */
#if defined(RTE_LIBRTE_LPM_DEBUG)
#include <rte_debug.h>
/* Abort with a diagnostic if depth is outside the valid 1..32 range. */
#define VERIFY_DEPTH(depth) do {                                \
	if ((depth == 0) || (depth > RTE_LPM_MAX_DEPTH))        \
		rte_panic("LPM: Invalid depth (%u) at line %d", \
				(unsigned)(depth), __LINE__);   \
} while (0)
#else
/* Checks compiled out in non-debug builds. */
#define VERIFY_DEPTH(depth)
#endif
8199a2dd95SBruce Richardson 
8299a2dd95SBruce Richardson /*
8399a2dd95SBruce Richardson  * Converts a given depth value to its corresponding mask value.
8499a2dd95SBruce Richardson  *
8599a2dd95SBruce Richardson  * depth  (IN)		: range = 1 - 32
8699a2dd95SBruce Richardson  * mask   (OUT)		: 32bit mask
8799a2dd95SBruce Richardson  */
88*ff933786STyler Retzlaff static uint32_t __rte_pure
depth_to_mask(uint8_t depth)8999a2dd95SBruce Richardson depth_to_mask(uint8_t depth)
9099a2dd95SBruce Richardson {
9199a2dd95SBruce Richardson 	VERIFY_DEPTH(depth);
9299a2dd95SBruce Richardson 
9399a2dd95SBruce Richardson 	/* To calculate a mask start with a 1 on the left hand side and right
9499a2dd95SBruce Richardson 	 * shift while populating the left hand side with 1's
9599a2dd95SBruce Richardson 	 */
9699a2dd95SBruce Richardson 	return (int)0x80000000 >> (depth - 1);
9799a2dd95SBruce Richardson }
9899a2dd95SBruce Richardson 
9999a2dd95SBruce Richardson /*
10099a2dd95SBruce Richardson  * Converts given depth value to its corresponding range value.
10199a2dd95SBruce Richardson  */
102*ff933786STyler Retzlaff static uint32_t __rte_pure
depth_to_range(uint8_t depth)10399a2dd95SBruce Richardson depth_to_range(uint8_t depth)
10499a2dd95SBruce Richardson {
10599a2dd95SBruce Richardson 	VERIFY_DEPTH(depth);
10699a2dd95SBruce Richardson 
10799a2dd95SBruce Richardson 	/*
10899a2dd95SBruce Richardson 	 * Calculate tbl24 range. (Note: 2^depth = 1 << depth)
10999a2dd95SBruce Richardson 	 */
11099a2dd95SBruce Richardson 	if (depth <= MAX_DEPTH_TBL24)
11199a2dd95SBruce Richardson 		return 1 << (MAX_DEPTH_TBL24 - depth);
11299a2dd95SBruce Richardson 
11399a2dd95SBruce Richardson 	/* Else if depth is greater than 24 */
11499a2dd95SBruce Richardson 	return 1 << (RTE_LPM_MAX_DEPTH - depth);
11599a2dd95SBruce Richardson }
11699a2dd95SBruce Richardson 
11799a2dd95SBruce Richardson /*
11899a2dd95SBruce Richardson  * Find an existing lpm table and return a pointer to it.
11999a2dd95SBruce Richardson  */
12099a2dd95SBruce Richardson struct rte_lpm *
rte_lpm_find_existing(const char * name)12199a2dd95SBruce Richardson rte_lpm_find_existing(const char *name)
12299a2dd95SBruce Richardson {
12399a2dd95SBruce Richardson 	struct __rte_lpm *i_lpm = NULL;
12499a2dd95SBruce Richardson 	struct rte_tailq_entry *te;
12599a2dd95SBruce Richardson 	struct rte_lpm_list *lpm_list;
12699a2dd95SBruce Richardson 
12799a2dd95SBruce Richardson 	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
12899a2dd95SBruce Richardson 
12999a2dd95SBruce Richardson 	rte_mcfg_tailq_read_lock();
13099a2dd95SBruce Richardson 	TAILQ_FOREACH(te, lpm_list, next) {
13199a2dd95SBruce Richardson 		i_lpm = te->data;
13299a2dd95SBruce Richardson 		if (strncmp(name, i_lpm->name, RTE_LPM_NAMESIZE) == 0)
13399a2dd95SBruce Richardson 			break;
13499a2dd95SBruce Richardson 	}
13599a2dd95SBruce Richardson 	rte_mcfg_tailq_read_unlock();
13699a2dd95SBruce Richardson 
13799a2dd95SBruce Richardson 	if (te == NULL) {
13899a2dd95SBruce Richardson 		rte_errno = ENOENT;
13999a2dd95SBruce Richardson 		return NULL;
14099a2dd95SBruce Richardson 	}
14199a2dd95SBruce Richardson 
14299a2dd95SBruce Richardson 	return &i_lpm->lpm;
14399a2dd95SBruce Richardson }
14499a2dd95SBruce Richardson 
14599a2dd95SBruce Richardson /*
14699a2dd95SBruce Richardson  * Allocates memory for LPM object
14799a2dd95SBruce Richardson  */
14899a2dd95SBruce Richardson struct rte_lpm *
rte_lpm_create(const char * name,int socket_id,const struct rte_lpm_config * config)14999a2dd95SBruce Richardson rte_lpm_create(const char *name, int socket_id,
15099a2dd95SBruce Richardson 		const struct rte_lpm_config *config)
15199a2dd95SBruce Richardson {
15299a2dd95SBruce Richardson 	char mem_name[RTE_LPM_NAMESIZE];
15399a2dd95SBruce Richardson 	struct __rte_lpm *i_lpm;
15499a2dd95SBruce Richardson 	struct rte_lpm *lpm = NULL;
15599a2dd95SBruce Richardson 	struct rte_tailq_entry *te;
15699a2dd95SBruce Richardson 	uint32_t mem_size, rules_size, tbl8s_size;
15799a2dd95SBruce Richardson 	struct rte_lpm_list *lpm_list;
15899a2dd95SBruce Richardson 
15999a2dd95SBruce Richardson 	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
16099a2dd95SBruce Richardson 
16199a2dd95SBruce Richardson 	RTE_BUILD_BUG_ON(sizeof(struct rte_lpm_tbl_entry) != 4);
16299a2dd95SBruce Richardson 
16399a2dd95SBruce Richardson 	/* Check user arguments. */
16499a2dd95SBruce Richardson 	if ((name == NULL) || (socket_id < -1) || (config->max_rules == 0)
16599a2dd95SBruce Richardson 			|| config->number_tbl8s > RTE_LPM_MAX_TBL8_NUM_GROUPS) {
16699a2dd95SBruce Richardson 		rte_errno = EINVAL;
16799a2dd95SBruce Richardson 		return NULL;
16899a2dd95SBruce Richardson 	}
16999a2dd95SBruce Richardson 
17099a2dd95SBruce Richardson 	snprintf(mem_name, sizeof(mem_name), "LPM_%s", name);
17199a2dd95SBruce Richardson 
17299a2dd95SBruce Richardson 	rte_mcfg_tailq_write_lock();
17399a2dd95SBruce Richardson 
17499a2dd95SBruce Richardson 	/* guarantee there's no existing */
17599a2dd95SBruce Richardson 	TAILQ_FOREACH(te, lpm_list, next) {
17699a2dd95SBruce Richardson 		i_lpm = te->data;
17799a2dd95SBruce Richardson 		if (strncmp(name, i_lpm->name, RTE_LPM_NAMESIZE) == 0)
17899a2dd95SBruce Richardson 			break;
17999a2dd95SBruce Richardson 	}
18099a2dd95SBruce Richardson 
18199a2dd95SBruce Richardson 	if (te != NULL) {
18299a2dd95SBruce Richardson 		rte_errno = EEXIST;
18399a2dd95SBruce Richardson 		goto exit;
18499a2dd95SBruce Richardson 	}
18599a2dd95SBruce Richardson 
18699a2dd95SBruce Richardson 	/* Determine the amount of memory to allocate. */
18799a2dd95SBruce Richardson 	mem_size = sizeof(*i_lpm);
18899a2dd95SBruce Richardson 	rules_size = sizeof(struct rte_lpm_rule) * config->max_rules;
18999a2dd95SBruce Richardson 	tbl8s_size = sizeof(struct rte_lpm_tbl_entry) *
19099a2dd95SBruce Richardson 			RTE_LPM_TBL8_GROUP_NUM_ENTRIES * config->number_tbl8s;
19199a2dd95SBruce Richardson 
19299a2dd95SBruce Richardson 	/* allocate tailq entry */
19399a2dd95SBruce Richardson 	te = rte_zmalloc("LPM_TAILQ_ENTRY", sizeof(*te), 0);
19499a2dd95SBruce Richardson 	if (te == NULL) {
195ae67895bSDavid Marchand 		LPM_LOG(ERR, "Failed to allocate tailq entry");
19699a2dd95SBruce Richardson 		rte_errno = ENOMEM;
19799a2dd95SBruce Richardson 		goto exit;
19899a2dd95SBruce Richardson 	}
19999a2dd95SBruce Richardson 
20099a2dd95SBruce Richardson 	/* Allocate memory to store the LPM data structures. */
20199a2dd95SBruce Richardson 	i_lpm = rte_zmalloc_socket(mem_name, mem_size,
20299a2dd95SBruce Richardson 			RTE_CACHE_LINE_SIZE, socket_id);
20399a2dd95SBruce Richardson 	if (i_lpm == NULL) {
204ae67895bSDavid Marchand 		LPM_LOG(ERR, "LPM memory allocation failed");
20599a2dd95SBruce Richardson 		rte_free(te);
20699a2dd95SBruce Richardson 		rte_errno = ENOMEM;
20799a2dd95SBruce Richardson 		goto exit;
20899a2dd95SBruce Richardson 	}
20999a2dd95SBruce Richardson 
21099a2dd95SBruce Richardson 	i_lpm->rules_tbl = rte_zmalloc_socket(NULL,
21199a2dd95SBruce Richardson 			(size_t)rules_size, RTE_CACHE_LINE_SIZE, socket_id);
21299a2dd95SBruce Richardson 
21399a2dd95SBruce Richardson 	if (i_lpm->rules_tbl == NULL) {
214ae67895bSDavid Marchand 		LPM_LOG(ERR, "LPM rules_tbl memory allocation failed");
21599a2dd95SBruce Richardson 		rte_free(i_lpm);
21699a2dd95SBruce Richardson 		i_lpm = NULL;
21799a2dd95SBruce Richardson 		rte_free(te);
21899a2dd95SBruce Richardson 		rte_errno = ENOMEM;
21999a2dd95SBruce Richardson 		goto exit;
22099a2dd95SBruce Richardson 	}
22199a2dd95SBruce Richardson 
22299a2dd95SBruce Richardson 	i_lpm->lpm.tbl8 = rte_zmalloc_socket(NULL,
22399a2dd95SBruce Richardson 			(size_t)tbl8s_size, RTE_CACHE_LINE_SIZE, socket_id);
22499a2dd95SBruce Richardson 
22599a2dd95SBruce Richardson 	if (i_lpm->lpm.tbl8 == NULL) {
226ae67895bSDavid Marchand 		LPM_LOG(ERR, "LPM tbl8 memory allocation failed");
22799a2dd95SBruce Richardson 		rte_free(i_lpm->rules_tbl);
22899a2dd95SBruce Richardson 		rte_free(i_lpm);
22999a2dd95SBruce Richardson 		i_lpm = NULL;
23099a2dd95SBruce Richardson 		rte_free(te);
23199a2dd95SBruce Richardson 		rte_errno = ENOMEM;
23299a2dd95SBruce Richardson 		goto exit;
23399a2dd95SBruce Richardson 	}
23499a2dd95SBruce Richardson 
23599a2dd95SBruce Richardson 	/* Save user arguments. */
23699a2dd95SBruce Richardson 	i_lpm->max_rules = config->max_rules;
23799a2dd95SBruce Richardson 	i_lpm->number_tbl8s = config->number_tbl8s;
23899a2dd95SBruce Richardson 	strlcpy(i_lpm->name, name, sizeof(i_lpm->name));
23999a2dd95SBruce Richardson 
24099a2dd95SBruce Richardson 	te->data = i_lpm;
24199a2dd95SBruce Richardson 	lpm = &i_lpm->lpm;
24299a2dd95SBruce Richardson 
24399a2dd95SBruce Richardson 	TAILQ_INSERT_TAIL(lpm_list, te, next);
24499a2dd95SBruce Richardson 
24599a2dd95SBruce Richardson exit:
24699a2dd95SBruce Richardson 	rte_mcfg_tailq_write_unlock();
24799a2dd95SBruce Richardson 
24899a2dd95SBruce Richardson 	return lpm;
24999a2dd95SBruce Richardson }
25099a2dd95SBruce Richardson 
25199a2dd95SBruce Richardson /*
25299a2dd95SBruce Richardson  * Deallocates memory for given LPM table.
25399a2dd95SBruce Richardson  */
25499a2dd95SBruce Richardson void
rte_lpm_free(struct rte_lpm * lpm)25599a2dd95SBruce Richardson rte_lpm_free(struct rte_lpm *lpm)
25699a2dd95SBruce Richardson {
25799a2dd95SBruce Richardson 	struct rte_lpm_list *lpm_list;
25899a2dd95SBruce Richardson 	struct rte_tailq_entry *te;
25999a2dd95SBruce Richardson 	struct __rte_lpm *i_lpm;
26099a2dd95SBruce Richardson 
26199a2dd95SBruce Richardson 	/* Check user arguments. */
26299a2dd95SBruce Richardson 	if (lpm == NULL)
26399a2dd95SBruce Richardson 		return;
26499a2dd95SBruce Richardson 	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
26599a2dd95SBruce Richardson 
26699a2dd95SBruce Richardson 	lpm_list = RTE_TAILQ_CAST(rte_lpm_tailq.head, rte_lpm_list);
26799a2dd95SBruce Richardson 
26899a2dd95SBruce Richardson 	rte_mcfg_tailq_write_lock();
26999a2dd95SBruce Richardson 
27099a2dd95SBruce Richardson 	/* find our tailq entry */
27199a2dd95SBruce Richardson 	TAILQ_FOREACH(te, lpm_list, next) {
27299a2dd95SBruce Richardson 		if (te->data == (void *)i_lpm)
27399a2dd95SBruce Richardson 			break;
27499a2dd95SBruce Richardson 	}
27599a2dd95SBruce Richardson 	if (te != NULL)
27699a2dd95SBruce Richardson 		TAILQ_REMOVE(lpm_list, te, next);
27799a2dd95SBruce Richardson 
27899a2dd95SBruce Richardson 	rte_mcfg_tailq_write_unlock();
27999a2dd95SBruce Richardson 
28099a2dd95SBruce Richardson 	if (i_lpm->dq != NULL)
28199a2dd95SBruce Richardson 		rte_rcu_qsbr_dq_delete(i_lpm->dq);
28299a2dd95SBruce Richardson 	rte_free(i_lpm->lpm.tbl8);
28399a2dd95SBruce Richardson 	rte_free(i_lpm->rules_tbl);
28499a2dd95SBruce Richardson 	rte_free(i_lpm);
28599a2dd95SBruce Richardson 	rte_free(te);
28699a2dd95SBruce Richardson }
28799a2dd95SBruce Richardson 
28899a2dd95SBruce Richardson static void
__lpm_rcu_qsbr_free_resource(void * p,void * data,unsigned int n)28999a2dd95SBruce Richardson __lpm_rcu_qsbr_free_resource(void *p, void *data, unsigned int n)
29099a2dd95SBruce Richardson {
29199a2dd95SBruce Richardson 	struct rte_lpm_tbl_entry *tbl8 = ((struct __rte_lpm *)p)->lpm.tbl8;
29299a2dd95SBruce Richardson 	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
29399a2dd95SBruce Richardson 	uint32_t tbl8_group_index = *(uint32_t *)data;
29499a2dd95SBruce Richardson 
29599a2dd95SBruce Richardson 	RTE_SET_USED(n);
29699a2dd95SBruce Richardson 	/* Set tbl8 group invalid */
29799a2dd95SBruce Richardson 	__atomic_store(&tbl8[tbl8_group_index], &zero_tbl8_entry,
29899a2dd95SBruce Richardson 		__ATOMIC_RELAXED);
29999a2dd95SBruce Richardson }
30099a2dd95SBruce Richardson 
30199a2dd95SBruce Richardson /* Associate QSBR variable with an LPM object.
30299a2dd95SBruce Richardson  */
30399a2dd95SBruce Richardson int
rte_lpm_rcu_qsbr_add(struct rte_lpm * lpm,struct rte_lpm_rcu_config * cfg)30499a2dd95SBruce Richardson rte_lpm_rcu_qsbr_add(struct rte_lpm *lpm, struct rte_lpm_rcu_config *cfg)
30599a2dd95SBruce Richardson {
30699a2dd95SBruce Richardson 	struct rte_rcu_qsbr_dq_parameters params = {0};
30799a2dd95SBruce Richardson 	char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
30899a2dd95SBruce Richardson 	struct __rte_lpm *i_lpm;
30999a2dd95SBruce Richardson 
31099a2dd95SBruce Richardson 	if (lpm == NULL || cfg == NULL) {
31199a2dd95SBruce Richardson 		rte_errno = EINVAL;
31299a2dd95SBruce Richardson 		return 1;
31399a2dd95SBruce Richardson 	}
31499a2dd95SBruce Richardson 
31599a2dd95SBruce Richardson 	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
31699a2dd95SBruce Richardson 	if (i_lpm->v != NULL) {
31799a2dd95SBruce Richardson 		rte_errno = EEXIST;
31899a2dd95SBruce Richardson 		return 1;
31999a2dd95SBruce Richardson 	}
32099a2dd95SBruce Richardson 
32199a2dd95SBruce Richardson 	if (cfg->mode == RTE_LPM_QSBR_MODE_SYNC) {
32299a2dd95SBruce Richardson 		/* No other things to do. */
32399a2dd95SBruce Richardson 	} else if (cfg->mode == RTE_LPM_QSBR_MODE_DQ) {
32499a2dd95SBruce Richardson 		/* Init QSBR defer queue. */
32599a2dd95SBruce Richardson 		snprintf(rcu_dq_name, sizeof(rcu_dq_name),
32699a2dd95SBruce Richardson 				"LPM_RCU_%s", i_lpm->name);
32799a2dd95SBruce Richardson 		params.name = rcu_dq_name;
32899a2dd95SBruce Richardson 		params.size = cfg->dq_size;
32999a2dd95SBruce Richardson 		if (params.size == 0)
33099a2dd95SBruce Richardson 			params.size = i_lpm->number_tbl8s;
33199a2dd95SBruce Richardson 		params.trigger_reclaim_limit = cfg->reclaim_thd;
33299a2dd95SBruce Richardson 		params.max_reclaim_size = cfg->reclaim_max;
33399a2dd95SBruce Richardson 		if (params.max_reclaim_size == 0)
33499a2dd95SBruce Richardson 			params.max_reclaim_size = RTE_LPM_RCU_DQ_RECLAIM_MAX;
33599a2dd95SBruce Richardson 		params.esize = sizeof(uint32_t);	/* tbl8 group index */
33699a2dd95SBruce Richardson 		params.free_fn = __lpm_rcu_qsbr_free_resource;
33799a2dd95SBruce Richardson 		params.p = i_lpm;
33899a2dd95SBruce Richardson 		params.v = cfg->v;
33999a2dd95SBruce Richardson 		i_lpm->dq = rte_rcu_qsbr_dq_create(&params);
34099a2dd95SBruce Richardson 		if (i_lpm->dq == NULL) {
341ae67895bSDavid Marchand 			LPM_LOG(ERR, "LPM defer queue creation failed");
34299a2dd95SBruce Richardson 			return 1;
34399a2dd95SBruce Richardson 		}
34499a2dd95SBruce Richardson 	} else {
34599a2dd95SBruce Richardson 		rte_errno = EINVAL;
34699a2dd95SBruce Richardson 		return 1;
34799a2dd95SBruce Richardson 	}
34899a2dd95SBruce Richardson 	i_lpm->rcu_mode = cfg->mode;
34999a2dd95SBruce Richardson 	i_lpm->v = cfg->v;
35099a2dd95SBruce Richardson 
35199a2dd95SBruce Richardson 	return 0;
35299a2dd95SBruce Richardson }
35399a2dd95SBruce Richardson 
/*
 * Adds a rule to the rule table.
 *
 * NOTE: The rule table is split into 32 groups. Each group contains rules that
 * apply to a specific prefix depth (i.e. group 1 contains rules that apply to
 * prefixes with a depth of 1 etc.). In the following code (depth - 1) is used
 * to refer to depth 1 because even though the depth range is 1 - 32, depths
 * are stored in the rule table from 0 - 31.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 *
 * Returns the index at which the rule was stored, -EEXIST if an identical
 * rule (same masked prefix and next hop) already exists, or -ENOSPC if the
 * rule table is full.
 */
static int32_t
rule_add(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
	uint32_t next_hop)
{
	uint32_t rule_gindex, rule_index, last_rule;
	int i;

	VERIFY_DEPTH(depth);

	/* Scan through rule group to see if rule already exists. */
	if (i_lpm->rule_info[depth - 1].used_rules > 0) {

		/* rule_gindex stands for rule group index. */
		rule_gindex = i_lpm->rule_info[depth - 1].first_rule;
		/* Initialise rule_index to point to start of rule group. */
		rule_index = rule_gindex;
		/* Last rule = Last used rule in this rule group. */
		last_rule = rule_gindex + i_lpm->rule_info[depth - 1].used_rules;

		for (; rule_index < last_rule; rule_index++) {

			/* If rule already exists update next hop and return. */
			if (i_lpm->rules_tbl[rule_index].ip == ip_masked) {

				if (i_lpm->rules_tbl[rule_index].next_hop
						== next_hop)
					return -EEXIST;
				i_lpm->rules_tbl[rule_index].next_hop = next_hop;

				return rule_index;
			}
		}

		if (rule_index == i_lpm->max_rules)
			return -ENOSPC;
	} else {
		/* No rules at this depth yet: the new rule goes right after
		 * the last used rule of the nearest shallower non-empty
		 * depth group (or at index 0 if none exists).
		 */
		rule_index = 0;

		for (i = depth - 1; i > 0; i--) {
			if (i_lpm->rule_info[i - 1].used_rules > 0) {
				rule_index = i_lpm->rule_info[i - 1].first_rule
						+ i_lpm->rule_info[i - 1].used_rules;
				break;
			}
		}
		if (rule_index == i_lpm->max_rules)
			return -ENOSPC;

		i_lpm->rule_info[depth - 1].first_rule = rule_index;
	}

	/* Make room for the new rule in the array: shift each deeper group
	 * down one slot by copying its first entry past its last entry,
	 * keeping all groups contiguous and ordered by depth.
	 */
	for (i = RTE_LPM_MAX_DEPTH; i > depth; i--) {
		if (i_lpm->rule_info[i - 1].first_rule
				+ i_lpm->rule_info[i - 1].used_rules == i_lpm->max_rules)
			return -ENOSPC;

		if (i_lpm->rule_info[i - 1].used_rules > 0) {
			i_lpm->rules_tbl[i_lpm->rule_info[i - 1].first_rule
				+ i_lpm->rule_info[i - 1].used_rules]
					= i_lpm->rules_tbl[i_lpm->rule_info[i - 1].first_rule];
			i_lpm->rule_info[i - 1].first_rule++;
		}
	}

	/* Add the new rule. */
	i_lpm->rules_tbl[rule_index].ip = ip_masked;
	i_lpm->rules_tbl[rule_index].next_hop = next_hop;

	/* Increment the used rules counter for this rule group. */
	i_lpm->rule_info[depth - 1].used_rules++;

	return rule_index;
}
43999a2dd95SBruce Richardson 
/*
 * Delete a rule from the rule table.
 * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
 */
static void
rule_delete(struct __rte_lpm *i_lpm, int32_t rule_index, uint8_t depth)
{
	int i;

	VERIFY_DEPTH(depth);

	/* Overwrite the deleted slot with the last used rule of the same
	 * depth group, shrinking that group by one.
	 */
	i_lpm->rules_tbl[rule_index] =
			i_lpm->rules_tbl[i_lpm->rule_info[depth - 1].first_rule
			+ i_lpm->rule_info[depth - 1].used_rules - 1];

	/* Slide every deeper non-empty group up one slot to keep the rule
	 * table contiguous: each group's last entry moves into the slot
	 * freed just before its first entry.
	 */
	for (i = depth; i < RTE_LPM_MAX_DEPTH; i++) {
		if (i_lpm->rule_info[i].used_rules > 0) {
			i_lpm->rules_tbl[i_lpm->rule_info[i].first_rule - 1] =
					i_lpm->rules_tbl[i_lpm->rule_info[i].first_rule
						+ i_lpm->rule_info[i].used_rules - 1];
			i_lpm->rule_info[i].first_rule--;
		}
	}

	i_lpm->rule_info[depth - 1].used_rules--;
}
46699a2dd95SBruce Richardson 
46799a2dd95SBruce Richardson /*
46899a2dd95SBruce Richardson  * Finds a rule in rule table.
46999a2dd95SBruce Richardson  * NOTE: Valid range for depth parameter is 1 .. 32 inclusive.
47099a2dd95SBruce Richardson  */
47199a2dd95SBruce Richardson static int32_t
rule_find(struct __rte_lpm * i_lpm,uint32_t ip_masked,uint8_t depth)47299a2dd95SBruce Richardson rule_find(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth)
47399a2dd95SBruce Richardson {
47499a2dd95SBruce Richardson 	uint32_t rule_gindex, last_rule, rule_index;
47599a2dd95SBruce Richardson 
47699a2dd95SBruce Richardson 	VERIFY_DEPTH(depth);
47799a2dd95SBruce Richardson 
47899a2dd95SBruce Richardson 	rule_gindex = i_lpm->rule_info[depth - 1].first_rule;
47999a2dd95SBruce Richardson 	last_rule = rule_gindex + i_lpm->rule_info[depth - 1].used_rules;
48099a2dd95SBruce Richardson 
48199a2dd95SBruce Richardson 	/* Scan used rules at given depth to find rule. */
48299a2dd95SBruce Richardson 	for (rule_index = rule_gindex; rule_index < last_rule; rule_index++) {
48399a2dd95SBruce Richardson 		/* If rule is found return the rule index. */
48499a2dd95SBruce Richardson 		if (i_lpm->rules_tbl[rule_index].ip == ip_masked)
48599a2dd95SBruce Richardson 			return rule_index;
48699a2dd95SBruce Richardson 	}
48799a2dd95SBruce Richardson 
48899a2dd95SBruce Richardson 	/* If rule is not found return -EINVAL. */
48999a2dd95SBruce Richardson 	return -EINVAL;
49099a2dd95SBruce Richardson }
49199a2dd95SBruce Richardson 
49299a2dd95SBruce Richardson /*
49399a2dd95SBruce Richardson  * Find, clean and allocate a tbl8.
49499a2dd95SBruce Richardson  */
49599a2dd95SBruce Richardson static int32_t
_tbl8_alloc(struct __rte_lpm * i_lpm)49699a2dd95SBruce Richardson _tbl8_alloc(struct __rte_lpm *i_lpm)
49799a2dd95SBruce Richardson {
49899a2dd95SBruce Richardson 	uint32_t group_idx; /* tbl8 group index. */
49999a2dd95SBruce Richardson 	struct rte_lpm_tbl_entry *tbl8_entry;
50099a2dd95SBruce Richardson 
50199a2dd95SBruce Richardson 	/* Scan through tbl8 to find a free (i.e. INVALID) tbl8 group. */
50299a2dd95SBruce Richardson 	for (group_idx = 0; group_idx < i_lpm->number_tbl8s; group_idx++) {
50399a2dd95SBruce Richardson 		tbl8_entry = &i_lpm->lpm.tbl8[group_idx *
50499a2dd95SBruce Richardson 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES];
50599a2dd95SBruce Richardson 		/* If a free tbl8 group is found clean it and set as VALID. */
50699a2dd95SBruce Richardson 		if (!tbl8_entry->valid_group) {
50799a2dd95SBruce Richardson 			struct rte_lpm_tbl_entry new_tbl8_entry = {
50899a2dd95SBruce Richardson 				.next_hop = 0,
50999a2dd95SBruce Richardson 				.valid = INVALID,
51099a2dd95SBruce Richardson 				.depth = 0,
51199a2dd95SBruce Richardson 				.valid_group = VALID,
51299a2dd95SBruce Richardson 			};
51399a2dd95SBruce Richardson 
51499a2dd95SBruce Richardson 			memset(&tbl8_entry[0], 0,
51599a2dd95SBruce Richardson 					RTE_LPM_TBL8_GROUP_NUM_ENTRIES *
51699a2dd95SBruce Richardson 					sizeof(tbl8_entry[0]));
51799a2dd95SBruce Richardson 
51899a2dd95SBruce Richardson 			__atomic_store(tbl8_entry, &new_tbl8_entry,
51999a2dd95SBruce Richardson 					__ATOMIC_RELAXED);
52099a2dd95SBruce Richardson 
52199a2dd95SBruce Richardson 			/* Return group index for allocated tbl8 group. */
52299a2dd95SBruce Richardson 			return group_idx;
52399a2dd95SBruce Richardson 		}
52499a2dd95SBruce Richardson 	}
52599a2dd95SBruce Richardson 
52699a2dd95SBruce Richardson 	/* If there are no tbl8 groups free then return error. */
52799a2dd95SBruce Richardson 	return -ENOSPC;
52899a2dd95SBruce Richardson }
52999a2dd95SBruce Richardson 
53099a2dd95SBruce Richardson static int32_t
tbl8_alloc(struct __rte_lpm * i_lpm)53199a2dd95SBruce Richardson tbl8_alloc(struct __rte_lpm *i_lpm)
53299a2dd95SBruce Richardson {
53399a2dd95SBruce Richardson 	int32_t group_idx; /* tbl8 group index. */
53499a2dd95SBruce Richardson 
53599a2dd95SBruce Richardson 	group_idx = _tbl8_alloc(i_lpm);
53699a2dd95SBruce Richardson 	if (group_idx == -ENOSPC && i_lpm->dq != NULL) {
53799a2dd95SBruce Richardson 		/* If there are no tbl8 groups try to reclaim one. */
53899a2dd95SBruce Richardson 		if (rte_rcu_qsbr_dq_reclaim(i_lpm->dq, 1,
53999a2dd95SBruce Richardson 				NULL, NULL, NULL) == 0)
54099a2dd95SBruce Richardson 			group_idx = _tbl8_alloc(i_lpm);
54199a2dd95SBruce Richardson 	}
54299a2dd95SBruce Richardson 
54399a2dd95SBruce Richardson 	return group_idx;
54499a2dd95SBruce Richardson }
54599a2dd95SBruce Richardson 
/*
 * Release a tbl8 group according to the configured RCU mode:
 * no RCU -> invalidate immediately; SYNC -> wait for readers first;
 * DQ -> enqueue for deferred reclamation.
 * Returns 0 on success, -rte_errno if the defer-queue enqueue fails.
 */
static int32_t
tbl8_free(struct __rte_lpm *i_lpm, uint32_t tbl8_group_start)
{
	struct rte_lpm_tbl_entry zero_tbl8_entry = {0};
	int status;

	if (i_lpm->v == NULL) {
		/* Set tbl8 group invalid*/
		__atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
				__ATOMIC_RELAXED);
	} else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_SYNC) {
		/* Wait for quiescent state change before invalidating,
		 * so no reader still holds a reference to the group.
		 */
		rte_rcu_qsbr_synchronize(i_lpm->v,
			RTE_QSBR_THRID_INVALID);
		/* Set tbl8 group invalid*/
		__atomic_store(&i_lpm->lpm.tbl8[tbl8_group_start], &zero_tbl8_entry,
				__ATOMIC_RELAXED);
	} else if (i_lpm->rcu_mode == RTE_LPM_QSBR_MODE_DQ) {
		/* Push into QSBR defer queue; the group is invalidated
		 * later by __lpm_rcu_qsbr_free_resource().
		 */
		status = rte_rcu_qsbr_dq_enqueue(i_lpm->dq,
				(void *)&tbl8_group_start);
		if (status == 1) {
			LPM_LOG(ERR, "Failed to push QSBR FIFO");
			return -rte_errno;
		}
	}

	return 0;
}
57599a2dd95SBruce Richardson 
/*
 * Install a rule with depth <= 24 into the lookup tables.
 * Every tbl24 entry covered by the prefix is updated unless it already
 * holds a deeper (more specific) prefix; entries that are extended into a
 * tbl8 group propagate the update into that group instead. Readers are
 * lock-free, so each table entry is always written with a single atomic
 * store.
 */
static __rte_noinline int32_t
add_depth_small(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
		uint32_t next_hop)
{
/* In an extended tbl24 entry the next_hop field holds the tbl8 group
 * index; this alias lets the same designated initializers serve both
 * meanings.
 */
#define group_idx next_hop
	uint32_t tbl24_index, tbl24_range, tbl8_index, tbl8_group_end, i, j;

	/* Calculate the index into Table24. */
	tbl24_index = ip >> 8;
	tbl24_range = depth_to_range(depth);

	for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {
		/*
		 * For invalid OR valid and non-extended tbl 24 entries set
		 * entry.
		 */
		if (!i_lpm->lpm.tbl24[i].valid || (i_lpm->lpm.tbl24[i].valid_group == 0 &&
				i_lpm->lpm.tbl24[i].depth <= depth)) {

			struct rte_lpm_tbl_entry new_tbl24_entry = {
				.next_hop = next_hop,
				.valid = VALID,
				.valid_group = 0,
				.depth = depth,
			};

			/* Setting tbl24 entry in one go to avoid race
			 * conditions
			 */
			__atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
					__ATOMIC_RELEASE);

			continue;
		}

		if (i_lpm->lpm.tbl24[i].valid_group == 1) {
			/* If tbl24 entry is valid and extended calculate the
			 *  index into tbl8.
			 */
			tbl8_index = i_lpm->lpm.tbl24[i].group_idx *
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
			tbl8_group_end = tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

			/* Overwrite only tbl8 entries whose current rule is
			 * not deeper than the one being added.
			 */
			for (j = tbl8_index; j < tbl8_group_end; j++) {
				if (!i_lpm->lpm.tbl8[j].valid ||
						i_lpm->lpm.tbl8[j].depth <= depth) {
					struct rte_lpm_tbl_entry
						new_tbl8_entry = {
						.valid = VALID,
						.valid_group = VALID,
						.depth = depth,
						.next_hop = next_hop,
					};

					/*
					 * Setting tbl8 entry in one go to avoid
					 * race conditions
					 */
					__atomic_store(&i_lpm->lpm.tbl8[j],
						&new_tbl8_entry,
						__ATOMIC_RELAXED);

					continue;
				}
			}
		}
	}
#undef group_idx
	return 0;
}
64799a2dd95SBruce Richardson 
/*
 * Install a rule with depth > 24. Such a rule lives in a tbl8 group hung
 * off a single tbl24 entry. Three cases are handled: the tbl24 entry is
 * invalid (allocate a fresh tbl8 group), valid but not yet extended
 * (allocate a group and seed it with the tbl24 entry's rule), or already
 * extended (update the existing group in place). Whenever a new group is
 * published, the tbl8 entries are written before the tbl24 entry with a
 * release store so lock-free readers never follow a pointer into an
 * uninitialized group.
 */
static __rte_noinline int32_t
add_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked, uint8_t depth,
		uint32_t next_hop)
{
/* In an extended tbl24 entry the next_hop field holds the tbl8 group
 * index; alias it for readable initializers.
 */
#define group_idx next_hop
	uint32_t tbl24_index;
	int32_t tbl8_group_index, tbl8_group_start, tbl8_group_end, tbl8_index,
		tbl8_range, i;

	tbl24_index = (ip_masked >> 8);
	tbl8_range = depth_to_range(depth);

	if (!i_lpm->lpm.tbl24[tbl24_index].valid) {
		/* Search for a free tbl8 group. */
		tbl8_group_index = tbl8_alloc(i_lpm);

		/* Check tbl8 allocation was successful. */
		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		/* Find index into tbl8 and range. */
		tbl8_index = (tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES) +
				(ip_masked & 0xFF);

		/* Set tbl8 entry. */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = i_lpm->lpm.tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.group_idx = tbl8_group_index,
			.valid = VALID,
			.valid_group = 1,
			.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);

	} /* If valid entry but not extended calculate the index into Table8. */
	else if (i_lpm->lpm.tbl24[tbl24_index].valid_group == 0) {
		/* Search for free tbl8 group. */
		tbl8_group_index = tbl8_alloc(i_lpm);

		if (tbl8_group_index < 0) {
			return tbl8_group_index;
		}

		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_group_end = tbl8_group_start +
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

		/* Populate new tbl8 with tbl24 value so the shallower rule
		 * keeps matching everywhere the new rule does not cover.
		 */
		for (i = tbl8_group_start; i < tbl8_group_end; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = i_lpm->lpm.tbl24[tbl24_index].depth,
				.valid_group = i_lpm->lpm.tbl8[i].valid_group,
				.next_hop = i_lpm->lpm.tbl24[tbl24_index].next_hop,
			};
			__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Insert new rule into the tbl8 entry. */
		for (i = tbl8_index; i < tbl8_index + tbl8_range; i++) {
			struct rte_lpm_tbl_entry new_tbl8_entry = {
				.valid = VALID,
				.depth = depth,
				.valid_group = i_lpm->lpm.tbl8[i].valid_group,
				.next_hop = next_hop,
			};
			__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
					__ATOMIC_RELAXED);
		}

		/*
		 * Update tbl24 entry to point to new tbl8 entry. Note: The
		 * ext_flag and tbl8_index need to be updated simultaneously,
		 * so assign whole structure in one go.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
				.group_idx = tbl8_group_index,
				.valid = VALID,
				.valid_group = 1,
				.depth = 0,
		};

		/* The tbl24 entry must be written only after the
		 * tbl8 entries are written.
		 */
		__atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELEASE);

	} else { /*
		* If it is valid, extended entry calculate the index into tbl8.
		*/
		tbl8_group_index = i_lpm->lpm.tbl24[tbl24_index].group_idx;
		tbl8_group_start = tbl8_group_index *
				RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
		tbl8_index = tbl8_group_start + (ip_masked & 0xFF);

		/* Overwrite only entries whose current rule is not deeper
		 * than the one being added.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {

			if (!i_lpm->lpm.tbl8[i].valid ||
					i_lpm->lpm.tbl8[i].depth <= depth) {
				struct rte_lpm_tbl_entry new_tbl8_entry = {
					.valid = VALID,
					.depth = depth,
					.next_hop = next_hop,
					.valid_group = i_lpm->lpm.tbl8[i].valid_group,
				};

				/*
				 * Setting tbl8 entry in one go to avoid race
				 * condition
				 */
				__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
						__ATOMIC_RELAXED);

				continue;
			}
		}
	}
#undef group_idx
	return 0;
}
79799a2dd95SBruce Richardson 
79899a2dd95SBruce Richardson /*
79999a2dd95SBruce Richardson  * Add a route
80099a2dd95SBruce Richardson  */
80199a2dd95SBruce Richardson int
rte_lpm_add(struct rte_lpm * lpm,uint32_t ip,uint8_t depth,uint32_t next_hop)80299a2dd95SBruce Richardson rte_lpm_add(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
80399a2dd95SBruce Richardson 		uint32_t next_hop)
80499a2dd95SBruce Richardson {
80599a2dd95SBruce Richardson 	int32_t rule_index, status = 0;
80699a2dd95SBruce Richardson 	struct __rte_lpm *i_lpm;
80799a2dd95SBruce Richardson 	uint32_t ip_masked;
80899a2dd95SBruce Richardson 
80999a2dd95SBruce Richardson 	/* Check user arguments. */
81099a2dd95SBruce Richardson 	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
81199a2dd95SBruce Richardson 		return -EINVAL;
81299a2dd95SBruce Richardson 
81399a2dd95SBruce Richardson 	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
81499a2dd95SBruce Richardson 	ip_masked = ip & depth_to_mask(depth);
81599a2dd95SBruce Richardson 
81699a2dd95SBruce Richardson 	/* Add the rule to the rule table. */
81799a2dd95SBruce Richardson 	rule_index = rule_add(i_lpm, ip_masked, depth, next_hop);
81899a2dd95SBruce Richardson 
81999a2dd95SBruce Richardson 	/* Skip table entries update if The rule is the same as
82099a2dd95SBruce Richardson 	 * the rule in the rules table.
82199a2dd95SBruce Richardson 	 */
82299a2dd95SBruce Richardson 	if (rule_index == -EEXIST)
82399a2dd95SBruce Richardson 		return 0;
82499a2dd95SBruce Richardson 
82599a2dd95SBruce Richardson 	/* If the is no space available for new rule return error. */
82699a2dd95SBruce Richardson 	if (rule_index < 0) {
82799a2dd95SBruce Richardson 		return rule_index;
82899a2dd95SBruce Richardson 	}
82999a2dd95SBruce Richardson 
83099a2dd95SBruce Richardson 	if (depth <= MAX_DEPTH_TBL24) {
83199a2dd95SBruce Richardson 		status = add_depth_small(i_lpm, ip_masked, depth, next_hop);
83299a2dd95SBruce Richardson 	} else { /* If depth > RTE_LPM_MAX_DEPTH_TBL24 */
83399a2dd95SBruce Richardson 		status = add_depth_big(i_lpm, ip_masked, depth, next_hop);
83499a2dd95SBruce Richardson 
83599a2dd95SBruce Richardson 		/*
83699a2dd95SBruce Richardson 		 * If add fails due to exhaustion of tbl8 extensions delete
83799a2dd95SBruce Richardson 		 * rule that was added to rule table.
83899a2dd95SBruce Richardson 		 */
83999a2dd95SBruce Richardson 		if (status < 0) {
84099a2dd95SBruce Richardson 			rule_delete(i_lpm, rule_index, depth);
84199a2dd95SBruce Richardson 
84299a2dd95SBruce Richardson 			return status;
84399a2dd95SBruce Richardson 		}
84499a2dd95SBruce Richardson 	}
84599a2dd95SBruce Richardson 
84699a2dd95SBruce Richardson 	return 0;
84799a2dd95SBruce Richardson }
84899a2dd95SBruce Richardson 
84999a2dd95SBruce Richardson /*
85099a2dd95SBruce Richardson  * Look for a rule in the high-level rules table
85199a2dd95SBruce Richardson  */
85299a2dd95SBruce Richardson int
rte_lpm_is_rule_present(struct rte_lpm * lpm,uint32_t ip,uint8_t depth,uint32_t * next_hop)85399a2dd95SBruce Richardson rte_lpm_is_rule_present(struct rte_lpm *lpm, uint32_t ip, uint8_t depth,
85499a2dd95SBruce Richardson uint32_t *next_hop)
85599a2dd95SBruce Richardson {
85699a2dd95SBruce Richardson 	struct __rte_lpm *i_lpm;
85799a2dd95SBruce Richardson 	uint32_t ip_masked;
85899a2dd95SBruce Richardson 	int32_t rule_index;
85999a2dd95SBruce Richardson 
86099a2dd95SBruce Richardson 	/* Check user arguments. */
86199a2dd95SBruce Richardson 	if ((lpm == NULL) ||
86299a2dd95SBruce Richardson 		(next_hop == NULL) ||
86399a2dd95SBruce Richardson 		(depth < 1) || (depth > RTE_LPM_MAX_DEPTH))
86499a2dd95SBruce Richardson 		return -EINVAL;
86599a2dd95SBruce Richardson 
86699a2dd95SBruce Richardson 	/* Look for the rule using rule_find. */
86799a2dd95SBruce Richardson 	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
86899a2dd95SBruce Richardson 	ip_masked = ip & depth_to_mask(depth);
86999a2dd95SBruce Richardson 	rule_index = rule_find(i_lpm, ip_masked, depth);
87099a2dd95SBruce Richardson 
87199a2dd95SBruce Richardson 	if (rule_index >= 0) {
87299a2dd95SBruce Richardson 		*next_hop = i_lpm->rules_tbl[rule_index].next_hop;
87399a2dd95SBruce Richardson 		return 1;
87499a2dd95SBruce Richardson 	}
87599a2dd95SBruce Richardson 
87699a2dd95SBruce Richardson 	/* If rule is not found return 0. */
87799a2dd95SBruce Richardson 	return 0;
87899a2dd95SBruce Richardson }
87999a2dd95SBruce Richardson 
88099a2dd95SBruce Richardson static int32_t
find_previous_rule(struct __rte_lpm * i_lpm,uint32_t ip,uint8_t depth,uint8_t * sub_rule_depth)88199a2dd95SBruce Richardson find_previous_rule(struct __rte_lpm *i_lpm, uint32_t ip, uint8_t depth,
88299a2dd95SBruce Richardson 		uint8_t *sub_rule_depth)
88399a2dd95SBruce Richardson {
88499a2dd95SBruce Richardson 	int32_t rule_index;
88599a2dd95SBruce Richardson 	uint32_t ip_masked;
88699a2dd95SBruce Richardson 	uint8_t prev_depth;
88799a2dd95SBruce Richardson 
88899a2dd95SBruce Richardson 	for (prev_depth = (uint8_t)(depth - 1); prev_depth > 0; prev_depth--) {
88999a2dd95SBruce Richardson 		ip_masked = ip & depth_to_mask(prev_depth);
89099a2dd95SBruce Richardson 
89199a2dd95SBruce Richardson 		rule_index = rule_find(i_lpm, ip_masked, prev_depth);
89299a2dd95SBruce Richardson 
89399a2dd95SBruce Richardson 		if (rule_index >= 0) {
89499a2dd95SBruce Richardson 			*sub_rule_depth = prev_depth;
89599a2dd95SBruce Richardson 			return rule_index;
89699a2dd95SBruce Richardson 		}
89799a2dd95SBruce Richardson 	}
89899a2dd95SBruce Richardson 
89999a2dd95SBruce Richardson 	return -1;
90099a2dd95SBruce Richardson }
90199a2dd95SBruce Richardson 
/*
 * Remove a rule with depth <= 24 from the lookup tables. When a covering
 * shorter-prefix rule exists (sub_rule_index >= 0) the affected entries
 * are rewritten to point at it; otherwise they are invalidated. Extended
 * tbl24 entries propagate the change into their tbl8 group. All entry
 * rewrites are single atomic stores so lock-free readers never observe a
 * half-written entry.
 */
static int32_t
delete_depth_small(struct __rte_lpm *i_lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
/* In an extended tbl24 entry the next_hop field holds the tbl8 group
 * index; alias it for readable initializers.
 */
#define group_idx next_hop
	uint32_t tbl24_range, tbl24_index, tbl8_group_index, tbl8_index, i, j;

	/* Calculate the range and index into Table24. */
	tbl24_range = depth_to_range(depth);
	tbl24_index = (ip_masked >> 8);
	struct rte_lpm_tbl_entry zero_tbl24_entry = {0};

	/*
	 * Firstly check the sub_rule_index. A -1 indicates no replacement rule
	 * and a positive number indicates a sub_rule_index.
	 */
	if (sub_rule_index < 0) {
		/*
		 * If no replacement rule exists then invalidate entries
		 * associated with this rule.
		 */
		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			/* Only touch entries still owned by a rule no deeper
			 * than the one being deleted.
			 */
			if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
					i_lpm->lpm.tbl24[i].depth <= depth) {
				__atomic_store(&i_lpm->lpm.tbl24[i],
					&zero_tbl24_entry, __ATOMIC_RELEASE);
			} else if (i_lpm->lpm.tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = i_lpm->lpm.tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (i_lpm->lpm.tbl8[j].depth <= depth)
						i_lpm->lpm.tbl8[j].valid = INVALID;
				}
			}
		}
	} else {
		/*
		 * If a replacement rule exists then modify entries
		 * associated with this rule.
		 */

		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = i_lpm->rules_tbl[sub_rule_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = sub_rule_depth,
		};

		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.valid_group = VALID,
			.depth = sub_rule_depth,
			.next_hop = i_lpm->rules_tbl
			[sub_rule_index].next_hop,
		};

		for (i = tbl24_index; i < (tbl24_index + tbl24_range); i++) {

			/* Rewrite only entries still owned by a rule no
			 * deeper than the one being deleted.
			 */
			if (i_lpm->lpm.tbl24[i].valid_group == 0 &&
					i_lpm->lpm.tbl24[i].depth <= depth) {
				__atomic_store(&i_lpm->lpm.tbl24[i], &new_tbl24_entry,
						__ATOMIC_RELEASE);
			} else  if (i_lpm->lpm.tbl24[i].valid_group == 1) {
				/*
				 * If TBL24 entry is extended, then there has
				 * to be a rule with depth >= 25 in the
				 * associated TBL8 group.
				 */

				tbl8_group_index = i_lpm->lpm.tbl24[i].group_idx;
				tbl8_index = tbl8_group_index *
						RTE_LPM_TBL8_GROUP_NUM_ENTRIES;

				for (j = tbl8_index; j < (tbl8_index +
					RTE_LPM_TBL8_GROUP_NUM_ENTRIES); j++) {

					if (i_lpm->lpm.tbl8[j].depth <= depth)
						__atomic_store(&i_lpm->lpm.tbl8[j],
							&new_tbl8_entry,
							__ATOMIC_RELAXED);
				}
			}
		}
	}
#undef group_idx
	return 0;
}
100099a2dd95SBruce Richardson 
100199a2dd95SBruce Richardson /*
100299a2dd95SBruce Richardson  * Checks if table 8 group can be recycled.
100399a2dd95SBruce Richardson  *
100499a2dd95SBruce Richardson  * Return of -EEXIST means tbl8 is in use and thus can not be recycled.
100599a2dd95SBruce Richardson  * Return of -EINVAL means tbl8 is empty and thus can be recycled
100699a2dd95SBruce Richardson  * Return of value > -1 means tbl8 is in use but has all the same values and
100799a2dd95SBruce Richardson  * thus can be recycled
100899a2dd95SBruce Richardson  */
100999a2dd95SBruce Richardson static int32_t
tbl8_recycle_check(struct rte_lpm_tbl_entry * tbl8,uint32_t tbl8_group_start)101099a2dd95SBruce Richardson tbl8_recycle_check(struct rte_lpm_tbl_entry *tbl8,
101199a2dd95SBruce Richardson 		uint32_t tbl8_group_start)
101299a2dd95SBruce Richardson {
101399a2dd95SBruce Richardson 	uint32_t tbl8_group_end, i;
101499a2dd95SBruce Richardson 	tbl8_group_end = tbl8_group_start + RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
101599a2dd95SBruce Richardson 
101699a2dd95SBruce Richardson 	/*
101799a2dd95SBruce Richardson 	 * Check the first entry of the given tbl8. If it is invalid we know
101899a2dd95SBruce Richardson 	 * this tbl8 does not contain any rule with a depth < RTE_LPM_MAX_DEPTH
101999a2dd95SBruce Richardson 	 *  (As they would affect all entries in a tbl8) and thus this table
102099a2dd95SBruce Richardson 	 *  can not be recycled.
102199a2dd95SBruce Richardson 	 */
102299a2dd95SBruce Richardson 	if (tbl8[tbl8_group_start].valid) {
102399a2dd95SBruce Richardson 		/*
102499a2dd95SBruce Richardson 		 * If first entry is valid check if the depth is less than 24
102599a2dd95SBruce Richardson 		 * and if so check the rest of the entries to verify that they
102699a2dd95SBruce Richardson 		 * are all of this depth.
102799a2dd95SBruce Richardson 		 */
102899a2dd95SBruce Richardson 		if (tbl8[tbl8_group_start].depth <= MAX_DEPTH_TBL24) {
102999a2dd95SBruce Richardson 			for (i = (tbl8_group_start + 1); i < tbl8_group_end;
103099a2dd95SBruce Richardson 					i++) {
103199a2dd95SBruce Richardson 
103299a2dd95SBruce Richardson 				if (tbl8[i].depth !=
103399a2dd95SBruce Richardson 						tbl8[tbl8_group_start].depth) {
103499a2dd95SBruce Richardson 
103599a2dd95SBruce Richardson 					return -EEXIST;
103699a2dd95SBruce Richardson 				}
103799a2dd95SBruce Richardson 			}
103899a2dd95SBruce Richardson 			/* If all entries are the same return the tb8 index */
103999a2dd95SBruce Richardson 			return tbl8_group_start;
104099a2dd95SBruce Richardson 		}
104199a2dd95SBruce Richardson 
104299a2dd95SBruce Richardson 		return -EEXIST;
104399a2dd95SBruce Richardson 	}
104499a2dd95SBruce Richardson 	/*
104599a2dd95SBruce Richardson 	 * If the first entry is invalid check if the rest of the entries in
104699a2dd95SBruce Richardson 	 * the tbl8 are invalid.
104799a2dd95SBruce Richardson 	 */
104899a2dd95SBruce Richardson 	for (i = (tbl8_group_start + 1); i < tbl8_group_end; i++) {
104999a2dd95SBruce Richardson 		if (tbl8[i].valid)
105099a2dd95SBruce Richardson 			return -EEXIST;
105199a2dd95SBruce Richardson 	}
105299a2dd95SBruce Richardson 	/* If no valid entries are found then return -EINVAL. */
105399a2dd95SBruce Richardson 	return -EINVAL;
105499a2dd95SBruce Richardson }
105599a2dd95SBruce Richardson 
/*
 * Remove a rule with depth > 24 from its tbl8 group. Covered entries are
 * either invalidated or rewritten to the covering rule, then the group is
 * checked for recycling: if it became empty the tbl24 entry is cleared,
 * and if it collapsed to a single shallow rule that rule is folded back
 * into tbl24. In both recycle paths the tbl24 entry is written before the
 * tbl8 group is freed, with a release fence keeping the free from being
 * reordered ahead of the tbl24 update.
 */
static int32_t
delete_depth_big(struct __rte_lpm *i_lpm, uint32_t ip_masked,
	uint8_t depth, int32_t sub_rule_index, uint8_t sub_rule_depth)
{
/* In an extended tbl24 entry the next_hop field holds the tbl8 group
 * index; alias it for readable initializers.
 */
#define group_idx next_hop
	uint32_t tbl24_index, tbl8_group_index, tbl8_group_start, tbl8_index,
			tbl8_range, i;
	int32_t tbl8_recycle_index, status = 0;

	/*
	 * Calculate the index into tbl24 and range. Note: All depths larger
	 * than MAX_DEPTH_TBL24 are associated with only one tbl24 entry.
	 */
	tbl24_index = ip_masked >> 8;

	/* Calculate the index into tbl8 and range. */
	tbl8_group_index = i_lpm->lpm.tbl24[tbl24_index].group_idx;
	tbl8_group_start = tbl8_group_index * RTE_LPM_TBL8_GROUP_NUM_ENTRIES;
	tbl8_index = tbl8_group_start + (ip_masked & 0xFF);
	tbl8_range = depth_to_range(depth);

	if (sub_rule_index < 0) {
		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be removed or modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			/* Invalidate only entries still owned by a rule no
			 * deeper than the one being deleted.
			 */
			if (i_lpm->lpm.tbl8[i].depth <= depth)
				i_lpm->lpm.tbl8[i].valid = INVALID;
		}
	} else {
		/* Set new tbl8 entry pointing at the covering rule. */
		struct rte_lpm_tbl_entry new_tbl8_entry = {
			.valid = VALID,
			.depth = sub_rule_depth,
			.valid_group = i_lpm->lpm.tbl8[tbl8_group_start].valid_group,
			.next_hop = i_lpm->rules_tbl[sub_rule_index].next_hop,
		};

		/*
		 * Loop through the range of entries on tbl8 for which the
		 * rule_to_delete must be modified.
		 */
		for (i = tbl8_index; i < (tbl8_index + tbl8_range); i++) {
			if (i_lpm->lpm.tbl8[i].depth <= depth)
				__atomic_store(&i_lpm->lpm.tbl8[i], &new_tbl8_entry,
						__ATOMIC_RELAXED);
		}
	}

	/*
	 * Check if there are any valid entries in this tbl8 group. If all
	 * tbl8 entries are invalid we can free the tbl8 and invalidate the
	 * associated tbl24 entry.
	 */

	tbl8_recycle_index = tbl8_recycle_check(i_lpm->lpm.tbl8, tbl8_group_start);

	if (tbl8_recycle_index == -EINVAL) {
		/* Set tbl24 before freeing tbl8 to avoid race condition.
		 * Prevent the free of the tbl8 group from hoisting.
		 */
		i_lpm->lpm.tbl24[tbl24_index].valid = 0;
		rte_atomic_thread_fence(rte_memory_order_release);
		status = tbl8_free(i_lpm, tbl8_group_start);
	} else if (tbl8_recycle_index > -1) {
		/* Update tbl24 entry: fold the group's single remaining rule
		 * back into a non-extended tbl24 entry.
		 */
		struct rte_lpm_tbl_entry new_tbl24_entry = {
			.next_hop = i_lpm->lpm.tbl8[tbl8_recycle_index].next_hop,
			.valid = VALID,
			.valid_group = 0,
			.depth = i_lpm->lpm.tbl8[tbl8_recycle_index].depth,
		};

		/* Set tbl24 before freeing tbl8 to avoid race condition.
		 * Prevent the free of the tbl8 group from hoisting.
		 */
		__atomic_store(&i_lpm->lpm.tbl24[tbl24_index], &new_tbl24_entry,
				__ATOMIC_RELAXED);
		rte_atomic_thread_fence(rte_memory_order_release);
		status = tbl8_free(i_lpm, tbl8_group_start);
	}
#undef group_idx
	return status;
}
114199a2dd95SBruce Richardson 
114299a2dd95SBruce Richardson /*
114399a2dd95SBruce Richardson  * Deletes a rule
114499a2dd95SBruce Richardson  */
114599a2dd95SBruce Richardson int
rte_lpm_delete(struct rte_lpm * lpm,uint32_t ip,uint8_t depth)114699a2dd95SBruce Richardson rte_lpm_delete(struct rte_lpm *lpm, uint32_t ip, uint8_t depth)
114799a2dd95SBruce Richardson {
114899a2dd95SBruce Richardson 	int32_t rule_to_delete_index, sub_rule_index;
114999a2dd95SBruce Richardson 	struct __rte_lpm *i_lpm;
115099a2dd95SBruce Richardson 	uint32_t ip_masked;
115199a2dd95SBruce Richardson 	uint8_t sub_rule_depth;
115299a2dd95SBruce Richardson 	/*
115399a2dd95SBruce Richardson 	 * Check input arguments. Note: IP must be a positive integer of 32
115499a2dd95SBruce Richardson 	 * bits in length therefore it need not be checked.
115599a2dd95SBruce Richardson 	 */
115699a2dd95SBruce Richardson 	if ((lpm == NULL) || (depth < 1) || (depth > RTE_LPM_MAX_DEPTH)) {
115799a2dd95SBruce Richardson 		return -EINVAL;
115899a2dd95SBruce Richardson 	}
115999a2dd95SBruce Richardson 
116099a2dd95SBruce Richardson 	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
116199a2dd95SBruce Richardson 	ip_masked = ip & depth_to_mask(depth);
116299a2dd95SBruce Richardson 
116399a2dd95SBruce Richardson 	/*
116499a2dd95SBruce Richardson 	 * Find the index of the input rule, that needs to be deleted, in the
116599a2dd95SBruce Richardson 	 * rule table.
116699a2dd95SBruce Richardson 	 */
116799a2dd95SBruce Richardson 	rule_to_delete_index = rule_find(i_lpm, ip_masked, depth);
116899a2dd95SBruce Richardson 
116999a2dd95SBruce Richardson 	/*
117099a2dd95SBruce Richardson 	 * Check if rule_to_delete_index was found. If no rule was found the
117199a2dd95SBruce Richardson 	 * function rule_find returns -EINVAL.
117299a2dd95SBruce Richardson 	 */
117399a2dd95SBruce Richardson 	if (rule_to_delete_index < 0)
117499a2dd95SBruce Richardson 		return -EINVAL;
117599a2dd95SBruce Richardson 
117699a2dd95SBruce Richardson 	/* Delete the rule from the rule table. */
117799a2dd95SBruce Richardson 	rule_delete(i_lpm, rule_to_delete_index, depth);
117899a2dd95SBruce Richardson 
117999a2dd95SBruce Richardson 	/*
118099a2dd95SBruce Richardson 	 * Find rule to replace the rule_to_delete. If there is no rule to
118199a2dd95SBruce Richardson 	 * replace the rule_to_delete we return -1 and invalidate the table
118299a2dd95SBruce Richardson 	 * entries associated with this rule.
118399a2dd95SBruce Richardson 	 */
118499a2dd95SBruce Richardson 	sub_rule_depth = 0;
118599a2dd95SBruce Richardson 	sub_rule_index = find_previous_rule(i_lpm, ip, depth, &sub_rule_depth);
118699a2dd95SBruce Richardson 
118799a2dd95SBruce Richardson 	/*
118899a2dd95SBruce Richardson 	 * If the input depth value is less than 25 use function
118999a2dd95SBruce Richardson 	 * delete_depth_small otherwise use delete_depth_big.
119099a2dd95SBruce Richardson 	 */
119199a2dd95SBruce Richardson 	if (depth <= MAX_DEPTH_TBL24) {
119299a2dd95SBruce Richardson 		return delete_depth_small(i_lpm, ip_masked, depth,
119399a2dd95SBruce Richardson 				sub_rule_index, sub_rule_depth);
119499a2dd95SBruce Richardson 	} else { /* If depth > MAX_DEPTH_TBL24 */
119599a2dd95SBruce Richardson 		return delete_depth_big(i_lpm, ip_masked, depth, sub_rule_index,
119699a2dd95SBruce Richardson 				sub_rule_depth);
119799a2dd95SBruce Richardson 	}
119899a2dd95SBruce Richardson }
119999a2dd95SBruce Richardson 
120099a2dd95SBruce Richardson /*
120199a2dd95SBruce Richardson  * Delete all rules from the LPM table.
120299a2dd95SBruce Richardson  */
120399a2dd95SBruce Richardson void
rte_lpm_delete_all(struct rte_lpm * lpm)120499a2dd95SBruce Richardson rte_lpm_delete_all(struct rte_lpm *lpm)
120599a2dd95SBruce Richardson {
120699a2dd95SBruce Richardson 	struct __rte_lpm *i_lpm;
120799a2dd95SBruce Richardson 
120899a2dd95SBruce Richardson 	i_lpm = container_of(lpm, struct __rte_lpm, lpm);
120999a2dd95SBruce Richardson 	/* Zero rule information. */
121099a2dd95SBruce Richardson 	memset(i_lpm->rule_info, 0, sizeof(i_lpm->rule_info));
121199a2dd95SBruce Richardson 
121299a2dd95SBruce Richardson 	/* Zero tbl24. */
121399a2dd95SBruce Richardson 	memset(i_lpm->lpm.tbl24, 0, sizeof(i_lpm->lpm.tbl24));
121499a2dd95SBruce Richardson 
121599a2dd95SBruce Richardson 	/* Zero tbl8. */
121699a2dd95SBruce Richardson 	memset(i_lpm->lpm.tbl8, 0, sizeof(i_lpm->lpm.tbl8[0])
121799a2dd95SBruce Richardson 			* RTE_LPM_TBL8_GROUP_NUM_ENTRIES * i_lpm->number_tbl8s);
121899a2dd95SBruce Richardson 
121999a2dd95SBruce Richardson 	/* Delete all rules form the rules table. */
122099a2dd95SBruce Richardson 	memset(i_lpm->rules_tbl, 0, sizeof(i_lpm->rules_tbl[0]) * i_lpm->max_rules);
122199a2dd95SBruce Richardson }
1222