/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdalign.h>
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_malloc.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_lpm.h>

#include "rte_table_lpm.h"

#include "table_log.h"

#ifndef RTE_TABLE_LPM_MAX_NEXT_HOPS
#define RTE_TABLE_LPM_MAX_NEXT_HOPS                        65536
#endif

#ifdef RTE_TABLE_STATS_COLLECT

#define RTE_TABLE_LPM_STATS_PKTS_IN_ADD(table, val) \
	table->stats.n_pkts_in += val
#define RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(table, val) \
	table->stats.n_pkts_lookup_miss += val

#else

#define RTE_TABLE_LPM_STATS_PKTS_IN_ADD(table, val)
#define RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(table, val)

#endif

struct rte_table_lpm {
	struct rte_table_stats stats;

	/* Input parameters */
	uint32_t entry_size;
	uint32_t entry_unique_size;
	uint32_t n_rules;
	uint32_t offset;

	/* Handle to low-level LPM table */
	struct rte_lpm *lpm;

	/* Next Hop Table (NHT): nht[] holds one entry_size-byte entry per
	 * slot; nht_users[i] counts the LPM rules referencing slot i
	 * (0 means the slot is free).
	 */
	uint32_t nht_users[RTE_TABLE_LPM_MAX_NEXT_HOPS];
	alignas(RTE_CACHE_LINE_SIZE) uint8_t nht[];
};

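/*
 * Create the table: validate the parameters, allocate the table structure
 * together with an NHT of RTE_TABLE_LPM_MAX_NEXT_HOPS entries of entry_size
 * bytes each, and create the underlying rte_lpm instance.
 */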
static void *
rte_table_lpm_create(void *params, int socket_id, uint32_t entry_size)
{
	struct rte_table_lpm_params *p = params;
	struct rte_table_lpm *lpm;
	struct rte_lpm_config lpm_config;

	uint32_t total_size, nht_size;

	/* Check input parameters */
	if (p == NULL) {
		TABLE_LOG(ERR, "%s: NULL input parameters", __func__);
		return NULL;
	}
	if (p->n_rules == 0) {
		TABLE_LOG(ERR, "%s: Invalid n_rules", __func__);
		return NULL;
	}
	if (p->number_tbl8s == 0) {
		TABLE_LOG(ERR, "%s: Invalid number_tbl8s", __func__);
		return NULL;
	}
	if (p->entry_unique_size == 0) {
		TABLE_LOG(ERR, "%s: Invalid entry_unique_size",
			__func__);
		return NULL;
	}
	if (p->entry_unique_size > entry_size) {
		TABLE_LOG(ERR, "%s: Invalid entry_unique_size",
			__func__);
		return NULL;
	}
	if (p->name == NULL) {
		TABLE_LOG(ERR, "%s: Table name is NULL",
			__func__);
		return NULL;
	}
	entry_size = RTE_ALIGN(entry_size, sizeof(uint64_t));

	/* Memory allocation */
	nht_size = RTE_TABLE_LPM_MAX_NEXT_HOPS * entry_size;
	total_size = sizeof(struct rte_table_lpm) + nht_size;
	lpm = rte_zmalloc_socket("TABLE", total_size, RTE_CACHE_LINE_SIZE,
		socket_id);
	if (lpm == NULL) {
		TABLE_LOG(ERR,
			"%s: Cannot allocate %u bytes for LPM table",
			__func__, total_size);
		return NULL;
	}

	/* LPM low-level table creation */
	lpm_config.max_rules = p->n_rules;
	lpm_config.number_tbl8s = p->number_tbl8s;
	lpm_config.flags = p->flags;
	lpm->lpm = rte_lpm_create(p->name, socket_id, &lpm_config);

	if (lpm->lpm == NULL) {
		rte_free(lpm);
		TABLE_LOG(ERR, "Unable to create low-level LPM table");
		return NULL;
	}

	/* Memory initialization */
	lpm->entry_size = entry_size;
	lpm->entry_unique_size = p->entry_unique_size;
	lpm->n_rules = p->n_rules;
	lpm->offset = p->offset;

	return lpm;
}

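/*
 * Free the table: release the underlying rte_lpm instance and the table
 * memory itself.
 */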
static int
rte_table_lpm_free(void *table)
{
	struct rte_table_lpm *lpm = table;

	/* Check input parameters */
	if (lpm == NULL) {
		TABLE_LOG(ERR, "%s: table parameter is NULL", __func__);
		return -EINVAL;
	}

	/* Free previously allocated resources */
	rte_lpm_free(lpm->lpm);
	rte_free(lpm);

	return 0;
}

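/*
 * Linear scan of the NHT for an unused slot (reference count of zero).
 * On success, store the slot index in *pos and return 1; return 0 when the
 * NHT is full.
 */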
static int
nht_find_free(struct rte_table_lpm *lpm, uint32_t *pos)
{
	uint32_t i;

	for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
		if (lpm->nht_users[i] == 0) {
			*pos = i;
			return 1;
		}
	}

	return 0;
}

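/*
 * Linear scan of the NHT for a slot already holding the given entry, where
 * only the first entry_unique_size bytes are compared. On success, store the
 * slot index in *pos and return 1; return 0 when no match is found.
 */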
static int
nht_find_existing(struct rte_table_lpm *lpm, void *entry, uint32_t *pos)
{
	uint32_t i;

	for (i = 0; i < RTE_TABLE_LPM_MAX_NEXT_HOPS; i++) {
		uint8_t *nht_entry = &lpm->nht[i * lpm->entry_size];

		if ((lpm->nht_users[i] > 0) && (memcmp(nht_entry, entry,
			lpm->entry_unique_size) == 0)) {
			*pos = i;
			return 1;
		}
	}

	return 0;
}

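/*
 * Add a rule mapping an IPv4 prefix (struct rte_table_lpm_key) to an entry.
 * The entry is deduplicated through the NHT: a slot with the same unique key
 * bytes is reused, otherwise a free slot is populated. The low-level LPM rule
 * stores the NHT slot index as its next hop. Reference counts are updated,
 * releasing the slot previously used by the prefix when its rule is replaced.
 * *key_found reports whether the prefix was already present and *entry_ptr
 * points to the NHT entry in use.
 */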
static int
rte_table_lpm_entry_add(
	void *table,
	void *key,
	void *entry,
	int *key_found,
	void **entry_ptr)
{
	struct rte_table_lpm *lpm = table;
	struct rte_table_lpm_key *ip_prefix = key;
	uint32_t nht_pos, nht_pos0_valid;
	int status;
	uint32_t nht_pos0 = 0;

	/* Check input parameters */
	if (lpm == NULL) {
		TABLE_LOG(ERR, "%s: table parameter is NULL", __func__);
		return -EINVAL;
	}
	if (ip_prefix == NULL) {
		TABLE_LOG(ERR, "%s: ip_prefix parameter is NULL",
			__func__);
		return -EINVAL;
	}
	if (entry == NULL) {
		TABLE_LOG(ERR, "%s: entry parameter is NULL", __func__);
		return -EINVAL;
	}

	if ((ip_prefix->depth == 0) || (ip_prefix->depth > 32)) {
		TABLE_LOG(ERR, "%s: invalid depth (%d)",
			__func__, ip_prefix->depth);
		return -EINVAL;
	}

	/* Check if rule is already present in the table */
	status = rte_lpm_is_rule_present(lpm->lpm, ip_prefix->ip,
		ip_prefix->depth, &nht_pos0);
	nht_pos0_valid = status > 0;

	/* Find existing or free NHT entry */
	if (nht_find_existing(lpm, entry, &nht_pos) == 0) {
		uint8_t *nht_entry;

		if (nht_find_free(lpm, &nht_pos) == 0) {
			TABLE_LOG(ERR, "%s: NHT full", __func__);
			return -1;
		}

		nht_entry = &lpm->nht[nht_pos * lpm->entry_size];
		memcpy(nht_entry, entry, lpm->entry_size);
	}

	/* Add rule to low level LPM table */
	if (rte_lpm_add(lpm->lpm, ip_prefix->ip, ip_prefix->depth, nht_pos) < 0) {
		TABLE_LOG(ERR, "%s: LPM rule add failed", __func__);
		return -1;
	}

	/* Commit NHT changes */
	lpm->nht_users[nht_pos]++;
	lpm->nht_users[nht_pos0] -= nht_pos0_valid;

	*key_found = nht_pos0_valid;
	*entry_ptr = (void *) &lpm->nht[nht_pos * lpm->entry_size];
	return 0;
}

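/*
 * Delete the rule for an IPv4 prefix: remove it from the low-level LPM table,
 * drop the reference on its NHT slot and, when the caller provides an entry
 * buffer, copy the previous entry back. *key_found reports whether the prefix
 * was present.
 */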
static int
rte_table_lpm_entry_delete(
	void *table,
	void *key,
	int *key_found,
	void *entry)
{
	struct rte_table_lpm *lpm = table;
	struct rte_table_lpm_key *ip_prefix = key;
	uint32_t nht_pos;
	int status;

	/* Check input parameters */
	if (lpm == NULL) {
		TABLE_LOG(ERR, "%s: table parameter is NULL", __func__);
		return -EINVAL;
	}
	if (ip_prefix == NULL) {
		TABLE_LOG(ERR, "%s: ip_prefix parameter is NULL",
			__func__);
		return -EINVAL;
	}
	if ((ip_prefix->depth == 0) || (ip_prefix->depth > 32)) {
		TABLE_LOG(ERR, "%s: invalid depth (%d)", __func__,
			ip_prefix->depth);
		return -EINVAL;
	}

	/* Return if rule is not present in the table */
	status = rte_lpm_is_rule_present(lpm->lpm, ip_prefix->ip,
		ip_prefix->depth, &nht_pos);
	if (status < 0) {
		TABLE_LOG(ERR, "%s: LPM algorithmic error", __func__);
		return -1;
	}
	if (status == 0) {
		*key_found = 0;
		return 0;
	}

	/* Delete rule from the low-level LPM table */
	status = rte_lpm_delete(lpm->lpm, ip_prefix->ip, ip_prefix->depth);
	if (status) {
		TABLE_LOG(ERR, "%s: LPM rule delete failed", __func__);
		return -1;
	}

	/* Commit NHT changes */
	lpm->nht_users[nht_pos]--;

	*key_found = 1;
	if (entry)
		memcpy(entry, &lpm->nht[nht_pos * lpm->entry_size],
			lpm->entry_size);

	return 0;
}

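/*
 * Burst lookup: for every packet selected by pkts_mask, read the IPv4 address
 * from the packet metadata at the configured offset (stored in network byte
 * order) and look it up in the low-level LPM table. On a hit, the matching
 * NHT entry is stored in entries[] and the packet is flagged in
 * *lookup_hit_mask.
 */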
static int
rte_table_lpm_lookup(
	void *table,
	struct rte_mbuf **pkts,
	uint64_t pkts_mask,
	uint64_t *lookup_hit_mask,
	void **entries)
{
	struct rte_table_lpm *lpm = (struct rte_table_lpm *) table;
	uint64_t pkts_out_mask = 0;
	uint32_t i;

	__rte_unused uint32_t n_pkts_in = rte_popcount64(pkts_mask);
	RTE_TABLE_LPM_STATS_PKTS_IN_ADD(lpm, n_pkts_in);

	pkts_out_mask = 0;
	for (i = 0; i < (uint32_t)(RTE_PORT_IN_BURST_SIZE_MAX -
		rte_clz64(pkts_mask)); i++) {
		uint64_t pkt_mask = 1LLU << i;

		if (pkt_mask & pkts_mask) {
			struct rte_mbuf *pkt = pkts[i];
			uint32_t ip = rte_bswap32(
				RTE_MBUF_METADATA_UINT32(pkt, lpm->offset));
			int status;
			uint32_t nht_pos;

			status = rte_lpm_lookup(lpm->lpm, ip, &nht_pos);
			if (status == 0) {
				pkts_out_mask |= pkt_mask;
				entries[i] = (void *) &lpm->nht[nht_pos *
					lpm->entry_size];
			}
		}
	}

	*lookup_hit_mask = pkts_out_mask;
	RTE_TABLE_LPM_STATS_PKTS_LOOKUP_MISS(lpm, n_pkts_in - rte_popcount64(pkts_out_mask));
	return 0;
}

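/*
 * Read the table statistics counters into *stats (when non-NULL) and
 * optionally reset them.
 */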
static int
rte_table_lpm_stats_read(void *table, struct rte_table_stats *stats, int clear)
{
	struct rte_table_lpm *t = table;

	if (stats != NULL)
		memcpy(stats, &t->stats, sizeof(t->stats));

	if (clear)
		memset(&t->stats, 0, sizeof(t->stats));

	return 0;
}

struct rte_table_ops rte_table_lpm_ops = {
	.f_create = rte_table_lpm_create,
	.f_free = rte_table_lpm_free,
	.f_add = rte_table_lpm_entry_add,
	.f_delete = rte_table_lpm_entry_delete,
	.f_add_bulk = NULL,
	.f_delete_bulk = NULL,
	.f_lookup = rte_table_lpm_lookup,
	.f_stats = rte_table_lpm_stats_read,
};
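
/*
 * Illustrative usage sketch: an application typically drives this table
 * through the ops above, either directly or via the rte_pipeline library.
 * The names app_entry, my_entry and APP_METADATA_OFFSET below are
 * application-defined placeholders, and the sizing values are examples only.
 *
 *	struct rte_table_lpm_params params = {
 *		.name = "LPM",
 *		.n_rules = 1 << 16,
 *		.number_tbl8s = 1 << 8,
 *		.flags = 0,
 *		.entry_unique_size = sizeof(struct app_entry),
 *		.offset = APP_METADATA_OFFSET,
 *	};
 *	void *t = rte_table_lpm_ops.f_create(&params, SOCKET_ID_ANY,
 *		sizeof(struct app_entry));
 *
 *	struct rte_table_lpm_key key = {
 *		.ip = RTE_IPV4(10, 0, 0, 0),
 *		.depth = 24,
 *	};
 *	int key_found;
 *	void *entry_ptr;
 *	rte_table_lpm_ops.f_add(t, &key, &my_entry, &key_found, &entry_ptr);
 */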