xref: /dpdk/lib/table/rte_swx_table_wm.c (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>

#include <rte_common.h>
#include <rte_prefetch.h>
#include <rte_cycles.h>
#include <rte_acl.h>

#include "rte_swx_table_wm.h"

#ifndef RTE_SWX_TABLE_EM_USE_HUGE_PAGES
#define RTE_SWX_TABLE_EM_USE_HUGE_PAGES 1
#endif

#if RTE_SWX_TABLE_EM_USE_HUGE_PAGES

#include <rte_malloc.h>

static void *
env_malloc(size_t size, size_t alignment, int numa_node)
{
	return rte_zmalloc_socket(NULL, size, alignment, numa_node);
}

static void
env_free(void *start, size_t size __rte_unused)
{
	rte_free(start);
}

#else

#include <numa.h>

static void *
env_malloc(size_t size, size_t alignment __rte_unused, int numa_node)
{
	return numa_alloc_onnode(size, numa_node);
}

static void
env_free(void *start, size_t size)
{
	numa_free(start, size);
}

#endif

static char *get_unique_name(void)
{
	uint64_t tsc = rte_get_tsc_cycles();
	size_t size = sizeof(uint64_t) * 2 + 1;
	char *name = calloc(1, size);

	if (!name)
		return NULL;

	snprintf(name, size, "%016" PRIx64, tsc);
	return name;
}
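
/*
 * Note on get_unique_name() above: every table instance creates its own ACL
 * context, and the ACL library registers contexts by name, so a name that is
 * unique per call (the current TSC value printed as 16 hex digits, e.g.
 * "0000018a5f3c2b10"; the value shown is illustrative) keeps separate table
 * instances from clashing over the same ACL context name.
 */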

static uint32_t
count_entries(struct rte_swx_table_entry_list *entries)
{
	struct rte_swx_table_entry *entry;
	uint32_t n_entries = 0;

	if (!entries)
		return 0;

	TAILQ_FOREACH(entry, entries, node)
		n_entries++;

	return n_entries;
}

static int
acl_table_cfg_get(struct rte_acl_config *cfg, struct rte_swx_table_params *p)
{
	uint32_t byte_id = 0, field_id = 0;

	/* cfg->num_categories. */
	cfg->num_categories = 1;

	/* cfg->defs and cfg->num_fields. */
	for (byte_id = 0; byte_id < p->key_size; ) {
		uint32_t field_size = field_id ? 4 : 1;
		uint8_t byte = p->key_mask0 ? p->key_mask0[byte_id] : 0xFF;

		if (!byte) {
			byte_id++;
			continue;
		}

		if (field_id == RTE_ACL_MAX_FIELDS)
			return -1;

		cfg->defs[field_id].type = RTE_ACL_FIELD_TYPE_BITMASK;
		cfg->defs[field_id].size = field_size;
		cfg->defs[field_id].field_index = field_id;
		cfg->defs[field_id].input_index = field_id;
		cfg->defs[field_id].offset = p->key_offset + byte_id;

		field_id++;
		byte_id += field_size;
	}

	if (!field_id)
		return -1;

	cfg->num_fields = field_id;

	/* cfg->max_size. */
	cfg->max_size = 0;

	return 0;
}
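
/*
 * Worked example for acl_table_cfg_get() above (illustrative values only):
 * for key_size = 8 and key_mask0 = {0xFF, 0xFF, 0x00, 0x00, 0xFF, 0xFF,
 * 0xFF, 0xFF}, the loop produces:
 *
 *   field 0: size 1, offset key_offset + 0  (the first field is always 1 byte)
 *   field 1: size 4, offset key_offset + 1  (covers key bytes 1..4; the zero
 *            mask bytes 2 and 3 are not skipped because byte 1 already starts
 *            a 4-byte field)
 *   field 2: size 4, offset key_offset + 5  (covers key bytes 5..7 plus one
 *            byte past the key, which acl_table_rule_field32() below masks
 *            out when building the rules)
 *
 * so cfg->num_fields is 3. A key byte is skipped only when it would start a
 * new field and its key_mask0 byte is zero.
 */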

static void
acl_table_rule_field8(uint8_t *value,
	uint8_t *mask,
	uint8_t *key_mask0,
	uint8_t *key_mask,
	uint8_t *key,
	uint32_t offset)
{
	uint8_t km0, km;

	km0 = key_mask0 ? key_mask0[offset] : 0xFF;
	km = key_mask ? key_mask[offset] : 0xFF;

	*value = key[offset];
	*mask = km0 & km;
}

static void
acl_table_rule_field32(uint32_t *value,
	uint32_t *mask,
	uint8_t *key_mask0,
	uint8_t *key_mask,
	uint8_t *key,
	uint32_t key_size,
	uint32_t offset)
{
	uint32_t km0[4], km[4], k[4];
	uint32_t byte_id;

	/* Byte 0 = MSB, byte 3 = LSB. */
	for (byte_id = 0; byte_id < 4; byte_id++) {
		if (offset + byte_id >= key_size) {
			km0[byte_id] = 0;
			km[byte_id] = 0;
			k[byte_id] = 0;
			continue;
		}

		km0[byte_id] = key_mask0 ? key_mask0[offset + byte_id] : 0xFF;
		km[byte_id] = key_mask ? key_mask[offset + byte_id] : 0xFF;
		k[byte_id] = key[offset + byte_id];
	}

	*value = (k[0] << 24) |
		 (k[1] << 16) |
		 (k[2] << 8) |
		 k[3];

	*mask = ((km[0] & km0[0]) << 24) |
		((km[1] & km0[1]) << 16) |
		((km[2] & km0[2]) << 8) |
		(km[3] & km0[3]);
}
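
/*
 * Worked example for acl_table_rule_field32() above (illustrative values
 * only): with key bytes {0x0A, 0x0B, 0x0C, 0x0D} starting at offset,
 * key_mask0 = {0xFF, 0xFF, 0x00, 0xFF} for those bytes and key_mask == NULL
 * (treated as all-ones), the bytes are packed MSB first:
 *
 *   *value = 0x0A0B0C0D
 *   *mask  = 0xFFFF00FF
 *
 * Bytes that fall past key_size contribute 0 to both value and mask, so the
 * trailing 4-byte field created by acl_table_cfg_get() never matches on data
 * outside the key.
 */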

RTE_ACL_RULE_DEF(acl_rule, RTE_ACL_MAX_FIELDS);

static struct rte_acl_rule *
acl_table_rules_get(struct rte_acl_config *acl_cfg,
	struct rte_swx_table_params *p,
	struct rte_swx_table_entry_list *entries,
	uint32_t n_entries)
{
	struct rte_swx_table_entry *entry;
	uint8_t *memory;
	uint32_t acl_rule_size = RTE_ACL_RULE_SZ(acl_cfg->num_fields);
	uint32_t n_fields = acl_cfg->num_fields;
	uint32_t rule_id;

	if (!n_entries)
		return NULL;

	memory = malloc(n_entries * acl_rule_size);
	if (!memory)
		return NULL;

	rule_id = 0;
	TAILQ_FOREACH(entry, entries, node) {
		uint8_t *m = &memory[rule_id * acl_rule_size];
		struct acl_rule *acl_rule = (struct acl_rule *)m;
		uint32_t field_id;

		acl_rule->data.category_mask = 1;
		acl_rule->data.priority = RTE_ACL_MAX_PRIORITY -
			entry->key_priority;
		acl_rule->data.userdata = rule_id + 1;

		for (field_id = 0; field_id < n_fields; field_id++) {
			struct rte_acl_field *f = &acl_rule->field[field_id];
			uint32_t size = acl_cfg->defs[field_id].size;
			uint32_t offset = acl_cfg->defs[field_id].offset -
				p->key_offset;

			if (size == 1) {
				uint8_t value, mask;

				acl_table_rule_field8(&value,
						      &mask,
						      p->key_mask0,
						      entry->key_mask,
						      entry->key,
						      offset);

				f->value.u8 = value;
				f->mask_range.u8 = mask;
			} else {
				uint32_t value, mask;

				acl_table_rule_field32(&value,
						       &mask,
						       p->key_mask0,
						       entry->key_mask,
						       entry->key,
						       p->key_size,
						       offset);

				f->value.u32 = value;
				f->mask_range.u32 = mask;
			}
		}

		rule_id++;
	}

	return (struct rte_acl_rule *)memory;
}
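
/*
 * Notes on acl_table_rules_get() above:
 *
 * - Priority: RTE_ACL_MAX_PRIORITY - key_priority turns a lower key_priority
 *   into a numerically higher ACL priority. Since the ACL library prefers the
 *   rule with the higher priority when several rules match, an entry with
 *   key_priority 0 beats an entry with key_priority 5.
 *
 * - User data: userdata is set to rule_id + 1 rather than rule_id because
 *   the ACL library uses 0 to signal "no match"; table_lookup() below undoes
 *   the shift with (user_data - 1) when indexing the entry data array.
 */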

/* When the table to be created has no rules, the expected behavior is to always
 * get lookup miss for any input key. To achieve this, we add a single bogus
 * rule to the table with the rule user data set to 0, i.e. the value returned
 * when lookup miss takes place. Whether lookup hit (the bogus rule is hit) or
 * miss, a user data of 0 is returned, which for the ACL library is equivalent
 * to lookup miss.
 */
static struct rte_acl_rule *
acl_table_rules_default_get(struct rte_acl_config *acl_cfg)
{
	struct rte_acl_rule *acl_rule;
	uint32_t acl_rule_size = RTE_ACL_RULE_SZ(acl_cfg->num_fields);

	acl_rule = calloc(1, acl_rule_size);
	if (!acl_rule)
		return NULL;

	acl_rule->data.category_mask = 1;
	acl_rule->data.priority = RTE_ACL_MAX_PRIORITY;
	acl_rule->data.userdata = 0;

	memset(&acl_rule[1], 0xFF, acl_rule_size - sizeof(struct rte_acl_rule));

	return acl_rule;
}

static struct rte_acl_ctx *
acl_table_create(struct rte_swx_table_params *params,
	struct rte_swx_table_entry_list *entries,
	uint32_t n_entries,
	int numa_node)
{
	struct rte_acl_param acl_params = {0};
	struct rte_acl_config acl_cfg = {0};
	struct rte_acl_ctx *acl_ctx = NULL;
	struct rte_acl_rule *acl_rules = NULL;
	char *name = NULL;
	int status = 0;

	/* ACL config data structures. */
	name = get_unique_name();
	if (!name) {
		status = -1;
		goto free_resources;
	}

	status = acl_table_cfg_get(&acl_cfg, params);
	if (status)
		goto free_resources;

	acl_rules = n_entries ?
		acl_table_rules_get(&acl_cfg, params, entries, n_entries) :
		acl_table_rules_default_get(&acl_cfg);
	if (!acl_rules) {
		status = -1;
		goto free_resources;
	}

	n_entries = n_entries ? n_entries : 1;

	/* ACL create. */
	acl_params.name = name;
	acl_params.socket_id = numa_node;
	acl_params.rule_size = RTE_ACL_RULE_SZ(acl_cfg.num_fields);
	acl_params.max_rule_num = n_entries;

	acl_ctx = rte_acl_create(&acl_params);
	if (!acl_ctx) {
		status = -1;
		goto free_resources;
	}

	/* ACL add rules. */
	status = rte_acl_add_rules(acl_ctx, acl_rules, n_entries);
	if (status)
		goto free_resources;

	/* ACL build. */
	status = rte_acl_build(acl_ctx, &acl_cfg);

free_resources:
	if (status && acl_ctx)
		rte_acl_free(acl_ctx);

	free(acl_rules);

	free(name);

	return status ? NULL : acl_ctx;
}
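
/*
 * Cleanup logic in acl_table_create() above: the temporary rule array and
 * the name string are always freed on exit, since the ACL library copies
 * what it needs from them, while the ACL context itself is freed only on
 * the error path and is otherwise handed back to the caller.
 */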

static void
entry_data_copy(uint8_t *data,
	struct rte_swx_table_entry_list *entries,
	uint32_t n_entries,
	uint32_t entry_data_size)
{
	struct rte_swx_table_entry *entry;
	uint32_t i = 0;

	if (!n_entries)
		return;

	TAILQ_FOREACH(entry, entries, node) {
		uint64_t *d = (uint64_t *)&data[i * entry_data_size];

		d[0] = entry->action_id;
		memcpy(&d[1], entry->action_data, entry_data_size - 8);

		i++;
	}
}
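
/*
 * Per-entry data layout produced by entry_data_copy() above, with
 * entry_data_size = 8 + action_data_size (see table_create() below):
 *
 *   bytes 0..7 : action_id, stored as one 64-bit word
 *   bytes 8..  : action_data, entry_data_size - 8 bytes
 *
 * table_lookup() hands out pointers straight into this array, so nothing is
 * copied on the lookup path.
 */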

struct table {
	struct rte_acl_ctx *acl_ctx;
	uint8_t *data;
	size_t total_size;
	uint32_t entry_data_size;
};

static void
table_free(void *table)
{
	struct table *t = table;

	if (!t)
		return;

	if (t->acl_ctx)
		rte_acl_free(t->acl_ctx);
	env_free(t, t->total_size);
}

static void *
table_create(struct rte_swx_table_params *params,
	     struct rte_swx_table_entry_list *entries,
	     const char *args __rte_unused,
	     int numa_node)
{
	struct table *t = NULL;
	size_t meta_sz, data_sz, total_size;
	uint32_t entry_data_size;
	uint32_t n_entries = count_entries(entries);

	/* Check input arguments. */
	if (!params || !params->key_size)
		goto error;

	/* Memory allocation and initialization. */
	entry_data_size = 8 + params->action_data_size;
	meta_sz = sizeof(struct table);
	data_sz = n_entries * entry_data_size;
	total_size = meta_sz + data_sz;

	t = env_malloc(total_size, RTE_CACHE_LINE_SIZE, numa_node);
	if (!t)
		goto error;

	memset(t, 0, total_size);
	t->entry_data_size = entry_data_size;
	t->total_size = total_size;
	t->data = (uint8_t *)&t[1];

	t->acl_ctx = acl_table_create(params, entries, n_entries, numa_node);
	if (!t->acl_ctx)
		goto error;

	entry_data_copy(t->data, entries, n_entries, entry_data_size);

	return t;

error:
	table_free(t);
	return NULL;
}

struct mailbox {

};

static uint64_t
table_mailbox_size_get(void)
{
	return sizeof(struct mailbox);
}

static int
table_lookup(void *table,
	     void *mailbox __rte_unused,
	     const uint8_t **key,
	     uint64_t *action_id,
	     uint8_t **action_data,
	     int *hit)
{
	struct table *t = table;
	uint8_t *data;
	uint32_t user_data;

	rte_acl_classify(t->acl_ctx, key, &user_data, 1, 1);
	if (!user_data) {
		*hit = 0;
		return 1;
	}

	data = &t->data[(user_data - 1) * t->entry_data_size];
	*action_id = ((uint64_t *)data)[0];
	*action_data = &data[8];
	*hit = 1;
	return 1;
}
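
/*
 * Lookup flow in table_lookup() above: a single-key, single-category
 * rte_acl_classify() call fills user_data with the matching rule's user
 * data. A value of 0 means miss (including the bogus default rule of an
 * empty table); any other value is rule_id + 1, so (user_data - 1) indexes
 * the per-entry data array filled in by entry_data_copy(). The function
 * always returns 1 because the lookup completes in a single step and never
 * needs to be resumed via the mailbox.
 */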

struct rte_swx_table_ops rte_swx_table_wildcard_match_ops = {
	.footprint_get = NULL,
	.mailbox_size_get = table_mailbox_size_get,
	.create = table_create,
	.add = NULL,
	.del = NULL,
	.lkp = (rte_swx_table_lookup_t)table_lookup,
	.free = table_free,
};
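
/*
 * Usage sketch (illustrative only, not part of the upstream file): one way a
 * caller might exercise the ops structure above. It assumes an entry list
 * already populated with rte_swx_table_entry objects that are consistent
 * with the given params, runs on NUMA node 0, and keeps error handling
 * minimal. The function name wm_table_example is hypothetical.
 */
#if 0
static int
wm_table_example(struct rte_swx_table_params *params,
		 struct rte_swx_table_entry_list *entries,
		 uint8_t *pkt_key)
{
	struct rte_swx_table_ops *ops = &rte_swx_table_wildcard_match_ops;
	uint64_t action_id = 0;
	uint8_t *action_data = NULL;
	uint8_t *key = pkt_key;
	void *table, *mailbox;
	int hit = 0;

	/* Build the table (ACL context plus per-entry data array). */
	table = ops->create(params, entries, NULL, 0);
	if (!table)
		return -1;

	/* The mailbox is empty for this table type, but allocating it keeps
	 * the calling sequence uniform across table types.
	 */
	mailbox = calloc(1, ops->mailbox_size_get());
	if (!mailbox) {
		ops->free(table);
		return -1;
	}

	/* A single lkp() call completes the lookup for this table type. */
	(void)ops->lkp(table, mailbox, &key, &action_id, &action_data, &hit);

	free(mailbox);
	ops->free(table);
	return hit;
}
#endif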
470