xref: /dpdk/lib/table/rte_swx_table_wm.c (revision 7917b0d38e92e8b9ec5a870415b791420e10f11a)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_cycles.h>
#include <rte_acl.h>

#include "rte_swx_table_wm.h"
13 
14 #ifndef RTE_SWX_TABLE_EM_USE_HUGE_PAGES
15 #define RTE_SWX_TABLE_EM_USE_HUGE_PAGES 1
16 #endif
17 
18 #if RTE_SWX_TABLE_EM_USE_HUGE_PAGES
19 
20 #include <rte_malloc.h>
21 
/* Allocate zero-initialized memory with the requested alignment on the given
 * NUMA node, from the DPDK (huge-page backed) heap.
 */
static void *
env_malloc(size_t size, size_t alignment, int numa_node)
{
	return rte_zmalloc_socket(NULL, size, alignment, numa_node);
}
27 
/* Free memory obtained from env_malloc(). The DPDK heap tracks allocation
 * sizes internally, so the size argument is unused in this variant.
 */
static void
env_free(void *start, size_t size __rte_unused)
{
	rte_free(start);
}
33 
34 #else
35 
36 #include <numa.h>
37 
/* Allocate memory on the given NUMA node with libnuma. NOTE(review): unlike
 * the huge-page variant, the alignment hint is ignored here, and zeroing is
 * whatever numa_alloc_onnode() provides — confirm callers do not rely on
 * either beyond what both variants guarantee.
 */
static void *
env_malloc(size_t size, size_t alignment __rte_unused, int numa_node)
{
	return numa_alloc_onnode(size, numa_node);
}
43 
/* Free memory obtained from env_malloc(); libnuma requires the original
 * allocation size.
 */
static void
env_free(void *start, size_t size)
{
	numa_free(start, size);
}
49 
50 #endif
51 
/* Build a heap-allocated, 16-hex-digit name derived from the current TSC,
 * used to give each ACL context a practically unique name. The caller owns
 * and frees the returned string; NULL on allocation failure.
 */
static char *get_unique_name(void)
{
	size_t len = sizeof(uint64_t) * 2 + 1; /* 16 hex digits + NUL. */
	char *name;

	name = calloc(1, len);
	if (!name)
		return NULL;

	snprintf(name, len, "%016" PRIx64, rte_get_tsc_cycles());
	return name;
}
64 
65 static uint32_t
66 count_entries(struct rte_swx_table_entry_list *entries)
67 {
68 	struct rte_swx_table_entry *entry;
69 	uint32_t n_entries = 0;
70 
71 	if (!entries)
72 		return 0;
73 
74 	TAILQ_FOREACH(entry, entries, node)
75 		n_entries++;
76 
77 	return n_entries;
78 }
79 
/* Translate the table key layout into an ACL build configuration.
 *
 * Key bytes that are always masked out (key_mask0 byte == 0) are skipped.
 * The remaining bytes are grouped into ACL fields: the first field is 1 byte
 * and every subsequent field is 4 bytes. NOTE(review): the 1-byte leading
 * field presumably satisfies an rte_acl constraint on the first input field —
 * confirm against the ACL library documentation.
 *
 * Returns 0 on success, -1 when the key needs more than RTE_ACL_MAX_FIELDS
 * fields or is fully masked out (no field at all).
 */
static int
acl_table_cfg_get(struct rte_acl_config *cfg, struct rte_swx_table_params *p)
{
	uint32_t byte_id = 0, field_id = 0;

	/* Single lookup category is used. */
	cfg->num_categories = 1;

	/* cfg->defs and cfg->num_fields. */
	for (byte_id = 0; byte_id < p->key_size; ) {
		/* First field: 1 byte; all following fields: 4 bytes. */
		uint32_t field_size = field_id ? 4 : 1;
		/* NULL key_mask0 means all key bits are relevant. */
		uint8_t byte = p->key_mask0 ? p->key_mask0[byte_id] : 0xFF;

		/* Skip bytes that can never influence the match. */
		if (!byte) {
			byte_id++;
			continue;
		}

		if (field_id == RTE_ACL_MAX_FIELDS)
			return -1;

		cfg->defs[field_id].type = RTE_ACL_FIELD_TYPE_BITMASK;
		cfg->defs[field_id].size = field_size;
		cfg->defs[field_id].field_index = field_id;
		cfg->defs[field_id].input_index = field_id;
		/* Field offsets are absolute within the lookup buffer, hence
		 * the key_offset adjustment.
		 */
		cfg->defs[field_id].offset = p->key_offset + byte_id;

		field_id++;
		byte_id += field_size;
	}

	/* A fully masked-out key cannot be supported. */
	if (!field_id)
		return -1;

	cfg->num_fields = field_id;

	/* cfg->max_size: 0 presumably means no cap on the run-time trie size —
	 * confirm against the ACL library documentation.
	 */
	cfg->max_size = 0;

	return 0;
}
121 
/* Compute the 8-bit ACL field value/mask for one key byte. A NULL table-level
 * mask (key_mask0) or rule-level mask (key_mask) stands for "match all bits
 * exactly" (0xFF); the effective mask is the AND of the two.
 */
static void
acl_table_rule_field8(uint8_t *value,
	uint8_t *mask,
	uint8_t *key_mask0,
	uint8_t *key_mask,
	uint8_t *key,
	uint32_t offset)
{
	uint8_t table_mask = key_mask0 ? key_mask0[offset] : 0xFF;
	uint8_t rule_mask = key_mask ? key_mask[offset] : 0xFF;

	*value = key[offset];
	*mask = table_mask & rule_mask;
}
138 
/* Compute the 32-bit ACL field value/mask for up to 4 key bytes starting at
 * offset, packed big-endian (byte at offset lands in bits 31..24). Bytes past
 * the end of the key contribute zero value and zero mask bits. A NULL mask
 * pointer stands for "match all bits exactly".
 */
static void
acl_table_rule_field32(uint32_t *value,
	uint32_t *mask,
	uint8_t *key_mask0,
	uint8_t *key_mask,
	uint8_t *key,
	uint32_t key_size,
	uint32_t offset)
{
	uint32_t v = 0, m = 0;
	uint32_t i;

	/* Shift-accumulate MSB first. */
	for (i = 0; i < 4; i++) {
		uint32_t pos = offset + i;
		uint32_t k = 0, km = 0;

		if (pos < key_size) {
			uint32_t km0 = key_mask0 ? key_mask0[pos] : 0xFF;

			k = key[pos];
			km = km0 & (key_mask ? key_mask[pos] : 0xFF);
		}

		v = (v << 8) | k;
		m = (m << 8) | km;
	}

	*value = v;
	*mask = m;
}
175 
176 RTE_ACL_RULE_DEF(acl_rule, RTE_ACL_MAX_FIELDS);
177 
178 static struct rte_acl_rule *
179 acl_table_rules_get(struct rte_acl_config *acl_cfg,
180 	struct rte_swx_table_params *p,
181 	struct rte_swx_table_entry_list *entries,
182 	uint32_t n_entries)
183 {
184 	struct rte_swx_table_entry *entry;
185 	uint8_t *memory;
186 	uint32_t acl_rule_size = RTE_ACL_RULE_SZ(acl_cfg->num_fields);
187 	uint32_t n_fields = acl_cfg->num_fields;
188 	uint32_t rule_id;
189 
190 	if (!n_entries)
191 		return NULL;
192 
193 	memory = malloc(n_entries * acl_rule_size);
194 	if (!memory)
195 		return NULL;
196 
197 	rule_id = 0;
198 	TAILQ_FOREACH(entry, entries, node) {
199 		uint8_t *m = &memory[rule_id * acl_rule_size];
200 		struct acl_rule *acl_rule = (struct acl_rule *)m;
201 		uint32_t field_id;
202 
203 		acl_rule->data.category_mask = 1;
204 		acl_rule->data.priority = RTE_ACL_MAX_PRIORITY -
205 			entry->key_priority;
206 		acl_rule->data.userdata = rule_id + 1;
207 
208 		for (field_id = 0; field_id < n_fields; field_id++) {
209 			struct rte_acl_field *f = &acl_rule->field[field_id];
210 			uint32_t size = acl_cfg->defs[field_id].size;
211 			uint32_t offset = acl_cfg->defs[field_id].offset -
212 				p->key_offset;
213 
214 			if (size == 1) {
215 				uint8_t value, mask;
216 
217 				acl_table_rule_field8(&value,
218 						      &mask,
219 						      p->key_mask0,
220 						      entry->key_mask,
221 						      entry->key,
222 						      offset);
223 
224 				f->value.u8 = value;
225 				f->mask_range.u8 = mask;
226 			} else {
227 				uint32_t value, mask;
228 
229 				acl_table_rule_field32(&value,
230 						       &mask,
231 						       p->key_mask0,
232 						       entry->key_mask,
233 						       entry->key,
234 						       p->key_size,
235 						       offset);
236 
237 				f->value.u32 = value;
238 				f->mask_range.u32 = mask;
239 			}
240 		}
241 
242 		rule_id++;
243 	}
244 
245 	return (struct rte_acl_rule *)memory;
246 }
247 
248 /* When the table to be created has no rules, the expected behavior is to always
249  * get lookup miss for any input key. To achieve this, we add a single bogus
250  * rule to the table with the rule user data set to 0, i.e. the value returned
251  * when lookup miss takes place. Whether lookup hit (the bogus rule is hit) or
252  * miss, a user data of 0 is returned, which for the ACL library is equivalent
253  * to lookup miss.
254  */
255 static struct rte_acl_rule *
256 acl_table_rules_default_get(struct rte_acl_config *acl_cfg)
257 {
258 	struct rte_acl_rule *acl_rule;
259 	uint32_t acl_rule_size = RTE_ACL_RULE_SZ(acl_cfg->num_fields);
260 
261 	acl_rule = calloc(1, acl_rule_size);
262 	if (!acl_rule)
263 		return NULL;
264 
265 	acl_rule->data.category_mask = 1;
266 	acl_rule->data.priority = RTE_ACL_MAX_PRIORITY;
267 	acl_rule->data.userdata = 0;
268 
269 	memset(&acl_rule[1], 0xFF, acl_rule_size - sizeof(struct rte_acl_rule));
270 
271 	return acl_rule;
272 }
273 
/* Create and build an ACL context for the given table parameters and entry
 * list. When the table is empty, the single always-miss rule from
 * acl_table_rules_default_get() is installed instead. Returns the built
 * context (caller owns it), or NULL on any failure. The temporary rule array
 * and the unique context name are always freed before returning.
 */
static struct rte_acl_ctx *
acl_table_create(struct rte_swx_table_params *params,
	struct rte_swx_table_entry_list *entries,
	uint32_t n_entries,
	int numa_node)
{
	struct rte_acl_param acl_params = {0};
	struct rte_acl_config acl_cfg = {0};
	struct rte_acl_ctx *acl_ctx = NULL;
	struct rte_acl_rule *acl_rules = NULL;
	char *name = NULL;
	int status = 0;

	/* ACL config data structures. */
	name = get_unique_name();
	if (!name) {
		status = -1;
		goto free_resources;
	}

	status = acl_table_cfg_get(&acl_cfg, params);
	if (status)
		goto free_resources;

	acl_rules = n_entries ?
		acl_table_rules_get(&acl_cfg, params, entries, n_entries) :
		acl_table_rules_default_get(&acl_cfg);
	if (!acl_rules) {
		status = -1;
		goto free_resources;
	}

	/* An empty table still installs one (always-miss) rule. */
	n_entries = n_entries ? n_entries : 1;

	/* ACL create. */
	acl_params.name = name;
	acl_params.socket_id = numa_node;
	acl_params.rule_size = RTE_ACL_RULE_SZ(acl_cfg.num_fields);
	acl_params.max_rule_num = n_entries;

	acl_ctx = rte_acl_create(&acl_params);
	if (!acl_ctx) {
		status = -1;
		goto free_resources;
	}

	/* ACL add rules. NOTE(review): rte_acl_add_rules() presumably copies
	 * the rules into the context, which is why acl_rules is freed
	 * unconditionally below — confirm against the ACL API docs.
	 */
	status = rte_acl_add_rules(acl_ctx, acl_rules, n_entries);
	if (status)
		goto free_resources;

	/* ACL build. */
	status = rte_acl_build(acl_ctx, &acl_cfg);

free_resources:
	/* On any failure, release the partially created context too. */
	if (status && acl_ctx)
		rte_acl_free(acl_ctx);

	free(acl_rules);

	free(name);

	return status ? NULL : acl_ctx;
}
338 
339 static void
340 entry_data_copy(uint8_t *data,
341 	struct rte_swx_table_entry_list *entries,
342 	uint32_t n_entries,
343 	uint32_t entry_data_size)
344 {
345 	struct rte_swx_table_entry *entry;
346 	uint32_t i = 0;
347 
348 	if (!n_entries)
349 		return;
350 
351 	TAILQ_FOREACH(entry, entries, node) {
352 		uint64_t *d = (uint64_t *)&data[i * entry_data_size];
353 
354 		d[0] = entry->action_id;
355 		memcpy(&d[1], entry->action_data, entry_data_size - 8);
356 
357 		i++;
358 	}
359 }
360 
/* Run-time state of one wildcard match table. The per-entry data array lives
 * in the same allocation, immediately after this header.
 */
struct table {
	struct rte_acl_ctx *acl_ctx;  /* Built ACL context used for lookup. */
	uint8_t *data;                /* Entry data array (points past this struct). */
	size_t total_size;            /* Whole allocation size, for env_free(). */
	uint32_t entry_data_size;     /* Bytes per entry: 8 (action_id) + action data. */
};
367 
368 static void
369 table_free(void *table)
370 {
371 	struct table *t = table;
372 
373 	if (!t)
374 		return;
375 
376 	rte_acl_free(t->acl_ctx);
377 	env_free(t, t->total_size);
378 }
379 
380 static void *
381 table_create(struct rte_swx_table_params *params,
382 	     struct rte_swx_table_entry_list *entries,
383 	     const char *args __rte_unused,
384 	     int numa_node)
385 {
386 	struct table *t = NULL;
387 	size_t meta_sz, data_sz, total_size;
388 	uint32_t entry_data_size;
389 	uint32_t n_entries = count_entries(entries);
390 
391 	/* Check input arguments. */
392 	if (!params || !params->key_size)
393 		goto error;
394 
395 	/* Memory allocation and initialization. */
396 	entry_data_size = 8 + params->action_data_size;
397 	meta_sz = sizeof(struct table);
398 	data_sz = n_entries * entry_data_size;
399 	total_size = meta_sz + data_sz;
400 
401 	t = env_malloc(total_size, RTE_CACHE_LINE_SIZE, numa_node);
402 	if (!t)
403 		goto error;
404 
405 	memset(t, 0, total_size);
406 	t->entry_data_size = entry_data_size;
407 	t->total_size = total_size;
408 	t->data = (uint8_t *)&t[1];
409 
410 	t->acl_ctx = acl_table_create(params, entries, n_entries, numa_node);
411 	if (!t->acl_ctx)
412 		goto error;
413 
414 	entry_data_copy(t->data, entries, n_entries, entry_data_size);
415 
416 	return t;
417 
418 error:
419 	table_free(t);
420 	return NULL;
421 }
422 
/* Per-thread lookup state. Empty: the wildcard match lookup completes in a
 * single call and carries no state between calls. NOTE(review): an empty
 * struct is a GNU C extension (sizeof is 0), not ISO C.
 */
struct mailbox {

};

/* Report the per-thread mailbox size required by this table type. */
static uint64_t
table_mailbox_size_get(void)
{
	return sizeof(struct mailbox);
}
432 
/* Table lookup. Always completes in a single call, hence the constant return
 * value of 1 (lookup done). On hit, outputs the entry's action_id, a pointer
 * to its action data, and its zero-based entry id; on miss only *hit is set.
 */
static int
table_lookup(void *table,
	     void *mailbox __rte_unused,
	     const uint8_t **key,
	     uint64_t *action_id,
	     uint8_t **action_data,
	     size_t *entry_id,
	     int *hit)
{
	struct table *t = table;
	uint8_t *data;
	uint32_t user_data;

	/* Classify one key in one category; user_data is rule_id + 1, or 0 on
	 * miss (see acl_table_rules_get() / acl_table_rules_default_get()).
	 */
	rte_acl_classify(t->acl_ctx, key, &user_data, 1, 1);
	if (!user_data) {
		*hit = 0;
		return 1;
	}

	/* Entry layout (see entry_data_copy()): 64-bit action_id followed by
	 * the action data.
	 */
	data = &t->data[(user_data - 1) * t->entry_data_size];
	*action_id = ((uint64_t *)data)[0];
	*action_data = &data[8];
	*entry_id = user_data - 1;
	*hit = 1;
	return 1;
}
459 
/* Wildcard match table operations. Incremental add/del and footprint query
 * are not provided (NULL).
 */
struct rte_swx_table_ops rte_swx_table_wildcard_match_ops = {
	.footprint_get = NULL,
	.mailbox_size_get = table_mailbox_size_get,
	.create = table_create,
	.add = NULL,
	.del = NULL,
	.lkp = (rte_swx_table_lookup_t)table_lookup,
	.free = table_free,
};
469