/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <rte_eal_memconfig.h>
#include <rte_string_fns.h>
#include <rte_acl.h>
#include <rte_tailq.h>

#include "acl.h"

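/*
 * All ACL contexts are linked into a single tailq registered with EAL,
 * so that a context can later be looked up by name (see
 * rte_acl_find_existing() below).
 */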
TAILQ_HEAD(rte_acl_list, rte_tailq_entry);

static struct rte_tailq_elem rte_acl_tailq = {
	.name = "RTE_ACL",
};
EAL_REGISTER_TAILQ(rte_acl_tailq)

#ifndef CC_AVX512_SUPPORT
/*
 * If the compiler doesn't support AVX512 instructions,
 * provide dummy fallbacks for the AVX512 classify methods.
 */
int
rte_acl_classify_avx512x16(__rte_unused const struct rte_acl_ctx *ctx,
	__rte_unused const uint8_t **data,
	__rte_unused uint32_t *results,
	__rte_unused uint32_t num,
	__rte_unused uint32_t categories)
{
	return -ENOTSUP;
}

int
rte_acl_classify_avx512x32(__rte_unused const struct rte_acl_ctx *ctx,
	__rte_unused const uint8_t **data,
	__rte_unused uint32_t *results,
	__rte_unused uint32_t num,
	__rte_unused uint32_t categories)
{
	return -ENOTSUP;
}
#endif

#ifndef RTE_ARCH_X86
/*
 * If the ISA doesn't support AVX2 or SSE, provide dummy fallbacks.
 */
int
rte_acl_classify_avx2(__rte_unused const struct rte_acl_ctx *ctx,
	__rte_unused const uint8_t **data,
	__rte_unused uint32_t *results,
	__rte_unused uint32_t num,
	__rte_unused uint32_t categories)
{
	return -ENOTSUP;
}

int
rte_acl_classify_sse(__rte_unused const struct rte_acl_ctx *ctx,
	__rte_unused const uint8_t **data,
	__rte_unused uint32_t *results,
	__rte_unused uint32_t num,
	__rte_unused uint32_t categories)
{
	return -ENOTSUP;
}
#endif

#ifndef RTE_ARCH_ARM
int
rte_acl_classify_neon(__rte_unused const struct rte_acl_ctx *ctx,
	__rte_unused const uint8_t **data,
	__rte_unused uint32_t *results,
	__rte_unused uint32_t num,
	__rte_unused uint32_t categories)
{
	return -ENOTSUP;
}
#endif

#ifndef RTE_ARCH_PPC_64
int
rte_acl_classify_altivec(__rte_unused const struct rte_acl_ctx *ctx,
	__rte_unused const uint8_t **data,
	__rte_unused uint32_t *results,
	__rte_unused uint32_t num,
	__rte_unused uint32_t categories)
{
	return -ENOTSUP;
}
#endif

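/*
 * Jump table mapping each classify method to its implementation.
 * Methods that are not compiled in point at the dummy stubs above,
 * which simply return -ENOTSUP.
 */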
static const rte_acl_classify_t classify_fns[] = {
	[RTE_ACL_CLASSIFY_DEFAULT] = rte_acl_classify_scalar,
	[RTE_ACL_CLASSIFY_SCALAR] = rte_acl_classify_scalar,
	[RTE_ACL_CLASSIFY_SSE] = rte_acl_classify_sse,
	[RTE_ACL_CLASSIFY_AVX2] = rte_acl_classify_avx2,
	[RTE_ACL_CLASSIFY_NEON] = rte_acl_classify_neon,
	[RTE_ACL_CLASSIFY_ALTIVEC] = rte_acl_classify_altivec,
	[RTE_ACL_CLASSIFY_AVX512X16] = rte_acl_classify_avx512x16,
	[RTE_ACL_CLASSIFY_AVX512X32] = rte_acl_classify_avx512x32,
};

/*
 * Helper function for acl_check_alg.
 * Check support for ARM specific classify methods.
 */
static int
acl_check_alg_arm(enum rte_acl_classify_alg alg)
{
	if (alg == RTE_ACL_CLASSIFY_NEON) {
#if defined(RTE_ARCH_ARM64)
		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
			return 0;
#elif defined(RTE_ARCH_ARM)
		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_NEON) &&
				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
			return 0;
#endif
		return -ENOTSUP;
	}

	return -EINVAL;
}

/*
 * Helper function for acl_check_alg.
 * Check support for PPC specific classify methods.
 */
static int
acl_check_alg_ppc(enum rte_acl_classify_alg alg)
{
	if (alg == RTE_ACL_CLASSIFY_ALTIVEC) {
#if defined(RTE_ARCH_PPC_64)
		if (rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
			return 0;
#endif
		return -ENOTSUP;
	}

	return -EINVAL;
}

#ifdef CC_AVX512_SUPPORT
static int
acl_check_avx512_cpu_flags(void)
{
	return (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512F) &&
			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512VL) &&
			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512CD) &&
			rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX512BW));
}
#endif

/*
 * Helper function for acl_check_alg.
 * Check support for x86 specific classify methods.
 */
static int
acl_check_alg_x86(enum rte_acl_classify_alg alg)
{
	if (alg == RTE_ACL_CLASSIFY_AVX512X32) {
#ifdef CC_AVX512_SUPPORT
		if (acl_check_avx512_cpu_flags() != 0 &&
			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_512)
			return 0;
#endif
		return -ENOTSUP;
	}

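	/*
	 * Note: the x16 method processes up to 16 flows in parallel using
	 * 256-bit wide (AVX512VL) registers, hence the lower SIMD bitwidth
	 * requirement compared to the x32 method above.
	 */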
	if (alg == RTE_ACL_CLASSIFY_AVX512X16) {
#ifdef CC_AVX512_SUPPORT
		if (acl_check_avx512_cpu_flags() != 0 &&
			rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
			return 0;
#endif
		return -ENOTSUP;
	}

	if (alg == RTE_ACL_CLASSIFY_AVX2) {
#ifdef RTE_ARCH_X86
		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX2) &&
				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_256)
			return 0;
#endif
		return -ENOTSUP;
	}

	if (alg == RTE_ACL_CLASSIFY_SSE) {
#ifdef RTE_ARCH_X86
		if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1) &&
				rte_vect_get_max_simd_bitwidth() >= RTE_VECT_SIMD_128)
			return 0;
#endif
		return -ENOTSUP;
	}

	return -EINVAL;
}

/*
 * Check if the input alg is supported by the given platform/binary.
 * Note that both conditions have to be met:
 * - at build time the compiler supports the ISA used by the given method;
 * - at run time the target CPU supports the necessary ISA.
 */
static int
acl_check_alg(enum rte_acl_classify_alg alg)
{
	switch (alg) {
	case RTE_ACL_CLASSIFY_NEON:
		return acl_check_alg_arm(alg);
	case RTE_ACL_CLASSIFY_ALTIVEC:
		return acl_check_alg_ppc(alg);
	case RTE_ACL_CLASSIFY_AVX512X32:
	case RTE_ACL_CLASSIFY_AVX512X16:
	case RTE_ACL_CLASSIFY_AVX2:
	case RTE_ACL_CLASSIFY_SSE:
		return acl_check_alg_x86(alg);
	/* the scalar method is supported on all platforms */
	case RTE_ACL_CLASSIFY_SCALAR:
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * Get the preferred alg for the given platform.
 */
static enum rte_acl_classify_alg
acl_get_best_alg(void)
{
	/*
	 * Array of supported methods for each platform.
	 * Note that order is important - from the most to the least
	 * preferable.
	 */
	static const enum rte_acl_classify_alg alg[] = {
#if defined(RTE_ARCH_ARM)
		RTE_ACL_CLASSIFY_NEON,
#elif defined(RTE_ARCH_PPC_64)
		RTE_ACL_CLASSIFY_ALTIVEC,
#elif defined(RTE_ARCH_X86)
		RTE_ACL_CLASSIFY_AVX512X32,
		RTE_ACL_CLASSIFY_AVX512X16,
		RTE_ACL_CLASSIFY_AVX2,
		RTE_ACL_CLASSIFY_SSE,
#endif
		RTE_ACL_CLASSIFY_SCALAR,
	};

	uint32_t i;

	/* find the best possible alg */
	for (i = 0; i != RTE_DIM(alg) && acl_check_alg(alg[i]) != 0; i++)
		;

	/* we always have to find something suitable */
	RTE_VERIFY(i != RTE_DIM(alg));
	return alg[i];
}

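/*
 * Example (illustrative sketch, not part of this file): request the
 * AVX2 method explicitly and fall back to the per-platform default
 * if it is not available on the current setup:
 *
 *	if (rte_acl_set_ctx_classify(ctx, RTE_ACL_CLASSIFY_AVX2) != 0)
 *		rte_acl_set_ctx_classify(ctx, RTE_ACL_CLASSIFY_DEFAULT);
 */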
int
rte_acl_set_ctx_classify(struct rte_acl_ctx *ctx, enum rte_acl_classify_alg alg)
{
	int32_t rc;

	/* formal parameters check */
	if (ctx == NULL || (uint32_t)alg >= RTE_DIM(classify_fns))
		return -EINVAL;

	/* user asked us to select the *best* one */
	if (alg == RTE_ACL_CLASSIFY_DEFAULT)
		alg = acl_get_best_alg();

	/* check that given alg is supported */
	rc = acl_check_alg(alg);
	if (rc != 0)
		return rc;

	ctx->alg = alg;
	return 0;
}

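/*
 * Classify with an explicitly specified method. Note that "categories"
 * must be either 1 or a multiple of RTE_ACL_RESULTS_MULTIPLIER (a
 * power of two, which is what makes the mask test below sufficient).
 */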
int
rte_acl_classify_alg(const struct rte_acl_ctx *ctx, const uint8_t **data,
	uint32_t *results, uint32_t num, uint32_t categories,
	enum rte_acl_classify_alg alg)
{
	if (categories != 1 &&
			((RTE_ACL_RESULTS_MULTIPLIER - 1) & categories) != 0)
		return -EINVAL;

	return classify_fns[alg](ctx, data, results, num, categories);
}

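/*
 * Example (illustrative sketch): classify a burst of "num" buffers
 * with one result per buffer, using the method stored in the context;
 * "ctx", "data" and BURST_SIZE are assumed to be set up by the
 * application:
 *
 *	uint32_t results[BURST_SIZE];
 *	int rc = rte_acl_classify(ctx, data, results, num, 1);
 */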
int
rte_acl_classify(const struct rte_acl_ctx *ctx, const uint8_t **data,
	uint32_t *results, uint32_t num, uint32_t categories)
{
	return rte_acl_classify_alg(ctx, data, results, num, categories,
		ctx->alg);
}

struct rte_acl_ctx *
rte_acl_find_existing(const char *name)
{
	struct rte_acl_ctx *ctx = NULL;
	struct rte_acl_list *acl_list;
	struct rte_tailq_entry *te;

	acl_list = RTE_TAILQ_CAST(rte_acl_tailq.head, rte_acl_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, acl_list, next) {
		ctx = (struct rte_acl_ctx *) te->data;
		if (strncmp(name, ctx->name, sizeof(ctx->name)) == 0)
			break;
	}
	rte_mcfg_tailq_read_unlock();

	if (te == NULL) {
		rte_errno = ENOENT;
		return NULL;
	}
	return ctx;
}

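/*
 * Unlink the context from the global list, then release both the
 * run-time memory (ctx->mem) and the context itself.
 */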
void
rte_acl_free(struct rte_acl_ctx *ctx)
{
	struct rte_acl_list *acl_list;
	struct rte_tailq_entry *te;

	if (ctx == NULL)
		return;

	acl_list = RTE_TAILQ_CAST(rte_acl_tailq.head, rte_acl_list);

	rte_mcfg_tailq_write_lock();

	/* find our tailq entry */
	TAILQ_FOREACH(te, acl_list, next) {
		if (te->data == (void *) ctx)
			break;
	}
	if (te == NULL) {
		rte_mcfg_tailq_write_unlock();
		return;
	}

	TAILQ_REMOVE(acl_list, te, next);

	rte_mcfg_tailq_write_unlock();

	rte_free(ctx->mem);
	rte_free(ctx);
	rte_free(te);
}

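/*
 * Note: if a context with the same name already exists, it is returned
 * as-is and "param" is not re-checked against it.
 *
 * Example (illustrative sketch): a context with room for up to 1024
 * rules of an application-defined 5-field layout:
 *
 *	struct rte_acl_param p = {
 *		.name = "demo",
 *		.socket_id = SOCKET_ID_ANY,
 *		.rule_size = RTE_ACL_RULE_SZ(5),
 *		.max_rule_num = 1024,
 *	};
 *	struct rte_acl_ctx *ctx = rte_acl_create(&p);
 */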
struct rte_acl_ctx *
rte_acl_create(const struct rte_acl_param *param)
{
	size_t sz;
	struct rte_acl_ctx *ctx;
	struct rte_acl_list *acl_list;
	struct rte_tailq_entry *te;
	char name[sizeof(ctx->name)];

	acl_list = RTE_TAILQ_CAST(rte_acl_tailq.head, rte_acl_list);

	/* check that input parameters are valid. */
	if (param == NULL || param->name == NULL) {
		rte_errno = EINVAL;
		return NULL;
	}

	snprintf(name, sizeof(name), "ACL_%s", param->name);

	/* calculate the amount of memory required for the rule set. */
	sz = sizeof(*ctx) + param->max_rule_num * param->rule_size;

	/* get EAL TAILQ lock. */
	rte_mcfg_tailq_write_lock();

	/* check whether we already have one with that name */
	TAILQ_FOREACH(te, acl_list, next) {
		ctx = (struct rte_acl_ctx *) te->data;
		if (strncmp(param->name, ctx->name, sizeof(ctx->name)) == 0)
			break;
	}

	/* if an ACL context with that name doesn't exist, create a new one. */
	if (te == NULL) {
		ctx = NULL;
		te = rte_zmalloc("ACL_TAILQ_ENTRY", sizeof(*te), 0);

		if (te == NULL) {
			RTE_LOG(ERR, ACL, "Cannot allocate tailq entry!\n");
			goto exit;
		}

		ctx = rte_zmalloc_socket(name, sz, RTE_CACHE_LINE_SIZE, param->socket_id);

		if (ctx == NULL) {
			RTE_LOG(ERR, ACL,
				"allocation of %zu bytes on socket %d for %s failed\n",
				sz, param->socket_id, name);
			rte_free(te);
			goto exit;
		}
		/* initialize the newly allocated context. */
		ctx->rules = ctx + 1;
		ctx->max_rules = param->max_rule_num;
		ctx->rule_sz = param->rule_size;
		ctx->socket_id = param->socket_id;
		ctx->alg = acl_get_best_alg();
		strlcpy(ctx->name, param->name, sizeof(ctx->name));

		te->data = (void *) ctx;

		TAILQ_INSERT_TAIL(acl_list, te, next);
	}

exit:
	rte_mcfg_tailq_write_unlock();
	return ctx;
}

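/*
 * Copy "num" rules to the end of the context's flat rule array.
 * The caller is expected to have validated the rules already.
 */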
static int
acl_add_rules(struct rte_acl_ctx *ctx, const void *rules, uint32_t num)
{
	uint8_t *pos;

	if (num + ctx->num_rules > ctx->max_rules)
		return -ENOMEM;

	pos = ctx->rules;
	pos += ctx->rule_sz * ctx->num_rules;
	memcpy(pos, rules, num * ctx->rule_sz);
	ctx->num_rules += num;

	return 0;
}

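/*
 * A rule is considered valid when its category mask selects at least
 * one of the RTE_ACL_MAX_CATEGORIES categories and its priority lies
 * within [RTE_ACL_MIN_PRIORITY, RTE_ACL_MAX_PRIORITY].
 */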
static int
acl_check_rule(const struct rte_acl_rule_data *rd)
{
	if ((RTE_LEN2MASK(RTE_ACL_MAX_CATEGORIES, typeof(rd->category_mask)) &
			rd->category_mask) == 0 ||
			rd->priority > RTE_ACL_MAX_PRIORITY ||
			rd->priority < RTE_ACL_MIN_PRIORITY)
		return -EINVAL;
	return 0;
}

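/*
 * Example (illustrative sketch): add one rule, where "app_rule_t" is
 * an application-defined rule type (e.g. declared with
 * RTE_ACL_RULE_DEF()) whose size matches ctx->rule_sz:
 *
 *	app_rule_t r = {
 *		.data = { .category_mask = 1, .priority = 1, .userdata = 1 },
 *		// ... match fields ...
 *	};
 *	rte_acl_add_rules(ctx, (const struct rte_acl_rule *)&r, 1);
 */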
int
rte_acl_add_rules(struct rte_acl_ctx *ctx, const struct rte_acl_rule *rules,
	uint32_t num)
{
	const struct rte_acl_rule *rv;
	uint32_t i;
	int32_t rc;

	if (ctx == NULL || rules == NULL || ctx->rule_sz == 0)
		return -EINVAL;

	for (i = 0; i != num; i++) {
		rv = (const struct rte_acl_rule *)
			((uintptr_t)rules + i * ctx->rule_sz);
		rc = acl_check_rule(&rv->data);
		if (rc != 0) {
			RTE_LOG(ERR, ACL, "%s(%s): rule #%u is invalid\n",
				__func__, ctx->name, i + 1);
			return rc;
		}
	}

	return acl_add_rules(ctx, rules, num);
}

/*
 * Reset all rules.
 * Note that RT structures are not affected.
 */
void
rte_acl_reset_rules(struct rte_acl_ctx *ctx)
{
	if (ctx != NULL)
		ctx->num_rules = 0;
}

/*
 * Reset all rules and destroy RT structures.
 */
void
rte_acl_reset(struct rte_acl_ctx *ctx)
{
	if (ctx != NULL) {
		rte_acl_reset_rules(ctx);
		rte_acl_build(ctx, &ctx->config);
	}
}

/*
 * Dump an ACL context to stdout.
 */
void
rte_acl_dump(const struct rte_acl_ctx *ctx)
{
	if (ctx == NULL)
		return;
	printf("acl context <%s>@%p\n", ctx->name, ctx);
	printf("  socket_id=%"PRId32"\n", ctx->socket_id);
	printf("  alg=%"PRId32"\n", ctx->alg);
	printf("  first_load_sz=%"PRIu32"\n", ctx->first_load_sz);
	printf("  max_rules=%"PRIu32"\n", ctx->max_rules);
	printf("  rule_size=%"PRIu32"\n", ctx->rule_sz);
	printf("  num_rules=%"PRIu32"\n", ctx->num_rules);
	printf("  num_categories=%"PRIu32"\n", ctx->num_categories);
	printf("  num_tries=%"PRIu32"\n", ctx->num_tries);
}

/*
 * Dump all ACL contexts to stdout.
 */
void
rte_acl_list_dump(void)
{
	struct rte_acl_ctx *ctx;
	struct rte_acl_list *acl_list;
	struct rte_tailq_entry *te;

	acl_list = RTE_TAILQ_CAST(rte_acl_tailq.head, rte_acl_list);

	rte_mcfg_tailq_read_lock();
	TAILQ_FOREACH(te, acl_list, next) {
		ctx = (struct rte_acl_ctx *) te->data;
		rte_acl_dump(ctx);
	}
	rte_mcfg_tailq_read_unlock();
}
543