/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#ifdef RTE_LIB_HASH
#include <rte_hash.h>
#include <rte_fbk_hash.h>
#include <rte_jhash.h>
#endif /* RTE_LIB_HASH */

#ifdef RTE_LIB_LPM
#include <rte_lpm.h>
#endif /* RTE_LIB_LPM */

#include <rte_string_fns.h>

#include "test.h"

typedef int (*case_func_t)(void *arg);
typedef void (*case_clean_t)(unsigned lcore_id);

#define MAX_STRING_SIZE (256)
#define MAX_ITER_MULTI (16)
#define MAX_ITER_ONCE (4)
#define MAX_LPM_ITER_TIMES (6)

#define MEMPOOL_ELT_SIZE (sizeof(uint32_t))
#define MEMPOOL_SIZE (4)

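/*
 * Cap the number of participating lcores so that the objects created per
 * lcore (up to MAX_ITER_MULTI each, every one backed by several memzones)
 * stay within the limit reported by rte_memzone_max_get().
 */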
#define MAX_LCORES (rte_memzone_max_get() / (MAX_ITER_MULTI * 4U))

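/*
 * obj_count counts how many threads managed to create the shared
 * "fr_test_once" object; a reentrant create API must let exactly one succeed.
 * synchro is the start flag raised by the main lcore so that all workers
 * issue their create calls at (roughly) the same time.
 */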
static RTE_ATOMIC(uint32_t) obj_count;
static RTE_ATOMIC(uint32_t) synchro;

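/* Worker lcores spin here until the main lcore raises the start flag. */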
#define WAIT_SYNCHRO_FOR_WORKERS() do { \
	if (lcore_self != rte_get_main_lcore()) \
		rte_wait_until_equal_32((uint32_t *)(uintptr_t)&synchro, 1, \
			rte_memory_order_relaxed); \
} while (0)

/*
 * rte_eal_init() must only succeed once; subsequent calls are expected to fail.
 */
static int
test_eal_init_once(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();

	WAIT_SYNCHRO_FOR_WORKERS();

	/* silence the object-count check in the caller */
	rte_atomic_store_explicit(&obj_count, 1, rte_memory_order_relaxed);
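	/* EAL has already been initialized by the test runner, so this call must fail */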
	if (rte_eal_init(0, NULL) != -1)
		return -1;

	return 0;
}

/*
 * ring create/lookup reentrancy test
 */
static void
ring_clean(unsigned int lcore_id)
{
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	rp = rte_ring_lookup("fr_test_once");
	rte_ring_free(rp);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(ring_name, sizeof(ring_name),
			 "fr_test_%d_%d", lcore_id, i);
		rp = rte_ring_lookup(ring_name);
		rte_ring_free(rp);
	}
}

static int
ring_create_lookup(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same ring simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
		if (rp != NULL)
			rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
	}

	/* create/lookup a new ring several times */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
		rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
		if (NULL == rp)
			return -1;
		if (rte_ring_lookup(ring_name) != rp)
			return -1;

		/* verify the ring was created successfully */
		if (rte_ring_lookup(ring_name) == NULL)
			return -1;
	}

	return 0;
}

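/*
 * Per-object constructor passed to rte_mempool_create(): zero the element
 * and store its index so the pool contents are well defined.
 */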
static void
my_obj_init(struct rte_mempool *mp, __rte_unused void *arg,
	    void *obj, unsigned i)
{
	uint32_t *objnum = obj;
	memset(obj, 0, mp->elt_size);
	*objnum = i;
}

static void
mempool_clean(unsigned int lcore_id)
{
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	mp = rte_mempool_lookup("fr_test_once");
	rte_mempool_free(mp);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
			 lcore_id, i);
		mp = rte_mempool_lookup(mempool_name);
		rte_mempool_free(mp);
	}
}

static int
mempool_create_lookup(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same mempool simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (mp != NULL)
			rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
	}

	/* create/lookup a new mempool several times */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
		mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (NULL == mp)
			return -1;
		if (rte_mempool_lookup(mempool_name) != mp)
			return -1;

		/* verify the mempool was created successfully */
		if (rte_mempool_lookup(mempool_name) == NULL)
			return -1;
	}

	return 0;
}

#ifdef RTE_LIB_HASH
static void
hash_clean(unsigned lcore_id)
{
	char hash_name[MAX_STRING_SIZE];
	struct rte_hash *handle;
	int i;

	handle = rte_hash_find_existing("fr_test_once");
	rte_hash_free(handle);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_hash_find_existing(hash_name)) != NULL)
			rte_hash_free(handle);
	}
}

static int
hash_create_free(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_hash *handle;
	char hash_name[MAX_STRING_SIZE];
	int i;
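	/* shared creation parameters; .name is filled in before each create */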
	struct rte_hash_parameters hash_params = {
		.name = NULL,
		.entries = 16,
		.key_len = 4,
		.hash_func = (rte_hash_function)rte_jhash_32b,
		.hash_func_init_val = 0,
		.socket_id = 0,
	};

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same hash simultaneously on all threads */
	hash_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		handle = rte_hash_create(&hash_params);
		if (handle != NULL)
			rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
	}

	/* create multiple times simultaneously */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
		hash_params.name = hash_name;

		handle = rte_hash_create(&hash_params);
		if (NULL == handle)
			return -1;

		/* verify the hash can be found, then free it */
		if (handle != rte_hash_find_existing(hash_name))
			return -1;

		rte_hash_free(handle);

		/* verify it was freed correctly */
		if (NULL != rte_hash_find_existing(hash_name))
			return -1;
	}

	return 0;
}

static void
fbk_clean(unsigned lcore_id)
{
	char fbk_name[MAX_STRING_SIZE];
	struct rte_fbk_hash_table *handle;
	int i;

	handle = rte_fbk_hash_find_existing("fr_test_once");
	rte_fbk_hash_free(handle);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
			rte_fbk_hash_free(handle);
	}
}

static int
fbk_create_free(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_fbk_hash_table *handle;
	char fbk_name[MAX_STRING_SIZE];
	int i;
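	/* shared FBK hash parameters; .name is set per table below */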
	struct rte_fbk_hash_params fbk_params = {
		.name = NULL,
		.entries = 4,
		.entries_per_bucket = 4,
		.socket_id = 0,
		.hash_func = rte_jhash_1word,
		.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
	};

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same fbk hash table simultaneously on all threads */
	fbk_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		handle = rte_fbk_hash_create(&fbk_params);
		if (handle != NULL)
			rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
	}

	/* create multiple fbk tables simultaneously */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
		fbk_params.name = fbk_name;

		handle = rte_fbk_hash_create(&fbk_params);
		if (NULL == handle)
			return -1;

		/* verify the table can be found, then free it */
		if (handle != rte_fbk_hash_find_existing(fbk_name))
			return -1;

		rte_fbk_hash_free(handle);

		/* verify it was freed correctly */
		if (NULL != rte_fbk_hash_find_existing(fbk_name))
			return -1;
	}

	return 0;
}
#endif /* RTE_LIB_HASH */

#ifdef RTE_LIB_LPM
static void
lpm_clean(unsigned int lcore_id)
{
	char lpm_name[MAX_STRING_SIZE];
	struct rte_lpm *lpm;
	int i;

	lpm = rte_lpm_find_existing("fr_test_once");
	rte_lpm_free(lpm);

	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);

		if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)
			rte_lpm_free(lpm);
	}
}

static int
lpm_create_free(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_lpm *lpm;
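	/* minimal LPM configuration: a handful of rules is enough for this test */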
	struct rte_lpm_config config;

	config.max_rules = 4;
	config.number_tbl8s = 256;
	config.flags = 0;
	char lpm_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same lpm simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
		if (lpm != NULL)
			rte_atomic_fetch_add_explicit(&obj_count, 1, rte_memory_order_relaxed);
	}

	/* create multiple LPM tables simultaneously */
	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
		lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
		if (NULL == lpm)
			return -1;

		/* verify the table can be found, then free it */
		if (lpm != rte_lpm_find_existing(lpm_name))
			return -1;

		rte_lpm_free(lpm);

		/* verify it was freed correctly */
		if (NULL != rte_lpm_find_existing(lpm_name))
			return -1;
	}

	return 0;
}
#endif /* RTE_LIB_LPM */

struct test_case {
	case_func_t func;
	void *arg;
	case_clean_t clean;
	char name[MAX_STRING_SIZE];
};

/* All test cases in the test suite */
struct test_case test_cases[] = {
	{ test_eal_init_once, NULL, NULL, "eal init once" },
	{ ring_create_lookup, NULL, ring_clean, "ring create/lookup" },
	{ mempool_create_lookup, NULL, mempool_clean,
			"mempool create/lookup" },
#ifdef RTE_LIB_HASH
	{ hash_create_free, NULL, hash_clean, "hash create/free" },
	{ fbk_create_free, NULL, fbk_clean, "fbk create/free" },
#endif /* RTE_LIB_HASH */
#ifdef RTE_LIB_LPM
	{ lpm_create_free, NULL, lpm_clean, "lpm create/free" },
#endif /* RTE_LIB_LPM */
};

/**
 * Launch a test case on the main lcore and on all worker lcores simultaneously.
 */
static int
launch_test(struct test_case *pt_case)
{
	unsigned int lcore_id;
	unsigned int cores;
	unsigned int count;
	int ret = 0;

	if (pt_case->func == NULL)
		return -1;

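	/* reset the shared counters before launching this case */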
	rte_atomic_store_explicit(&obj_count, 0, rte_memory_order_relaxed);
	rte_atomic_store_explicit(&synchro, 0, rte_memory_order_relaxed);

	cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
	}

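	/* release the workers, then run the same case on the main lcore */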
	rte_atomic_store_explicit(&synchro, 1, rte_memory_order_relaxed);

	if (pt_case->func(pt_case->arg) < 0)
		ret = -1;

	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			ret = -1;
	}

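	/* free the objects created under every lcore's name */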
	RTE_LCORE_FOREACH(lcore_id) {
		if (pt_case->clean != NULL)
			pt_case->clean(lcore_id);
	}

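	/* exactly one thread must have created the shared "fr_test_once" object */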
	count = rte_atomic_load_explicit(&obj_count, rte_memory_order_relaxed);
	if (count != 1) {
		printf("%s: common object allocated %u times (should be 1)\n",
			pt_case->name, count);
		ret = -1;
	}

	return ret;
}

/**
 * Main entry of the func_reentrancy test
 */
static int
test_func_reentrancy(void)
{
	uint32_t case_id;
	struct test_case *pt_case = NULL;

	if (RTE_EXEC_ENV_IS_WINDOWS)
		return TEST_SKIPPED;

	if (rte_lcore_count() < 2) {
		printf("Not enough cores for func_reentrancy_autotest, expecting at least 2\n");
		return TEST_SKIPPED;
	} else if (rte_lcore_count() > MAX_LCORES)
		printf("Too many lcores, some cores will be disabled\n");

	for (case_id = 0; case_id < RTE_DIM(test_cases); case_id++) {
		pt_case = &test_cases[case_id];
		if (pt_case->func == NULL)
			continue;

		if (launch_test(pt_case) < 0) {
			printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);
			return -1;
		}
		printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);
	}

	return 0;
}

REGISTER_FAST_TEST(func_reentrancy_autotest, false, true, test_func_reentrancy);