/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#ifdef RTE_LIB_HASH
#include <rte_hash.h>
#include <rte_fbk_hash.h>
#include <rte_jhash.h>
#endif /* RTE_LIB_HASH */

#ifdef RTE_LIB_LPM
#include <rte_lpm.h>
#endif /* RTE_LIB_LPM */

#include <rte_string_fns.h>

#include "test.h"

typedef int (*case_func_t)(void *arg);
typedef void (*case_clean_t)(unsigned lcore_id);

#define MAX_STRING_SIZE                     (256)
#define MAX_ITER_MULTI                      (16)
#define MAX_ITER_ONCE                       (4)
#define MAX_LPM_ITER_TIMES                  (6)

#define MEMPOOL_ELT_SIZE                    (sizeof(uint32_t))
#define MEMPOOL_SIZE                        (4)

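/*
 * Cap the number of participating lcores: each lcore creates up to
 * MAX_ITER_MULTI named objects, and each object may consume several
 * memzones, so the cap keeps the RTE_MAX_MEMZONE table from being
 * exhausted (the 4U factor is assumed headroom per object).
 */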
#define MAX_LCORES	(RTE_MAX_MEMZONE / (MAX_ITER_MULTI * 4U))

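/*
 * obj_count tallies successful creations of the shared "fr_test_once"
 * object; launch_test() checks that it ends up at exactly 1.
 * synchro is the start flag that releases the worker lcores.
 */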
static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);

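/*
 * Workers busy-wait until the main lcore sets synchro, so that all
 * lcores enter the test body at (roughly) the same time.
 */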
#define WAIT_SYNCHRO_FOR_WORKERS()   do { \
	if (lcore_self != rte_get_main_lcore())                  \
		while (rte_atomic32_read(&synchro) == 0);        \
} while (0)

/*
 * rte_eal_init() may succeed only once; every later call must fail
 */
static int
test_eal_init_once(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();

	WAIT_SYNCHRO_FOR_WORKERS();

	rte_atomic32_set(&obj_count, 1); /* silence the obj_count check in the caller */
	if (rte_eal_init(0, NULL) != -1)
		return -1;

	return 0;
}

/*
 * ring create/lookup reentrancy test
 */
static void
ring_clean(unsigned int lcore_id)
{
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	rp = rte_ring_lookup("fr_test_once");
	if (rp != NULL)
		rte_ring_free(rp);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(ring_name, sizeof(ring_name),
				"fr_test_%d_%d", lcore_id, i);
		rp = rte_ring_lookup(ring_name);
		if (rp != NULL)
			rte_ring_free(rp);
	}
}

static int
ring_create_lookup(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same ring simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
		if (rp != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create/lookup a new ring several times */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
		rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
		if (NULL == rp)
			return -1;
		if (rte_ring_lookup(ring_name) != rp)
			return -1;

		/* verify the ring was created successfully */
		if (rte_ring_lookup(ring_name) == NULL)
			return -1;
	}

	return 0;
}

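/* per-object initializer: zero the element and stamp it with its index */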
static void
my_obj_init(struct rte_mempool *mp, __rte_unused void *arg,
	    void *obj, unsigned i)
{
	uint32_t *objnum = obj;
	memset(obj, 0, mp->elt_size);
	*objnum = i;
}

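/*
 * mempool create/lookup reentrancy test
 */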
static void
mempool_clean(unsigned int lcore_id)
{
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	mp = rte_mempool_lookup("fr_test_once");
	if (mp != NULL)
		rte_mempool_free(mp);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
			 lcore_id, i);
		mp = rte_mempool_lookup(mempool_name);
		if (mp != NULL)
			rte_mempool_free(mp);
	}
}

static int
mempool_create_lookup(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same mempool simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (mp != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create/lookup a new mempool several times */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
		mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
						MEMPOOL_ELT_SIZE, 0, 0,
						NULL, NULL,
						my_obj_init, NULL,
						SOCKET_ID_ANY, 0);
		if (NULL == mp)
			return -1;
		if (rte_mempool_lookup(mempool_name) != mp)
			return -1;

		/* verify the mempool was created successfully */
		if (rte_mempool_lookup(mempool_name) == NULL)
			return -1;
	}

	return 0;
}

#ifdef RTE_LIB_HASH
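/*
 * hash create/free reentrancy test
 */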
static void
hash_clean(unsigned lcore_id)
{
	char hash_name[MAX_STRING_SIZE];
	struct rte_hash *handle;
	int i;

	handle = rte_hash_find_existing("fr_test_once");
	if (handle != NULL)
		rte_hash_free(handle);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_hash_find_existing(hash_name)) != NULL)
			rte_hash_free(handle);
	}
}

static int
hash_create_free(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_hash *handle;
	char hash_name[MAX_STRING_SIZE];
	int i;
	struct rte_hash_parameters hash_params = {
		.name = NULL,
		.entries = 16,
		.key_len = 4,
		.hash_func = (rte_hash_function)rte_jhash_32b,
		.hash_func_init_val = 0,
		.socket_id = 0,
	};

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same hash simultaneously on all threads */
	hash_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		handle = rte_hash_create(&hash_params);
		if (handle != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create multiple hashes simultaneously */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
		hash_params.name = hash_name;

		handle = rte_hash_create(&hash_params);
		if (NULL == handle)
			return -1;

		/* verify the hash can be found, then free it */
		if (handle != rte_hash_find_existing(hash_name))
			return -1;

		rte_hash_free(handle);

		/* verify the free succeeded */
		if (NULL != rte_hash_find_existing(hash_name))
			return -1;
	}

	return 0;
}

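/*
 * fbk hash create/free reentrancy test
 */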
static void
fbk_clean(unsigned lcore_id)
{
	char fbk_name[MAX_STRING_SIZE];
	struct rte_fbk_hash_table *handle;
	int i;

	handle = rte_fbk_hash_find_existing("fr_test_once");
	if (handle != NULL)
		rte_fbk_hash_free(handle);

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
			rte_fbk_hash_free(handle);
	}
}

static int
fbk_create_free(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_fbk_hash_table *handle;
	char fbk_name[MAX_STRING_SIZE];
	int i;
	struct rte_fbk_hash_params fbk_params = {
		.name = NULL,
		.entries = 4,
		.entries_per_bucket = 4,
		.socket_id = 0,
		.hash_func = rte_jhash_1word,
		.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
	};

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same fbk hash table simultaneously on all threads */
	fbk_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		handle = rte_fbk_hash_create(&fbk_params);
		if (handle != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create multiple fbk tables simultaneously */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
		fbk_params.name = fbk_name;

		handle = rte_fbk_hash_create(&fbk_params);
		if (NULL == handle)
			return -1;

		/* verify the table can be found, then free it */
		if (handle != rte_fbk_hash_find_existing(fbk_name))
			return -1;

		rte_fbk_hash_free(handle);

		/* verify the free succeeded */
		if (NULL != rte_fbk_hash_find_existing(fbk_name))
			return -1;
	}

	return 0;
}
#endif /* RTE_LIB_HASH */

#ifdef RTE_LIB_LPM
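/*
 * lpm create/free reentrancy test
 */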
static void
lpm_clean(unsigned int lcore_id)
{
	char lpm_name[MAX_STRING_SIZE];
	struct rte_lpm *lpm;
	int i;

	lpm = rte_lpm_find_existing("fr_test_once");
	if (lpm != NULL)
		rte_lpm_free(lpm);

	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);

		if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)
			rte_lpm_free(lpm);
	}
}

static int
lpm_create_free(__rte_unused void *arg)
{
	unsigned lcore_self = rte_lcore_id();
	struct rte_lpm *lpm;
	struct rte_lpm_config config;
	char lpm_name[MAX_STRING_SIZE];
	int i;

	config.max_rules = 4;
	config.number_tbl8s = 256;
	config.flags = 0;

	WAIT_SYNCHRO_FOR_WORKERS();

	/* create the same lpm simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		lpm = rte_lpm_create("fr_test_once", SOCKET_ID_ANY, &config);
		if (lpm != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create multiple lpm tables simultaneously */
	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
		lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
		if (NULL == lpm)
			return -1;

		/* verify the table can be found, then free it */
		if (lpm != rte_lpm_find_existing(lpm_name))
			return -1;

		rte_lpm_free(lpm);

		/* verify the free succeeded */
		if (NULL != rte_lpm_find_existing(lpm_name))
			return -1;
	}

	return 0;
}
#endif /* RTE_LIB_LPM */

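/*
 * A single test case: the function run on every lcore, its argument,
 * and an optional per-lcore cleanup hook.
 */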
struct test_case {
	case_func_t    func;
	void          *arg;
	case_clean_t   clean;
	char           name[MAX_STRING_SIZE];
};

/* All test cases in the test suite */
struct test_case test_cases[] = {
	{ test_eal_init_once,     NULL,  NULL,         "eal init once" },
	{ ring_create_lookup,     NULL,  ring_clean,   "ring create/lookup" },
	{ mempool_create_lookup,  NULL,  mempool_clean,
			"mempool create/lookup" },
#ifdef RTE_LIB_HASH
	{ hash_create_free,       NULL,  hash_clean,   "hash create/free" },
	{ fbk_create_free,        NULL,  fbk_clean,    "fbk create/free" },
#endif /* RTE_LIB_HASH */
#ifdef RTE_LIB_LPM
	{ lpm_create_free,        NULL,  lpm_clean,    "lpm create/free" },
#endif /* RTE_LIB_LPM */
};

/**
 * launch the test case on the main lcore and all worker lcores
 */
static int
launch_test(struct test_case *pt_case)
{
	unsigned int lcore_id;
	unsigned int cores;
	unsigned int count;
	int ret = 0;

	if (pt_case->func == NULL)
		return -1;

	rte_atomic32_set(&obj_count, 0);
	rte_atomic32_set(&synchro, 0);

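	/*
	 * Launch the test body on up to MAX_LCORES - 1 workers, release
	 * them via synchro, then run the same body on the main lcore so
	 * every participating lcore races through the create/free paths.
	 */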
	cores = RTE_MIN(rte_lcore_count(), MAX_LCORES);
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
	}

	rte_atomic32_set(&synchro, 1);

	if (pt_case->func(pt_case->arg) < 0)
		ret = -1;

	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (rte_eal_wait_lcore(lcore_id) < 0)
			ret = -1;
	}

	RTE_LCORE_FOREACH(lcore_id) {
		if (pt_case->clean != NULL)
			pt_case->clean(lcore_id);
	}

	count = rte_atomic32_read(&obj_count);
	if (count != 1) {
		printf("%s: common object allocated %u times (should be 1)\n",
			pt_case->name, count);
		ret = -1;
	}

	return ret;
}

/**
 * Main entry of func_reentrancy test
 */
static int
test_func_reentrancy(void)
{
	uint32_t case_id;
	struct test_case *pt_case = NULL;

	if (rte_lcore_count() < 2) {
		printf("Not enough cores for func_reentrancy_autotest, expecting at least 2\n");
		return TEST_SKIPPED;
	} else if (rte_lcore_count() > MAX_LCORES) {
		printf("Too many lcores, some cores will be disabled\n");
	}

	for (case_id = 0; case_id < RTE_DIM(test_cases); case_id++) {
		pt_case = &test_cases[case_id];
		if (pt_case->func == NULL)
			continue;

		if (launch_test(pt_case) < 0) {
			printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);
			return -1;
		}
		printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);
	}

	return 0;
}

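/*
 * Registered as "func_reentrancy_autotest"; typically run by entering
 * that name at the dpdk-test binary's interactive prompt.
 */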
REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy);