/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <inttypes.h>
#include <stdarg.h>
#include <errno.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_memory.h>
#include <rte_launch.h>
#include <rte_cycles.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_ring.h>
#include <rte_mempool.h>
#include <rte_spinlock.h>
#include <rte_malloc.h>

#ifdef RTE_LIBRTE_HASH
#include <rte_hash.h>
#include <rte_fbk_hash.h>
#include <rte_jhash.h>
#endif /* RTE_LIBRTE_HASH */

#ifdef RTE_LIBRTE_LPM
#include <rte_lpm.h>
#endif /* RTE_LIBRTE_LPM */

#include <rte_string_fns.h>

#include "test.h"

typedef int (*case_func_t)(void *arg);
typedef void (*case_clean_t)(unsigned int lcore_id);

#define MAX_STRING_SIZE                     (256)
#define MAX_ITER_MULTI                      (16)
#define MAX_ITER_ONCE                       (4)
#define MAX_LPM_ITER_TIMES                  (6)

#define MEMPOOL_ELT_SIZE                    (sizeof(uint32_t))
#define MEMPOOL_SIZE                        (4)

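/* Cap on the number of participating lcores: each lcore creates up to
 * MAX_ITER_MULTI named objects per test case and each creation may
 * reserve memzones, so this keeps the total below RTE_MAX_MEMZONE
 * (the budget of four memzones per created object is an assumption
 * of this test, not something the libraries guarantee). */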
#define MAX_LCORES	(RTE_MAX_MEMZONE / (MAX_ITER_MULTI * 4U))

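/* obj_count counts how many lcores managed to create the shared
 * "fr_test_once" object; exactly one creation should succeed.
 * synchro is the start gate that releases the slave lcores so that
 * all of them reach the create calls at roughly the same time. */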
static rte_atomic32_t obj_count = RTE_ATOMIC32_INIT(0);
static rte_atomic32_t synchro = RTE_ATOMIC32_INIT(0);

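/* slave lcores spin here until the master sets synchro to 1, so that
 * all lcores race into the subsequent create/lookup calls concurrently */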
#define WAIT_SYNCHRO_FOR_SLAVES()   do { \
	if (lcore_self != rte_get_master_lcore())                  \
		while (rte_atomic32_read(&synchro) == 0);        \
} while (0)

/*
 * Verify that rte_eal_init() can only succeed once: any further call
 * must fail.
 */
static int
test_eal_init_once(__rte_unused void *arg)
{
	unsigned int lcore_self = rte_lcore_id();

	WAIT_SYNCHRO_FOR_SLAVES();

	rte_atomic32_set(&obj_count, 1); /* silence the obj_count check in the caller */
	if (rte_eal_init(0, NULL) != -1)
		return -1;

	return 0;
}

/*
 * ring create/lookup reentrancy test
 */
static void
ring_clean(unsigned int lcore_id)
{
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(ring_name, sizeof(ring_name),
				"fr_test_%d_%d", lcore_id, i);
		rp = rte_ring_lookup(ring_name);
		if (rp != NULL)
			rte_ring_free(rp);
	}
}

static int
ring_create_lookup(__rte_unused void *arg)
{
	unsigned int lcore_self = rte_lcore_id();
	struct rte_ring *rp;
	char ring_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same ring simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		rp = rte_ring_create("fr_test_once", 4096, SOCKET_ID_ANY, 0);
		if (rp != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create/look up new rings several times */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(ring_name, sizeof(ring_name), "fr_test_%d_%d", lcore_self, i);
		rp = rte_ring_create(ring_name, 4096, SOCKET_ID_ANY, 0);
		if (rp == NULL)
			return -1;
		if (rte_ring_lookup(ring_name) != rp)
			return -1;

		/* verify the ring is visible to a second lookup */
		if (rte_ring_lookup(ring_name) == NULL)
			return -1;
	}

	return 0;
}

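/*
 * mempool create/lookup reentrancy test
 */

/* per-object constructor passed to rte_mempool_create(): zero the
 * element and stamp it with its index */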
static void
my_obj_init(struct rte_mempool *mp, __rte_unused void *arg,
	    void *obj, unsigned int i)
{
	uint32_t *objnum = obj;
	memset(obj, 0, mp->elt_size);
	*objnum = i;
}

static void
mempool_clean(unsigned int lcore_id)
{
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	/* free all mempools created by this lcore */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d",
			 lcore_id, i);
		mp = rte_mempool_lookup(mempool_name);
		if (mp != NULL)
			rte_mempool_free(mp);
	}
}

static int
mempool_create_lookup(__rte_unused void *arg)
{
	unsigned int lcore_self = rte_lcore_id();
	struct rte_mempool *mp;
	char mempool_name[MAX_STRING_SIZE];
	int i;

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same mempool simultaneously on all threads */
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		mp = rte_mempool_create("fr_test_once", MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (mp != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create/look up new mempools several times */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(mempool_name, sizeof(mempool_name), "fr_test_%d_%d", lcore_self, i);
		mp = rte_mempool_create(mempool_name, MEMPOOL_SIZE,
					MEMPOOL_ELT_SIZE, 0, 0,
					NULL, NULL,
					my_obj_init, NULL,
					SOCKET_ID_ANY, 0);
		if (mp == NULL)
			return -1;
		if (rte_mempool_lookup(mempool_name) != mp)
			return -1;

		/* verify the mempool is visible to a second lookup */
		if (rte_mempool_lookup(mempool_name) == NULL)
			return -1;
	}

	return 0;
}

#ifdef RTE_LIBRTE_HASH
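/*
 * hash create/free reentrancy test
 */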
static void
hash_clean(unsigned int lcore_id)
{
	char hash_name[MAX_STRING_SIZE];
	struct rte_hash *handle;
	int i;

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_hash_find_existing(hash_name)) != NULL)
			rte_hash_free(handle);
	}
}

static int
hash_create_free(__rte_unused void *arg)
{
	unsigned int lcore_self = rte_lcore_id();
	struct rte_hash *handle;
	char hash_name[MAX_STRING_SIZE];
	int i;
	struct rte_hash_parameters hash_params = {
		.name = NULL,
		.entries = 16,
		.key_len = 4,
		.hash_func = (rte_hash_function)rte_jhash_32b,
		.hash_func_init_val = 0,
		.socket_id = 0,
	};

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same hash simultaneously on all threads */
	hash_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		handle = rte_hash_create(&hash_params);
		if (handle != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create multiple hash tables simultaneously */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(hash_name, sizeof(hash_name), "fr_test_%d_%d", lcore_self, i);
		hash_params.name = hash_name;

		handle = rte_hash_create(&hash_params);
		if (handle == NULL)
			return -1;

		/* verify the table can be found, then free it */
		if (handle != rte_hash_find_existing(hash_name))
			return -1;

		rte_hash_free(handle);

		/* verify the free took effect */
		if (NULL != rte_hash_find_existing(hash_name))
			return -1;
	}

	return 0;
}

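/*
 * fbk hash create/free reentrancy test
 */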
static void
fbk_clean(unsigned int lcore_id)
{
	char fbk_name[MAX_STRING_SIZE];
	struct rte_fbk_hash_table *handle;
	int i;

	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_id, i);

		if ((handle = rte_fbk_hash_find_existing(fbk_name)) != NULL)
			rte_fbk_hash_free(handle);
	}
}

static int
fbk_create_free(__rte_unused void *arg)
{
	unsigned int lcore_self = rte_lcore_id();
	struct rte_fbk_hash_table *handle;
	char fbk_name[MAX_STRING_SIZE];
	int i;
	struct rte_fbk_hash_params fbk_params = {
		.name = NULL,
		.entries = 4,
		.entries_per_bucket = 4,
		.socket_id = 0,
		.hash_func = rte_jhash_1word,
		.init_val = RTE_FBK_HASH_INIT_VAL_DEFAULT,
	};

	WAIT_SYNCHRO_FOR_SLAVES();

	/* create the same fbk hash table simultaneously on all threads */
	fbk_params.name = "fr_test_once";
	for (i = 0; i < MAX_ITER_ONCE; i++) {
		handle = rte_fbk_hash_create(&fbk_params);
		if (handle != NULL)
			rte_atomic32_inc(&obj_count);
	}

	/* create multiple fbk tables simultaneously */
	for (i = 0; i < MAX_ITER_MULTI; i++) {
		snprintf(fbk_name, sizeof(fbk_name), "fr_test_%d_%d", lcore_self, i);
		fbk_params.name = fbk_name;

		handle = rte_fbk_hash_create(&fbk_params);
		if (handle == NULL)
			return -1;

		/* verify the table can be found, then free it */
		if (handle != rte_fbk_hash_find_existing(fbk_name))
			return -1;

		rte_fbk_hash_free(handle);

		/* verify the free took effect */
		if (NULL != rte_fbk_hash_find_existing(fbk_name))
			return -1;
	}

	return 0;
}
#endif /* RTE_LIBRTE_HASH */

#ifdef RTE_LIBRTE_LPM
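/*
 * lpm create/free reentrancy test
 */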
static void
lpm_clean(unsigned int lcore_id)
{
	char lpm_name[MAX_STRING_SIZE];
	struct rte_lpm *lpm;
	int i;

	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_id, i);

		if ((lpm = rte_lpm_find_existing(lpm_name)) != NULL)
			rte_lpm_free(lpm);
	}
}

348 
349 static int
350 lpm_create_free(__attribute__((unused)) void *arg)
351 {
352 	unsigned lcore_self = rte_lcore_id();
353 	struct rte_lpm *lpm;
354 	struct rte_lpm_config config;
355 
356 	config.max_rules = 4;
357 	config.number_tbl8s = 256;
358 	config.flags = 0;
359 	char lpm_name[MAX_STRING_SIZE];
360 	int i;
361 
362 	WAIT_SYNCHRO_FOR_SLAVES();
363 
364 	/* create the same lpm simultaneously on all threads */
365 	for (i = 0; i < MAX_ITER_ONCE; i++) {
366 		lpm = rte_lpm_create("fr_test_once",  SOCKET_ID_ANY, &config);
367 		if (lpm != NULL)
368 			rte_atomic32_inc(&obj_count);
369 	}
370 
371 	/* create mutiple fbk tables simultaneously */
372 	for (i = 0; i < MAX_LPM_ITER_TIMES; i++) {
373 		snprintf(lpm_name, sizeof(lpm_name), "fr_test_%d_%d", lcore_self, i);
374 		lpm = rte_lpm_create(lpm_name, SOCKET_ID_ANY, &config);
375 		if (NULL == lpm)
376 			return -1;
377 
378 		/* verify correct existing and then free all */
379 		if (lpm != rte_lpm_find_existing(lpm_name))
380 			return -1;
381 
382 		rte_lpm_free(lpm);
383 
384 		/* verify free correct */
385 		if (NULL != rte_lpm_find_existing(lpm_name))
386 			return -1;
387 	}
388 
389 	return 0;
390 }
391 #endif /* RTE_LIBRTE_LPM */
392 
struct test_case {
	case_func_t    func;
	void          *arg;
	case_clean_t   clean;
	char           name[MAX_STRING_SIZE];
};

/* All test cases in the test suite */
static struct test_case test_cases[] = {
	{ test_eal_init_once,     NULL,  NULL,         "eal init once" },
	{ ring_create_lookup,     NULL,  ring_clean,   "ring create/lookup" },
	{ mempool_create_lookup,  NULL,  mempool_clean,
			"mempool create/lookup" },
#ifdef RTE_LIBRTE_HASH
	{ hash_create_free,       NULL,  hash_clean,   "hash create/free" },
	{ fbk_create_free,        NULL,  fbk_clean,    "fbk create/free" },
#endif /* RTE_LIBRTE_HASH */
#ifdef RTE_LIBRTE_LPM
	{ lpm_create_free,        NULL,  lpm_clean,    "lpm create/free" },
#endif /* RTE_LIBRTE_LPM */
};

/**
 * Launch one test case on all enabled lcores simultaneously and check
 * that the shared object was created exactly once.
 */
static int
launch_test(struct test_case *pt_case)
{
	int ret = 0;
	unsigned int lcore_id;
	unsigned int cores_save = rte_lcore_count();
	unsigned int cores = RTE_MIN(cores_save, MAX_LCORES);
	unsigned int count;

	if (pt_case->func == NULL)
		return -1;

	rte_atomic32_set(&obj_count, 0);
	rte_atomic32_set(&synchro, 0);

	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		rte_eal_remote_launch(pt_case->func, pt_case->arg, lcore_id);
	}

	rte_atomic32_set(&synchro, 1);

	if (pt_case->func(pt_case->arg) < 0)
		ret = -1;

	cores = cores_save;
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (cores == 1)
			break;
		cores--;
		if (rte_eal_wait_lcore(lcore_id) < 0)
			ret = -1;

		if (pt_case->clean != NULL)
			pt_case->clean(lcore_id);
	}

	/* the loop above only covers slave lcores: also free the objects
	 * created by the master lcore itself */
	if (pt_case->clean != NULL)
		pt_case->clean(rte_lcore_id());

	count = rte_atomic32_read(&obj_count);
	if (count != 1) {
		printf("%s: common object allocated %u times (should be 1)\n",
			pt_case->name, count);
		ret = -1;
	}

	return ret;
}

/**
 * Main entry of func_reentrancy test
 */
static int
test_func_reentrancy(void)
{
	uint32_t case_id;
	struct test_case *pt_case = NULL;

	if (rte_lcore_count() < 2) {
		printf("Not enough cores for func_reentrancy_autotest, expecting at least 2\n");
		return TEST_SKIPPED;
	} else if (rte_lcore_count() > MAX_LCORES)
		printf("Too many lcores, only %u of them will be used\n",
			(unsigned int)MAX_LCORES);

	for (case_id = 0; case_id < RTE_DIM(test_cases); case_id++) {
		pt_case = &test_cases[case_id];
		if (pt_case->func == NULL)
			continue;

		if (launch_test(pt_case) < 0) {
			printf("Func-ReEnt CASE %"PRIu32": %s FAIL\n", case_id, pt_case->name);
			return -1;
		}
		printf("Func-ReEnt CASE %"PRIu32": %s PASS\n", case_id, pt_case->name);
	}

	return 0;
}

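/* Register the test with the dpdk-test harness; it can then be invoked
 * as "func_reentrancy_autotest" from the test application's command
 * prompt. */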
REGISTER_TEST_COMMAND(func_reentrancy_autotest, test_func_reentrancy);