1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright (c) 2019-2020 Arm Limited
3 */
4
5 #include <stdio.h>
6 #include <string.h>
7 #include <rte_pause.h>
8 #include <rte_rcu_qsbr.h>
9 #include <rte_hash.h>
10 #include <rte_hash_crc.h>
11 #include <rte_malloc.h>
12 #include <rte_cycles.h>
13 #include <rte_random.h>
14 #include <unistd.h>
15
16 #include "test.h"
17
18 /* Check condition and return an error if true. */
19 #define TEST_RCU_QSBR_RETURN_IF_ERROR(cond, str, ...) \
20 do { \
21 if (cond) { \
22 printf("ERROR file %s, line %d: " str "\n", __FILE__, \
23 __LINE__, ##__VA_ARGS__); \
24 return -1; \
25 } \
26 } while (0)
27
28 /* Check condition and go to label if true. */
29 #define TEST_RCU_QSBR_GOTO_IF_ERROR(label, cond, str, ...) \
30 do { \
31 if (cond) { \
32 printf("ERROR file %s, line %d: " str "\n", __FILE__, \
33 __LINE__, ##__VA_ARGS__); \
34 goto label; \
35 } \
36 } while (0)
37
38 /* Make sure that this has the same value as __RTE_QSBR_CNT_INIT */
39 #define TEST_RCU_QSBR_CNT_INIT 1
40
41 static uint16_t enabled_core_ids[RTE_MAX_LCORE];
42 static unsigned int num_cores;
43
44 static uint32_t *keys;
45 #define TOTAL_ENTRY (1024 * 8)
46 #define COUNTER_VALUE 4096
47 static uint32_t *hash_data[RTE_MAX_LCORE][TOTAL_ENTRY];
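/* Synchronization flags shared across threads: writer_done tells the reader
 * threads to stop looping, cb_failed records an error detected inside a
 * defer-queue free callback so the main thread can fail the test later.
 */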
48 static uint8_t writer_done;
49 static uint8_t cb_failed;
50
51 static struct rte_rcu_qsbr *t[RTE_MAX_LCORE];
52 static struct rte_hash *h[RTE_MAX_LCORE];
53 static char hash_name[RTE_MAX_LCORE][8];
54
55 struct test_rcu_thread_info {
56 /* Index in RCU array */
57 int ir;
58 /* Index in hash array */
59 int ih;
60 /* lcore IDs registered on the RCU variable */
61 uint16_t r_core_ids[2];
62 };
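/* One entry per group of four worker lcores used by the multi writer/reader
 * functional test: two lcores run the reader loop and two run the writer,
 * all sharing the same QSBR variable and hash table.
 */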
63 static struct test_rcu_thread_info thread_info[RTE_MAX_LCORE/4];
64
65 static int
66 alloc_rcu(void)
67 {
68 int i;
69 size_t sz;
70
71 sz = rte_rcu_qsbr_get_memsize(RTE_MAX_LCORE);
72
73 for (i = 0; i < RTE_MAX_LCORE; i++)
74 t[i] = (struct rte_rcu_qsbr *)rte_zmalloc(NULL, sz,
75 RTE_CACHE_LINE_SIZE);
76
77 return 0;
78 }
79
80 static int
81 free_rcu(void)
82 {
83 int i;
84
85 for (i = 0; i < RTE_MAX_LCORE; i++)
86 rte_free(t[i]);
87
88 return 0;
89 }
90
91 /*
92 * rte_rcu_qsbr_get_memsize: Returns the size of memory, in bytes, required
93 * for a QSBR variable supporting the given maximum number of reader threads.
94 */
95 static int
96 test_rcu_qsbr_get_memsize(void)
97 {
98 size_t sz;
99
100 printf("\nTest rte_rcu_qsbr_get_memsize()\n");
101
102 sz = rte_rcu_qsbr_get_memsize(0);
103 TEST_RCU_QSBR_RETURN_IF_ERROR((sz != 1), "Get Memsize for 0 threads");
104
105 sz = rte_rcu_qsbr_get_memsize(128);
106 /* For 128 threads,
107 * for machines with cache line size of 64B - 8384
108 * for machines with cache line size of 128 - 16768
109 */
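/* These expected values track the current rte_rcu_qsbr layout: one
 * cache-line-aligned counter per thread (128 of them here) plus a few cache
 * lines for the control block and the registered-thread bitmap.
 */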
110 if (RTE_CACHE_LINE_SIZE == 64)
111 TEST_RCU_QSBR_RETURN_IF_ERROR((sz != 8384),
112 "Get Memsize for 128 threads");
113 else if (RTE_CACHE_LINE_SIZE == 128)
114 TEST_RCU_QSBR_RETURN_IF_ERROR((sz != 16768),
115 "Get Memsize for 128 threads");
116
117 return 0;
118 }
119
120 /*
121 * rte_rcu_qsbr_init: Initialize a QSBR variable.
122 */
123 static int
124 test_rcu_qsbr_init(void)
125 {
126 int r;
127
128 printf("\nTest rte_rcu_qsbr_init()\n");
129
130 r = rte_rcu_qsbr_init(NULL, RTE_MAX_LCORE);
131 TEST_RCU_QSBR_RETURN_IF_ERROR((r != 1), "NULL variable");
132
133 return 0;
134 }
135
136 /*
137 * rte_rcu_qsbr_thread_register: Add a reader thread to the list of threads
138 * reporting their quiescent state on a QS variable.
139 */
140 static int
141 test_rcu_qsbr_thread_register(void)
142 {
143 int ret;
144
145 printf("\nTest rte_rcu_qsbr_thread_register()\n");
146
147 ret = rte_rcu_qsbr_thread_register(NULL, enabled_core_ids[0]);
148 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "NULL variable check");
149
150 ret = rte_rcu_qsbr_thread_register(NULL, 100000);
151 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),
152 "NULL variable, invalid thread id");
153
154 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
155
156 /* Register valid thread id */
157 ret = rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);
158 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 1), "Valid thread id");
159
160 /* Re-registering should not return error */
161 ret = rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);
162 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 1),
163 "Already registered thread id");
164
165 /* Register valid thread id - max allowed thread id */
166 ret = rte_rcu_qsbr_thread_register(t[0], RTE_MAX_LCORE - 1);
167 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 1), "Max thread id");
168
169 ret = rte_rcu_qsbr_thread_register(t[0], 100000);
170 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),
171 "NULL variable, invalid thread id");
172
173 return 0;
174 }
175
176 /*
177 * rte_rcu_qsbr_thread_unregister: Remove a reader thread from the list of
178 * threads reporting their quiescent state on a QS variable.
179 */
180 static int
181 test_rcu_qsbr_thread_unregister(void)
182 {
183 unsigned int num_threads[3] = {1, RTE_MAX_LCORE, 1};
184 unsigned int i, j;
185 unsigned int skip_thread_id;
186 uint64_t token;
187 int ret;
188
189 printf("\nTest rte_rcu_qsbr_thread_unregister()\n");
190
191 ret = rte_rcu_qsbr_thread_unregister(NULL, enabled_core_ids[0]);
192 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "NULL variable check");
193
194 ret = rte_rcu_qsbr_thread_unregister(NULL, 100000);
195 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),
196 "NULL variable, invalid thread id");
197
198 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
199
200 rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);
201
202 ret = rte_rcu_qsbr_thread_unregister(t[0], 100000);
203 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),
204 "NULL variable, invalid thread id");
205
206 /* Find first disabled core */
207 for (i = 0; i < RTE_MAX_LCORE; i++) {
208 if (enabled_core_ids[i] == 0)
209 break;
210 }
211 /* Test with disabled lcore */
212 ret = rte_rcu_qsbr_thread_unregister(t[0], i);
213 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 1),
214 "disabled thread id");
215 /* Unregister already unregistered core */
216 ret = rte_rcu_qsbr_thread_unregister(t[0], i);
217 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 1),
218 "Already unregistered core");
219
220 /* Test with enabled lcore */
221 ret = rte_rcu_qsbr_thread_unregister(t[0], enabled_core_ids[0]);
222 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 1),
223 "enabled thread id");
224 /* Unregister already unregistered core */
225 ret = rte_rcu_qsbr_thread_unregister(t[0], enabled_core_ids[0]);
226 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 1),
227 "Already unregistered core");
228
229 /*
230 * Test with different thread_ids:
231 * 1 - thread_id = 0
232 * 2 - All possible thread_ids, from 0 to RTE_MAX_LCORE
233 * 3 - thread_id = RTE_MAX_LCORE - 1
234 */
235 for (j = 0; j < 3; j++) {
236 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
237
238 for (i = 0; i < num_threads[j]; i++)
239 rte_rcu_qsbr_thread_register(t[0],
240 (j == 2) ? (RTE_MAX_LCORE - 1) : i);
241
242 token = rte_rcu_qsbr_start(t[0]);
243 TEST_RCU_QSBR_RETURN_IF_ERROR(
244 (token != (TEST_RCU_QSBR_CNT_INIT + 1)), "QSBR Start");
245 skip_thread_id = rte_rand() % RTE_MAX_LCORE;
246 /* Update quiescent state counter */
247 for (i = 0; i < num_threads[j]; i++) {
248 /* Skip one update */
249 if ((j == 1) && (i == skip_thread_id))
250 continue;
251 rte_rcu_qsbr_quiescent(t[0],
252 (j == 2) ? (RTE_MAX_LCORE - 1) : i);
253 }
254
255 if (j == 1) {
256 /* Validate the updates */
257 ret = rte_rcu_qsbr_check(t[0], token, false);
258 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),
259 "Non-blocking QSBR check");
260 /* Update the previously skipped thread */
261 rte_rcu_qsbr_quiescent(t[0], skip_thread_id);
262 }
263
264 /* Validate the updates */
265 ret = rte_rcu_qsbr_check(t[0], token, false);
266 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),
267 "Non-blocking QSBR check");
268
269 for (i = 0; i < num_threads[j]; i++)
270 rte_rcu_qsbr_thread_unregister(t[0],
271 (j == 2) ? (RTE_MAX_LCORE - 1) : i);
272
273 /* Check with no thread registered */
274 ret = rte_rcu_qsbr_check(t[0], token, true);
275 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0),
276 "Blocking QSBR check");
277 }
278 return 0;
279 }
280
281 /*
282 * rte_rcu_qsbr_start: Ask the worker threads to report the quiescent state
283 * status.
284 */
285 static int
286 test_rcu_qsbr_start(void)
287 {
288 uint64_t token;
289 unsigned int i;
290
291 printf("\nTest rte_rcu_qsbr_start()\n");
292
293 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
294
295 for (i = 0; i < num_cores; i++)
296 rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[i]);
297
298 token = rte_rcu_qsbr_start(t[0]);
299 TEST_RCU_QSBR_RETURN_IF_ERROR(
300 (token != (TEST_RCU_QSBR_CNT_INIT + 1)), "QSBR Start");
301 return 0;
302 }
303
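/* Reader helper for test_rcu_qsbr_check(): even-indexed enabled lcores report
 * their quiescent state, odd-indexed ones unregister, so the blocking check in
 * the main thread can complete.
 */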
304 static int
305 test_rcu_qsbr_check_reader(void *arg)
306 {
307 struct rte_rcu_qsbr *temp;
308 uint8_t read_type = (uint8_t)((uintptr_t)arg);
309 unsigned int i;
310
311 temp = t[read_type];
312
313 /* Update quiescent state counter */
314 for (i = 0; i < num_cores; i++) {
315 if (i % 2 == 0)
316 rte_rcu_qsbr_quiescent(temp, enabled_core_ids[i]);
317 else
318 rte_rcu_qsbr_thread_unregister(temp,
319 enabled_core_ids[i]);
320 }
321 return 0;
322 }
323
324 /*
325 * rte_rcu_qsbr_check: Checks if all the worker threads have entered the
326 * quiescent state 'n' number of times. 'n' is provided in the rte_rcu_qsbr_start API.
327 */
328 static int
329 test_rcu_qsbr_check(void)
330 {
331 int ret;
332 unsigned int i;
333 uint64_t token;
334
335 printf("\nTest rte_rcu_qsbr_check()\n");
336
337 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
338
339 token = rte_rcu_qsbr_start(t[0]);
340 TEST_RCU_QSBR_RETURN_IF_ERROR(
341 (token != (TEST_RCU_QSBR_CNT_INIT + 1)), "QSBR Start");
342
343
344 ret = rte_rcu_qsbr_check(t[0], 0, false);
345 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "Token = 0");
346
347 ret = rte_rcu_qsbr_check(t[0], token, true);
348 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "Blocking QSBR check");
349
350 for (i = 0; i < num_cores; i++)
351 rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[i]);
352
353 ret = rte_rcu_qsbr_check(t[0], token, false);
354 /* Threads are offline, hence this should pass */
355 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "Non-blocking QSBR check");
356
357 token = rte_rcu_qsbr_start(t[0]);
358 TEST_RCU_QSBR_RETURN_IF_ERROR(
359 (token != (TEST_RCU_QSBR_CNT_INIT + 2)), "QSBR Start");
360
361 ret = rte_rcu_qsbr_check(t[0], token, false);
362 /* Threads are offline, hence this should pass */
363 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "Non-blocking QSBR check");
364
365 for (i = 0; i < num_cores; i++)
366 rte_rcu_qsbr_thread_unregister(t[0], enabled_core_ids[i]);
367
368 ret = rte_rcu_qsbr_check(t[0], token, true);
369 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "Blocking QSBR check");
370
371 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
372
373 for (i = 0; i < num_cores; i++)
374 rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[i]);
375
376 token = rte_rcu_qsbr_start(t[0]);
377 TEST_RCU_QSBR_RETURN_IF_ERROR(
378 (token != (TEST_RCU_QSBR_CNT_INIT + 1)), "QSBR Start");
379
380 rte_eal_remote_launch(test_rcu_qsbr_check_reader, NULL,
381 enabled_core_ids[0]);
382
383 rte_eal_mp_wait_lcore();
384 ret = rte_rcu_qsbr_check(t[0], token, true);
385 TEST_RCU_QSBR_RETURN_IF_ERROR((ret != 1), "Blocking QSBR check");
386
387 return 0;
388 }
389
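/* Reader used by test_rcu_qsbr_synchronize(): registers and goes online, then
 * keeps reporting its quiescent state until the writer sets writer_done.
 */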
390 static int
391 test_rcu_qsbr_synchronize_reader(void *arg)
392 {
393 uint32_t lcore_id = rte_lcore_id();
394 (void)arg;
395
396 /* Register and become online */
397 rte_rcu_qsbr_thread_register(t[0], lcore_id);
398 rte_rcu_qsbr_thread_online(t[0], lcore_id);
399
400 while (!writer_done)
401 rte_rcu_qsbr_quiescent(t[0], lcore_id);
402
403 rte_rcu_qsbr_thread_offline(t[0], lcore_id);
404 rte_rcu_qsbr_thread_unregister(t[0], lcore_id);
405
406 return 0;
407 }
408
409 /*
410 * rte_rcu_qsbr_synchronize: Wait till all the reader threads have entered
411 * the quiescent state.
412 */
413 static int
414 test_rcu_qsbr_synchronize(void)
415 {
416 unsigned int i;
417
418 printf("\nTest rte_rcu_qsbr_synchronize()\n");
419
420 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
421
422 /* Test if the API returns when there are no threads reporting
423 * QS on the variable.
424 */
425 rte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);
426
427 /* Test if the API returns when there are threads registered
428 * but not online.
429 */
430 for (i = 0; i < RTE_MAX_LCORE; i++)
431 rte_rcu_qsbr_thread_register(t[0], i);
432 rte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);
433
434 /* Test if the API returns when the caller is also
435 * reporting the QS status.
436 */
437 rte_rcu_qsbr_thread_online(t[0], 0);
438 rte_rcu_qsbr_synchronize(t[0], 0);
439 rte_rcu_qsbr_thread_offline(t[0], 0);
440
441 /* Check the other boundary */
442 rte_rcu_qsbr_thread_online(t[0], RTE_MAX_LCORE - 1);
443 rte_rcu_qsbr_synchronize(t[0], RTE_MAX_LCORE - 1);
444 rte_rcu_qsbr_thread_offline(t[0], RTE_MAX_LCORE - 1);
445
446 /* Test if the API returns after unregistering all the threads */
447 for (i = 0; i < RTE_MAX_LCORE; i++)
448 rte_rcu_qsbr_thread_unregister(t[0], i);
449 rte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);
450
451 /* Test if the API returns with the live threads */
452 writer_done = 0;
453 for (i = 0; i < num_cores; i++)
454 rte_eal_remote_launch(test_rcu_qsbr_synchronize_reader,
455 NULL, enabled_core_ids[i]);
456 rte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);
457 rte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);
458 rte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);
459 rte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);
460 rte_rcu_qsbr_synchronize(t[0], RTE_QSBR_THRID_INVALID);
461
462 writer_done = 1;
463 rte_eal_mp_wait_lcore();
464
465 return 0;
466 }
467
468 /*
469 * rte_rcu_qsbr_thread_online: Add a registered reader thread to
470 * the list of threads reporting their quiescent state on a QS variable.
471 */
472 static int
473 test_rcu_qsbr_thread_online(void)
474 {
475 int i, ret;
476 uint64_t token;
477
478 printf("Test rte_rcu_qsbr_thread_online()\n");
479
480 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
481
482 /* Register 2 threads to validate that only the
483 * online thread is waited upon.
484 */
485 rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);
486 rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[1]);
487
488 /* Use qsbr_start to verify that the thread_online API
489 * succeeded.
490 */
491 token = rte_rcu_qsbr_start(t[0]);
492
493 /* Make the thread online */
494 rte_rcu_qsbr_thread_online(t[0], enabled_core_ids[0]);
495
496 /* Check if the thread is online */
497 ret = rte_rcu_qsbr_check(t[0], token, true);
498 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "thread online");
499
500 /* Check if the online thread can report QS */
501 token = rte_rcu_qsbr_start(t[0]);
502 rte_rcu_qsbr_quiescent(t[0], enabled_core_ids[0]);
503 ret = rte_rcu_qsbr_check(t[0], token, true);
504 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "thread update");
505
506 /* Make all the threads online */
507 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
508 token = rte_rcu_qsbr_start(t[0]);
509 for (i = 0; i < RTE_MAX_LCORE; i++) {
510 rte_rcu_qsbr_thread_register(t[0], i);
511 rte_rcu_qsbr_thread_online(t[0], i);
512 }
513 /* Check if all the threads are online */
514 ret = rte_rcu_qsbr_check(t[0], token, true);
515 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "thread online");
516 /* Check if all the online threads can report QS */
517 token = rte_rcu_qsbr_start(t[0]);
518 for (i = 0; i < RTE_MAX_LCORE; i++)
519 rte_rcu_qsbr_quiescent(t[0], i);
520 ret = rte_rcu_qsbr_check(t[0], token, true);
521 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "thread update");
522
523 return 0;
524 }
525
526 /*
527 * rte_rcu_qsbr_thread_offline: Remove a registered reader thread from
528 * the list of threads reporting their quiescent state on a QS variable.
529 */
530 static int
531 test_rcu_qsbr_thread_offline(void)
532 {
533 int i, ret;
534 uint64_t token;
535
536 printf("\nTest rte_rcu_qsbr_thread_offline()\n");
537
538 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
539
540 rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);
541
542 /* Make the thread offline */
543 rte_rcu_qsbr_thread_offline(t[0], enabled_core_ids[0]);
544
545 /* Use qsbr_start to verify that the thread_offline API
546 * succeeded.
547 */
548 token = rte_rcu_qsbr_start(t[0]);
549 /* Check if the thread is offline */
550 ret = rte_rcu_qsbr_check(t[0], token, true);
551 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "thread offline");
552
553 /* Bring an offline thread online and check if it can
554 * report QS.
555 */
556 rte_rcu_qsbr_thread_online(t[0], enabled_core_ids[0]);
557 /* Check if the online thread can report QS */
558 token = rte_rcu_qsbr_start(t[0]);
559 rte_rcu_qsbr_quiescent(t[0], enabled_core_ids[0]);
560 ret = rte_rcu_qsbr_check(t[0], token, true);
561 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "offline to online");
562
563 /*
564 * Check a sequence of online/status/offline/status/online/status
565 */
566 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
567 token = rte_rcu_qsbr_start(t[0]);
568 /* Make the threads online */
569 for (i = 0; i < RTE_MAX_LCORE; i++) {
570 rte_rcu_qsbr_thread_register(t[0], i);
571 rte_rcu_qsbr_thread_online(t[0], i);
572 }
573
574 /* Check if all the threads are online */
575 ret = rte_rcu_qsbr_check(t[0], token, true);
576 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "thread online");
577
578 /* Check if all the online threads can report QS */
579 token = rte_rcu_qsbr_start(t[0]);
580 for (i = 0; i < RTE_MAX_LCORE; i++)
581 rte_rcu_qsbr_quiescent(t[0], i);
582 ret = rte_rcu_qsbr_check(t[0], token, true);
583 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "report QS");
584
585 /* Make all the threads offline */
586 for (i = 0; i < RTE_MAX_LCORE; i++)
587 rte_rcu_qsbr_thread_offline(t[0], i);
588 /* Make sure these threads are not being waited on */
589 token = rte_rcu_qsbr_start(t[0]);
590 ret = rte_rcu_qsbr_check(t[0], token, true);
591 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "offline QS");
592
593 /* Make the threads online */
594 for (i = 0; i < RTE_MAX_LCORE; i++)
595 rte_rcu_qsbr_thread_online(t[0], i);
596 /* Check if all the online threads can report QS */
597 token = rte_rcu_qsbr_start(t[0]);
598 for (i = 0; i < RTE_MAX_LCORE; i++)
599 rte_rcu_qsbr_quiescent(t[0], i);
600 ret = rte_rcu_qsbr_check(t[0], token, true);
601 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "online again");
602
603 return 0;
604 }
605
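/* Defer-queue free callbacks used by the tests below. Any unexpected context
 * pointer, element pointer or element count is recorded in cb_failed so the
 * test can detect it after reclamation has run.
 */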
606 static void
607 test_rcu_qsbr_free_resource1(void *p, void *e, unsigned int n)
608 {
609 if (p != NULL || e != NULL || n != 1) {
610 printf("%s: Test failed\n", __func__);
611 cb_failed = 1;
612 }
613 }
614
615 static void
616 test_rcu_qsbr_free_resource2(void *p, void *e, unsigned int n)
617 {
618 if (p != NULL || e == NULL || n != 1) {
619 printf("%s: Test failed\n", __func__);
620 cb_failed = 1;
621 }
622 }
623
624 /*
625 * rte_rcu_qsbr_dq_create: create a queue used to store the data structure
626 * elements that can be freed later. This queue is referred to as 'defer queue'.
627 */
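/* Typical defer queue usage (for context): the writer deletes an element from
 * its data structure, enqueues a copy of it (or a token describing it) with
 * rte_rcu_qsbr_dq_enqueue(), and free_fn is invoked only after all registered
 * readers have passed through a quiescent state.
 */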
628 static int
629 test_rcu_qsbr_dq_create(void)
630 {
631 char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
632 struct rte_rcu_qsbr_dq_parameters params;
633 struct rte_rcu_qsbr_dq *dq;
634
635 printf("\nTest rte_rcu_qsbr_dq_create()\n");
636
637 /* Pass invalid parameters */
638 dq = rte_rcu_qsbr_dq_create(NULL);
639 TEST_RCU_QSBR_RETURN_IF_ERROR((dq != NULL), "dq create invalid params");
640
641 memset(&params, 0, sizeof(struct rte_rcu_qsbr_dq_parameters));
642 dq = rte_rcu_qsbr_dq_create(&params);
643 TEST_RCU_QSBR_RETURN_IF_ERROR((dq != NULL), "dq create invalid params");
644
645 snprintf(rcu_dq_name, sizeof(rcu_dq_name), "TEST_RCU");
646 params.name = rcu_dq_name;
647 dq = rte_rcu_qsbr_dq_create(&params);
648 TEST_RCU_QSBR_RETURN_IF_ERROR((dq != NULL), "dq create invalid params");
649
650 params.free_fn = test_rcu_qsbr_free_resource1;
651 dq = rte_rcu_qsbr_dq_create(&params);
652 TEST_RCU_QSBR_RETURN_IF_ERROR((dq != NULL), "dq create invalid params");
653
654 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
655 params.v = t[0];
656 dq = rte_rcu_qsbr_dq_create(&params);
657 TEST_RCU_QSBR_RETURN_IF_ERROR((dq != NULL), "dq create invalid params");
658
659 params.size = 1;
660 dq = rte_rcu_qsbr_dq_create(&params);
661 TEST_RCU_QSBR_RETURN_IF_ERROR((dq != NULL), "dq create invalid params");
662
663 params.esize = 3;
664 dq = rte_rcu_qsbr_dq_create(&params);
665 TEST_RCU_QSBR_RETURN_IF_ERROR((dq != NULL), "dq create invalid params");
666
667 params.trigger_reclaim_limit = 0;
668 params.max_reclaim_size = 0;
669 dq = rte_rcu_qsbr_dq_create(&params);
670 TEST_RCU_QSBR_RETURN_IF_ERROR((dq != NULL), "dq create invalid params");
671
672 /* Pass all valid parameters */
673 params.esize = 16;
674 params.trigger_reclaim_limit = 0;
675 params.max_reclaim_size = params.size;
676 dq = rte_rcu_qsbr_dq_create(&params);
677 TEST_RCU_QSBR_RETURN_IF_ERROR((dq == NULL), "dq create valid params");
678 rte_rcu_qsbr_dq_delete(dq);
679
680 params.esize = 16;
681 params.flags = RTE_RCU_QSBR_DQ_MT_UNSAFE;
682 dq = rte_rcu_qsbr_dq_create(&params);
683 TEST_RCU_QSBR_RETURN_IF_ERROR((dq == NULL), "dq create valid params");
684 rte_rcu_qsbr_dq_delete(dq);
685
686 return 0;
687 }
688
689 /*
690 * rte_rcu_qsbr_dq_enqueue: enqueue one resource to the defer queue,
691 * to be freed later after at least one grace period is over.
692 */
693 static int
694 test_rcu_qsbr_dq_enqueue(void)
695 {
696 int ret;
697 uint64_t r;
698 char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
699 struct rte_rcu_qsbr_dq_parameters params;
700 struct rte_rcu_qsbr_dq *dq;
701
702 printf("\nTest rte_rcu_qsbr_dq_enqueue()\n");
703
704 /* Create a queue with simple parameters */
705 memset(&params, 0, sizeof(struct rte_rcu_qsbr_dq_parameters));
706 snprintf(rcu_dq_name, sizeof(rcu_dq_name), "TEST_RCU");
707 params.name = rcu_dq_name;
708 params.free_fn = test_rcu_qsbr_free_resource1;
709 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
710 params.v = t[0];
711 params.size = 1;
712 params.esize = 16;
713 params.trigger_reclaim_limit = 0;
714 params.max_reclaim_size = params.size;
715 dq = rte_rcu_qsbr_dq_create(&params);
716 TEST_RCU_QSBR_RETURN_IF_ERROR((dq == NULL), "dq create valid params");
717
718 /* Pass invalid parameters */
719 ret = rte_rcu_qsbr_dq_enqueue(NULL, NULL);
720 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "dq enqueue invalid params");
721
722 ret = rte_rcu_qsbr_dq_enqueue(dq, NULL);
723 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "dq enqueue invalid params");
724
725 ret = rte_rcu_qsbr_dq_enqueue(NULL, &r);
726 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "dq enqueue invalid params");
727
728 ret = rte_rcu_qsbr_dq_delete(dq);
729 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 1), "dq delete valid params");
730
731 return 0;
732 }
733
734 /*
735 * rte_rcu_qsbr_dq_reclaim: Reclaim resources from the defer queue.
736 */
737 static int
738 test_rcu_qsbr_dq_reclaim(void)
739 {
740 int ret;
741 char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
742 struct rte_rcu_qsbr_dq_parameters params;
743 struct rte_rcu_qsbr_dq *dq;
744
745 printf("\nTest rte_rcu_qsbr_dq_reclaim()\n");
746
747 /* Pass invalid parameters */
748 ret = rte_rcu_qsbr_dq_reclaim(NULL, 10, NULL, NULL, NULL);
749 TEST_RCU_QSBR_RETURN_IF_ERROR((ret != 1), "dq reclaim invalid params");
750
751 /* Pass invalid parameters */
752 memset(&params, 0, sizeof(struct rte_rcu_qsbr_dq_parameters));
753 snprintf(rcu_dq_name, sizeof(rcu_dq_name), "TEST_RCU");
754 params.name = rcu_dq_name;
755 params.free_fn = test_rcu_qsbr_free_resource1;
756 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
757 params.v = t[0];
758 params.size = 1;
759 params.esize = 3;
760 params.trigger_reclaim_limit = 0;
761 params.max_reclaim_size = params.size;
762 dq = rte_rcu_qsbr_dq_create(&params);
763 ret = rte_rcu_qsbr_dq_reclaim(dq, 0, NULL, NULL, NULL);
764 TEST_RCU_QSBR_RETURN_IF_ERROR((ret != 1), "dq reclaim invalid params");
765
766 return 0;
767 }
768
769 /*
770 * rte_rcu_qsbr_dq_delete: Delete a defer queue.
771 */
772 static int
773 test_rcu_qsbr_dq_delete(void)
774 {
775 int ret;
776 char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
777 struct rte_rcu_qsbr_dq_parameters params;
778 struct rte_rcu_qsbr_dq *dq;
779
780 printf("\nTest rte_rcu_qsbr_dq_delete()\n");
781
782 /* Pass invalid parameters */
783 ret = rte_rcu_qsbr_dq_delete(NULL);
784 TEST_RCU_QSBR_RETURN_IF_ERROR((ret != 0), "dq delete invalid params");
785
786 memset(&params, 0, sizeof(struct rte_rcu_qsbr_dq_parameters));
787 snprintf(rcu_dq_name, sizeof(rcu_dq_name), "TEST_RCU");
788 params.name = rcu_dq_name;
789 params.free_fn = test_rcu_qsbr_free_resource1;
790 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
791 params.v = t[0];
792 params.size = 1;
793 params.esize = 16;
794 params.trigger_reclaim_limit = 0;
795 params.max_reclaim_size = params.size;
796 dq = rte_rcu_qsbr_dq_create(&params);
797 TEST_RCU_QSBR_RETURN_IF_ERROR((dq == NULL), "dq create valid params");
798 ret = rte_rcu_qsbr_dq_delete(dq);
799 TEST_RCU_QSBR_RETURN_IF_ERROR((ret != 0), "dq delete valid params");
800
801 return 0;
802 }
803
804 /*
805 * Functional test for the defer queue APIs: create a queue, enqueue resources
806 * with and without registered readers, and verify reclamation and deletion.
807 */
808 static int
809 test_rcu_qsbr_dq_functional(int32_t size, int32_t esize, uint32_t flags)
810 {
811 int i, j, ret;
812 char rcu_dq_name[RTE_RCU_QSBR_DQ_NAMESIZE];
813 struct rte_rcu_qsbr_dq_parameters params;
814 struct rte_rcu_qsbr_dq *dq;
815 uint64_t *e;
816 uint64_t sc = 200;
817 int max_entries;
818
819 printf("\nTest rte_rcu_qsbr_dq_xxx functional tests\n");
820 printf("Size = %d, esize = %d, flags = 0x%x\n", size, esize, flags);
821
822 e = (uint64_t *)rte_zmalloc(NULL, esize, RTE_CACHE_LINE_SIZE);
823 if (e == NULL)
824 return 0;
825 cb_failed = 0;
826
827 /* Initialize the RCU variable. No threads are registered */
828 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
829
830 /* Create a queue with simple parameters */
831 memset(&params, 0, sizeof(struct rte_rcu_qsbr_dq_parameters));
832 snprintf(rcu_dq_name, sizeof(rcu_dq_name), "TEST_RCU");
833 params.name = rcu_dq_name;
834 params.flags = flags;
835 params.free_fn = test_rcu_qsbr_free_resource2;
836 params.v = t[0];
837 params.size = size;
838 params.esize = esize;
839 params.trigger_reclaim_limit = size >> 3;
840 params.max_reclaim_size = (size >> 4)?(size >> 4):1;
841 dq = rte_rcu_qsbr_dq_create(&params);
842 TEST_RCU_QSBR_RETURN_IF_ERROR((dq == NULL), "dq create valid params");
843
844 /* Given the size calculate the maximum number of entries
845 * that can be stored on the defer queue (look at the logic used
846 * in capacity calculation of rte_ring).
847 */
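/* For example, size = 303 results in a ring of 512 slots and 511 usable
 * entries; size = 1 gives a ring of 2 slots and a single usable entry.
 */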
848 max_entries = rte_align32pow2(size + 1) - 1;
849 printf("max_entries = %d\n", max_entries);
850
851 /* Enqueue few counters starting with the value 'sc' */
852 /* The queue size will be rounded up to 2. The enqueue API also
853 * reclaims if the queue size is above certain limit. Since, there
854 * are no threads registered, reclamation succeeds. Hence, it should
855 * be possible to enqueue more than the provided queue size.
856 */
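/* With size = 1 the trigger limit (size >> 3) is 0, so the enqueue API
 * reclaims on every call; with no readers registered the reclaim always
 * succeeds, which is why all 10 enqueues fit.
 */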
857 for (i = 0; i < 10; i++) {
858 ret = rte_rcu_qsbr_dq_enqueue(dq, e);
859 TEST_RCU_QSBR_GOTO_IF_ERROR(end, (ret != 0),
860 "dq enqueue functional, i = %d", i);
861 for (j = 0; j < esize/8; j++)
862 e[j] = sc++;
863 }
864
865 /* Validate that the callback function did not report any error */
866 TEST_RCU_QSBR_GOTO_IF_ERROR(end, (cb_failed == 1), "CB failed");
867
868 /* Register a thread on the RCU QSBR variable. Reclamation will not
869 * succeed. It should not be possible to enqueue more than the size
870 * number of resources.
871 */
872 rte_rcu_qsbr_thread_register(t[0], 1);
873 rte_rcu_qsbr_thread_online(t[0], 1);
874
875 for (i = 0; i < max_entries; i++) {
876 ret = rte_rcu_qsbr_dq_enqueue(dq, e);
877 TEST_RCU_QSBR_GOTO_IF_ERROR(end, (ret != 0),
878 "dq enqueue functional, max_entries = %d, i = %d",
879 max_entries, i);
880 for (j = 0; j < esize/8; j++)
881 e[j] = sc++;
882 }
883
884 /* Enqueue fails as queue is full */
885 ret = rte_rcu_qsbr_dq_enqueue(dq, e);
886 TEST_RCU_QSBR_GOTO_IF_ERROR(end, (ret == 0), "defer queue is not full");
887
888 /* Delete should fail as there are elements in defer queue which
889 * cannot be reclaimed.
890 */
891 ret = rte_rcu_qsbr_dq_delete(dq);
892 TEST_RCU_QSBR_RETURN_IF_ERROR((ret == 0), "dq delete valid params");
893
894 /* Report quiescent state, enqueue should succeed */
895 rte_rcu_qsbr_quiescent(t[0], 1);
896 for (i = 0; i < max_entries; i++) {
897 ret = rte_rcu_qsbr_dq_enqueue(dq, e);
898 TEST_RCU_QSBR_GOTO_IF_ERROR(end, (ret != 0),
899 "dq enqueue functional");
900 for (j = 0; j < esize/8; j++)
901 e[j] = sc++;
902 }
903
904 /* Validate that the callback function did not report any error */
905 TEST_RCU_QSBR_GOTO_IF_ERROR(end, (cb_failed == 1), "CB failed");
906
907 /* Queue is full */
908 ret = rte_rcu_qsbr_dq_enqueue(dq, e);
909 TEST_RCU_QSBR_GOTO_IF_ERROR(end, (ret == 0), "defer queue is not full");
910
911 /* Report quiescent state, delete should succeed */
912 rte_rcu_qsbr_quiescent(t[0], 1);
913 ret = rte_rcu_qsbr_dq_delete(dq);
914 TEST_RCU_QSBR_RETURN_IF_ERROR((ret != 0), "dq delete valid params");
915
916 rte_free(e);
917
918 /* Validate that the callback function did not report any error */
919 TEST_RCU_QSBR_RETURN_IF_ERROR((cb_failed == 1), "CB failed");
920
921 return 0;
922
923 end:
924 rte_free(e);
925 ret = rte_rcu_qsbr_dq_delete(dq);
926 TEST_RCU_QSBR_RETURN_IF_ERROR((ret != 0), "dq delete valid params");
927 return -1;
928 }
929
930 /*
931 * rte_rcu_qsbr_dump: Dump status of a single QS variable to a file
932 */
933 static int
934 test_rcu_qsbr_dump(void)
935 {
936 unsigned int i;
937
938 printf("\nTest rte_rcu_qsbr_dump()\n");
939
940 /* Negative tests */
941 rte_rcu_qsbr_dump(NULL, t[0]);
942 rte_rcu_qsbr_dump(stdout, NULL);
943 rte_rcu_qsbr_dump(NULL, NULL);
944
945 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
946 rte_rcu_qsbr_init(t[1], RTE_MAX_LCORE);
947
948 /* QS variable with 0 core mask */
949 rte_rcu_qsbr_dump(stdout, t[0]);
950
951 rte_rcu_qsbr_thread_register(t[0], enabled_core_ids[0]);
952
953 for (i = 1; i < num_cores; i++)
954 rte_rcu_qsbr_thread_register(t[1], enabled_core_ids[i]);
955
956 rte_rcu_qsbr_dump(stdout, t[0]);
957 rte_rcu_qsbr_dump(stdout, t[1]);
958 printf("\n");
959 return 0;
960 }
961
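/* Reader thread for the functional tests: repeatedly registers and goes
 * online, walks all hash keys and, for each key still present, increments the
 * per-lcore counter up to COUNTER_VALUE while holding the RCU lock, then
 * reports quiescence and goes offline, until the writer signals completion.
 */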
962 static int
963 test_rcu_qsbr_reader(void *arg)
964 {
965 struct rte_rcu_qsbr *temp;
966 struct rte_hash *hash = NULL;
967 int i;
968 uint32_t lcore_id = rte_lcore_id();
969 struct test_rcu_thread_info *ti;
970 uint32_t *pdata;
971
972 ti = (struct test_rcu_thread_info *)arg;
973 temp = t[ti->ir];
974 hash = h[ti->ih];
975
976 do {
977 rte_rcu_qsbr_thread_register(temp, lcore_id);
978 rte_rcu_qsbr_thread_online(temp, lcore_id);
979 for (i = 0; i < TOTAL_ENTRY; i++) {
980 rte_rcu_qsbr_lock(temp, lcore_id);
981 if (rte_hash_lookup_data(hash, keys+i,
982 (void **)&pdata) != -ENOENT) {
983 pdata[lcore_id] = 0;
984 while (pdata[lcore_id] < COUNTER_VALUE)
985 pdata[lcore_id]++;
986 }
987 rte_rcu_qsbr_unlock(temp, lcore_id);
988 }
989 /* Update quiescent state counter */
990 rte_rcu_qsbr_quiescent(temp, lcore_id);
991 rte_rcu_qsbr_thread_offline(temp, lcore_id);
992 rte_rcu_qsbr_thread_unregister(temp, lcore_id);
993 } while (!writer_done);
994
995 return 0;
996 }
997
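/* Writer thread for the functional tests: deletes one key from the shared
 * hash, waits for a grace period with start/check, verifies that the readers
 * registered for this group either never touched the entry or finished
 * counting, then frees the key position and its per-lcore data.
 */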
998 static int
999 test_rcu_qsbr_writer(void *arg)
1000 {
1001 uint64_t token;
1002 int32_t i, pos, del;
1003 uint32_t c;
1004 struct rte_rcu_qsbr *temp;
1005 struct rte_hash *hash = NULL;
1006 struct test_rcu_thread_info *ti;
1007
1008 ti = (struct test_rcu_thread_info *)arg;
1009 temp = t[ti->ir];
1010 hash = h[ti->ih];
1011
1012 /* Delete element from the shared data structure */
1013 del = rte_lcore_id() % TOTAL_ENTRY;
1014 pos = rte_hash_del_key(hash, keys + del);
1015 if (pos < 0) {
1016 printf("Delete key failed #%d\n", keys[del]);
1017 return -1;
1018 }
1019 /* Start the quiescent state query process */
1020 token = rte_rcu_qsbr_start(temp);
1021 /* Check the quiescent state status */
1022 rte_rcu_qsbr_check(temp, token, true);
1023 for (i = 0; i < 2; i++) {
1024 c = hash_data[ti->ih][del][ti->r_core_ids[i]];
1025 if (c != COUNTER_VALUE && c != 0) {
1026 printf("Reader lcore id %u did not complete = %u\t",
1027 rte_lcore_id(), c);
1028 return -1;
1029 }
1030 }
1031
1032 if (rte_hash_free_key_with_position(hash, pos) < 0) {
1033 printf("Failed to free the key #%d\n", keys[del]);
1034 return -1;
1035 }
1036 rte_free(hash_data[ti->ih][del]);
1037 hash_data[ti->ih][del] = NULL;
1038
1039 return 0;
1040 }
1041
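/* Create a hash table with lock-free read/write concurrency, populate it with
 * TOTAL_ENTRY keys and attach a per-lcore counter array to every key for the
 * reader threads to update.
 */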
1042 static struct rte_hash *
1043 init_hash(int hash_id)
1044 {
1045 int i;
1046 struct rte_hash *h = NULL;
1047
1048 sprintf(hash_name[hash_id], "hash%d", hash_id);
1049 struct rte_hash_parameters hash_params = {
1050 .entries = TOTAL_ENTRY,
1051 .key_len = sizeof(uint32_t),
1052 .hash_func_init_val = 0,
1053 .socket_id = rte_socket_id(),
1054 .hash_func = rte_hash_crc,
1055 .extra_flag =
1056 RTE_HASH_EXTRA_FLAGS_RW_CONCURRENCY_LF,
1057 .name = hash_name[hash_id],
1058 };
1059
1060 h = rte_hash_create(&hash_params);
1061 if (h == NULL) {
1062 printf("Hash create Failed\n");
1063 return NULL;
1064 }
1065
1066 for (i = 0; i < TOTAL_ENTRY; i++) {
1067 hash_data[hash_id][i] =
1068 rte_zmalloc(NULL, sizeof(uint32_t) * RTE_MAX_LCORE, 0);
1069 if (hash_data[hash_id][i] == NULL) {
1070 printf("No memory\n");
1071 return NULL;
1072 }
1073 }
1074 keys = rte_malloc(NULL, sizeof(uint32_t) * TOTAL_ENTRY, 0);
1075 if (keys == NULL) {
1076 printf("No memory\n");
1077 return NULL;
1078 }
1079
1080 for (i = 0; i < TOTAL_ENTRY; i++)
1081 keys[i] = i;
1082
1083 for (i = 0; i < TOTAL_ENTRY; i++) {
1084 if (rte_hash_add_key_data(h, keys + i,
1085 (void *)((uintptr_t)hash_data[hash_id][i]))
1086 < 0) {
1087 printf("Hash key add Failed #%d\n", i);
1088 return NULL;
1089 }
1090 }
1091 return h;
1092 }
1093
1094 /*
1095 * Functional test:
1096 * Single writer, Single QS variable, simultaneous QSBR Queries
1097 */
1098 static int
1099 test_rcu_qsbr_sw_sv_3qs(void)
1100 {
1101 uint64_t token[3];
1102 uint32_t c;
1103 int i, num_readers;
1104 int32_t pos[3];
1105
1106 writer_done = 0;
1107
1108 printf("Test: 1 writer, 1 QSBR variable, simultaneous QSBR queries\n");
1109
1110 rte_rcu_qsbr_init(t[0], RTE_MAX_LCORE);
1111
1112 /* Shared data structure created */
1113 h[0] = init_hash(0);
1114 if (h[0] == NULL) {
1115 printf("Hash init failed\n");
1116 goto error;
1117 }
1118
1119 /* No need to fill the registered core IDs as the writer
1120 * thread is not launched.
1121 */
1122 thread_info[0].ir = 0;
1123 thread_info[0].ih = 0;
1124
1125 /* Reader threads are launched */
1126 /* Keep the number of reader threads low to reduce
1127 * the execution time.
1128 */
1129 num_readers = num_cores < 4 ? num_cores : 4;
1130 for (i = 0; i < num_readers; i++)
1131 rte_eal_remote_launch(test_rcu_qsbr_reader, &thread_info[0],
1132 enabled_core_ids[i]);
1133
1134 /* Delete element from the shared data structure */
1135 pos[0] = rte_hash_del_key(h[0], keys + 0);
1136 if (pos[0] < 0) {
1137 printf("Delete key failed #%d\n", keys[0]);
1138 goto error;
1139 }
1140 /* Start the quiescent state query process */
1141 token[0] = rte_rcu_qsbr_start(t[0]);
1142
1143 /* Delete element from the shared data structure */
1144 pos[1] = rte_hash_del_key(h[0], keys + 3);
1145 if (pos[1] < 0) {
1146 printf("Delete key failed #%d\n", keys[3]);
1147 goto error;
1148 }
1149 /* Start the quiescent state query process */
1150 token[1] = rte_rcu_qsbr_start(t[0]);
1151
1152 /* Delete element from the shared data structure */
1153 pos[2] = rte_hash_del_key(h[0], keys + 6);
1154 if (pos[2] < 0) {
1155 printf("Delete key failed #%d\n", keys[6]);
1156 goto error;
1157 }
1158 /* Start the quiescent state query process */
1159 token[2] = rte_rcu_qsbr_start(t[0]);
1160
1161 /* Check the quiescent state status */
1162 rte_rcu_qsbr_check(t[0], token[0], true);
1163 for (i = 0; i < num_readers; i++) {
1164 c = hash_data[0][0][enabled_core_ids[i]];
1165 if (c != COUNTER_VALUE && c != 0) {
1166 printf("Reader lcore %d did not complete #0 = %d\n",
1167 enabled_core_ids[i], c);
1168 goto error;
1169 }
1170 }
1171
1172 if (rte_hash_free_key_with_position(h[0], pos[0]) < 0) {
1173 printf("Failed to free the key #%d\n", keys[0]);
1174 goto error;
1175 }
1176 rte_free(hash_data[0][0]);
1177 hash_data[0][0] = NULL;
1178
1179 /* Check the quiescent state status */
1180 rte_rcu_qsbr_check(t[0], token[1], true);
1181 for (i = 0; i < num_readers; i++) {
1182 c = hash_data[0][3][enabled_core_ids[i]];
1183 if (c != COUNTER_VALUE && c != 0) {
1184 printf("Reader lcore %d did not complete #3 = %d\n",
1185 enabled_core_ids[i], c);
1186 goto error;
1187 }
1188 }
1189
1190 if (rte_hash_free_key_with_position(h[0], pos[1]) < 0) {
1191 printf("Failed to free the key #%d\n", keys[3]);
1192 goto error;
1193 }
1194 rte_free(hash_data[0][3]);
1195 hash_data[0][3] = NULL;
1196
1197 /* Check the quiescent state status */
1198 rte_rcu_qsbr_check(t[0], token[2], true);
1199 for (i = 0; i < num_readers; i++) {
1200 c = hash_data[0][6][enabled_core_ids[i]];
1201 if (c != COUNTER_VALUE && c != 0) {
1202 printf("Reader lcore %d did not complete #6 = %d\n",
1203 enabled_core_ids[i], c);
1204 goto error;
1205 }
1206 }
1207
1208 if (rte_hash_free_key_with_position(h[0], pos[2]) < 0) {
1209 printf("Failed to free the key #%d\n", keys[6]);
1210 goto error;
1211 }
1212 rte_free(hash_data[0][6]);
1213 hash_data[0][6] = NULL;
1214
1215 writer_done = 1;
1216
1217 /* Wait and check return value from reader threads */
1218 for (i = 0; i < num_readers; i++)
1219 if (rte_eal_wait_lcore(enabled_core_ids[i]) < 0)
1220 goto error;
1221 rte_hash_free(h[0]);
1222 rte_free(keys);
1223
1224 return 0;
1225
1226 error:
1227 writer_done = 1;
1228 /* Wait until all readers have exited */
1229 rte_eal_mp_wait_lcore();
1230
1231 rte_hash_free(h[0]);
1232 rte_free(keys);
1233 for (i = 0; i < TOTAL_ENTRY; i++)
1234 rte_free(hash_data[0][i]);
1235
1236 return -1;
1237 }
1238
1239 /*
1240 * Multiple writers, multiple QS variables, simultaneous QSBR queries
1241 */
1242 static int
1243 test_rcu_qsbr_mw_mv_mqs(void)
1244 {
1245 unsigned int i, j;
1246 unsigned int test_cores;
1247
1248 if (RTE_MAX_LCORE < 5 || num_cores < 4) {
1249 printf("Not enough cores for %s, expecting at least 5\n",
1250 __func__);
1251 return TEST_SKIPPED;
1252 }
1253
1254 writer_done = 0;
1255 test_cores = num_cores / 4;
1256 test_cores = test_cores * 4;
1257
1258 printf("Test: %d writers, %d QSBR variable, simultaneous QSBR queries\n",
1259 test_cores / 2, test_cores / 4);
1260
1261 for (i = 0; i < test_cores / 4; i++) {
1262 j = i * 4;
1263 rte_rcu_qsbr_init(t[i], RTE_MAX_LCORE);
1264 h[i] = init_hash(i);
1265 if (h[i] == NULL) {
1266 printf("Hash init failed\n");
1267 goto error;
1268 }
1269 thread_info[i].ir = i;
1270 thread_info[i].ih = i;
1271 thread_info[i].r_core_ids[0] = enabled_core_ids[j];
1272 thread_info[i].r_core_ids[1] = enabled_core_ids[j + 1];
1273
1274 /* Reader threads are launched */
1275 rte_eal_remote_launch(test_rcu_qsbr_reader,
1276 (void *)&thread_info[i],
1277 enabled_core_ids[j]);
1278 rte_eal_remote_launch(test_rcu_qsbr_reader,
1279 (void *)&thread_info[i],
1280 enabled_core_ids[j + 1]);
1281
1282 /* Writer threads are launched */
1283 rte_eal_remote_launch(test_rcu_qsbr_writer,
1284 (void *)&thread_info[i],
1285 enabled_core_ids[j + 2]);
1286 rte_eal_remote_launch(test_rcu_qsbr_writer,
1287 (void *)&thread_info[i],
1288 enabled_core_ids[j + 3]);
1289 }
1290
1291 /* Wait and check return value from writer threads */
1292 for (i = 0; i < test_cores / 4; i++) {
1293 j = i * 4;
1294 if (rte_eal_wait_lcore(enabled_core_ids[j + 2]) < 0)
1295 goto error;
1296
1297 if (rte_eal_wait_lcore(enabled_core_ids[j + 3]) < 0)
1298 goto error;
1299 }
1300 writer_done = 1;
1301
1302 /* Wait and check return value from reader threads */
1303 for (i = 0; i < test_cores / 4; i++) {
1304 j = i * 4;
1305 if (rte_eal_wait_lcore(enabled_core_ids[j]) < 0)
1306 goto error;
1307
1308 if (rte_eal_wait_lcore(enabled_core_ids[j + 1]) < 0)
1309 goto error;
1310 }
1311
1312 for (i = 0; i < test_cores / 4; i++)
1313 rte_hash_free(h[i]);
1314
1315 rte_free(keys);
1316
1317 return 0;
1318
1319 error:
1320 writer_done = 1;
1321 /* Wait until all readers and writers have exited */
1322 rte_eal_mp_wait_lcore();
1323
1324 for (i = 0; i < test_cores / 4; i++)
1325 rte_hash_free(h[i]);
1326 rte_free(keys);
1327 for (j = 0; j < test_cores / 4; j++)
1328 for (i = 0; i < TOTAL_ENTRY; i++)
1329 rte_free(hash_data[j][i]);
1330
1331 return -1;
1332 }
1333
1334 static int
1335 test_rcu_qsbr_main(void)
1336 {
1337 uint16_t core_id;
1338
1339 num_cores = 0;
1340 RTE_LCORE_FOREACH_WORKER(core_id) {
1341 enabled_core_ids[num_cores] = core_id;
1342 num_cores++;
1343 }
1344
1345 /* Error-checking test cases */
1346 if (test_rcu_qsbr_get_memsize() < 0)
1347 goto test_fail;
1348
1349 if (test_rcu_qsbr_init() < 0)
1350 goto test_fail;
1351
1352 alloc_rcu();
1353
1354 if (test_rcu_qsbr_thread_register() < 0)
1355 goto test_fail;
1356
1357 if (test_rcu_qsbr_thread_unregister() < 0)
1358 goto test_fail;
1359
1360 if (test_rcu_qsbr_start() < 0)
1361 goto test_fail;
1362
1363 if (test_rcu_qsbr_check() < 0)
1364 goto test_fail;
1365
1366 if (test_rcu_qsbr_synchronize() < 0)
1367 goto test_fail;
1368
1369 if (test_rcu_qsbr_dump() < 0)
1370 goto test_fail;
1371
1372 if (test_rcu_qsbr_thread_online() < 0)
1373 goto test_fail;
1374
1375 if (test_rcu_qsbr_thread_offline() < 0)
1376 goto test_fail;
1377
1378 if (test_rcu_qsbr_dq_create() < 0)
1379 goto test_fail;
1380
1381 if (test_rcu_qsbr_dq_reclaim() < 0)
1382 goto test_fail;
1383
1384 if (test_rcu_qsbr_dq_delete() < 0)
1385 goto test_fail;
1386
1387 if (test_rcu_qsbr_dq_enqueue() < 0)
1388 goto test_fail;
1389
1390 printf("\nFunctional tests\n");
1391
1392 if (test_rcu_qsbr_sw_sv_3qs() < 0)
1393 goto test_fail;
1394
1395 if (test_rcu_qsbr_mw_mv_mqs() < 0)
1396 goto test_fail;
1397
1398 if (test_rcu_qsbr_dq_functional(1, 8, 0) < 0)
1399 goto test_fail;
1400
1401 if (test_rcu_qsbr_dq_functional(2, 8, RTE_RCU_QSBR_DQ_MT_UNSAFE) < 0)
1402 goto test_fail;
1403
1404 if (test_rcu_qsbr_dq_functional(303, 16, 0) < 0)
1405 goto test_fail;
1406
1407 if (test_rcu_qsbr_dq_functional(7, 128, RTE_RCU_QSBR_DQ_MT_UNSAFE) < 0)
1408 goto test_fail;
1409
1410 free_rcu();
1411
1412 printf("\n");
1413 return 0;
1414
1415 test_fail:
1416 free_rcu();
1417
1418 return -1;
1419 }
1420
1421 REGISTER_FAST_TEST(rcu_qsbr_autotest, true, true, test_rcu_qsbr_main);
1422