/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2020 Red Hat, Inc.
 */

#include <limits.h>
#include <sched.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_thread.h>
#include <rte_stdatomic.h>

#include "test.h"

#ifndef _POSIX_PRIORITY_SCHEDULING
/* sched_yield(2):
 * POSIX systems on which sched_yield() is available define
 * _POSIX_PRIORITY_SCHEDULING in <unistd.h>.
 */
#define sched_yield()
#endif

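/* Context shared between the test body and each spawned thread: the thread
 * state, whether LCORE_ID_ANY is the expected lcore id after registration,
 * the thread handle, and a pointer to the shared registration counter.
 */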
struct thread_context {
	enum { Thread_INIT, Thread_ERROR, Thread_DONE } state;
	bool lcore_id_any;
	rte_thread_t id;
	RTE_ATOMIC(unsigned int) *registered_count;
};

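/* Body of every spawned non-EAL thread: register with EAL, check the assigned
 * lcore id against expectations, signal registration, wait for the test body
 * to release all threads, then unregister and check again.
 */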
static uint32_t thread_loop(void *arg)
{
	struct thread_context *t = arg;
	unsigned int lcore_id;

	lcore_id = rte_lcore_id();
	if (lcore_id != LCORE_ID_ANY) {
		printf("Error: incorrect lcore id for new thread %u\n", lcore_id);
		t->state = Thread_ERROR;
	}
	if (rte_thread_register() < 0)
		printf("Warning: could not register new thread (this might be expected during this test), reason %s\n",
			rte_strerror(rte_errno));
	lcore_id = rte_lcore_id();
	if ((t->lcore_id_any && lcore_id != LCORE_ID_ANY) ||
			(!t->lcore_id_any && lcore_id == LCORE_ID_ANY)) {
		printf("Error: could not register new thread, got %u while %sexpecting %u\n",
			lcore_id, t->lcore_id_any ? "" : "not ", LCORE_ID_ANY);
		t->state = Thread_ERROR;
	}
	/* Report to the control thread that registration happened. */
	rte_atomic_fetch_add_explicit(t->registered_count, 1, rte_memory_order_release);

	/* Wait for release from the control thread. */
	while (rte_atomic_load_explicit(t->registered_count, rte_memory_order_acquire) != 0)
		sched_yield();
	rte_thread_unregister();
	lcore_id = rte_lcore_id();
	if (lcore_id != LCORE_ID_ANY) {
		printf("Error: could not unregister new thread, %u still assigned\n",
			lcore_id);
		t->state = Thread_ERROR;
	}

	if (t->state != Thread_ERROR)
		t->state = Thread_DONE;

	return 0;
}

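/* Spawn as many non-EAL threads as there are free lcore slots and, if every
 * slot could be filled, one extra thread expected to get LCORE_ID_ANY, then
 * release them all and check that each thread completed without error.
 */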
static int
test_non_eal_lcores(unsigned int eal_threads_count)
{
	struct thread_context thread_contexts[RTE_MAX_LCORE];
	unsigned int non_eal_threads_count;
	RTE_ATOMIC(unsigned int) registered_count;
	struct thread_context *t;
	unsigned int i;
	int ret;

	non_eal_threads_count = 0;
	registered_count = 0;

	/* Try to create as many threads as possible. */
	for (i = 0; i < RTE_MAX_LCORE - eal_threads_count; i++) {
		t = &thread_contexts[i];
		t->state = Thread_INIT;
		t->registered_count = &registered_count;
		t->lcore_id_any = false;
		if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
			break;
		non_eal_threads_count++;
	}
	printf("non-EAL threads count: %u\n", non_eal_threads_count);
	/* Wait for all non-EAL threads to register. */
	while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
			non_eal_threads_count)
		sched_yield();

	/* If we managed to create the max number of threads, try to create
	 * one more. This allows one more check.
	 */
	if (eal_threads_count + non_eal_threads_count < RTE_MAX_LCORE)
		goto skip_lcore_any;
	t = &thread_contexts[non_eal_threads_count];
	t->state = Thread_INIT;
	t->registered_count = &registered_count;
	t->lcore_id_any = true;
	if (rte_thread_create(&t->id, NULL, thread_loop, t) == 0) {
		non_eal_threads_count++;
		printf("non-EAL threads count: %u\n", non_eal_threads_count);
		while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
				non_eal_threads_count)
			sched_yield();
	}

skip_lcore_any:
	/* Release all threads, and check their states. */
	rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
	ret = 0;
	for (i = 0; i < non_eal_threads_count; i++) {
		t = &thread_contexts[i];
		rte_thread_join(t->id, NULL);
		if (t->state != Thread_DONE)
			ret = -1;
	}

	return ret;
}

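/* Bookkeeping for the init/uninit callbacks: number of init calls so far,
 * maximum number of lcores to accept, and number of uninit calls so far.
 */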
struct limit_lcore_context {
	unsigned int init;
	unsigned int max;
	unsigned int uninit;
};

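/* Init callback: count the call and refuse the lcore once the limit is
 * exceeded.
 */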
static int
limit_lcores_init(unsigned int lcore_id __rte_unused, void *arg)
{
	struct limit_lcore_context *l = arg;

	l->init++;
	if (l->init > l->max)
		return -1;
	return 0;
}

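/* Uninit callback: only count the call. */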
static void
limit_lcores_uninit(unsigned int lcore_id __rte_unused, void *arg)
{
	struct limit_lcore_context *l = arg;

	l->uninit++;
}

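/* Exercise callback registration on EAL lcores only: first with a limit that
 * rejects the last lcore (registration must fail and roll back), then with a
 * limit covering all EAL lcores (registration and unregistration must succeed
 * with matching init/uninit call counts).
 */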
static int
test_lcores_callback(unsigned int eal_threads_count)
{
	struct limit_lcore_context l;
	void *handle;

	/* Refuse the last lcore => callback register error. */
	memset(&l, 0, sizeof(l));
	l.max = eal_threads_count - 1;
	handle = rte_lcore_callback_register("limit", limit_lcores_init,
		limit_lcores_uninit, &l);
	if (handle != NULL) {
		printf("Error: lcore callback register should have failed\n");
		goto error;
	}
	/* Refusal happens at the n-th call to the init callback.
	 * Besides, n - 1 were accepted, so we expect as many uninit calls when
	 * the rollback happens.
	 */
	if (l.init != eal_threads_count) {
		printf("Error: lcore callback register failed but incorrect init calls, expected %u, got %u\n",
			eal_threads_count, l.init);
		goto error;
	}
	if (l.uninit != eal_threads_count - 1) {
		printf("Error: lcore callback register failed but incorrect uninit calls, expected %u, got %u\n",
			eal_threads_count - 1, l.uninit);
		goto error;
	}

	/* Accept all lcores and unregister. */
	memset(&l, 0, sizeof(l));
	l.max = eal_threads_count;
	handle = rte_lcore_callback_register("limit", limit_lcores_init,
		limit_lcores_uninit, &l);
	if (handle == NULL) {
		printf("Error: lcore callback register failed\n");
		goto error;
	}
	if (l.uninit != 0) {
		printf("Error: lcore callback register succeeded but incorrect uninit calls, expected 0, got %u\n",
			l.uninit);
		goto error;
	}
	rte_lcore_callback_unregister(handle);
	handle = NULL;
	if (l.init != eal_threads_count) {
		printf("Error: lcore callback unregister done but incorrect init calls, expected %u, got %u\n",
			eal_threads_count, l.init);
		goto error;
	}
	if (l.uninit != eal_threads_count) {
		printf("Error: lcore callback unregister done but incorrect uninit calls, expected %u, got %u\n",
			eal_threads_count, l.uninit);
		goto error;
	}

	return 0;

error:
	if (handle != NULL)
		rte_lcore_callback_unregister(handle);

	return -1;
}

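/* Exercise callbacks against non-EAL threads: one callback accepts any lcore,
 * the other accepts only one extra lcore, so the first spawned thread gets a
 * valid lcore id while the second is refused and falls back to LCORE_ID_ANY.
 */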
static int
test_non_eal_lcores_callback(unsigned int eal_threads_count)
{
	struct thread_context thread_contexts[2];
	unsigned int non_eal_threads_count = 0;
	struct limit_lcore_context l[2] = {};
	RTE_ATOMIC(unsigned int) registered_count = 0;
	struct thread_context *t;
	void *handle[2] = {};
	unsigned int i;
	int ret;

	/* This test requires two empty slots to be sure lcore init refusal is
	 * because of callback execution.
	 */
	if (eal_threads_count + 2 >= RTE_MAX_LCORE)
		return 0;

	/* Register two callbacks:
	 * - the first one accepts any lcore,
	 * - the second one accepts all EAL lcores + one more for the first
	 *   non-EAL thread, then refuses the next lcore.
	 */
	l[0].max = UINT_MAX;
	handle[0] = rte_lcore_callback_register("no_limit", limit_lcores_init,
		limit_lcores_uninit, &l[0]);
	if (handle[0] == NULL) {
		printf("Error: lcore callback [0] register failed\n");
		goto error;
	}
	l[1].max = eal_threads_count + 1;
	handle[1] = rte_lcore_callback_register("limit", limit_lcores_init,
		limit_lcores_uninit, &l[1]);
	if (handle[1] == NULL) {
		printf("Error: lcore callback [1] register failed\n");
		goto error;
	}
	if (l[0].init != eal_threads_count || l[1].init != eal_threads_count) {
		printf("Error: lcore callbacks register succeeded but incorrect init calls, expected %u, %u, got %u, %u\n",
			eal_threads_count, eal_threads_count,
			l[0].init, l[1].init);
		goto error;
	}
	if (l[0].uninit != 0 || l[1].uninit != 0) {
		printf("Error: lcore callbacks register succeeded but incorrect uninit calls, expected 0, 0, got %u, %u\n",
			l[0].uninit, l[1].uninit);
		goto error;
	}
	/* First thread that expects a valid lcore id. */
	t = &thread_contexts[0];
	t->state = Thread_INIT;
	t->registered_count = &registered_count;
	t->lcore_id_any = false;
	if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
		goto cleanup_threads;
	non_eal_threads_count++;
	while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
			non_eal_threads_count)
		sched_yield();
	if (l[0].init != eal_threads_count + 1 ||
			l[1].init != eal_threads_count + 1) {
		printf("Error: incorrect init calls, expected %u, %u, got %u, %u\n",
			eal_threads_count + 1, eal_threads_count + 1,
			l[0].init, l[1].init);
		goto cleanup_threads;
	}
	if (l[0].uninit != 0 || l[1].uninit != 0) {
		printf("Error: incorrect uninit calls, expected 0, 0, got %u, %u\n",
			l[0].uninit, l[1].uninit);
		goto cleanup_threads;
	}
	/* Second thread that expects LCORE_ID_ANY because of init refusal. */
	t = &thread_contexts[1];
	t->state = Thread_INIT;
	t->registered_count = &registered_count;
	t->lcore_id_any = true;
	if (rte_thread_create(&t->id, NULL, thread_loop, t) != 0)
		goto cleanup_threads;
	non_eal_threads_count++;
	while (rte_atomic_load_explicit(&registered_count, rte_memory_order_acquire) !=
			non_eal_threads_count)
		sched_yield();
	if (l[0].init != eal_threads_count + 2 ||
			l[1].init != eal_threads_count + 2) {
		printf("Error: incorrect init calls, expected %u, %u, got %u, %u\n",
			eal_threads_count + 2, eal_threads_count + 2,
			l[0].init, l[1].init);
		goto cleanup_threads;
	}
	if (l[0].uninit != 1 || l[1].uninit != 0) {
		printf("Error: incorrect uninit calls, expected 1, 0, got %u, %u\n",
			l[0].uninit, l[1].uninit);
		goto cleanup_threads;
	}
	rte_lcore_dump(stdout);
	/* Release all threads, and check their states. */
	rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
	ret = 0;
	for (i = 0; i < non_eal_threads_count; i++) {
		t = &thread_contexts[i];
		rte_thread_join(t->id, NULL);
		if (t->state != Thread_DONE)
			ret = -1;
	}
	if (ret < 0)
		goto error;
	rte_lcore_dump(stdout);
	if (l[0].uninit != 2 || l[1].uninit != 1) {
		printf("Error: threads reported having successfully registered and unregistered, but incorrect uninit calls, expected 2, 1, got %u, %u\n",
			l[0].uninit, l[1].uninit);
		goto error;
	}
	rte_lcore_callback_unregister(handle[0]);
	rte_lcore_callback_unregister(handle[1]);
	return 0;

cleanup_threads:
	/* Release all threads. */
	rte_atomic_store_explicit(&registered_count, 0, rte_memory_order_release);
	for (i = 0; i < non_eal_threads_count; i++) {
		t = &thread_contexts[i];
		rte_thread_join(t->id, NULL);
	}
error:
	if (handle[1] != NULL)
		rte_lcore_callback_unregister(handle[1]);
	if (handle[0] != NULL)
		rte_lcore_callback_unregister(handle[0]);
	return -1;
}

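/* Body of the control thread: simply mark its context as done. */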
static uint32_t ctrl_thread_loop(void *arg)
{
	struct thread_context *t = arg;

	printf("Control thread running successfully\n");

	/* Set the thread state to DONE */
	t->state = Thread_DONE;

	return 0;
}

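/* Create one control thread, wait for it to exit and check that it ran. */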
static int
test_ctrl_thread(void)
{
	struct thread_context ctrl_thread_context;
	struct thread_context *t;

	/* Create one control thread. */
	t = &ctrl_thread_context;
	t->state = Thread_INIT;
	if (rte_thread_create_control(&t->id, "dpdk-test-ctrlt",
			ctrl_thread_loop, t) != 0)
		return -1;

	/* Wait until the control thread exits.
	 * This also acts as a barrier so that memory operations done in the
	 * control thread are visible to this thread.
	 */
	rte_thread_join(t->id, NULL);

	/* Check if the control thread set the correct state. */
	if (t->state != Thread_DONE)
		return -1;

	return 0;
}

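/* Entry point: count EAL threads, then run the non-EAL thread, callback and
 * control thread tests.
 */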
static int
test_lcores(void)
{
	unsigned int eal_threads_count = 0;
	unsigned int i;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_has_role(i, ROLE_OFF))
			eal_threads_count++;
	}
	if (eal_threads_count == 0) {
		printf("Error: something is broken, no EAL thread detected.\n");
		return TEST_FAILED;
	}
	printf("EAL threads count: %u, RTE_MAX_LCORE=%u\n", eal_threads_count,
		RTE_MAX_LCORE);
	rte_lcore_dump(stdout);

	if (test_non_eal_lcores(eal_threads_count) < 0)
		return TEST_FAILED;

	if (test_lcores_callback(eal_threads_count) < 0)
		return TEST_FAILED;

	if (test_non_eal_lcores_callback(eal_threads_count) < 0)
		return TEST_FAILED;

	if (test_ctrl_thread() < 0)
		return TEST_FAILED;

	return TEST_SUCCESS;
}

REGISTER_FAST_TEST(lcores_autotest, true, true, test_lcores);