/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2020 Red Hat, Inc.
 */

#include <limits.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_lcore.h>

#include "test.h"

struct thread_context {
	enum { INIT, ERROR, DONE } state; /* set to DONE or ERROR by the thread */
	bool lcore_id_any; /* expect LCORE_ID_ANY after registering */
	pthread_t id;
	unsigned int *registered_count; /* shared counter, see thread_loop() */
};

static void *thread_loop(void *arg)
{
	struct thread_context *t = arg;
	unsigned int lcore_id;

	lcore_id = rte_lcore_id();
	if (lcore_id != LCORE_ID_ANY) {
		printf("Error: incorrect lcore id for new thread %u\n", lcore_id);
		t->state = ERROR;
	}
	if (rte_thread_register() < 0)
		printf("Warning: could not register new thread (this might be expected during this test), reason %s\n",
			rte_strerror(rte_errno));
	lcore_id = rte_lcore_id();
	if ((t->lcore_id_any && lcore_id != LCORE_ID_ANY) ||
			(!t->lcore_id_any && lcore_id == LCORE_ID_ANY)) {
		printf("Error: could not register new thread, got %u while %sexpecting %u\n",
			lcore_id, t->lcore_id_any ? "" : "not ", LCORE_ID_ANY);
		t->state = ERROR;
	}
	/* Report register happened to the control thread. */
	__atomic_add_fetch(t->registered_count, 1, __ATOMIC_RELEASE);

	/* Wait for release from the control thread. */
	while (__atomic_load_n(t->registered_count, __ATOMIC_ACQUIRE) != 0)
		;
	rte_thread_unregister();
	lcore_id = rte_lcore_id();
	if (lcore_id != LCORE_ID_ANY) {
		printf("Error: could not unregister new thread, %u still assigned\n",
			lcore_id);
		t->state = ERROR;
	}

	if (t->state != ERROR)
		t->state = DONE;

	return NULL;
}
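
/*
 * Try to spawn a non-EAL thread for every free lcore slot and have each one
 * register with EAL. Synchronization with the workers goes through
 * registered_count: each worker increments it (release) once registered, the
 * control thread polls it (acquire) until all workers have checked in, then
 * resets it to zero (release) to tell the workers to unregister and exit.
 * If every slot gets used, one extra thread is created to check that it is
 * left with LCORE_ID_ANY.
 */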
static int
test_non_eal_lcores(unsigned int eal_threads_count)
{
	struct thread_context thread_contexts[RTE_MAX_LCORE];
	unsigned int non_eal_threads_count;
	unsigned int registered_count;
	struct thread_context *t;
	unsigned int i;
	int ret;

	non_eal_threads_count = 0;
	registered_count = 0;

	/* Try to create as many threads as possible. */
	for (i = 0; i < RTE_MAX_LCORE - eal_threads_count; i++) {
		t = &thread_contexts[i];
		t->state = INIT;
		t->registered_count = &registered_count;
		t->lcore_id_any = false;
		if (pthread_create(&t->id, NULL, thread_loop, t) != 0)
			break;
		non_eal_threads_count++;
	}
	printf("non-EAL threads count: %u\n", non_eal_threads_count);
	/* Wait for all non-EAL threads to register. */
	while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
			non_eal_threads_count)
		;

	/* We managed to create the max number of threads, let's try to create
	 * one more. This will allow one more check.
	 */
	if (eal_threads_count + non_eal_threads_count < RTE_MAX_LCORE)
		goto skip_lcore_any;
	t = &thread_contexts[non_eal_threads_count];
	t->state = INIT;
	t->registered_count = &registered_count;
	t->lcore_id_any = true;
	if (pthread_create(&t->id, NULL, thread_loop, t) == 0) {
		non_eal_threads_count++;
		printf("non-EAL threads count: %u\n", non_eal_threads_count);
		while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
				non_eal_threads_count)
			;
	}

skip_lcore_any:
	/* Release all threads, and check their states. */
	__atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
	ret = 0;
	for (i = 0; i < non_eal_threads_count; i++) {
		t = &thread_contexts[i];
		pthread_join(t->id, NULL);
		if (t->state != DONE)
			ret = -1;
	}

	return ret;
}
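
/*
 * Counters shared with the lcore callbacks below: limit_lcores_init() counts
 * init calls and refuses the lcore once more than 'max' calls have been seen,
 * limit_lcores_uninit() counts uninit calls (including those triggered by the
 * rollback after a refusal).
 */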
struct limit_lcore_context {
	unsigned int init;
	unsigned int max;
	unsigned int uninit;
};

static int
limit_lcores_init(unsigned int lcore_id __rte_unused, void *arg)
{
	struct limit_lcore_context *l = arg;

	l->init++;
	if (l->init > l->max)
		return -1;
	return 0;
}

static void
limit_lcores_uninit(unsigned int lcore_id __rte_unused, void *arg)
{
	struct limit_lcore_context *l = arg;

	l->uninit++;
}

static int
test_lcores_callback(unsigned int eal_threads_count)
{
	struct limit_lcore_context l;
	void *handle;

	/* Refuse the last lcore => callback register error. */
	memset(&l, 0, sizeof(l));
	l.max = eal_threads_count - 1;
	handle = rte_lcore_callback_register("limit", limit_lcores_init,
		limit_lcores_uninit, &l);
	if (handle != NULL) {
		printf("Error: lcore callback register should have failed\n");
		goto error;
	}
	/* Refusal happens at the n-th call to the init callback.
	 * The first n - 1 calls were accepted, so we expect as many uninit
	 * calls when the rollback happens.
	 */
	if (l.init != eal_threads_count) {
		printf("Error: lcore callback register failed but incorrect init calls, expected %u, got %u\n",
			eal_threads_count, l.init);
		goto error;
	}
	if (l.uninit != eal_threads_count - 1) {
		printf("Error: lcore callback register failed but incorrect uninit calls, expected %u, got %u\n",
			eal_threads_count - 1, l.uninit);
		goto error;
	}

	/* Accept all lcores and unregister. */
	memset(&l, 0, sizeof(l));
	l.max = eal_threads_count;
	handle = rte_lcore_callback_register("limit", limit_lcores_init,
		limit_lcores_uninit, &l);
	if (handle == NULL) {
		printf("Error: lcore callback register failed\n");
		goto error;
	}
	if (l.uninit != 0) {
		printf("Error: lcore callback register succeeded but incorrect uninit calls, expected 0, got %u\n",
			l.uninit);
		goto error;
	}
	rte_lcore_callback_unregister(handle);
	handle = NULL;
	if (l.init != eal_threads_count) {
		printf("Error: lcore callback unregister done but incorrect init calls, expected %u, got %u\n",
			eal_threads_count, l.init);
		goto error;
	}
	if (l.uninit != eal_threads_count) {
		printf("Error: lcore callback unregister done but incorrect uninit calls, expected %u, got %u\n",
			eal_threads_count, l.uninit);
		goto error;
	}

	return 0;

error:
	if (handle != NULL)
		rte_lcore_callback_unregister(handle);

	return -1;
}

static int
test_non_eal_lcores_callback(unsigned int eal_threads_count)
{
	struct thread_context thread_contexts[2];
	unsigned int non_eal_threads_count = 0;
	struct limit_lcore_context l[2] = {};
	unsigned int registered_count = 0;
	struct thread_context *t;
	void *handle[2] = {};
	unsigned int i;
	int ret;

	/* This test requires two empty slots to be sure lcore init refusal is
	 * because of callback execution.
	 */
	if (eal_threads_count + 2 >= RTE_MAX_LCORE)
		return 0;

	/* Register two callbacks:
	 * - the first one accepts any lcore,
	 * - the second one accepts all EAL lcores + one more for the first
	 *   non-EAL thread, then refuses the next lcore.
	 */
	l[0].max = UINT_MAX;
	handle[0] = rte_lcore_callback_register("no_limit", limit_lcores_init,
		limit_lcores_uninit, &l[0]);
	if (handle[0] == NULL) {
		printf("Error: lcore callback [0] register failed\n");
		goto error;
	}
	l[1].max = eal_threads_count + 1;
	handle[1] = rte_lcore_callback_register("limit", limit_lcores_init,
		limit_lcores_uninit, &l[1]);
	if (handle[1] == NULL) {
		printf("Error: lcore callback [1] register failed\n");
		goto error;
	}
	if (l[0].init != eal_threads_count || l[1].init != eal_threads_count) {
		printf("Error: lcore callbacks register succeeded but incorrect init calls, expected %u, %u, got %u, %u\n",
			eal_threads_count, eal_threads_count,
			l[0].init, l[1].init);
		goto error;
	}
	if (l[0].uninit != 0 || l[1].uninit != 0) {
		printf("Error: lcore callbacks register succeeded but incorrect uninit calls, expected 0, 0, got %u, %u\n",
			l[0].uninit, l[1].uninit);
		goto error;
	}
	/* First thread, which expects a valid lcore id. */
	t = &thread_contexts[0];
	t->state = INIT;
	t->registered_count = &registered_count;
	t->lcore_id_any = false;
	if (pthread_create(&t->id, NULL, thread_loop, t) != 0)
		goto cleanup_threads;
	non_eal_threads_count++;
	while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
			non_eal_threads_count)
		;
	if (l[0].init != eal_threads_count + 1 ||
			l[1].init != eal_threads_count + 1) {
		printf("Error: incorrect init calls, expected %u, %u, got %u, %u\n",
			eal_threads_count + 1, eal_threads_count + 1,
			l[0].init, l[1].init);
		goto cleanup_threads;
	}
	if (l[0].uninit != 0 || l[1].uninit != 0) {
		printf("Error: incorrect uninit calls, expected 0, 0, got %u, %u\n",
			l[0].uninit, l[1].uninit);
		goto cleanup_threads;
	}
	/* Second thread, which expects LCORE_ID_ANY because of init refusal. */
	t = &thread_contexts[1];
	t->state = INIT;
	t->registered_count = &registered_count;
	t->lcore_id_any = true;
	if (pthread_create(&t->id, NULL, thread_loop, t) != 0)
		goto cleanup_threads;
	non_eal_threads_count++;
	while (__atomic_load_n(&registered_count, __ATOMIC_ACQUIRE) !=
			non_eal_threads_count)
		;
	if (l[0].init != eal_threads_count + 2 ||
			l[1].init != eal_threads_count + 2) {
		printf("Error: incorrect init calls, expected %u, %u, got %u, %u\n",
			eal_threads_count + 2, eal_threads_count + 2,
			l[0].init, l[1].init);
		goto cleanup_threads;
	}
	if (l[0].uninit != 1 || l[1].uninit != 0) {
		printf("Error: incorrect uninit calls, expected 1, 0, got %u, %u\n",
			l[0].uninit, l[1].uninit);
		goto cleanup_threads;
	}
	rte_lcore_dump(stdout);
	/* Release all threads, and check their states. */
	__atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
	ret = 0;
	for (i = 0; i < non_eal_threads_count; i++) {
		t = &thread_contexts[i];
		pthread_join(t->id, NULL);
		if (t->state != DONE)
			ret = -1;
	}
	if (ret < 0)
		goto error;
	rte_lcore_dump(stdout);
	if (l[0].uninit != 2 || l[1].uninit != 1) {
		printf("Error: threads reported having successfully registered and unregistered, but incorrect uninit calls, expected 2, 1, got %u, %u\n",
			l[0].uninit, l[1].uninit);
		goto error;
	}
	rte_lcore_callback_unregister(handle[0]);
	rte_lcore_callback_unregister(handle[1]);
	return 0;

cleanup_threads:
	/* Release all threads */
	__atomic_store_n(&registered_count, 0, __ATOMIC_RELEASE);
	for (i = 0; i < non_eal_threads_count; i++) {
		t = &thread_contexts[i];
		pthread_join(t->id, NULL);
	}
error:
	if (handle[1] != NULL)
		rte_lcore_callback_unregister(handle[1]);
	if (handle[0] != NULL)
		rte_lcore_callback_unregister(handle[0]);
	return -1;
}

static int
test_lcores(void)
{
	unsigned int eal_threads_count = 0;
	unsigned int i;

	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_has_role(i, ROLE_OFF))
			eal_threads_count++;
	}
	if (eal_threads_count == 0) {
		printf("Error: something is broken, no EAL thread detected.\n");
		return TEST_FAILED;
	}
	printf("EAL threads count: %u, RTE_MAX_LCORE=%u\n", eal_threads_count,
		RTE_MAX_LCORE);
	rte_lcore_dump(stdout);

	if (test_non_eal_lcores(eal_threads_count) < 0)
		return TEST_FAILED;

	if (test_lcores_callback(eal_threads_count) < 0)
		return TEST_FAILED;

	if (test_non_eal_lcores_callback(eal_threads_count) < 0)
		return TEST_FAILED;

	return TEST_SUCCESS;
}

REGISTER_TEST_COMMAND(lcores_autotest, test_lcores);