/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <sched.h>
#include <assert.h>
#include <string.h>

#include <eal_trace_internal.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_trace_point.h>

#include "eal_internal_cfg.h"
#include "eal_private.h"
#include "eal_thread.h"
#include "eal_trace.h"

RTE_DEFINE_PER_LCORE(unsigned int, _lcore_id) = LCORE_ID_ANY;
RTE_DEFINE_PER_LCORE(int, _thread_id) = -1;
static RTE_DEFINE_PER_LCORE(unsigned int, _socket_id) =
	(unsigned int)SOCKET_ID_ANY;
static RTE_DEFINE_PER_LCORE(rte_cpuset_t, _cpuset);

unsigned rte_socket_id(void)
{
	return RTE_PER_LCORE(_socket_id);
}

static int
eal_cpuset_socket_id(rte_cpuset_t *cpusetp)
{
	unsigned cpu = 0;
	int socket_id = SOCKET_ID_ANY;
	int sid;

	if (cpusetp == NULL)
		return SOCKET_ID_ANY;

	do {
		if (!CPU_ISSET(cpu, cpusetp))
			continue;

		if (socket_id == SOCKET_ID_ANY)
			socket_id = eal_cpu_socket_id(cpu);

		sid = eal_cpu_socket_id(cpu);
		if (socket_id != sid) {
			socket_id = SOCKET_ID_ANY;
			break;
		}

	} while (++cpu < CPU_SETSIZE);

	return socket_id;
}

static void
thread_update_affinity(rte_cpuset_t *cpusetp)
{
	unsigned int lcore_id = rte_lcore_id();

	/* store socket_id in TLS for quick access */
	RTE_PER_LCORE(_socket_id) =
		eal_cpuset_socket_id(cpusetp);

	/* store cpuset in TLS for quick access */
	memmove(&RTE_PER_LCORE(_cpuset), cpusetp,
		sizeof(rte_cpuset_t));

	if (lcore_id != (unsigned)LCORE_ID_ANY) {
		/* EAL thread will update lcore_config */
		lcore_config[lcore_id].socket_id = RTE_PER_LCORE(_socket_id);
		memmove(&lcore_config[lcore_id].cpuset, cpusetp,
			sizeof(rte_cpuset_t));
	}
}

int
rte_thread_set_affinity(rte_cpuset_t *cpusetp)
{
	if (rte_thread_set_affinity_by_id(rte_thread_self(), cpusetp) != 0) {
		RTE_LOG(ERR, EAL, "rte_thread_set_affinity_by_id failed\n");
		return -1;
	}

	thread_update_affinity(cpusetp);
	return 0;
}

void
rte_thread_get_affinity(rte_cpuset_t *cpusetp)
{
	assert(cpusetp);
	memmove(cpusetp, &RTE_PER_LCORE(_cpuset),
		sizeof(rte_cpuset_t));
}

int
eal_thread_dump_affinity(rte_cpuset_t *cpuset, char *str, unsigned int size)
{
	unsigned cpu;
	int ret;
	unsigned int out = 0;

	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		if (!CPU_ISSET(cpu, cpuset))
			continue;

		ret = snprintf(str + out,
			       size - out, "%u,", cpu);
		if (ret < 0 || (unsigned)ret >= size - out) {
			/* string will be truncated */
			ret = -1;
			goto exit;
		}

		out += ret;
	}

	ret = 0;
exit:
	/* remove the last separator */
	if (out > 0)
		str[out - 1] = '\0';

	return ret;
}

int
eal_thread_dump_current_affinity(char *str, unsigned int size)
{
	rte_cpuset_t cpuset;

	rte_thread_get_affinity(&cpuset);
	return eal_thread_dump_affinity(&cpuset, str, size);
}
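
/*
 * Illustrative usage sketch (not part of the EAL itself): pin the calling
 * registered thread to CPUs 0 and 1, then dump the affinity string produced
 * by the helpers above (e.g. "0,1"). The CPU numbers are placeholders and
 * error handling is abridged.
 *
 *	rte_cpuset_t set;
 *	char buf[RTE_CPU_AFFINITY_STR_LEN];
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(0, &set);
 *	CPU_SET(1, &set);
 *	if (rte_thread_set_affinity(&set) != 0)
 *		return -1;
 *	rte_thread_get_affinity(&set);
 *	if (eal_thread_dump_affinity(&set, buf, sizeof(buf)) == 0)
 *		RTE_LOG(DEBUG, EAL, "affinity=[%s]\n", buf);
 */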

void
__rte_thread_init(unsigned int lcore_id, rte_cpuset_t *cpuset)
{
	/* set the lcore ID in per-lcore memory area */
	RTE_PER_LCORE(_lcore_id) = lcore_id;

	/* acquire system unique id */
	rte_gettid();

	thread_update_affinity(cpuset);

	__rte_trace_mem_per_thread_alloc();
}

void
__rte_thread_uninit(void)
{
	trace_mem_per_thread_free();

	RTE_PER_LCORE(_lcore_id) = LCORE_ID_ANY;
}

/* main loop of threads */
__rte_noreturn uint32_t
eal_thread_loop(void *arg)
{
	unsigned int lcore_id = (uintptr_t)arg;
	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
	int ret;

	__rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset);

	ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
	RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
		lcore_id, rte_thread_self().opaque_id, cpuset,
		ret == 0 ? "" : "...");

	rte_eal_trace_thread_lcore_ready(lcore_id, cpuset);

	/* read on our pipe to get commands */
	while (1) {
		lcore_function_t *f;
		void *fct_arg;

		eal_thread_wait_command();

		/* Set the state to 'RUNNING'. Use release order
		 * since 'state' variable is used as the guard variable.
		 */
		__atomic_store_n(&lcore_config[lcore_id].state, RUNNING,
			__ATOMIC_RELEASE);

		eal_thread_ack_command();

		/* Load 'f' with acquire order to ensure that
		 * the memory operations from the main thread
		 * are accessed only after update to 'f' is visible.
		 * Wait till the update to 'f' is visible to the worker.
		 */
		while ((f = __atomic_load_n(&lcore_config[lcore_id].f,
				__ATOMIC_ACQUIRE)) == NULL)
			rte_pause();

		rte_eal_trace_thread_lcore_running(lcore_id, f);

		/* call the function and store the return value */
		fct_arg = lcore_config[lcore_id].arg;
		ret = f(fct_arg);
		lcore_config[lcore_id].ret = ret;
		lcore_config[lcore_id].f = NULL;
		lcore_config[lcore_id].arg = NULL;

		/* Store the state with release order to ensure that
		 * the memory operations from the worker thread
		 * are completed before the state is updated.
		 * Use 'state' as the guard variable.
		 */
		__atomic_store_n(&lcore_config[lcore_id].state, WAIT,
			__ATOMIC_RELEASE);

		rte_eal_trace_thread_lcore_stopped(lcore_id);
	}

	/* never reached */
	/* return 0; */
}
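
/*
 * Illustrative sketch of the launch protocol seen from the application side
 * (not part of this file, names are placeholders): the main lcore publishes
 * a function with rte_eal_remote_launch(), eal_thread_loop() above picks it
 * up once 'f' becomes visible, and rte_eal_wait_lcore() collects the return
 * value after the worker goes back to the WAIT state. Error handling is
 * abridged.
 *
 *	static int
 *	hello(void *arg __rte_unused)
 *	{
 *		printf("hello from lcore %u\n", rte_lcore_id());
 *		return 0;
 *	}
 *
 *	unsigned int worker_id = rte_get_next_lcore(-1, 1, 0);
 *	if (rte_eal_remote_launch(hello, NULL, worker_id) == 0)
 *		(void)rte_eal_wait_lcore(worker_id);
 */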

enum __rte_ctrl_thread_status {
	CTRL_THREAD_LAUNCHING, /* Yet to call pthread_create function */
	CTRL_THREAD_RUNNING, /* Control thread is running successfully */
	CTRL_THREAD_ERROR /* Control thread encountered an error */
};

struct rte_thread_ctrl_params {
	union {
		void *(*ctrl_start_routine)(void *arg);
		rte_thread_func control_start_routine;
	} u;
	void *arg;
	int ret;
	/* Control thread status.
	 * If the status is CTRL_THREAD_ERROR, 'ret' has the error code.
	 */
	enum __rte_ctrl_thread_status ctrl_thread_status;
};

static int ctrl_thread_init(void *arg)
{
	struct internal_config *internal_conf =
		eal_get_internal_configuration();
	rte_cpuset_t *cpuset = &internal_conf->ctrl_cpuset;
	struct rte_thread_ctrl_params *params = arg;

	__rte_thread_init(rte_lcore_id(), cpuset);
	/* Set control thread socket ID to SOCKET_ID_ANY
	 * as control threads may be scheduled on any NUMA node.
	 */
	RTE_PER_LCORE(_socket_id) = SOCKET_ID_ANY;
	params->ret = rte_thread_set_affinity_by_id(rte_thread_self(), cpuset);
	if (params->ret != 0) {
		__atomic_store_n(&params->ctrl_thread_status,
			CTRL_THREAD_ERROR, __ATOMIC_RELEASE);
		return 1;
	}

	__atomic_store_n(&params->ctrl_thread_status,
		CTRL_THREAD_RUNNING, __ATOMIC_RELEASE);

	return 0;
}

static void *ctrl_thread_start(void *arg)
{
	struct rte_thread_ctrl_params *params = arg;
	void *start_arg = params->arg;
	void *(*start_routine)(void *) = params->u.ctrl_start_routine;

	if (ctrl_thread_init(arg) != 0)
		return NULL;

	return start_routine(start_arg);
}

static uint32_t control_thread_start(void *arg)
{
	struct rte_thread_ctrl_params *params = arg;
	void *start_arg = params->arg;
	rte_thread_func start_routine = params->u.control_start_routine;

	if (ctrl_thread_init(arg) != 0)
		return 0;

	return start_routine(start_arg);
}
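
/*
 * Illustrative usage sketch for the control-thread creation helpers below
 * (not part of this file, names are placeholders): spawn a control thread
 * with rte_thread_create_control(), passing NULL attributes so the internal
 * control cpuset is used. Error handling is abridged.
 *
 *	static uint32_t
 *	ctrl_main(void *arg __rte_unused)
 *	{
 *		// runs with lcore_id == LCORE_ID_ANY and socket SOCKET_ID_ANY
 *		return 0;
 *	}
 *
 *	rte_thread_t tid;
 *	if (rte_thread_create_control(&tid, "example-ctrl", NULL,
 *			ctrl_main, NULL) != 0)
 *		return -1;
 *	rte_thread_join(tid, NULL);
 */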

int
rte_ctrl_thread_create(pthread_t *thread, const char *name,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	struct rte_thread_ctrl_params *params;
	enum __rte_ctrl_thread_status ctrl_thread_status;
	int ret;

	params = malloc(sizeof(*params));
	if (!params)
		return -ENOMEM;

	params->u.ctrl_start_routine = start_routine;
	params->arg = arg;
	params->ret = 0;
	params->ctrl_thread_status = CTRL_THREAD_LAUNCHING;

	ret = pthread_create(thread, attr, ctrl_thread_start, (void *)params);
	if (ret != 0) {
		free(params);
		return -ret;
	}

	if (name != NULL)
		rte_thread_set_name((rte_thread_t){(uintptr_t)*thread}, name);

	/* Wait for the control thread to initialize successfully */
	while ((ctrl_thread_status =
			__atomic_load_n(&params->ctrl_thread_status,
			__ATOMIC_ACQUIRE)) == CTRL_THREAD_LAUNCHING) {
		/* Yield the CPU. Using sched_yield call requires maintaining
		 * another implementation for Windows as sched_yield is not
		 * supported on Windows.
		 */
		rte_delay_us_sleep(1);
	}

	/* Check if the control thread encountered an error */
	if (ctrl_thread_status == CTRL_THREAD_ERROR) {
		/* ctrl thread is exiting */
		rte_thread_join((rte_thread_t){(uintptr_t)*thread}, NULL);
	}

	ret = params->ret;
	free(params);

	return -ret;
}

int
rte_thread_create_control(rte_thread_t *thread, const char *name,
		const rte_thread_attr_t *attr, rte_thread_func start_routine,
		void *arg)
{
	struct rte_thread_ctrl_params *params;
	enum __rte_ctrl_thread_status ctrl_thread_status;
	int ret;

	params = malloc(sizeof(*params));
	if (params == NULL)
		return -ENOMEM;

	params->u.control_start_routine = start_routine;
	params->arg = arg;
	params->ret = 0;
	params->ctrl_thread_status = CTRL_THREAD_LAUNCHING;

	ret = rte_thread_create(thread, attr, control_thread_start, params);
	if (ret != 0) {
		free(params);
		return -ret;
	}

	if (name != NULL)
		rte_thread_set_name(*thread, name);

	/* Wait for the control thread to initialize successfully */
	while ((ctrl_thread_status =
			__atomic_load_n(&params->ctrl_thread_status,
			__ATOMIC_ACQUIRE)) == CTRL_THREAD_LAUNCHING) {
		rte_delay_us_sleep(1);
	}

	/* Check if the control thread encountered an error */
	if (ctrl_thread_status == CTRL_THREAD_ERROR) {
		/* ctrl thread is exiting */
		rte_thread_join(*thread, NULL);
	}

	ret = params->ret;
	free(params);

	return ret;
}

int
rte_thread_register(void)
{
	unsigned int lcore_id;
	rte_cpuset_t cpuset;

	/* EAL init flushes all lcores, we can't register before. */
	if (eal_get_internal_configuration()->init_complete != 1) {
		RTE_LOG(DEBUG, EAL, "Called %s before EAL init.\n", __func__);
		rte_errno = EINVAL;
		return -1;
	}
	if (!rte_mp_disable()) {
		RTE_LOG(ERR, EAL, "Multiprocess in use, registering non-EAL threads is not supported.\n");
		rte_errno = EINVAL;
		return -1;
	}
	if (rte_thread_get_affinity_by_id(rte_thread_self(), &cpuset) != 0)
		CPU_ZERO(&cpuset);
	lcore_id = eal_lcore_non_eal_allocate();
	if (lcore_id >= RTE_MAX_LCORE)
		lcore_id = LCORE_ID_ANY;
	__rte_thread_init(lcore_id, &cpuset);
	if (lcore_id == LCORE_ID_ANY) {
		rte_errno = ENOMEM;
		return -1;
	}
	RTE_LOG(DEBUG, EAL, "Registered non-EAL thread as lcore %u.\n",
		lcore_id);
	return 0;
}

void
rte_thread_unregister(void)
{
	unsigned int lcore_id = rte_lcore_id();

	if (lcore_id != LCORE_ID_ANY)
		eal_lcore_non_eal_release(lcore_id);
	__rte_thread_uninit();
	if (lcore_id != LCORE_ID_ANY)
		RTE_LOG(DEBUG, EAL, "Unregistered non-EAL thread (was lcore %u).\n",
			lcore_id);
}
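
/*
 * Illustrative usage sketch for non-EAL thread registration (not part of
 * this file): a thread created outside the EAL registers itself to obtain
 * an lcore ID, uses lcore-aware APIs, then unregisters before exiting.
 * Error handling is abridged.
 *
 *	if (rte_thread_register() != 0)
 *		return -1;	// rte_errno holds the failure reason
 *	RTE_LOG(DEBUG, EAL, "running as lcore %u\n", rte_lcore_id());
 *	// ... use lcore-aware APIs here ...
 *	rte_thread_unregister();
 */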

int
rte_thread_attr_init(rte_thread_attr_t *attr)
{
	if (attr == NULL)
		return EINVAL;

	CPU_ZERO(&attr->cpuset);
	attr->priority = RTE_THREAD_PRIORITY_NORMAL;

	return 0;
}

int
rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr,
		enum rte_thread_priority priority)
{
	if (thread_attr == NULL)
		return EINVAL;

	thread_attr->priority = priority;

	return 0;
}

int
rte_thread_attr_set_affinity(rte_thread_attr_t *thread_attr,
		rte_cpuset_t *cpuset)
{
	if (thread_attr == NULL)
		return EINVAL;

	if (cpuset == NULL)
		return EINVAL;

	thread_attr->cpuset = *cpuset;

	return 0;
}

int
rte_thread_attr_get_affinity(rte_thread_attr_t *thread_attr,
		rte_cpuset_t *cpuset)
{
	if (thread_attr == NULL)
		return EINVAL;

	if (cpuset == NULL)
		return EINVAL;

	*cpuset = thread_attr->cpuset;

	return 0;
}
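
/*
 * Illustrative usage sketch for the thread attribute helpers above (not
 * part of this file, names are placeholders): build an attribute set
 * pinning a new thread to CPU 2 at normal priority and hand it to
 * rte_thread_create(). Error handling is abridged.
 *
 *	static uint32_t
 *	worker_main(void *arg __rte_unused)
 *	{
 *		return 0;
 *	}
 *
 *	rte_thread_attr_t attr;
 *	rte_cpuset_t set;
 *	rte_thread_t tid;
 *
 *	rte_thread_attr_init(&attr);
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);
 *	rte_thread_attr_set_affinity(&attr, &set);
 *	rte_thread_attr_set_priority(&attr, RTE_THREAD_PRIORITY_NORMAL);
 *	if (rte_thread_create(&tid, &attr, worker_main, NULL) != 0)
 *		return -1;
 *	rte_thread_join(tid, NULL);
 */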