/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <sched.h>
#include <assert.h>
#include <string.h>

#include <eal_trace_internal.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_trace_point.h>

#include "eal_internal_cfg.h"
#include "eal_private.h"
#include "eal_thread.h"
#include "eal_trace.h"

RTE_DEFINE_PER_LCORE(unsigned int, _lcore_id) = LCORE_ID_ANY;
RTE_DEFINE_PER_LCORE(int, _thread_id) = -1;
static RTE_DEFINE_PER_LCORE(unsigned int, _socket_id) =
	(unsigned int)SOCKET_ID_ANY;
static RTE_DEFINE_PER_LCORE(rte_cpuset_t, _cpuset);

unsigned rte_socket_id(void)
{
	return RTE_PER_LCORE(_socket_id);
}

static int
eal_cpuset_socket_id(rte_cpuset_t *cpusetp)
{
	unsigned cpu = 0;
	int socket_id = SOCKET_ID_ANY;
	int sid;

	if (cpusetp == NULL)
		return SOCKET_ID_ANY;

	do {
		if (!CPU_ISSET(cpu, cpusetp))
			continue;

		if (socket_id == SOCKET_ID_ANY)
			socket_id = eal_cpu_socket_id(cpu);

		sid = eal_cpu_socket_id(cpu);
		if (socket_id != sid) {
			socket_id = SOCKET_ID_ANY;
			break;
		}

	} while (++cpu < CPU_SETSIZE);

	return socket_id;
}

static void
thread_update_affinity(rte_cpuset_t *cpusetp)
{
	unsigned int lcore_id = rte_lcore_id();

	/* store socket_id in TLS for quick access */
	RTE_PER_LCORE(_socket_id) =
		eal_cpuset_socket_id(cpusetp);

	/* store cpuset in TLS for quick access */
	memmove(&RTE_PER_LCORE(_cpuset), cpusetp,
		sizeof(rte_cpuset_t));

	if (lcore_id != (unsigned)LCORE_ID_ANY) {
		/* EAL thread will update lcore_config */
		lcore_config[lcore_id].socket_id = RTE_PER_LCORE(_socket_id);
		memmove(&lcore_config[lcore_id].cpuset, cpusetp,
			sizeof(rte_cpuset_t));
	}
}

int
rte_thread_set_affinity(rte_cpuset_t *cpusetp)
{
	if (rte_thread_set_affinity_by_id(rte_thread_self(), cpusetp) != 0) {
		RTE_LOG(ERR, EAL, "rte_thread_set_affinity_by_id failed\n");
		return -1;
	}

	thread_update_affinity(cpusetp);
	return 0;
}

void
rte_thread_get_affinity(rte_cpuset_t *cpusetp)
{
	assert(cpusetp);
	memmove(cpusetp, &RTE_PER_LCORE(_cpuset),
		sizeof(rte_cpuset_t));
}

int
eal_thread_dump_affinity(rte_cpuset_t *cpuset, char *str, unsigned int size)
{
	unsigned cpu;
	int ret;
	unsigned int out = 0;

	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		if (!CPU_ISSET(cpu, cpuset))
			continue;

		ret = snprintf(str + out,
			size - out, "%u,", cpu);
		if (ret < 0 || (unsigned)ret >= size - out) {
			/* string will be truncated */
			ret = -1;
			goto exit;
		}

		out += ret;
	}

	ret = 0;
exit:
	/* remove the last separator */
	if (out > 0)
		str[out - 1] = '\0';

	return ret;
}

int
eal_thread_dump_current_affinity(char *str, unsigned int size)
{
	rte_cpuset_t cpuset;

	rte_thread_get_affinity(&cpuset);
	return eal_thread_dump_affinity(&cpuset, str, size);
}

void
__rte_thread_init(unsigned int lcore_id, rte_cpuset_t *cpuset)
{
	/* set the lcore ID in per-lcore memory area */
	RTE_PER_LCORE(_lcore_id) = lcore_id;

	/* acquire system unique id */
	rte_gettid();

	thread_update_affinity(cpuset);

	__rte_trace_mem_per_thread_alloc();
}
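/*
 * Illustrative sketch only (not part of this file): how an application
 * thread might use the affinity helpers above to pin itself to one CPU
 * and log the resulting cpuset. The CPU number is hypothetical and
 * error handling is abbreviated; note that
 * eal_thread_dump_current_affinity() is EAL-internal.
 *
 *	rte_cpuset_t set;
 *	char buf[RTE_CPU_AFFINITY_STR_LEN];
 *
 *	CPU_ZERO(&set);
 *	CPU_SET(2, &set);		// assumes CPU 2 exists on this system
 *	if (rte_thread_set_affinity(&set) != 0)
 *		return;			// affinity could not be applied
 *	if (eal_thread_dump_current_affinity(buf, sizeof(buf)) == 0)
 *		printf("thread now runs on cpuset [%s]\n", buf);
 */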

void
__rte_thread_uninit(void)
{
	trace_mem_per_thread_free();

	RTE_PER_LCORE(_lcore_id) = LCORE_ID_ANY;
}

/* main loop of threads */
__rte_noreturn uint32_t
eal_thread_loop(void *arg)
{
	unsigned int lcore_id = (uintptr_t)arg;
	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
	int ret;

	__rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset);

	ret = eal_thread_dump_current_affinity(cpuset, sizeof(cpuset));
	RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%zx;cpuset=[%s%s])\n",
		lcore_id, (uintptr_t)pthread_self(), cpuset,
		ret == 0 ? "" : "...");

	rte_eal_trace_thread_lcore_ready(lcore_id, cpuset);

	/* read on our pipe to get commands */
	while (1) {
		lcore_function_t *f;
		void *fct_arg;

		eal_thread_wait_command();

		/* Set the state to 'RUNNING'. Use release order
		 * since 'state' variable is used as the guard variable.
		 */
		__atomic_store_n(&lcore_config[lcore_id].state, RUNNING,
			__ATOMIC_RELEASE);

		eal_thread_ack_command();

		/* Load 'f' with acquire order to ensure that
		 * the memory operations from the main thread
		 * are accessed only after update to 'f' is visible.
		 * Wait till the update to 'f' is visible to the worker.
		 */
		while ((f = __atomic_load_n(&lcore_config[lcore_id].f,
				__ATOMIC_ACQUIRE)) == NULL)
			rte_pause();

		/* call the function and store the return value */
		fct_arg = lcore_config[lcore_id].arg;
		ret = f(fct_arg);
		lcore_config[lcore_id].ret = ret;
		lcore_config[lcore_id].f = NULL;
		lcore_config[lcore_id].arg = NULL;

		/* Store the state with release order to ensure that
		 * the memory operations from the worker thread
		 * are completed before the state is updated.
		 * Use 'state' as the guard variable.
		 */
		__atomic_store_n(&lcore_config[lcore_id].state, WAIT,
			__ATOMIC_RELEASE);
	}

	/* never reached */
	/* return 0; */
}

enum __rte_ctrl_thread_status {
	CTRL_THREAD_LAUNCHING, /* Yet to call pthread_create function */
	CTRL_THREAD_RUNNING, /* Control thread is running successfully */
	CTRL_THREAD_ERROR /* Control thread encountered an error */
};

struct rte_thread_ctrl_params {
	union {
		void *(*ctrl_start_routine)(void *arg);
		rte_thread_func control_start_routine;
	} u;
	void *arg;
	int ret;
	/* Control thread status.
	 * If the status is CTRL_THREAD_ERROR, 'ret' has the error code.
	 */
	enum __rte_ctrl_thread_status ctrl_thread_status;
};

static int ctrl_thread_init(void *arg)
{
	struct internal_config *internal_conf =
		eal_get_internal_configuration();
	rte_cpuset_t *cpuset = &internal_conf->ctrl_cpuset;
	struct rte_thread_ctrl_params *params = arg;

	__rte_thread_init(rte_lcore_id(), cpuset);
	params->ret = rte_thread_set_affinity_by_id(rte_thread_self(), cpuset);
	if (params->ret != 0) {
		__atomic_store_n(&params->ctrl_thread_status,
			CTRL_THREAD_ERROR, __ATOMIC_RELEASE);
		return params->ret;
	}

	__atomic_store_n(&params->ctrl_thread_status,
		CTRL_THREAD_RUNNING, __ATOMIC_RELEASE);

	return 0;
}

static void *ctrl_thread_start(void *arg)
{
	struct rte_thread_ctrl_params *params = arg;
	void *(*start_routine)(void *) = params->u.ctrl_start_routine;

	if (ctrl_thread_init(arg) != 0)
		return NULL;

	return start_routine(params->arg);
}

static uint32_t control_thread_start(void *arg)
{
	struct rte_thread_ctrl_params *params = arg;
	rte_thread_func start_routine = params->u.control_start_routine;

	if (ctrl_thread_init(arg) != 0)
		return params->ret;

	return start_routine(params->arg);
}

int
rte_ctrl_thread_create(pthread_t *thread, const char *name,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	struct rte_thread_ctrl_params *params;
	enum __rte_ctrl_thread_status ctrl_thread_status;
	int ret;

	params = malloc(sizeof(*params));
	if (!params)
		return -ENOMEM;

	params->u.ctrl_start_routine = start_routine;
	params->arg = arg;
	params->ret = 0;
	params->ctrl_thread_status = CTRL_THREAD_LAUNCHING;

	ret = pthread_create(thread, attr, ctrl_thread_start, (void *)params);
	if (ret != 0) {
		free(params);
		return -ret;
	}

	if (name != NULL)
		rte_thread_set_name((rte_thread_t){(uintptr_t)*thread}, name);

	/* Wait for the control thread to initialize successfully */
	while ((ctrl_thread_status =
			__atomic_load_n(&params->ctrl_thread_status,
			__ATOMIC_ACQUIRE)) == CTRL_THREAD_LAUNCHING) {
		/* Yield the CPU. Using sched_yield call requires maintaining
		 * another implementation for Windows as sched_yield is not
		 * supported on Windows.
		 */
		rte_delay_us_sleep(1);
	}

	/* Check if the control thread encountered an error */
	if (ctrl_thread_status == CTRL_THREAD_ERROR) {
		/* ctrl thread is exiting */
		pthread_join(*thread, NULL);
	}

	ret = params->ret;
	free(params);

	return -ret;
}

int
rte_thread_create_control(rte_thread_t *thread, const char *name,
		const rte_thread_attr_t *attr, rte_thread_func start_routine,
		void *arg)
{
	struct rte_thread_ctrl_params *params;
	enum __rte_ctrl_thread_status ctrl_thread_status;
	int ret;

	params = malloc(sizeof(*params));
	if (params == NULL)
		return -ENOMEM;

	params->u.control_start_routine = start_routine;
	params->arg = arg;
	params->ret = 0;
	params->ctrl_thread_status = CTRL_THREAD_LAUNCHING;

	ret = rte_thread_create(thread, attr, control_thread_start, params);
	if (ret != 0) {
		free(params);
		return -ret;
	}

	if (name != NULL)
		rte_thread_set_name(*thread, name);

	/* Wait for the control thread to initialize successfully */
	while ((ctrl_thread_status =
			__atomic_load_n(&params->ctrl_thread_status,
			__ATOMIC_ACQUIRE)) == CTRL_THREAD_LAUNCHING) {
		rte_delay_us_sleep(1);
	}

	/* Check if the control thread encountered an error */
	if (ctrl_thread_status == CTRL_THREAD_ERROR) {
		/* ctrl thread is exiting */
		rte_thread_join(*thread, NULL);
	}

	ret = params->ret;
	free(params);

	return ret;
}

int
rte_thread_register(void)
{
	unsigned int lcore_id;
	rte_cpuset_t cpuset;

	/* EAL init flushes all lcores, we can't register before. */
	if (eal_get_internal_configuration()->init_complete != 1) {
		RTE_LOG(DEBUG, EAL, "Called %s before EAL init.\n", __func__);
		rte_errno = EINVAL;
		return -1;
	}
	if (!rte_mp_disable()) {
		RTE_LOG(ERR, EAL, "Multiprocess in use, registering non-EAL threads is not supported.\n");
		rte_errno = EINVAL;
		return -1;
	}
	if (rte_thread_get_affinity_by_id(rte_thread_self(), &cpuset) != 0)
		CPU_ZERO(&cpuset);
	lcore_id = eal_lcore_non_eal_allocate();
	if (lcore_id >= RTE_MAX_LCORE)
		lcore_id = LCORE_ID_ANY;
	__rte_thread_init(lcore_id, &cpuset);
	if (lcore_id == LCORE_ID_ANY) {
		rte_errno = ENOMEM;
		return -1;
	}
	RTE_LOG(DEBUG, EAL, "Registered non-EAL thread as lcore %u.\n",
		lcore_id);
	return 0;
}

void
rte_thread_unregister(void)
{
	unsigned int lcore_id = rte_lcore_id();

	if (lcore_id != LCORE_ID_ANY)
		eal_lcore_non_eal_release(lcore_id);
	__rte_thread_uninit();
	if (lcore_id != LCORE_ID_ANY)
		RTE_LOG(DEBUG, EAL, "Unregistered non-EAL thread (was lcore %u).\n",
			lcore_id);
}

int
rte_thread_attr_init(rte_thread_attr_t *attr)
{
	if (attr == NULL)
		return EINVAL;

	CPU_ZERO(&attr->cpuset);
	attr->priority = RTE_THREAD_PRIORITY_NORMAL;

	return 0;
}

int
rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr,
		enum rte_thread_priority priority)
{
	if (thread_attr == NULL)
		return EINVAL;

	thread_attr->priority = priority;

	return 0;
}

int
rte_thread_attr_set_affinity(rte_thread_attr_t *thread_attr,
		rte_cpuset_t *cpuset)
{
	if (thread_attr == NULL)
		return EINVAL;

	if (cpuset == NULL)
		return EINVAL;

	thread_attr->cpuset = *cpuset;

	return 0;
}

int
rte_thread_attr_get_affinity(rte_thread_attr_t *thread_attr,
		rte_cpuset_t *cpuset)
{
	if (thread_attr == NULL)
		return EINVAL;

	if (cpuset == NULL)
		return EINVAL;

	*cpuset = thread_attr->cpuset;

	return 0;
}
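
/*
 * Illustrative sketch only (not part of this file): creating a named
 * control thread through the public API defined above. The start routine
 * my_ctrl_loop() and its stop flag are hypothetical; a NULL attribute
 * pointer is passed to use default thread attributes.
 *
 *	static volatile bool stop;
 *
 *	static uint32_t
 *	my_ctrl_loop(void *arg)		// hypothetical control thread body
 *	{
 *		(void)arg;
 *		while (!stop)
 *			rte_delay_us_sleep(1000);
 *		return 0;
 *	}
 *
 *	rte_thread_t ctrl;
 *
 *	if (rte_thread_create_control(&ctrl, "app-ctrl", NULL,
 *			my_ctrl_loop, NULL) != 0)
 *		rte_panic("cannot create control thread\n");
 *
 *	// later, from the main thread:
 *	stop = true;
 *	rte_thread_join(ctrl, NULL);
 */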