1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2010-2014 Intel Corporation 3 */ 4 5 #include <errno.h> 6 #include <stdio.h> 7 #include <stdlib.h> 8 #include <pthread.h> 9 #include <sched.h> 10 #include <assert.h> 11 #include <string.h> 12 13 #include <eal_trace_internal.h> 14 #include <rte_errno.h> 15 #include <rte_lcore.h> 16 #include <rte_log.h> 17 #include <rte_memory.h> 18 #include <rte_trace_point.h> 19 20 #include "eal_internal_cfg.h" 21 #include "eal_private.h" 22 #include "eal_thread.h" 23 #include "eal_trace.h" 24 25 RTE_DEFINE_PER_LCORE(unsigned int, _lcore_id) = LCORE_ID_ANY; 26 RTE_DEFINE_PER_LCORE(int, _thread_id) = -1; 27 static RTE_DEFINE_PER_LCORE(unsigned int, _socket_id) = 28 (unsigned int)SOCKET_ID_ANY; 29 static RTE_DEFINE_PER_LCORE(rte_cpuset_t, _cpuset); 30 31 unsigned rte_socket_id(void) 32 { 33 return RTE_PER_LCORE(_socket_id); 34 } 35 36 static int 37 eal_cpuset_socket_id(rte_cpuset_t *cpusetp) 38 { 39 unsigned cpu = 0; 40 int socket_id = SOCKET_ID_ANY; 41 int sid; 42 43 if (cpusetp == NULL) 44 return SOCKET_ID_ANY; 45 46 do { 47 if (!CPU_ISSET(cpu, cpusetp)) 48 continue; 49 50 if (socket_id == SOCKET_ID_ANY) 51 socket_id = eal_cpu_socket_id(cpu); 52 53 sid = eal_cpu_socket_id(cpu); 54 if (socket_id != sid) { 55 socket_id = SOCKET_ID_ANY; 56 break; 57 } 58 59 } while (++cpu < CPU_SETSIZE); 60 61 return socket_id; 62 } 63 64 static void 65 thread_update_affinity(rte_cpuset_t *cpusetp) 66 { 67 unsigned int lcore_id = rte_lcore_id(); 68 69 /* store socket_id in TLS for quick access */ 70 RTE_PER_LCORE(_socket_id) = 71 eal_cpuset_socket_id(cpusetp); 72 73 /* store cpuset in TLS for quick access */ 74 memmove(&RTE_PER_LCORE(_cpuset), cpusetp, 75 sizeof(rte_cpuset_t)); 76 77 if (lcore_id != (unsigned)LCORE_ID_ANY) { 78 /* EAL thread will update lcore_config */ 79 lcore_config[lcore_id].socket_id = RTE_PER_LCORE(_socket_id); 80 memmove(&lcore_config[lcore_id].cpuset, cpusetp, 81 sizeof(rte_cpuset_t)); 82 } 83 } 84 85 int 86 
rte_thread_set_affinity(rte_cpuset_t *cpusetp) 87 { 88 if (rte_thread_set_affinity_by_id(rte_thread_self(), cpusetp) != 0) { 89 RTE_LOG(ERR, EAL, "rte_thread_set_affinity_by_id failed\n"); 90 return -1; 91 } 92 93 thread_update_affinity(cpusetp); 94 return 0; 95 } 96 97 void 98 rte_thread_get_affinity(rte_cpuset_t *cpusetp) 99 { 100 assert(cpusetp); 101 memmove(cpusetp, &RTE_PER_LCORE(_cpuset), 102 sizeof(rte_cpuset_t)); 103 } 104 105 int 106 eal_thread_dump_affinity(rte_cpuset_t *cpuset, char *str, unsigned int size) 107 { 108 unsigned cpu; 109 int ret; 110 unsigned int out = 0; 111 112 for (cpu = 0; cpu < CPU_SETSIZE; cpu++) { 113 if (!CPU_ISSET(cpu, cpuset)) 114 continue; 115 116 ret = snprintf(str + out, 117 size - out, "%u,", cpu); 118 if (ret < 0 || (unsigned)ret >= size - out) { 119 /* string will be truncated */ 120 ret = -1; 121 goto exit; 122 } 123 124 out += ret; 125 } 126 127 ret = 0; 128 exit: 129 /* remove the last separator */ 130 if (out > 0) 131 str[out - 1] = '\0'; 132 133 return ret; 134 } 135 136 int 137 eal_thread_dump_current_affinity(char *str, unsigned int size) 138 { 139 rte_cpuset_t cpuset; 140 141 rte_thread_get_affinity(&cpuset); 142 return eal_thread_dump_affinity(&cpuset, str, size); 143 } 144 145 void 146 __rte_thread_init(unsigned int lcore_id, rte_cpuset_t *cpuset) 147 { 148 /* set the lcore ID in per-lcore memory area */ 149 RTE_PER_LCORE(_lcore_id) = lcore_id; 150 151 /* acquire system unique id */ 152 rte_gettid(); 153 154 thread_update_affinity(cpuset); 155 156 __rte_trace_mem_per_thread_alloc(); 157 } 158 159 void 160 __rte_thread_uninit(void) 161 { 162 trace_mem_per_thread_free(); 163 164 RTE_PER_LCORE(_lcore_id) = LCORE_ID_ANY; 165 } 166 167 /* main loop of threads */ 168 __rte_noreturn uint32_t 169 eal_thread_loop(void *arg) 170 { 171 unsigned int lcore_id = (uintptr_t)arg; 172 char cpuset[RTE_CPU_AFFINITY_STR_LEN]; 173 int ret; 174 175 __rte_thread_init(lcore_id, &lcore_config[lcore_id].cpuset); 176 177 ret = 
eal_thread_dump_current_affinity(cpuset, sizeof(cpuset)); 178 RTE_LOG(DEBUG, EAL, "lcore %u is ready (tid=%zx;cpuset=[%s%s])\n", 179 lcore_id, (uintptr_t)pthread_self(), cpuset, 180 ret == 0 ? "" : "..."); 181 182 rte_eal_trace_thread_lcore_ready(lcore_id, cpuset); 183 184 /* read on our pipe to get commands */ 185 while (1) { 186 lcore_function_t *f; 187 void *fct_arg; 188 189 eal_thread_wait_command(); 190 191 /* Set the state to 'RUNNING'. Use release order 192 * since 'state' variable is used as the guard variable. 193 */ 194 __atomic_store_n(&lcore_config[lcore_id].state, RUNNING, 195 __ATOMIC_RELEASE); 196 197 eal_thread_ack_command(); 198 199 /* Load 'f' with acquire order to ensure that 200 * the memory operations from the main thread 201 * are accessed only after update to 'f' is visible. 202 * Wait till the update to 'f' is visible to the worker. 203 */ 204 while ((f = __atomic_load_n(&lcore_config[lcore_id].f, 205 __ATOMIC_ACQUIRE)) == NULL) 206 rte_pause(); 207 208 /* call the function and store the return value */ 209 fct_arg = lcore_config[lcore_id].arg; 210 ret = f(fct_arg); 211 lcore_config[lcore_id].ret = ret; 212 lcore_config[lcore_id].f = NULL; 213 lcore_config[lcore_id].arg = NULL; 214 215 /* Store the state with release order to ensure that 216 * the memory operations from the worker thread 217 * are completed before the state is updated. 218 * Use 'state' as the guard variable. 
219 */ 220 __atomic_store_n(&lcore_config[lcore_id].state, WAIT, 221 __ATOMIC_RELEASE); 222 } 223 224 /* never reached */ 225 /* return 0; */ 226 } 227 228 enum __rte_ctrl_thread_status { 229 CTRL_THREAD_LAUNCHING, /* Yet to call pthread_create function */ 230 CTRL_THREAD_RUNNING, /* Control thread is running successfully */ 231 CTRL_THREAD_ERROR /* Control thread encountered an error */ 232 }; 233 234 struct rte_thread_ctrl_params { 235 union { 236 void *(*ctrl_start_routine)(void *arg); 237 rte_thread_func control_start_routine; 238 } u; 239 void *arg; 240 int ret; 241 /* Control thread status. 242 * If the status is CTRL_THREAD_ERROR, 'ret' has the error code. 243 */ 244 enum __rte_ctrl_thread_status ctrl_thread_status; 245 }; 246 247 static int ctrl_thread_init(void *arg) 248 { 249 struct internal_config *internal_conf = 250 eal_get_internal_configuration(); 251 rte_cpuset_t *cpuset = &internal_conf->ctrl_cpuset; 252 struct rte_thread_ctrl_params *params = arg; 253 254 __rte_thread_init(rte_lcore_id(), cpuset); 255 params->ret = rte_thread_set_affinity_by_id(rte_thread_self(), cpuset); 256 if (params->ret != 0) { 257 __atomic_store_n(¶ms->ctrl_thread_status, 258 CTRL_THREAD_ERROR, __ATOMIC_RELEASE); 259 return 1; 260 } 261 262 __atomic_store_n(¶ms->ctrl_thread_status, 263 CTRL_THREAD_RUNNING, __ATOMIC_RELEASE); 264 265 return 0; 266 } 267 268 static void *ctrl_thread_start(void *arg) 269 { 270 struct rte_thread_ctrl_params *params = arg; 271 void *start_arg = params->arg; 272 void *(*start_routine)(void *) = params->u.ctrl_start_routine; 273 274 if (ctrl_thread_init(arg) != 0) 275 return NULL; 276 277 return start_routine(start_arg); 278 } 279 280 static uint32_t control_thread_start(void *arg) 281 { 282 struct rte_thread_ctrl_params *params = arg; 283 void *start_arg = params->arg; 284 rte_thread_func start_routine = params->u.control_start_routine; 285 286 if (ctrl_thread_init(arg) != 0) 287 return 0; 288 289 return start_routine(start_arg); 290 } 291 292 int 
/*
 * Legacy API: create a control thread pinned to the internal control
 * cpuset and wait for it to finish initializing.
 * Returns 0 on success, a negative errno value on failure.
 */
rte_ctrl_thread_create(pthread_t *thread, const char *name,
		const pthread_attr_t *attr,
		void *(*start_routine)(void *), void *arg)
{
	struct rte_thread_ctrl_params *params;
	enum __rte_ctrl_thread_status ctrl_thread_status;
	int ret;

	/* Owned by this function; the new thread only borrows it until the
	 * status handshake below completes.
	 */
	params = malloc(sizeof(*params));
	if (!params)
		return -ENOMEM;

	params->u.ctrl_start_routine = start_routine;
	params->arg = arg;
	params->ret = 0;
	params->ctrl_thread_status = CTRL_THREAD_LAUNCHING;

	ret = pthread_create(thread, attr, ctrl_thread_start, (void *)params);
	if (ret != 0) {
		free(params);
		return -ret;
	}

	if (name != NULL)
		rte_thread_set_name((rte_thread_t){(uintptr_t)*thread}, name);

	/* Wait for the control thread to initialize successfully */
	while ((ctrl_thread_status =
			__atomic_load_n(&params->ctrl_thread_status,
			__ATOMIC_ACQUIRE)) == CTRL_THREAD_LAUNCHING) {
		/* Yield the CPU. Using sched_yield call requires maintaining
		 * another implementation for Windows as sched_yield is not
		 * supported on Windows.
		 */
		rte_delay_us_sleep(1);
	}

	/* Check if the control thread encountered an error */
	if (ctrl_thread_status == CTRL_THREAD_ERROR) {
		/* ctrl thread is exiting */
		pthread_join(*thread, NULL);
	}

	/* 'ret' is a positive errno (or 0) set by ctrl_thread_init(). */
	ret = params->ret;
	free(params);

	return -ret;
}

/*
 * New API: create a control thread via rte_thread_create and wait for it
 * to finish initializing.
 * Returns 0 on success, a positive error code on failure (note the sign
 * convention differs from rte_ctrl_thread_create above).
 */
int
rte_thread_create_control(rte_thread_t *thread, const char *name,
		const rte_thread_attr_t *attr, rte_thread_func start_routine,
		void *arg)
{
	struct rte_thread_ctrl_params *params;
	enum __rte_ctrl_thread_status ctrl_thread_status;
	int ret;

	params = malloc(sizeof(*params));
	if (params == NULL)
		return -ENOMEM;

	params->u.control_start_routine = start_routine;
	params->arg = arg;
	params->ret = 0;
	params->ctrl_thread_status = CTRL_THREAD_LAUNCHING;

	ret = rte_thread_create(thread, attr, control_thread_start, params);
	if (ret != 0) {
		free(params);
		return -ret;
	}

	if (name != NULL)
		rte_thread_set_name(*thread, name);

	/* Wait for the control thread to initialize successfully */
	while ((ctrl_thread_status =
			__atomic_load_n(&params->ctrl_thread_status,
			__ATOMIC_ACQUIRE)) == CTRL_THREAD_LAUNCHING) {
		/* Busy-wait politely; see the Windows note in
		 * rte_ctrl_thread_create().
		 */
		rte_delay_us_sleep(1);
	}

	/* Check if the control thread encountered an error */
	if (ctrl_thread_status == CTRL_THREAD_ERROR) {
		/* ctrl thread is exiting */
		rte_thread_join(*thread, NULL);
	}

	ret = params->ret;
	free(params);

	return ret;
}

/*
 * Register the calling non-EAL thread with the EAL: allocate it an lcore
 * id and initialize its per-thread state.
 * Returns 0 on success, -1 with rte_errno set otherwise.
 */
int
rte_thread_register(void)
{
	unsigned int lcore_id;
	rte_cpuset_t cpuset;

	/* EAL init flushes all lcores, we can't register before. */
	if (eal_get_internal_configuration()->init_complete != 1) {
		RTE_LOG(DEBUG, EAL, "Called %s before EAL init.\n", __func__);
		rte_errno = EINVAL;
		return -1;
	}
	if (!rte_mp_disable()) {
		RTE_LOG(ERR, EAL, "Multiprocess in use, registering non-EAL threads is not supported.\n");
		rte_errno = EINVAL;
		return -1;
	}
	/* Fall back to an empty cpuset if the OS affinity query fails. */
	if (rte_thread_get_affinity_by_id(rte_thread_self(), &cpuset) != 0)
		CPU_ZERO(&cpuset);
	lcore_id = eal_lcore_non_eal_allocate();
	if (lcore_id >= RTE_MAX_LCORE)
		lcore_id = LCORE_ID_ANY;
	/* TLS is initialized even on allocation failure (with LCORE_ID_ANY)
	 * before reporting the error below.
	 */
	__rte_thread_init(lcore_id, &cpuset);
	if (lcore_id == LCORE_ID_ANY) {
		rte_errno = ENOMEM;
		return -1;
	}
	RTE_LOG(DEBUG, EAL, "Registered non-EAL thread as lcore %u.\n",
		lcore_id);
	return 0;
}

/*
 * Undo rte_thread_register for the calling thread: release its lcore id
 * (if any) and tear down its per-thread state.
 */
void
rte_thread_unregister(void)
{
	unsigned int lcore_id = rte_lcore_id();

	if (lcore_id != LCORE_ID_ANY)
		eal_lcore_non_eal_release(lcore_id);
	__rte_thread_uninit();
	if (lcore_id != LCORE_ID_ANY)
		RTE_LOG(DEBUG, EAL, "Unregistered non-EAL thread (was lcore %u).\n",
			lcore_id);
}

/*
 * Initialize a thread attributes structure to defaults: empty cpuset,
 * normal priority. Returns 0 on success, EINVAL on NULL input.
 */
int
rte_thread_attr_init(rte_thread_attr_t *attr)
{
	if (attr == NULL)
		return EINVAL;

	CPU_ZERO(&attr->cpuset);
	attr->priority = RTE_THREAD_PRIORITY_NORMAL;

	return 0;
}

/* Set the priority field of a thread attributes structure. */
int
rte_thread_attr_set_priority(rte_thread_attr_t *thread_attr,
		enum rte_thread_priority priority)
{
	if (thread_attr == NULL)
		return EINVAL;

	thread_attr->priority = priority;

	return 0;
}

/* Copy *cpuset into the affinity field of a thread attributes structure. */
int
rte_thread_attr_set_affinity(rte_thread_attr_t *thread_attr,
		rte_cpuset_t *cpuset)
{
	if (thread_attr == NULL)
		return EINVAL;

	if (cpuset == NULL)
		return EINVAL;

	thread_attr->cpuset = *cpuset;

	return 0;
}

/* Copy the affinity field of a thread attributes structure into *cpuset. */
int
rte_thread_attr_get_affinity(rte_thread_attr_t *thread_attr,
		rte_cpuset_t *cpuset)
{
	if (thread_attr == NULL)
		return EINVAL;

	if (cpuset == NULL)
		return EINVAL;

	*cpuset = thread_attr->cpuset;

	return 0;
}