1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
3 */
4
5 #include <inttypes.h>
6 #include <stdlib.h>
7 #include <string.h>
8
9 #include <rte_common.h>
10 #include <rte_branch_prediction.h>
11 #include <rte_errno.h>
12 #include <rte_lcore.h>
13 #include <rte_log.h>
14 #ifndef RTE_EXEC_ENV_WINDOWS
15 #include <rte_telemetry.h>
16 #endif
17
18 #include "eal_private.h"
19 #include "eal_thread.h"
20
rte_get_main_lcore(void)21 unsigned int rte_get_main_lcore(void)
22 {
23 return rte_eal_get_configuration()->main_lcore;
24 }
25
rte_lcore_count(void)26 unsigned int rte_lcore_count(void)
27 {
28 return rte_eal_get_configuration()->lcore_count;
29 }
30
rte_lcore_index(int lcore_id)31 int rte_lcore_index(int lcore_id)
32 {
33 if (unlikely(lcore_id >= RTE_MAX_LCORE))
34 return -1;
35
36 if (lcore_id < 0) {
37 if (rte_lcore_id() == LCORE_ID_ANY)
38 return -1;
39
40 lcore_id = (int)rte_lcore_id();
41 }
42
43 return lcore_config[lcore_id].core_index;
44 }
45
rte_lcore_to_cpu_id(int lcore_id)46 int rte_lcore_to_cpu_id(int lcore_id)
47 {
48 if (unlikely(lcore_id >= RTE_MAX_LCORE))
49 return -1;
50
51 if (lcore_id < 0) {
52 if (rte_lcore_id() == LCORE_ID_ANY)
53 return -1;
54
55 lcore_id = (int)rte_lcore_id();
56 }
57
58 return lcore_config[lcore_id].core_id;
59 }
60
rte_lcore_cpuset(unsigned int lcore_id)61 rte_cpuset_t rte_lcore_cpuset(unsigned int lcore_id)
62 {
63 return lcore_config[lcore_id].cpuset;
64 }
65
66 enum rte_lcore_role_t
rte_eal_lcore_role(unsigned int lcore_id)67 rte_eal_lcore_role(unsigned int lcore_id)
68 {
69 struct rte_config *cfg = rte_eal_get_configuration();
70
71 if (lcore_id >= RTE_MAX_LCORE)
72 return ROLE_OFF;
73 return cfg->lcore_role[lcore_id];
74 }
75
76 int
rte_lcore_has_role(unsigned int lcore_id,enum rte_lcore_role_t role)77 rte_lcore_has_role(unsigned int lcore_id, enum rte_lcore_role_t role)
78 {
79 struct rte_config *cfg = rte_eal_get_configuration();
80
81 if (lcore_id >= RTE_MAX_LCORE)
82 return -EINVAL;
83
84 return cfg->lcore_role[lcore_id] == role;
85 }
86
rte_lcore_is_enabled(unsigned int lcore_id)87 int rte_lcore_is_enabled(unsigned int lcore_id)
88 {
89 struct rte_config *cfg = rte_eal_get_configuration();
90
91 if (lcore_id >= RTE_MAX_LCORE)
92 return 0;
93 return cfg->lcore_role[lcore_id] == ROLE_RTE;
94 }
95
rte_get_next_lcore(unsigned int i,int skip_main,int wrap)96 unsigned int rte_get_next_lcore(unsigned int i, int skip_main, int wrap)
97 {
98 i++;
99 if (wrap)
100 i %= RTE_MAX_LCORE;
101
102 while (i < RTE_MAX_LCORE) {
103 if (!rte_lcore_is_enabled(i) ||
104 (skip_main && (i == rte_get_main_lcore()))) {
105 i++;
106 if (wrap)
107 i %= RTE_MAX_LCORE;
108 continue;
109 }
110 break;
111 }
112 return i;
113 }
114
115 unsigned int
rte_lcore_to_socket_id(unsigned int lcore_id)116 rte_lcore_to_socket_id(unsigned int lcore_id)
117 {
118 return lcore_config[lcore_id].socket_id;
119 }
120
/* qsort comparator ordering socket ids ascending. */
static int
socket_id_cmp(const void *a, const void *b)
{
	int left = *(const int *)a;
	int right = *(const int *)b;

	/* Sign-of-difference without overflow: -1, 0 or 1. */
	return (left > right) - (left < right);
}
133
134 /*
135 * Parse /sys/devices/system/cpu to get the number of physical and logical
136 * processors on the machine. The function will fill the cpu_info
137 * structure.
138 */
int
rte_eal_cpu_init(void)
{
	/* pointer to global configuration */
	struct rte_config *config = rte_eal_get_configuration();
	unsigned lcore_id;
	unsigned count = 0;
	unsigned int socket_id, prev_socket_id;
	int lcore_to_socket_id[RTE_MAX_LCORE];

	/*
	 * Parse the maximum set of logical cores, detect the subset of running
	 * ones and enable them by default.
	 */
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		/* Tentative contiguous index; overwritten with -1 below if the
		 * CPU turns out not to be detected. */
		lcore_config[lcore_id].core_index = count;

		/* init cpuset for per lcore config */
		CPU_ZERO(&lcore_config[lcore_id].cpuset);

		/* find socket first */
		socket_id = eal_cpu_socket_id(lcore_id);
		/* Record the socket id for every slot (detected or not) so the
		 * NUMA-node scan below has a value per entry. */
		lcore_to_socket_id[lcore_id] = socket_id;

		if (eal_cpu_detected(lcore_id) == 0) {
			/* CPU not present: mark the lcore off and exclude it
			 * from the enabled-core index space. */
			config->lcore_role[lcore_id] = ROLE_OFF;
			lcore_config[lcore_id].core_index = -1;
			continue;
		}

		/* By default, lcore 1:1 map to cpu id */
		CPU_SET(lcore_id, &lcore_config[lcore_id].cpuset);

		/* By default, each detected core is enabled */
		config->lcore_role[lcore_id] = ROLE_RTE;
		lcore_config[lcore_id].core_role = ROLE_RTE;
		lcore_config[lcore_id].core_id = eal_cpu_core_id(lcore_id);
		lcore_config[lcore_id].socket_id = socket_id;
		EAL_LOG(DEBUG, "Detected lcore %u as "
				"core %u on socket %u",
				lcore_id, lcore_config[lcore_id].core_id,
				lcore_config[lcore_id].socket_id);
		count++;
	}
	/* Report CPUs that exist beyond RTE_MAX_LCORE and therefore cannot be
	 * used as lcores. */
	for (; lcore_id < CPU_SETSIZE; lcore_id++) {
		if (eal_cpu_detected(lcore_id) == 0)
			continue;
		EAL_LOG(DEBUG, "Skipped lcore %u as core %u on socket %u",
			lcore_id, eal_cpu_core_id(lcore_id),
			eal_cpu_socket_id(lcore_id));
	}

	/* Set the count of enabled logical cores of the EAL configuration */
	config->lcore_count = count;
	EAL_LOG(DEBUG,
			"Maximum logical cores by configuration: %u",
			RTE_MAX_LCORE);
	EAL_LOG(INFO, "Detected CPU lcores: %u", config->lcore_count);

	/* sort all socket id's in ascending order */
	qsort(lcore_to_socket_id, RTE_DIM(lcore_to_socket_id),
			sizeof(lcore_to_socket_id[0]), socket_id_cmp);

	/* Dedup consecutive entries of the sorted array to build the list of
	 * distinct NUMA nodes. -1 wraps to UINT_MAX in the unsigned compare,
	 * which presumably no real socket id matches — TODO confirm. */
	prev_socket_id = -1;
	config->numa_node_count = 0;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		socket_id = lcore_to_socket_id[lcore_id];
		if (socket_id != prev_socket_id)
			config->numa_nodes[config->numa_node_count++] =
					socket_id;
		prev_socket_id = socket_id;
	}
	EAL_LOG(INFO, "Detected NUMA nodes: %u", config->numa_node_count);

	return 0;
}
215
216 unsigned int
rte_socket_count(void)217 rte_socket_count(void)
218 {
219 const struct rte_config *config = rte_eal_get_configuration();
220 return config->numa_node_count;
221 }
222
223 int
rte_socket_id_by_idx(unsigned int idx)224 rte_socket_id_by_idx(unsigned int idx)
225 {
226 const struct rte_config *config = rte_eal_get_configuration();
227 if (idx >= config->numa_node_count) {
228 rte_errno = EINVAL;
229 return -1;
230 }
231 return config->numa_nodes[idx];
232 }
233
/* Serializes lcore role changes and callback list updates below. */
static rte_rwlock_t lcore_lock = RTE_RWLOCK_INITIALIZER;
/* One registered init/uninit callback pair, linked into lcore_callbacks. */
struct lcore_callback {
	TAILQ_ENTRY(lcore_callback) next;
	char *name; /* "name-arg" string built at registration, heap-owned */
	rte_lcore_init_cb init; /* optional, may be NULL */
	rte_lcore_uninit_cb uninit; /* optional, may be NULL */
	void *arg; /* opaque user argument passed back to both callbacks */
};
static TAILQ_HEAD(lcore_callbacks_head, lcore_callback) lcore_callbacks =
	TAILQ_HEAD_INITIALIZER(lcore_callbacks);
244
245 static int
callback_init(struct lcore_callback * callback,unsigned int lcore_id)246 callback_init(struct lcore_callback *callback, unsigned int lcore_id)
247 {
248 if (callback->init == NULL)
249 return 0;
250 EAL_LOG(DEBUG, "Call init for lcore callback %s, lcore_id %u",
251 callback->name, lcore_id);
252 return callback->init(lcore_id, callback->arg);
253 }
254
255 static void
callback_uninit(struct lcore_callback * callback,unsigned int lcore_id)256 callback_uninit(struct lcore_callback *callback, unsigned int lcore_id)
257 {
258 if (callback->uninit == NULL)
259 return;
260 EAL_LOG(DEBUG, "Call uninit for lcore callback %s, lcore_id %u",
261 callback->name, lcore_id);
262 callback->uninit(lcore_id, callback->arg);
263 }
264
/* Release a callback descriptor and the name string it owns. */
static void
free_callback(struct lcore_callback *callback)
{
	free(callback->name);
	free(callback);
}
271
/*
 * Register an init/uninit callback pair, running init on every currently
 * active lcore under the write lock. Returns an opaque handle for
 * rte_lcore_callback_unregister(), or NULL on bad arguments, allocation
 * failure, or when init refuses any lcore (in which case all previously
 * initialized lcores are rolled back).
 */
void *
rte_lcore_callback_register(const char *name, rte_lcore_init_cb init,
	rte_lcore_uninit_cb uninit, void *arg)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	struct lcore_callback *callback;
	unsigned int lcore_id;

	if (name == NULL)
		return NULL;
	callback = calloc(1, sizeof(*callback));
	if (callback == NULL)
		return NULL;
	/* Name is made unique per (name, arg) pair for log readability. */
	if (asprintf(&callback->name, "%s-%p", name, arg) == -1) {
		free(callback);
		return NULL;
	}
	callback->init = init;
	callback->uninit = uninit;
	callback->arg = arg;
	rte_rwlock_write_lock(&lcore_lock);
	/* Nothing to replay on existing lcores when there is no init hook. */
	if (callback->init == NULL)
		goto no_init;
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] == ROLE_OFF)
			continue;
		if (callback_init(callback, lcore_id) == 0)
			continue;
		/* Callback refused init for this lcore, uninitialize all
		 * previous lcore.
		 */
		while (lcore_id-- != 0) {
			if (cfg->lcore_role[lcore_id] == ROLE_OFF)
				continue;
			callback_uninit(callback, lcore_id);
		}
		free_callback(callback);
		callback = NULL;
		goto out;
	}
no_init:
	TAILQ_INSERT_TAIL(&lcore_callbacks, callback, next);
	EAL_LOG(DEBUG, "Registered new lcore callback %s (%sinit, %suninit).",
		callback->name, callback->init == NULL ? "NO " : "",
		callback->uninit == NULL ? "NO " : "");
out:
	rte_rwlock_write_unlock(&lcore_lock);
	return callback;
}
321
322 void
rte_lcore_callback_unregister(void * handle)323 rte_lcore_callback_unregister(void *handle)
324 {
325 struct rte_config *cfg = rte_eal_get_configuration();
326 struct lcore_callback *callback = handle;
327 unsigned int lcore_id;
328
329 if (callback == NULL)
330 return;
331 rte_rwlock_write_lock(&lcore_lock);
332 if (callback->uninit == NULL)
333 goto no_uninit;
334 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
335 if (cfg->lcore_role[lcore_id] == ROLE_OFF)
336 continue;
337 callback_uninit(callback, lcore_id);
338 }
339 no_uninit:
340 TAILQ_REMOVE(&lcore_callbacks, callback, next);
341 rte_rwlock_write_unlock(&lcore_lock);
342 EAL_LOG(DEBUG, "Unregistered lcore callback %s-%p.",
343 callback->name, callback->arg);
344 free_callback(callback);
345 }
346
/*
 * Claim a free lcore id for a non-EAL thread: pick the first ROLE_OFF slot,
 * switch it to ROLE_NON_EAL and replay all registered init callbacks on it.
 * Returns the lcore id, or RTE_MAX_LCORE when no slot is free or a callback
 * refuses the lcore (all earlier callbacks are rolled back in that case).
 */
unsigned int
eal_lcore_non_eal_allocate(void)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	struct lcore_callback *callback;
	struct lcore_callback *prev;
	unsigned int lcore_id;

	rte_rwlock_write_lock(&lcore_lock);
	for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
		if (cfg->lcore_role[lcore_id] != ROLE_OFF)
			continue;
		cfg->lcore_role[lcore_id] = ROLE_NON_EAL;
		cfg->lcore_count++;
		break;
	}
	/* Loop fell through without a break: every slot is taken. */
	if (lcore_id == RTE_MAX_LCORE) {
		EAL_LOG(DEBUG, "No lcore available.");
		goto out;
	}
	TAILQ_FOREACH(callback, &lcore_callbacks, next) {
		if (callback_init(callback, lcore_id) == 0)
			continue;
		/* Callback refused init for this lcore, call uninit for all
		 * previous callbacks.
		 */
		prev = TAILQ_PREV(callback, lcore_callbacks_head, next);
		while (prev != NULL) {
			callback_uninit(prev, lcore_id);
			prev = TAILQ_PREV(prev, lcore_callbacks_head, next);
		}
		EAL_LOG(DEBUG, "Initialization refused for lcore %u.",
			lcore_id);
		/* Undo the reservation and signal failure via the sentinel. */
		cfg->lcore_role[lcore_id] = ROLE_OFF;
		cfg->lcore_count--;
		lcore_id = RTE_MAX_LCORE;
		goto out;
	}
out:
	rte_rwlock_write_unlock(&lcore_lock);
	return lcore_id;
}
389
390 void
eal_lcore_non_eal_release(unsigned int lcore_id)391 eal_lcore_non_eal_release(unsigned int lcore_id)
392 {
393 struct rte_config *cfg = rte_eal_get_configuration();
394 struct lcore_callback *callback;
395
396 rte_rwlock_write_lock(&lcore_lock);
397 if (cfg->lcore_role[lcore_id] != ROLE_NON_EAL)
398 goto out;
399 TAILQ_FOREACH(callback, &lcore_callbacks, next)
400 callback_uninit(callback, lcore_id);
401 cfg->lcore_role[lcore_id] = ROLE_OFF;
402 cfg->lcore_count--;
403 out:
404 rte_rwlock_write_unlock(&lcore_lock);
405 }
406
407 int
rte_lcore_iterate(rte_lcore_iterate_cb cb,void * arg)408 rte_lcore_iterate(rte_lcore_iterate_cb cb, void *arg)
409 {
410 struct rte_config *cfg = rte_eal_get_configuration();
411 unsigned int lcore_id;
412 int ret = 0;
413
414 rte_rwlock_read_lock(&lcore_lock);
415 for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
416 if (cfg->lcore_role[lcore_id] == ROLE_OFF)
417 continue;
418 ret = cb(lcore_id, arg);
419 if (ret != 0)
420 break;
421 }
422 rte_rwlock_read_unlock(&lcore_lock);
423 return ret;
424 }
425
426 static const char *
lcore_role_str(enum rte_lcore_role_t role)427 lcore_role_str(enum rte_lcore_role_t role)
428 {
429 switch (role) {
430 case ROLE_RTE:
431 return "RTE";
432 case ROLE_SERVICE:
433 return "SERVICE";
434 case ROLE_NON_EAL:
435 return "NON_EAL";
436 default:
437 return "UNKNOWN";
438 }
439 }
440
/* Application-provided hook reporting per-lcore cycle usage; NULL if unset. */
static rte_lcore_usage_cb lcore_usage_cb;

void
rte_lcore_register_usage_cb(rte_lcore_usage_cb cb)
{
	/* Single pointer store; readers take a local copy before calling
	 * (see lcore_dump_cb and the telemetry callbacks below). */
	lcore_usage_cb = cb;
}
448
449 static float
calc_usage_ratio(const struct rte_lcore_usage * usage)450 calc_usage_ratio(const struct rte_lcore_usage *usage)
451 {
452 return usage->total_cycles != 0 ?
453 (usage->busy_cycles * 100.0) / usage->total_cycles : (float)0;
454 }
455
/*
 * rte_lcore_iterate() callback printing one lcore's socket, role, cpuset
 * and (when a usage callback is registered) busy/total cycle counters to
 * the FILE passed as arg. Returns 0 to continue iteration, -ENOMEM when
 * the usage string cannot be allocated.
 */
static int
lcore_dump_cb(unsigned int lcore_id, void *arg)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	char cpuset[RTE_CPU_AFFINITY_STR_LEN];
	struct rte_lcore_usage usage;
	rte_lcore_usage_cb usage_cb;
	char *usage_str = NULL; /* heap string from asprintf, freed below */
	FILE *f = arg;
	int ret;

	/* The callback may not set all the fields in the structure, so clear it here. */
	memset(&usage, 0, sizeof(usage));
	/* Guard against concurrent modification of lcore_usage_cb. */
	usage_cb = lcore_usage_cb;
	if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
		if (asprintf(&usage_str, ", busy cycles %"PRIu64"/%"PRIu64" (ratio %.02f%%)",
				usage.busy_cycles, usage.total_cycles,
				calc_usage_ratio(&usage)) < 0) {
			return -ENOMEM;
		}
	}
	/* Non-zero ret means the cpuset string was truncated ("..." below). */
	ret = eal_thread_dump_affinity(&lcore_config[lcore_id].cpuset, cpuset,
		sizeof(cpuset));
	fprintf(f, "lcore %u, socket %u, role %s, cpuset %s%s%s\n", lcore_id,
		rte_lcore_to_socket_id(lcore_id),
		lcore_role_str(cfg->lcore_role[lcore_id]), cpuset,
		ret == 0 ? "" : "...", usage_str != NULL ? usage_str : "");
	free(usage_str);

	return 0;
}
488
489 void
rte_lcore_dump(FILE * f)490 rte_lcore_dump(FILE *f)
491 {
492 rte_lcore_iterate(lcore_dump_cb, f);
493 }
494
495 #ifndef RTE_EXEC_ENV_WINDOWS
/* rte_lcore_iterate() callback appending one lcore id to a telemetry array. */
static int
lcore_telemetry_id_cb(unsigned int lcore_id, void *arg)
{
	struct rte_tel_data *data = arg;

	return rte_tel_data_add_array_int(data, lcore_id);
}
503
504 static int
handle_lcore_list(const char * cmd __rte_unused,const char * params __rte_unused,struct rte_tel_data * d)505 handle_lcore_list(const char *cmd __rte_unused, const char *params __rte_unused,
506 struct rte_tel_data *d)
507 {
508 int ret;
509
510 ret = rte_tel_data_start_array(d, RTE_TEL_INT_VAL);
511 if (ret == 0)
512 ret = rte_lcore_iterate(lcore_telemetry_id_cb, d);
513
514 return ret;
515 }
516
/* Request context for /eal/lcore/info: which lcore to report, and where. */
struct lcore_telemetry_info {
	unsigned int lcore_id; /* lcore the caller asked about */
	struct rte_tel_data *d; /* output dict filled by the iterate callback */
};
521
/* Render the busy/total ratio as a "NN.NN%" string into buf. */
static void
format_usage_ratio(char *buf, uint16_t size, const struct rte_lcore_usage *usage)
{
	snprintf(buf, size, "%.02f%%", calc_usage_ratio(usage));
}
528
/*
 * rte_lcore_iterate() callback building the /eal/lcore/info response for the
 * single lcore requested in the lcore_telemetry_info context. Returns 0 to
 * keep scanning, 1 once the target lcore has been filled in, -ENOMEM when
 * the cpuset sub-array cannot be allocated.
 */
static int
lcore_telemetry_info_cb(unsigned int lcore_id, void *arg)
{
	struct rte_config *cfg = rte_eal_get_configuration();
	struct lcore_telemetry_info *info = arg;
	char ratio_str[RTE_TEL_MAX_STRING_LEN];
	struct rte_lcore_usage usage;
	struct rte_tel_data *cpuset;
	rte_lcore_usage_cb usage_cb;
	unsigned int cpu;

	/* Not the lcore we were asked about: keep iterating. */
	if (lcore_id != info->lcore_id)
		return 0;

	rte_tel_data_start_dict(info->d);
	rte_tel_data_add_dict_int(info->d, "lcore_id", lcore_id);
	rte_tel_data_add_dict_int(info->d, "socket", rte_lcore_to_socket_id(lcore_id));
	rte_tel_data_add_dict_string(info->d, "role", lcore_role_str(cfg->lcore_role[lcore_id]));
	cpuset = rte_tel_data_alloc();
	if (cpuset == NULL)
		return -ENOMEM;
	rte_tel_data_start_array(cpuset, RTE_TEL_INT_VAL);
	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
		if (CPU_ISSET(cpu, &lcore_config[lcore_id].cpuset))
			rte_tel_data_add_array_int(cpuset, cpu);
	}
	/* keep==0: the telemetry library takes ownership of cpuset. */
	rte_tel_data_add_dict_container(info->d, "cpuset", cpuset, 0);
	/* The callback may not set all the fields in the structure, so clear it here. */
	memset(&usage, 0, sizeof(usage));
	/* Guard against concurrent modification of lcore_usage_cb. */
	usage_cb = lcore_usage_cb;
	if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
		rte_tel_data_add_dict_uint(info->d, "total_cycles", usage.total_cycles);
		rte_tel_data_add_dict_uint(info->d, "busy_cycles", usage.busy_cycles);
		format_usage_ratio(ratio_str, sizeof(ratio_str), &usage);
		rte_tel_data_add_dict_string(info->d, "usage_ratio", ratio_str);
	}

	/* Return non-zero positive value to stop iterating over lcore_id. */
	return 1;
}
570
571 static int
handle_lcore_info(const char * cmd __rte_unused,const char * params,struct rte_tel_data * d)572 handle_lcore_info(const char *cmd __rte_unused, const char *params, struct rte_tel_data *d)
573 {
574 struct lcore_telemetry_info info = { .d = d };
575 unsigned long lcore_id;
576 char *endptr;
577
578 if (params == NULL)
579 return -EINVAL;
580 errno = 0;
581 lcore_id = strtoul(params, &endptr, 10);
582 if (errno)
583 return -errno;
584 if (*params == '\0' || *endptr != '\0' || lcore_id >= RTE_MAX_LCORE)
585 return -EINVAL;
586
587 info.lcore_id = lcore_id;
588
589 return rte_lcore_iterate(lcore_telemetry_info_cb, &info);
590 }
591
/* Parallel telemetry arrays for /eal/lcore/usage: entry i of each array
 * describes the same lcore. */
struct lcore_telemetry_usage {
	struct rte_tel_data *lcore_ids;
	struct rte_tel_data *total_cycles;
	struct rte_tel_data *busy_cycles;
	struct rte_tel_data *usage_ratio;
};
598
/*
 * rte_lcore_iterate() callback appending one lcore's usage figures to the
 * four parallel arrays in the lcore_telemetry_usage context. Lcores for
 * which no usage callback is registered, or whose callback fails, are
 * skipped entirely so the arrays stay aligned. Always returns 0.
 */
static int
lcore_telemetry_usage_cb(unsigned int lcore_id, void *arg)
{
	char ratio_str[RTE_TEL_MAX_STRING_LEN];
	struct lcore_telemetry_usage *u = arg;
	struct rte_lcore_usage usage;
	rte_lcore_usage_cb usage_cb;

	/* The callback may not set all the fields in the structure, so clear it here. */
	memset(&usage, 0, sizeof(usage));
	/* Guard against concurrent modification of lcore_usage_cb. */
	usage_cb = lcore_usage_cb;
	if (usage_cb != NULL && usage_cb(lcore_id, &usage) == 0) {
		/* All four arrays get exactly one entry per reported lcore. */
		rte_tel_data_add_array_uint(u->lcore_ids, lcore_id);
		rte_tel_data_add_array_uint(u->total_cycles, usage.total_cycles);
		rte_tel_data_add_array_uint(u->busy_cycles, usage.busy_cycles);
		format_usage_ratio(ratio_str, sizeof(ratio_str), &usage);
		rte_tel_data_add_array_string(u->usage_ratio, ratio_str);
	}

	return 0;
}
621
/*
 * Telemetry handler for /eal/lcore/usage: builds a dict of four parallel
 * arrays (lcore_ids, total_cycles, busy_cycles, usage_ratio) filled by
 * lcore_telemetry_usage_cb. Returns -ENOMEM if any array allocation fails
 * (all are freed in that case), otherwise the iteration result.
 */
static int
handle_lcore_usage(const char *cmd __rte_unused, const char *params __rte_unused,
	struct rte_tel_data *d)
{
	struct lcore_telemetry_usage usage;
	struct rte_tel_data *total_cycles;
	struct rte_tel_data *busy_cycles;
	struct rte_tel_data *usage_ratio;
	struct rte_tel_data *lcore_ids;

	lcore_ids = rte_tel_data_alloc();
	total_cycles = rte_tel_data_alloc();
	busy_cycles = rte_tel_data_alloc();
	usage_ratio = rte_tel_data_alloc();
	if (lcore_ids == NULL || total_cycles == NULL || busy_cycles == NULL ||
			usage_ratio == NULL) {
		/* rte_tel_data_free() tolerates NULL, so free all four. */
		rte_tel_data_free(lcore_ids);
		rte_tel_data_free(total_cycles);
		rte_tel_data_free(busy_cycles);
		rte_tel_data_free(usage_ratio);
		return -ENOMEM;
	}

	rte_tel_data_start_dict(d);
	rte_tel_data_start_array(lcore_ids, RTE_TEL_UINT_VAL);
	rte_tel_data_start_array(total_cycles, RTE_TEL_UINT_VAL);
	rte_tel_data_start_array(busy_cycles, RTE_TEL_UINT_VAL);
	rte_tel_data_start_array(usage_ratio, RTE_TEL_STRING_VAL);
	/* keep==0: d takes ownership of the four sub-arrays. */
	rte_tel_data_add_dict_container(d, "lcore_ids", lcore_ids, 0);
	rte_tel_data_add_dict_container(d, "total_cycles", total_cycles, 0);
	rte_tel_data_add_dict_container(d, "busy_cycles", busy_cycles, 0);
	rte_tel_data_add_dict_container(d, "usage_ratio", usage_ratio, 0);
	usage.lcore_ids = lcore_ids;
	usage.total_cycles = total_cycles;
	usage.busy_cycles = busy_cycles;
	usage.usage_ratio = usage_ratio;

	return rte_lcore_iterate(lcore_telemetry_usage_cb, &usage);
}
661
/* Constructor: register the lcore telemetry endpoints at startup. */
RTE_INIT(lcore_telemetry)
{
	rte_telemetry_register_cmd("/eal/lcore/list", handle_lcore_list,
		"List of lcore ids. Takes no parameters");
	rte_telemetry_register_cmd("/eal/lcore/info", handle_lcore_info,
		"Returns lcore info. Parameters: int lcore_id");
	rte_telemetry_register_cmd("/eal/lcore/usage", handle_lcore_usage,
		"Returns lcore cycles usage. Takes no parameters");
}
671 #endif /* !RTE_EXEC_ENV_WINDOWS */
672