/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2020 Marvell International Ltd.
 */

#include <stdlib.h>
#include <fnmatch.h>
#include <pthread.h>
#include <sys/queue.h>
#include <regex.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_lcore.h>
#include <rte_per_lcore.h>
#include <rte_string_fns.h>

#include "eal_trace.h"

RTE_DEFINE_PER_LCORE(volatile int, trace_point_sz);
RTE_DEFINE_PER_LCORE(void *, trace_mem);
static RTE_DEFINE_PER_LCORE(char *, ctf_field);

static struct trace_point_head tp_list = STAILQ_HEAD_INITIALIZER(tp_list);
static struct trace trace = { .args = STAILQ_HEAD_INITIALIZER(trace.args), };

struct trace *
trace_obj_get(void)
{
	return &trace;
}

struct trace_point_head *
trace_list_head_get(void)
{
	return &tp_list;
}

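/*
 * Initialize the trace subsystem at EAL init time: validate trace point
 * registrations, generate the stream UUID, apply the configured buffer
 * size, create the CTF metadata, record the epoch timestamp and finally
 * apply the saved trace arguments and trace mode.
 */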
int
eal_trace_init(void)
{
	struct trace_arg *arg;

	/* Trace memory should start 8B-aligned for natural alignment */
	RTE_BUILD_BUG_ON((offsetof(struct __rte_trace_header, mem) % 8) != 0);

	/* One of the trace point registrations failed */
	if (trace.register_errno) {
		rte_errno = trace.register_errno;
		goto fail;
	}

	rte_spinlock_init(&trace.lock);

	/* Check for duplicate trace point names */
	if (trace_has_duplicate_entry())
		goto fail;

	/* Generate UUID ver 4 from the total size of events and the number
	 * of events
	 */
	trace_uuid_generate();

	/* Apply buffer size configuration for trace output */
	trace_bufsz_args_apply();

	/* Generate CTF TDSL metadata */
	if (trace_metadata_create() < 0)
		goto fail;

	/* Save current epoch timestamp for future use */
	if (trace_epoch_time_save() < 0)
		goto free_meta;

	/* Apply global configurations */
	STAILQ_FOREACH(arg, &trace.args, next)
		trace_args_apply(arg->val);

	rte_trace_mode_set(trace.mode);

	return 0;

free_meta:
	trace_metadata_destroy();
fail:
	trace_err("failed to initialize trace [%s]", rte_strerror(rte_errno));
	return -rte_errno;
}

void
eal_trace_fini(void)
{
	trace_mem_free();
	trace_metadata_destroy();
	eal_trace_args_free();
}

bool
rte_trace_is_enabled(void)
{
	return rte_atomic_load_explicit(&trace.status, rte_memory_order_acquire) != 0;
}

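/*
 * Per trace point mode control: OVERWRITE clears the discard bit in the
 * handle, DISCARD sets it so that new events are dropped once the trace
 * buffer is full.
 */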
static void
trace_mode_set(rte_trace_point_t *t, enum rte_trace_mode mode)
{
	if (mode == RTE_TRACE_MODE_OVERWRITE)
		rte_atomic_fetch_and_explicit(t, ~__RTE_TRACE_FIELD_ENABLE_DISCARD,
			rte_memory_order_release);
	else
		rte_atomic_fetch_or_explicit(t, __RTE_TRACE_FIELD_ENABLE_DISCARD,
			rte_memory_order_release);
}

void
rte_trace_mode_set(enum rte_trace_mode mode)
{
	struct trace_point *tp;

	STAILQ_FOREACH(tp, &tp_list, next)
		trace_mode_set(tp->handle, mode);

	trace.mode = mode;
}

enum rte_trace_mode
rte_trace_mode_get(void)
{
	return trace.mode;
}

static bool
trace_point_is_invalid(rte_trace_point_t *t)
{
	return (t == NULL) || (trace_id_get(t) >= trace.nb_trace_points);
}

bool
rte_trace_point_is_enabled(rte_trace_point_t *t)
{
	uint64_t val;

	if (trace_point_is_invalid(t))
		return false;

	val = rte_atomic_load_explicit(t, rte_memory_order_acquire);
	return (val & __RTE_TRACE_FIELD_ENABLE_MASK) != 0;
}

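/*
 * Enabling or disabling a trace point toggles the ENABLE bit in its handle
 * and maintains trace.status as a count of currently enabled trace points,
 * which backs rte_trace_is_enabled().
 */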
int
rte_trace_point_enable(rte_trace_point_t *t)
{
	uint64_t prev;

	if (trace_point_is_invalid(t))
		return -ERANGE;

	prev = rte_atomic_fetch_or_explicit(t, __RTE_TRACE_FIELD_ENABLE_MASK,
		rte_memory_order_release);
	if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) == 0)
		rte_atomic_fetch_add_explicit(&trace.status, 1, rte_memory_order_release);
	return 0;
}

int
rte_trace_point_disable(rte_trace_point_t *t)
{
	uint64_t prev;

	if (trace_point_is_invalid(t))
		return -ERANGE;

	prev = rte_atomic_fetch_and_explicit(t, ~__RTE_TRACE_FIELD_ENABLE_MASK,
		rte_memory_order_release);
	if ((prev & __RTE_TRACE_FIELD_ENABLE_MASK) != 0)
		rte_atomic_fetch_sub_explicit(&trace.status, 1, rte_memory_order_release);
	return 0;
}

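/*
 * Enable or disable every trace point whose name matches the given glob
 * pattern (fnmatch). Returns a negative errno on failure, 1 when at least
 * one trace point matched and 0 when none did.
 */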
int
rte_trace_pattern(const char *pattern, bool enable)
{
	struct trace_point *tp;
	int rc = 0, found = 0;

	STAILQ_FOREACH(tp, &tp_list, next) {
		if (fnmatch(pattern, tp->name, 0) != 0)
			continue;

		if (enable)
			rc = rte_trace_point_enable(tp->handle);
		else
			rc = rte_trace_point_disable(tp->handle);
		if (rc < 0) {
			found = 0;
			break;
		}
		found = 1;
	}

	return rc | found;
}

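/*
 * Same as rte_trace_pattern() but matches trace point names against a
 * POSIX regular expression. Returns -EINVAL if the expression fails to
 * compile.
 */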
int
rte_trace_regexp(const char *regex, bool enable)
{
	struct trace_point *tp;
	int rc = 0, found = 0;
	regex_t r;

	if (regcomp(&r, regex, 0) != 0)
		return -EINVAL;

	STAILQ_FOREACH(tp, &tp_list, next) {
		if (regexec(&r, tp->name, 0, NULL, 0) != 0)
			continue;

		if (enable)
			rc = rte_trace_point_enable(tp->handle);
		else
			rc = rte_trace_point_disable(tp->handle);
		if (rc < 0) {
			found = 0;
			break;
		}
		found = 1;
	}
	regfree(&r);

	return rc | found;
}

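/* Look up a trace point handle by exact name; returns NULL if not found. */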
rte_trace_point_t *
rte_trace_point_lookup(const char *name)
{
	struct trace_point *tp;

	if (name == NULL)
		return NULL;

	STAILQ_FOREACH(tp, &tp_list, next)
		if (strcmp(tp->name, name) == 0)
			return tp->handle;

	return NULL;
}

static void
trace_point_dump(FILE *f, struct trace_point *tp)
{
	rte_trace_point_t *handle = tp->handle;

	fprintf(f, "\tid %d, %s, size is %d, %s\n",
		trace_id_get(handle), tp->name,
		(uint16_t)(*handle & __RTE_TRACE_FIELD_SIZE_MASK),
		rte_trace_point_is_enabled(handle) ? "enabled" : "disabled");
}

static void
trace_lcore_mem_dump(FILE *f)
{
	struct trace *trace = trace_obj_get();
	struct __rte_trace_header *header;
	uint32_t count;

	rte_spinlock_lock(&trace->lock);
	if (trace->nb_trace_mem_list == 0)
		goto out;
	fprintf(f, "nb_trace_mem_list = %d\n", trace->nb_trace_mem_list);
	fprintf(f, "\nTrace mem info\n--------------\n");
	for (count = 0; count < trace->nb_trace_mem_list; count++) {
		header = trace->lcore_meta[count].mem;
		fprintf(f, "\tid %d, mem=%p, area=%s, lcore_id=%d, name=%s\n",
		count, header,
		trace_area_to_string(trace->lcore_meta[count].area),
		header->stream_header.lcore_id,
		header->stream_header.thread_name);
	}
out:
	rte_spinlock_unlock(&trace->lock);
}

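/*
 * Dump the global trace configuration, the per-thread trace memory list
 * and the state of every registered trace point to the given stream.
 */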
void
rte_trace_dump(FILE *f)
{
	struct trace_point_head *tp_list = trace_list_head_get();
	struct trace *trace = trace_obj_get();
	struct trace_point *tp;

	fprintf(f, "\nGlobal info\n-----------\n");
	fprintf(f, "status = %s\n",
		rte_trace_is_enabled() ? "enabled" : "disabled");
	fprintf(f, "mode = %s\n",
		trace_mode_to_string(rte_trace_mode_get()));
	fprintf(f, "dir = %s\n", trace->dir);
	fprintf(f, "buffer len = %d\n", trace->buff_len);
	fprintf(f, "number of trace points = %d\n", trace->nb_trace_points);

	trace_lcore_mem_dump(f);
	fprintf(f, "\nTrace point info\n----------------\n");
	STAILQ_FOREACH(tp, tp_list, next)
		trace_point_dump(f, tp);
}

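/*
 * Fetch the name of the given thread via pthread_getname_np() where it is
 * available (Linux with glibc >= 2.12); otherwise leave the buffer as-is.
 */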
static void
thread_get_name(rte_thread_t id, char *name, size_t len)
{
#if defined(RTE_EXEC_ENV_LINUX) && defined(__GLIBC__) && defined(__GLIBC_PREREQ)
#if __GLIBC_PREREQ(2, 12)
	pthread_getname_np((pthread_t)id.opaque_id, name, len);
#endif
#endif
	RTE_SET_USED(id);
	RTE_SET_USED(name);
	RTE_SET_USED(len);
}

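/*
 * Allocate the per-thread trace buffer on first use: try huge page memory
 * first, fall back to the heap, then record the buffer in
 * trace->lcore_meta[] under the trace lock.
 */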
void
__rte_trace_mem_per_thread_alloc(void)
{
	struct trace *trace = trace_obj_get();
	struct __rte_trace_header *header;
	uint32_t count;

	if (!rte_trace_is_enabled())
		return;

	if (RTE_PER_LCORE(trace_mem))
		return;

	rte_spinlock_lock(&trace->lock);

	count = trace->nb_trace_mem_list;

	/* Allocate room for storing the thread trace mem meta */
	trace->lcore_meta = realloc(trace->lcore_meta,
		sizeof(trace->lcore_meta[0]) * (count + 1));

	/* Provide dummy space for fast path to consume */
	if (trace->lcore_meta == NULL) {
		trace_crit("trace mem meta memory realloc failed");
		header = NULL;
		goto fail;
	}

	/* First attempt from huge page */
	header = eal_malloc_no_trace(NULL, trace_mem_sz(trace->buff_len), 8);
	if (header) {
		trace->lcore_meta[count].area = TRACE_AREA_HUGEPAGE;
		goto found;
	}

	/* Second attempt from heap */
	header = malloc(trace_mem_sz(trace->buff_len));
	if (header == NULL) {
		trace_crit("trace mem malloc attempt failed");
		goto fail;
	}

	/* Second attempt from heap succeeded */
	trace->lcore_meta[count].area = TRACE_AREA_HEAP;

	/* Initialize the trace header */
found:
	header->offset = 0;
	header->len = trace->buff_len;
	header->stream_header.magic = TRACE_CTF_MAGIC;
	rte_uuid_copy(header->stream_header.uuid, trace->uuid);
	header->stream_header.lcore_id = rte_lcore_id();

	/* Store the thread name */
	char *name = header->stream_header.thread_name;
	memset(name, 0, __RTE_TRACE_EMIT_STRING_LEN_MAX);
	thread_get_name(rte_thread_self(), name,
		__RTE_TRACE_EMIT_STRING_LEN_MAX);

	trace->lcore_meta[count].mem = header;
	trace->nb_trace_mem_list++;
fail:
	RTE_PER_LCORE(trace_mem) = header;
	rte_spinlock_unlock(&trace->lock);
}

static void
trace_mem_per_thread_free_unlocked(struct thread_mem_meta *meta)
{
	if (meta->area == TRACE_AREA_HUGEPAGE)
		eal_free_no_trace(meta->mem);
	else if (meta->area == TRACE_AREA_HEAP)
		free(meta->mem);
}

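/*
 * Release the calling thread's trace buffer and drop its entry from the
 * trace->lcore_meta[] list, compacting the remaining entries.
 */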
void
trace_mem_per_thread_free(void)
{
	struct trace *trace = trace_obj_get();
	struct __rte_trace_header *header;
	uint32_t count;

	header = RTE_PER_LCORE(trace_mem);
	if (header == NULL)
		return;

	rte_spinlock_lock(&trace->lock);
	for (count = 0; count < trace->nb_trace_mem_list; count++) {
		if (trace->lcore_meta[count].mem == header)
			break;
	}
	if (count != trace->nb_trace_mem_list) {
		struct thread_mem_meta *meta = &trace->lcore_meta[count];

		trace_mem_per_thread_free_unlocked(meta);
		if (count != trace->nb_trace_mem_list - 1) {
			memmove(meta, meta + 1,
				sizeof(*meta) *
				 (trace->nb_trace_mem_list - count - 1));
		}
		trace->nb_trace_mem_list--;
	}
	rte_spinlock_unlock(&trace->lock);
}

void
trace_mem_free(void)
{
	struct trace *trace = trace_obj_get();
	uint32_t count;

	rte_spinlock_lock(&trace->lock);
	for (count = 0; count < trace->nb_trace_mem_list; count++) {
		trace_mem_per_thread_free_unlocked(&trace->lcore_meta[count]);
	}
	trace->nb_trace_mem_list = 0;
	rte_spinlock_unlock(&trace->lock);
}

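/*
 * Called for every field emitted from a trace point registration callback:
 * appends a CTF field declaration ("<datatype> <name>;") to the per-lcore
 * metadata string and accumulates the field size into trace_point_sz.
 */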
void
__rte_trace_point_emit_field(size_t sz, const char *in, const char *datatype)
{
	char *field;
	char *fixup;
	int rc;

	fixup = trace_metadata_fixup_field(in);
	if (fixup != NULL)
		in = fixup;
	rc = asprintf(&field, "%s        %s %s;\n",
		RTE_PER_LCORE(ctf_field) != NULL ?
			RTE_PER_LCORE(ctf_field) : "",
		datatype, in);
	free(RTE_PER_LCORE(ctf_field));
	free(fixup);
	if (rc == -1) {
		RTE_PER_LCORE(trace_point_sz) = 0;
		RTE_PER_LCORE(ctf_field) = NULL;
		trace_crit("could not allocate CTF field");
		return;
	}
	RTE_PER_LCORE(trace_point_sz) += sz;
	RTE_PER_LCORE(ctf_field) = field;
}

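/*
 * Register a trace point: register_fn() must emit the header and the fields
 * so that the payload size and CTF description get captured. The resulting
 * handle packs the payload size in its lower bits and the trace point ID
 * above __RTE_TRACE_FIELD_ID_SHIFT, alongside the enable/discard bits.
 */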
int
__rte_trace_point_register(rte_trace_point_t *handle, const char *name,
		void (*register_fn)(void))
{
	struct trace_point *tp;
	uint16_t sz;

	/* Sanity checks of arguments */
	if (name == NULL || register_fn == NULL || handle == NULL) {
		trace_err("invalid arguments");
		rte_errno = EINVAL;
		goto fail;
	}

	/* Check the size of the trace point object */
	RTE_PER_LCORE(trace_point_sz) = 0;
	register_fn();
	if (RTE_PER_LCORE(trace_point_sz) == 0) {
		trace_err("missing rte_trace_emit_header() in register fn");
		rte_errno = EBADF;
		goto fail;
	}

	/* Check for size overflow */
	if (RTE_PER_LCORE(trace_point_sz) > UINT16_MAX) {
		trace_err("trace point size overflowed");
		rte_errno = ENOSPC;
		goto fail;
	}

	/* Are we running out of space to store trace points? */
	if (trace.nb_trace_points > UINT16_MAX) {
		trace_err("trace point exceeds the max count");
		rte_errno = ENOSPC;
		goto fail;
	}

	/* Get the size of the trace point */
	sz = RTE_PER_LCORE(trace_point_sz);
	tp = calloc(1, sizeof(struct trace_point));
	if (tp == NULL) {
		trace_err("fail to allocate trace point memory");
		rte_errno = ENOMEM;
		goto fail;
	}

	/* Initialize the trace point */
	tp->name = name;

	/* Copy the accumulated fields description and clear it for the next
	 * trace point.
	 */
	tp->ctf_field = RTE_PER_LCORE(ctf_field);
	RTE_PER_LCORE(ctf_field) = NULL;

	/* Form the trace handle */
	*handle = sz;
	*handle |= trace.nb_trace_points << __RTE_TRACE_FIELD_ID_SHIFT;
	trace_mode_set(handle, trace.mode);

	trace.nb_trace_points++;
	tp->handle = handle;

	/* Add the trace point at tail */
	STAILQ_INSERT_TAIL(&tp_list, tp, next);
	rte_atomic_thread_fence(rte_memory_order_release);

	/* All Good !!! */
	return 0;

fail:
	if (trace.register_errno == 0)
		trace.register_errno = rte_errno;

	return -rte_errno;
}