/*   SPDX-License-Identifier: BSD-3-Clause
 *   Copyright (C) 2016 Intel Corporation.
 *   All rights reserved.
 */

#include "spdk/stdinc.h"

#include "spdk/env.h"
#include "spdk/string.h"
#include "spdk/trace.h"
#include "spdk/util.h"
#include "spdk/barrier.h"
#include "spdk/log.h"
#include "spdk/cpuset.h"
#include "spdk/likely.h"
#include "spdk/bit_array.h"
#include "trace_internal.h"

static int g_trace_fd = -1;
static char g_shm_name[64];

static __thread uint32_t t_ut_array_index;
static __thread struct spdk_trace_history *t_ut_lcore_history;

uint32_t g_user_thread_index_start;
struct spdk_trace_histories *g_trace_histories;
struct spdk_bit_array *g_ut_array;
pthread_mutex_t g_ut_array_mutex;

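/*
 * Translate a monotonically increasing entry offset into a slot of the per-lcore
 * circular buffer.  num_entries is expected to be a power of two, so the mask below is
 * equivalent to "offset % num_entries".
 */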
static inline struct spdk_trace_entry *
get_trace_entry(struct spdk_trace_history *history, uint64_t offset)
{
	return &history->entries[offset & (history->num_entries - 1)];
}

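/*
 * Record a single tracepoint entry in the current lcore's history (or, for a thread
 * registered via spdk_trace_register_user_thread(), in that thread's history).  The
 * fixed-size fields go into one spdk_trace_entry; argument data that does not fit is
 * spilled into additional spdk_trace_entry_buffer entries, which are tagged with
 * SPDK_TRACE_MAX_TPOINT_ID so that decoders can distinguish them from real entries.
 *
 * Callers normally go through the spdk_trace_record()/spdk_trace_record_tsc() wrappers
 * declared in spdk/trace.h, which first check whether the tracepoint is enabled.
 * Illustrative direct call (all identifiers below are hypothetical; the trailing
 * arguments must match the tracepoint's registered argument list):
 *
 *	_spdk_trace_record(0, TRACE_FOO, poller_id, io_size, (uint64_t)io, 1, queue_depth);
 */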
void
_spdk_trace_record(uint64_t tsc, uint16_t tpoint_id, uint16_t poller_id, uint32_t size,
		   uint64_t object_id, int num_args, ...)
{
	struct spdk_trace_history *lcore_history;
	struct spdk_trace_entry *next_entry;
	struct spdk_trace_entry_buffer *buffer;
	struct spdk_trace_tpoint *tpoint;
	struct spdk_trace_argument *argument;
	unsigned lcore, i, offset, num_entries, arglen, argoff, curlen;
	uint64_t intval;
	void *argval;
	va_list vl;

	lcore = spdk_env_get_current_core();
	if (spdk_likely(lcore != SPDK_ENV_LCORE_ID_ANY)) {
		lcore_history = spdk_get_per_lcore_history(g_trace_histories, lcore);
	} else if (t_ut_lcore_history != NULL) {
		lcore_history = t_ut_lcore_history;
	} else {
		return;
	}

	if (tsc == 0) {
		tsc = spdk_get_ticks();
	}

	lcore_history->tpoint_count[tpoint_id]++;

	tpoint = &g_trace_flags->tpoint[tpoint_id];
	/* Make sure that the number of arguments passed matches tracepoint definition */
	if (spdk_unlikely(tpoint->num_args != num_args)) {
		assert(0 && "Unexpected number of tracepoint arguments");
		return;
	}

	/* Get next entry index in the circular buffer */
	next_entry = get_trace_entry(lcore_history, lcore_history->next_entry);
	next_entry->tsc = tsc;
	next_entry->tpoint_id = tpoint_id;
	next_entry->poller_id = poller_id;
	next_entry->size = size;
	next_entry->object_id = object_id;

	num_entries = 1;
	buffer = (struct spdk_trace_entry_buffer *)next_entry;
	/* The initial offset needs to be adjusted by the fields present in the first entry
	 * (poller_id, size, etc.).
	 */
	offset = offsetof(struct spdk_trace_entry, args) -
		 offsetof(struct spdk_trace_entry_buffer, data);

	va_start(vl, num_args);
	for (i = 0; i < tpoint->num_args; ++i) {
		argument = &tpoint->args[i];
		switch (argument->type) {
		case SPDK_TRACE_ARG_TYPE_STR:
			argval = va_arg(vl, void *);
			arglen = strnlen((const char *)argval, argument->size - 1) + 1;
			break;
		case SPDK_TRACE_ARG_TYPE_INT:
		case SPDK_TRACE_ARG_TYPE_PTR:
			if (argument->size == 8) {
				intval = va_arg(vl, uint64_t);
			} else {
				intval = va_arg(vl, uint32_t);
			}
			argval = &intval;
			arglen = argument->size;
			break;
		default:
			assert(0 && "Invalid trace argument type");
			/* Clean up the va_list before bailing out on an unknown argument type */
			va_end(vl);
			return;
		}

		/* Copy the argument's data. For some argument types (strings), the user is
		 * allowed to pass a value that is either larger or smaller than what's defined
		 * in the tracepoint's description. If the value is larger, we'll truncate it,
		 * while if it's smaller, we'll only fill a portion of the buffer, without
		 * touching the rest. For instance, if the definition marks an argument as 40B
		 * and the user passes a 12B string, we'll only copy 13B (accounting for the
		 * NULL terminator).
		 */
		argoff = 0;
		while (argoff < argument->size) {
			/* Current buffer is full, we need to acquire another one */
			if (spdk_unlikely(offset == sizeof(buffer->data))) {
				buffer = (struct spdk_trace_entry_buffer *) get_trace_entry(
						 lcore_history,
						 lcore_history->next_entry + num_entries);
				buffer->tpoint_id = SPDK_TRACE_MAX_TPOINT_ID;
				buffer->tsc = tsc;
				num_entries++;
				offset = 0;
			}

			curlen = spdk_min(sizeof(buffer->data) - offset, argument->size - argoff);
			if (spdk_likely(argoff < arglen)) {
				assert(argval != NULL);
				memcpy(&buffer->data[offset], (uint8_t *)argval + argoff,
				       spdk_min(curlen, arglen - argoff));
			}

			offset += curlen;
			argoff += curlen;
		}

		/* Make sure that truncated strings are NULL-terminated */
		if (spdk_unlikely(argument->type == SPDK_TRACE_ARG_TYPE_STR)) {
			assert(offset > 0);
			buffer->data[offset - 1] = '\0';
		}
	}
	va_end(vl);

	/* Ensure all elements of the trace entry are visible to outside trace tools */
	spdk_smp_wmb();
	lcore_history->next_entry += num_entries;
}

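/*
 * Claim one of the num_threads user-thread history slots reserved by spdk_trace_init()
 * for the calling thread.  This is only relevant for threads that are not pinned to an
 * SPDK reactor core, i.e. where spdk_env_get_current_core() returns
 * SPDK_ENV_LCORE_ID_ANY.  A sketch of the expected lifecycle (error handling omitted):
 *
 *	if (spdk_trace_register_user_thread() == 0) {
 *		... record tracepoints from this thread ...
 *		spdk_trace_unregister_user_thread();
 *	}
 */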
int
spdk_trace_register_user_thread(void)
{
	if (!g_ut_array) {
		SPDK_ERRLOG("user thread array not created\n");
		return -ENOMEM;
	}

	if (spdk_env_get_current_core() != SPDK_ENV_LCORE_ID_ANY) {
		SPDK_ERRLOG("cannot register a user thread from a dedicated cpu %d\n",
			    spdk_env_get_current_core());
		return -EINVAL;
	}

	pthread_mutex_lock(&g_ut_array_mutex);

	t_ut_array_index = spdk_bit_array_find_first_clear(g_ut_array, 0);
	if (t_ut_array_index == UINT32_MAX) {
		SPDK_ERRLOG("could not find an entry in the user thread array\n");
		pthread_mutex_unlock(&g_ut_array_mutex);
		return -ENOENT;
	}

	t_ut_lcore_history = spdk_get_per_lcore_history(g_trace_histories,
			     t_ut_array_index + g_user_thread_index_start);

	spdk_bit_array_set(g_ut_array, t_ut_array_index);

	pthread_mutex_unlock(&g_ut_array_mutex);

	return 0;
}

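/*
 * Release the user-thread history slot claimed by spdk_trace_register_user_thread().
 * Must be called from the same thread that registered, since the slot index is kept in
 * thread-local storage (t_ut_array_index).
 */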
int
spdk_trace_unregister_user_thread(void)
{
	if (!g_ut_array) {
		SPDK_ERRLOG("user thread array not created\n");
		return -ENOMEM;
	}

	if (spdk_env_get_current_core() != SPDK_ENV_LCORE_ID_ANY) {
		SPDK_ERRLOG("cannot unregister a user thread from a dedicated cpu %d\n",
			    spdk_env_get_current_core());
		return -EINVAL;
	}

	pthread_mutex_lock(&g_ut_array_mutex);

	spdk_bit_array_clear(g_ut_array, t_ut_array_index);

	pthread_mutex_unlock(&g_ut_array_mutex);

	return 0;
}

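/*
 * Create, size and map the trace shared-memory file.  The layout is a struct
 * spdk_trace_flags header, followed by one spdk_trace_history region per core in the
 * application's core mask, followed by num_threads extra regions for registered user
 * threads.  The byte offset of every region is published in
 * flags->lcore_history_offsets[] (entry SPDK_TRACE_MAX_LCORE holds the total size,
 * unused slots stay 0) so that external tools such as the spdk_trace app can mmap the
 * same file and decode the entries.
 */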
int
spdk_trace_init(const char *shm_name, uint64_t num_entries, uint32_t num_threads)
{
	uint32_t i = 0, max_dedicated_cpu = 0;
	int histories_size;
	uint64_t lcore_offsets[SPDK_TRACE_MAX_LCORE + 1] = { 0 };
	struct spdk_cpuset cpuset = {};

	/* 0 entries requested - skip trace initialization */
	if (num_entries == 0) {
		return 0;
	}

	if (num_threads >= SPDK_TRACE_MAX_LCORE) {
		SPDK_ERRLOG("cannot alloc trace entries for %d user threads\n", num_threads);
		SPDK_ERRLOG("the maximum supported is %d threads\n", SPDK_TRACE_MAX_LCORE - 1);
		return 1;
	}

	spdk_cpuset_zero(&cpuset);
	histories_size = sizeof(struct spdk_trace_flags);
	SPDK_ENV_FOREACH_CORE(i) {
		spdk_cpuset_set_cpu(&cpuset, i, true);
		lcore_offsets[i] = histories_size;
		histories_size += spdk_get_trace_history_size(num_entries);
		max_dedicated_cpu = i;
	}

	g_user_thread_index_start = max_dedicated_cpu + 1;

	if (g_user_thread_index_start + num_threads > SPDK_TRACE_MAX_LCORE) {
		SPDK_ERRLOG("user threads overlap with the threads on dedicated cpus\n");
		return 1;
	}

	g_ut_array = spdk_bit_array_create(num_threads);
	if (!g_ut_array) {
		SPDK_ERRLOG("could not create bit array for threads\n");
		return 1;
	}

	for (i = g_user_thread_index_start; i < g_user_thread_index_start + num_threads; i++) {
		lcore_offsets[i] = histories_size;
		histories_size += spdk_get_trace_history_size(num_entries);
	}

	lcore_offsets[SPDK_TRACE_MAX_LCORE] = histories_size;

	snprintf(g_shm_name, sizeof(g_shm_name), "%s", shm_name);

	g_trace_fd = shm_open(shm_name, O_RDWR | O_CREAT, 0600);
	if (g_trace_fd == -1) {
		SPDK_ERRLOG("could not shm_open spdk_trace\n");
		SPDK_ERRLOG("errno=%d %s\n", errno, spdk_strerror(errno));
		spdk_bit_array_free(&g_ut_array);
		return 1;
	}

	if (ftruncate(g_trace_fd, histories_size) != 0) {
		SPDK_ERRLOG("could not truncate shm\n");
		goto trace_init_err;
	}

	g_trace_histories = mmap(NULL, histories_size, PROT_READ | PROT_WRITE,
				 MAP_SHARED, g_trace_fd, 0);
	if (g_trace_histories == MAP_FAILED) {
		SPDK_ERRLOG("could not mmap shm\n");
		goto trace_init_err;
	}

	/* TODO: On FreeBSD, mlock on shm_open'd memory doesn't seem to work.  Docs say that kern.ipc.shm_use_phys=1
	 * should allow it, but forcing that doesn't seem to work either.  So for now just skip mlock on FreeBSD
	 * altogether.
	 */
#if defined(__linux__)
	if (mlock(g_trace_histories, histories_size) != 0) {
		SPDK_ERRLOG("Could not mlock shm for tracing - %s.\n", spdk_strerror(errno));
		if (errno == ENOMEM) {
			SPDK_ERRLOG("Check /dev/shm for old tracing files that can be deleted.\n");
		}
		goto trace_init_err;
	}
#endif

	memset(g_trace_histories, 0, histories_size);

	g_trace_flags = &g_trace_histories->flags;

	g_trace_flags->tsc_rate = spdk_get_ticks_hz();

	for (i = 0; i < SPDK_TRACE_MAX_LCORE; i++) {
		struct spdk_trace_history *lcore_history;

		g_trace_flags->lcore_history_offsets[i] = lcore_offsets[i];
		if (lcore_offsets[i] == 0) {
			continue;
		}

		if (i <= max_dedicated_cpu) {
			assert(spdk_cpuset_get_cpu(&cpuset, i));
		}

		lcore_history = spdk_get_per_lcore_history(g_trace_histories, i);
		lcore_history->lcore = i;
		lcore_history->num_entries = num_entries;
	}
	g_trace_flags->lcore_history_offsets[SPDK_TRACE_MAX_LCORE] = lcore_offsets[SPDK_TRACE_MAX_LCORE];

	spdk_trace_flags_init();

	return 0;

trace_init_err:
	if (g_trace_histories != MAP_FAILED) {
		munmap(g_trace_histories, histories_size);
	}
	close(g_trace_fd);
	g_trace_fd = -1;
	shm_unlink(shm_name);
	spdk_bit_array_free(&g_ut_array);
	g_trace_histories = NULL;

	return 1;

}

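/*
 * Tear down tracing for this process.  The shm file is only unlinked when no entries
 * were recorded; otherwise it is left behind so it can still be decoded after the
 * process exits or crashes.
 */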
void
spdk_trace_cleanup(void)
{
	bool unlink = true;
	int i;
	struct spdk_trace_history *lcore_history;

	if (g_trace_histories == NULL) {
		return;
	}

	/*
	 * Only unlink the shm if no trace entries were recorded. This ensures the file
	 * can still be used for debugging after this process exits/crashes.
	 * Note that we have to calculate this value before g_trace_histories gets unmapped.
	 */
	for (i = 0; i < SPDK_TRACE_MAX_LCORE; i++) {
		lcore_history = spdk_get_per_lcore_history(g_trace_histories, i);
		if (lcore_history == NULL) {
			continue;
		}
		unlink = lcore_history->entries[0].tsc == 0;
		if (!unlink) {
			break;
		}
	}

	munmap(g_trace_histories, sizeof(struct spdk_trace_histories));
	g_trace_histories = NULL;
	close(g_trace_fd);
	spdk_bit_array_free(&g_ut_array);

	if (unlink) {
		shm_unlink(g_shm_name);
	}
}

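/* Internal helper: return the shm file name recorded by spdk_trace_init(). */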
const char *
trace_get_shm_name(void)
{
	return g_shm_name;
}