1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 /*
27 * DTrace - Dynamic Tracing for Solaris
28 *
29 * This is the implementation of the Solaris Dynamic Tracing framework
30 * (DTrace). The user-visible interface to DTrace is described at length in
31 * the "Solaris Dynamic Tracing Guide". The interfaces between the libdtrace
32 * library, the in-kernel DTrace framework, and the DTrace providers are
33 * described in the block comments in the <sys/dtrace.h> header file. The
34 * internal architecture of DTrace is described in the block comments in the
35 * <sys/dtrace_impl.h> header file. The comments contained within the DTrace
36 * implementation very much assume mastery of all of these sources; if one has
37 * an unanswered question about the implementation, one should consult them
38 * first.
39 *
40 * The functions here are ordered roughly as follows:
41 *
42 * - Probe context functions
43 * - Probe hashing functions
44 * - Non-probe context utility functions
45 * - Matching functions
46 * - Provider-to-Framework API functions
47 * - Probe management functions
48 * - DIF object functions
49 * - Format functions
50 * - Predicate functions
51 * - ECB functions
52 * - Buffer functions
53 * - Enabling functions
54 * - DOF functions
55 * - Anonymous enabling functions
56 * - Consumer state functions
57 * - Helper functions
58 * - Hook functions
59 * - Driver cookbook functions
60 *
61 * Each group of functions begins with a block comment labelled the "DTrace
62 * [Group] Functions", allowing one to find each block by searching forward
63 * on capital-f functions.
64 */
65 #include <sys/errno.h>
66 #include <sys/stat.h>
67 #include <sys/modctl.h>
68 #include <sys/conf.h>
69 #include <sys/systm.h>
70 #include <sys/ddi.h>
71 #include <sys/sunddi.h>
72 #include <sys/cpuvar.h>
73 #include <sys/kmem.h>
74 #include <sys/strsubr.h>
75 #include <sys/sysmacros.h>
76 #include <sys/dtrace_impl.h>
77 #include <sys/atomic.h>
78 #include <sys/cmn_err.h>
79 #include <sys/mutex_impl.h>
80 #include <sys/rwlock_impl.h>
81 #include <sys/ctf_api.h>
82 #include <sys/panic.h>
83 #include <sys/priv_impl.h>
84 #include <sys/policy.h>
85 #include <sys/cred_impl.h>
86 #include <sys/procfs_isa.h>
87 #include <sys/taskq.h>
88 #include <sys/mkdev.h>
89 #include <sys/kdi.h>
90 #include <sys/zone.h>
91 #include <sys/socket.h>
92 #include <netinet/in.h>
93
94 /*
95 * DTrace Tunable Variables
96 *
97 * The following variables may be tuned by adding a line to /etc/system that
98 * includes both the name of the DTrace module ("dtrace") and the name of the
99 * variable. For example:
100 *
101 * set dtrace:dtrace_destructive_disallow = 1
102 *
103 * In general, the only variables that one should be tuning this way are those
104 * that affect system-wide DTrace behavior, and for which the default behavior
105 * is undesirable. Most of these variables are tunable on a per-consumer
106 * basis using DTrace options, and need not be tuned on a system-wide basis.
107 * When tuning these variables, avoid pathological values; while some attempt
108 * is made to verify the integrity of these variables, they are not considered
109 * part of the supported interface to DTrace, and they are therefore not
110 * checked comprehensively. Further, these variables should not be tuned
111 * dynamically via "mdb -kw" or other means; they should only be tuned via
112 * /etc/system.
113 */
114 int dtrace_destructive_disallow = 0;
115 dtrace_optval_t dtrace_nonroot_maxsize = (16 * 1024 * 1024);
116 size_t dtrace_difo_maxsize = (256 * 1024);
117 dtrace_optval_t dtrace_dof_maxsize = (256 * 1024);
118 size_t dtrace_global_maxsize = (16 * 1024);
119 size_t dtrace_actions_max = (16 * 1024);
120 size_t dtrace_retain_max = 1024;
121 dtrace_optval_t dtrace_helper_actions_max = 32;
122 dtrace_optval_t dtrace_helper_providers_max = 32;
123 dtrace_optval_t dtrace_dstate_defsize = (1 * 1024 * 1024);
124 size_t dtrace_strsize_default = 256;
125 dtrace_optval_t dtrace_cleanrate_default = 9900990; /* 101 hz */
126 dtrace_optval_t dtrace_cleanrate_min = 200000; /* 5000 hz */
127 dtrace_optval_t dtrace_cleanrate_max = (uint64_t)60 * NANOSEC; /* 1/minute */
128 dtrace_optval_t dtrace_aggrate_default = NANOSEC; /* 1 hz */
129 dtrace_optval_t dtrace_statusrate_default = NANOSEC; /* 1 hz */
130 dtrace_optval_t dtrace_statusrate_max = (hrtime_t)10 * NANOSEC; /* 6/minute */
131 dtrace_optval_t dtrace_switchrate_default = NANOSEC; /* 1 hz */
132 dtrace_optval_t dtrace_nspec_default = 1;
133 dtrace_optval_t dtrace_specsize_default = 32 * 1024;
134 dtrace_optval_t dtrace_stackframes_default = 20;
135 dtrace_optval_t dtrace_ustackframes_default = 20;
136 dtrace_optval_t dtrace_jstackframes_default = 50;
137 dtrace_optval_t dtrace_jstackstrsize_default = 512;
138 int dtrace_msgdsize_max = 128;
139 hrtime_t dtrace_chill_max = 500 * (NANOSEC / MILLISEC); /* 500 ms */
140 hrtime_t dtrace_chill_interval = NANOSEC; /* 1000 ms */
141 int dtrace_devdepth_max = 32;
142 int dtrace_err_verbose;
143 hrtime_t dtrace_deadman_interval = NANOSEC;
144 hrtime_t dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
145 hrtime_t dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
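
/*
 * As an illustration of the per-consumer alternative mentioned above (a
 * hypothetical invocation, not output from this file): rather than raising
 * dtrace_strsize_default system-wide via /etc/system, a single consumer can
 * typically request a larger string size for one session by setting the
 * corresponding option, e.g.:
 *
 *	# dtrace -x strsize=1024 -n 'syscall::open*:entry { trace(copyinstr(arg0)); }'
 */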
146
147 /*
148 * DTrace External Variables
149 *
150 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
151 * available to DTrace consumers via the backtick (`) syntax. One of these,
152 * dtrace_zero, is made deliberately so: it is provided as a source of
153 * well-known, zero-filled memory. While this variable is not documented,
154 * it is used by some translators as an implementation detail.
155 */
156 const char dtrace_zero[256] = { 0 }; /* zero-filled memory */
157
158 /*
159 * DTrace Internal Variables
160 */
161 static dev_info_t *dtrace_devi; /* device info */
162 static vmem_t *dtrace_arena; /* probe ID arena */
163 static vmem_t *dtrace_minor; /* minor number arena */
164 static taskq_t *dtrace_taskq; /* task queue */
165 static dtrace_probe_t **dtrace_probes; /* array of all probes */
166 static int dtrace_nprobes; /* number of probes */
167 static dtrace_provider_t *dtrace_provider; /* provider list */
168 static dtrace_meta_t *dtrace_meta_pid; /* user-land meta provider */
169 static int dtrace_opens; /* number of opens */
170 static int dtrace_helpers; /* number of helpers */
171 static void *dtrace_softstate; /* softstate pointer */
172 static dtrace_hash_t *dtrace_bymod; /* probes hashed by module */
173 static dtrace_hash_t *dtrace_byfunc; /* probes hashed by function */
174 static dtrace_hash_t *dtrace_byname; /* probes hashed by name */
175 static dtrace_toxrange_t *dtrace_toxrange; /* toxic range array */
176 static int dtrace_toxranges; /* number of toxic ranges */
177 static int dtrace_toxranges_max; /* size of toxic range array */
178 static dtrace_anon_t dtrace_anon; /* anonymous enabling */
179 static kmem_cache_t *dtrace_state_cache; /* cache for dynamic state */
180 static uint64_t dtrace_vtime_references; /* number of vtimestamp refs */
181 static kthread_t *dtrace_panicked; /* panicking thread */
182 static dtrace_ecb_t *dtrace_ecb_create_cache; /* cached created ECB */
183 static dtrace_genid_t dtrace_probegen; /* current probe generation */
184 static dtrace_helpers_t *dtrace_deferred_pid; /* deferred helper list */
185 static dtrace_enabling_t *dtrace_retained; /* list of retained enablings */
186 static dtrace_genid_t dtrace_retained_gen; /* current retained enab gen */
187 static dtrace_dynvar_t dtrace_dynhash_sink; /* end of dynamic hash chains */
188 static int dtrace_dynvar_failclean; /* dynvars failed to clean */
189
190 /*
191 * DTrace Locking
192 * DTrace is protected by three (relatively coarse-grained) locks:
193 *
194 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
195 * including enabling state, probes, ECBs, consumer state, helper state,
196 * etc. Importantly, dtrace_lock is _not_ required when in probe context;
197 * probe context is lock-free -- synchronization is handled via the
198 * dtrace_sync() cross call mechanism.
199 *
200 * (2) dtrace_provider_lock is required when manipulating provider state, or
201 * when provider state must be held constant.
202 *
203 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
204 * when meta provider state must be held constant.
205 *
206 * The lock ordering between these three locks is dtrace_meta_lock before
207 * dtrace_provider_lock before dtrace_lock. (In particular, there are
208 * several places where dtrace_provider_lock is held by the framework as it
209 * calls into the providers -- which then call back into the framework,
210 * grabbing dtrace_lock.)
211 *
212 * There are two other locks in the mix: mod_lock and cpu_lock. With respect
213 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
214 * role as a coarse-grained lock; it is acquired before both of these locks.
215 * With respect to dtrace_meta_lock, its behavior is stranger: cpu_lock must
216 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
217 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
218 * acquired _between_ dtrace_provider_lock and dtrace_lock.
219 */
220 static kmutex_t dtrace_lock; /* probe state lock */
221 static kmutex_t dtrace_provider_lock; /* provider state lock */
222 static kmutex_t dtrace_meta_lock; /* meta-provider state lock */
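
/*
 * To make the ordering above concrete, a minimal sketch (not a verbatim
 * excerpt from this file) of code that must hold both provider state and
 * framework state constant would nest its acquisitions as follows:
 *
 *	mutex_enter(&cpu_lock);
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&dtrace_lock);
 *	... examine or update provider and framework state ...
 *	mutex_exit(&dtrace_lock);
 *	mutex_exit(&dtrace_provider_lock);
 *	mutex_exit(&cpu_lock);
 *
 * Releasing in the reverse order of acquisition is conventional; the hard
 * constraint is only the acquisition order described above.
 */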
223
224 /*
225 * DTrace Provider Variables
226 *
227 * These are the variables relating to DTrace as a provider (that is, the
228 * provider of the BEGIN, END, and ERROR probes).
229 */
230 static dtrace_pattr_t dtrace_provider_attr = {
231 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
232 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
233 { DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
234 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
235 { DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
236 };
237
238 static void
239 dtrace_nullop(void)
240 {}
241
242 static int
243 dtrace_enable_nullop(void)
244 {
245 return (0);
246 }
247
248 static dtrace_pops_t dtrace_provider_ops = {
249 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop,
250 (void (*)(void *, struct modctl *))dtrace_nullop,
251 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop,
252 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
253 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
254 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop,
255 NULL,
256 NULL,
257 NULL,
258 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop
259 };
260
261 static dtrace_id_t dtrace_probeid_begin; /* special BEGIN probe */
262 static dtrace_id_t dtrace_probeid_end; /* special END probe */
263 dtrace_id_t dtrace_probeid_error; /* special ERROR probe */
264
265 /*
266 * DTrace Helper Tracing Variables
267 */
268 uint32_t dtrace_helptrace_next = 0;
269 uint32_t dtrace_helptrace_nlocals;
270 char *dtrace_helptrace_buffer;
271 int dtrace_helptrace_bufsize = 512 * 1024;
272
273 #ifdef DEBUG
274 int dtrace_helptrace_enabled = 1;
275 #else
276 int dtrace_helptrace_enabled = 0;
277 #endif
278
279 /*
280 * DTrace Error Hashing
281 *
282 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
283 * table. This is very useful for checking coverage of tests that are
284 * expected to induce DIF or DOF processing errors, and may be useful for
285 * debugging problems in the DIF code generator or in DOF generation.  The
286 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
287 */
288 #ifdef DEBUG
289 static dtrace_errhash_t dtrace_errhash[DTRACE_ERRHASHSZ];
290 static const char *dtrace_errlast;
291 static kthread_t *dtrace_errthread;
292 static kmutex_t dtrace_errlock;
293 #endif
294
295 /*
296 * DTrace Macros and Constants
297 *
298 * These are various macros that are useful in various spots in the
299 * implementation, along with a few random constants that have no meaning
300 * outside of the implementation. There is no real structure to this cpp
301 * mishmash -- but is there ever?
302 */
303 #define DTRACE_HASHSTR(hash, probe) \
304 dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))
305
306 #define DTRACE_HASHNEXT(hash, probe) \
307 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)
308
309 #define DTRACE_HASHPREV(hash, probe) \
310 (dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)
311
312 #define DTRACE_HASHEQ(hash, lhs, rhs) \
313 (strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
314 *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)
315
316 #define DTRACE_AGGHASHSIZE_SLEW 17
317
318 #define DTRACE_V4MAPPED_OFFSET (sizeof (uint32_t) * 3)
319
320 /*
321 * The key for a thread-local variable consists of the lower 61 bits of the
322 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
323 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
324 * equal to a variable identifier. This is necessary (but not sufficient) to
325 * assure that global associative arrays never collide with thread-local
326 * variables. To guarantee that they cannot collide, we must also define the
327 * order for keying dynamic variables. That order is:
328 *
329 * [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
330 *
331 * Because the variable-key and the tls-key are in orthogonal spaces, there is
332 * no way for a global variable key signature to match a thread-local key
333 * signature.
334 */
335 #define DTRACE_TLS_THRKEY(where) { \
336 uint_t intr = 0; \
337 uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
338 for (; actv; actv >>= 1) \
339 intr++; \
340 ASSERT(intr < (1 << 3)); \
341 (where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
342 (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
343 }
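
/*
 * As a worked example of the key construction above (illustrative only):
 * for a thread with a t_did of 0x123 and no interrupts active above
 * LOCK_LEVEL, the loop leaves intr at 0 and DTRACE_TLS_THRKEY() yields
 * (0x123 + DIF_VARIABLE_MAX) in the low 61 bits with 0 in the top 3 bits.
 * If the highest set bit of the shifted cpu_intr_actv were bit 2, intr
 * would instead be 3, and 3 would occupy the top 3 bits of the key.
 */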
344
345 #define DT_BSWAP_8(x) ((x) & 0xff)
346 #define DT_BSWAP_16(x) ((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
347 #define DT_BSWAP_32(x) ((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
348 #define DT_BSWAP_64(x) ((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))
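
/*
 * For example (a worked illustration of the byte-swap macros above),
 * DT_BSWAP_16(0x1234) evaluates to ((0x34 << 8) | 0x12) == 0x3412, and
 * DT_BSWAP_32(0x11223344) evaluates to 0x44332211.
 */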
349
350 #define DT_MASK_LO 0x00000000FFFFFFFFULL
351
352 #define DTRACE_STORE(type, tomax, offset, what) \
353 *((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);
354
355 #ifndef __i386
356 #define DTRACE_ALIGNCHECK(addr, size, flags) \
357 if (addr & (size - 1)) { \
358 *flags |= CPU_DTRACE_BADALIGN; \
359 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
360 return (0); \
361 }
362 #else
363 #define DTRACE_ALIGNCHECK(addr, size, flags)
364 #endif
365
366 /*
367 * Test whether a range of memory starting at testaddr of size testsz falls
368 * within the range of memory described by addr, sz. We take care to avoid
369 * problems with overflow and underflow of the unsigned quantities, and
370 * disallow all negative sizes. Ranges of size 0 are allowed.
371 */
372 #define DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
373 ((testaddr) - (baseaddr) < (basesz) && \
374 (testaddr) + (testsz) - (baseaddr) <= (basesz) && \
375 (testaddr) + (testsz) >= (testaddr))
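
/*
 * To see how the unsigned arithmetic above avoids overflow problems (a
 * worked illustration, not additional checking): if testaddr lies below
 * baseaddr, the subtraction (testaddr - baseaddr) wraps to a very large
 * unsigned value and the first comparison fails; if (testaddr + testsz)
 * would wrap past the top of the address space, the wrapped sum is less
 * than testaddr and the third comparison fails.
 */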
376
377 /*
378 * Test whether alloc_sz bytes will fit in the scratch region. We isolate
379 * alloc_sz on the righthand side of the comparison in order to avoid overflow
380 * or underflow in the comparison with it. This is simpler than the INRANGE
381 * check above, because we know that the dtms_scratch_ptr is valid in the
382 * range. Allocations of size zero are allowed.
383 */
384 #define DTRACE_INSCRATCH(mstate, alloc_sz) \
385 ((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
386 (mstate)->dtms_scratch_ptr >= (alloc_sz))
387
388 #define DTRACE_LOADFUNC(bits) \
389 /*CSTYLED*/ \
390 uint##bits##_t \
391 dtrace_load##bits(uintptr_t addr) \
392 { \
393 size_t size = bits / NBBY; \
394 /*CSTYLED*/ \
395 uint##bits##_t rval; \
396 int i; \
397 volatile uint16_t *flags = (volatile uint16_t *) \
398 &cpu_core[CPU->cpu_id].cpuc_dtrace_flags; \
399 \
400 DTRACE_ALIGNCHECK(addr, size, flags); \
401 \
402 for (i = 0; i < dtrace_toxranges; i++) { \
403 if (addr >= dtrace_toxrange[i].dtt_limit) \
404 continue; \
405 \
406 if (addr + size <= dtrace_toxrange[i].dtt_base) \
407 continue; \
408 \
409 /* \
410 * This address falls within a toxic region; return 0. \
411 */ \
412 *flags |= CPU_DTRACE_BADADDR; \
413 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = addr; \
414 return (0); \
415 } \
416 \
417 *flags |= CPU_DTRACE_NOFAULT; \
418 /*CSTYLED*/ \
419 rval = *((volatile uint##bits##_t *)addr); \
420 *flags &= ~CPU_DTRACE_NOFAULT; \
421 \
422 return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0); \
423 }
424
425 #ifdef _LP64
426 #define dtrace_loadptr dtrace_load64
427 #else
428 #define dtrace_loadptr dtrace_load32
429 #endif
430
431 #define DTRACE_DYNHASH_FREE 0
432 #define DTRACE_DYNHASH_SINK 1
433 #define DTRACE_DYNHASH_VALID 2
434
435 #define DTRACE_MATCH_FAIL -1
436 #define DTRACE_MATCH_NEXT 0
437 #define DTRACE_MATCH_DONE 1
438 #define DTRACE_ANCHORED(probe) ((probe)->dtpr_func[0] != '\0')
439 #define DTRACE_STATE_ALIGN 64
440
441 #define DTRACE_FLAGS2FLT(flags) \
442 (((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR : \
443 ((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP : \
444 ((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO : \
445 ((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV : \
446 ((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV : \
447 ((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW : \
448 ((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN : \
449 ((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH : \
450 ((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK : \
451 DTRACEFLT_UNKNOWN)
452
453 #define DTRACEACT_ISSTRING(act) \
454 ((act)->dta_kind == DTRACEACT_DIFEXPR && \
455 (act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)
456
457 static size_t dtrace_strlen(const char *, size_t);
458 static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
459 static void dtrace_enabling_provide(dtrace_provider_t *);
460 static int dtrace_enabling_match(dtrace_enabling_t *, int *);
461 static void dtrace_enabling_matchall(void);
462 static dtrace_state_t *dtrace_anon_grab(void);
463 static uint64_t dtrace_helper(int, dtrace_mstate_t *,
464 dtrace_state_t *, uint64_t, uint64_t);
465 static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
466 static void dtrace_buffer_drop(dtrace_buffer_t *);
467 static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
468 dtrace_state_t *, dtrace_mstate_t *);
469 static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
470 dtrace_optval_t);
471 static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
472 static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
473
474 /*
475 * DTrace Probe Context Functions
476 *
477 * These functions are called from probe context. Because probe context is
478 * any context in which C may be called, arbitrary locks may be held,
479 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
480 * As a result, functions called from probe context may only call other DTrace
481 * support functions -- they may not interact at all with the system at large.
482 * (Note that the ASSERT macro is made probe-context safe by redefining it in
483 * terms of dtrace_assfail(), a probe-context safe function.) If arbitrary
484 * loads are to be performed from probe context, they _must_ be in terms of
485 * the safe dtrace_load*() variants.
486 *
487 * Some functions in this block are not actually called from probe context;
488 * for these functions, there will be a comment above the function reading
489 * "Note: not called from probe context."
490 */
491 void
492 dtrace_panic(const char *format, ...)
493 {
494 va_list alist;
495
496 va_start(alist, format);
497 dtrace_vpanic(format, alist);
498 va_end(alist);
499 }
500
501 int
502 dtrace_assfail(const char *a, const char *f, int l)
503 {
504 dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);
505
506 /*
507 * We just need something here that even the most clever compiler
508 * cannot optimize away.
509 */
510 return (a[(uintptr_t)f]);
511 }
512
513 /*
514 * Atomically increment a specified error counter from probe context.
515 */
516 static void
517 dtrace_error(uint32_t *counter)
518 {
519 /*
520 * Most counters stored to in probe context are per-CPU counters.
521 * However, there are some error conditions that are sufficiently
522 * arcane that they don't merit per-CPU storage. If these counters
523 * are incremented concurrently on different CPUs, scalability will be
524 * adversely affected -- but we don't expect them to be white-hot in a
525 * correctly constructed enabling...
526 */
527 uint32_t oval, nval;
528
529 do {
530 oval = *counter;
531
532 if ((nval = oval + 1) == 0) {
533 /*
534 * If the counter would wrap, set it to 1 -- assuring
535 * that the counter is never zero when we have seen
536 * errors. (The counter must be 32-bits because we
537 * aren't guaranteed a 64-bit compare&swap operation.)
538 * To save this code both the infamy of being fingered
539 * by a priggish news story and the indignity of being
540 * the target of a neo-puritan witch trial, we're
541 * carefully avoiding any colorful description of the
542 * likelihood of this condition -- but suffice it to
543 * say that it is only slightly more likely than the
544 * overflow of predicate cache IDs, as discussed in
545 * dtrace_predicate_create().
546 */
547 nval = 1;
548 }
549 } while (dtrace_cas32(counter, oval, nval) != oval);
550 }
551
552 /*
553 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
554 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
555 */
556 DTRACE_LOADFUNC(8)
557 DTRACE_LOADFUNC(16)
558 DTRACE_LOADFUNC(32)
559 DTRACE_LOADFUNC(64)
560
561 static int
562 dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
563 {
564 if (dest < mstate->dtms_scratch_base)
565 return (0);
566
567 if (dest + size < dest)
568 return (0);
569
570 if (dest + size > mstate->dtms_scratch_ptr)
571 return (0);
572
573 return (1);
574 }
575
576 static int
577 dtrace_canstore_statvar(uint64_t addr, size_t sz,
578 dtrace_statvar_t **svars, int nsvars)
579 {
580 int i;
581
582 for (i = 0; i < nsvars; i++) {
583 dtrace_statvar_t *svar = svars[i];
584
585 if (svar == NULL || svar->dtsv_size == 0)
586 continue;
587
588 if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
589 return (1);
590 }
591
592 return (0);
593 }
594
595 /*
596 * Check to see if the address is within a memory region to which a store may
597 * be issued. This includes the DTrace scratch areas, and any DTrace variable
598 * region. The caller of dtrace_canstore() is responsible for performing any
599 * alignment checks that are needed before stores are actually executed.
600 */
601 static int
602 dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
603 dtrace_vstate_t *vstate)
604 {
605 /*
606 * First, check to see if the address is in scratch space...
607 */
608 if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
609 mstate->dtms_scratch_size))
610 return (1);
611
612 /*
613 * Now check to see if it's a dynamic variable. This check will pick
614 * up both thread-local variables and any global dynamically-allocated
615 * variables.
616 */
617 if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
618 vstate->dtvs_dynvars.dtds_size)) {
619 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
620 uintptr_t base = (uintptr_t)dstate->dtds_base +
621 (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
622 uintptr_t chunkoffs;
623
624 /*
625 * Before we assume that we can store here, we need to make
626 * sure that it isn't in our metadata -- storing to our
627 * dynamic variable metadata would corrupt our state. For
628 * the range to not include any dynamic variable metadata,
629 * it must:
630 *
631 * (1) Start above the hash table that is at the base of
632 * the dynamic variable space
633 *
634 * (2) Have a starting chunk offset that is beyond the
635 * dtrace_dynvar_t that is at the base of every chunk
636 *
637 * (3) Not span a chunk boundary
638 *
639 */
640 if (addr < base)
641 return (0);
642
643 chunkoffs = (addr - base) % dstate->dtds_chunksize;
644
645 if (chunkoffs < sizeof (dtrace_dynvar_t))
646 return (0);
647
648 if (chunkoffs + sz > dstate->dtds_chunksize)
649 return (0);
650
651 return (1);
652 }
653
654 /*
655 * Finally, check the static local and global variables. These checks
656 * take the longest, so we perform them last.
657 */
658 if (dtrace_canstore_statvar(addr, sz,
659 vstate->dtvs_locals, vstate->dtvs_nlocals))
660 return (1);
661
662 if (dtrace_canstore_statvar(addr, sz,
663 vstate->dtvs_globals, vstate->dtvs_nglobals))
664 return (1);
665
666 return (0);
667 }
668
669
670 /*
671 * Convenience routine to check to see if the address is within a memory
672 * region in which a load may be issued given the user's privilege level;
673 * if not, it sets the appropriate error flags and loads 'addr' into the
674 * illegal value slot.
675 *
676 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
677 * appropriate memory access protection.
678 */
679 static int
680 dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
681 dtrace_vstate_t *vstate)
682 {
683 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
684
685 /*
686 * If we hold the privilege to read from kernel memory, then
687 * everything is readable.
688 */
689 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
690 return (1);
691
692 /*
693 * You can obviously read that which you can store.
694 */
695 if (dtrace_canstore(addr, sz, mstate, vstate))
696 return (1);
697
698 /*
699 * We're allowed to read from our own string table.
700 */
701 if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
702 mstate->dtms_difo->dtdo_strlen))
703 return (1);
704
705 DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
706 *illval = addr;
707 return (0);
708 }
709
710 /*
711 * Convenience routine to check to see if a given string is within a memory
712 * region in which a load may be issued given the user's privilege level;
713 * this exists so that we don't need to issue unnecessary dtrace_strlen()
714 * calls in the event that the user has all privileges.
715 */
716 static int
717 dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
718 dtrace_vstate_t *vstate)
719 {
720 size_t strsz;
721
722 /*
723 * If we hold the privilege to read from kernel memory, then
724 * everything is readable.
725 */
726 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
727 return (1);
728
729 strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
730 if (dtrace_canload(addr, strsz, mstate, vstate))
731 return (1);
732
733 return (0);
734 }
735
736 /*
737 * Convenience routine to check to see if a given variable is within a memory
738 * region in which a load may be issued given the user's privilege level.
739 */
740 static int
741 dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
742 dtrace_vstate_t *vstate)
743 {
744 size_t sz;
745 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
746
747 /*
748 * If we hold the privilege to read from kernel memory, then
749 * everything is readable.
750 */
751 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
752 return (1);
753
754 if (type->dtdt_kind == DIF_TYPE_STRING)
755 sz = dtrace_strlen(src,
756 vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
757 else
758 sz = type->dtdt_size;
759
760 return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
761 }
762
763 /*
764 * Compare two strings using safe loads.
765 */
766 static int
767 dtrace_strncmp(char *s1, char *s2, size_t limit)
768 {
769 uint8_t c1, c2;
770 volatile uint16_t *flags;
771
772 if (s1 == s2 || limit == 0)
773 return (0);
774
775 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
776
777 do {
778 if (s1 == NULL) {
779 c1 = '\0';
780 } else {
781 c1 = dtrace_load8((uintptr_t)s1++);
782 }
783
784 if (s2 == NULL) {
785 c2 = '\0';
786 } else {
787 c2 = dtrace_load8((uintptr_t)s2++);
788 }
789
790 if (c1 != c2)
791 return (c1 - c2);
792 } while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));
793
794 return (0);
795 }
796
797 /*
798 * Compute strlen(s) for a string using safe memory accesses. The additional
799 * lim parameter is used to specify a maximum length to ensure completion.
800 */
801 static size_t
802 dtrace_strlen(const char *s, size_t lim)
803 {
804 uint_t len;
805
806 for (len = 0; len != lim; len++) {
807 if (dtrace_load8((uintptr_t)s++) == '\0')
808 break;
809 }
810
811 return (len);
812 }
813
814 /*
815 * Check if an address falls within a toxic region.
816 */
817 static int
818 dtrace_istoxic(uintptr_t kaddr, size_t size)
819 {
820 uintptr_t taddr, tsize;
821 int i;
822
823 for (i = 0; i < dtrace_toxranges; i++) {
824 taddr = dtrace_toxrange[i].dtt_base;
825 tsize = dtrace_toxrange[i].dtt_limit - taddr;
826
827 if (kaddr - taddr < tsize) {
828 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
829 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = kaddr;
830 return (1);
831 }
832
833 if (taddr - kaddr < size) {
834 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
835 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = taddr;
836 return (1);
837 }
838 }
839
840 return (0);
841 }
842
843 /*
844 * Copy src to dst using safe memory accesses. The src is assumed to be unsafe
845 * memory specified by the DIF program. The dst is assumed to be safe memory
846 * that we can store to directly because it is managed by DTrace. As with
847 * standard bcopy, overlapping copies are handled properly.
848 */
849 static void
850 dtrace_bcopy(const void *src, void *dst, size_t len)
851 {
852 if (len != 0) {
853 uint8_t *s1 = dst;
854 const uint8_t *s2 = src;
855
856 if (s1 <= s2) {
857 do {
858 *s1++ = dtrace_load8((uintptr_t)s2++);
859 } while (--len != 0);
860 } else {
861 s2 += len;
862 s1 += len;
863
864 do {
865 *--s1 = dtrace_load8((uintptr_t)--s2);
866 } while (--len != 0);
867 }
868 }
869 }
870
871 /*
872 * Copy src to dst using safe memory accesses, up to either the specified
873 * length, or the point that a nul byte is encountered. The src is assumed to
874 * be unsafe memory specified by the DIF program. The dst is assumed to be
875 * safe memory that we can store to directly because it is managed by DTrace.
876 * Unlike dtrace_bcopy(), overlapping regions are not handled.
877 */
878 static void
879 dtrace_strcpy(const void *src, void *dst, size_t len)
880 {
881 if (len != 0) {
882 uint8_t *s1 = dst, c;
883 const uint8_t *s2 = src;
884
885 do {
886 *s1++ = c = dtrace_load8((uintptr_t)s2++);
887 } while (--len != 0 && c != '\0');
888 }
889 }
890
891 /*
892 * Copy src to dst, deriving the size and type from the specified (BYREF)
893 * variable type. The src is assumed to be unsafe memory specified by the DIF
894 * program. The dst is assumed to be DTrace variable memory that is of the
895 * specified type; we assume that we can store to directly.
896 */
897 static void
898 dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
899 {
900 ASSERT(type->dtdt_flags & DIF_TF_BYREF);
901
902 if (type->dtdt_kind == DIF_TYPE_STRING) {
903 dtrace_strcpy(src, dst, type->dtdt_size);
904 } else {
905 dtrace_bcopy(src, dst, type->dtdt_size);
906 }
907 }
908
909 /*
910 * Compare s1 to s2 using safe memory accesses. The s1 data is assumed to be
911 * unsafe memory specified by the DIF program. The s2 data is assumed to be
912 * safe memory that we can access directly because it is managed by DTrace.
913 */
914 static int
915 dtrace_bcmp(const void *s1, const void *s2, size_t len)
916 {
917 volatile uint16_t *flags;
918
919 flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
920
921 if (s1 == s2)
922 return (0);
923
924 if (s1 == NULL || s2 == NULL)
925 return (1);
926
927 if (s1 != s2 && len != 0) {
928 const uint8_t *ps1 = s1;
929 const uint8_t *ps2 = s2;
930
931 do {
932 if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
933 return (1);
934 } while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
935 }
936 return (0);
937 }
938
939 /*
940 * Zero the specified region using a simple byte-by-byte loop. Note that this
941 * is for safe DTrace-managed memory only.
942 */
943 static void
944 dtrace_bzero(void *dst, size_t len)
945 {
946 uchar_t *cp;
947
948 for (cp = dst; len != 0; len--)
949 *cp++ = 0;
950 }
951
952 static void
953 dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
954 {
955 uint64_t result[2];
956
957 result[0] = addend1[0] + addend2[0];
958 result[1] = addend1[1] + addend2[1] +
959 (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);
960
961 sum[0] = result[0];
962 sum[1] = result[1];
963 }
964
965 /*
966 * Shift the 128-bit value in a by b. If b is positive, shift left.
967 * If b is negative, shift right.
968 */
969 static void
970 dtrace_shift_128(uint64_t *a, int b)
971 {
972 uint64_t mask;
973
974 if (b == 0)
975 return;
976
977 if (b < 0) {
978 b = -b;
979 if (b >= 64) {
980 a[0] = a[1] >> (b - 64);
981 a[1] = 0;
982 } else {
983 a[0] >>= b;
984 mask = 1LL << (64 - b);
985 mask -= 1;
986 a[0] |= ((a[1] & mask) << (64 - b));
987 a[1] >>= b;
988 }
989 } else {
990 if (b >= 64) {
991 a[1] = a[0] << (b - 64);
992 a[0] = 0;
993 } else {
994 a[1] <<= b;
995 mask = a[0] >> (64 - b);
996 a[1] |= mask;
997 a[0] <<= b;
998 }
999 }
1000 }
1001
1002 /*
1003 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
1004 * use native multiplication on those, and then re-combine into the
1005 * resulting 128-bit value.
1006 *
1007 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
1008 * hi1 * hi2 << 64 +
1009 * hi1 * lo2 << 32 +
1010 * hi2 * lo1 << 32 +
1011 * lo1 * lo2
1012 */
1013 static void
1014 dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
1015 {
1016 uint64_t hi1, hi2, lo1, lo2;
1017 uint64_t tmp[2];
1018
1019 hi1 = factor1 >> 32;
1020 hi2 = factor2 >> 32;
1021
1022 lo1 = factor1 & DT_MASK_LO;
1023 lo2 = factor2 & DT_MASK_LO;
1024
1025 product[0] = lo1 * lo2;
1026 product[1] = hi1 * hi2;
1027
1028 tmp[0] = hi1 * lo2;
1029 tmp[1] = 0;
1030 dtrace_shift_128(tmp, 32);
1031 dtrace_add_128(product, tmp, product);
1032
1033 tmp[0] = hi2 * lo1;
1034 tmp[1] = 0;
1035 dtrace_shift_128(tmp, 32);
1036 dtrace_add_128(product, tmp, product);
1037 }
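
/*
 * A quick worked check of the decomposition above (illustrative only): with
 * factor1 = 2^32 + 3 (hi1 = 1, lo1 = 3) and factor2 = 2^32 + 5 (hi2 = 1,
 * lo2 = 5), the true product is 2^64 + 8 * 2^32 + 15.  The code starts with
 * product[0] = 15 and product[1] = 1, then adds the shifted partial products
 * hi1 * lo2 = 5 and hi2 * lo1 = 3, leaving product[0] = 0x80000000f and
 * product[1] = 1 -- exactly the 128-bit representation of that value.
 */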
1038
1039 /*
1040 * This privilege check should be used by actions and subroutines to
1041 * verify that the user credentials of the process that enabled the
1042 * invoking ECB match the target credentials
1043 */
1044 static int
1045 dtrace_priv_proc_common_user(dtrace_state_t *state)
1046 {
1047 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1048
1049 /*
1050 * We should always have a non-NULL state cred here, since if cred
1051 * is null (anonymous tracing), we fast-path bypass this routine.
1052 */
1053 ASSERT(s_cr != NULL);
1054
1055 if ((cr = CRED()) != NULL &&
1056 s_cr->cr_uid == cr->cr_uid &&
1057 s_cr->cr_uid == cr->cr_ruid &&
1058 s_cr->cr_uid == cr->cr_suid &&
1059 s_cr->cr_gid == cr->cr_gid &&
1060 s_cr->cr_gid == cr->cr_rgid &&
1061 s_cr->cr_gid == cr->cr_sgid)
1062 return (1);
1063
1064 return (0);
1065 }
1066
1067 /*
1068 * This privilege check should be used by actions and subroutines to
1069 * verify that the zone of the process that enabled the invoking ECB
1070 * matches the target credentials
1071 */
1072 static int
1073 dtrace_priv_proc_common_zone(dtrace_state_t *state)
1074 {
1075 cred_t *cr, *s_cr = state->dts_cred.dcr_cred;
1076
1077 /*
1078 * We should always have a non-NULL state cred here, since if cred
1079 * is null (anonymous tracing), we fast-path bypass this routine.
1080 */
1081 ASSERT(s_cr != NULL);
1082
1083 if ((cr = CRED()) != NULL &&
1084 s_cr->cr_zone == cr->cr_zone)
1085 return (1);
1086
1087 return (0);
1088 }
1089
1090 /*
1091 * This privilege check should be used by actions and subroutines to
1092 * verify that the process has not setuid or changed credentials.
1093 */
1094 static int
1095 dtrace_priv_proc_common_nocd()
1096 {
1097 proc_t *proc;
1098
1099 if ((proc = ttoproc(curthread)) != NULL &&
1100 !(proc->p_flag & SNOCD))
1101 return (1);
1102
1103 return (0);
1104 }
1105
1106 static int
1107 dtrace_priv_proc_destructive(dtrace_state_t *state)
1108 {
1109 int action = state->dts_cred.dcr_action;
1110
1111 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
1112 dtrace_priv_proc_common_zone(state) == 0)
1113 goto bad;
1114
1115 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
1116 dtrace_priv_proc_common_user(state) == 0)
1117 goto bad;
1118
1119 if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
1120 dtrace_priv_proc_common_nocd() == 0)
1121 goto bad;
1122
1123 return (1);
1124
1125 bad:
1126 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1127
1128 return (0);
1129 }
1130
1131 static int
1132 dtrace_priv_proc_control(dtrace_state_t *state)
1133 {
1134 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
1135 return (1);
1136
1137 if (dtrace_priv_proc_common_zone(state) &&
1138 dtrace_priv_proc_common_user(state) &&
1139 dtrace_priv_proc_common_nocd())
1140 return (1);
1141
1142 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1143
1144 return (0);
1145 }
1146
1147 static int
1148 dtrace_priv_proc(dtrace_state_t *state)
1149 {
1150 if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
1151 return (1);
1152
1153 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;
1154
1155 return (0);
1156 }
1157
1158 static int
1159 dtrace_priv_kernel(dtrace_state_t *state)
1160 {
1161 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
1162 return (1);
1163
1164 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1165
1166 return (0);
1167 }
1168
1169 static int
1170 dtrace_priv_kernel_destructive(dtrace_state_t *state)
1171 {
1172 if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
1173 return (1);
1174
1175 cpu_core[CPU->cpu_id].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;
1176
1177 return (0);
1178 }
1179
1180 /*
1181 * Note: not called from probe context. This function is called
1182 * asynchronously (and at a regular interval) from outside of probe context to
1183 * clean the dirty dynamic variable lists on all CPUs. Dynamic variable
1184 * cleaning is explained in detail in <sys/dtrace_impl.h>.
1185 */
1186 void
1187 dtrace_dynvar_clean(dtrace_dstate_t *dstate)
1188 {
1189 dtrace_dynvar_t *dirty;
1190 dtrace_dstate_percpu_t *dcpu;
1191 dtrace_dynvar_t **rinsep;
1192 int i, j, work = 0;
1193
1194 for (i = 0; i < NCPU; i++) {
1195 dcpu = &dstate->dtds_percpu[i];
1196 rinsep = &dcpu->dtdsc_rinsing;
1197
1198 /*
1199 * If the dirty list is NULL, there is no dirty work to do.
1200 */
1201 if (dcpu->dtdsc_dirty == NULL)
1202 continue;
1203
1204 if (dcpu->dtdsc_rinsing != NULL) {
1205 /*
1206 * If the rinsing list is non-NULL, then it is because
1207 * this CPU was selected to accept another CPU's
1208 * dirty list -- and since that time, dirty buffers
1209 * have accumulated. This is a highly unlikely
1210 * condition, but we choose to ignore the dirty
1211 * buffers -- they'll be picked up in a future cleanse.
1212 */
1213 continue;
1214 }
1215
1216 if (dcpu->dtdsc_clean != NULL) {
1217 /*
1218 * If the clean list is non-NULL, then we're in a
1219 * situation where a CPU has done deallocations (we
1220 * have a non-NULL dirty list) but no allocations (we
1221 * also have a non-NULL clean list). We can't simply
1222 * move the dirty list into the clean list on this
1223 * CPU, yet we also don't want to allow this condition
1224 * to persist, lest a short clean list prevent a
1225 * massive dirty list from being cleaned (which in
1226 * turn could lead to otherwise avoidable dynamic
1227 * drops). To deal with this, we look for some CPU
1228 * with a NULL clean list, NULL dirty list, and NULL
1229 * rinsing list -- and then we borrow this CPU to
1230 * rinse our dirty list.
1231 */
1232 for (j = 0; j < NCPU; j++) {
1233 dtrace_dstate_percpu_t *rinser;
1234
1235 rinser = &dstate->dtds_percpu[j];
1236
1237 if (rinser->dtdsc_rinsing != NULL)
1238 continue;
1239
1240 if (rinser->dtdsc_dirty != NULL)
1241 continue;
1242
1243 if (rinser->dtdsc_clean != NULL)
1244 continue;
1245
1246 rinsep = &rinser->dtdsc_rinsing;
1247 break;
1248 }
1249
1250 if (j == NCPU) {
1251 /*
1252 * We were unable to find another CPU that
1253 * could accept this dirty list -- we are
1254 * therefore unable to clean it now.
1255 */
1256 dtrace_dynvar_failclean++;
1257 continue;
1258 }
1259 }
1260
1261 work = 1;
1262
1263 /*
1264 * Atomically move the dirty list aside.
1265 */
1266 do {
1267 dirty = dcpu->dtdsc_dirty;
1268
1269 /*
1270 * Before we zap the dirty list, set the rinsing list.
1271 * (This allows for a potential assertion in
1272 * dtrace_dynvar(): if a free dynamic variable appears
1273 * on a hash chain, either the dirty list or the
1274 * rinsing list for some CPU must be non-NULL.)
1275 */
1276 *rinsep = dirty;
1277 dtrace_membar_producer();
1278 } while (dtrace_casptr(&dcpu->dtdsc_dirty,
1279 dirty, NULL) != dirty);
1280 }
1281
1282 if (!work) {
1283 /*
1284 * We have no work to do; we can simply return.
1285 */
1286 return;
1287 }
1288
1289 dtrace_sync();
1290
1291 for (i = 0; i < NCPU; i++) {
1292 dcpu = &dstate->dtds_percpu[i];
1293
1294 if (dcpu->dtdsc_rinsing == NULL)
1295 continue;
1296
1297 /*
1298 * We are now guaranteed that no hash chain contains a pointer
1299 * into this dirty list; we can make it clean.
1300 */
1301 ASSERT(dcpu->dtdsc_clean == NULL);
1302 dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
1303 dcpu->dtdsc_rinsing = NULL;
1304 }
1305
1306 /*
1307 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
1308 * sure that all CPUs have seen all of the dtdsc_clean pointers.
1309 * This prevents a race whereby a CPU incorrectly decides that
1310 * the state should be something other than DTRACE_DSTATE_CLEAN
1311 * after dtrace_dynvar_clean() has completed.
1312 */
1313 dtrace_sync();
1314
1315 dstate->dtds_state = DTRACE_DSTATE_CLEAN;
1316 }
1317
1318 /*
1319 * Depending on the value of the op parameter, this function looks up,
1320 * allocates or deallocates an arbitrarily-keyed dynamic variable. If an
1321 * allocation is requested, this function will return a pointer to a
1322 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
1323 * variable can be allocated. If NULL is returned, the appropriate counter
1324 * will be incremented.
1325 */
1326 dtrace_dynvar_t *
1327 dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
1328 dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
1329 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
1330 {
1331 uint64_t hashval = DTRACE_DYNHASH_VALID;
1332 dtrace_dynhash_t *hash = dstate->dtds_hash;
1333 dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
1334 processorid_t me = CPU->cpu_id, cpu = me;
1335 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
1336 size_t bucket, ksize;
1337 size_t chunksize = dstate->dtds_chunksize;
1338 uintptr_t kdata, lock, nstate;
1339 uint_t i;
1340
1341 ASSERT(nkeys != 0);
1342
1343 /*
1344 * Hash the key. As with aggregations, we use Jenkins' "One-at-a-time"
1345 * algorithm. For the by-value portions, we perform the algorithm in
1346 * 16-bit chunks (as opposed to 8-bit chunks). This speeds things up a
1347 * bit, and seems to have only a minute effect on distribution. For
1348 * the by-reference data, we perform "One-at-a-time" iterating (safely)
1349 * over each referenced byte. It's painful to do this, but it's much
1350 * better than pathological hash distribution. The efficacy of the
1351 * hashing algorithm (and a comparison with other algorithms) may be
1352 * found by running the ::dtrace_dynstat MDB dcmd.
1353 */
1354 for (i = 0; i < nkeys; i++) {
1355 if (key[i].dttk_size == 0) {
1356 uint64_t val = key[i].dttk_value;
1357
1358 hashval += (val >> 48) & 0xffff;
1359 hashval += (hashval << 10);
1360 hashval ^= (hashval >> 6);
1361
1362 hashval += (val >> 32) & 0xffff;
1363 hashval += (hashval << 10);
1364 hashval ^= (hashval >> 6);
1365
1366 hashval += (val >> 16) & 0xffff;
1367 hashval += (hashval << 10);
1368 hashval ^= (hashval >> 6);
1369
1370 hashval += val & 0xffff;
1371 hashval += (hashval << 10);
1372 hashval ^= (hashval >> 6);
1373 } else {
1374 /*
1375 * This is incredibly painful, but it beats the hell
1376 * out of the alternative.
1377 */
1378 uint64_t j, size = key[i].dttk_size;
1379 uintptr_t base = (uintptr_t)key[i].dttk_value;
1380
1381 if (!dtrace_canload(base, size, mstate, vstate))
1382 break;
1383
1384 for (j = 0; j < size; j++) {
1385 hashval += dtrace_load8(base + j);
1386 hashval += (hashval << 10);
1387 hashval ^= (hashval >> 6);
1388 }
1389 }
1390 }
1391
1392 if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
1393 return (NULL);
1394
1395 hashval += (hashval << 3);
1396 hashval ^= (hashval >> 11);
1397 hashval += (hashval << 15);
1398
1399 /*
1400 * There is a remote chance (ideally, 1 in 2^31) that our hashval
1401 * comes out to be one of our two sentinel hash values. If this
1402 * actually happens, we set the hashval to be a value known to be a
1403 * non-sentinel value.
1404 */
1405 if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
1406 hashval = DTRACE_DYNHASH_VALID;
1407
1408 /*
1409 * Yes, it's painful to do a divide here. If the cycle count becomes
1410 * important here, tricks can be pulled to reduce it. (However, it's
1411 * critical that hash collisions be kept to an absolute minimum;
1412 * they're much more painful than a divide.) It's better to have a
1413 * solution that generates few collisions and still keeps things
1414 * relatively simple.
1415 */
1416 bucket = hashval % dstate->dtds_hashsize;
1417
1418 if (op == DTRACE_DYNVAR_DEALLOC) {
1419 volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;
1420
1421 for (;;) {
1422 while ((lock = *lockp) & 1)
1423 continue;
1424
1425 if (dtrace_casptr((void *)lockp,
1426 (void *)lock, (void *)(lock + 1)) == (void *)lock)
1427 break;
1428 }
1429
1430 dtrace_membar_producer();
1431 }
1432
1433 top:
1434 prev = NULL;
1435 lock = hash[bucket].dtdh_lock;
1436
1437 dtrace_membar_consumer();
1438
1439 start = hash[bucket].dtdh_chain;
1440 ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
1441 start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
1442 op != DTRACE_DYNVAR_DEALLOC));
1443
1444 for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
1445 dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
1446 dtrace_key_t *dkey = &dtuple->dtt_key[0];
1447
1448 if (dvar->dtdv_hashval != hashval) {
1449 if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
1450 /*
1451 * We've reached the sink, and therefore the
1452 * end of the hash chain; we can kick out of
1453 * the loop knowing that we have seen a valid
1454 * snapshot of state.
1455 */
1456 ASSERT(dvar->dtdv_next == NULL);
1457 ASSERT(dvar == &dtrace_dynhash_sink);
1458 break;
1459 }
1460
1461 if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
1462 /*
1463 * We've gone off the rails: somewhere along
1464 * the line, one of the members of this hash
1465 * chain was deleted. Note that we could also
1466 * detect this by simply letting this loop run
1467 * to completion, as we would eventually hit
1468 * the end of the dirty list. However, we
1469 * want to avoid running the length of the
1470 * dirty list unnecessarily (it might be quite
1471 * long), so we catch this as early as
1472 * possible by detecting the hash marker. In
1473 * this case, we simply set dvar to NULL and
1474 * break; the conditional after the loop will
1475 * send us back to top.
1476 */
1477 dvar = NULL;
1478 break;
1479 }
1480
1481 goto next;
1482 }
1483
1484 if (dtuple->dtt_nkeys != nkeys)
1485 goto next;
1486
1487 for (i = 0; i < nkeys; i++, dkey++) {
1488 if (dkey->dttk_size != key[i].dttk_size)
1489 goto next; /* size or type mismatch */
1490
1491 if (dkey->dttk_size != 0) {
1492 if (dtrace_bcmp(
1493 (void *)(uintptr_t)key[i].dttk_value,
1494 (void *)(uintptr_t)dkey->dttk_value,
1495 dkey->dttk_size))
1496 goto next;
1497 } else {
1498 if (dkey->dttk_value != key[i].dttk_value)
1499 goto next;
1500 }
1501 }
1502
1503 if (op != DTRACE_DYNVAR_DEALLOC)
1504 return (dvar);
1505
1506 ASSERT(dvar->dtdv_next == NULL ||
1507 dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);
1508
1509 if (prev != NULL) {
1510 ASSERT(hash[bucket].dtdh_chain != dvar);
1511 ASSERT(start != dvar);
1512 ASSERT(prev->dtdv_next == dvar);
1513 prev->dtdv_next = dvar->dtdv_next;
1514 } else {
1515 if (dtrace_casptr(&hash[bucket].dtdh_chain,
1516 start, dvar->dtdv_next) != start) {
1517 /*
1518 * We have failed to atomically swing the
1519 * hash table head pointer, presumably because
1520 * of a conflicting allocation on another CPU.
1521 * We need to reread the hash chain and try
1522 * again.
1523 */
1524 goto top;
1525 }
1526 }
1527
1528 dtrace_membar_producer();
1529
1530 /*
1531 * Now set the hash value to indicate that it's free.
1532 */
1533 ASSERT(hash[bucket].dtdh_chain != dvar);
1534 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1535
1536 dtrace_membar_producer();
1537
1538 /*
1539 * Set the next pointer to point at the dirty list, and
1540 * atomically swing the dirty pointer to the newly freed dvar.
1541 */
1542 do {
1543 next = dcpu->dtdsc_dirty;
1544 dvar->dtdv_next = next;
1545 } while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);
1546
1547 /*
1548 * Finally, unlock this hash bucket.
1549 */
1550 ASSERT(hash[bucket].dtdh_lock == lock);
1551 ASSERT(lock & 1);
1552 hash[bucket].dtdh_lock++;
1553
1554 return (NULL);
1555 next:
1556 prev = dvar;
1557 continue;
1558 }
1559
1560 if (dvar == NULL) {
1561 /*
1562 * If dvar is NULL, it is because we went off the rails:
1563 * one of the elements that we traversed in the hash chain
1564 * was deleted while we were traversing it. In this case,
1565 * we assert that we aren't doing a dealloc (deallocs lock
1566 * the hash bucket to prevent themselves from racing with
1567 * one another), and retry the hash chain traversal.
1568 */
1569 ASSERT(op != DTRACE_DYNVAR_DEALLOC);
1570 goto top;
1571 }
1572
1573 if (op != DTRACE_DYNVAR_ALLOC) {
1574 /*
1575 * If we are not to allocate a new variable, we want to
1576 * return NULL now. Before we return, check that the value
1577 * of the lock word hasn't changed. If it has, we may have
1578 * seen an inconsistent snapshot.
1579 */
1580 if (op == DTRACE_DYNVAR_NOALLOC) {
1581 if (hash[bucket].dtdh_lock != lock)
1582 goto top;
1583 } else {
1584 ASSERT(op == DTRACE_DYNVAR_DEALLOC);
1585 ASSERT(hash[bucket].dtdh_lock == lock);
1586 ASSERT(lock & 1);
1587 hash[bucket].dtdh_lock++;
1588 }
1589
1590 return (NULL);
1591 }
1592
1593 /*
1594 * We need to allocate a new dynamic variable. The size we need is the
1595 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
1596 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
1597 * the size of any referred-to data (dsize). We then round the final
1598 * size up to the chunksize for allocation.
1599 */
1600 for (ksize = 0, i = 0; i < nkeys; i++)
1601 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
1602
1603 /*
1604 * This should be pretty much impossible, but could happen if, say,
1605 * strange DIF specified the tuple. Ideally, this should be an
1606 * assertion and not an error condition -- but that requires that the
1607 * chunksize calculation in dtrace_difo_chunksize() be absolutely
1608 * bullet-proof. (That is, it must not be able to be fooled by
1609 * malicious DIF.) Given the lack of backwards branches in DIF,
1610 * solving this would presumably not amount to solving the Halting
1611 * Problem -- but it still seems awfully hard.
1612 */
1613 if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
1614 ksize + dsize > chunksize) {
1615 dcpu->dtdsc_drops++;
1616 return (NULL);
1617 }
1618
1619 nstate = DTRACE_DSTATE_EMPTY;
1620
1621 do {
1622 retry:
1623 free = dcpu->dtdsc_free;
1624
1625 if (free == NULL) {
1626 dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
1627 void *rval;
1628
1629 if (clean == NULL) {
1630 /*
1631 * We're out of dynamic variable space on
1632 * this CPU. Unless we have tried all CPUs,
1633 * we'll try to allocate from a different
1634 * CPU.
1635 */
1636 switch (dstate->dtds_state) {
1637 case DTRACE_DSTATE_CLEAN: {
1638 void *sp = &dstate->dtds_state;
1639
1640 if (++cpu >= NCPU)
1641 cpu = 0;
1642
1643 if (dcpu->dtdsc_dirty != NULL &&
1644 nstate == DTRACE_DSTATE_EMPTY)
1645 nstate = DTRACE_DSTATE_DIRTY;
1646
1647 if (dcpu->dtdsc_rinsing != NULL)
1648 nstate = DTRACE_DSTATE_RINSING;
1649
1650 dcpu = &dstate->dtds_percpu[cpu];
1651
1652 if (cpu != me)
1653 goto retry;
1654
1655 (void) dtrace_cas32(sp,
1656 DTRACE_DSTATE_CLEAN, nstate);
1657
1658 /*
1659 * To increment the correct bean
1660 * counter, take another lap.
1661 */
1662 goto retry;
1663 }
1664
1665 case DTRACE_DSTATE_DIRTY:
1666 dcpu->dtdsc_dirty_drops++;
1667 break;
1668
1669 case DTRACE_DSTATE_RINSING:
1670 dcpu->dtdsc_rinsing_drops++;
1671 break;
1672
1673 case DTRACE_DSTATE_EMPTY:
1674 dcpu->dtdsc_drops++;
1675 break;
1676 }
1677
1678 DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
1679 return (NULL);
1680 }
1681
1682 /*
1683 * The clean list appears to be non-empty. We want to
1684 * move the clean list to the free list; we start by
1685 * moving the clean pointer aside.
1686 */
1687 if (dtrace_casptr(&dcpu->dtdsc_clean,
1688 clean, NULL) != clean) {
1689 /*
1690 * We are in one of two situations:
1691 *
1692 * (a) The clean list was switched to the
1693 * free list by another CPU.
1694 *
1695 * (b) The clean list was added to by the
1696 * cleansing cyclic.
1697 *
1698 * In either of these situations, we can
1699 * just reattempt the free list allocation.
1700 */
1701 goto retry;
1702 }
1703
1704 ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);
1705
1706 /*
1707 * Now we'll move the clean list to our free list.
1708 * It's impossible for this to fail: the only way
1709 * the free list can be updated is through this
1710 * code path, and only one CPU can own the clean list.
1711 * Thus, it would only be possible for this to fail if
1712 * this code were racing with dtrace_dynvar_clean().
1713 * (That is, if dtrace_dynvar_clean() updated the clean
1714 * list, and we ended up racing to update the free
1715 * list.) This race is prevented by the dtrace_sync()
1716 * in dtrace_dynvar_clean() -- which flushes the
1717 * owners of the clean lists out before resetting
1718 * the clean lists.
1719 */
1720 dcpu = &dstate->dtds_percpu[me];
1721 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
1722 ASSERT(rval == NULL);
1723 goto retry;
1724 }
1725
1726 dvar = free;
1727 new_free = dvar->dtdv_next;
1728 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);
1729
1730 /*
1731 * We have now allocated a new chunk. We copy the tuple keys into the
1732 * tuple array and copy any referenced key data into the data space
1733 * following the tuple array. As we do this, we relocate dttk_value
1734 * in the final tuple to point to the key data address in the chunk.
1735 */
1736 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
1737 dvar->dtdv_data = (void *)(kdata + ksize);
1738 dvar->dtdv_tuple.dtt_nkeys = nkeys;
1739
1740 for (i = 0; i < nkeys; i++) {
1741 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
1742 size_t kesize = key[i].dttk_size;
1743
1744 if (kesize != 0) {
1745 dtrace_bcopy(
1746 (const void *)(uintptr_t)key[i].dttk_value,
1747 (void *)kdata, kesize);
1748 dkey->dttk_value = kdata;
1749 kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
1750 } else {
1751 dkey->dttk_value = key[i].dttk_value;
1752 }
1753
1754 dkey->dttk_size = kesize;
1755 }
1756
1757 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
1758 dvar->dtdv_hashval = hashval;
1759 dvar->dtdv_next = start;
1760
1761 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
1762 return (dvar);
1763
1764 /*
1765 * The cas has failed. Either another CPU is adding an element to
1766 * this hash chain, or another CPU is deleting an element from this
1767 * hash chain. The simplest way to deal with both of these cases
1768 * (though not necessarily the most efficient) is to free our
1769 * allocated block and tail-call ourselves. Note that the free is
1770 * to the dirty list and _not_ to the free list. This is to prevent
1771 * races with allocators, above.
1772 */
1773 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;
1774
1775 dtrace_membar_producer();
1776
1777 do {
1778 free = dcpu->dtdsc_dirty;
1779 dvar->dtdv_next = free;
1780 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);
1781
1782 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
1783 }
1784
1785 /*ARGSUSED*/
1786 static void
1787 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
1788 {
1789 if ((int64_t)nval < (int64_t)*oval)
1790 *oval = nval;
1791 }
1792
1793 /*ARGSUSED*/
1794 static void
1795 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
1796 {
1797 if ((int64_t)nval > (int64_t)*oval)
1798 *oval = nval;
1799 }
1800
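/*
 * Power-of-two frequency distribution (quantize()).  quanta[] has
 * DTRACE_QUANTIZE_NBUCKETS entries; the entry at index
 * DTRACE_QUANTIZE_ZEROBUCKET corresponds to the value 0, with negative
 * power-of-two buckets below it and positive ones above it.  For example,
 * a value of 5 increments the bucket whose DTRACE_QUANTIZE_BUCKETVAL is 4,
 * which counts values in the range [4, 8).
 */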
1801 static void
1802 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
1803 {
1804 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
1805 int64_t val = (int64_t)nval;
1806
1807 if (val < 0) {
1808 for (i = 0; i < zero; i++) {
1809 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
1810 quanta[i] += incr;
1811 return;
1812 }
1813 }
1814 } else {
1815 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
1816 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
1817 quanta[i - 1] += incr;
1818 return;
1819 }
1820 }
1821
1822 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
1823 return;
1824 }
1825
1826 ASSERT(0);
1827 }
1828
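/*
 * Linear frequency distribution (lquantize()).  The first word of lquanta[]
 * encodes the base, step and number of levels; once it has been consumed,
 * index 0 is the underflow bucket, index (level + 1) counts values in
 * [base + level * step, base + (level + 1) * step), and index (levels + 1)
 * is the overflow bucket.  For example, with a base of 0, a step of 10 and
 * 10 levels, a value of 35 increments index 4 (the [30, 40) bucket), a value
 * of -5 increments the underflow bucket, and a value of 100 increments the
 * overflow bucket.
 */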
1829 static void
1830 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr)
1831 {
1832 uint64_t arg = *lquanta++;
1833 int32_t base = DTRACE_LQUANTIZE_BASE(arg);
1834 uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
1835 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
1836 int32_t val = (int32_t)nval, level;
1837
1838 ASSERT(step != 0);
1839 ASSERT(levels != 0);
1840
1841 if (val < base) {
1842 /*
1843 * This is an underflow.
1844 */
1845 lquanta[0] += incr;
1846 return;
1847 }
1848
1849 level = (val - base) / step;
1850
1851 if (level < levels) {
1852 lquanta[level + 1] += incr;
1853 return;
1854 }
1855
1856 /*
1857 * This is an overflow.
1858 */
1859 lquanta[levels + 1] += incr;
1860 }
1861
1862 /*ARGSUSED*/
1863 static void
1864 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg)
1865 {
1866 data[0]++;
1867 data[1] += nval;
1868 }
1869
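/*
 * Running state for stddev(): data[0] is the count, data[1] the running sum,
 * and data[2]..data[3] the 128-bit running sum of squares maintained via
 * dtrace_multiply_128()/dtrace_add_128() below.
 */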
1870 /*ARGSUSED*/
1871 static void
1872 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg)
1873 {
1874 int64_t snval = (int64_t)nval;
1875 uint64_t tmp[2];
1876
1877 data[0]++;
1878 data[1] += nval;
1879
1880 /*
1881 * What we want to say here is:
1882 *
1883 * data[2] += nval * nval;
1884 *
1885 * But given that nval is 64-bit, we could easily overflow, so
1886 * we do this as 128-bit arithmetic.
1887 */
1888 if (snval < 0)
1889 snval = -snval;
1890
1891 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp);
1892 dtrace_add_128(data + 2, tmp, data + 2);
1893 }
1894
1895 /*ARGSUSED*/
1896 static void
1897 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg)
1898 {
1899 *oval = *oval + 1;
1900 }
1901
1902 /*ARGSUSED*/
1903 static void
1904 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg)
1905 {
1906 *oval += nval;
1907 }
1908
1909 /*
1910 * Aggregate given the tuple in the principal data buffer, and the aggregating
1911 * action denoted by the specified dtrace_aggregation_t. The aggregation
1912 * buffer is specified as the buf parameter. This routine does not return
1913 * failure; if there is no space in the aggregation buffer, the data will be
1914 * dropped, and a corresponding counter incremented.
1915 */
1916 static void
1917 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf,
1918 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg)
1919 {
1920 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec;
1921 uint32_t i, ndx, size, fsize;
1922 uint32_t align = sizeof (uint64_t) - 1;
1923 dtrace_aggbuffer_t *agb;
1924 dtrace_aggkey_t *key;
1925 uint32_t hashval = 0, limit, isstr;
1926 caddr_t tomax, data, kdata;
1927 dtrace_actkind_t action;
1928 dtrace_action_t *act;
1929 uintptr_t offs;
1930
1931 if (buf == NULL)
1932 return;
1933
1934 if (!agg->dtag_hasarg) {
1935 /*
1936 * Currently, only quantize() and lquantize() take additional
1937 * arguments, and they have the same semantics: an increment
1938 * value that defaults to 1 when not present. If additional
1939 * aggregating actions take arguments, the setting of the
1940 * default argument value will presumably have to become more
1941 * sophisticated...
1942 */
1943 arg = 1;
1944 }
1945
1946 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION;
1947 size = rec->dtrd_offset - agg->dtag_base;
1948 fsize = size + rec->dtrd_size;
1949
1950 ASSERT(dbuf->dtb_tomax != NULL);
1951 data = dbuf->dtb_tomax + offset + agg->dtag_base;
1952
1953 if ((tomax = buf->dtb_tomax) == NULL) {
1954 dtrace_buffer_drop(buf);
1955 return;
1956 }
1957
1958 /*
1959 * The metastructure is always at the bottom of the buffer.
1960 */
1961 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size -
1962 sizeof (dtrace_aggbuffer_t));
1963
1964 if (buf->dtb_offset == 0) {
1965 /*
1966 * We just kludge up approximately 1/8th of the size to be
1967 * buckets. If this guess ends up being routinely
1968 * off-the-mark, we may need to dynamically readjust this
1969 * based on past performance.
1970 */
1971 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t);
1972
1973 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) <
1974 (uintptr_t)tomax || hashsize == 0) {
1975 /*
1976 * We've been given a ludicrously small buffer;
1977 * increment our drop count and leave.
1978 */
1979 dtrace_buffer_drop(buf);
1980 return;
1981 }
1982
1983 /*
1984 	 * And now, a pathetic attempt to get an odd (or
1985 * perchance, a prime) hash size for better hash distribution.
1986 */
1987 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3))
1988 hashsize -= DTRACE_AGGHASHSIZE_SLEW;
1989
1990 agb->dtagb_hashsize = hashsize;
1991 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb -
1992 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *));
1993 agb->dtagb_free = (uintptr_t)agb->dtagb_hash;
1994
1995 for (i = 0; i < agb->dtagb_hashsize; i++)
1996 agb->dtagb_hash[i] = NULL;
1997 }
1998
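	/*
	 * A note on the layout assumed here: record data grows upward from
	 * dtb_tomax (tracked by dtb_offset), dtrace_aggkey_t structures are
	 * carved downward from dtagb_free, and the hash bucket array and the
	 * dtrace_aggbuffer_t itself sit at the very top of the buffer.  The
	 * space check further below fails the allocation (and drops the
	 * record) when the two ends would collide.
	 */
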
1999 ASSERT(agg->dtag_first != NULL);
2000 ASSERT(agg->dtag_first->dta_intuple);
2001
2002 /*
2003 * Calculate the hash value based on the key. Note that we _don't_
2004 * include the aggid in the hashing (but we will store it as part of
2005 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time"
2006 * algorithm: a simple, quick algorithm that has no known funnels, and
2007 * gets good distribution in practice. The efficacy of the hashing
2008 * algorithm (and a comparison with other algorithms) may be found by
2009 * running the ::dtrace_aggstat MDB dcmd.
2010 */
2011 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2012 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2013 limit = i + act->dta_rec.dtrd_size;
2014 ASSERT(limit <= size);
2015 isstr = DTRACEACT_ISSTRING(act);
2016
2017 for (; i < limit; i++) {
2018 hashval += data[i];
2019 hashval += (hashval << 10);
2020 hashval ^= (hashval >> 6);
2021
2022 if (isstr && data[i] == '\0')
2023 break;
2024 }
2025 }
2026
2027 hashval += (hashval << 3);
2028 hashval ^= (hashval >> 11);
2029 hashval += (hashval << 15);
2030
2031 /*
2032 * Yes, the divide here is expensive -- but it's generally the least
2033 * of the performance issues given the amount of data that we iterate
2034 * over to compute hash values, compare data, etc.
2035 */
2036 ndx = hashval % agb->dtagb_hashsize;
2037
2038 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) {
2039 ASSERT((caddr_t)key >= tomax);
2040 ASSERT((caddr_t)key < tomax + buf->dtb_size);
2041
2042 if (hashval != key->dtak_hashval || key->dtak_size != size)
2043 continue;
2044
2045 kdata = key->dtak_data;
2046 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size);
2047
2048 for (act = agg->dtag_first; act->dta_intuple;
2049 act = act->dta_next) {
2050 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2051 limit = i + act->dta_rec.dtrd_size;
2052 ASSERT(limit <= size);
2053 isstr = DTRACEACT_ISSTRING(act);
2054
2055 for (; i < limit; i++) {
2056 if (kdata[i] != data[i])
2057 goto next;
2058
2059 if (isstr && data[i] == '\0')
2060 break;
2061 }
2062 }
2063
2064 if (action != key->dtak_action) {
2065 /*
2066 * We are aggregating on the same value in the same
2067 * aggregation with two different aggregating actions.
2068 * (This should have been picked up in the compiler,
2069 * so we may be dealing with errant or devious DIF.)
2070 * This is an error condition; we indicate as much,
2071 * and return.
2072 */
2073 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2074 return;
2075 }
2076
2077 /*
2078 * This is a hit: we need to apply the aggregator to
2079 * the value at this key.
2080 */
2081 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg);
2082 return;
2083 next:
2084 continue;
2085 }
2086
2087 /*
2088 * We didn't find it. We need to allocate some zero-filled space,
2089 * link it into the hash table appropriately, and apply the aggregator
2090 * to the (zero-filled) value.
2091 */
2092 offs = buf->dtb_offset;
2093 while (offs & (align - 1))
2094 offs += sizeof (uint32_t);
2095
2096 /*
2097 * If we don't have enough room to both allocate a new key _and_
2098 * its associated data, increment the drop count and return.
2099 */
2100 if ((uintptr_t)tomax + offs + fsize >
2101 agb->dtagb_free - sizeof (dtrace_aggkey_t)) {
2102 dtrace_buffer_drop(buf);
2103 return;
2104 }
2105
2106 /*CONSTCOND*/
2107 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1)));
2108 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t));
2109 agb->dtagb_free -= sizeof (dtrace_aggkey_t);
2110
2111 key->dtak_data = kdata = tomax + offs;
2112 buf->dtb_offset = offs + fsize;
2113
2114 /*
2115 * Now copy the data across.
2116 */
2117 *((dtrace_aggid_t *)kdata) = agg->dtag_id;
2118
2119 for (i = sizeof (dtrace_aggid_t); i < size; i++)
2120 kdata[i] = data[i];
2121
2122 /*
2123 * Because strings are not zeroed out by default, we need to iterate
2124 * looking for actions that store strings, and we need to explicitly
2125 * pad these strings out with zeroes.
2126 */
2127 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) {
2128 int nul;
2129
2130 if (!DTRACEACT_ISSTRING(act))
2131 continue;
2132
2133 i = act->dta_rec.dtrd_offset - agg->dtag_base;
2134 limit = i + act->dta_rec.dtrd_size;
2135 ASSERT(limit <= size);
2136
2137 for (nul = 0; i < limit; i++) {
2138 if (nul) {
2139 kdata[i] = '\0';
2140 continue;
2141 }
2142
2143 if (data[i] != '\0')
2144 continue;
2145
2146 nul = 1;
2147 }
2148 }
2149
2150 for (i = size; i < fsize; i++)
2151 kdata[i] = 0;
2152
2153 key->dtak_hashval = hashval;
2154 key->dtak_size = size;
2155 key->dtak_action = action;
2156 key->dtak_next = agb->dtagb_hash[ndx];
2157 agb->dtagb_hash[ndx] = key;
2158
2159 /*
2160 * Finally, apply the aggregator.
2161 */
2162 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial;
2163 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg);
2164 }
2165
2166 /*
2167 * Given consumer state, this routine finds a speculation in the INACTIVE
2168 * state and transitions it into the ACTIVE state. If there is no speculation
2169 * in the INACTIVE state, 0 is returned. In this case, no error counter is
2170 * incremented -- it is up to the caller to take appropriate action.
2171 */
2172 static int
2173 dtrace_speculation(dtrace_state_t *state)
2174 {
2175 int i = 0;
2176 dtrace_speculation_state_t current;
2177 uint32_t *stat = &state->dts_speculations_unavail, count;
2178
2179 while (i < state->dts_nspeculations) {
2180 dtrace_speculation_t *spec = &state->dts_speculations[i];
2181
2182 current = spec->dtsp_state;
2183
2184 if (current != DTRACESPEC_INACTIVE) {
2185 if (current == DTRACESPEC_COMMITTINGMANY ||
2186 current == DTRACESPEC_COMMITTING ||
2187 current == DTRACESPEC_DISCARDING)
2188 stat = &state->dts_speculations_busy;
2189 i++;
2190 continue;
2191 }
2192
2193 if (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2194 current, DTRACESPEC_ACTIVE) == current)
2195 return (i + 1);
2196 }
2197
2198 /*
2199 * We couldn't find a speculation. If we found as much as a single
2200 * busy speculation buffer, we'll attribute this failure as "busy"
2201 * instead of "unavail".
2202 */
2203 do {
2204 count = *stat;
2205 } while (dtrace_cas32(stat, count, count + 1) != count);
2206
2207 return (0);
2208 }
2209
2210 /*
2211 * This routine commits an active speculation. If the specified speculation
2212 * is not in a valid state to perform a commit(), this routine will silently do
2213 * nothing. The state of the specified speculation is transitioned according
2214 * to the state transition diagram outlined in <sys/dtrace_impl.h>
2215 */
2216 static void
2217 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu,
2218 dtrace_specid_t which)
2219 {
2220 dtrace_speculation_t *spec;
2221 dtrace_buffer_t *src, *dest;
2222 uintptr_t daddr, saddr, dlimit;
2223 dtrace_speculation_state_t current, new;
2224 intptr_t offs;
2225
2226 if (which == 0)
2227 return;
2228
2229 if (which > state->dts_nspeculations) {
2230 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2231 return;
2232 }
2233
2234 spec = &state->dts_speculations[which - 1];
2235 src = &spec->dtsp_buffer[cpu];
2236 dest = &state->dts_buffer[cpu];
2237
2238 do {
2239 current = spec->dtsp_state;
2240
2241 if (current == DTRACESPEC_COMMITTINGMANY)
2242 break;
2243
2244 switch (current) {
2245 case DTRACESPEC_INACTIVE:
2246 case DTRACESPEC_DISCARDING:
2247 return;
2248
2249 case DTRACESPEC_COMMITTING:
2250 /*
2251 * This is only possible if we are (a) commit()'ing
2252 * without having done a prior speculate() on this CPU
2253 * and (b) racing with another commit() on a different
2254 * CPU. There's nothing to do -- we just assert that
2255 * our offset is 0.
2256 */
2257 ASSERT(src->dtb_offset == 0);
2258 return;
2259
2260 case DTRACESPEC_ACTIVE:
2261 new = DTRACESPEC_COMMITTING;
2262 break;
2263
2264 case DTRACESPEC_ACTIVEONE:
2265 /*
2266 * This speculation is active on one CPU. If our
2267 * buffer offset is non-zero, we know that the one CPU
2268 * must be us. Otherwise, we are committing on a
2269 * different CPU from the speculate(), and we must
2270 * rely on being asynchronously cleaned.
2271 */
2272 if (src->dtb_offset != 0) {
2273 new = DTRACESPEC_COMMITTING;
2274 break;
2275 }
2276 /*FALLTHROUGH*/
2277
2278 case DTRACESPEC_ACTIVEMANY:
2279 new = DTRACESPEC_COMMITTINGMANY;
2280 break;
2281
2282 default:
2283 ASSERT(0);
2284 }
2285 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2286 current, new) != current);
2287
2288 /*
2289 * We have set the state to indicate that we are committing this
2290 * speculation. Now reserve the necessary space in the destination
2291 * buffer.
2292 */
2293 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset,
2294 sizeof (uint64_t), state, NULL)) < 0) {
2295 dtrace_buffer_drop(dest);
2296 goto out;
2297 }
2298
2299 /*
2300 * We have the space; copy the buffer across. (Note that this is a
2301 	 * highly suboptimal bcopy(); in the unlikely event that this becomes
2302 * a serious performance issue, a high-performance DTrace-specific
2303 * bcopy() should obviously be invented.)
2304 */
2305 daddr = (uintptr_t)dest->dtb_tomax + offs;
2306 dlimit = daddr + src->dtb_offset;
2307 saddr = (uintptr_t)src->dtb_tomax;
2308
2309 /*
2310 * First, the aligned portion.
2311 */
2312 while (dlimit - daddr >= sizeof (uint64_t)) {
2313 *((uint64_t *)daddr) = *((uint64_t *)saddr);
2314
2315 daddr += sizeof (uint64_t);
2316 saddr += sizeof (uint64_t);
2317 }
2318
2319 /*
2320 * Now any left-over bit...
2321 */
2322 while (dlimit - daddr)
2323 *((uint8_t *)daddr++) = *((uint8_t *)saddr++);
2324
2325 /*
2326 * Finally, commit the reserved space in the destination buffer.
2327 */
2328 dest->dtb_offset = offs + src->dtb_offset;
2329
2330 out:
2331 /*
2332 * If we're lucky enough to be the only active CPU on this speculation
2333 * buffer, we can just set the state back to DTRACESPEC_INACTIVE.
2334 */
2335 if (current == DTRACESPEC_ACTIVE ||
2336 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) {
2337 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state,
2338 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE);
2339
2340 ASSERT(rval == DTRACESPEC_COMMITTING);
2341 }
2342
2343 src->dtb_offset = 0;
2344 src->dtb_xamot_drops += src->dtb_drops;
2345 src->dtb_drops = 0;
2346 }
2347
2348 /*
2349 * This routine discards an active speculation. If the specified speculation
2350 * is not in a valid state to perform a discard(), this routine will silently
2351 * do nothing. The state of the specified speculation is transitioned
2352 * according to the state transition diagram outlined in <sys/dtrace_impl.h>
2353 */
2354 static void
2355 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu,
2356 dtrace_specid_t which)
2357 {
2358 dtrace_speculation_t *spec;
2359 dtrace_speculation_state_t current, new;
2360 dtrace_buffer_t *buf;
2361
2362 if (which == 0)
2363 return;
2364
2365 if (which > state->dts_nspeculations) {
2366 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2367 return;
2368 }
2369
2370 spec = &state->dts_speculations[which - 1];
2371 buf = &spec->dtsp_buffer[cpu];
2372
2373 do {
2374 current = spec->dtsp_state;
2375
2376 switch (current) {
2377 case DTRACESPEC_INACTIVE:
2378 case DTRACESPEC_COMMITTINGMANY:
2379 case DTRACESPEC_COMMITTING:
2380 case DTRACESPEC_DISCARDING:
2381 return;
2382
2383 case DTRACESPEC_ACTIVE:
2384 case DTRACESPEC_ACTIVEMANY:
2385 new = DTRACESPEC_DISCARDING;
2386 break;
2387
2388 case DTRACESPEC_ACTIVEONE:
2389 if (buf->dtb_offset != 0) {
2390 new = DTRACESPEC_INACTIVE;
2391 } else {
2392 new = DTRACESPEC_DISCARDING;
2393 }
2394 break;
2395
2396 default:
2397 ASSERT(0);
2398 }
2399 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2400 current, new) != current);
2401
2402 buf->dtb_offset = 0;
2403 buf->dtb_drops = 0;
2404 }
2405
2406 /*
2407 * Note: not called from probe context. This function is called
2408 * asynchronously from cross call context to clean any speculations that are
2409 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be
2410 * transitioned back to the INACTIVE state until all CPUs have cleaned the
2411 * speculation.
2412 */
2413 static void
2414 dtrace_speculation_clean_here(dtrace_state_t *state)
2415 {
2416 dtrace_icookie_t cookie;
2417 processorid_t cpu = CPU->cpu_id;
2418 dtrace_buffer_t *dest = &state->dts_buffer[cpu];
2419 dtrace_specid_t i;
2420
2421 cookie = dtrace_interrupt_disable();
2422
2423 if (dest->dtb_tomax == NULL) {
2424 dtrace_interrupt_enable(cookie);
2425 return;
2426 }
2427
2428 for (i = 0; i < state->dts_nspeculations; i++) {
2429 dtrace_speculation_t *spec = &state->dts_speculations[i];
2430 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu];
2431
2432 if (src->dtb_tomax == NULL)
2433 continue;
2434
2435 if (spec->dtsp_state == DTRACESPEC_DISCARDING) {
2436 src->dtb_offset = 0;
2437 continue;
2438 }
2439
2440 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2441 continue;
2442
2443 if (src->dtb_offset == 0)
2444 continue;
2445
2446 dtrace_speculation_commit(state, cpu, i + 1);
2447 }
2448
2449 dtrace_interrupt_enable(cookie);
2450 }
2451
2452 /*
2453 * Note: not called from probe context. This function is called
2454 * asynchronously (and at a regular interval) to clean any speculations that
2455 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there
2456 * is work to be done, it cross calls all CPUs to perform that work;
2457 	 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to the
2458 * INACTIVE state until they have been cleaned by all CPUs.
2459 */
2460 static void
2461 dtrace_speculation_clean(dtrace_state_t *state)
2462 {
2463 int work = 0, rv;
2464 dtrace_specid_t i;
2465
2466 for (i = 0; i < state->dts_nspeculations; i++) {
2467 dtrace_speculation_t *spec = &state->dts_speculations[i];
2468
2469 ASSERT(!spec->dtsp_cleaning);
2470
2471 if (spec->dtsp_state != DTRACESPEC_DISCARDING &&
2472 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY)
2473 continue;
2474
2475 work++;
2476 spec->dtsp_cleaning = 1;
2477 }
2478
2479 if (!work)
2480 return;
2481
2482 dtrace_xcall(DTRACE_CPUALL,
2483 (dtrace_xcall_t)dtrace_speculation_clean_here, state);
2484
2485 /*
2486 * We now know that all CPUs have committed or discarded their
2487 * speculation buffers, as appropriate. We can now set the state
2488 * to inactive.
2489 */
2490 for (i = 0; i < state->dts_nspeculations; i++) {
2491 dtrace_speculation_t *spec = &state->dts_speculations[i];
2492 dtrace_speculation_state_t current, new;
2493
2494 if (!spec->dtsp_cleaning)
2495 continue;
2496
2497 current = spec->dtsp_state;
2498 ASSERT(current == DTRACESPEC_DISCARDING ||
2499 current == DTRACESPEC_COMMITTINGMANY);
2500
2501 new = DTRACESPEC_INACTIVE;
2502
2503 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new);
2504 ASSERT(rv == current);
2505 spec->dtsp_cleaning = 0;
2506 }
2507 }
2508
2509 /*
2510 * Called as part of a speculate() to get the speculative buffer associated
2511 * with a given speculation. Returns NULL if the specified speculation is not
2512 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and
2513 * the active CPU is not the specified CPU -- the speculation will be
2514 * atomically transitioned into the ACTIVEMANY state.
2515 */
2516 static dtrace_buffer_t *
2517 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid,
2518 dtrace_specid_t which)
2519 {
2520 dtrace_speculation_t *spec;
2521 dtrace_speculation_state_t current, new;
2522 dtrace_buffer_t *buf;
2523
2524 if (which == 0)
2525 return (NULL);
2526
2527 if (which > state->dts_nspeculations) {
2528 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP;
2529 return (NULL);
2530 }
2531
2532 spec = &state->dts_speculations[which - 1];
2533 buf = &spec->dtsp_buffer[cpuid];
2534
2535 do {
2536 current = spec->dtsp_state;
2537
2538 switch (current) {
2539 case DTRACESPEC_INACTIVE:
2540 case DTRACESPEC_COMMITTINGMANY:
2541 case DTRACESPEC_DISCARDING:
2542 return (NULL);
2543
2544 case DTRACESPEC_COMMITTING:
2545 ASSERT(buf->dtb_offset == 0);
2546 return (NULL);
2547
2548 case DTRACESPEC_ACTIVEONE:
2549 /*
2550 * This speculation is currently active on one CPU.
2551 * Check the offset in the buffer; if it's non-zero,
2552 * that CPU must be us (and we leave the state alone).
2553 * If it's zero, assume that we're starting on a new
2554 * CPU -- and change the state to indicate that the
2555 * speculation is active on more than one CPU.
2556 */
2557 if (buf->dtb_offset != 0)
2558 return (buf);
2559
2560 new = DTRACESPEC_ACTIVEMANY;
2561 break;
2562
2563 case DTRACESPEC_ACTIVEMANY:
2564 return (buf);
2565
2566 case DTRACESPEC_ACTIVE:
2567 new = DTRACESPEC_ACTIVEONE;
2568 break;
2569
2570 default:
2571 ASSERT(0);
2572 }
2573 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state,
2574 current, new) != current);
2575
2576 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY);
2577 return (buf);
2578 }
2579
2580 /*
2581 * Return a string. In the event that the user lacks the privilege to access
2582 * arbitrary kernel memory, we copy the string out to scratch memory so that we
2583 * don't fail access checking.
2584 *
2585 * dtrace_dif_variable() uses this routine as a helper for various
2586 * builtin values such as 'execname' and 'probefunc.'
2587 */
2588 uintptr_t
2589 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state,
2590 dtrace_mstate_t *mstate)
2591 {
2592 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
2593 uintptr_t ret;
2594 size_t strsz;
2595
2596 /*
2597 * The easy case: this probe is allowed to read all of memory, so
2598 * we can just return this as a vanilla pointer.
2599 */
2600 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
2601 return (addr);
2602
2603 /*
2604 * This is the tougher case: we copy the string in question from
2605 * kernel memory into scratch memory and return it that way: this
2606 * ensures that we won't trip up when access checking tests the
2607 * BYREF return value.
2608 */
2609 strsz = dtrace_strlen((char *)addr, size) + 1;
2610
2611 if (mstate->dtms_scratch_ptr + strsz >
2612 mstate->dtms_scratch_base + mstate->dtms_scratch_size) {
2613 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
2614 return (NULL);
2615 }
2616
2617 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr,
2618 strsz);
2619 ret = mstate->dtms_scratch_ptr;
2620 mstate->dtms_scratch_ptr += strsz;
2621 return (ret);
2622 }
2623
2624 /*
2625 * This function implements the DIF emulator's variable lookups. The emulator
2626 * passes a reserved variable identifier and optional built-in array index.
2627 */
2628 static uint64_t
2629 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v,
2630 uint64_t ndx)
2631 {
2632 /*
2633 * If we're accessing one of the uncached arguments, we'll turn this
2634 * into a reference in the args array.
2635 */
2636 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) {
2637 ndx = v - DIF_VAR_ARG0;
2638 v = DIF_VAR_ARGS;
2639 }
2640
2641 switch (v) {
2642 case DIF_VAR_ARGS:
2643 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS);
2644 if (ndx >= sizeof (mstate->dtms_arg) /
2645 sizeof (mstate->dtms_arg[0])) {
2646 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2647 dtrace_provider_t *pv;
2648 uint64_t val;
2649
2650 pv = mstate->dtms_probe->dtpr_provider;
2651 if (pv->dtpv_pops.dtps_getargval != NULL)
2652 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg,
2653 mstate->dtms_probe->dtpr_id,
2654 mstate->dtms_probe->dtpr_arg, ndx, aframes);
2655 else
2656 val = dtrace_getarg(ndx, aframes);
2657
2658 /*
2659 * This is regrettably required to keep the compiler
2660 * from tail-optimizing the call to dtrace_getarg().
2661 * The condition always evaluates to true, but the
2662 * compiler has no way of figuring that out a priori.
2663 * (None of this would be necessary if the compiler
2664 * could be relied upon to _always_ tail-optimize
2665 * the call to dtrace_getarg() -- but it can't.)
2666 */
2667 if (mstate->dtms_probe != NULL)
2668 return (val);
2669
2670 ASSERT(0);
2671 }
2672
2673 return (mstate->dtms_arg[ndx]);
2674
2675 case DIF_VAR_UREGS: {
2676 klwp_t *lwp;
2677
2678 if (!dtrace_priv_proc(state))
2679 return (0);
2680
2681 if ((lwp = curthread->t_lwp) == NULL) {
2682 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
2683 cpu_core[CPU->cpu_id].cpuc_dtrace_illval = NULL;
2684 return (0);
2685 }
2686
2687 return (dtrace_getreg(lwp->lwp_regs, ndx));
2688 }
2689
2690 case DIF_VAR_CURTHREAD:
2691 if (!dtrace_priv_kernel(state))
2692 return (0);
2693 return ((uint64_t)(uintptr_t)curthread);
2694
2695 case DIF_VAR_TIMESTAMP:
2696 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) {
2697 mstate->dtms_timestamp = dtrace_gethrtime();
2698 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP;
2699 }
2700 return (mstate->dtms_timestamp);
2701
2702 case DIF_VAR_VTIMESTAMP:
2703 ASSERT(dtrace_vtime_references != 0);
2704 return (curthread->t_dtrace_vtime);
2705
2706 case DIF_VAR_WALLTIMESTAMP:
2707 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) {
2708 mstate->dtms_walltimestamp = dtrace_gethrestime();
2709 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP;
2710 }
2711 return (mstate->dtms_walltimestamp);
2712
2713 case DIF_VAR_IPL:
2714 if (!dtrace_priv_kernel(state))
2715 return (0);
2716 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) {
2717 mstate->dtms_ipl = dtrace_getipl();
2718 mstate->dtms_present |= DTRACE_MSTATE_IPL;
2719 }
2720 return (mstate->dtms_ipl);
2721
2722 case DIF_VAR_EPID:
2723 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID);
2724 return (mstate->dtms_epid);
2725
2726 case DIF_VAR_ID:
2727 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2728 return (mstate->dtms_probe->dtpr_id);
2729
2730 case DIF_VAR_STACKDEPTH:
2731 if (!dtrace_priv_kernel(state))
2732 return (0);
2733 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) {
2734 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2735
2736 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
2737 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH;
2738 }
2739 return (mstate->dtms_stackdepth);
2740
2741 case DIF_VAR_USTACKDEPTH:
2742 if (!dtrace_priv_proc(state))
2743 return (0);
2744 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) {
2745 /*
2746 * See comment in DIF_VAR_PID.
2747 */
2748 if (DTRACE_ANCHORED(mstate->dtms_probe) &&
2749 CPU_ON_INTR(CPU)) {
2750 mstate->dtms_ustackdepth = 0;
2751 } else {
2752 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2753 mstate->dtms_ustackdepth =
2754 dtrace_getustackdepth();
2755 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2756 }
2757 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH;
2758 }
2759 return (mstate->dtms_ustackdepth);
2760
2761 case DIF_VAR_CALLER:
2762 if (!dtrace_priv_kernel(state))
2763 return (0);
2764 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) {
2765 int aframes = mstate->dtms_probe->dtpr_aframes + 2;
2766
2767 if (!DTRACE_ANCHORED(mstate->dtms_probe)) {
2768 /*
2769 * If this is an unanchored probe, we are
2770 * required to go through the slow path:
2771 * dtrace_caller() only guarantees correct
2772 * results for anchored probes.
2773 */
2774 pc_t caller[2];
2775
2776 dtrace_getpcstack(caller, 2, aframes,
2777 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]);
2778 mstate->dtms_caller = caller[1];
2779 } else if ((mstate->dtms_caller =
2780 dtrace_caller(aframes)) == -1) {
2781 /*
2782 * We have failed to do this the quick way;
2783 * we must resort to the slower approach of
2784 * calling dtrace_getpcstack().
2785 */
2786 pc_t caller;
2787
2788 dtrace_getpcstack(&caller, 1, aframes, NULL);
2789 mstate->dtms_caller = caller;
2790 }
2791
2792 mstate->dtms_present |= DTRACE_MSTATE_CALLER;
2793 }
2794 return (mstate->dtms_caller);
2795
2796 case DIF_VAR_UCALLER:
2797 if (!dtrace_priv_proc(state))
2798 return (0);
2799
2800 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) {
2801 uint64_t ustack[3];
2802
2803 /*
2804 * dtrace_getupcstack() fills in the first uint64_t
2805 * with the current PID. The second uint64_t will
2806 * be the program counter at user-level. The third
2807 * uint64_t will contain the caller, which is what
2808 * we're after.
2809 */
2810 ustack[2] = NULL;
2811 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
2812 dtrace_getupcstack(ustack, 3);
2813 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
2814 mstate->dtms_ucaller = ustack[2];
2815 mstate->dtms_present |= DTRACE_MSTATE_UCALLER;
2816 }
2817
2818 return (mstate->dtms_ucaller);
2819
2820 case DIF_VAR_PROBEPROV:
2821 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2822 return (dtrace_dif_varstr(
2823 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name,
2824 state, mstate));
2825
2826 case DIF_VAR_PROBEMOD:
2827 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2828 return (dtrace_dif_varstr(
2829 (uintptr_t)mstate->dtms_probe->dtpr_mod,
2830 state, mstate));
2831
2832 case DIF_VAR_PROBEFUNC:
2833 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2834 return (dtrace_dif_varstr(
2835 (uintptr_t)mstate->dtms_probe->dtpr_func,
2836 state, mstate));
2837
2838 case DIF_VAR_PROBENAME:
2839 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE);
2840 return (dtrace_dif_varstr(
2841 (uintptr_t)mstate->dtms_probe->dtpr_name,
2842 state, mstate));
2843
2844 case DIF_VAR_PID:
2845 if (!dtrace_priv_proc(state))
2846 return (0);
2847
2848 /*
2849 * Note that we are assuming that an unanchored probe is
2850 * always due to a high-level interrupt. (And we're assuming
2851 * that there is only a single high level interrupt.)
2852 */
2853 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2854 return (pid0.pid_id);
2855
2856 /*
2857 * It is always safe to dereference one's own t_procp pointer:
2858 * it always points to a valid, allocated proc structure.
2859 * Further, it is always safe to dereference the p_pidp member
2860 		 * of one's own proc structure. (These are truisms because
2861 * threads and processes don't clean up their own state --
2862 * they leave that task to whomever reaps them.)
2863 */
2864 return ((uint64_t)curthread->t_procp->p_pidp->pid_id);
2865
2866 case DIF_VAR_PPID:
2867 if (!dtrace_priv_proc(state))
2868 return (0);
2869
2870 /*
2871 * See comment in DIF_VAR_PID.
2872 */
2873 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2874 return (pid0.pid_id);
2875
2876 /*
2877 * It is always safe to dereference one's own t_procp pointer:
2878 * it always points to a valid, allocated proc structure.
2879 * (This is true because threads don't clean up their own
2880 * state -- they leave that task to whomever reaps them.)
2881 */
2882 return ((uint64_t)curthread->t_procp->p_ppid);
2883
2884 case DIF_VAR_TID:
2885 /*
2886 * See comment in DIF_VAR_PID.
2887 */
2888 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2889 return (0);
2890
2891 return ((uint64_t)curthread->t_tid);
2892
2893 case DIF_VAR_EXECNAME:
2894 if (!dtrace_priv_proc(state))
2895 return (0);
2896
2897 /*
2898 * See comment in DIF_VAR_PID.
2899 */
2900 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2901 return ((uint64_t)(uintptr_t)p0.p_user.u_comm);
2902
2903 /*
2904 * It is always safe to dereference one's own t_procp pointer:
2905 * it always points to a valid, allocated proc structure.
2906 * (This is true because threads don't clean up their own
2907 * state -- they leave that task to whomever reaps them.)
2908 */
2909 return (dtrace_dif_varstr(
2910 (uintptr_t)curthread->t_procp->p_user.u_comm,
2911 state, mstate));
2912
2913 case DIF_VAR_ZONENAME:
2914 if (!dtrace_priv_proc(state))
2915 return (0);
2916
2917 /*
2918 * See comment in DIF_VAR_PID.
2919 */
2920 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2921 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name);
2922
2923 /*
2924 * It is always safe to dereference one's own t_procp pointer:
2925 * it always points to a valid, allocated proc structure.
2926 * (This is true because threads don't clean up their own
2927 * state -- they leave that task to whomever reaps them.)
2928 */
2929 return (dtrace_dif_varstr(
2930 (uintptr_t)curthread->t_procp->p_zone->zone_name,
2931 state, mstate));
2932
2933 case DIF_VAR_UID:
2934 if (!dtrace_priv_proc(state))
2935 return (0);
2936
2937 /*
2938 * See comment in DIF_VAR_PID.
2939 */
2940 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2941 return ((uint64_t)p0.p_cred->cr_uid);
2942
2943 /*
2944 * It is always safe to dereference one's own t_procp pointer:
2945 * it always points to a valid, allocated proc structure.
2946 * (This is true because threads don't clean up their own
2947 * state -- they leave that task to whomever reaps them.)
2948 *
2949 * Additionally, it is safe to dereference one's own process
2950 * credential, since this is never NULL after process birth.
2951 */
2952 return ((uint64_t)curthread->t_procp->p_cred->cr_uid);
2953
2954 case DIF_VAR_GID:
2955 if (!dtrace_priv_proc(state))
2956 return (0);
2957
2958 /*
2959 * See comment in DIF_VAR_PID.
2960 */
2961 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2962 return ((uint64_t)p0.p_cred->cr_gid);
2963
2964 /*
2965 * It is always safe to dereference one's own t_procp pointer:
2966 * it always points to a valid, allocated proc structure.
2967 * (This is true because threads don't clean up their own
2968 * state -- they leave that task to whomever reaps them.)
2969 *
2970 * Additionally, it is safe to dereference one's own process
2971 * credential, since this is never NULL after process birth.
2972 */
2973 return ((uint64_t)curthread->t_procp->p_cred->cr_gid);
2974
2975 case DIF_VAR_ERRNO: {
2976 klwp_t *lwp;
2977 if (!dtrace_priv_proc(state))
2978 return (0);
2979
2980 /*
2981 * See comment in DIF_VAR_PID.
2982 */
2983 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU))
2984 return (0);
2985
2986 /*
2987 * It is always safe to dereference one's own t_lwp pointer in
2988 * the event that this pointer is non-NULL. (This is true
2989 * because threads and lwps don't clean up their own state --
2990 * they leave that task to whomever reaps them.)
2991 */
2992 if ((lwp = curthread->t_lwp) == NULL)
2993 return (0);
2994
2995 return ((uint64_t)lwp->lwp_errno);
2996 }
2997 default:
2998 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
2999 return (0);
3000 }
3001 }
3002
3003 /*
3004 * Emulate the execution of DTrace ID subroutines invoked by the call opcode.
3005 * Notice that we don't bother validating the proper number of arguments or
3006  * their types in the tuple stack. This isn't needed: all argument
3007 * interpretation is safe because of our load safety -- the worst that can
3008 * happen is that a bogus program can obtain bogus results.
3009 */
3010 static void
3011 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs,
3012 dtrace_key_t *tupregs, int nargs,
3013 dtrace_mstate_t *mstate, dtrace_state_t *state)
3014 {
3015 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
3016 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
3017 dtrace_vstate_t *vstate = &state->dts_vstate;
3018
3019 union {
3020 mutex_impl_t mi;
3021 uint64_t mx;
3022 } m;
3023
3024 union {
3025 krwlock_t ri;
3026 uintptr_t rw;
3027 } r;
3028
3029 switch (subr) {
3030 case DIF_SUBR_RAND:
3031 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875;
3032 break;
3033
3034 case DIF_SUBR_MUTEX_OWNED:
3035 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3036 mstate, vstate)) {
3037 regs[rd] = NULL;
3038 break;
3039 }
3040
3041 m.mx = dtrace_load64(tupregs[0].dttk_value);
3042 if (MUTEX_TYPE_ADAPTIVE(&m.mi))
3043 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER;
3044 else
3045 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock);
3046 break;
3047
3048 case DIF_SUBR_MUTEX_OWNER:
3049 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3050 mstate, vstate)) {
3051 regs[rd] = NULL;
3052 break;
3053 }
3054
3055 m.mx = dtrace_load64(tupregs[0].dttk_value);
3056 if (MUTEX_TYPE_ADAPTIVE(&m.mi) &&
3057 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER)
3058 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi);
3059 else
3060 regs[rd] = 0;
3061 break;
3062
3063 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE:
3064 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3065 mstate, vstate)) {
3066 regs[rd] = NULL;
3067 break;
3068 }
3069
3070 m.mx = dtrace_load64(tupregs[0].dttk_value);
3071 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi);
3072 break;
3073
3074 case DIF_SUBR_MUTEX_TYPE_SPIN:
3075 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t),
3076 mstate, vstate)) {
3077 regs[rd] = NULL;
3078 break;
3079 }
3080
3081 m.mx = dtrace_load64(tupregs[0].dttk_value);
3082 regs[rd] = MUTEX_TYPE_SPIN(&m.mi);
3083 break;
3084
3085 case DIF_SUBR_RW_READ_HELD: {
3086 uintptr_t tmp;
3087
3088 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t),
3089 mstate, vstate)) {
3090 regs[rd] = NULL;
3091 break;
3092 }
3093
3094 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3095 regs[rd] = _RW_READ_HELD(&r.ri, tmp);
3096 break;
3097 }
3098
3099 case DIF_SUBR_RW_WRITE_HELD:
3100 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3101 mstate, vstate)) {
3102 regs[rd] = NULL;
3103 break;
3104 }
3105
3106 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3107 regs[rd] = _RW_WRITE_HELD(&r.ri);
3108 break;
3109
3110 case DIF_SUBR_RW_ISWRITER:
3111 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t),
3112 mstate, vstate)) {
3113 regs[rd] = NULL;
3114 break;
3115 }
3116
3117 r.rw = dtrace_loadptr(tupregs[0].dttk_value);
3118 regs[rd] = _RW_ISWRITER(&r.ri);
3119 break;
3120
3121 case DIF_SUBR_BCOPY: {
3122 /*
3123 * We need to be sure that the destination is in the scratch
3124 * region -- no other region is allowed.
3125 */
3126 uintptr_t src = tupregs[0].dttk_value;
3127 uintptr_t dest = tupregs[1].dttk_value;
3128 size_t size = tupregs[2].dttk_value;
3129
3130 if (!dtrace_inscratch(dest, size, mstate)) {
3131 *flags |= CPU_DTRACE_BADADDR;
3132 *illval = regs[rd];
3133 break;
3134 }
3135
3136 if (!dtrace_canload(src, size, mstate, vstate)) {
3137 regs[rd] = NULL;
3138 break;
3139 }
3140
3141 dtrace_bcopy((void *)src, (void *)dest, size);
3142 break;
3143 }
3144
3145 case DIF_SUBR_ALLOCA:
3146 case DIF_SUBR_COPYIN: {
3147 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
3148 uint64_t size =
3149 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value;
3150 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size;
3151
3152 /*
3153 * This action doesn't require any credential checks since
3154 * probes will not activate in user contexts to which the
3155 * enabling user does not have permissions.
3156 */
3157
3158 /*
3159 * Rounding up the user allocation size could have overflowed
3160 * a large, bogus allocation (like -1ULL) to 0.
3161 */
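		/*
		 * For example (illustrative): a size of (uint64_t)-1 plus a
		 * few bytes of alignment slop wraps scratch_size around to a
		 * tiny value that would pass DTRACE_INSCRATCH(); the
		 * scratch_size < size comparison below catches exactly that.
		 */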
3162 if (scratch_size < size ||
3163 !DTRACE_INSCRATCH(mstate, scratch_size)) {
3164 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3165 regs[rd] = NULL;
3166 break;
3167 }
3168
3169 if (subr == DIF_SUBR_COPYIN) {
3170 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3171 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3172 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3173 }
3174
3175 mstate->dtms_scratch_ptr += scratch_size;
3176 regs[rd] = dest;
3177 break;
3178 }
3179
3180 case DIF_SUBR_COPYINTO: {
3181 uint64_t size = tupregs[1].dttk_value;
3182 uintptr_t dest = tupregs[2].dttk_value;
3183
3184 /*
3185 * This action doesn't require any credential checks since
3186 * probes will not activate in user contexts to which the
3187 * enabling user does not have permissions.
3188 */
3189 if (!dtrace_inscratch(dest, size, mstate)) {
3190 *flags |= CPU_DTRACE_BADADDR;
3191 *illval = regs[rd];
3192 break;
3193 }
3194
3195 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3196 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags);
3197 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3198 break;
3199 }
3200
3201 case DIF_SUBR_COPYINSTR: {
3202 uintptr_t dest = mstate->dtms_scratch_ptr;
3203 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3204
3205 if (nargs > 1 && tupregs[1].dttk_value < size)
3206 size = tupregs[1].dttk_value + 1;
3207
3208 /*
3209 * This action doesn't require any credential checks since
3210 * probes will not activate in user contexts to which the
3211 * enabling user does not have permissions.
3212 */
3213 if (!DTRACE_INSCRATCH(mstate, size)) {
3214 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3215 regs[rd] = NULL;
3216 break;
3217 }
3218
3219 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3220 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags);
3221 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3222
3223 ((char *)dest)[size - 1] = '\0';
3224 mstate->dtms_scratch_ptr += size;
3225 regs[rd] = dest;
3226 break;
3227 }
3228
3229 case DIF_SUBR_MSGSIZE:
3230 case DIF_SUBR_MSGDSIZE: {
3231 uintptr_t baddr = tupregs[0].dttk_value, daddr;
3232 uintptr_t wptr, rptr;
3233 size_t count = 0;
3234 int cont = 0;
3235
3236 while (baddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
3237
3238 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate,
3239 vstate)) {
3240 regs[rd] = NULL;
3241 break;
3242 }
3243
3244 wptr = dtrace_loadptr(baddr +
3245 offsetof(mblk_t, b_wptr));
3246
3247 rptr = dtrace_loadptr(baddr +
3248 offsetof(mblk_t, b_rptr));
3249
3250 if (wptr < rptr) {
3251 *flags |= CPU_DTRACE_BADADDR;
3252 *illval = tupregs[0].dttk_value;
3253 break;
3254 }
3255
3256 daddr = dtrace_loadptr(baddr +
3257 offsetof(mblk_t, b_datap));
3258
3259 baddr = dtrace_loadptr(baddr +
3260 offsetof(mblk_t, b_cont));
3261
3262 /*
3263 			 * We want to protect against denial-of-service here,
3264 * so we're only going to search the list for
3265 * dtrace_msgdsize_max mblks.
3266 */
3267 if (cont++ > dtrace_msgdsize_max) {
3268 *flags |= CPU_DTRACE_ILLOP;
3269 break;
3270 }
3271
3272 if (subr == DIF_SUBR_MSGDSIZE) {
3273 if (dtrace_load8(daddr +
3274 offsetof(dblk_t, db_type)) != M_DATA)
3275 continue;
3276 }
3277
3278 count += wptr - rptr;
3279 }
3280
3281 if (!(*flags & CPU_DTRACE_FAULT))
3282 regs[rd] = count;
3283
3284 break;
3285 }
3286
3287 case DIF_SUBR_PROGENYOF: {
3288 pid_t pid = tupregs[0].dttk_value;
3289 proc_t *p;
3290 int rval = 0;
3291
3292 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3293
3294 for (p = curthread->t_procp; p != NULL; p = p->p_parent) {
3295 if (p->p_pidp->pid_id == pid) {
3296 rval = 1;
3297 break;
3298 }
3299 }
3300
3301 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3302
3303 regs[rd] = rval;
3304 break;
3305 }
3306
3307 case DIF_SUBR_SPECULATION:
3308 regs[rd] = dtrace_speculation(state);
3309 break;
3310
3311 case DIF_SUBR_COPYOUT: {
3312 uintptr_t kaddr = tupregs[0].dttk_value;
3313 uintptr_t uaddr = tupregs[1].dttk_value;
3314 uint64_t size = tupregs[2].dttk_value;
3315
3316 if (!dtrace_destructive_disallow &&
3317 dtrace_priv_proc_control(state) &&
3318 !dtrace_istoxic(kaddr, size)) {
3319 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3320 dtrace_copyout(kaddr, uaddr, size, flags);
3321 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3322 }
3323 break;
3324 }
3325
3326 case DIF_SUBR_COPYOUTSTR: {
3327 uintptr_t kaddr = tupregs[0].dttk_value;
3328 uintptr_t uaddr = tupregs[1].dttk_value;
3329 uint64_t size = tupregs[2].dttk_value;
3330
3331 if (!dtrace_destructive_disallow &&
3332 dtrace_priv_proc_control(state) &&
3333 !dtrace_istoxic(kaddr, size)) {
3334 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
3335 dtrace_copyoutstr(kaddr, uaddr, size, flags);
3336 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
3337 }
3338 break;
3339 }
3340
3341 case DIF_SUBR_STRLEN: {
3342 size_t sz;
3343 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value;
3344 sz = dtrace_strlen((char *)addr,
3345 state->dts_options[DTRACEOPT_STRSIZE]);
3346
3347 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) {
3348 regs[rd] = NULL;
3349 break;
3350 }
3351
3352 regs[rd] = sz;
3353
3354 break;
3355 }
3356
3357 case DIF_SUBR_STRCHR:
3358 case DIF_SUBR_STRRCHR: {
3359 /*
3360 * We're going to iterate over the string looking for the
3361 * specified character. We will iterate until we have reached
3362 * the string length or we have found the character. If this
3363 * is DIF_SUBR_STRRCHR, we will look for the last occurrence
3364 * of the specified character instead of the first.
3365 */
3366 uintptr_t saddr = tupregs[0].dttk_value;
3367 uintptr_t addr = tupregs[0].dttk_value;
3368 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE];
3369 char c, target = (char)tupregs[1].dttk_value;
3370
3371 for (regs[rd] = NULL; addr < limit; addr++) {
3372 if ((c = dtrace_load8(addr)) == target) {
3373 regs[rd] = addr;
3374
3375 if (subr == DIF_SUBR_STRCHR)
3376 break;
3377 }
3378
3379 if (c == '\0')
3380 break;
3381 }
3382
3383 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) {
3384 regs[rd] = NULL;
3385 break;
3386 }
3387
3388 break;
3389 }
3390
3391 case DIF_SUBR_STRSTR:
3392 case DIF_SUBR_INDEX:
3393 case DIF_SUBR_RINDEX: {
3394 /*
3395 * We're going to iterate over the string looking for the
3396 * specified string. We will iterate until we have reached
3397 * the string length or we have found the string. (Yes, this
3398 * is done in the most naive way possible -- but considering
3399 * that the string we're searching for is likely to be
3400 * relatively short, the complexity of Rabin-Karp or similar
3401 * hardly seems merited.)
3402 */
3403 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value;
3404 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value;
3405 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3406 size_t len = dtrace_strlen(addr, size);
3407 size_t sublen = dtrace_strlen(substr, size);
3408 char *limit = addr + len, *orig = addr;
3409 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1;
3410 int inc = 1;
3411
3412 regs[rd] = notfound;
3413
3414 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) {
3415 regs[rd] = NULL;
3416 break;
3417 }
3418
3419 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate,
3420 vstate)) {
3421 regs[rd] = NULL;
3422 break;
3423 }
3424
3425 /*
3426 * strstr() and index()/rindex() have similar semantics if
3427 * both strings are the empty string: strstr() returns a
3428 * pointer to the (empty) string, and index() and rindex()
3429 * both return index 0 (regardless of any position argument).
3430 */
3431 if (sublen == 0 && len == 0) {
3432 if (subr == DIF_SUBR_STRSTR)
3433 regs[rd] = (uintptr_t)addr;
3434 else
3435 regs[rd] = 0;
3436 break;
3437 }
3438
3439 if (subr != DIF_SUBR_STRSTR) {
3440 if (subr == DIF_SUBR_RINDEX) {
3441 limit = orig - 1;
3442 addr += len;
3443 inc = -1;
3444 }
3445
3446 /*
3447 * Both index() and rindex() take an optional position
3448 * argument that denotes the starting position.
3449 */
3450 if (nargs == 3) {
3451 int64_t pos = (int64_t)tupregs[2].dttk_value;
3452
3453 /*
3454 * If the position argument to index() is
3455 * negative, Perl implicitly clamps it at
3456 * zero. This semantic is a little surprising
3457 * given the special meaning of negative
3458 * positions to similar Perl functions like
3459 * substr(), but it appears to reflect a
3460 * notion that index() can start from a
3461 * negative index and increment its way up to
3462 * the string. Given this notion, Perl's
3463 * rindex() is at least self-consistent in
3464 * that it implicitly clamps positions greater
3465 * than the string length to be the string
3466 * length. Where Perl completely loses
3467 * coherence, however, is when the specified
3468 * substring is the empty string (""). In
3469 * this case, even if the position is
3470 * negative, rindex() returns 0 -- and even if
3471 * the position is greater than the length,
3472 * index() returns the string length. These
3473 * semantics violate the notion that index()
3474 * should never return a value less than the
3475 * specified position and that rindex() should
3476 * never return a value greater than the
3477 * specified position. (One assumes that
3478 * these semantics are artifacts of Perl's
3479 * implementation and not the results of
3480 * deliberate design -- it beggars belief that
3481 * even Larry Wall could desire such oddness.)
3482 * While in the abstract one would wish for
3483 * consistent position semantics across
3484 * substr(), index() and rindex() -- or at the
3485 * very least self-consistent position
3486 * semantics for index() and rindex() -- we
3487 * instead opt to keep with the extant Perl
3488 * semantics, in all their broken glory. (Do
3489 * we have more desire to maintain Perl's
3490 * semantics than Perl does? Probably.)
3491 */
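				/*
				 * Illustrative cases of the clamping just
				 * described (not from the original source):
				 * rindex("coconut", "c", 100) clamps the
				 * position to the string length and returns
				 * 2; index("coconut", "c", -5) clamps the
				 * position to 0 and returns 0; and
				 * index("coconut", "", 100) returns the
				 * string length, 7.
				 */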
3492 if (subr == DIF_SUBR_RINDEX) {
3493 if (pos < 0) {
3494 if (sublen == 0)
3495 regs[rd] = 0;
3496 break;
3497 }
3498
3499 if (pos > len)
3500 pos = len;
3501 } else {
3502 if (pos < 0)
3503 pos = 0;
3504
3505 if (pos >= len) {
3506 if (sublen == 0)
3507 regs[rd] = len;
3508 break;
3509 }
3510 }
3511
3512 addr = orig + pos;
3513 }
3514 }
3515
3516 for (regs[rd] = notfound; addr != limit; addr += inc) {
3517 if (dtrace_strncmp(addr, substr, sublen) == 0) {
3518 if (subr != DIF_SUBR_STRSTR) {
3519 /*
3520 * As D index() and rindex() are
3521 * modeled on Perl (and not on awk),
3522 * we return a zero-based (and not a
3523 * one-based) index. (For you Perl
3524 * weenies: no, we're not going to add
3525 * $[ -- and shouldn't you be at a con
3526 * or something?)
3527 */
3528 regs[rd] = (uintptr_t)(addr - orig);
3529 break;
3530 }
3531
3532 ASSERT(subr == DIF_SUBR_STRSTR);
3533 regs[rd] = (uintptr_t)addr;
3534 break;
3535 }
3536 }
3537
3538 break;
3539 }
3540
3541 case DIF_SUBR_STRTOK: {
3542 uintptr_t addr = tupregs[0].dttk_value;
3543 uintptr_t tokaddr = tupregs[1].dttk_value;
3544 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3545 uintptr_t limit, toklimit = tokaddr + size;
3546 uint8_t c, tokmap[32]; /* 256 / 8 */
3547 char *dest = (char *)mstate->dtms_scratch_ptr;
3548 int i;
3549
3550 /*
3551 * Check both the token buffer and (later) the input buffer,
3552 * since both could be non-scratch addresses.
3553 */
3554 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) {
3555 regs[rd] = NULL;
3556 break;
3557 }
3558
3559 if (!DTRACE_INSCRATCH(mstate, size)) {
3560 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3561 regs[rd] = NULL;
3562 break;
3563 }
3564
3565 if (addr == NULL) {
3566 /*
3567 * If the address specified is NULL, we use our saved
3568 * strtok pointer from the mstate. Note that this
3569 * means that the saved strtok pointer is _only_
3570 * valid within multiple enablings of the same probe --
3571 * it behaves like an implicit clause-local variable.
3572 */
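			/*
			 * Illustrative D usage (not from the original
			 * source): a clause may walk a path one component
			 * at a time with
			 *
			 *	this->c = strtok(copyinstr(arg0), "/");
			 *	this->c = strtok(NULL, "/");
			 *
			 * where the NULL first argument picks up from the
			 * pointer saved here.
			 */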
3573 addr = mstate->dtms_strtok;
3574 } else {
3575 /*
3576 * If the user-specified address is non-NULL we must
3577 * access check it. This is the only time we have
3578 * a chance to do so, since this address may reside
3579 			 * in the string table of this clause -- future calls
3580 * (when we fetch addr from mstate->dtms_strtok)
3581 * would fail this access check.
3582 */
3583 if (!dtrace_strcanload(addr, size, mstate, vstate)) {
3584 regs[rd] = NULL;
3585 break;
3586 }
3587 }
3588
3589 /*
3590 * First, zero the token map, and then process the token
3591 * string -- setting a bit in the map for every character
3592 * found in the token string.
3593 */
3594 for (i = 0; i < sizeof (tokmap); i++)
3595 tokmap[i] = 0;
3596
3597 for (; tokaddr < toklimit; tokaddr++) {
3598 if ((c = dtrace_load8(tokaddr)) == '\0')
3599 break;
3600
3601 ASSERT((c >> 3) < sizeof (tokmap));
3602 tokmap[c >> 3] |= (1 << (c & 0x7));
3603 }
3604
3605 for (limit = addr + size; addr < limit; addr++) {
3606 /*
3607 * We're looking for a character that is _not_ contained
3608 * in the token string.
3609 */
3610 if ((c = dtrace_load8(addr)) == '\0')
3611 break;
3612
3613 if (!(tokmap[c >> 3] & (1 << (c & 0x7))))
3614 break;
3615 }
3616
3617 if (c == '\0') {
3618 /*
3619 * We reached the end of the string without finding
3620 * any character that was not in the token string.
3621 * We return NULL in this case, and we set the saved
3622 * address to NULL as well.
3623 */
3624 regs[rd] = NULL;
3625 mstate->dtms_strtok = NULL;
3626 break;
3627 }
3628
3629 /*
3630 * From here on, we're copying into the destination string.
3631 */
3632 for (i = 0; addr < limit && i < size - 1; addr++) {
3633 if ((c = dtrace_load8(addr)) == '\0')
3634 break;
3635
3636 if (tokmap[c >> 3] & (1 << (c & 0x7)))
3637 break;
3638
3639 ASSERT(i < size);
3640 dest[i++] = c;
3641 }
3642
3643 ASSERT(i < size);
3644 dest[i] = '\0';
3645 regs[rd] = (uintptr_t)dest;
3646 mstate->dtms_scratch_ptr += size;
3647 mstate->dtms_strtok = addr;
3648 break;
3649 }
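	/*
	 * Illustrative aside (not part of the implementation): the token
	 * membership test used by strtok() above is an ordinary 256-bit
	 * bitmap.  In stand-alone C the same idea looks roughly like this,
	 * for a uint8_t character c and delimiter map "map":
	 *
	 *	uint8_t map[32] = { 0 };		(256 bits)
	 *	map[c >> 3] |= (1 << (c & 0x7));	(mark c as a delimiter)
	 *	if (map[c >> 3] & (1 << (c & 0x7)))	(is c a delimiter?)
	 */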
3650
3651 case DIF_SUBR_SUBSTR: {
3652 uintptr_t s = tupregs[0].dttk_value;
3653 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3654 char *d = (char *)mstate->dtms_scratch_ptr;
3655 int64_t index = (int64_t)tupregs[1].dttk_value;
3656 int64_t remaining = (int64_t)tupregs[2].dttk_value;
3657 size_t len = dtrace_strlen((char *)s, size);
3658 int64_t i;
3659
3660 if (!dtrace_canload(s, len + 1, mstate, vstate)) {
3661 regs[rd] = NULL;
3662 break;
3663 }
3664
3665 if (!DTRACE_INSCRATCH(mstate, size)) {
3666 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3667 regs[rd] = NULL;
3668 break;
3669 }
3670
3671 if (nargs <= 2)
3672 remaining = (int64_t)size;
3673
3674 if (index < 0) {
3675 index += len;
3676
3677 if (index < 0 && index + remaining > 0) {
3678 remaining += index;
3679 index = 0;
3680 }
3681 }
3682
3683 if (index >= len || index < 0) {
3684 remaining = 0;
3685 } else if (remaining < 0) {
3686 remaining += len - index;
3687 } else if (index + remaining > size) {
3688 remaining = size - index;
3689 }
3690
3691 for (i = 0; i < remaining; i++) {
3692 if ((d[i] = dtrace_load8(s + index + i)) == '\0')
3693 break;
3694 }
3695
3696 d[i] = '\0';
3697
3698 mstate->dtms_scratch_ptr += size;
3699 regs[rd] = (uintptr_t)d;
3700 break;
3701 }
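	/*
	 * Worked example of the index normalization above (illustrative
	 * only): for substr("coffee", -3), len is 6, so the index is
	 * adjusted to 3 and the result is "fee"; substr("dtrace", 1, 3)
	 * yields "tra"; and a negative remaining count is biased by
	 * (len - index), so substr("coffee", 1, -1) yields "offe".
	 */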
3702
3703 case DIF_SUBR_GETMAJOR:
3704 #ifdef _LP64
3705 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64;
3706 #else
3707 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ;
3708 #endif
3709 break;
3710
3711 case DIF_SUBR_GETMINOR:
3712 #ifdef _LP64
3713 regs[rd] = tupregs[0].dttk_value & MAXMIN64;
3714 #else
3715 regs[rd] = tupregs[0].dttk_value & MAXMIN;
3716 #endif
3717 break;
3718
3719 case DIF_SUBR_DDI_PATHNAME: {
3720 /*
3721 * This one is a galactic mess. We are going to roughly
3722 * emulate ddi_pathname(), but it's made more complicated
3723 * by the fact that we (a) want to include the minor name and
3724 * (b) must proceed iteratively instead of recursively.
3725 */
3726 uintptr_t dest = mstate->dtms_scratch_ptr;
3727 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3728 char *start = (char *)dest, *end = start + size - 1;
3729 uintptr_t daddr = tupregs[0].dttk_value;
3730 int64_t minor = (int64_t)tupregs[1].dttk_value;
3731 char *s;
3732 int i, len, depth = 0;
3733
3734 /*
3735 * Due to all the pointer jumping we do and context we must
3736 * rely upon, we just mandate that the user must have kernel
3737 * read privileges to use this routine.
3738 */
3739 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) {
3740 *flags |= CPU_DTRACE_KPRIV;
3741 *illval = daddr;
3742 regs[rd] = NULL;
3743 }
3744
3745 if (!DTRACE_INSCRATCH(mstate, size)) {
3746 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3747 regs[rd] = NULL;
3748 break;
3749 }
3750
3751 *end = '\0';
3752
3753 /*
3754 * We want to have a name for the minor. In order to do this,
3755 * we need to walk the minor list from the devinfo. We want
3756 * to be sure that we don't infinitely walk a circular list,
3757 * so we check for circularity by sending a scout pointer
3758 * ahead two elements for every element that we iterate over;
3759 * if the list is circular, these will ultimately point to the
3760 * same element. You may recognize this little trick as the
3761 * answer to a stupid interview question -- one that always
3762 * seems to be asked by those who had to have it laboriously
3763 * explained to them, and who can't even concisely describe
3764 * the conditions under which one would be forced to resort to
3765 * this technique. Needless to say, those conditions are
3766 * found here -- and probably only here. Is this the only use
3767 * of this infamous trick in shipping, production code? If it
3768 * isn't, it probably should be...
3769 */
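		/*
		 * For reference, a minimal stand-alone sketch of the classic
		 * two-pointer (tortoise-and-hare) cycle check that the scout
		 * pointer implements (illustrative only; node_t is a
		 * stand-in type, and the in-probe version below must use
		 * dtrace_loadptr() and honor the CPU fault flags rather than
		 * dereferencing directly):
		 *
		 *	node_t *slow = head, *fast = head;
		 *
		 *	while (fast != NULL && fast->next != NULL) {
		 *		slow = slow->next;
		 *		fast = fast->next->next;
		 *		if (slow == fast)
		 *			return (B_TRUE);	(circular)
		 *	}
		 *	return (B_FALSE);	(NULL-terminated)
		 */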
3770 if (minor != -1) {
3771 uintptr_t maddr = dtrace_loadptr(daddr +
3772 offsetof(struct dev_info, devi_minor));
3773
3774 uintptr_t next = offsetof(struct ddi_minor_data, next);
3775 uintptr_t name = offsetof(struct ddi_minor_data,
3776 d_minor) + offsetof(struct ddi_minor, name);
3777 uintptr_t dev = offsetof(struct ddi_minor_data,
3778 d_minor) + offsetof(struct ddi_minor, dev);
3779 uintptr_t scout;
3780
3781 if (maddr != NULL)
3782 scout = dtrace_loadptr(maddr + next);
3783
3784 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
3785 uint64_t m;
3786 #ifdef _LP64
3787 m = dtrace_load64(maddr + dev) & MAXMIN64;
3788 #else
3789 m = dtrace_load32(maddr + dev) & MAXMIN;
3790 #endif
3791 if (m != minor) {
3792 maddr = dtrace_loadptr(maddr + next);
3793
3794 if (scout == NULL)
3795 continue;
3796
3797 scout = dtrace_loadptr(scout + next);
3798
3799 if (scout == NULL)
3800 continue;
3801
3802 scout = dtrace_loadptr(scout + next);
3803
3804 if (scout == NULL)
3805 continue;
3806
3807 if (scout == maddr) {
3808 *flags |= CPU_DTRACE_ILLOP;
3809 break;
3810 }
3811
3812 continue;
3813 }
3814
3815 /*
3816 * We have the minor data. Now we need to
3817 * copy the minor's name into the end of the
3818 * pathname.
3819 */
3820 s = (char *)dtrace_loadptr(maddr + name);
3821 len = dtrace_strlen(s, size);
3822
3823 if (*flags & CPU_DTRACE_FAULT)
3824 break;
3825
3826 if (len != 0) {
3827 if ((end -= (len + 1)) < start)
3828 break;
3829
3830 *end = ':';
3831 }
3832
3833 for (i = 1; i <= len; i++)
3834 end[i] = dtrace_load8((uintptr_t)s++);
3835 break;
3836 }
3837 }
3838
3839 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) {
3840 ddi_node_state_t devi_state;
3841
3842 devi_state = dtrace_load32(daddr +
3843 offsetof(struct dev_info, devi_node_state));
3844
3845 if (*flags & CPU_DTRACE_FAULT)
3846 break;
3847
3848 if (devi_state >= DS_INITIALIZED) {
3849 s = (char *)dtrace_loadptr(daddr +
3850 offsetof(struct dev_info, devi_addr));
3851 len = dtrace_strlen(s, size);
3852
3853 if (*flags & CPU_DTRACE_FAULT)
3854 break;
3855
3856 if (len != 0) {
3857 if ((end -= (len + 1)) < start)
3858 break;
3859
3860 *end = '@';
3861 }
3862
3863 for (i = 1; i <= len; i++)
3864 end[i] = dtrace_load8((uintptr_t)s++);
3865 }
3866
3867 /*
3868 * Now for the node name...
3869 */
3870 s = (char *)dtrace_loadptr(daddr +
3871 offsetof(struct dev_info, devi_node_name));
3872
3873 daddr = dtrace_loadptr(daddr +
3874 offsetof(struct dev_info, devi_parent));
3875
3876 /*
3877 * If our parent is NULL (that is, if we're the root
3878 * node), we're going to use the special path
3879 * "devices".
3880 */
3881 if (daddr == NULL)
3882 s = "devices";
3883
3884 len = dtrace_strlen(s, size);
3885 if (*flags & CPU_DTRACE_FAULT)
3886 break;
3887
3888 if ((end -= (len + 1)) < start)
3889 break;
3890
3891 for (i = 1; i <= len; i++)
3892 end[i] = dtrace_load8((uintptr_t)s++);
3893 *end = '/';
3894
3895 if (depth++ > dtrace_devdepth_max) {
3896 *flags |= CPU_DTRACE_ILLOP;
3897 break;
3898 }
3899 }
3900
3901 if (end < start)
3902 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3903
3904 if (daddr == NULL) {
3905 regs[rd] = (uintptr_t)end;
3906 mstate->dtms_scratch_ptr += size;
3907 }
3908
3909 break;
3910 }
3911
3912 case DIF_SUBR_STRJOIN: {
3913 char *d = (char *)mstate->dtms_scratch_ptr;
3914 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
3915 uintptr_t s1 = tupregs[0].dttk_value;
3916 uintptr_t s2 = tupregs[1].dttk_value;
3917 int i = 0;
3918
3919 if (!dtrace_strcanload(s1, size, mstate, vstate) ||
3920 !dtrace_strcanload(s2, size, mstate, vstate)) {
3921 regs[rd] = NULL;
3922 break;
3923 }
3924
3925 if (!DTRACE_INSCRATCH(mstate, size)) {
3926 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3927 regs[rd] = NULL;
3928 break;
3929 }
3930
3931 for (;;) {
3932 if (i >= size) {
3933 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3934 regs[rd] = NULL;
3935 break;
3936 }
3937
3938 if ((d[i++] = dtrace_load8(s1++)) == '\0') {
3939 i--;
3940 break;
3941 }
3942 }
3943
3944 for (;;) {
3945 if (i >= size) {
3946 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3947 regs[rd] = NULL;
3948 break;
3949 }
3950
3951 if ((d[i++] = dtrace_load8(s2++)) == '\0')
3952 break;
3953 }
3954
3955 if (i < size) {
3956 mstate->dtms_scratch_ptr += i;
3957 regs[rd] = (uintptr_t)d;
3958 }
3959
3960 break;
3961 }
3962
3963 case DIF_SUBR_LLTOSTR: {
3964 int64_t i = (int64_t)tupregs[0].dttk_value;
3965 int64_t val = i < 0 ? i * -1 : i;
3966 uint64_t size = 22; /* enough room for 2^64 in decimal */
3967 char *end = (char *)mstate->dtms_scratch_ptr + size - 1;
3968
3969 if (!DTRACE_INSCRATCH(mstate, size)) {
3970 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
3971 regs[rd] = NULL;
3972 break;
3973 }
3974
3975 for (*end-- = '\0'; val; val /= 10)
3976 *end-- = '0' + (val % 10);
3977
3978 if (i == 0)
3979 *end-- = '0';
3980
3981 if (i < 0)
3982 *end-- = '-';
3983
3984 regs[rd] = (uintptr_t)end + 1;
3985 mstate->dtms_scratch_ptr += size;
3986 break;
3987 }
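	/*
	 * Worked example (illustrative): lltostr(-42) fills the 22-byte
	 * scratch buffer from its end -- NUL, then '2', '4' and finally
	 * '-' -- and returns a pointer to the '-'.  The 22 bytes cover 20
	 * decimal digits (enough for 2^64 - 1), an optional sign, and the
	 * terminating NUL.
	 */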
3988
3989 case DIF_SUBR_HTONS:
3990 case DIF_SUBR_NTOHS:
3991 #ifdef _BIG_ENDIAN
3992 regs[rd] = (uint16_t)tupregs[0].dttk_value;
3993 #else
3994 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value);
3995 #endif
3996 break;
3997
3998
3999 case DIF_SUBR_HTONL:
4000 case DIF_SUBR_NTOHL:
4001 #ifdef _BIG_ENDIAN
4002 regs[rd] = (uint32_t)tupregs[0].dttk_value;
4003 #else
4004 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value);
4005 #endif
4006 break;
4007
4008
4009 case DIF_SUBR_HTONLL:
4010 case DIF_SUBR_NTOHLL:
4011 #ifdef _BIG_ENDIAN
4012 regs[rd] = (uint64_t)tupregs[0].dttk_value;
4013 #else
4014 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value);
4015 #endif
4016 break;
4017
4018
4019 case DIF_SUBR_DIRNAME:
4020 case DIF_SUBR_BASENAME: {
4021 char *dest = (char *)mstate->dtms_scratch_ptr;
4022 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4023 uintptr_t src = tupregs[0].dttk_value;
4024 int i, j, len = dtrace_strlen((char *)src, size);
4025 int lastbase = -1, firstbase = -1, lastdir = -1;
4026 int start, end;
4027
4028 if (!dtrace_canload(src, len + 1, mstate, vstate)) {
4029 regs[rd] = NULL;
4030 break;
4031 }
4032
4033 if (!DTRACE_INSCRATCH(mstate, size)) {
4034 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4035 regs[rd] = NULL;
4036 break;
4037 }
4038
4039 /*
4040		 * The basename and dirname for a zero-length string are
4041		 * defined to be ".".
4042 */
4043 if (len == 0) {
4044 len = 1;
4045 src = (uintptr_t)".";
4046 }
4047
4048 /*
4049 * Start from the back of the string, moving back toward the
4050 * front until we see a character that isn't a slash. That
4051 * character is the last character in the basename.
4052 */
4053 for (i = len - 1; i >= 0; i--) {
4054 if (dtrace_load8(src + i) != '/')
4055 break;
4056 }
4057
4058 if (i >= 0)
4059 lastbase = i;
4060
4061 /*
4062 * Starting from the last character in the basename, move
4063 * towards the front until we find a slash. The character
4064 * that we processed immediately before that is the first
4065 * character in the basename.
4066 */
4067 for (; i >= 0; i--) {
4068 if (dtrace_load8(src + i) == '/')
4069 break;
4070 }
4071
4072 if (i >= 0)
4073 firstbase = i + 1;
4074
4075 /*
4076 * Now keep going until we find a non-slash character. That
4077 * character is the last character in the dirname.
4078 */
4079 for (; i >= 0; i--) {
4080 if (dtrace_load8(src + i) != '/')
4081 break;
4082 }
4083
4084 if (i >= 0)
4085 lastdir = i;
4086
4087 ASSERT(!(lastbase == -1 && firstbase != -1));
4088 ASSERT(!(firstbase == -1 && lastdir != -1));
4089
4090 if (lastbase == -1) {
4091 /*
4092 * We didn't find a non-slash character. We know that
4093 * the length is non-zero, so the whole string must be
4094 * slashes. In either the dirname or the basename
4095 * case, we return '/'.
4096 */
4097 ASSERT(firstbase == -1);
4098 firstbase = lastbase = lastdir = 0;
4099 }
4100
4101 if (firstbase == -1) {
4102 /*
4103 * The entire string consists only of a basename
4104 * component. If we're looking for dirname, we need
4105 * to change our string to be just "."; if we're
4106 * looking for a basename, we'll just set the first
4107 * character of the basename to be 0.
4108 */
4109 if (subr == DIF_SUBR_DIRNAME) {
4110 ASSERT(lastdir == -1);
4111 src = (uintptr_t)".";
4112 lastdir = 0;
4113 } else {
4114 firstbase = 0;
4115 }
4116 }
4117
4118 if (subr == DIF_SUBR_DIRNAME) {
4119 if (lastdir == -1) {
4120 /*
4121 * We know that we have a slash in the name --
4122 * or lastdir would be set to 0, above. And
4123 * because lastdir is -1, we know that this
4124 * slash must be the first character. (That
4125 * is, the full string must be of the form
4126 * "/basename".) In this case, the last
4127 * character of the directory name is 0.
4128 */
4129 lastdir = 0;
4130 }
4131
4132 start = 0;
4133 end = lastdir;
4134 } else {
4135 ASSERT(subr == DIF_SUBR_BASENAME);
4136 ASSERT(firstbase != -1 && lastbase != -1);
4137 start = firstbase;
4138 end = lastbase;
4139 }
4140
4141 for (i = start, j = 0; i <= end && j < size - 1; i++, j++)
4142 dest[j] = dtrace_load8(src + i);
4143
4144 dest[j] = '\0';
4145 regs[rd] = (uintptr_t)dest;
4146 mstate->dtms_scratch_ptr += size;
4147 break;
4148 }
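	/*
	 * Worked example of the three backward scans above (illustrative):
	 * for the string "/usr/lib/", the trailing slash is skipped, so
	 * lastbase is 7 ('b'), firstbase is 5 ('l') and lastdir is 3 ('r');
	 * basename() therefore returns "lib" and dirname() returns "/usr".
	 */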
4149
4150 case DIF_SUBR_CLEANPATH: {
4151 char *dest = (char *)mstate->dtms_scratch_ptr, c;
4152 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE];
4153 uintptr_t src = tupregs[0].dttk_value;
4154 int i = 0, j = 0;
4155
4156 if (!dtrace_strcanload(src, size, mstate, vstate)) {
4157 regs[rd] = NULL;
4158 break;
4159 }
4160
4161 if (!DTRACE_INSCRATCH(mstate, size)) {
4162 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4163 regs[rd] = NULL;
4164 break;
4165 }
4166
4167 /*
4168 * Move forward, loading each character.
4169 */
4170 do {
4171 c = dtrace_load8(src + i++);
4172 next:
4173 if (j + 5 >= size) /* 5 = strlen("/..c\0") */
4174 break;
4175
4176 if (c != '/') {
4177 dest[j++] = c;
4178 continue;
4179 }
4180
4181 c = dtrace_load8(src + i++);
4182
4183 if (c == '/') {
4184 /*
4185 * We have two slashes -- we can just advance
4186 * to the next character.
4187 */
4188 goto next;
4189 }
4190
4191 if (c != '.') {
4192 /*
4193 * This is not "." and it's not ".." -- we can
4194 * just store the "/" and this character and
4195 * drive on.
4196 */
4197 dest[j++] = '/';
4198 dest[j++] = c;
4199 continue;
4200 }
4201
4202 c = dtrace_load8(src + i++);
4203
4204 if (c == '/') {
4205 /*
4206 * This is a "/./" component. We're not going
4207 * to store anything in the destination buffer;
4208 * we're just going to go to the next component.
4209 */
4210 goto next;
4211 }
4212
4213 if (c != '.') {
4214 /*
4215 * This is not ".." -- we can just store the
4216 * "/." and this character and continue
4217 * processing.
4218 */
4219 dest[j++] = '/';
4220 dest[j++] = '.';
4221 dest[j++] = c;
4222 continue;
4223 }
4224
4225 c = dtrace_load8(src + i++);
4226
4227 if (c != '/' && c != '\0') {
4228 /*
4229 * This is not ".." -- it's "..[mumble]".
4230 * We'll store the "/.." and this character
4231 * and continue processing.
4232 */
4233 dest[j++] = '/';
4234 dest[j++] = '.';
4235 dest[j++] = '.';
4236 dest[j++] = c;
4237 continue;
4238 }
4239
4240 /*
4241 * This is "/../" or "/..\0". We need to back up
4242 * our destination pointer until we find a "/".
4243 */
4244 i--;
4245 while (j != 0 && dest[--j] != '/')
4246 continue;
4247
4248 if (c == '\0')
4249 dest[++j] = '/';
4250 } while (c != '\0');
4251
4252 dest[j] = '\0';
4253 regs[rd] = (uintptr_t)dest;
4254 mstate->dtms_scratch_ptr += size;
4255 break;
4256 }
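	/*
	 * Illustrative example of the loop above: cleanpath() collapses
	 * repeated slashes, drops "." components and resolves ".." against
	 * the output built so far, so an input such as
	 * "/usr//lib/./libc/../libm" is reduced to "/usr/lib/libm".
	 */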
4257
4258 case DIF_SUBR_INET_NTOA:
4259 case DIF_SUBR_INET_NTOA6:
4260 case DIF_SUBR_INET_NTOP: {
4261 size_t size;
4262 int af, argi, i;
4263 char *base, *end;
4264
4265 if (subr == DIF_SUBR_INET_NTOP) {
4266 af = (int)tupregs[0].dttk_value;
4267 argi = 1;
4268 } else {
4269 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6;
4270 argi = 0;
4271 }
4272
4273 if (af == AF_INET) {
4274 ipaddr_t ip4;
4275 uint8_t *ptr8, val;
4276
4277 /*
4278 * Safely load the IPv4 address.
4279 */
4280 ip4 = dtrace_load32(tupregs[argi].dttk_value);
4281
4282 /*
4283 * Check an IPv4 string will fit in scratch.
4284 */
4285 size = INET_ADDRSTRLEN;
4286 if (!DTRACE_INSCRATCH(mstate, size)) {
4287 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4288 regs[rd] = NULL;
4289 break;
4290 }
4291 base = (char *)mstate->dtms_scratch_ptr;
4292 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4293
4294 /*
4295 * Stringify as a dotted decimal quad.
4296 */
4297 *end-- = '\0';
4298 ptr8 = (uint8_t *)&ip4;
4299 for (i = 3; i >= 0; i--) {
4300 val = ptr8[i];
4301
4302 if (val == 0) {
4303 *end-- = '0';
4304 } else {
4305 for (; val; val /= 10) {
4306 *end-- = '0' + (val % 10);
4307 }
4308 }
4309
4310 if (i > 0)
4311 *end-- = '.';
4312 }
4313 ASSERT(end + 1 >= base);
4314
4315 } else if (af == AF_INET6) {
4316 struct in6_addr ip6;
4317 int firstzero, tryzero, numzero, v6end;
4318 uint16_t val;
4319 const char digits[] = "0123456789abcdef";
4320
4321 /*
4322			 * Stringify using RFC 1884 convention 2 -- 16-bit
4323			 * hexadecimal values with a zero-run compression.
4324			 * Lower-case hexadecimal digits are used,
4325			 * e.g., fe80::214:4fff:fe0b:76c8.
4326			 * The IPv4 embedded form is returned for inet_ntop();
4327			 * just the IPv4 string is returned for inet_ntoa6().
4328 */
4329
4330 /*
4331 * Safely load the IPv6 address.
4332 */
4333 dtrace_bcopy(
4334 (void *)(uintptr_t)tupregs[argi].dttk_value,
4335 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr));
4336
4337 /*
4338 * Check an IPv6 string will fit in scratch.
4339 */
4340 size = INET6_ADDRSTRLEN;
4341 if (!DTRACE_INSCRATCH(mstate, size)) {
4342 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
4343 regs[rd] = NULL;
4344 break;
4345 }
4346 base = (char *)mstate->dtms_scratch_ptr;
4347 end = (char *)mstate->dtms_scratch_ptr + size - 1;
4348 *end-- = '\0';
4349
4350 /*
4351 * Find the longest run of 16 bit zero values
4352 * for the single allowed zero compression - "::".
4353 */
4354 firstzero = -1;
4355 tryzero = -1;
4356 numzero = 1;
4357 for (i = 0; i < sizeof (struct in6_addr); i++) {
4358 if (ip6._S6_un._S6_u8[i] == 0 &&
4359 tryzero == -1 && i % 2 == 0) {
4360 tryzero = i;
4361 continue;
4362 }
4363
4364 if (tryzero != -1 &&
4365 (ip6._S6_un._S6_u8[i] != 0 ||
4366 i == sizeof (struct in6_addr) - 1)) {
4367
4368 if (i - tryzero <= numzero) {
4369 tryzero = -1;
4370 continue;
4371 }
4372
4373 firstzero = tryzero;
4374 numzero = i - i % 2 - tryzero;
4375 tryzero = -1;
4376
4377 if (ip6._S6_un._S6_u8[i] == 0 &&
4378 i == sizeof (struct in6_addr) - 1)
4379 numzero += 2;
4380 }
4381 }
4382 ASSERT(firstzero + numzero <= sizeof (struct in6_addr));
4383
4384 /*
4385 * Check for an IPv4 embedded address.
4386 */
4387 v6end = sizeof (struct in6_addr) - 2;
4388 if (IN6_IS_ADDR_V4MAPPED(&ip6) ||
4389 IN6_IS_ADDR_V4COMPAT(&ip6)) {
4390 for (i = sizeof (struct in6_addr) - 1;
4391 i >= DTRACE_V4MAPPED_OFFSET; i--) {
4392 ASSERT(end >= base);
4393
4394 val = ip6._S6_un._S6_u8[i];
4395
4396 if (val == 0) {
4397 *end-- = '0';
4398 } else {
4399 for (; val; val /= 10) {
4400 *end-- = '0' + val % 10;
4401 }
4402 }
4403
4404 if (i > DTRACE_V4MAPPED_OFFSET)
4405 *end-- = '.';
4406 }
4407
4408 if (subr == DIF_SUBR_INET_NTOA6)
4409 goto inetout;
4410
4411 /*
4412 * Set v6end to skip the IPv4 address that
4413 * we have already stringified.
4414 */
4415 v6end = 10;
4416 }
4417
4418 /*
4419 * Build the IPv6 string by working through the
4420 * address in reverse.
4421 */
4422 for (i = v6end; i >= 0; i -= 2) {
4423 ASSERT(end >= base);
4424
4425 if (i == firstzero + numzero - 2) {
4426 *end-- = ':';
4427 *end-- = ':';
4428 i -= numzero - 2;
4429 continue;
4430 }
4431
4432 if (i < 14 && i != firstzero - 2)
4433 *end-- = ':';
4434
4435 val = (ip6._S6_un._S6_u8[i] << 8) +
4436 ip6._S6_un._S6_u8[i + 1];
4437
4438 if (val == 0) {
4439 *end-- = '0';
4440 } else {
4441 for (; val; val /= 16) {
4442 *end-- = digits[val % 16];
4443 }
4444 }
4445 }
4446 ASSERT(end + 1 >= base);
4447
4448 } else {
4449 /*
4450			 * The user didn't use AF_INET or AF_INET6.
4451 */
4452 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
4453 regs[rd] = NULL;
4454 break;
4455 }
4456
4457 inetout: regs[rd] = (uintptr_t)end + 1;
4458 mstate->dtms_scratch_ptr += size;
4459 break;
4460 }
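	/*
	 * Worked example of the zero-run scan above (illustrative): for
	 * fe80:0:0:0:214:4fff:fe0b:76c8, bytes 2 through 7 of the address
	 * are zero, so firstzero ends up as 2 and numzero as 6; the
	 * reverse-order stringification then emits "::" in place of that
	 * run, yielding "fe80::214:4fff:fe0b:76c8".
	 */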
4461
4462 }
4463 }
4464
4465 /*
4466 * Emulate the execution of DTrace IR instructions specified by the given
4467 * DIF object. This function is deliberately void of assertions as all of
4468 * the necessary checks are handled by a call to dtrace_difo_validate().
4469 */
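/*
 * As a purely illustrative example of what the loop below executes (written
 * with ad hoc mnemonics and invented register and table indices -- actual
 * dtrace(1M)-generated DIF will differ), a predicate comparing two constants
 * might look like:
 *
 *	setx	%r1, inttab[0]		(DIF_OP_SETX: load integer constant)
 *	setx	%r2, inttab[1]		(DIF_OP_SETX)
 *	cmp	%r1, %r2		(DIF_OP_CMP: sets cc_n/cc_z/cc_v/cc_c)
 *	bg	L1			(DIF_OP_BG: taken if %r1 > %r2, signed)
 *	ret	%r0			(DIF_OP_RET: %r0 is always zero -> false)
 * L1:	setx	%r3, inttab[2]
 *	ret	%r3			(non-zero -> predicate is true)
 */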
4470 static uint64_t
4471 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate,
4472 dtrace_vstate_t *vstate, dtrace_state_t *state)
4473 {
4474 const dif_instr_t *text = difo->dtdo_buf;
4475 const uint_t textlen = difo->dtdo_len;
4476 const char *strtab = difo->dtdo_strtab;
4477 const uint64_t *inttab = difo->dtdo_inttab;
4478
4479 uint64_t rval = 0;
4480 dtrace_statvar_t *svar;
4481 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
4482 dtrace_difv_t *v;
4483 volatile uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
4484 volatile uintptr_t *illval = &cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
4485
4486 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
4487 uint64_t regs[DIF_DIR_NREGS];
4488 uint64_t *tmp;
4489
4490 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0;
4491 int64_t cc_r;
4492 uint_t pc = 0, id, opc;
4493 uint8_t ttop = 0;
4494 dif_instr_t instr;
4495 uint_t r1, r2, rd;
4496
4497 /*
4498 * We stash the current DIF object into the machine state: we need it
4499 * for subsequent access checking.
4500 */
4501 mstate->dtms_difo = difo;
4502
4503 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */
4504
4505 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) {
4506 opc = pc;
4507
4508 instr = text[pc++];
4509 r1 = DIF_INSTR_R1(instr);
4510 r2 = DIF_INSTR_R2(instr);
4511 rd = DIF_INSTR_RD(instr);
4512
4513 switch (DIF_INSTR_OP(instr)) {
4514 case DIF_OP_OR:
4515 regs[rd] = regs[r1] | regs[r2];
4516 break;
4517 case DIF_OP_XOR:
4518 regs[rd] = regs[r1] ^ regs[r2];
4519 break;
4520 case DIF_OP_AND:
4521 regs[rd] = regs[r1] & regs[r2];
4522 break;
4523 case DIF_OP_SLL:
4524 regs[rd] = regs[r1] << regs[r2];
4525 break;
4526 case DIF_OP_SRL:
4527 regs[rd] = regs[r1] >> regs[r2];
4528 break;
4529 case DIF_OP_SUB:
4530 regs[rd] = regs[r1] - regs[r2];
4531 break;
4532 case DIF_OP_ADD:
4533 regs[rd] = regs[r1] + regs[r2];
4534 break;
4535 case DIF_OP_MUL:
4536 regs[rd] = regs[r1] * regs[r2];
4537 break;
4538 case DIF_OP_SDIV:
4539 if (regs[r2] == 0) {
4540 regs[rd] = 0;
4541 *flags |= CPU_DTRACE_DIVZERO;
4542 } else {
4543 regs[rd] = (int64_t)regs[r1] /
4544 (int64_t)regs[r2];
4545 }
4546 break;
4547
4548 case DIF_OP_UDIV:
4549 if (regs[r2] == 0) {
4550 regs[rd] = 0;
4551 *flags |= CPU_DTRACE_DIVZERO;
4552 } else {
4553 regs[rd] = regs[r1] / regs[r2];
4554 }
4555 break;
4556
4557 case DIF_OP_SREM:
4558 if (regs[r2] == 0) {
4559 regs[rd] = 0;
4560 *flags |= CPU_DTRACE_DIVZERO;
4561 } else {
4562 regs[rd] = (int64_t)regs[r1] %
4563 (int64_t)regs[r2];
4564 }
4565 break;
4566
4567 case DIF_OP_UREM:
4568 if (regs[r2] == 0) {
4569 regs[rd] = 0;
4570 *flags |= CPU_DTRACE_DIVZERO;
4571 } else {
4572 regs[rd] = regs[r1] % regs[r2];
4573 }
4574 break;
4575
4576 case DIF_OP_NOT:
4577 regs[rd] = ~regs[r1];
4578 break;
4579 case DIF_OP_MOV:
4580 regs[rd] = regs[r1];
4581 break;
4582 case DIF_OP_CMP:
4583 cc_r = regs[r1] - regs[r2];
4584 cc_n = cc_r < 0;
4585 cc_z = cc_r == 0;
4586 cc_v = 0;
4587 cc_c = regs[r1] < regs[r2];
4588 break;
4589 case DIF_OP_TST:
4590 cc_n = cc_v = cc_c = 0;
4591 cc_z = regs[r1] == 0;
4592 break;
4593 case DIF_OP_BA:
4594 pc = DIF_INSTR_LABEL(instr);
4595 break;
4596 case DIF_OP_BE:
4597 if (cc_z)
4598 pc = DIF_INSTR_LABEL(instr);
4599 break;
4600 case DIF_OP_BNE:
4601 if (cc_z == 0)
4602 pc = DIF_INSTR_LABEL(instr);
4603 break;
4604 case DIF_OP_BG:
4605 if ((cc_z | (cc_n ^ cc_v)) == 0)
4606 pc = DIF_INSTR_LABEL(instr);
4607 break;
4608 case DIF_OP_BGU:
4609 if ((cc_c | cc_z) == 0)
4610 pc = DIF_INSTR_LABEL(instr);
4611 break;
4612 case DIF_OP_BGE:
4613 if ((cc_n ^ cc_v) == 0)
4614 pc = DIF_INSTR_LABEL(instr);
4615 break;
4616 case DIF_OP_BGEU:
4617 if (cc_c == 0)
4618 pc = DIF_INSTR_LABEL(instr);
4619 break;
4620 case DIF_OP_BL:
4621 if (cc_n ^ cc_v)
4622 pc = DIF_INSTR_LABEL(instr);
4623 break;
4624 case DIF_OP_BLU:
4625 if (cc_c)
4626 pc = DIF_INSTR_LABEL(instr);
4627 break;
4628 case DIF_OP_BLE:
4629 if (cc_z | (cc_n ^ cc_v))
4630 pc = DIF_INSTR_LABEL(instr);
4631 break;
4632 case DIF_OP_BLEU:
4633 if (cc_c | cc_z)
4634 pc = DIF_INSTR_LABEL(instr);
4635 break;
4636 case DIF_OP_RLDSB:
4637 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4638 *flags |= CPU_DTRACE_KPRIV;
4639 *illval = regs[r1];
4640 break;
4641 }
4642 /*FALLTHROUGH*/
4643 case DIF_OP_LDSB:
4644 regs[rd] = (int8_t)dtrace_load8(regs[r1]);
4645 break;
4646 case DIF_OP_RLDSH:
4647 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4648 *flags |= CPU_DTRACE_KPRIV;
4649 *illval = regs[r1];
4650 break;
4651 }
4652 /*FALLTHROUGH*/
4653 case DIF_OP_LDSH:
4654 regs[rd] = (int16_t)dtrace_load16(regs[r1]);
4655 break;
4656 case DIF_OP_RLDSW:
4657 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4658 *flags |= CPU_DTRACE_KPRIV;
4659 *illval = regs[r1];
4660 break;
4661 }
4662 /*FALLTHROUGH*/
4663 case DIF_OP_LDSW:
4664 regs[rd] = (int32_t)dtrace_load32(regs[r1]);
4665 break;
4666 case DIF_OP_RLDUB:
4667 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) {
4668 *flags |= CPU_DTRACE_KPRIV;
4669 *illval = regs[r1];
4670 break;
4671 }
4672 /*FALLTHROUGH*/
4673 case DIF_OP_LDUB:
4674 regs[rd] = dtrace_load8(regs[r1]);
4675 break;
4676 case DIF_OP_RLDUH:
4677 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) {
4678 *flags |= CPU_DTRACE_KPRIV;
4679 *illval = regs[r1];
4680 break;
4681 }
4682 /*FALLTHROUGH*/
4683 case DIF_OP_LDUH:
4684 regs[rd] = dtrace_load16(regs[r1]);
4685 break;
4686 case DIF_OP_RLDUW:
4687 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) {
4688 *flags |= CPU_DTRACE_KPRIV;
4689 *illval = regs[r1];
4690 break;
4691 }
4692 /*FALLTHROUGH*/
4693 case DIF_OP_LDUW:
4694 regs[rd] = dtrace_load32(regs[r1]);
4695 break;
4696 case DIF_OP_RLDX:
4697 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) {
4698 *flags |= CPU_DTRACE_KPRIV;
4699 *illval = regs[r1];
4700 break;
4701 }
4702 /*FALLTHROUGH*/
4703 case DIF_OP_LDX:
4704 regs[rd] = dtrace_load64(regs[r1]);
4705 break;
4706 case DIF_OP_ULDSB:
4707 regs[rd] = (int8_t)
4708 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
4709 break;
4710 case DIF_OP_ULDSH:
4711 regs[rd] = (int16_t)
4712 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
4713 break;
4714 case DIF_OP_ULDSW:
4715 regs[rd] = (int32_t)
4716 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
4717 break;
4718 case DIF_OP_ULDUB:
4719 regs[rd] =
4720 dtrace_fuword8((void *)(uintptr_t)regs[r1]);
4721 break;
4722 case DIF_OP_ULDUH:
4723 regs[rd] =
4724 dtrace_fuword16((void *)(uintptr_t)regs[r1]);
4725 break;
4726 case DIF_OP_ULDUW:
4727 regs[rd] =
4728 dtrace_fuword32((void *)(uintptr_t)regs[r1]);
4729 break;
4730 case DIF_OP_ULDX:
4731 regs[rd] =
4732 dtrace_fuword64((void *)(uintptr_t)regs[r1]);
4733 break;
4734 case DIF_OP_RET:
4735 rval = regs[rd];
4736 pc = textlen;
4737 break;
4738 case DIF_OP_NOP:
4739 break;
4740 case DIF_OP_SETX:
4741 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)];
4742 break;
4743 case DIF_OP_SETS:
4744 regs[rd] = (uint64_t)(uintptr_t)
4745 (strtab + DIF_INSTR_STRING(instr));
4746 break;
4747 case DIF_OP_SCMP: {
4748 size_t sz = state->dts_options[DTRACEOPT_STRSIZE];
4749 uintptr_t s1 = regs[r1];
4750 uintptr_t s2 = regs[r2];
4751
4752 if (s1 != NULL &&
4753 !dtrace_strcanload(s1, sz, mstate, vstate))
4754 break;
4755 if (s2 != NULL &&
4756 !dtrace_strcanload(s2, sz, mstate, vstate))
4757 break;
4758
4759 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz);
4760
4761 cc_n = cc_r < 0;
4762 cc_z = cc_r == 0;
4763 cc_v = cc_c = 0;
4764 break;
4765 }
4766 case DIF_OP_LDGA:
4767 regs[rd] = dtrace_dif_variable(mstate, state,
4768 r1, regs[r2]);
4769 break;
4770 case DIF_OP_LDGS:
4771 id = DIF_INSTR_VAR(instr);
4772
4773 if (id >= DIF_VAR_OTHER_UBASE) {
4774 uintptr_t a;
4775
4776 id -= DIF_VAR_OTHER_UBASE;
4777 svar = vstate->dtvs_globals[id];
4778 ASSERT(svar != NULL);
4779 v = &svar->dtsv_var;
4780
4781 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) {
4782 regs[rd] = svar->dtsv_data;
4783 break;
4784 }
4785
4786 a = (uintptr_t)svar->dtsv_data;
4787
4788 if (*(uint8_t *)a == UINT8_MAX) {
4789 /*
4790 * If the 0th byte is set to UINT8_MAX
4791 * then this is to be treated as a
4792 * reference to a NULL variable.
4793 */
4794 regs[rd] = NULL;
4795 } else {
4796 regs[rd] = a + sizeof (uint64_t);
4797 }
4798
4799 break;
4800 }
4801
4802 regs[rd] = dtrace_dif_variable(mstate, state, id, 0);
4803 break;
4804
4805 case DIF_OP_STGS:
4806 id = DIF_INSTR_VAR(instr);
4807
4808 ASSERT(id >= DIF_VAR_OTHER_UBASE);
4809 id -= DIF_VAR_OTHER_UBASE;
4810
4811 svar = vstate->dtvs_globals[id];
4812 ASSERT(svar != NULL);
4813 v = &svar->dtsv_var;
4814
4815 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4816 uintptr_t a = (uintptr_t)svar->dtsv_data;
4817
4818 ASSERT(a != NULL);
4819 ASSERT(svar->dtsv_size != 0);
4820
4821 if (regs[rd] == NULL) {
4822 *(uint8_t *)a = UINT8_MAX;
4823 break;
4824 } else {
4825 *(uint8_t *)a = 0;
4826 a += sizeof (uint64_t);
4827 }
4828 if (!dtrace_vcanload(
4829 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
4830 mstate, vstate))
4831 break;
4832
4833 dtrace_vcopy((void *)(uintptr_t)regs[rd],
4834 (void *)a, &v->dtdv_type);
4835 break;
4836 }
4837
4838 svar->dtsv_data = regs[rd];
4839 break;
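		/*
		 * A note on the UINT8_MAX convention used here and in
		 * DIF_OP_LDGS/DIF_OP_LDLS/DIF_OP_STLS: a by-reference
		 * static is laid out as an 8-byte header followed by the
		 * data, and setting the first byte of the header to
		 * UINT8_MAX marks the variable as a reference to NULL.
		 * Schematically (illustrative only):
		 *
		 *	store NULL:	*(uint8_t *)a = UINT8_MAX;
		 *	store value:	*(uint8_t *)a = 0;
		 *			copy data to a + sizeof (uint64_t)
		 *	load:		(*(uint8_t *)a == UINT8_MAX) ?
		 *			    NULL : a + sizeof (uint64_t)
		 */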
4840
4841 case DIF_OP_LDTA:
4842 /*
4843 * There are no DTrace built-in thread-local arrays at
4844 * present. This opcode is saved for future work.
4845 */
4846 *flags |= CPU_DTRACE_ILLOP;
4847 regs[rd] = 0;
4848 break;
4849
4850 case DIF_OP_LDLS:
4851 id = DIF_INSTR_VAR(instr);
4852
4853 if (id < DIF_VAR_OTHER_UBASE) {
4854 /*
4855 * For now, this has no meaning.
4856 */
4857 regs[rd] = 0;
4858 break;
4859 }
4860
4861 id -= DIF_VAR_OTHER_UBASE;
4862
4863 ASSERT(id < vstate->dtvs_nlocals);
4864 ASSERT(vstate->dtvs_locals != NULL);
4865
4866 svar = vstate->dtvs_locals[id];
4867 ASSERT(svar != NULL);
4868 v = &svar->dtsv_var;
4869
4870 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4871 uintptr_t a = (uintptr_t)svar->dtsv_data;
4872 size_t sz = v->dtdv_type.dtdt_size;
4873
4874 sz += sizeof (uint64_t);
4875 ASSERT(svar->dtsv_size == NCPU * sz);
4876 a += CPU->cpu_id * sz;
4877
4878 if (*(uint8_t *)a == UINT8_MAX) {
4879 /*
4880 * If the 0th byte is set to UINT8_MAX
4881 * then this is to be treated as a
4882 * reference to a NULL variable.
4883 */
4884 regs[rd] = NULL;
4885 } else {
4886 regs[rd] = a + sizeof (uint64_t);
4887 }
4888
4889 break;
4890 }
4891
4892 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
4893 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
4894 regs[rd] = tmp[CPU->cpu_id];
4895 break;
4896
4897 case DIF_OP_STLS:
4898 id = DIF_INSTR_VAR(instr);
4899
4900 ASSERT(id >= DIF_VAR_OTHER_UBASE);
4901 id -= DIF_VAR_OTHER_UBASE;
4902 ASSERT(id < vstate->dtvs_nlocals);
4903
4904 ASSERT(vstate->dtvs_locals != NULL);
4905 svar = vstate->dtvs_locals[id];
4906 ASSERT(svar != NULL);
4907 v = &svar->dtsv_var;
4908
4909 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4910 uintptr_t a = (uintptr_t)svar->dtsv_data;
4911 size_t sz = v->dtdv_type.dtdt_size;
4912
4913 sz += sizeof (uint64_t);
4914 ASSERT(svar->dtsv_size == NCPU * sz);
4915 a += CPU->cpu_id * sz;
4916
4917 if (regs[rd] == NULL) {
4918 *(uint8_t *)a = UINT8_MAX;
4919 break;
4920 } else {
4921 *(uint8_t *)a = 0;
4922 a += sizeof (uint64_t);
4923 }
4924
4925 if (!dtrace_vcanload(
4926 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
4927 mstate, vstate))
4928 break;
4929
4930 dtrace_vcopy((void *)(uintptr_t)regs[rd],
4931 (void *)a, &v->dtdv_type);
4932 break;
4933 }
4934
4935 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t));
4936 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data;
4937 tmp[CPU->cpu_id] = regs[rd];
4938 break;
4939
4940 case DIF_OP_LDTS: {
4941 dtrace_dynvar_t *dvar;
4942 dtrace_key_t *key;
4943
4944 id = DIF_INSTR_VAR(instr);
4945 ASSERT(id >= DIF_VAR_OTHER_UBASE);
4946 id -= DIF_VAR_OTHER_UBASE;
4947 v = &vstate->dtvs_tlocals[id];
4948
4949 key = &tupregs[DIF_DTR_NREGS];
4950 key[0].dttk_value = (uint64_t)id;
4951 key[0].dttk_size = 0;
4952 DTRACE_TLS_THRKEY(key[1].dttk_value);
4953 key[1].dttk_size = 0;
4954
4955 dvar = dtrace_dynvar(dstate, 2, key,
4956 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC,
4957 mstate, vstate);
4958
4959 if (dvar == NULL) {
4960 regs[rd] = 0;
4961 break;
4962 }
4963
4964 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
4965 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
4966 } else {
4967 regs[rd] = *((uint64_t *)dvar->dtdv_data);
4968 }
4969
4970 break;
4971 }
4972
4973 case DIF_OP_STTS: {
4974 dtrace_dynvar_t *dvar;
4975 dtrace_key_t *key;
4976
4977 id = DIF_INSTR_VAR(instr);
4978 ASSERT(id >= DIF_VAR_OTHER_UBASE);
4979 id -= DIF_VAR_OTHER_UBASE;
4980
4981 key = &tupregs[DIF_DTR_NREGS];
4982 key[0].dttk_value = (uint64_t)id;
4983 key[0].dttk_size = 0;
4984 DTRACE_TLS_THRKEY(key[1].dttk_value);
4985 key[1].dttk_size = 0;
4986 v = &vstate->dtvs_tlocals[id];
4987
4988 dvar = dtrace_dynvar(dstate, 2, key,
4989 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
4990 v->dtdv_type.dtdt_size : sizeof (uint64_t),
4991 regs[rd] ? DTRACE_DYNVAR_ALLOC :
4992 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
4993
4994 /*
4995 * Given that we're storing to thread-local data,
4996 * we need to flush our predicate cache.
4997 */
4998 curthread->t_predcache = NULL;
4999
5000 if (dvar == NULL)
5001 break;
5002
5003 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5004 if (!dtrace_vcanload(
5005 (void *)(uintptr_t)regs[rd],
5006 &v->dtdv_type, mstate, vstate))
5007 break;
5008
5009 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5010 dvar->dtdv_data, &v->dtdv_type);
5011 } else {
5012 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5013 }
5014
5015 break;
5016 }
5017
5018 case DIF_OP_SRA:
5019 regs[rd] = (int64_t)regs[r1] >> regs[r2];
5020 break;
5021
5022 case DIF_OP_CALL:
5023 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd,
5024 regs, tupregs, ttop, mstate, state);
5025 break;
5026
5027 case DIF_OP_PUSHTR:
5028 if (ttop == DIF_DTR_NREGS) {
5029 *flags |= CPU_DTRACE_TUPOFLOW;
5030 break;
5031 }
5032
5033 if (r1 == DIF_TYPE_STRING) {
5034 /*
5035 * If this is a string type and the size is 0,
5036 * we'll use the system-wide default string
5037 * size. Note that we are _not_ looking at
5038 * the value of the DTRACEOPT_STRSIZE option;
5039 * had this been set, we would expect to have
5040 * a non-zero size value in the "pushtr".
5041 */
5042 tupregs[ttop].dttk_size =
5043 dtrace_strlen((char *)(uintptr_t)regs[rd],
5044 regs[r2] ? regs[r2] :
5045 dtrace_strsize_default) + 1;
5046 } else {
5047 tupregs[ttop].dttk_size = regs[r2];
5048 }
5049
5050 tupregs[ttop++].dttk_value = regs[rd];
5051 break;
5052
5053 case DIF_OP_PUSHTV:
5054 if (ttop == DIF_DTR_NREGS) {
5055 *flags |= CPU_DTRACE_TUPOFLOW;
5056 break;
5057 }
5058
5059 tupregs[ttop].dttk_value = regs[rd];
5060 tupregs[ttop++].dttk_size = 0;
5061 break;
5062
5063 case DIF_OP_POPTS:
5064 if (ttop != 0)
5065 ttop--;
5066 break;
5067
5068 case DIF_OP_FLUSHTS:
5069 ttop = 0;
5070 break;
5071
5072 case DIF_OP_LDGAA:
5073 case DIF_OP_LDTAA: {
5074 dtrace_dynvar_t *dvar;
5075 dtrace_key_t *key = tupregs;
5076 uint_t nkeys = ttop;
5077
5078 id = DIF_INSTR_VAR(instr);
5079 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5080 id -= DIF_VAR_OTHER_UBASE;
5081
5082 key[nkeys].dttk_value = (uint64_t)id;
5083 key[nkeys++].dttk_size = 0;
5084
5085 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) {
5086 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5087 key[nkeys++].dttk_size = 0;
5088 v = &vstate->dtvs_tlocals[id];
5089 } else {
5090 v = &vstate->dtvs_globals[id]->dtsv_var;
5091 }
5092
5093 dvar = dtrace_dynvar(dstate, nkeys, key,
5094 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5095 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5096 DTRACE_DYNVAR_NOALLOC, mstate, vstate);
5097
5098 if (dvar == NULL) {
5099 regs[rd] = 0;
5100 break;
5101 }
5102
5103 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5104 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data;
5105 } else {
5106 regs[rd] = *((uint64_t *)dvar->dtdv_data);
5107 }
5108
5109 break;
5110 }
5111
5112 case DIF_OP_STGAA:
5113 case DIF_OP_STTAA: {
5114 dtrace_dynvar_t *dvar;
5115 dtrace_key_t *key = tupregs;
5116 uint_t nkeys = ttop;
5117
5118 id = DIF_INSTR_VAR(instr);
5119 ASSERT(id >= DIF_VAR_OTHER_UBASE);
5120 id -= DIF_VAR_OTHER_UBASE;
5121
5122 key[nkeys].dttk_value = (uint64_t)id;
5123 key[nkeys++].dttk_size = 0;
5124
5125 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) {
5126 DTRACE_TLS_THRKEY(key[nkeys].dttk_value);
5127 key[nkeys++].dttk_size = 0;
5128 v = &vstate->dtvs_tlocals[id];
5129 } else {
5130 v = &vstate->dtvs_globals[id]->dtsv_var;
5131 }
5132
5133 dvar = dtrace_dynvar(dstate, nkeys, key,
5134 v->dtdv_type.dtdt_size > sizeof (uint64_t) ?
5135 v->dtdv_type.dtdt_size : sizeof (uint64_t),
5136 regs[rd] ? DTRACE_DYNVAR_ALLOC :
5137 DTRACE_DYNVAR_DEALLOC, mstate, vstate);
5138
5139 if (dvar == NULL)
5140 break;
5141
5142 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) {
5143 if (!dtrace_vcanload(
5144 (void *)(uintptr_t)regs[rd], &v->dtdv_type,
5145 mstate, vstate))
5146 break;
5147
5148 dtrace_vcopy((void *)(uintptr_t)regs[rd],
5149 dvar->dtdv_data, &v->dtdv_type);
5150 } else {
5151 *((uint64_t *)dvar->dtdv_data) = regs[rd];
5152 }
5153
5154 break;
5155 }
5156
5157 case DIF_OP_ALLOCS: {
5158 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5159 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1];
5160
5161 /*
5162 * Rounding up the user allocation size could have
5163 * overflowed large, bogus allocations (like -1ULL) to
5164 * 0.
5165 */
5166 if (size < regs[r1] ||
5167 !DTRACE_INSCRATCH(mstate, size)) {
5168 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5169 regs[rd] = NULL;
5170 break;
5171 }
5172
5173 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size);
5174 mstate->dtms_scratch_ptr += size;
5175 regs[rd] = ptr;
5176 break;
5177 }
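		/*
		 * Worked example of the overflow check above (illustrative):
		 * if a clause asks alloca() for (size_t)-1 bytes, rounding
		 * the scratch pointer and adding regs[r1] wraps the computed
		 * size around to a small value; the "size < regs[r1]" test
		 * catches exactly that wraparound and fails the allocation
		 * instead of handing back a bogus buffer.
		 */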
5178
5179 case DIF_OP_COPYS:
5180 if (!dtrace_canstore(regs[rd], regs[r2],
5181 mstate, vstate)) {
5182 *flags |= CPU_DTRACE_BADADDR;
5183 *illval = regs[rd];
5184 break;
5185 }
5186
5187 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate))
5188 break;
5189
5190 dtrace_bcopy((void *)(uintptr_t)regs[r1],
5191 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]);
5192 break;
5193
5194 case DIF_OP_STB:
5195 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) {
5196 *flags |= CPU_DTRACE_BADADDR;
5197 *illval = regs[rd];
5198 break;
5199 }
5200 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1];
5201 break;
5202
5203 case DIF_OP_STH:
5204 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) {
5205 *flags |= CPU_DTRACE_BADADDR;
5206 *illval = regs[rd];
5207 break;
5208 }
5209 if (regs[rd] & 1) {
5210 *flags |= CPU_DTRACE_BADALIGN;
5211 *illval = regs[rd];
5212 break;
5213 }
5214 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1];
5215 break;
5216
5217 case DIF_OP_STW:
5218 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) {
5219 *flags |= CPU_DTRACE_BADADDR;
5220 *illval = regs[rd];
5221 break;
5222 }
5223 if (regs[rd] & 3) {
5224 *flags |= CPU_DTRACE_BADALIGN;
5225 *illval = regs[rd];
5226 break;
5227 }
5228 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1];
5229 break;
5230
5231 case DIF_OP_STX:
5232 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) {
5233 *flags |= CPU_DTRACE_BADADDR;
5234 *illval = regs[rd];
5235 break;
5236 }
5237 if (regs[rd] & 7) {
5238 *flags |= CPU_DTRACE_BADALIGN;
5239 *illval = regs[rd];
5240 break;
5241 }
5242 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1];
5243 break;
5244 }
5245 }
5246
5247 if (!(*flags & CPU_DTRACE_FAULT))
5248 return (rval);
5249
5250 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t);
5251 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS;
5252
5253 return (0);
5254 }
5255
5256 static void
5257 dtrace_action_breakpoint(dtrace_ecb_t *ecb)
5258 {
5259 dtrace_probe_t *probe = ecb->dte_probe;
5260 dtrace_provider_t *prov = probe->dtpr_provider;
5261 char c[DTRACE_FULLNAMELEN + 80], *str;
5262 char *msg = "dtrace: breakpoint action at probe ";
5263 char *ecbmsg = " (ecb ";
5264 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4));
5265 uintptr_t val = (uintptr_t)ecb;
5266 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0;
5267
5268 if (dtrace_destructive_disallow)
5269 return;
5270
5271 /*
5272 * It's impossible to be taking action on the NULL probe.
5273 */
5274 ASSERT(probe != NULL);
5275
5276 /*
5277 * This is a poor man's (destitute man's?) sprintf(): we want to
5278 * print the provider name, module name, function name and name of
5279 * the probe, along with the hex address of the ECB with the breakpoint
5280 * action -- all of which we must place in the character buffer by
5281 * hand.
5282 */
5283 while (*msg != '\0')
5284 c[i++] = *msg++;
5285
5286 for (str = prov->dtpv_name; *str != '\0'; str++)
5287 c[i++] = *str;
5288 c[i++] = ':';
5289
5290 for (str = probe->dtpr_mod; *str != '\0'; str++)
5291 c[i++] = *str;
5292 c[i++] = ':';
5293
5294 for (str = probe->dtpr_func; *str != '\0'; str++)
5295 c[i++] = *str;
5296 c[i++] = ':';
5297
5298 for (str = probe->dtpr_name; *str != '\0'; str++)
5299 c[i++] = *str;
5300
5301 while (*ecbmsg != '\0')
5302 c[i++] = *ecbmsg++;
5303
5304 while (shift >= 0) {
5305 mask = (uintptr_t)0xf << shift;
5306
5307 if (val >= ((uintptr_t)1 << shift))
5308 c[i++] = "0123456789abcdef"[(val & mask) >> shift];
5309 shift -= 4;
5310 }
5311
5312 c[i++] = ')';
5313 c[i] = '\0';
5314
5315 debug_enter(c);
5316 }
5317
5318 static void
5319 dtrace_action_panic(dtrace_ecb_t *ecb)
5320 {
5321 dtrace_probe_t *probe = ecb->dte_probe;
5322
5323 /*
5324 * It's impossible to be taking action on the NULL probe.
5325 */
5326 ASSERT(probe != NULL);
5327
5328 if (dtrace_destructive_disallow)
5329 return;
5330
5331 if (dtrace_panicked != NULL)
5332 return;
5333
5334 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL)
5335 return;
5336
5337 /*
5338 * We won the right to panic. (We want to be sure that only one
5339 * thread calls panic() from dtrace_probe(), and that panic() is
5340 * called exactly once.)
5341 */
5342 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)",
5343 probe->dtpr_provider->dtpv_name, probe->dtpr_mod,
5344 probe->dtpr_func, probe->dtpr_name, (void *)ecb);
5345 }
5346
5347 static void
5348 dtrace_action_raise(uint64_t sig)
5349 {
5350 if (dtrace_destructive_disallow)
5351 return;
5352
5353 if (sig >= NSIG) {
5354 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP);
5355 return;
5356 }
5357
5358 /*
5359 * raise() has a queue depth of 1 -- we ignore all subsequent
5360 * invocations of the raise() action.
5361 */
5362 if (curthread->t_dtrace_sig == 0)
5363 curthread->t_dtrace_sig = (uint8_t)sig;
5364
5365 curthread->t_sig_check = 1;
5366 aston(curthread);
5367 }
5368
5369 static void
5370 dtrace_action_stop(void)
5371 {
5372 if (dtrace_destructive_disallow)
5373 return;
5374
5375 if (!curthread->t_dtrace_stop) {
5376 curthread->t_dtrace_stop = 1;
5377 curthread->t_sig_check = 1;
5378 aston(curthread);
5379 }
5380 }
5381
5382 static void
5383 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val)
5384 {
5385 hrtime_t now;
5386 volatile uint16_t *flags;
5387 cpu_t *cpu = CPU;
5388
5389 if (dtrace_destructive_disallow)
5390 return;
5391
5392 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags;
5393
5394 now = dtrace_gethrtime();
5395
5396 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) {
5397 /*
5398 * We need to advance the mark to the current time.
5399 */
5400 cpu->cpu_dtrace_chillmark = now;
5401 cpu->cpu_dtrace_chilled = 0;
5402 }
5403
5404 /*
5405 * Now check to see if the requested chill time would take us over
5406 * the maximum amount of time allowed in the chill interval. (Or
5407 * worse, if the calculation itself induces overflow.)
5408 */
5409 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max ||
5410 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) {
5411 *flags |= CPU_DTRACE_ILLOP;
5412 return;
5413 }
5414
5415 while (dtrace_gethrtime() - now < val)
5416 continue;
5417
5418 /*
5419 * Normally, we assure that the value of the variable "timestamp" does
5420 * not change within an ECB. The presence of chill() represents an
5421 * exception to this rule, however.
5422 */
5423 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP;
5424 cpu->cpu_dtrace_chilled += val;
5425 }
5426
5427 static void
5428 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state,
5429 uint64_t *buf, uint64_t arg)
5430 {
5431 int nframes = DTRACE_USTACK_NFRAMES(arg);
5432 int strsize = DTRACE_USTACK_STRSIZE(arg);
5433 uint64_t *pcs = &buf[1], *fps;
5434 char *str = (char *)&pcs[nframes];
5435 int size, offs = 0, i, j;
5436 uintptr_t old = mstate->dtms_scratch_ptr, saved;
5437 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
5438 char *sym;
5439
5440 /*
5441 * Should be taking a faster path if string space has not been
5442 * allocated.
5443 */
5444 ASSERT(strsize != 0);
5445
5446 /*
5447 * We will first allocate some temporary space for the frame pointers.
5448 */
5449 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8);
5450 size = (uintptr_t)fps - mstate->dtms_scratch_ptr +
5451 (nframes * sizeof (uint64_t));
5452
5453 if (!DTRACE_INSCRATCH(mstate, size)) {
5454 /*
5455 * Not enough room for our frame pointers -- need to indicate
5456 * that we ran out of scratch space.
5457 */
5458 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH);
5459 return;
5460 }
5461
5462 mstate->dtms_scratch_ptr += size;
5463 saved = mstate->dtms_scratch_ptr;
5464
5465 /*
5466 * Now get a stack with both program counters and frame pointers.
5467 */
5468 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5469 dtrace_getufpstack(buf, fps, nframes + 1);
5470 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5471
5472 /*
5473 * If that faulted, we're cooked.
5474 */
5475 if (*flags & CPU_DTRACE_FAULT)
5476 goto out;
5477
5478 /*
5479 * Now we want to walk up the stack, calling the USTACK helper. For
5480 * each iteration, we restore the scratch pointer.
5481 */
5482 for (i = 0; i < nframes; i++) {
5483 mstate->dtms_scratch_ptr = saved;
5484
5485 if (offs >= strsize)
5486 break;
5487
5488 sym = (char *)(uintptr_t)dtrace_helper(
5489 DTRACE_HELPER_ACTION_USTACK,
5490 mstate, state, pcs[i], fps[i]);
5491
5492 /*
5493 * If we faulted while running the helper, we're going to
5494 * clear the fault and null out the corresponding string.
5495 */
5496 if (*flags & CPU_DTRACE_FAULT) {
5497 *flags &= ~CPU_DTRACE_FAULT;
5498 str[offs++] = '\0';
5499 continue;
5500 }
5501
5502 if (sym == NULL) {
5503 str[offs++] = '\0';
5504 continue;
5505 }
5506
5507 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5508
5509 /*
5510 * Now copy in the string that the helper returned to us.
5511 */
5512 for (j = 0; offs + j < strsize; j++) {
5513 if ((str[offs + j] = sym[j]) == '\0')
5514 break;
5515 }
5516
5517 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5518
5519 offs += j + 1;
5520 }
5521
5522 if (offs >= strsize) {
5523 /*
5524 * If we didn't have room for all of the strings, we don't
5525 * abort processing -- this needn't be a fatal error -- but we
5526 * still want to increment a counter (dts_stkstroverflows) to
5527 * allow this condition to be warned about. (If this is from
5528 * a jstack() action, it is easily tuned via jstackstrsize.)
5529 */
5530 dtrace_error(&state->dts_stkstroverflows);
5531 }
5532
5533 while (offs < strsize)
5534 str[offs++] = '\0';
5535
5536 out:
5537 mstate->dtms_scratch_ptr = old;
5538 }
5539
5540 /*
5541 * If you're looking for the epicenter of DTrace, you just found it. This
5542 * is the function called by the provider to fire a probe -- from which all
5543 * subsequent probe-context DTrace activity emanates.
5544 */
5545 void
5546 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1,
5547 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4)
5548 {
5549 processorid_t cpuid;
5550 dtrace_icookie_t cookie;
5551 dtrace_probe_t *probe;
5552 dtrace_mstate_t mstate;
5553 dtrace_ecb_t *ecb;
5554 dtrace_action_t *act;
5555 intptr_t offs;
5556 size_t size;
5557 int vtime, onintr;
5558 volatile uint16_t *flags;
5559 hrtime_t now;
5560
5561 /*
5562 * Kick out immediately if this CPU is still being born (in which case
5563 * curthread will be set to -1) or the current thread can't allow
5564 * probes in its current context.
5565 */
5566 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE))
5567 return;
5568
5569 cookie = dtrace_interrupt_disable();
5570 probe = dtrace_probes[id - 1];
5571 cpuid = CPU->cpu_id;
5572 onintr = CPU_ON_INTR(CPU);
5573
5574 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE &&
5575 probe->dtpr_predcache == curthread->t_predcache) {
5576 /*
5577 * We have hit in the predicate cache; we know that
5578 * this predicate would evaluate to be false.
5579 */
5580 dtrace_interrupt_enable(cookie);
5581 return;
5582 }
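	/*
	 * (The predicate cache consulted above works in concert with the
	 * update further down in this function: when a cacheable predicate
	 * -- one that depends only on thread-local state, e.g.
	 * /pid == 1234/ -- evaluates to false, its cache ID is stashed in
	 * t_predcache, allowing subsequent firings of the same probe by the
	 * same thread to skip DIF emulation entirely.)
	 */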
5583
5584 if (panic_quiesce) {
5585 /*
5586 * We don't trace anything if we're panicking.
5587 */
5588 dtrace_interrupt_enable(cookie);
5589 return;
5590 }
5591
5592 now = dtrace_gethrtime();
5593 vtime = dtrace_vtime_references != 0;
5594
5595 if (vtime && curthread->t_dtrace_start)
5596 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start;
5597
5598 mstate.dtms_difo = NULL;
5599 mstate.dtms_probe = probe;
5600 mstate.dtms_strtok = NULL;
5601 mstate.dtms_arg[0] = arg0;
5602 mstate.dtms_arg[1] = arg1;
5603 mstate.dtms_arg[2] = arg2;
5604 mstate.dtms_arg[3] = arg3;
5605 mstate.dtms_arg[4] = arg4;
5606
5607 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags;
5608
5609 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
5610 dtrace_predicate_t *pred = ecb->dte_predicate;
5611 dtrace_state_t *state = ecb->dte_state;
5612 dtrace_buffer_t *buf = &state->dts_buffer[cpuid];
5613 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid];
5614 dtrace_vstate_t *vstate = &state->dts_vstate;
5615 dtrace_provider_t *prov = probe->dtpr_provider;
5616 int committed = 0;
5617 caddr_t tomax;
5618
5619 /*
5620 * A little subtlety with the following (seemingly innocuous)
5621 * declaration of the automatic 'val': by looking at the
5622 * code, you might think that it could be declared in the
5623 * action processing loop, below. (That is, it's only used in
5624 * the action processing loop.) However, it must be declared
5625 * out of that scope because in the case of DIF expression
5626 * arguments to aggregating actions, one iteration of the
5627 * action loop will use the last iteration's value.
5628 */
5629 #ifdef lint
5630 uint64_t val = 0;
5631 #else
5632 uint64_t val;
5633 #endif
5634
5635 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE;
5636 *flags &= ~CPU_DTRACE_ERROR;
5637
5638 if (prov == dtrace_provider) {
5639 /*
5640 * If dtrace itself is the provider of this probe,
5641 * we're only going to continue processing the ECB if
5642 * arg0 (the dtrace_state_t) is equal to the ECB's
5643 * creating state. (This prevents disjoint consumers
5644 * from seeing one another's metaprobes.)
5645 */
5646 if (arg0 != (uint64_t)(uintptr_t)state)
5647 continue;
5648 }
5649
5650 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) {
5651 /*
5652 * We're not currently active. If our provider isn't
5653 * the dtrace pseudo provider, we're not interested.
5654 */
5655 if (prov != dtrace_provider)
5656 continue;
5657
5658 /*
5659 * Now we must further check if we are in the BEGIN
5660 * probe. If we are, we will only continue processing
5661 * if we're still in WARMUP -- if one BEGIN enabling
5662 * has invoked the exit() action, we don't want to
5663 * evaluate subsequent BEGIN enablings.
5664 */
5665 if (probe->dtpr_id == dtrace_probeid_begin &&
5666 state->dts_activity != DTRACE_ACTIVITY_WARMUP) {
5667 ASSERT(state->dts_activity ==
5668 DTRACE_ACTIVITY_DRAINING);
5669 continue;
5670 }
5671 }
5672
5673 if (ecb->dte_cond) {
5674 /*
5675 * If the dte_cond bits indicate that this
5676 * consumer is only allowed to see user-mode firings
5677 * of this probe, call the provider's dtps_usermode()
5678 * entry point to check that the probe was fired
5679 * while in a user context. Skip this ECB if that's
5680 * not the case.
5681 */
5682 if ((ecb->dte_cond & DTRACE_COND_USERMODE) &&
5683 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg,
5684 probe->dtpr_id, probe->dtpr_arg) == 0)
5685 continue;
5686
5687 /*
5688 * This is more subtle than it looks. We have to be
5689 * absolutely certain that CRED() isn't going to
5690 * change out from under us so it's only legit to
5691 * examine that structure if we're in constrained
5692			 * situations. Currently, the only time we'll do this
5693 * check is if a non-super-user has enabled the
5694 * profile or syscall providers -- providers that
5695 * allow visibility of all processes. For the
5696 * profile case, the check above will ensure that
5697 * we're examining a user context.
5698 */
5699 if (ecb->dte_cond & DTRACE_COND_OWNER) {
5700 cred_t *cr;
5701 cred_t *s_cr =
5702 ecb->dte_state->dts_cred.dcr_cred;
5703 proc_t *proc;
5704
5705 ASSERT(s_cr != NULL);
5706
5707 if ((cr = CRED()) == NULL ||
5708 s_cr->cr_uid != cr->cr_uid ||
5709 s_cr->cr_uid != cr->cr_ruid ||
5710 s_cr->cr_uid != cr->cr_suid ||
5711 s_cr->cr_gid != cr->cr_gid ||
5712 s_cr->cr_gid != cr->cr_rgid ||
5713 s_cr->cr_gid != cr->cr_sgid ||
5714 (proc = ttoproc(curthread)) == NULL ||
5715 (proc->p_flag & SNOCD))
5716 continue;
5717 }
5718
5719 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) {
5720 cred_t *cr;
5721 cred_t *s_cr =
5722 ecb->dte_state->dts_cred.dcr_cred;
5723
5724 ASSERT(s_cr != NULL);
5725
5726 if ((cr = CRED()) == NULL ||
5727 s_cr->cr_zone->zone_id !=
5728 cr->cr_zone->zone_id)
5729 continue;
5730 }
5731 }
5732
5733 if (now - state->dts_alive > dtrace_deadman_timeout) {
5734 /*
5735 * We seem to be dead. Unless we (a) have kernel
5736			 * destructive permissions, (b) have explicitly enabled
5737			 * destructive actions, and (c) destructive actions have
5738 * not been disabled, we're going to transition into
5739 * the KILLED state, from which no further processing
5740 * on this state will be performed.
5741 */
5742 if (!dtrace_priv_kernel_destructive(state) ||
5743 !state->dts_cred.dcr_destructive ||
5744 dtrace_destructive_disallow) {
5745 void *activity = &state->dts_activity;
5746 dtrace_activity_t current;
5747
5748 do {
5749 current = state->dts_activity;
5750 } while (dtrace_cas32(activity, current,
5751 DTRACE_ACTIVITY_KILLED) != current);
5752
5753 continue;
5754 }
5755 }
5756
5757 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed,
5758 ecb->dte_alignment, state, &mstate)) < 0)
5759 continue;
5760
5761 tomax = buf->dtb_tomax;
5762 ASSERT(tomax != NULL);
5763
5764 if (ecb->dte_size != 0)
5765 DTRACE_STORE(uint32_t, tomax, offs, ecb->dte_epid);
5766
5767 mstate.dtms_epid = ecb->dte_epid;
5768 mstate.dtms_present |= DTRACE_MSTATE_EPID;
5769
5770 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL)
5771 mstate.dtms_access = DTRACE_ACCESS_KERNEL;
5772 else
5773 mstate.dtms_access = 0;
5774
5775 if (pred != NULL) {
5776 dtrace_difo_t *dp = pred->dtp_difo;
5777 int rval;
5778
5779 rval = dtrace_dif_emulate(dp, &mstate, vstate, state);
5780
5781 if (!(*flags & CPU_DTRACE_ERROR) && !rval) {
5782 dtrace_cacheid_t cid = probe->dtpr_predcache;
5783
5784 if (cid != DTRACE_CACHEIDNONE && !onintr) {
5785 /*
5786 * Update the predicate cache...
5787 */
5788 ASSERT(cid == pred->dtp_cacheid);
5789 curthread->t_predcache = cid;
5790 }
5791
5792 continue;
5793 }
5794 }
5795
5796 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) &&
5797 act != NULL; act = act->dta_next) {
5798 size_t valoffs;
5799 dtrace_difo_t *dp;
5800 dtrace_recdesc_t *rec = &act->dta_rec;
5801
5802 size = rec->dtrd_size;
5803 valoffs = offs + rec->dtrd_offset;
5804
5805 if (DTRACEACT_ISAGG(act->dta_kind)) {
5806 uint64_t v = 0xbad;
5807 dtrace_aggregation_t *agg;
5808
5809 agg = (dtrace_aggregation_t *)act;
5810
5811 if ((dp = act->dta_difo) != NULL)
5812 v = dtrace_dif_emulate(dp,
5813 &mstate, vstate, state);
5814
5815 if (*flags & CPU_DTRACE_ERROR)
5816 continue;
5817
5818 /*
5819 * Note that we always pass the expression
5820 * value from the previous iteration of the
5821 * action loop. This value will only be used
5822 * if there is an expression argument to the
5823 * aggregating action, denoted by the
5824 * dtag_hasarg field.
5825 */
5826 dtrace_aggregate(agg, buf,
5827 offs, aggbuf, v, val);
5828 continue;
5829 }
5830
5831 switch (act->dta_kind) {
5832 case DTRACEACT_STOP:
5833 if (dtrace_priv_proc_destructive(state))
5834 dtrace_action_stop();
5835 continue;
5836
5837 case DTRACEACT_BREAKPOINT:
5838 if (dtrace_priv_kernel_destructive(state))
5839 dtrace_action_breakpoint(ecb);
5840 continue;
5841
5842 case DTRACEACT_PANIC:
5843 if (dtrace_priv_kernel_destructive(state))
5844 dtrace_action_panic(ecb);
5845 continue;
5846
5847 case DTRACEACT_STACK:
5848 if (!dtrace_priv_kernel(state))
5849 continue;
5850
5851 dtrace_getpcstack((pc_t *)(tomax + valoffs),
5852 size / sizeof (pc_t), probe->dtpr_aframes,
5853 DTRACE_ANCHORED(probe) ? NULL :
5854 (uint32_t *)arg0);
5855
5856 continue;
5857
5858 case DTRACEACT_JSTACK:
5859 case DTRACEACT_USTACK:
5860 if (!dtrace_priv_proc(state))
5861 continue;
5862
5863 /*
5864 * See comment in DIF_VAR_PID.
5865 */
5866 if (DTRACE_ANCHORED(mstate.dtms_probe) &&
5867 CPU_ON_INTR(CPU)) {
5868 int depth = DTRACE_USTACK_NFRAMES(
5869 rec->dtrd_arg) + 1;
5870
5871 dtrace_bzero((void *)(tomax + valoffs),
5872 DTRACE_USTACK_STRSIZE(rec->dtrd_arg)
5873 + depth * sizeof (uint64_t));
5874
5875 continue;
5876 }
5877
5878 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 &&
5879 curproc->p_dtrace_helpers != NULL) {
5880 /*
5881 * This is the slow path -- we have
5882 * allocated string space, and we're
5883 * getting the stack of a process that
5884 * has helpers. Call into a separate
5885 * routine to perform this processing.
5886 */
5887 dtrace_action_ustack(&mstate, state,
5888 (uint64_t *)(tomax + valoffs),
5889 rec->dtrd_arg);
5890 continue;
5891 }
5892
5893 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT);
5894 dtrace_getupcstack((uint64_t *)
5895 (tomax + valoffs),
5896 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1);
5897 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT);
5898 continue;
5899
5900 default:
5901 break;
5902 }
5903
5904 dp = act->dta_difo;
5905 ASSERT(dp != NULL);
5906
5907 val = dtrace_dif_emulate(dp, &mstate, vstate, state);
5908
5909 if (*flags & CPU_DTRACE_ERROR)
5910 continue;
5911
5912 switch (act->dta_kind) {
5913 case DTRACEACT_SPECULATE:
5914 ASSERT(buf == &state->dts_buffer[cpuid]);
5915 buf = dtrace_speculation_buffer(state,
5916 cpuid, val);
5917
5918 if (buf == NULL) {
5919 *flags |= CPU_DTRACE_DROP;
5920 continue;
5921 }
5922
5923 offs = dtrace_buffer_reserve(buf,
5924 ecb->dte_needed, ecb->dte_alignment,
5925 state, NULL);
5926
5927 if (offs < 0) {
5928 *flags |= CPU_DTRACE_DROP;
5929 continue;
5930 }
5931
5932 tomax = buf->dtb_tomax;
5933 ASSERT(tomax != NULL);
5934
5935 if (ecb->dte_size != 0)
5936 DTRACE_STORE(uint32_t, tomax, offs,
5937 ecb->dte_epid);
5938 continue;
5939
5940 case DTRACEACT_CHILL:
5941 if (dtrace_priv_kernel_destructive(state))
5942 dtrace_action_chill(&mstate, val);
5943 continue;
5944
5945 case DTRACEACT_RAISE:
5946 if (dtrace_priv_proc_destructive(state))
5947 dtrace_action_raise(val);
5948 continue;
5949
5950 case DTRACEACT_COMMIT:
5951 ASSERT(!committed);
5952
5953 /*
5954 * We need to commit our buffer state.
5955 */
5956 if (ecb->dte_size)
5957 buf->dtb_offset = offs + ecb->dte_size;
5958 buf = &state->dts_buffer[cpuid];
5959 dtrace_speculation_commit(state, cpuid, val);
5960 committed = 1;
5961 continue;
5962
5963 case DTRACEACT_DISCARD:
5964 dtrace_speculation_discard(state, cpuid, val);
5965 continue;
5966
5967 case DTRACEACT_DIFEXPR:
5968 case DTRACEACT_LIBACT:
5969 case DTRACEACT_PRINTF:
5970 case DTRACEACT_PRINTA:
5971 case DTRACEACT_SYSTEM:
5972 case DTRACEACT_FREOPEN:
5973 break;
5974
5975 case DTRACEACT_SYM:
5976 case DTRACEACT_MOD:
5977 if (!dtrace_priv_kernel(state))
5978 continue;
5979 break;
5980
5981 case DTRACEACT_USYM:
5982 case DTRACEACT_UMOD:
5983 case DTRACEACT_UADDR: {
5984 struct pid *pid = curthread->t_procp->p_pidp;
5985
5986 if (!dtrace_priv_proc(state))
5987 continue;
5988
5989 DTRACE_STORE(uint64_t, tomax,
5990 valoffs, (uint64_t)pid->pid_id);
5991 DTRACE_STORE(uint64_t, tomax,
5992 valoffs + sizeof (uint64_t), val);
5993
5994 continue;
5995 }
5996
5997 case DTRACEACT_EXIT: {
5998 /*
5999 * For the exit action, we are going to attempt
6000 * to atomically set our activity to be
6001 * draining. If this fails (either because
6002 * another CPU has beat us to the exit action,
6003 * or because our current activity is something
6004 * other than ACTIVE or WARMUP), we will
6005 * continue. This assures that the exit action
6006 * can be successfully recorded at most once
6007 * when we're in the ACTIVE state. If we're
6008 * encountering the exit() action while in
6009 * COOLDOWN, however, we want to honor the new
6010 * status code. (We know that we're the only
6011 * thread in COOLDOWN, so there is no race.)
6012 */
6013 void *activity = &state->dts_activity;
6014 dtrace_activity_t current = state->dts_activity;
6015
6016 if (current == DTRACE_ACTIVITY_COOLDOWN)
6017 break;
6018
6019 if (current != DTRACE_ACTIVITY_WARMUP)
6020 current = DTRACE_ACTIVITY_ACTIVE;
6021
6022 if (dtrace_cas32(activity, current,
6023 DTRACE_ACTIVITY_DRAINING) != current) {
6024 *flags |= CPU_DTRACE_DROP;
6025 continue;
6026 }
6027
6028 break;
6029 }
6030
6031 default:
6032 ASSERT(0);
6033 }
6034
6035 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) {
6036 uintptr_t end = valoffs + size;
6037
6038 if (!dtrace_vcanload((void *)(uintptr_t)val,
6039 &dp->dtdo_rtype, &mstate, vstate))
6040 continue;
6041
6042 /*
6043 * If this is a string, we're going to only
6044 * load until we find the zero byte -- after
6045 * which we'll store zero bytes.
6046 */
6047 if (dp->dtdo_rtype.dtdt_kind ==
6048 DIF_TYPE_STRING) {
6049 char c = '\0' + 1;
6050 int intuple = act->dta_intuple;
6051 size_t s;
6052
6053 for (s = 0; s < size; s++) {
6054 if (c != '\0')
6055 c = dtrace_load8(val++);
6056
6057 DTRACE_STORE(uint8_t, tomax,
6058 valoffs++, c);
6059
6060 if (c == '\0' && intuple)
6061 break;
6062 }
6063
6064 continue;
6065 }
6066
6067 while (valoffs < end) {
6068 DTRACE_STORE(uint8_t, tomax, valoffs++,
6069 dtrace_load8(val++));
6070 }
6071
6072 continue;
6073 }
6074
6075 switch (size) {
6076 case 0:
6077 break;
6078
6079 case sizeof (uint8_t):
6080 DTRACE_STORE(uint8_t, tomax, valoffs, val);
6081 break;
6082 case sizeof (uint16_t):
6083 DTRACE_STORE(uint16_t, tomax, valoffs, val);
6084 break;
6085 case sizeof (uint32_t):
6086 DTRACE_STORE(uint32_t, tomax, valoffs, val);
6087 break;
6088 case sizeof (uint64_t):
6089 DTRACE_STORE(uint64_t, tomax, valoffs, val);
6090 break;
6091 default:
6092 /*
6093 * Any other size should have been returned by
6094 * reference, not by value.
6095 */
6096 ASSERT(0);
6097 break;
6098 }
6099 }
6100
6101 if (*flags & CPU_DTRACE_DROP)
6102 continue;
6103
6104 if (*flags & CPU_DTRACE_FAULT) {
6105 int ndx;
6106 dtrace_action_t *err;
6107
6108 buf->dtb_errors++;
6109
6110 if (probe->dtpr_id == dtrace_probeid_error) {
6111 /*
6112 * There's nothing we can do -- we had an
6113 * error on the error probe. We bump an
6114 * error counter to at least indicate that
6115 * this condition happened.
6116 */
6117 dtrace_error(&state->dts_dblerrors);
6118 continue;
6119 }
6120
6121 if (vtime) {
6122 /*
6123 * Before recursing on dtrace_probe(), we
6124 * need to explicitly clear out our start
6125 * time to prevent it from being accumulated
6126 * into t_dtrace_vtime.
6127 */
6128 curthread->t_dtrace_start = 0;
6129 }
6130
6131 /*
6132 * Iterate over the actions to figure out which action
6133 * we were processing when we experienced the error.
6134 * Note that act points _past_ the faulting action; if
6135 * act is ecb->dte_action, the fault was in the
6136 * predicate, if it's ecb->dte_action->dta_next it's
6137 * in action #1, and so on.
6138 */
6139 for (err = ecb->dte_action, ndx = 0;
6140 err != act; err = err->dta_next, ndx++)
6141 continue;
6142
6143 dtrace_probe_error(state, ecb->dte_epid, ndx,
6144 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ?
6145 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags),
6146 cpu_core[cpuid].cpuc_dtrace_illval);
6147
6148 continue;
6149 }
6150
6151 if (!committed)
6152 buf->dtb_offset = offs + ecb->dte_size;
6153 }
6154
6155 if (vtime)
6156 curthread->t_dtrace_start = dtrace_gethrtime();
6157
6158 dtrace_interrupt_enable(cookie);
6159 }
6160
6161 /*
6162 * DTrace Probe Hashing Functions
6163 *
6164 * The functions in this section (and indeed, the functions in remaining
6165 * sections) are not _called_ from probe context. (Any exceptions to this are
6166 * marked with a "Note:".) Rather, they are called from elsewhere in the
6167 * DTrace framework to look up probes in, add probes to, and remove probes from
6168 * the DTrace probe hashes. (Each probe is hashed by each element of the
6169 * probe tuple -- allowing for fast lookups, regardless of what was
6170 * specified.)
6171 */
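/*
 * A note on the hash below: this is the classic PJW/ELF string hash -- the
 * high four bits are folded back into the low bits whenever they become set,
 * which keeps the hash well-mixed even for short probe names.
 */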
6172 static uint_t
6173 dtrace_hash_str(char *p)
6174 {
6175 unsigned int g;
6176 uint_t hval = 0;
6177
6178 while (*p) {
6179 hval = (hval << 4) + *p++;
6180 if ((g = (hval & 0xf0000000)) != 0)
6181 hval ^= g >> 24;
6182 hval &= ~g;
6183 }
6184 return (hval);
6185 }
6186
6187 static dtrace_hash_t *
6188 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs)
6189 {
6190 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP);
6191
6192 hash->dth_stroffs = stroffs;
6193 hash->dth_nextoffs = nextoffs;
6194 hash->dth_prevoffs = prevoffs;
6195
6196 hash->dth_size = 1;
6197 hash->dth_mask = hash->dth_size - 1;
6198
6199 hash->dth_tab = kmem_zalloc(hash->dth_size *
6200 sizeof (dtrace_hashbucket_t *), KM_SLEEP);
6201
6202 return (hash);
6203 }
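
/*
 * The offsets passed to dtrace_hash_create() are best illustrated by the way
 * the global probe hashes are created elsewhere in this file -- a sketch of
 * the attach-path call sites, not code that lives here:
 *
 *	dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
 *	    offsetof(dtrace_probe_t, dtpr_nextmod),
 *	    offsetof(dtrace_probe_t, dtpr_prevmod));
 *
 * That is, each hash is told where to find the key string and the next/prev
 * linkage within the hashed structure, so a single dtrace_probe_t can sit on
 * the by-module, by-function and by-name hashes simultaneously.
 */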
6204
6205 static void
6206 dtrace_hash_destroy(dtrace_hash_t *hash)
6207 {
6208 #ifdef DEBUG
6209 int i;
6210
6211 for (i = 0; i < hash->dth_size; i++)
6212 ASSERT(hash->dth_tab[i] == NULL);
6213 #endif
6214
6215 kmem_free(hash->dth_tab,
6216 hash->dth_size * sizeof (dtrace_hashbucket_t *));
6217 kmem_free(hash, sizeof (dtrace_hash_t));
6218 }
6219
6220 static void
6221 dtrace_hash_resize(dtrace_hash_t *hash)
6222 {
6223 int size = hash->dth_size, i, ndx;
6224 int new_size = hash->dth_size << 1;
6225 int new_mask = new_size - 1;
6226 dtrace_hashbucket_t **new_tab, *bucket, *next;
6227
6228 ASSERT((new_size & new_mask) == 0);
6229
6230 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP);
6231
6232 for (i = 0; i < size; i++) {
6233 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) {
6234 dtrace_probe_t *probe = bucket->dthb_chain;
6235
6236 ASSERT(probe != NULL);
6237 ndx = DTRACE_HASHSTR(hash, probe) & new_mask;
6238
6239 next = bucket->dthb_next;
6240 bucket->dthb_next = new_tab[ndx];
6241 new_tab[ndx] = bucket;
6242 }
6243 }
6244
6245 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *));
6246 hash->dth_tab = new_tab;
6247 hash->dth_size = new_size;
6248 hash->dth_mask = new_mask;
6249 }
6250
6251 static void
6252 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new)
6253 {
6254 int hashval = DTRACE_HASHSTR(hash, new);
6255 int ndx = hashval & hash->dth_mask;
6256 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6257 dtrace_probe_t **nextp, **prevp;
6258
6259 for (; bucket != NULL; bucket = bucket->dthb_next) {
6260 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new))
6261 goto add;
6262 }
6263
6264 if ((hash->dth_nbuckets >> 1) > hash->dth_size) {
6265 dtrace_hash_resize(hash);
6266 dtrace_hash_add(hash, new);
6267 return;
6268 }
6269
6270 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP);
6271 bucket->dthb_next = hash->dth_tab[ndx];
6272 hash->dth_tab[ndx] = bucket;
6273 hash->dth_nbuckets++;
6274
6275 add:
6276 nextp = DTRACE_HASHNEXT(hash, new);
6277 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL);
6278 *nextp = bucket->dthb_chain;
6279
6280 if (bucket->dthb_chain != NULL) {
6281 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain);
6282 ASSERT(*prevp == NULL);
6283 *prevp = new;
6284 }
6285
6286 bucket->dthb_chain = new;
6287 bucket->dthb_len++;
6288 }
6289
6290 static dtrace_probe_t *
6291 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template)
6292 {
6293 int hashval = DTRACE_HASHSTR(hash, template);
6294 int ndx = hashval & hash->dth_mask;
6295 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6296
6297 for (; bucket != NULL; bucket = bucket->dthb_next) {
6298 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6299 return (bucket->dthb_chain);
6300 }
6301
6302 return (NULL);
6303 }
6304
6305 static int
6306 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template)
6307 {
6308 int hashval = DTRACE_HASHSTR(hash, template);
6309 int ndx = hashval & hash->dth_mask;
6310 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6311
6312 for (; bucket != NULL; bucket = bucket->dthb_next) {
6313 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template))
6314 return (bucket->dthb_len);
6315 }
6316
6317 return (0);
6318 }
6319
6320 static void
6321 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe)
6322 {
6323 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask;
6324 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx];
6325
6326 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe);
6327 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe);
6328
6329 /*
6330 * Find the bucket that we're removing this probe from.
6331 */
6332 for (; bucket != NULL; bucket = bucket->dthb_next) {
6333 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe))
6334 break;
6335 }
6336
6337 ASSERT(bucket != NULL);
6338
6339 if (*prevp == NULL) {
6340 if (*nextp == NULL) {
6341 /*
6342 * The removed probe was the only probe on this
6343 * bucket; we need to remove the bucket.
6344 */
6345 dtrace_hashbucket_t *b = hash->dth_tab[ndx];
6346
6347 ASSERT(bucket->dthb_chain == probe);
6348 ASSERT(b != NULL);
6349
6350 if (b == bucket) {
6351 hash->dth_tab[ndx] = bucket->dthb_next;
6352 } else {
6353 while (b->dthb_next != bucket)
6354 b = b->dthb_next;
6355 b->dthb_next = bucket->dthb_next;
6356 }
6357
6358 ASSERT(hash->dth_nbuckets > 0);
6359 hash->dth_nbuckets--;
6360 kmem_free(bucket, sizeof (dtrace_hashbucket_t));
6361 return;
6362 }
6363
6364 bucket->dthb_chain = *nextp;
6365 } else {
6366 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp;
6367 }
6368
6369 if (*nextp != NULL)
6370 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp;
6371 }
6372
6373 /*
6374 * DTrace Utility Functions
6375 *
6376 * These are random utility functions that are _not_ called from probe context.
6377 */
6378 static int
6379 dtrace_badattr(const dtrace_attribute_t *a)
6380 {
6381 return (a->dtat_name > DTRACE_STABILITY_MAX ||
6382 a->dtat_data > DTRACE_STABILITY_MAX ||
6383 a->dtat_class > DTRACE_CLASS_MAX);
6384 }
6385
6386 /*
6387 * Return a copy of the specified string. If the string is NULL,
6388 * this function returns a zero-length string.
6389 */
6390 static char *
6391 dtrace_strdup(const char *str)
6392 {
6393 char *new = kmem_zalloc((str != NULL ? strlen(str) : 0) + 1, KM_SLEEP);
6394
6395 if (str != NULL)
6396 (void) strcpy(new, str);
6397
6398 return (new);
6399 }
6400
6401 #define DTRACE_ISALPHA(c) \
6402 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z'))
6403
6404 static int
6405 dtrace_badname(const char *s)
6406 {
6407 char c;
6408
6409 if (s == NULL || (c = *s++) == '\0')
6410 return (0);
6411
6412 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.')
6413 return (1);
6414
6415 while ((c = *s++) != '\0') {
6416 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') &&
6417 c != '-' && c != '_' && c != '.' && c != '`')
6418 return (1);
6419 }
6420
6421 return (0);
6422 }
6423
6424 static void
6425 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp)
6426 {
6427 uint32_t priv;
6428
6429 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
6430 /*
6431 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter.
6432 */
6433 priv = DTRACE_PRIV_ALL;
6434 } else {
6435 *uidp = crgetuid(cr);
6436 *zoneidp = crgetzoneid(cr);
6437
6438 priv = 0;
6439 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE))
6440 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER;
6441 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE))
6442 priv |= DTRACE_PRIV_USER;
6443 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE))
6444 priv |= DTRACE_PRIV_PROC;
6445 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
6446 priv |= DTRACE_PRIV_OWNER;
6447 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
6448 priv |= DTRACE_PRIV_ZONEOWNER;
6449 }
6450
6451 *privp = priv;
6452 }
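
/*
 * For example (reading directly from the logic above): a credential carrying
 * only PRIV_DTRACE_PROC and PRIV_PROC_OWNER yields *privp ==
 * (DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER), with *uidp and *zoneidp filled in
 * from the credential; a credential with PRIV_ALL yields DTRACE_PRIV_ALL and
 * leaves *uidp and *zoneidp untouched.
 */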
6453
6454 #ifdef DTRACE_ERRDEBUG
6455 static void
6456 dtrace_errdebug(const char *str)
6457 {
6458 int hval = dtrace_hash_str((char *)str) % DTRACE_ERRHASHSZ;
6459 int occupied = 0;
6460
6461 mutex_enter(&dtrace_errlock);
6462 dtrace_errlast = str;
6463 dtrace_errthread = curthread;
6464
6465 while (occupied++ < DTRACE_ERRHASHSZ) {
6466 if (dtrace_errhash[hval].dter_msg == str) {
6467 dtrace_errhash[hval].dter_count++;
6468 goto out;
6469 }
6470
6471 if (dtrace_errhash[hval].dter_msg != NULL) {
6472 hval = (hval + 1) % DTRACE_ERRHASHSZ;
6473 continue;
6474 }
6475
6476 dtrace_errhash[hval].dter_msg = str;
6477 dtrace_errhash[hval].dter_count = 1;
6478 goto out;
6479 }
6480
6481 panic("dtrace: undersized error hash");
6482 out:
6483 mutex_exit(&dtrace_errlock);
6484 }
6485 #endif
6486
6487 /*
6488 * DTrace Matching Functions
6489 *
6490 * These functions are used to match groups of probes, given some elements of
6491 * a probe tuple, or some globbed expressions for elements of a probe tuple.
6492 */
6493 static int
6494 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid,
6495 zoneid_t zoneid)
6496 {
6497 if (priv != DTRACE_PRIV_ALL) {
6498 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags;
6499 uint32_t match = priv & ppriv;
6500
6501 /*
6502 * No PRIV_DTRACE_* privileges...
6503 */
6504 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER |
6505 DTRACE_PRIV_KERNEL)) == 0)
6506 return (0);
6507
6508 /*
6509 * No matching bits, but there were bits to match...
6510 */
6511 if (match == 0 && ppriv != 0)
6512 return (0);
6513
6514 /*
6515 * Need to have permissions to the process, but don't...
6516 */
6517 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 &&
6518 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) {
6519 return (0);
6520 }
6521
6522 /*
6523 * Need to be in the same zone unless we possess the
6524 * privilege to examine all zones.
6525 */
6526 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 &&
6527 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) {
6528 return (0);
6529 }
6530 }
6531
6532 return (1);
6533 }
6534
6535 /*
6536 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which
6537 * consists of input pattern strings and an ops-vector to evaluate them.
6538 * This function returns >0 for match, 0 for no match, and <0 for error.
6539 */
6540 static int
6541 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp,
6542 uint32_t priv, uid_t uid, zoneid_t zoneid)
6543 {
6544 dtrace_provider_t *pvp = prp->dtpr_provider;
6545 int rv;
6546
6547 if (pvp->dtpv_defunct)
6548 return (0);
6549
6550 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0)
6551 return (rv);
6552
6553 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0)
6554 return (rv);
6555
6556 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0)
6557 return (rv);
6558
6559 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0)
6560 return (rv);
6561
6562 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0)
6563 return (0);
6564
6565 return (rv);
6566 }
6567
6568 /*
6569 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN)
6570 * interface for matching a glob pattern 'p' to an input string 's'. Unlike
6571 * libc's version, the kernel version only applies to 8-bit ASCII strings.
6572 * In addition, all of the recursion cases except for '*' matching have been
6573 * unwound. For '*', we still implement recursive evaluation, but a depth
6574 * counter is maintained and matching is aborted if we recurse too deep.
6575 * The function returns 0 if no match, >0 if match, and <0 if recursion error.
6576 */
6577 static int
6578 dtrace_match_glob(const char *s, const char *p, int depth)
6579 {
6580 const char *olds;
6581 char s1, c;
6582 int gs;
6583
6584 if (depth > DTRACE_PROBEKEY_MAXDEPTH)
6585 return (-1);
6586
6587 if (s == NULL)
6588 s = ""; /* treat NULL as empty string */
6589
6590 top:
6591 olds = s;
6592 s1 = *s++;
6593
6594 if (p == NULL)
6595 return (0);
6596
6597 if ((c = *p++) == '\0')
6598 return (s1 == '\0');
6599
6600 switch (c) {
6601 case '[': {
6602 int ok = 0, notflag = 0;
6603 char lc = '\0';
6604
6605 if (s1 == '\0')
6606 return (0);
6607
6608 if (*p == '!') {
6609 notflag = 1;
6610 p++;
6611 }
6612
6613 if ((c = *p++) == '\0')
6614 return (0);
6615
6616 do {
6617 if (c == '-' && lc != '\0' && *p != ']') {
6618 if ((c = *p++) == '\0')
6619 return (0);
6620 if (c == '\\' && (c = *p++) == '\0')
6621 return (0);
6622
6623 if (notflag) {
6624 if (s1 < lc || s1 > c)
6625 ok++;
6626 else
6627 return (0);
6628 } else if (lc <= s1 && s1 <= c)
6629 ok++;
6630
6631 } else if (c == '\\' && (c = *p++) == '\0')
6632 return (0);
6633
6634 lc = c; /* save left-hand 'c' for next iteration */
6635
6636 if (notflag) {
6637 if (s1 != c)
6638 ok++;
6639 else
6640 return (0);
6641 } else if (s1 == c)
6642 ok++;
6643
6644 if ((c = *p++) == '\0')
6645 return (0);
6646
6647 } while (c != ']');
6648
6649 if (ok)
6650 goto top;
6651
6652 return (0);
6653 }
6654
6655 case '\\':
6656 if ((c = *p++) == '\0')
6657 return (0);
6658 /*FALLTHRU*/
6659
6660 default:
6661 if (c != s1)
6662 return (0);
6663 /*FALLTHRU*/
6664
6665 case '?':
6666 if (s1 != '\0')
6667 goto top;
6668 return (0);
6669
6670 case '*':
6671 while (*p == '*')
6672 p++; /* consecutive *'s are identical to a single one */
6673
6674 if (*p == '\0')
6675 return (1);
6676
6677 for (s = olds; *s != '\0'; s++) {
6678 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0)
6679 return (gs);
6680 }
6681
6682 return (0);
6683 }
6684 }
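
/*
 * Illustrative results (not exercised here, but following directly from the
 * logic above): dtrace_match_glob("read", "re*", 0) and
 * dtrace_match_glob("read", "r??d", 0) both return 1;
 * dtrace_match_glob("read", "write", 0) returns 0; and a '*' pattern that
 * recurses past DTRACE_PROBEKEY_MAXDEPTH returns -1.
 */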
6685
6686 /*ARGSUSED*/
6687 static int
6688 dtrace_match_string(const char *s, const char *p, int depth)
6689 {
6690 return (s != NULL && strcmp(s, p) == 0);
6691 }
6692
6693 /*ARGSUSED*/
6694 static int
6695 dtrace_match_nul(const char *s, const char *p, int depth)
6696 {
6697 return (1); /* always match the empty pattern */
6698 }
6699
6700 /*ARGSUSED*/
6701 static int
6702 dtrace_match_nonzero(const char *s, const char *p, int depth)
6703 {
6704 return (s != NULL && s[0] != '\0');
6705 }
6706
6707 static int
6708 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid,
6709 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg)
6710 {
6711 dtrace_probe_t template, *probe;
6712 dtrace_hash_t *hash = NULL;
6713 int len, rc, best = INT_MAX, nmatched = 0;
6714 dtrace_id_t i;
6715
6716 ASSERT(MUTEX_HELD(&dtrace_lock));
6717
6718 /*
6719 * If the probe ID is specified in the key, just lookup by ID and
6720 * invoke the match callback once if a matching probe is found.
6721 */
6722 if (pkp->dtpk_id != DTRACE_IDNONE) {
6723 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL &&
6724 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) {
6725 if ((*matched)(probe, arg) == DTRACE_MATCH_FAIL)
6726 return (DTRACE_MATCH_FAIL);
6727 nmatched++;
6728 }
6729 return (nmatched);
6730 }
6731
6732 template.dtpr_mod = (char *)pkp->dtpk_mod;
6733 template.dtpr_func = (char *)pkp->dtpk_func;
6734 template.dtpr_name = (char *)pkp->dtpk_name;
6735
6736 /*
6737 * We want to find the most distinct of the module name, function
6738 * name, and name. So for each one that is not a glob pattern or
6739 * empty string, we perform a lookup in the corresponding hash and
6740 * use the hash table with the fewest collisions to do our search.
6741 */
6742 if (pkp->dtpk_mmatch == &dtrace_match_string &&
6743 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) {
6744 best = len;
6745 hash = dtrace_bymod;
6746 }
6747
6748 if (pkp->dtpk_fmatch == &dtrace_match_string &&
6749 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) {
6750 best = len;
6751 hash = dtrace_byfunc;
6752 }
6753
6754 if (pkp->dtpk_nmatch == &dtrace_match_string &&
6755 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) {
6756 best = len;
6757 hash = dtrace_byname;
6758 }
6759
6760 /*
6761 * If we did not select a hash table, iterate over every probe and
6762 * invoke our callback for each one that matches our input probe key.
6763 */
6764 if (hash == NULL) {
6765 for (i = 0; i < dtrace_nprobes; i++) {
6766 if ((probe = dtrace_probes[i]) == NULL ||
6767 dtrace_match_probe(probe, pkp, priv, uid,
6768 zoneid) <= 0)
6769 continue;
6770
6771 nmatched++;
6772
6773 if ((rc = (*matched)(probe, arg)) !=
6774 DTRACE_MATCH_NEXT) {
6775 if (rc == DTRACE_MATCH_FAIL)
6776 return (DTRACE_MATCH_FAIL);
6777 break;
6778 }
6779 }
6780
6781 return (nmatched);
6782 }
6783
6784 /*
6785 * If we selected a hash table, iterate over each probe of the same key
6786 * name and invoke the callback for every probe that matches the other
6787 * attributes of our input probe key.
6788 */
6789 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL;
6790 probe = *(DTRACE_HASHNEXT(hash, probe))) {
6791
6792 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0)
6793 continue;
6794
6795 nmatched++;
6796
6797 if ((rc = (*matched)(probe, arg)) != DTRACE_MATCH_NEXT) {
6798 if (rc == DTRACE_MATCH_FAIL)
6799 return (DTRACE_MATCH_FAIL);
6800 break;
6801 }
6802 }
6803
6804 return (nmatched);
6805 }
6806
6807 /*
6808 * Return the function pointer dtrace_probecmp() should use to compare the
6809 * specified pattern with a string. For NULL or empty patterns, we select
6810 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob().
6811 * For non-empty non-glob strings, we use dtrace_match_string().
6812 */
6813 static dtrace_probekey_f *
6814 dtrace_probekey_func(const char *p)
6815 {
6816 char c;
6817
6818 if (p == NULL || *p == '\0')
6819 return (&dtrace_match_nul);
6820
6821 while ((c = *p++) != '\0') {
6822 if (c == '[' || c == '?' || c == '*' || c == '\\')
6823 return (&dtrace_match_glob);
6824 }
6825
6826 return (&dtrace_match_string);
6827 }
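
/*
 * For example, "syscall" selects dtrace_match_string(), "sys*" (or any
 * pattern containing '[', '?', '*' or '\') selects dtrace_match_glob(), and
 * NULL or "" selects dtrace_match_nul().
 */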
6828
6829 /*
6830 * Build a probe comparison key for use with dtrace_match_probe() from the
6831 * given probe description. By convention, a null key only matches anchored
6832 * probes: if each field is the empty string, reset dtpk_fmatch to
6833 * dtrace_match_nonzero().
6834 */
6835 static void
6836 dtrace_probekey(const dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp)
6837 {
6838 pkp->dtpk_prov = pdp->dtpd_provider;
6839 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider);
6840
6841 pkp->dtpk_mod = pdp->dtpd_mod;
6842 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod);
6843
6844 pkp->dtpk_func = pdp->dtpd_func;
6845 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func);
6846
6847 pkp->dtpk_name = pdp->dtpd_name;
6848 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name);
6849
6850 pkp->dtpk_id = pdp->dtpd_id;
6851
6852 if (pkp->dtpk_id == DTRACE_IDNONE &&
6853 pkp->dtpk_pmatch == &dtrace_match_nul &&
6854 pkp->dtpk_mmatch == &dtrace_match_nul &&
6855 pkp->dtpk_fmatch == &dtrace_match_nul &&
6856 pkp->dtpk_nmatch == &dtrace_match_nul)
6857 pkp->dtpk_fmatch = &dtrace_match_nonzero;
6858 }
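
/*
 * For example, the null description -- every field the empty string and
 * dtpd_id equal to DTRACE_IDNONE, which is what an enabling of ":::"
 * compiles to -- yields a key whose four matchers are all dtrace_match_nul();
 * the reset above then substitutes dtrace_match_nonzero() for dtpk_fmatch, so
 * only probes with a non-empty function name (that is, anchored probes) will
 * match.
 */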
6859
6860 /*
6861 * DTrace Provider-to-Framework API Functions
6862 *
6863 * These functions implement much of the Provider-to-Framework API, as
6864 * described in <sys/dtrace.h>. The parts of the API not in this section are
6865 * the functions in the API for probe management (found below), and
6866 * dtrace_probe() itself (found above).
6867 */
6868
6869 /*
6870 * Register the calling provider with the DTrace framework. This should
6871 * generally be called by DTrace providers in their attach(9E) entry point.
6872 */
6873 int
6874 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv,
6875 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp)
6876 {
6877 dtrace_provider_t *provider;
6878
6879 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) {
6880 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
6881 "arguments", name ? name : "<NULL>");
6882 return (EINVAL);
6883 }
6884
6885 if (name[0] == '\0' || dtrace_badname(name)) {
6886 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
6887 "provider name", name);
6888 return (EINVAL);
6889 }
6890
6891 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) ||
6892 pops->dtps_enable == NULL || pops->dtps_disable == NULL ||
6893 pops->dtps_destroy == NULL ||
6894 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) {
6895 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
6896 "provider ops", name);
6897 return (EINVAL);
6898 }
6899
6900 if (dtrace_badattr(&pap->dtpa_provider) ||
6901 dtrace_badattr(&pap->dtpa_mod) ||
6902 dtrace_badattr(&pap->dtpa_func) ||
6903 dtrace_badattr(&pap->dtpa_name) ||
6904 dtrace_badattr(&pap->dtpa_args)) {
6905 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
6906 "provider attributes", name);
6907 return (EINVAL);
6908 }
6909
6910 if (priv & ~DTRACE_PRIV_ALL) {
6911 cmn_err(CE_WARN, "failed to register provider '%s': invalid "
6912 "privilege attributes", name);
6913 return (EINVAL);
6914 }
6915
6916 if ((priv & DTRACE_PRIV_KERNEL) &&
6917 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) &&
6918 pops->dtps_usermode == NULL) {
6919 cmn_err(CE_WARN, "failed to register provider '%s': need "
6920 "dtps_usermode() op for given privilege attributes", name);
6921 return (EINVAL);
6922 }
6923
6924 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP);
6925 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
6926 (void) strcpy(provider->dtpv_name, name);
6927
6928 provider->dtpv_attr = *pap;
6929 provider->dtpv_priv.dtpp_flags = priv;
6930 if (cr != NULL) {
6931 provider->dtpv_priv.dtpp_uid = crgetuid(cr);
6932 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr);
6933 }
6934 provider->dtpv_pops = *pops;
6935
6936 if (pops->dtps_provide == NULL) {
6937 ASSERT(pops->dtps_provide_module != NULL);
6938 provider->dtpv_pops.dtps_provide =
6939 (void (*)(void *, const dtrace_probedesc_t *))dtrace_nullop;
6940 }
6941
6942 if (pops->dtps_provide_module == NULL) {
6943 ASSERT(pops->dtps_provide != NULL);
6944 provider->dtpv_pops.dtps_provide_module =
6945 (void (*)(void *, struct modctl *))dtrace_nullop;
6946 }
6947
6948 if (pops->dtps_suspend == NULL) {
6949 ASSERT(pops->dtps_resume == NULL);
6950 provider->dtpv_pops.dtps_suspend =
6951 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
6952 provider->dtpv_pops.dtps_resume =
6953 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop;
6954 }
6955
6956 provider->dtpv_arg = arg;
6957 *idp = (dtrace_provider_id_t)provider;
6958
6959 if (pops == &dtrace_provider_ops) {
6960 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
6961 ASSERT(MUTEX_HELD(&dtrace_lock));
6962 ASSERT(dtrace_anon.dta_enabling == NULL);
6963
6964 /*
6965 * We make sure that the DTrace provider is at the head of
6966 * the provider chain.
6967 */
6968 provider->dtpv_next = dtrace_provider;
6969 dtrace_provider = provider;
6970 return (0);
6971 }
6972
6973 mutex_enter(&dtrace_provider_lock);
6974 mutex_enter(&dtrace_lock);
6975
6976 /*
6977 * If there is at least one provider registered, we'll add this
6978 * provider after the first provider.
6979 */
6980 if (dtrace_provider != NULL) {
6981 provider->dtpv_next = dtrace_provider->dtpv_next;
6982 dtrace_provider->dtpv_next = provider;
6983 } else {
6984 dtrace_provider = provider;
6985 }
6986
6987 if (dtrace_retained != NULL) {
6988 dtrace_enabling_provide(provider);
6989
6990 /*
6991 * Now we need to call dtrace_enabling_matchall() -- which
6992 * will acquire cpu_lock and dtrace_lock. We therefore need
6993 * to drop all of our locks before calling into it...
6994 */
6995 mutex_exit(&dtrace_lock);
6996 mutex_exit(&dtrace_provider_lock);
6997 dtrace_enabling_matchall();
6998
6999 return (0);
7000 }
7001
7002 mutex_exit(&dtrace_lock);
7003 mutex_exit(&dtrace_provider_lock);
7004
7005 return (0);
7006 }
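
/*
 * A minimal registration sketch (hypothetical provider; "example_attr",
 * "example_pops" and "example_id" are illustrative names only, not part of
 * this file). Per the checks above, the ops vector must supply at least
 * dtps_provide (or dtps_provide_module), dtps_enable, dtps_disable and
 * dtps_destroy:
 *
 *	static dtrace_provider_id_t example_id;
 *
 *	if (dtrace_register("example", &example_attr, DTRACE_PRIV_KERNEL,
 *	    NULL, &example_pops, NULL, &example_id) != 0)
 *		return (DDI_FAILURE);
 */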
7007
7008 /*
7009 * Unregister the specified provider from the DTrace framework. This should
7010 * generally be called by DTrace providers in their detach(9E) entry point.
7011 */
7012 int
7013 dtrace_unregister(dtrace_provider_id_t id)
7014 {
7015 dtrace_provider_t *old = (dtrace_provider_t *)id;
7016 dtrace_provider_t *prev = NULL;
7017 int i, self = 0;
7018 dtrace_probe_t *probe, *first = NULL;
7019
7020 if (old->dtpv_pops.dtps_enable ==
7021 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop) {
7022 /*
7023 * If DTrace itself is the provider, we're called with locks
7024 * already held.
7025 */
7026 ASSERT(old == dtrace_provider);
7027 ASSERT(dtrace_devi != NULL);
7028 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7029 ASSERT(MUTEX_HELD(&dtrace_lock));
7030 self = 1;
7031
7032 if (dtrace_provider->dtpv_next != NULL) {
7033 /*
7034 * There's another provider here; return failure.
7035 */
7036 return (EBUSY);
7037 }
7038 } else {
7039 mutex_enter(&dtrace_provider_lock);
7040 mutex_enter(&mod_lock);
7041 mutex_enter(&dtrace_lock);
7042 }
7043
7044 /*
7045 * If anyone has /dev/dtrace open, or if there are anonymous enabled
7046 * probes, we refuse to let providers slither away, unless this
7047 * provider has already been explicitly invalidated.
7048 */
7049 if (!old->dtpv_defunct &&
7050 (dtrace_opens || (dtrace_anon.dta_state != NULL &&
7051 dtrace_anon.dta_state->dts_necbs > 0))) {
7052 if (!self) {
7053 mutex_exit(&dtrace_lock);
7054 mutex_exit(&mod_lock);
7055 mutex_exit(&dtrace_provider_lock);
7056 }
7057 return (EBUSY);
7058 }
7059
7060 /*
7061 * Attempt to destroy the probes associated with this provider.
7062 */
7063 for (i = 0; i < dtrace_nprobes; i++) {
7064 if ((probe = dtrace_probes[i]) == NULL)
7065 continue;
7066
7067 if (probe->dtpr_provider != old)
7068 continue;
7069
7070 if (probe->dtpr_ecb == NULL)
7071 continue;
7072
7073 /*
7074 * We have at least one ECB; we can't remove this provider.
7075 */
7076 if (!self) {
7077 mutex_exit(&dtrace_lock);
7078 mutex_exit(&mod_lock);
7079 mutex_exit(&dtrace_provider_lock);
7080 }
7081 return (EBUSY);
7082 }
7083
7084 /*
7085 * All of the probes for this provider are disabled; we can safely
7086 * remove all of them from their hash chains and from the probe array.
7087 */
7088 for (i = 0; i < dtrace_nprobes; i++) {
7089 if ((probe = dtrace_probes[i]) == NULL)
7090 continue;
7091
7092 if (probe->dtpr_provider != old)
7093 continue;
7094
7095 dtrace_probes[i] = NULL;
7096
7097 dtrace_hash_remove(dtrace_bymod, probe);
7098 dtrace_hash_remove(dtrace_byfunc, probe);
7099 dtrace_hash_remove(dtrace_byname, probe);
7100
7101 if (first == NULL) {
7102 first = probe;
7103 probe->dtpr_nextmod = NULL;
7104 } else {
7105 probe->dtpr_nextmod = first;
7106 first = probe;
7107 }
7108 }
7109
7110 /*
7111 * The provider's probes have been removed from the hash chains and
7112 * from the probe array. Now issue a dtrace_sync() to be sure that
7113 * everyone has cleared out from any probe array processing.
7114 */
7115 dtrace_sync();
7116
7117 for (probe = first; probe != NULL; probe = first) {
7118 first = probe->dtpr_nextmod;
7119
7120 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id,
7121 probe->dtpr_arg);
7122 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7123 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7124 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7125 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1);
7126 kmem_free(probe, sizeof (dtrace_probe_t));
7127 }
7128
7129 if ((prev = dtrace_provider) == old) {
7130 ASSERT(self || dtrace_devi == NULL);
7131 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL);
7132 dtrace_provider = old->dtpv_next;
7133 } else {
7134 while (prev != NULL && prev->dtpv_next != old)
7135 prev = prev->dtpv_next;
7136
7137 if (prev == NULL) {
7138 panic("attempt to unregister non-existent "
7139 "dtrace provider %p\n", (void *)id);
7140 }
7141
7142 prev->dtpv_next = old->dtpv_next;
7143 }
7144
7145 if (!self) {
7146 mutex_exit(&dtrace_lock);
7147 mutex_exit(&mod_lock);
7148 mutex_exit(&dtrace_provider_lock);
7149 }
7150
7151 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1);
7152 kmem_free(old, sizeof (dtrace_provider_t));
7153
7154 return (0);
7155 }
7156
7157 /*
7158 * Invalidate the specified provider. All subsequent probe lookups for the
7159 * specified provider will fail, but its probes will not be removed.
7160 */
7161 void
7162 dtrace_invalidate(dtrace_provider_id_t id)
7163 {
7164 dtrace_provider_t *pvp = (dtrace_provider_t *)id;
7165
7166 ASSERT(pvp->dtpv_pops.dtps_enable !=
7167 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
7168
7169 mutex_enter(&dtrace_provider_lock);
7170 mutex_enter(&dtrace_lock);
7171
7172 pvp->dtpv_defunct = 1;
7173
7174 mutex_exit(&dtrace_lock);
7175 mutex_exit(&dtrace_provider_lock);
7176 }
7177
7178 /*
7179 * Indicate whether or not DTrace has attached.
7180 */
7181 int
7182 dtrace_attached(void)
7183 {
7184 /*
7185 * dtrace_provider will be non-NULL iff the DTrace driver has
7186 * attached. (It's non-NULL because DTrace is always itself a
7187 * provider.)
7188 */
7189 return (dtrace_provider != NULL);
7190 }
7191
7192 /*
7193 * Remove all the unenabled probes for the given provider. This function is
7194 * not unlike dtrace_unregister(), except that it doesn't remove the provider
7195 * -- just as many of its associated probes as it can.
7196 */
7197 int
7198 dtrace_condense(dtrace_provider_id_t id)
7199 {
7200 dtrace_provider_t *prov = (dtrace_provider_t *)id;
7201 int i;
7202 dtrace_probe_t *probe;
7203
7204 /*
7205 * Make sure this isn't the dtrace provider itself.
7206 */
7207 ASSERT(prov->dtpv_pops.dtps_enable !=
7208 (int (*)(void *, dtrace_id_t, void *))dtrace_enable_nullop);
7209
7210 mutex_enter(&dtrace_provider_lock);
7211 mutex_enter(&dtrace_lock);
7212
7213 /*
7214 * Attempt to destroy the probes associated with this provider.
7215 */
7216 for (i = 0; i < dtrace_nprobes; i++) {
7217 if ((probe = dtrace_probes[i]) == NULL)
7218 continue;
7219
7220 if (probe->dtpr_provider != prov)
7221 continue;
7222
7223 if (probe->dtpr_ecb != NULL)
7224 continue;
7225
7226 dtrace_probes[i] = NULL;
7227
7228 dtrace_hash_remove(dtrace_bymod, probe);
7229 dtrace_hash_remove(dtrace_byfunc, probe);
7230 dtrace_hash_remove(dtrace_byname, probe);
7231
7232 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1,
7233 probe->dtpr_arg);
7234 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
7235 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
7236 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
7237 kmem_free(probe, sizeof (dtrace_probe_t));
7238 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1);
7239 }
7240
7241 mutex_exit(&dtrace_lock);
7242 mutex_exit(&dtrace_provider_lock);
7243
7244 return (0);
7245 }
7246
7247 /*
7248 * DTrace Probe Management Functions
7249 *
7250 * The functions in this section perform the DTrace probe management,
7251 * including functions to create probes, look-up probes, and call into the
7252 * providers to request that probes be provided. Some of these functions are
7253 * in the Provider-to-Framework API; these functions can be identified by the
7254 * fact that they are not declared "static".
7255 */
7256
7257 /*
7258 * Create a probe with the specified module name, function name, and name.
7259 */
7260 dtrace_id_t
7261 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod,
7262 const char *func, const char *name, int aframes, void *arg)
7263 {
7264 dtrace_probe_t *probe, **probes;
7265 dtrace_provider_t *provider = (dtrace_provider_t *)prov;
7266 dtrace_id_t id;
7267
7268 if (provider == dtrace_provider) {
7269 ASSERT(MUTEX_HELD(&dtrace_lock));
7270 } else {
7271 mutex_enter(&dtrace_lock);
7272 }
7273
7274 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1,
7275 VM_BESTFIT | VM_SLEEP);
7276 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP);
7277
7278 probe->dtpr_id = id;
7279 probe->dtpr_gen = dtrace_probegen++;
7280 probe->dtpr_mod = dtrace_strdup(mod);
7281 probe->dtpr_func = dtrace_strdup(func);
7282 probe->dtpr_name = dtrace_strdup(name);
7283 probe->dtpr_arg = arg;
7284 probe->dtpr_aframes = aframes;
7285 probe->dtpr_provider = provider;
7286
7287 dtrace_hash_add(dtrace_bymod, probe);
7288 dtrace_hash_add(dtrace_byfunc, probe);
7289 dtrace_hash_add(dtrace_byname, probe);
7290
7291 if (id - 1 >= dtrace_nprobes) {
7292 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *);
7293 size_t nsize = osize << 1;
7294
7295 if (nsize == 0) {
7296 ASSERT(osize == 0);
7297 ASSERT(dtrace_probes == NULL);
7298 nsize = sizeof (dtrace_probe_t *);
7299 }
7300
7301 probes = kmem_zalloc(nsize, KM_SLEEP);
7302
7303 if (dtrace_probes == NULL) {
7304 ASSERT(osize == 0);
7305 dtrace_probes = probes;
7306 dtrace_nprobes = 1;
7307 } else {
7308 dtrace_probe_t **oprobes = dtrace_probes;
7309
7310 bcopy(oprobes, probes, osize);
7311 dtrace_membar_producer();
7312 dtrace_probes = probes;
7313
7314 dtrace_sync();
7315
7316 /*
7317 * All CPUs are now seeing the new probes array; we can
7318 * safely free the old array.
7319 */
7320 kmem_free(oprobes, osize);
7321 dtrace_nprobes <<= 1;
7322 }
7323
7324 ASSERT(id - 1 < dtrace_nprobes);
7325 }
7326
7327 ASSERT(dtrace_probes[id - 1] == NULL);
7328 dtrace_probes[id - 1] = probe;
7329
7330 if (provider != dtrace_provider)
7331 mutex_exit(&dtrace_lock);
7332
7333 return (id);
7334 }
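
/*
 * A usage sketch (hypothetical provider id and argument; the names are
 * illustrative only): a provider creating an entry probe for genunix`read
 * might call
 *
 *	id = dtrace_probe_create(example_id, "genunix", "read", "entry",
 *	    0, example_arg);
 *
 * from its dtps_provide() entry point.
 */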
7335
7336 static dtrace_probe_t *
7337 dtrace_probe_lookup_id(dtrace_id_t id)
7338 {
7339 ASSERT(MUTEX_HELD(&dtrace_lock));
7340
7341 if (id == 0 || id > dtrace_nprobes)
7342 return (NULL);
7343
7344 return (dtrace_probes[id - 1]);
7345 }
7346
7347 static int
7348 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg)
7349 {
7350 *((dtrace_id_t *)arg) = probe->dtpr_id;
7351
7352 return (DTRACE_MATCH_DONE);
7353 }
7354
7355 /*
7356 * Look up a probe based on provider and one or more of module name, function
7357 * name and probe name.
7358 */
7359 dtrace_id_t
7360 dtrace_probe_lookup(dtrace_provider_id_t prid, const char *mod,
7361 const char *func, const char *name)
7362 {
7363 dtrace_probekey_t pkey;
7364 dtrace_id_t id;
7365 int match;
7366
7367 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name;
7368 pkey.dtpk_pmatch = &dtrace_match_string;
7369 pkey.dtpk_mod = mod;
7370 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul;
7371 pkey.dtpk_func = func;
7372 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul;
7373 pkey.dtpk_name = name;
7374 pkey.dtpk_nmatch = name ? &dtrace_match_string : &dtrace_match_nul;
7375 pkey.dtpk_id = DTRACE_IDNONE;
7376
7377 mutex_enter(&dtrace_lock);
7378 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0,
7379 dtrace_probe_lookup_match, &id);
7380 mutex_exit(&dtrace_lock);
7381
7382 ASSERT(match == 1 || match == 0);
7383 return (match ? id : 0);
7384 }
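
/*
 * Providers commonly pair this with dtrace_probe_create() in their
 * dtps_provide() op to avoid creating duplicate probes -- a sketch, again
 * with hypothetical names:
 *
 *	if (dtrace_probe_lookup(example_id, "genunix", "read", "entry") == 0)
 *		(void) dtrace_probe_create(example_id, "genunix", "read",
 *		    "entry", 0, example_arg);
 */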
7385
7386 /*
7387 * Returns the probe argument associated with the specified probe.
7388 */
7389 void *
7390 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid)
7391 {
7392 dtrace_probe_t *probe;
7393 void *rval = NULL;
7394
7395 mutex_enter(&dtrace_lock);
7396
7397 if ((probe = dtrace_probe_lookup_id(pid)) != NULL &&
7398 probe->dtpr_provider == (dtrace_provider_t *)id)
7399 rval = probe->dtpr_arg;
7400
7401 mutex_exit(&dtrace_lock);
7402
7403 return (rval);
7404 }
7405
7406 /*
7407 * Copy a probe into a probe description.
7408 */
7409 static void
7410 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp)
7411 {
7412 bzero(pdp, sizeof (dtrace_probedesc_t));
7413 pdp->dtpd_id = prp->dtpr_id;
7414
7415 (void) strncpy(pdp->dtpd_provider,
7416 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1);
7417
7418 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1);
7419 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1);
7420 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1);
7421 }
7422
7423 /*
7424 * Called to indicate that a probe -- or probes -- should be provided by a
7425 * specified provider. If the specified description is NULL, the provider will
7426 * be told to provide all of its probes. (This is done whenever a new
7427 * consumer comes along, or whenever a retained enabling is to be matched.) If
7428 * the specified description is non-NULL, the provider is given the
7429 * opportunity to dynamically provide the specified probe, allowing providers
7430 * to support the creation of probes on-the-fly. (So-called _autocreated_
7431 * probes.) If the provider is NULL, the operations will be applied to all
7432 * providers; if the provider is non-NULL the operations will only be applied
7433 * to the specified provider. The dtrace_provider_lock must be held, and the
7434 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation
7435 * will need to grab the dtrace_lock when it reenters the framework through
7436 * dtrace_probe_lookup(), dtrace_probe_create(), etc.
7437 */
7438 static void
7439 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv)
7440 {
7441 struct modctl *ctl;
7442 int all = 0;
7443
7444 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
7445
7446 if (prv == NULL) {
7447 all = 1;
7448 prv = dtrace_provider;
7449 }
7450
7451 do {
7452 /*
7453 * First, call the blanket provide operation.
7454 */
7455 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc);
7456
7457 /*
7458 * Now call the per-module provide operation. We will grab
7459 * mod_lock to prevent the list from being modified. Note
7460 * that this also prevents the mod_busy bits from changing.
7461 * (mod_busy can only be changed with mod_lock held.)
7462 */
7463 mutex_enter(&mod_lock);
7464
7465 ctl = &modules;
7466 do {
7467 if (ctl->mod_busy || ctl->mod_mp == NULL)
7468 continue;
7469
7470 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
7471
7472 } while ((ctl = ctl->mod_next) != &modules);
7473
7474 mutex_exit(&mod_lock);
7475 } while (all && (prv = prv->dtpv_next) != NULL);
7476 }
7477
7478 /*
7479 * Iterate over each probe, and call the Framework-to-Provider API function
7480 * denoted by offs.
7481 */
7482 static void
7483 dtrace_probe_foreach(uintptr_t offs)
7484 {
7485 dtrace_provider_t *prov;
7486 void (*func)(void *, dtrace_id_t, void *);
7487 dtrace_probe_t *probe;
7488 dtrace_icookie_t cookie;
7489 int i;
7490
7491 /*
7492 * We disable interrupts to walk through the probe array. This is
7493 * safe -- the dtrace_sync() in dtrace_unregister() assures that we
7494 * won't see stale data.
7495 */
7496 cookie = dtrace_interrupt_disable();
7497
7498 for (i = 0; i < dtrace_nprobes; i++) {
7499 if ((probe = dtrace_probes[i]) == NULL)
7500 continue;
7501
7502 if (probe->dtpr_ecb == NULL) {
7503 /*
7504 * This probe isn't enabled -- don't call the function.
7505 */
7506 continue;
7507 }
7508
7509 prov = probe->dtpr_provider;
7510 func = *((void(**)(void *, dtrace_id_t, void *))
7511 ((uintptr_t)&prov->dtpv_pops + offs));
7512
7513 func(prov->dtpv_arg, i + 1, probe->dtpr_arg);
7514 }
7515
7516 dtrace_interrupt_enable(cookie);
7517 }
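
/*
 * The suspend and resume paths elsewhere in this file invoke the above
 * roughly as follows (a sketch; the offs argument selects which
 * Framework-to-Provider op is called on every enabled probe):
 *
 *	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
 *	...
 *	dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
 */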
7518
7519 static int
7520 dtrace_probe_enable(const dtrace_probedesc_t *desc, dtrace_enabling_t *enab)
7521 {
7522 dtrace_probekey_t pkey;
7523 uint32_t priv;
7524 uid_t uid;
7525 zoneid_t zoneid;
7526
7527 ASSERT(MUTEX_HELD(&dtrace_lock));
7528 dtrace_ecb_create_cache = NULL;
7529
7530 if (desc == NULL) {
7531 /*
7532 * If we're passed a NULL description, we're being asked to
7533 * create an ECB with a NULL probe.
7534 */
7535 (void) dtrace_ecb_create_enable(NULL, enab);
7536 return (0);
7537 }
7538
7539 dtrace_probekey(desc, &pkey);
7540 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred,
7541 &priv, &uid, &zoneid);
7542
7543 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable,
7544 enab));
7545 }
7546
7547 /*
7548 * DTrace Helper Provider Functions
7549 */
7550 static void
7551 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr)
7552 {
7553 attr->dtat_name = DOF_ATTR_NAME(dofattr);
7554 attr->dtat_data = DOF_ATTR_DATA(dofattr);
7555 attr->dtat_class = DOF_ATTR_CLASS(dofattr);
7556 }
7557
7558 static void
7559 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov,
7560 const dof_provider_t *dofprov, char *strtab)
7561 {
7562 hprov->dthpv_provname = strtab + dofprov->dofpv_name;
7563 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider,
7564 dofprov->dofpv_provattr);
7565 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod,
7566 dofprov->dofpv_modattr);
7567 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func,
7568 dofprov->dofpv_funcattr);
7569 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name,
7570 dofprov->dofpv_nameattr);
7571 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args,
7572 dofprov->dofpv_argsattr);
7573 }
7574
7575 static void
7576 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
7577 {
7578 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
7579 dof_hdr_t *dof = (dof_hdr_t *)daddr;
7580 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
7581 dof_provider_t *provider;
7582 dof_probe_t *probe;
7583 uint32_t *off, *enoff;
7584 uint8_t *arg;
7585 char *strtab;
7586 uint_t i, nprobes;
7587 dtrace_helper_provdesc_t dhpv;
7588 dtrace_helper_probedesc_t dhpb;
7589 dtrace_meta_t *meta = dtrace_meta_pid;
7590 dtrace_mops_t *mops = &meta->dtm_mops;
7591 void *parg;
7592
7593 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
7594 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7595 provider->dofpv_strtab * dof->dofh_secsize);
7596 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7597 provider->dofpv_probes * dof->dofh_secsize);
7598 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7599 provider->dofpv_prargs * dof->dofh_secsize);
7600 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7601 provider->dofpv_proffs * dof->dofh_secsize);
7602
7603 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
7604 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset);
7605 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
7606 enoff = NULL;
7607
7608 /*
7609 * See dtrace_helper_provider_validate().
7610 */
7611 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
7612 provider->dofpv_prenoffs != DOF_SECT_NONE) {
7613 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7614 provider->dofpv_prenoffs * dof->dofh_secsize);
7615 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset);
7616 }
7617
7618 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
7619
7620 /*
7621 * Create the provider.
7622 */
7623 dtrace_dofprov2hprov(&dhpv, provider, strtab);
7624
7625 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL)
7626 return;
7627
7628 meta->dtm_count++;
7629
7630 /*
7631 * Create the probes.
7632 */
7633 for (i = 0; i < nprobes; i++) {
7634 probe = (dof_probe_t *)(uintptr_t)(daddr +
7635 prb_sec->dofs_offset + i * prb_sec->dofs_entsize);
7636
7637 dhpb.dthpb_mod = dhp->dofhp_mod;
7638 dhpb.dthpb_func = strtab + probe->dofpr_func;
7639 dhpb.dthpb_name = strtab + probe->dofpr_name;
7640 dhpb.dthpb_base = probe->dofpr_addr;
7641 dhpb.dthpb_offs = off + probe->dofpr_offidx;
7642 dhpb.dthpb_noffs = probe->dofpr_noffs;
7643 if (enoff != NULL) {
7644 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx;
7645 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs;
7646 } else {
7647 dhpb.dthpb_enoffs = NULL;
7648 dhpb.dthpb_nenoffs = 0;
7649 }
7650 dhpb.dthpb_args = arg + probe->dofpr_argidx;
7651 dhpb.dthpb_nargc = probe->dofpr_nargc;
7652 dhpb.dthpb_xargc = probe->dofpr_xargc;
7653 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv;
7654 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv;
7655
7656 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb);
7657 }
7658 }
7659
7660 static void
7661 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid)
7662 {
7663 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
7664 dof_hdr_t *dof = (dof_hdr_t *)daddr;
7665 int i;
7666
7667 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
7668
7669 for (i = 0; i < dof->dofh_secnum; i++) {
7670 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
7671 dof->dofh_secoff + i * dof->dofh_secsize);
7672
7673 if (sec->dofs_type != DOF_SECT_PROVIDER)
7674 continue;
7675
7676 dtrace_helper_provide_one(dhp, sec, pid);
7677 }
7678
7679 /*
7680 * We may have just created probes, so we must now rematch against
7681 * any retained enablings. Note that this call will acquire both
7682 * cpu_lock and dtrace_lock; the fact that we are holding
7683 * dtrace_meta_lock now is what defines the ordering with respect to
7684 * these three locks.
7685 */
7686 dtrace_enabling_matchall();
7687 }
7688
7689 static void
7690 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid)
7691 {
7692 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
7693 dof_hdr_t *dof = (dof_hdr_t *)daddr;
7694 dof_sec_t *str_sec;
7695 dof_provider_t *provider;
7696 char *strtab;
7697 dtrace_helper_provdesc_t dhpv;
7698 dtrace_meta_t *meta = dtrace_meta_pid;
7699 dtrace_mops_t *mops = &meta->dtm_mops;
7700
7701 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
7702 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff +
7703 provider->dofpv_strtab * dof->dofh_secsize);
7704
7705 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
7706
7707 /*
7708 * Create the provider.
7709 */
7710 dtrace_dofprov2hprov(&dhpv, provider, strtab);
7711
7712 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid);
7713
7714 meta->dtm_count--;
7715 }
7716
7717 static void
7718 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid)
7719 {
7720 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof;
7721 dof_hdr_t *dof = (dof_hdr_t *)daddr;
7722 int i;
7723
7724 ASSERT(MUTEX_HELD(&dtrace_meta_lock));
7725
7726 for (i = 0; i < dof->dofh_secnum; i++) {
7727 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
7728 dof->dofh_secoff + i * dof->dofh_secsize);
7729
7730 if (sec->dofs_type != DOF_SECT_PROVIDER)
7731 continue;
7732
7733 dtrace_helper_provider_remove_one(dhp, sec, pid);
7734 }
7735 }
7736
7737 /*
7738 * DTrace Meta Provider-to-Framework API Functions
7739 *
7740 * These functions implement the Meta Provider-to-Framework API, as described
7741 * in <sys/dtrace.h>.
7742 */
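/*
 * Illustrative sketch (assumed names, not in-tree code): a user-land
 * meta-provider supplies a dtrace_mops_t with all three operations and
 * registers itself exactly once, roughly:
 *
 *	static dtrace_meta_provider_id_t foo_meta_id;
 *
 *	static dtrace_mops_t foo_mops = {
 *		foo_create_probe,		(dtms_create_probe)
 *		foo_provide_pid,		(dtms_provide_pid)
 *		foo_remove_pid			(dtms_remove_pid)
 *	};
 *
 *	error = dtrace_meta_register("foo", &foo_mops, NULL, &foo_meta_id);
 *
 * dtrace_meta_register() below rejects a NULL name or missing ops with
 * EINVAL, only one user-land meta-provider may exist at a time, and the
 * matching dtrace_meta_unregister() fails with EBUSY while helper providers
 * are still attached (dtm_count != 0).
 */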
7743 int
7744 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg,
7745 dtrace_meta_provider_id_t *idp)
7746 {
7747 dtrace_meta_t *meta;
7748 dtrace_helpers_t *help, *next;
7749 int i;
7750
7751 *idp = DTRACE_METAPROVNONE;
7752
7753 /*
7754 * We strictly don't need the name, but we hold onto it for
7755 * debuggability. All hail error queues!
7756 */
7757 if (name == NULL) {
7758 cmn_err(CE_WARN, "failed to register meta-provider: "
7759 "invalid name");
7760 return (EINVAL);
7761 }
7762
7763 if (mops == NULL ||
7764 mops->dtms_create_probe == NULL ||
7765 mops->dtms_provide_pid == NULL ||
7766 mops->dtms_remove_pid == NULL) {
7767 cmn_err(CE_WARN, "failed to register meta-provider %s: "
7768 "invalid ops", name);
7769 return (EINVAL);
7770 }
7771
7772 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP);
7773 meta->dtm_mops = *mops;
7774 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
7775 (void) strcpy(meta->dtm_name, name);
7776 meta->dtm_arg = arg;
7777
7778 mutex_enter(&dtrace_meta_lock);
7779 mutex_enter(&dtrace_lock);
7780
7781 if (dtrace_meta_pid != NULL) {
7782 mutex_exit(&dtrace_lock);
7783 mutex_exit(&dtrace_meta_lock);
7784 cmn_err(CE_WARN, "failed to register meta-provider %s: "
7785 "user-land meta-provider exists", name);
7786 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1);
7787 kmem_free(meta, sizeof (dtrace_meta_t));
7788 return (EINVAL);
7789 }
7790
7791 dtrace_meta_pid = meta;
7792 *idp = (dtrace_meta_provider_id_t)meta;
7793
7794 /*
7795 * If there are providers and probes ready to go, pass them
7796 * off to the new meta provider now.
7797 */
7798
7799 help = dtrace_deferred_pid;
7800 dtrace_deferred_pid = NULL;
7801
7802 mutex_exit(&dtrace_lock);
7803
7804 while (help != NULL) {
7805 for (i = 0; i < help->dthps_nprovs; i++) {
7806 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
7807 help->dthps_pid);
7808 }
7809
7810 next = help->dthps_next;
7811 help->dthps_next = NULL;
7812 help->dthps_prev = NULL;
7813 help->dthps_deferred = 0;
7814 help = next;
7815 }
7816
7817 mutex_exit(&dtrace_meta_lock);
7818
7819 return (0);
7820 }
7821
7822 int
7823 dtrace_meta_unregister(dtrace_meta_provider_id_t id)
7824 {
7825 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id;
7826
7827 mutex_enter(&dtrace_meta_lock);
7828 mutex_enter(&dtrace_lock);
7829
7830 if (old == dtrace_meta_pid) {
7831 pp = &dtrace_meta_pid;
7832 } else {
7833 panic("attempt to unregister non-existent "
7834 "dtrace meta-provider %p\n", (void *)old);
7835 }
7836
7837 if (old->dtm_count != 0) {
7838 mutex_exit(&dtrace_lock);
7839 mutex_exit(&dtrace_meta_lock);
7840 return (EBUSY);
7841 }
7842
7843 *pp = NULL;
7844
7845 mutex_exit(&dtrace_lock);
7846 mutex_exit(&dtrace_meta_lock);
7847
7848 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1);
7849 kmem_free(old, sizeof (dtrace_meta_t));
7850
7851 return (0);
7852 }
7853
7854
7855 /*
7856 * DTrace DIF Object Functions
7857 */
7858 static int
7859 dtrace_difo_err(uint_t pc, const char *format, ...)
7860 {
7861 if (dtrace_err_verbose) {
7862 va_list alist;
7863
7864 (void) uprintf("dtrace DIF object error: [%u]: ", pc);
7865 va_start(alist, format);
7866 (void) vuprintf(format, alist);
7867 va_end(alist);
7868 }
7869
7870 #ifdef DTRACE_ERRDEBUG
7871 dtrace_errdebug(format);
7872 #endif
7873 return (1);
7874 }
7875
7876 /*
7877 * Validate a DTrace DIF object by checking the IR instructions. The following
7878 * rules are currently enforced by dtrace_difo_validate():
7879 *
7880 * 1. Each instruction must have a valid opcode
7881 * 2. Each register, string, variable, or subroutine reference must be valid
7882 * 3. No instruction can modify register %r0 (must be zero)
7883 * 4. All instruction reserved bits must be set to zero
7884 * 5. The last instruction must be a "ret" instruction
7885 * 6. All branch targets must reference a valid instruction _after_ the branch
7886 */
7887 static int
7888 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs,
7889 cred_t *cr)
7890 {
7891 int err = 0, i;
7892 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
7893 int kcheckload;
7894 uint_t pc;
7895
7896 kcheckload = cr == NULL ||
7897 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0;
7898
7899 dp->dtdo_destructive = 0;
7900
7901 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) {
7902 dif_instr_t instr = dp->dtdo_buf[pc];
7903
7904 uint_t r1 = DIF_INSTR_R1(instr);
7905 uint_t r2 = DIF_INSTR_R2(instr);
7906 uint_t rd = DIF_INSTR_RD(instr);
7907 uint_t rs = DIF_INSTR_RS(instr);
7908 uint_t label = DIF_INSTR_LABEL(instr);
7909 uint_t v = DIF_INSTR_VAR(instr);
7910 uint_t subr = DIF_INSTR_SUBR(instr);
7911 uint_t type = DIF_INSTR_TYPE(instr);
7912 uint_t op = DIF_INSTR_OP(instr);
7913
7914 switch (op) {
7915 case DIF_OP_OR:
7916 case DIF_OP_XOR:
7917 case DIF_OP_AND:
7918 case DIF_OP_SLL:
7919 case DIF_OP_SRL:
7920 case DIF_OP_SRA:
7921 case DIF_OP_SUB:
7922 case DIF_OP_ADD:
7923 case DIF_OP_MUL:
7924 case DIF_OP_SDIV:
7925 case DIF_OP_UDIV:
7926 case DIF_OP_SREM:
7927 case DIF_OP_UREM:
7928 case DIF_OP_COPYS:
7929 if (r1 >= nregs)
7930 err += efunc(pc, "invalid register %u\n", r1);
7931 if (r2 >= nregs)
7932 err += efunc(pc, "invalid register %u\n", r2);
7933 if (rd >= nregs)
7934 err += efunc(pc, "invalid register %u\n", rd);
7935 if (rd == 0)
7936 err += efunc(pc, "cannot write to %r0\n");
7937 break;
7938 case DIF_OP_NOT:
7939 case DIF_OP_MOV:
7940 case DIF_OP_ALLOCS:
7941 if (r1 >= nregs)
7942 err += efunc(pc, "invalid register %u\n", r1);
7943 if (r2 != 0)
7944 err += efunc(pc, "non-zero reserved bits\n");
7945 if (rd >= nregs)
7946 err += efunc(pc, "invalid register %u\n", rd);
7947 if (rd == 0)
7948 err += efunc(pc, "cannot write to %r0\n");
7949 break;
7950 case DIF_OP_LDSB:
7951 case DIF_OP_LDSH:
7952 case DIF_OP_LDSW:
7953 case DIF_OP_LDUB:
7954 case DIF_OP_LDUH:
7955 case DIF_OP_LDUW:
7956 case DIF_OP_LDX:
7957 if (r1 >= nregs)
7958 err += efunc(pc, "invalid register %u\n", r1);
7959 if (r2 != 0)
7960 err += efunc(pc, "non-zero reserved bits\n");
7961 if (rd >= nregs)
7962 err += efunc(pc, "invalid register %u\n", rd);
7963 if (rd == 0)
7964 err += efunc(pc, "cannot write to %r0\n");
7965 if (kcheckload)
7966 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op +
7967 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd);
7968 break;
7969 case DIF_OP_RLDSB:
7970 case DIF_OP_RLDSH:
7971 case DIF_OP_RLDSW:
7972 case DIF_OP_RLDUB:
7973 case DIF_OP_RLDUH:
7974 case DIF_OP_RLDUW:
7975 case DIF_OP_RLDX:
7976 if (r1 >= nregs)
7977 err += efunc(pc, "invalid register %u\n", r1);
7978 if (r2 != 0)
7979 err += efunc(pc, "non-zero reserved bits\n");
7980 if (rd >= nregs)
7981 err += efunc(pc, "invalid register %u\n", rd);
7982 if (rd == 0)
7983 err += efunc(pc, "cannot write to %r0\n");
7984 break;
7985 case DIF_OP_ULDSB:
7986 case DIF_OP_ULDSH:
7987 case DIF_OP_ULDSW:
7988 case DIF_OP_ULDUB:
7989 case DIF_OP_ULDUH:
7990 case DIF_OP_ULDUW:
7991 case DIF_OP_ULDX:
7992 if (r1 >= nregs)
7993 err += efunc(pc, "invalid register %u\n", r1);
7994 if (r2 != 0)
7995 err += efunc(pc, "non-zero reserved bits\n");
7996 if (rd >= nregs)
7997 err += efunc(pc, "invalid register %u\n", rd);
7998 if (rd == 0)
7999 err += efunc(pc, "cannot write to %r0\n");
8000 break;
8001 case DIF_OP_STB:
8002 case DIF_OP_STH:
8003 case DIF_OP_STW:
8004 case DIF_OP_STX:
8005 if (r1 >= nregs)
8006 err += efunc(pc, "invalid register %u\n", r1);
8007 if (r2 != 0)
8008 err += efunc(pc, "non-zero reserved bits\n");
8009 if (rd >= nregs)
8010 err += efunc(pc, "invalid register %u\n", rd);
8011 if (rd == 0)
8012 err += efunc(pc, "cannot write to 0 address\n");
8013 break;
8014 case DIF_OP_CMP:
8015 case DIF_OP_SCMP:
8016 if (r1 >= nregs)
8017 err += efunc(pc, "invalid register %u\n", r1);
8018 if (r2 >= nregs)
8019 err += efunc(pc, "invalid register %u\n", r2);
8020 if (rd != 0)
8021 err += efunc(pc, "non-zero reserved bits\n");
8022 break;
8023 case DIF_OP_TST:
8024 if (r1 >= nregs)
8025 err += efunc(pc, "invalid register %u\n", r1);
8026 if (r2 != 0 || rd != 0)
8027 err += efunc(pc, "non-zero reserved bits\n");
8028 break;
8029 case DIF_OP_BA:
8030 case DIF_OP_BE:
8031 case DIF_OP_BNE:
8032 case DIF_OP_BG:
8033 case DIF_OP_BGU:
8034 case DIF_OP_BGE:
8035 case DIF_OP_BGEU:
8036 case DIF_OP_BL:
8037 case DIF_OP_BLU:
8038 case DIF_OP_BLE:
8039 case DIF_OP_BLEU:
8040 if (label >= dp->dtdo_len) {
8041 err += efunc(pc, "invalid branch target %u\n",
8042 label);
8043 }
8044 if (label <= pc) {
8045 err += efunc(pc, "backward branch to %u\n",
8046 label);
8047 }
8048 break;
8049 case DIF_OP_RET:
8050 if (r1 != 0 || r2 != 0)
8051 err += efunc(pc, "non-zero reserved bits\n");
8052 if (rd >= nregs)
8053 err += efunc(pc, "invalid register %u\n", rd);
8054 break;
8055 case DIF_OP_NOP:
8056 case DIF_OP_POPTS:
8057 case DIF_OP_FLUSHTS:
8058 if (r1 != 0 || r2 != 0 || rd != 0)
8059 err += efunc(pc, "non-zero reserved bits\n");
8060 break;
8061 case DIF_OP_SETX:
8062 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) {
8063 err += efunc(pc, "invalid integer ref %u\n",
8064 DIF_INSTR_INTEGER(instr));
8065 }
8066 if (rd >= nregs)
8067 err += efunc(pc, "invalid register %u\n", rd);
8068 if (rd == 0)
8069 err += efunc(pc, "cannot write to %r0\n");
8070 break;
8071 case DIF_OP_SETS:
8072 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) {
8073 err += efunc(pc, "invalid string ref %u\n",
8074 DIF_INSTR_STRING(instr));
8075 }
8076 if (rd >= nregs)
8077 err += efunc(pc, "invalid register %u\n", rd);
8078 if (rd == 0)
8079 err += efunc(pc, "cannot write to %r0\n");
8080 break;
8081 case DIF_OP_LDGA:
8082 case DIF_OP_LDTA:
8083 if (r1 > DIF_VAR_ARRAY_MAX)
8084 err += efunc(pc, "invalid array %u\n", r1);
8085 if (r2 >= nregs)
8086 err += efunc(pc, "invalid register %u\n", r2);
8087 if (rd >= nregs)
8088 err += efunc(pc, "invalid register %u\n", rd);
8089 if (rd == 0)
8090 err += efunc(pc, "cannot write to %r0\n");
8091 break;
8092 case DIF_OP_LDGS:
8093 case DIF_OP_LDTS:
8094 case DIF_OP_LDLS:
8095 case DIF_OP_LDGAA:
8096 case DIF_OP_LDTAA:
8097 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX)
8098 err += efunc(pc, "invalid variable %u\n", v);
8099 if (rd >= nregs)
8100 err += efunc(pc, "invalid register %u\n", rd);
8101 if (rd == 0)
8102 err += efunc(pc, "cannot write to %r0\n");
8103 break;
8104 case DIF_OP_STGS:
8105 case DIF_OP_STTS:
8106 case DIF_OP_STLS:
8107 case DIF_OP_STGAA:
8108 case DIF_OP_STTAA:
8109 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX)
8110 err += efunc(pc, "invalid variable %u\n", v);
8111 if (rs >= nregs)
8112 err += efunc(pc, "invalid register %u\n", rs);
8113 break;
8114 case DIF_OP_CALL:
8115 if (subr > DIF_SUBR_MAX)
8116 err += efunc(pc, "invalid subr %u\n", subr);
8117 if (rd >= nregs)
8118 err += efunc(pc, "invalid register %u\n", rd);
8119 if (rd == 0)
8120 err += efunc(pc, "cannot write to %r0\n");
8121
8122 if (subr == DIF_SUBR_COPYOUT ||
8123 subr == DIF_SUBR_COPYOUTSTR) {
8124 dp->dtdo_destructive = 1;
8125 }
8126 break;
8127 case DIF_OP_PUSHTR:
8128 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF)
8129 err += efunc(pc, "invalid ref type %u\n", type);
8130 if (r2 >= nregs)
8131 err += efunc(pc, "invalid register %u\n", r2);
8132 if (rs >= nregs)
8133 err += efunc(pc, "invalid register %u\n", rs);
8134 break;
8135 case DIF_OP_PUSHTV:
8136 if (type != DIF_TYPE_CTF)
8137 err += efunc(pc, "invalid val type %u\n", type);
8138 if (r2 >= nregs)
8139 err += efunc(pc, "invalid register %u\n", r2);
8140 if (rs >= nregs)
8141 err += efunc(pc, "invalid register %u\n", rs);
8142 break;
8143 default:
8144 err += efunc(pc, "invalid opcode %u\n",
8145 DIF_INSTR_OP(instr));
8146 }
8147 }
8148
8149 if (dp->dtdo_len != 0 &&
8150 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) {
8151 err += efunc(dp->dtdo_len - 1,
8152 "expected 'ret' as last DIF instruction\n");
8153 }
8154
8155 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) {
8156 /*
8157 * If we're not returning by reference, the size must be either
8158 * 0 or the size of one of the base types.
8159 */
8160 switch (dp->dtdo_rtype.dtdt_size) {
8161 case 0:
8162 case sizeof (uint8_t):
8163 case sizeof (uint16_t):
8164 case sizeof (uint32_t):
8165 case sizeof (uint64_t):
8166 break;
8167
8168 default:
8169 err += efunc(dp->dtdo_len - 1, "bad return size\n");
8170 }
8171 }
8172
8173 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) {
8174 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL;
8175 dtrace_diftype_t *vt, *et;
8176 uint_t id, ndx;
8177
8178 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL &&
8179 v->dtdv_scope != DIFV_SCOPE_THREAD &&
8180 v->dtdv_scope != DIFV_SCOPE_LOCAL) {
8181 err += efunc(i, "unrecognized variable scope %d\n",
8182 v->dtdv_scope);
8183 break;
8184 }
8185
8186 if (v->dtdv_kind != DIFV_KIND_ARRAY &&
8187 v->dtdv_kind != DIFV_KIND_SCALAR) {
8188 err += efunc(i, "unrecognized variable type %d\n",
8189 v->dtdv_kind);
8190 break;
8191 }
8192
8193 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) {
8194 err += efunc(i, "%d exceeds variable id limit\n", id);
8195 break;
8196 }
8197
8198 if (id < DIF_VAR_OTHER_UBASE)
8199 continue;
8200
8201 /*
8202 * For user-defined variables, we need to check that this
8203 * definition is identical to any previous definition that we
8204 * encountered.
8205 */
8206 ndx = id - DIF_VAR_OTHER_UBASE;
8207
8208 switch (v->dtdv_scope) {
8209 case DIFV_SCOPE_GLOBAL:
8210 if (ndx < vstate->dtvs_nglobals) {
8211 dtrace_statvar_t *svar;
8212
8213 if ((svar = vstate->dtvs_globals[ndx]) != NULL)
8214 existing = &svar->dtsv_var;
8215 }
8216
8217 break;
8218
8219 case DIFV_SCOPE_THREAD:
8220 if (ndx < vstate->dtvs_ntlocals)
8221 existing = &vstate->dtvs_tlocals[ndx];
8222 break;
8223
8224 case DIFV_SCOPE_LOCAL:
8225 if (ndx < vstate->dtvs_nlocals) {
8226 dtrace_statvar_t *svar;
8227
8228 if ((svar = vstate->dtvs_locals[ndx]) != NULL)
8229 existing = &svar->dtsv_var;
8230 }
8231
8232 break;
8233 }
8234
8235 vt = &v->dtdv_type;
8236
8237 if (vt->dtdt_flags & DIF_TF_BYREF) {
8238 if (vt->dtdt_size == 0) {
8239 err += efunc(i, "zero-sized variable\n");
8240 break;
8241 }
8242
8243 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL &&
8244 vt->dtdt_size > dtrace_global_maxsize) {
8245 err += efunc(i, "oversized by-ref global\n");
8246 break;
8247 }
8248 }
8249
8250 if (existing == NULL || existing->dtdv_id == 0)
8251 continue;
8252
8253 ASSERT(existing->dtdv_id == v->dtdv_id);
8254 ASSERT(existing->dtdv_scope == v->dtdv_scope);
8255
8256 if (existing->dtdv_kind != v->dtdv_kind)
8257 err += efunc(i, "%d changed variable kind\n", id);
8258
8259 et = &existing->dtdv_type;
8260
8261 if (vt->dtdt_flags != et->dtdt_flags) {
8262 err += efunc(i, "%d changed variable type flags\n", id);
8263 break;
8264 }
8265
8266 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) {
8267 err += efunc(i, "%d changed variable type size\n", id);
8268 break;
8269 }
8270 }
8271
8272 return (err);
8273 }
8274
8275 /*
8276 * Validate a DTrace DIF object that is to be used as a helper. Helpers
8277 * are much more constrained than normal DIFOs. Specifically, they may
8278 * not:
8279 *
8280 * 1. Make calls to subroutines other than copyin(), copyinstr() or
8281 * miscellaneous string routines
8282 * 2. Access DTrace variables other than the args[] array, and the
8283 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables.
8284 * 3. Have thread-local variables.
8285 * 4. Have dynamic variables.
8286 */
8287 static int
8288 dtrace_difo_validate_helper(dtrace_difo_t *dp)
8289 {
8290 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err;
8291 int err = 0;
8292 uint_t pc;
8293
8294 for (pc = 0; pc < dp->dtdo_len; pc++) {
8295 dif_instr_t instr = dp->dtdo_buf[pc];
8296
8297 uint_t v = DIF_INSTR_VAR(instr);
8298 uint_t subr = DIF_INSTR_SUBR(instr);
8299 uint_t op = DIF_INSTR_OP(instr);
8300
8301 switch (op) {
8302 case DIF_OP_OR:
8303 case DIF_OP_XOR:
8304 case DIF_OP_AND:
8305 case DIF_OP_SLL:
8306 case DIF_OP_SRL:
8307 case DIF_OP_SRA:
8308 case DIF_OP_SUB:
8309 case DIF_OP_ADD:
8310 case DIF_OP_MUL:
8311 case DIF_OP_SDIV:
8312 case DIF_OP_UDIV:
8313 case DIF_OP_SREM:
8314 case DIF_OP_UREM:
8315 case DIF_OP_COPYS:
8316 case DIF_OP_NOT:
8317 case DIF_OP_MOV:
8318 case DIF_OP_RLDSB:
8319 case DIF_OP_RLDSH:
8320 case DIF_OP_RLDSW:
8321 case DIF_OP_RLDUB:
8322 case DIF_OP_RLDUH:
8323 case DIF_OP_RLDUW:
8324 case DIF_OP_RLDX:
8325 case DIF_OP_ULDSB:
8326 case DIF_OP_ULDSH:
8327 case DIF_OP_ULDSW:
8328 case DIF_OP_ULDUB:
8329 case DIF_OP_ULDUH:
8330 case DIF_OP_ULDUW:
8331 case DIF_OP_ULDX:
8332 case DIF_OP_STB:
8333 case DIF_OP_STH:
8334 case DIF_OP_STW:
8335 case DIF_OP_STX:
8336 case DIF_OP_ALLOCS:
8337 case DIF_OP_CMP:
8338 case DIF_OP_SCMP:
8339 case DIF_OP_TST:
8340 case DIF_OP_BA:
8341 case DIF_OP_BE:
8342 case DIF_OP_BNE:
8343 case DIF_OP_BG:
8344 case DIF_OP_BGU:
8345 case DIF_OP_BGE:
8346 case DIF_OP_BGEU:
8347 case DIF_OP_BL:
8348 case DIF_OP_BLU:
8349 case DIF_OP_BLE:
8350 case DIF_OP_BLEU:
8351 case DIF_OP_RET:
8352 case DIF_OP_NOP:
8353 case DIF_OP_POPTS:
8354 case DIF_OP_FLUSHTS:
8355 case DIF_OP_SETX:
8356 case DIF_OP_SETS:
8357 case DIF_OP_LDGA:
8358 case DIF_OP_LDLS:
8359 case DIF_OP_STGS:
8360 case DIF_OP_STLS:
8361 case DIF_OP_PUSHTR:
8362 case DIF_OP_PUSHTV:
8363 break;
8364
8365 case DIF_OP_LDGS:
8366 if (v >= DIF_VAR_OTHER_UBASE)
8367 break;
8368
8369 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9)
8370 break;
8371
8372 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID ||
8373 v == DIF_VAR_PPID || v == DIF_VAR_TID ||
8374 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME ||
8375 v == DIF_VAR_UID || v == DIF_VAR_GID)
8376 break;
8377
8378 err += efunc(pc, "illegal variable %u\n", v);
8379 break;
8380
8381 case DIF_OP_LDTA:
8382 case DIF_OP_LDTS:
8383 case DIF_OP_LDGAA:
8384 case DIF_OP_LDTAA:
8385 err += efunc(pc, "illegal dynamic variable load\n");
8386 break;
8387
8388 case DIF_OP_STTS:
8389 case DIF_OP_STGAA:
8390 case DIF_OP_STTAA:
8391 err += efunc(pc, "illegal dynamic variable store\n");
8392 break;
8393
8394 case DIF_OP_CALL:
8395 if (subr == DIF_SUBR_ALLOCA ||
8396 subr == DIF_SUBR_BCOPY ||
8397 subr == DIF_SUBR_COPYIN ||
8398 subr == DIF_SUBR_COPYINTO ||
8399 subr == DIF_SUBR_COPYINSTR ||
8400 subr == DIF_SUBR_INDEX ||
8401 subr == DIF_SUBR_INET_NTOA ||
8402 subr == DIF_SUBR_INET_NTOA6 ||
8403 subr == DIF_SUBR_INET_NTOP ||
8404 subr == DIF_SUBR_LLTOSTR ||
8405 subr == DIF_SUBR_RINDEX ||
8406 subr == DIF_SUBR_STRCHR ||
8407 subr == DIF_SUBR_STRJOIN ||
8408 subr == DIF_SUBR_STRRCHR ||
8409 subr == DIF_SUBR_STRSTR ||
8410 subr == DIF_SUBR_HTONS ||
8411 subr == DIF_SUBR_HTONL ||
8412 subr == DIF_SUBR_HTONLL ||
8413 subr == DIF_SUBR_NTOHS ||
8414 subr == DIF_SUBR_NTOHL ||
8415 subr == DIF_SUBR_NTOHLL)
8416 break;
8417
8418 err += efunc(pc, "invalid subr %u\n", subr);
8419 break;
8420
8421 default:
8422 err += efunc(pc, "invalid opcode %u\n",
8423 DIF_INSTR_OP(instr));
8424 }
8425 }
8426
8427 return (err);
8428 }
8429
8430 /*
8431 * Returns 1 if the expression in the DIF object can be cached on a per-thread
8432 * basis; 0 if not.
8433 */
8434 static int
8435 dtrace_difo_cacheable(dtrace_difo_t *dp)
8436 {
8437 int i;
8438
8439 if (dp == NULL)
8440 return (0);
8441
8442 for (i = 0; i < dp->dtdo_varlen; i++) {
8443 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8444
8445 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL)
8446 continue;
8447
8448 switch (v->dtdv_id) {
8449 case DIF_VAR_CURTHREAD:
8450 case DIF_VAR_PID:
8451 case DIF_VAR_TID:
8452 case DIF_VAR_EXECNAME:
8453 case DIF_VAR_ZONENAME:
8454 break;
8455
8456 default:
8457 return (0);
8458 }
8459 }
8460
8461 /*
8462 * This DIF object may be cacheable. Now we need to look for any
8463 * array loading instructions, any memory loading instructions, or
8464 * any stores to thread-local variables.
8465 */
8466 for (i = 0; i < dp->dtdo_len; i++) {
8467 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]);
8468
8469 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) ||
8470 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) ||
8471 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) ||
8472 op == DIF_OP_LDGA || op == DIF_OP_STTS)
8473 return (0);
8474 }
8475
8476 return (1);
8477 }
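/*
 * Illustrative examples (not exhaustive): a predicate such as
 *
 *	/pid == 1234/
 *
 * compiles to DIF that reads only DIF_VAR_PID and contains no loads, so it
 * is cacheable on a per-thread basis; a predicate that dereferences memory,
 * e.g. /curpsinfo->pr_pid == 1234/, requires load instructions and is not.
 */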
8478
8479 static void
8480 dtrace_difo_hold(dtrace_difo_t *dp)
8481 {
8482 int i;
8483
8484 ASSERT(MUTEX_HELD(&dtrace_lock));
8485
8486 dp->dtdo_refcnt++;
8487 ASSERT(dp->dtdo_refcnt != 0);
8488
8489 /*
8490 * We need to check this DIF object for references to the variable
8491 * DIF_VAR_VTIMESTAMP.
8492 */
8493 for (i = 0; i < dp->dtdo_varlen; i++) {
8494 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8495
8496 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
8497 continue;
8498
8499 if (dtrace_vtime_references++ == 0)
8500 dtrace_vtime_enable();
8501 }
8502 }
8503
8504 /*
8505 * This routine calculates the dynamic variable chunksize for a given DIF
8506 * object. The calculation is not fool-proof, and can probably be tricked by
8507 * malicious DIF -- but it works for all compiler-generated DIF. Because this
8508 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail
8509 * if a dynamic variable size exceeds the chunksize.
8510 */
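/*
 * Illustrative sketch of the arithmetic below (assumed values): for a
 * thread-local associative store such as
 *
 *	self->seen[execname] = 1;
 *
 * one string key is pushed (sized at dtrace_strsize_default when no explicit
 * size is supplied) and DIF_OP_STTAA contributes two further zero-sized
 * keys, so nkeys is 3 and the candidate chunk size is roughly
 *
 *	sizeof (dtrace_dynvar_t) + 2 * sizeof (dtrace_key_t)
 *	    + P2ROUNDUP(dtrace_strsize_default, sizeof (uint64_t))
 *	    + sizeof (uint64_t)		(the stored value, assumed 64-bit)
 *
 * rounded up to an 8-byte multiple; dtds_chunksize only grows if this
 * exceeds its current value.
 */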
8511 static void
8512 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8513 {
8514 uint64_t sval;
8515 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */
8516 const dif_instr_t *text = dp->dtdo_buf;
8517 uint_t pc, srd = 0;
8518 uint_t ttop = 0;
8519 size_t size, ksize;
8520 uint_t id, i;
8521
8522 for (pc = 0; pc < dp->dtdo_len; pc++) {
8523 dif_instr_t instr = text[pc];
8524 uint_t op = DIF_INSTR_OP(instr);
8525 uint_t rd = DIF_INSTR_RD(instr);
8526 uint_t r1 = DIF_INSTR_R1(instr);
8527 uint_t nkeys = 0;
8528 uchar_t scope;
8529
8530 dtrace_key_t *key = tupregs;
8531
8532 switch (op) {
8533 case DIF_OP_SETX:
8534 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)];
8535 srd = rd;
8536 continue;
8537
8538 case DIF_OP_STTS:
8539 key = &tupregs[DIF_DTR_NREGS];
8540 key[0].dttk_size = 0;
8541 key[1].dttk_size = 0;
8542 nkeys = 2;
8543 scope = DIFV_SCOPE_THREAD;
8544 break;
8545
8546 case DIF_OP_STGAA:
8547 case DIF_OP_STTAA:
8548 nkeys = ttop;
8549
8550 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA)
8551 key[nkeys++].dttk_size = 0;
8552
8553 key[nkeys++].dttk_size = 0;
8554
8555 if (op == DIF_OP_STTAA) {
8556 scope = DIFV_SCOPE_THREAD;
8557 } else {
8558 scope = DIFV_SCOPE_GLOBAL;
8559 }
8560
8561 break;
8562
8563 case DIF_OP_PUSHTR:
8564 if (ttop == DIF_DTR_NREGS)
8565 return;
8566
8567 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) {
8568 /*
8569 * If the register for the size of the "pushtr"
8570 * is %r0 (or the value is 0) and the type is
8571 * a string, we'll use the system-wide default
8572 * string size.
8573 */
8574 tupregs[ttop++].dttk_size =
8575 dtrace_strsize_default;
8576 } else {
8577 if (srd == 0)
8578 return;
8579
8580 tupregs[ttop++].dttk_size = sval;
8581 }
8582
8583 break;
8584
8585 case DIF_OP_PUSHTV:
8586 if (ttop == DIF_DTR_NREGS)
8587 return;
8588
8589 tupregs[ttop++].dttk_size = 0;
8590 break;
8591
8592 case DIF_OP_FLUSHTS:
8593 ttop = 0;
8594 break;
8595
8596 case DIF_OP_POPTS:
8597 if (ttop != 0)
8598 ttop--;
8599 break;
8600 }
8601
8602 sval = 0;
8603 srd = 0;
8604
8605 if (nkeys == 0)
8606 continue;
8607
8608 /*
8609 * We have a dynamic variable allocation; calculate its size.
8610 */
8611 for (ksize = 0, i = 0; i < nkeys; i++)
8612 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));
8613
8614 size = sizeof (dtrace_dynvar_t);
8615 size += sizeof (dtrace_key_t) * (nkeys - 1);
8616 size += ksize;
8617
8618 /*
8619 * Now we need to determine the size of the stored data.
8620 */
8621 id = DIF_INSTR_VAR(instr);
8622
8623 for (i = 0; i < dp->dtdo_varlen; i++) {
8624 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8625
8626 if (v->dtdv_id == id && v->dtdv_scope == scope) {
8627 size += v->dtdv_type.dtdt_size;
8628 break;
8629 }
8630 }
8631
8632 if (i == dp->dtdo_varlen)
8633 return;
8634
8635 /*
8636 * We have the size. If this is larger than the chunk size
8637 * for our dynamic variable state, reset the chunk size.
8638 */
8639 size = P2ROUNDUP(size, sizeof (uint64_t));
8640
8641 if (size > vstate->dtvs_dynvars.dtds_chunksize)
8642 vstate->dtvs_dynvars.dtds_chunksize = size;
8643 }
8644 }
8645
8646 static void
8647 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8648 {
8649 int i, oldsvars, osz, nsz, otlocals, ntlocals;
8650 uint_t id;
8651
8652 ASSERT(MUTEX_HELD(&dtrace_lock));
8653 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0);
8654
8655 for (i = 0; i < dp->dtdo_varlen; i++) {
8656 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8657 dtrace_statvar_t *svar, ***svarp;
8658 size_t dsize = 0;
8659 uint8_t scope = v->dtdv_scope;
8660 int *np;
8661
8662 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
8663 continue;
8664
8665 id -= DIF_VAR_OTHER_UBASE;
8666
8667 switch (scope) {
8668 case DIFV_SCOPE_THREAD:
8669 while (id >= (otlocals = vstate->dtvs_ntlocals)) {
8670 dtrace_difv_t *tlocals;
8671
8672 if ((ntlocals = (otlocals << 1)) == 0)
8673 ntlocals = 1;
8674
8675 osz = otlocals * sizeof (dtrace_difv_t);
8676 nsz = ntlocals * sizeof (dtrace_difv_t);
8677
8678 tlocals = kmem_zalloc(nsz, KM_SLEEP);
8679
8680 if (osz != 0) {
8681 bcopy(vstate->dtvs_tlocals,
8682 tlocals, osz);
8683 kmem_free(vstate->dtvs_tlocals, osz);
8684 }
8685
8686 vstate->dtvs_tlocals = tlocals;
8687 vstate->dtvs_ntlocals = ntlocals;
8688 }
8689
8690 vstate->dtvs_tlocals[id] = *v;
8691 continue;
8692
8693 case DIFV_SCOPE_LOCAL:
8694 np = &vstate->dtvs_nlocals;
8695 svarp = &vstate->dtvs_locals;
8696
8697 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
8698 dsize = NCPU * (v->dtdv_type.dtdt_size +
8699 sizeof (uint64_t));
8700 else
8701 dsize = NCPU * sizeof (uint64_t);
8702
8703 break;
8704
8705 case DIFV_SCOPE_GLOBAL:
8706 np = &vstate->dtvs_nglobals;
8707 svarp = &vstate->dtvs_globals;
8708
8709 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF)
8710 dsize = v->dtdv_type.dtdt_size +
8711 sizeof (uint64_t);
8712
8713 break;
8714
8715 default:
8716 ASSERT(0);
8717 }
8718
8719 while (id >= (oldsvars = *np)) {
8720 dtrace_statvar_t **statics;
8721 int newsvars, oldsize, newsize;
8722
8723 if ((newsvars = (oldsvars << 1)) == 0)
8724 newsvars = 1;
8725
8726 oldsize = oldsvars * sizeof (dtrace_statvar_t *);
8727 newsize = newsvars * sizeof (dtrace_statvar_t *);
8728
8729 statics = kmem_zalloc(newsize, KM_SLEEP);
8730
8731 if (oldsize != 0) {
8732 bcopy(*svarp, statics, oldsize);
8733 kmem_free(*svarp, oldsize);
8734 }
8735
8736 *svarp = statics;
8737 *np = newsvars;
8738 }
8739
8740 if ((svar = (*svarp)[id]) == NULL) {
8741 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP);
8742 svar->dtsv_var = *v;
8743
8744 if ((svar->dtsv_size = dsize) != 0) {
8745 svar->dtsv_data = (uint64_t)(uintptr_t)
8746 kmem_zalloc(dsize, KM_SLEEP);
8747 }
8748
8749 (*svarp)[id] = svar;
8750 }
8751
8752 svar->dtsv_refcnt++;
8753 }
8754
8755 dtrace_difo_chunksize(dp, vstate);
8756 dtrace_difo_hold(dp);
8757 }
8758
8759 static dtrace_difo_t *
8760 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8761 {
8762 dtrace_difo_t *new;
8763 size_t sz;
8764
8765 ASSERT(dp->dtdo_buf != NULL);
8766 ASSERT(dp->dtdo_refcnt != 0);
8767
8768 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
8769
8770 ASSERT(dp->dtdo_buf != NULL);
8771 sz = dp->dtdo_len * sizeof (dif_instr_t);
8772 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP);
8773 bcopy(dp->dtdo_buf, new->dtdo_buf, sz);
8774 new->dtdo_len = dp->dtdo_len;
8775
8776 if (dp->dtdo_strtab != NULL) {
8777 ASSERT(dp->dtdo_strlen != 0);
8778 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP);
8779 bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen);
8780 new->dtdo_strlen = dp->dtdo_strlen;
8781 }
8782
8783 if (dp->dtdo_inttab != NULL) {
8784 ASSERT(dp->dtdo_intlen != 0);
8785 sz = dp->dtdo_intlen * sizeof (uint64_t);
8786 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP);
8787 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz);
8788 new->dtdo_intlen = dp->dtdo_intlen;
8789 }
8790
8791 if (dp->dtdo_vartab != NULL) {
8792 ASSERT(dp->dtdo_varlen != 0);
8793 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t);
8794 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP);
8795 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz);
8796 new->dtdo_varlen = dp->dtdo_varlen;
8797 }
8798
8799 dtrace_difo_init(new, vstate);
8800 return (new);
8801 }
8802
8803 static void
8804 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8805 {
8806 int i;
8807
8808 ASSERT(dp->dtdo_refcnt == 0);
8809
8810 for (i = 0; i < dp->dtdo_varlen; i++) {
8811 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8812 dtrace_statvar_t *svar, **svarp;
8813 uint_t id;
8814 uint8_t scope = v->dtdv_scope;
8815 int *np;
8816
8817 switch (scope) {
8818 case DIFV_SCOPE_THREAD:
8819 continue;
8820
8821 case DIFV_SCOPE_LOCAL:
8822 np = &vstate->dtvs_nlocals;
8823 svarp = vstate->dtvs_locals;
8824 break;
8825
8826 case DIFV_SCOPE_GLOBAL:
8827 np = &vstate->dtvs_nglobals;
8828 svarp = vstate->dtvs_globals;
8829 break;
8830
8831 default:
8832 ASSERT(0);
8833 }
8834
8835 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE)
8836 continue;
8837
8838 id -= DIF_VAR_OTHER_UBASE;
8839 ASSERT(id < *np);
8840
8841 svar = svarp[id];
8842 ASSERT(svar != NULL);
8843 ASSERT(svar->dtsv_refcnt > 0);
8844
8845 if (--svar->dtsv_refcnt > 0)
8846 continue;
8847
8848 if (svar->dtsv_size != 0) {
8849 ASSERT(svar->dtsv_data != NULL);
8850 kmem_free((void *)(uintptr_t)svar->dtsv_data,
8851 svar->dtsv_size);
8852 }
8853
8854 kmem_free(svar, sizeof (dtrace_statvar_t));
8855 svarp[id] = NULL;
8856 }
8857
8858 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
8859 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
8860 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
8861 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
8862
8863 kmem_free(dp, sizeof (dtrace_difo_t));
8864 }
8865
8866 static void
8867 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate)
8868 {
8869 int i;
8870
8871 ASSERT(MUTEX_HELD(&dtrace_lock));
8872 ASSERT(dp->dtdo_refcnt != 0);
8873
8874 for (i = 0; i < dp->dtdo_varlen; i++) {
8875 dtrace_difv_t *v = &dp->dtdo_vartab[i];
8876
8877 if (v->dtdv_id != DIF_VAR_VTIMESTAMP)
8878 continue;
8879
8880 ASSERT(dtrace_vtime_references > 0);
8881 if (--dtrace_vtime_references == 0)
8882 dtrace_vtime_disable();
8883 }
8884
8885 if (--dp->dtdo_refcnt == 0)
8886 dtrace_difo_destroy(dp, vstate);
8887 }
8888
8889 /*
8890 * DTrace Format Functions
8891 */
8892 static uint16_t
8893 dtrace_format_add(dtrace_state_t *state, char *str)
8894 {
8895 char *fmt, **new;
8896 uint16_t ndx, len = strlen(str) + 1;
8897
8898 fmt = kmem_zalloc(len, KM_SLEEP);
8899 bcopy(str, fmt, len);
8900
8901 for (ndx = 0; ndx < state->dts_nformats; ndx++) {
8902 if (state->dts_formats[ndx] == NULL) {
8903 state->dts_formats[ndx] = fmt;
8904 return (ndx + 1);
8905 }
8906 }
8907
8908 if (state->dts_nformats == USHRT_MAX) {
8909 /*
8910 * This is only likely if a denial-of-service attack is being
8911 * attempted. As such, it's okay to fail silently here.
8912 */
8913 kmem_free(fmt, len);
8914 return (0);
8915 }
8916
8917 /*
8918 * For simplicity, we always resize the formats array to be exactly the
8919 * number of formats.
8920 */
8921 ndx = state->dts_nformats++;
8922 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP);
8923
8924 if (state->dts_formats != NULL) {
8925 ASSERT(ndx != 0);
8926 bcopy(state->dts_formats, new, ndx * sizeof (char *));
8927 kmem_free(state->dts_formats, ndx * sizeof (char *));
8928 }
8929
8930 state->dts_formats = new;
8931 state->dts_formats[ndx] = fmt;
8932
8933 return (ndx + 1);
8934 }
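/*
 * Usage note (derived from the code above): format handles are 1-based
 * indices into dts_formats, with 0 reserved to mean "no format"; that is
 * why dtrace_format_add() returns ndx + 1 and dtrace_format_remove() below
 * indexes the array with format - 1.
 */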
8935
8936 static void
8937 dtrace_format_remove(dtrace_state_t *state, uint16_t format)
8938 {
8939 char *fmt;
8940
8941 ASSERT(state->dts_formats != NULL);
8942 ASSERT(format <= state->dts_nformats);
8943 ASSERT(state->dts_formats[format - 1] != NULL);
8944
8945 fmt = state->dts_formats[format - 1];
8946 kmem_free(fmt, strlen(fmt) + 1);
8947 state->dts_formats[format - 1] = NULL;
8948 }
8949
8950 static void
8951 dtrace_format_destroy(dtrace_state_t *state)
8952 {
8953 int i;
8954
8955 if (state->dts_nformats == 0) {
8956 ASSERT(state->dts_formats == NULL);
8957 return;
8958 }
8959
8960 ASSERT(state->dts_formats != NULL);
8961
8962 for (i = 0; i < state->dts_nformats; i++) {
8963 char *fmt = state->dts_formats[i];
8964
8965 if (fmt == NULL)
8966 continue;
8967
8968 kmem_free(fmt, strlen(fmt) + 1);
8969 }
8970
8971 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *));
8972 state->dts_nformats = 0;
8973 state->dts_formats = NULL;
8974 }
8975
8976 /*
8977 * DTrace Predicate Functions
8978 */
8979 static dtrace_predicate_t *
8980 dtrace_predicate_create(dtrace_difo_t *dp)
8981 {
8982 dtrace_predicate_t *pred;
8983
8984 ASSERT(MUTEX_HELD(&dtrace_lock));
8985 ASSERT(dp->dtdo_refcnt != 0);
8986
8987 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP);
8988 pred->dtp_difo = dp;
8989 pred->dtp_refcnt = 1;
8990
8991 if (!dtrace_difo_cacheable(dp))
8992 return (pred);
8993
8994 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) {
8995 /*
8996 * This is only theoretically possible -- we have had 2^32
8997 * cacheable predicates on this machine. We cannot allow any
8998 * more predicates to become cacheable: as unlikely as it is,
8999 * there may be a thread caching a (now stale) predicate cache
9000 * ID. (N.B.: the temptation is being successfully resisted to
9001 * have this cmn_err() "Holy shit -- we executed this code!")
9002 */
9003 return (pred);
9004 }
9005
9006 pred->dtp_cacheid = dtrace_predcache_id++;
9007
9008 return (pred);
9009 }
9010
9011 static void
9012 dtrace_predicate_hold(dtrace_predicate_t *pred)
9013 {
9014 ASSERT(MUTEX_HELD(&dtrace_lock));
9015 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0);
9016 ASSERT(pred->dtp_refcnt > 0);
9017
9018 pred->dtp_refcnt++;
9019 }
9020
9021 static void
9022 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate)
9023 {
9024 dtrace_difo_t *dp = pred->dtp_difo;
9025
9026 ASSERT(MUTEX_HELD(&dtrace_lock));
9027 ASSERT(dp != NULL && dp->dtdo_refcnt != 0);
9028 ASSERT(pred->dtp_refcnt > 0);
9029
9030 if (--pred->dtp_refcnt == 0) {
9031 dtrace_difo_release(pred->dtp_difo, vstate);
9032 kmem_free(pred, sizeof (dtrace_predicate_t));
9033 }
9034 }
9035
9036 /*
9037 * DTrace Action Description Functions
9038 */
9039 static dtrace_actdesc_t *
9040 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple,
9041 uint64_t uarg, uint64_t arg)
9042 {
9043 dtrace_actdesc_t *act;
9044
9045 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL &&
9046 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA));
9047
9048 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP);
9049 act->dtad_kind = kind;
9050 act->dtad_ntuple = ntuple;
9051 act->dtad_uarg = uarg;
9052 act->dtad_arg = arg;
9053 act->dtad_refcnt = 1;
9054
9055 return (act);
9056 }
9057
9058 static void
9059 dtrace_actdesc_hold(dtrace_actdesc_t *act)
9060 {
9061 ASSERT(act->dtad_refcnt >= 1);
9062 act->dtad_refcnt++;
9063 }
9064
9065 static void
9066 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate)
9067 {
9068 dtrace_actkind_t kind = act->dtad_kind;
9069 dtrace_difo_t *dp;
9070
9071 ASSERT(act->dtad_refcnt >= 1);
9072
9073 if (--act->dtad_refcnt != 0)
9074 return;
9075
9076 if ((dp = act->dtad_difo) != NULL)
9077 dtrace_difo_release(dp, vstate);
9078
9079 if (DTRACEACT_ISPRINTFLIKE(kind)) {
9080 char *str = (char *)(uintptr_t)act->dtad_arg;
9081
9082 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) ||
9083 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA));
9084
9085 if (str != NULL)
9086 kmem_free(str, strlen(str) + 1);
9087 }
9088
9089 kmem_free(act, sizeof (dtrace_actdesc_t));
9090 }
9091
9092 /*
9093 * DTrace ECB Functions
9094 */
9095 static dtrace_ecb_t *
9096 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe)
9097 {
9098 dtrace_ecb_t *ecb;
9099 dtrace_epid_t epid;
9100
9101 ASSERT(MUTEX_HELD(&dtrace_lock));
9102
9103 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP);
9104 ecb->dte_predicate = NULL;
9105 ecb->dte_probe = probe;
9106
9107 /*
9108 * The default size is the size of the default action: recording
9109 * the epid.
9110 */
9111 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9112 ecb->dte_alignment = sizeof (dtrace_epid_t);
9113
9114 epid = state->dts_epid++;
9115
9116 if (epid - 1 >= state->dts_necbs) {
9117 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs;
9118 int necbs = state->dts_necbs << 1;
9119
9120 ASSERT(epid == state->dts_necbs + 1);
9121
9122 if (necbs == 0) {
9123 ASSERT(oecbs == NULL);
9124 necbs = 1;
9125 }
9126
9127 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP);
9128
9129 if (oecbs != NULL)
9130 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs));
9131
9132 dtrace_membar_producer();
9133 state->dts_ecbs = ecbs;
9134
9135 if (oecbs != NULL) {
9136 /*
9137 * If this state is active, we must dtrace_sync()
9138 * before we can free the old dts_ecbs array: we're
9139 * coming in hot, and there may be active ring
9140 * buffer processing (which indexes into the dts_ecbs
9141 * array) on another CPU.
9142 */
9143 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
9144 dtrace_sync();
9145
9146 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs));
9147 }
9148
9149 dtrace_membar_producer();
9150 state->dts_necbs = necbs;
9151 }
9152
9153 ecb->dte_state = state;
9154
9155 ASSERT(state->dts_ecbs[epid - 1] == NULL);
9156 dtrace_membar_producer();
9157 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb;
9158
9159 return (ecb);
9160 }
9161
9162 static int
9163 dtrace_ecb_enable(dtrace_ecb_t *ecb)
9164 {
9165 dtrace_probe_t *probe = ecb->dte_probe;
9166
9167 ASSERT(MUTEX_HELD(&cpu_lock));
9168 ASSERT(MUTEX_HELD(&dtrace_lock));
9169 ASSERT(ecb->dte_next == NULL);
9170
9171 if (probe == NULL) {
9172 /*
9173 * This is the NULL probe -- there's nothing to do.
9174 */
9175 return (0);
9176 }
9177
9178 if (probe->dtpr_ecb == NULL) {
9179 dtrace_provider_t *prov = probe->dtpr_provider;
9180
9181 /*
9182 * We're the first ECB on this probe.
9183 */
9184 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb;
9185
9186 if (ecb->dte_predicate != NULL)
9187 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid;
9188
9189 return (prov->dtpv_pops.dtps_enable(prov->dtpv_arg,
9190 probe->dtpr_id, probe->dtpr_arg));
9191 } else {
9192 /*
9193 * This probe is already active. Swing the last pointer to
9194 * point to the new ECB, and issue a dtrace_sync() to assure
9195 * that all CPUs have seen the change.
9196 */
9197 ASSERT(probe->dtpr_ecb_last != NULL);
9198 probe->dtpr_ecb_last->dte_next = ecb;
9199 probe->dtpr_ecb_last = ecb;
9200 probe->dtpr_predcache = 0;
9201
9202 dtrace_sync();
9203 return (0);
9204 }
9205 }
9206
9207 static void
9208 dtrace_ecb_resize(dtrace_ecb_t *ecb)
9209 {
9210 uint32_t maxalign = sizeof (dtrace_epid_t);
9211 uint32_t align = sizeof (uint8_t), offs, diff;
9212 dtrace_action_t *act;
9213 int wastuple = 0;
9214 uint32_t aggbase = UINT32_MAX;
9215 dtrace_state_t *state = ecb->dte_state;
9216
9217 /*
9218 * If we record anything, we always record the epid. (And we always
9219 * record it first.)
9220 */
9221 offs = sizeof (dtrace_epid_t);
9222 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_epid_t);
9223
9224 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
9225 dtrace_recdesc_t *rec = &act->dta_rec;
9226
9227 if ((align = rec->dtrd_alignment) > maxalign)
9228 maxalign = align;
9229
9230 if (!wastuple && act->dta_intuple) {
9231 /*
9232 * This is the first record in a tuple. Align the
9233 * offset to be at offset 4 in an 8-byte aligned
9234 * block.
9235 */
9236 diff = offs + sizeof (dtrace_aggid_t);
9237
9238 if (diff = (diff & (sizeof (uint64_t) - 1)))
9239 offs += sizeof (uint64_t) - diff;
9240
9241 aggbase = offs - sizeof (dtrace_aggid_t);
9242 ASSERT(!(aggbase & (sizeof (uint64_t) - 1)));
9243 }
9244
9245 /*LINTED*/
9246 if (rec->dtrd_size != 0 && (diff = (offs & (align - 1)))) {
9247 /*
9248 * The current offset is not properly aligned; align it.
9249 */
9250 offs += align - diff;
9251 }
9252
9253 rec->dtrd_offset = offs;
9254
9255 if (offs + rec->dtrd_size > ecb->dte_needed) {
9256 ecb->dte_needed = offs + rec->dtrd_size;
9257
9258 if (ecb->dte_needed > state->dts_needed)
9259 state->dts_needed = ecb->dte_needed;
9260 }
9261
9262 if (DTRACEACT_ISAGG(act->dta_kind)) {
9263 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9264 dtrace_action_t *first = agg->dtag_first, *prev;
9265
9266 ASSERT(rec->dtrd_size != 0 && first != NULL);
9267 ASSERT(wastuple);
9268 ASSERT(aggbase != UINT32_MAX);
9269
9270 agg->dtag_base = aggbase;
9271
9272 while ((prev = first->dta_prev) != NULL &&
9273 DTRACEACT_ISAGG(prev->dta_kind)) {
9274 agg = (dtrace_aggregation_t *)prev;
9275 first = agg->dtag_first;
9276 }
9277
9278 if (prev != NULL) {
9279 offs = prev->dta_rec.dtrd_offset +
9280 prev->dta_rec.dtrd_size;
9281 } else {
9282 offs = sizeof (dtrace_epid_t);
9283 }
9284 wastuple = 0;
9285 } else {
9286 if (!act->dta_intuple)
9287 ecb->dte_size = offs + rec->dtrd_size;
9288
9289 offs += rec->dtrd_size;
9290 }
9291
9292 wastuple = act->dta_intuple;
9293 }
9294
9295 if ((act = ecb->dte_action) != NULL &&
9296 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) &&
9297 ecb->dte_size == sizeof (dtrace_epid_t)) {
9298 /*
9299 * If the size is still sizeof (dtrace_epid_t), then all
9300 * actions store no data; set the size to 0.
9301 */
9302 ecb->dte_alignment = maxalign;
9303 ecb->dte_size = 0;
9304
9305 /*
9306 * If the needed space is still sizeof (dtrace_epid_t), then
9307 * all actions need no additional space; set the needed
9308 * size to 0.
9309 */
9310 if (ecb->dte_needed == sizeof (dtrace_epid_t))
9311 ecb->dte_needed = 0;
9312
9313 return;
9314 }
9315
9316 /*
9317 * Set our alignment, and make sure that the dte_size and dte_needed
9318 * are aligned to the size of an EPID.
9319 */
9320 ecb->dte_alignment = maxalign;
9321 ecb->dte_size = (ecb->dte_size + (sizeof (dtrace_epid_t) - 1)) &
9322 ~(sizeof (dtrace_epid_t) - 1);
9323 ecb->dte_needed = (ecb->dte_needed + (sizeof (dtrace_epid_t) - 1)) &
9324 ~(sizeof (dtrace_epid_t) - 1);
9325 ASSERT(ecb->dte_size <= ecb->dte_needed);
9326 }
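/*
 * Worked example of the final rounding above (illustrative, assuming a
 * 4-byte dtrace_epid_t): an ECB whose actions leave dte_size at 13 bytes is
 * padded to
 *
 *	(13 + (sizeof (dtrace_epid_t) - 1)) & ~(sizeof (dtrace_epid_t) - 1)
 *	    == (13 + 3) & ~3 == 16
 *
 * so that every record in the principal buffer begins on an EPID-aligned
 * boundary.
 */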
9327
9328 static dtrace_action_t *
9329 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9330 {
9331 dtrace_aggregation_t *agg;
9332 size_t size = sizeof (uint64_t);
9333 int ntuple = desc->dtad_ntuple;
9334 dtrace_action_t *act;
9335 dtrace_recdesc_t *frec;
9336 dtrace_aggid_t aggid;
9337 dtrace_state_t *state = ecb->dte_state;
9338
9339 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP);
9340 agg->dtag_ecb = ecb;
9341
9342 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind));
9343
9344 switch (desc->dtad_kind) {
9345 case DTRACEAGG_MIN:
9346 agg->dtag_initial = INT64_MAX;
9347 agg->dtag_aggregate = dtrace_aggregate_min;
9348 break;
9349
9350 case DTRACEAGG_MAX:
9351 agg->dtag_initial = INT64_MIN;
9352 agg->dtag_aggregate = dtrace_aggregate_max;
9353 break;
9354
9355 case DTRACEAGG_COUNT:
9356 agg->dtag_aggregate = dtrace_aggregate_count;
9357 break;
9358
9359 case DTRACEAGG_QUANTIZE:
9360 agg->dtag_aggregate = dtrace_aggregate_quantize;
9361 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) *
9362 sizeof (uint64_t);
9363 break;
9364
9365 case DTRACEAGG_LQUANTIZE: {
9366 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg);
9367 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg);
9368
9369 agg->dtag_initial = desc->dtad_arg;
9370 agg->dtag_aggregate = dtrace_aggregate_lquantize;
9371
9372 if (step == 0 || levels == 0)
9373 goto err;
9374
9375 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t);
9376 break;
9377 }
9378
9379 case DTRACEAGG_AVG:
9380 agg->dtag_aggregate = dtrace_aggregate_avg;
9381 size = sizeof (uint64_t) * 2;
9382 break;
9383
9384 case DTRACEAGG_STDDEV:
9385 agg->dtag_aggregate = dtrace_aggregate_stddev;
9386 size = sizeof (uint64_t) * 4;
9387 break;
9388
9389 case DTRACEAGG_SUM:
9390 agg->dtag_aggregate = dtrace_aggregate_sum;
9391 break;
9392
9393 default:
9394 goto err;
9395 }
9396
9397 agg->dtag_action.dta_rec.dtrd_size = size;
9398
9399 if (ntuple == 0)
9400 goto err;
9401
9402 /*
9403 * We must make sure that we have enough actions for the n-tuple.
9404 */
9405 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) {
9406 if (DTRACEACT_ISAGG(act->dta_kind))
9407 break;
9408
9409 if (--ntuple == 0) {
9410 /*
9411 * This is the action with which our n-tuple begins.
9412 */
9413 agg->dtag_first = act;
9414 goto success;
9415 }
9416 }
9417
9418 /*
9419 * This n-tuple is short by ntuple elements. Return failure.
9420 */
9421 ASSERT(ntuple != 0);
9422 err:
9423 kmem_free(agg, sizeof (dtrace_aggregation_t));
9424 return (NULL);
9425
9426 success:
9427 /*
9428 * If the last action in the tuple has a size of zero, it's actually
9429 * an expression argument for the aggregating action.
9430 */
9431 ASSERT(ecb->dte_action_last != NULL);
9432 act = ecb->dte_action_last;
9433
9434 if (act->dta_kind == DTRACEACT_DIFEXPR) {
9435 ASSERT(act->dta_difo != NULL);
9436
9437 if (act->dta_difo->dtdo_rtype.dtdt_size == 0)
9438 agg->dtag_hasarg = 1;
9439 }
9440
9441 /*
9442 * We need to allocate an id for this aggregation.
9443 */
9444 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1,
9445 VM_BESTFIT | VM_SLEEP);
9446
9447 if (aggid - 1 >= state->dts_naggregations) {
9448 dtrace_aggregation_t **oaggs = state->dts_aggregations;
9449 dtrace_aggregation_t **aggs;
9450 int naggs = state->dts_naggregations << 1;
9451 int onaggs = state->dts_naggregations;
9452
9453 ASSERT(aggid == state->dts_naggregations + 1);
9454
9455 if (naggs == 0) {
9456 ASSERT(oaggs == NULL);
9457 naggs = 1;
9458 }
9459
9460 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP);
9461
9462 if (oaggs != NULL) {
9463 bcopy(oaggs, aggs, onaggs * sizeof (*aggs));
9464 kmem_free(oaggs, onaggs * sizeof (*aggs));
9465 }
9466
9467 state->dts_aggregations = aggs;
9468 state->dts_naggregations = naggs;
9469 }
9470
9471 ASSERT(state->dts_aggregations[aggid - 1] == NULL);
9472 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg;
9473
9474 frec = &agg->dtag_first->dta_rec;
9475 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t))
9476 frec->dtrd_alignment = sizeof (dtrace_aggid_t);
9477
9478 for (act = agg->dtag_first; act != NULL; act = act->dta_next) {
9479 ASSERT(!act->dta_intuple);
9480 act->dta_intuple = 1;
9481 }
9482
9483 return (&agg->dtag_action);
9484 }
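/*
 * Sizing example for the switch above (illustrative): quantize() keeps one
 * bucket per positive power of two, one per negative power of two, and one
 * for zero, hence
 *
 *	(((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * sizeof (uint64_t)
 *	    == ((64 - 1) * 2 + 1) * 8 == 1016 bytes
 *
 * per aggregation value, while avg() and stddev() keep two and four uint64_t
 * accumulators respectively.
 */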
9485
9486 static void
9487 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act)
9488 {
9489 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act;
9490 dtrace_state_t *state = ecb->dte_state;
9491 dtrace_aggid_t aggid = agg->dtag_id;
9492
9493 ASSERT(DTRACEACT_ISAGG(act->dta_kind));
9494 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1);
9495
9496 ASSERT(state->dts_aggregations[aggid - 1] == agg);
9497 state->dts_aggregations[aggid - 1] = NULL;
9498
9499 kmem_free(agg, sizeof (dtrace_aggregation_t));
9500 }
9501
9502 static int
9503 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc)
9504 {
9505 dtrace_action_t *action, *last;
9506 dtrace_difo_t *dp = desc->dtad_difo;
9507 uint32_t size = 0, align = sizeof (uint8_t), mask;
9508 uint16_t format = 0;
9509 dtrace_recdesc_t *rec;
9510 dtrace_state_t *state = ecb->dte_state;
9511 dtrace_optval_t *opt = state->dts_options, nframes, strsize;
9512 uint64_t arg = desc->dtad_arg;
9513
9514 ASSERT(MUTEX_HELD(&dtrace_lock));
9515 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1);
9516
9517 if (DTRACEACT_ISAGG(desc->dtad_kind)) {
9518 /*
9519 * If this is an aggregating action, there must be neither
9520 * a speculate nor a commit on the action chain.
9521 */
9522 dtrace_action_t *act;
9523
9524 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
9525 if (act->dta_kind == DTRACEACT_COMMIT)
9526 return (EINVAL);
9527
9528 if (act->dta_kind == DTRACEACT_SPECULATE)
9529 return (EINVAL);
9530 }
9531
9532 action = dtrace_ecb_aggregation_create(ecb, desc);
9533
9534 if (action == NULL)
9535 return (EINVAL);
9536 } else {
9537 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) ||
9538 (desc->dtad_kind == DTRACEACT_DIFEXPR &&
9539 dp != NULL && dp->dtdo_destructive)) {
9540 state->dts_destructive = 1;
9541 }
9542
9543 switch (desc->dtad_kind) {
9544 case DTRACEACT_PRINTF:
9545 case DTRACEACT_PRINTA:
9546 case DTRACEACT_SYSTEM:
9547 case DTRACEACT_FREOPEN:
9548 /*
9549 * We know that our arg is a string -- turn it into a
9550 * format.
9551 */
9552 if (arg == NULL) {
9553 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA);
9554 format = 0;
9555 } else {
9556 ASSERT(arg != NULL);
9557 ASSERT(arg > KERNELBASE);
9558 format = dtrace_format_add(state,
9559 (char *)(uintptr_t)arg);
9560 }
9561
9562 /*FALLTHROUGH*/
9563 case DTRACEACT_LIBACT:
9564 case DTRACEACT_DIFEXPR:
9565 if (dp == NULL)
9566 return (EINVAL);
9567
9568 if ((size = dp->dtdo_rtype.dtdt_size) != 0)
9569 break;
9570
9571 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) {
9572 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9573 return (EINVAL);
9574
9575 size = opt[DTRACEOPT_STRSIZE];
9576 }
9577
9578 break;
9579
9580 case DTRACEACT_STACK:
9581 if ((nframes = arg) == 0) {
9582 nframes = opt[DTRACEOPT_STACKFRAMES];
9583 ASSERT(nframes > 0);
9584 arg = nframes;
9585 }
9586
9587 size = nframes * sizeof (pc_t);
9588 break;
9589
9590 case DTRACEACT_JSTACK:
9591 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0)
9592 strsize = opt[DTRACEOPT_JSTACKSTRSIZE];
9593
9594 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0)
9595 nframes = opt[DTRACEOPT_JSTACKFRAMES];
9596
9597 arg = DTRACE_USTACK_ARG(nframes, strsize);
9598
9599 /*FALLTHROUGH*/
9600 case DTRACEACT_USTACK:
9601 if (desc->dtad_kind != DTRACEACT_JSTACK &&
9602 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) {
9603 strsize = DTRACE_USTACK_STRSIZE(arg);
9604 nframes = opt[DTRACEOPT_USTACKFRAMES];
9605 ASSERT(nframes > 0);
9606 arg = DTRACE_USTACK_ARG(nframes, strsize);
9607 }
9608
9609 /*
9610 * Save a slot for the pid.
9611 */
9612 size = (nframes + 1) * sizeof (uint64_t);
9613 size += DTRACE_USTACK_STRSIZE(arg);
9614 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t)));
9615
9616 break;
9617
9618 case DTRACEACT_SYM:
9619 case DTRACEACT_MOD:
9620 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) !=
9621 sizeof (uint64_t)) ||
9622 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9623 return (EINVAL);
9624 break;
9625
9626 case DTRACEACT_USYM:
9627 case DTRACEACT_UMOD:
9628 case DTRACEACT_UADDR:
9629 if (dp == NULL ||
9630 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) ||
9631 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9632 return (EINVAL);
9633
9634 /*
9635 * We have a slot for the pid, plus a slot for the
9636 * argument. To keep things simple (aligned with
9637 * bitness-neutral sizing), we store each as a 64-bit
9638 * quantity.
9639 */
9640 size = 2 * sizeof (uint64_t);
9641 break;
9642
9643 case DTRACEACT_STOP:
9644 case DTRACEACT_BREAKPOINT:
9645 case DTRACEACT_PANIC:
9646 break;
9647
9648 case DTRACEACT_CHILL:
9649 case DTRACEACT_DISCARD:
9650 case DTRACEACT_RAISE:
9651 if (dp == NULL)
9652 return (EINVAL);
9653 break;
9654
9655 case DTRACEACT_EXIT:
9656 if (dp == NULL ||
9657 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) ||
9658 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF))
9659 return (EINVAL);
9660 break;
9661
9662 case DTRACEACT_SPECULATE:
9663 if (ecb->dte_size > sizeof (dtrace_epid_t))
9664 return (EINVAL);
9665
9666 if (dp == NULL)
9667 return (EINVAL);
9668
9669 state->dts_speculates = 1;
9670 break;
9671
9672 case DTRACEACT_COMMIT: {
9673 dtrace_action_t *act = ecb->dte_action;
9674
9675 for (; act != NULL; act = act->dta_next) {
9676 if (act->dta_kind == DTRACEACT_COMMIT)
9677 return (EINVAL);
9678 }
9679
9680 if (dp == NULL)
9681 return (EINVAL);
9682 break;
9683 }
9684
9685 default:
9686 return (EINVAL);
9687 }
9688
9689 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) {
9690 /*
9691 * If this is a data-storing action or a speculate,
9692 * we must be sure that there isn't a commit on the
9693 * action chain.
9694 */
9695 dtrace_action_t *act = ecb->dte_action;
9696
9697 for (; act != NULL; act = act->dta_next) {
9698 if (act->dta_kind == DTRACEACT_COMMIT)
9699 return (EINVAL);
9700 }
9701 }
9702
9703 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP);
9704 action->dta_rec.dtrd_size = size;
9705 }
9706
9707 action->dta_refcnt = 1;
9708 rec = &action->dta_rec;
9709 size = rec->dtrd_size;
9710
9711 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) {
9712 if (!(size & mask)) {
9713 align = mask + 1;
9714 break;
9715 }
9716 }
9717
9718 action->dta_kind = desc->dtad_kind;
9719
9720 if ((action->dta_difo = dp) != NULL)
9721 dtrace_difo_hold(dp);
9722
9723 rec->dtrd_action = action->dta_kind;
9724 rec->dtrd_arg = arg;
9725 rec->dtrd_uarg = desc->dtad_uarg;
9726 rec->dtrd_alignment = (uint16_t)align;
9727 rec->dtrd_format = format;
9728
9729 if ((last = ecb->dte_action_last) != NULL) {
9730 ASSERT(ecb->dte_action != NULL);
9731 action->dta_prev = last;
9732 last->dta_next = action;
9733 } else {
9734 ASSERT(ecb->dte_action == NULL);
9735 ecb->dte_action = action;
9736 }
9737
9738 ecb->dte_action_last = action;
9739
9740 return (0);
9741 }
9742
9743 static void
9744 dtrace_ecb_action_remove(dtrace_ecb_t *ecb)
9745 {
9746 dtrace_action_t *act = ecb->dte_action, *next;
9747 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate;
9748 dtrace_difo_t *dp;
9749 uint16_t format;
9750
9751 if (act != NULL && act->dta_refcnt > 1) {
9752 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1);
9753 act->dta_refcnt--;
9754 } else {
9755 for (; act != NULL; act = next) {
9756 next = act->dta_next;
9757 ASSERT(next != NULL || act == ecb->dte_action_last);
9758 ASSERT(act->dta_refcnt == 1);
9759
9760 if ((format = act->dta_rec.dtrd_format) != 0)
9761 dtrace_format_remove(ecb->dte_state, format);
9762
9763 if ((dp = act->dta_difo) != NULL)
9764 dtrace_difo_release(dp, vstate);
9765
9766 if (DTRACEACT_ISAGG(act->dta_kind)) {
9767 dtrace_ecb_aggregation_destroy(ecb, act);
9768 } else {
9769 kmem_free(act, sizeof (dtrace_action_t));
9770 }
9771 }
9772 }
9773
9774 ecb->dte_action = NULL;
9775 ecb->dte_action_last = NULL;
9776 ecb->dte_size = sizeof (dtrace_epid_t);
9777 }
9778
9779 static void
9780 dtrace_ecb_disable(dtrace_ecb_t *ecb)
9781 {
9782 /*
9783 * We disable the ECB by removing it from its probe.
9784 */
9785 dtrace_ecb_t *pecb, *prev = NULL;
9786 dtrace_probe_t *probe = ecb->dte_probe;
9787
9788 ASSERT(MUTEX_HELD(&dtrace_lock));
9789
9790 if (probe == NULL) {
9791 /*
9792 * This is the NULL probe; there is nothing to disable.
9793 */
9794 return;
9795 }
9796
9797 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) {
9798 if (pecb == ecb)
9799 break;
9800 prev = pecb;
9801 }
9802
9803 ASSERT(pecb != NULL);
9804
9805 if (prev == NULL) {
9806 probe->dtpr_ecb = ecb->dte_next;
9807 } else {
9808 prev->dte_next = ecb->dte_next;
9809 }
9810
9811 if (ecb == probe->dtpr_ecb_last) {
9812 ASSERT(ecb->dte_next == NULL);
9813 probe->dtpr_ecb_last = prev;
9814 }
9815
9816 /*
9817 * The ECB has been disconnected from the probe; now sync to assure
9818 * that all CPUs have seen the change before returning.
9819 */
9820 dtrace_sync();
9821
9822 if (probe->dtpr_ecb == NULL) {
9823 /*
9824 * That was the last ECB on the probe; clear the predicate
9825 * cache ID for the probe, disable it and sync one more time
9826 * to assure that we'll never hit it again.
9827 */
9828 dtrace_provider_t *prov = probe->dtpr_provider;
9829
9830 ASSERT(ecb->dte_next == NULL);
9831 ASSERT(probe->dtpr_ecb_last == NULL);
9832 probe->dtpr_predcache = DTRACE_CACHEIDNONE;
9833 prov->dtpv_pops.dtps_disable(prov->dtpv_arg,
9834 probe->dtpr_id, probe->dtpr_arg);
9835 dtrace_sync();
9836 } else {
9837 /*
9838 * There is at least one ECB remaining on the probe. If there
9839 * is _exactly_ one, set the probe's predicate cache ID to be
9840 * the predicate cache ID of the remaining ECB.
9841 */
9842 ASSERT(probe->dtpr_ecb_last != NULL);
9843 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE);
9844
9845 if (probe->dtpr_ecb == probe->dtpr_ecb_last) {
9846 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate;
9847
9848 ASSERT(probe->dtpr_ecb->dte_next == NULL);
9849
9850 if (p != NULL)
9851 probe->dtpr_predcache = p->dtp_cacheid;
9852 }
9853
9854 ecb->dte_next = NULL;
9855 }
9856 }
9857
9858 static void
9859 dtrace_ecb_destroy(dtrace_ecb_t *ecb)
9860 {
9861 dtrace_state_t *state = ecb->dte_state;
9862 dtrace_vstate_t *vstate = &state->dts_vstate;
9863 dtrace_predicate_t *pred;
9864 dtrace_epid_t epid = ecb->dte_epid;
9865
9866 ASSERT(MUTEX_HELD(&dtrace_lock));
9867 ASSERT(ecb->dte_next == NULL);
9868 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb);
9869
9870 if ((pred = ecb->dte_predicate) != NULL)
9871 dtrace_predicate_release(pred, vstate);
9872
9873 dtrace_ecb_action_remove(ecb);
9874
9875 ASSERT(state->dts_ecbs[epid - 1] == ecb);
9876 state->dts_ecbs[epid - 1] = NULL;
9877
9878 kmem_free(ecb, sizeof (dtrace_ecb_t));
9879 }
9880
9881 static dtrace_ecb_t *
9882 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe,
9883 dtrace_enabling_t *enab)
9884 {
9885 dtrace_ecb_t *ecb;
9886 dtrace_predicate_t *pred;
9887 dtrace_actdesc_t *act;
9888 dtrace_provider_t *prov;
9889 dtrace_ecbdesc_t *desc = enab->dten_current;
9890
9891 ASSERT(MUTEX_HELD(&dtrace_lock));
9892 ASSERT(state != NULL);
9893
9894 ecb = dtrace_ecb_add(state, probe);
9895 ecb->dte_uarg = desc->dted_uarg;
9896
9897 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) {
9898 dtrace_predicate_hold(pred);
9899 ecb->dte_predicate = pred;
9900 }
9901
9902 if (probe != NULL) {
9903 /*
9904 * If the provider shows more leg than the consumer is old
9905 * enough to see, we need to enable the appropriate implicit
9906 * predicate bits to prevent the ecb from activating at
9907 * revealing times.
9908 *
9909 * Providers specifying DTRACE_PRIV_USER at register time
9910 * are stating that they need the /proc-style privilege
9911 * model to be enforced, and this is what DTRACE_COND_OWNER
9912 * and DTRACE_COND_ZONEOWNER will then do at probe time.
9913 */
9914 prov = probe->dtpr_provider;
9915 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) &&
9916 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
9917 ecb->dte_cond |= DTRACE_COND_OWNER;
9918
9919 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) &&
9920 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER))
9921 ecb->dte_cond |= DTRACE_COND_ZONEOWNER;
9922
9923 /*
9924 * If the provider shows us kernel innards and the user
9925 * is lacking sufficient privilege, enable the
9926 * DTRACE_COND_USERMODE implicit predicate.
9927 */
9928 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) &&
9929 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL))
9930 ecb->dte_cond |= DTRACE_COND_USERMODE;
9931 }
9932
9933 if (dtrace_ecb_create_cache != NULL) {
9934 /*
9935 * If we have a cached ecb, we'll use its action list instead
9936 * of creating our own (saving both time and space).
9937 */
9938 dtrace_ecb_t *cached = dtrace_ecb_create_cache;
9939 dtrace_action_t *act = cached->dte_action;
9940
9941 if (act != NULL) {
9942 ASSERT(act->dta_refcnt > 0);
9943 act->dta_refcnt++;
9944 ecb->dte_action = act;
9945 ecb->dte_action_last = cached->dte_action_last;
9946 ecb->dte_needed = cached->dte_needed;
9947 ecb->dte_size = cached->dte_size;
9948 ecb->dte_alignment = cached->dte_alignment;
9949 }
9950
9951 return (ecb);
9952 }
9953
9954 for (act = desc->dted_action; act != NULL; act = act->dtad_next) {
9955 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) {
9956 dtrace_ecb_destroy(ecb);
9957 return (NULL);
9958 }
9959 }
9960
9961 dtrace_ecb_resize(ecb);
9962
9963 return (dtrace_ecb_create_cache = ecb);
9964 }
9965
9966 static int
9967 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg)
9968 {
9969 dtrace_ecb_t *ecb;
9970 dtrace_enabling_t *enab = arg;
9971 dtrace_state_t *state = enab->dten_vstate->dtvs_state;
9972
9973 ASSERT(state != NULL);
9974
9975 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) {
9976 /*
9977 * This probe was created in a generation for which this
9978 * enabling has previously created ECBs; we don't want to
9979 * enable it again, so just kick out.
9980 */
9981 return (DTRACE_MATCH_NEXT);
9982 }
9983
9984 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL)
9985 return (DTRACE_MATCH_DONE);
9986
9987 if (dtrace_ecb_enable(ecb) < 0)
9988 return (DTRACE_MATCH_FAIL);
9989
9990 return (DTRACE_MATCH_NEXT);
9991 }
9992
9993 static dtrace_ecb_t *
9994 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id)
9995 {
9996 dtrace_ecb_t *ecb;
9997
9998 ASSERT(MUTEX_HELD(&dtrace_lock));
9999
10000 if (id == 0 || id > state->dts_necbs)
10001 return (NULL);
10002
10003 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL);
10004 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id);
10005
10006 return (state->dts_ecbs[id - 1]);
10007 }
10008
10009 static dtrace_aggregation_t *
10010 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id)
10011 {
10012 dtrace_aggregation_t *agg;
10013
10014 ASSERT(MUTEX_HELD(&dtrace_lock));
10015
10016 if (id == 0 || id > state->dts_naggregations)
10017 return (NULL);
10018
10019 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL);
10020 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL ||
10021 agg->dtag_id == id);
10022
10023 return (state->dts_aggregations[id - 1]);
10024 }
10025
10026 /*
10027 * DTrace Buffer Functions
10028 *
10029 * The following functions manipulate DTrace buffers. Most of these functions
10030 * are called in the context of establishing or processing consumer state;
10031 * exceptions are explicitly noted.
10032 */
10033
10034 /*
10035 * Note: called from cross call context. This function switches the two
10036 * buffers on a given CPU. The atomicity of this operation is assured by
10037 * disabling interrupts while the actual switch takes place; the disabling of
10038 * interrupts serializes the execution with any execution of dtrace_probe() on
10039 * the same CPU.
10040 */
10041 static void
10042 dtrace_buffer_switch(dtrace_buffer_t *buf)
10043 {
10044 caddr_t tomax = buf->dtb_tomax;
10045 caddr_t xamot = buf->dtb_xamot;
10046 dtrace_icookie_t cookie;
10047
10048 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
10049 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING));
10050
10051 cookie = dtrace_interrupt_disable();
10052 buf->dtb_tomax = xamot;
10053 buf->dtb_xamot = tomax;
10054 buf->dtb_xamot_drops = buf->dtb_drops;
10055 buf->dtb_xamot_offset = buf->dtb_offset;
10056 buf->dtb_xamot_errors = buf->dtb_errors;
10057 buf->dtb_xamot_flags = buf->dtb_flags;
10058 buf->dtb_offset = 0;
10059 buf->dtb_drops = 0;
10060 buf->dtb_errors = 0;
10061 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED);
10062 dtrace_interrupt_enable(cookie);
10063 }
10064
10065 /*
10066 * Note: called from cross call context. This function activates a buffer
10067 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation
10068 * is guaranteed by the disabling of interrupts.
10069 */
10070 static void
10071 dtrace_buffer_activate(dtrace_state_t *state)
10072 {
10073 dtrace_buffer_t *buf;
10074 dtrace_icookie_t cookie = dtrace_interrupt_disable();
10075
10076 buf = &state->dts_buffer[CPU->cpu_id];
10077
10078 if (buf->dtb_tomax != NULL) {
10079 /*
10080 * We might like to assert that the buffer is marked inactive,
10081 * but this isn't necessarily true: the buffer for the CPU
10082 * that processes the BEGIN probe has its buffer activated
10083 * manually. In this case, we take the (harmless) action of
10084 * re-clearing the INACTIVE bit.
10085 */
10086 buf->dtb_flags &= ~DTRACEBUF_INACTIVE;
10087 }
10088
10089 dtrace_interrupt_enable(cookie);
10090 }
10091
10092 static int
10093 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags,
10094 processorid_t cpu)
10095 {
10096 cpu_t *cp;
10097 dtrace_buffer_t *buf;
10098
10099 ASSERT(MUTEX_HELD(&cpu_lock));
10100 ASSERT(MUTEX_HELD(&dtrace_lock));
10101
10102 if (size > dtrace_nonroot_maxsize &&
10103 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE))
10104 return (EFBIG);
10105
10106 cp = cpu_list;
10107
10108 do {
10109 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10110 continue;
10111
10112 buf = &bufs[cp->cpu_id];
10113
10114 /*
10115 * If there is already a buffer allocated for this CPU, it
10116 * is only possible that this is a DR event. In this case,
10117 * the buffer size must match our specified size.
10118 */
10119 if (buf->dtb_tomax != NULL) {
10120 ASSERT(buf->dtb_size == size);
10121 continue;
10122 }
10123
10124 ASSERT(buf->dtb_xamot == NULL);
10125
10126 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10127 goto err;
10128
10129 buf->dtb_size = size;
10130 buf->dtb_flags = flags;
10131 buf->dtb_offset = 0;
10132 buf->dtb_drops = 0;
10133
10134 if (flags & DTRACEBUF_NOSWITCH)
10135 continue;
10136
10137 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
10138 goto err;
10139 } while ((cp = cp->cpu_next) != cpu_list);
10140
10141 return (0);
10142
10143 err:
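/*
 * Allocation failed for some CPU; unwind by freeing any buffers already
 * allocated on this pass before returning ENOMEM.
 */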
10144 cp = cpu_list;
10145
10146 do {
10147 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id)
10148 continue;
10149
10150 buf = &bufs[cp->cpu_id];
10151
10152 if (buf->dtb_xamot != NULL) {
10153 ASSERT(buf->dtb_tomax != NULL);
10154 ASSERT(buf->dtb_size == size);
10155 kmem_free(buf->dtb_xamot, size);
10156 }
10157
10158 if (buf->dtb_tomax != NULL) {
10159 ASSERT(buf->dtb_size == size);
10160 kmem_free(buf->dtb_tomax, size);
10161 }
10162
10163 buf->dtb_tomax = NULL;
10164 buf->dtb_xamot = NULL;
10165 buf->dtb_size = 0;
10166 } while ((cp = cp->cpu_next) != cpu_list);
10167
10168 return (ENOMEM);
10169 }
10170
10171 /*
10172 * Note: called from probe context. This function just increments the drop
10173 * count on a buffer. It has been made a function to allow for the
10174 * possibility of understanding the source of mysterious drop counts. (A
10175 * problem for which one may be particularly disappointed that DTrace cannot
10176 * be used to understand DTrace.)
10177 */
10178 static void
10179 dtrace_buffer_drop(dtrace_buffer_t *buf)
10180 {
10181 buf->dtb_drops++;
10182 }
10183
10184 /*
10185 * Note: called from probe context. This function is called to reserve space
10186 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the
10187 * mstate. Returns the new offset in the buffer, or a negative value if an
10188 * error has occurred.
10189 */
10190 static intptr_t
10191 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align,
10192 dtrace_state_t *state, dtrace_mstate_t *mstate)
10193 {
10194 intptr_t offs = buf->dtb_offset, soffs;
10195 intptr_t woffs;
10196 caddr_t tomax;
10197 size_t total;
10198
10199 if (buf->dtb_flags & DTRACEBUF_INACTIVE)
10200 return (-1);
10201
10202 if ((tomax = buf->dtb_tomax) == NULL) {
10203 dtrace_buffer_drop(buf);
10204 return (-1);
10205 }
10206
10207 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) {
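/*
 * For an ordinary switching buffer (neither ring nor fill), pad up to the
 * required alignment with uint32_t words tagged DTRACE_EPIDNONE, which
 * consumers recognize and skip.
 */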
10208 while (offs & (align - 1)) {
10209 /*
10210 * Assert that our alignment is off by a number which
10211 * is itself sizeof (uint32_t) aligned.
10212 */
10213 ASSERT(!((align - (offs & (align - 1))) &
10214 (sizeof (uint32_t) - 1)));
10215 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10216 offs += sizeof (uint32_t);
10217 }
10218
10219 if ((soffs = offs + needed) > buf->dtb_size) {
10220 dtrace_buffer_drop(buf);
10221 return (-1);
10222 }
10223
10224 if (mstate == NULL)
10225 return (offs);
10226
10227 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs;
10228 mstate->dtms_scratch_size = buf->dtb_size - soffs;
10229 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
10230
10231 return (offs);
10232 }
10233
10234 if (buf->dtb_flags & DTRACEBUF_FILL) {
10235 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN &&
10236 (buf->dtb_flags & DTRACEBUF_FULL))
10237 return (-1);
10238 goto out;
10239 }
10240
10241 total = needed + (offs & (align - 1));
10242
10243 /*
10244 * For a ring buffer, life is quite a bit more complicated. Before
10245 * we can store any padding, we need to adjust our wrapping offset.
10246 * (If we've never before wrapped or we're not about to, no adjustment
10247 * is required.)
10248 */
10249 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) ||
10250 offs + total > buf->dtb_size) {
10251 woffs = buf->dtb_xamot_offset;
10252
10253 if (offs + total > buf->dtb_size) {
10254 /*
10255 * We can't fit in the end of the buffer. First, a
10256 * sanity check that we can fit in the buffer at all.
10257 */
10258 if (total > buf->dtb_size) {
10259 dtrace_buffer_drop(buf);
10260 return (-1);
10261 }
10262
10263 /*
10264 * We're going to be storing at the top of the buffer,
10265 * so now we need to deal with the wrapped offset. We
10266 * only reset our wrapped offset to 0 if it is
10267 * currently greater than the current offset. If it
10268 * is less than the current offset, it is because a
10269 * previous allocation induced a wrap -- but the
10270 * allocation didn't subsequently take the space due
10271 * to an error or false predicate evaluation. In this
10272 * case, we'll just leave the wrapped offset alone: if
10273 * the wrapped offset hasn't been advanced far enough
10274 * for this allocation, it will be adjusted in the
10275 * lower loop.
10276 */
10277 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
10278 if (woffs >= offs)
10279 woffs = 0;
10280 } else {
10281 woffs = 0;
10282 }
10283
10284 /*
10285 * Now we know that we're going to be storing to the
10286 * top of the buffer and that there is room for us
10287 * there. We need to clear the buffer from the current
10288 * offset to the end (there may be old gunk there).
10289 */
10290 while (offs < buf->dtb_size)
10291 tomax[offs++] = 0;
10292
10293 /*
10294 * We need to set our offset to zero. And because we
10295 * are wrapping, we need to set the bit indicating as
10296 * much. We can also adjust our needed space back
10297 * down to the space required by the ECB -- we know
10298 * that the top of the buffer is aligned.
10299 */
10300 offs = 0;
10301 total = needed;
10302 buf->dtb_flags |= DTRACEBUF_WRAPPED;
10303 } else {
10304 /*
10305 * There is room for us in the buffer, so we simply
10306 * need to check the wrapped offset.
10307 */
10308 if (woffs < offs) {
10309 /*
10310 * The wrapped offset is less than the offset.
10311 * This can happen if we allocated buffer space
10312 * that induced a wrap, but then we didn't
10313 * subsequently take the space due to an error
10314 * or false predicate evaluation. This is
10315 * okay; we know that _this_ allocation isn't
10316 * going to induce a wrap. We still can't
10317 * reset the wrapped offset to be zero,
10318 * however: the space may have been trashed in
10319 * the previous failed probe attempt. But at
10320 * least the wrapped offset doesn't need to
10321 * be adjusted at all...
10322 */
10323 goto out;
10324 }
10325 }
10326
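/*
 * Advance the wrapped offset one record (or EPIDNONE padding word) at a
 * time, discarding the oldest records until the reservation
 * [offs, offs + total) no longer overlaps them.
 */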
10327 while (offs + total > woffs) {
10328 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs);
10329 size_t size;
10330
10331 if (epid == DTRACE_EPIDNONE) {
10332 size = sizeof (uint32_t);
10333 } else {
10334 ASSERT(epid <= state->dts_necbs);
10335 ASSERT(state->dts_ecbs[epid - 1] != NULL);
10336
10337 size = state->dts_ecbs[epid - 1]->dte_size;
10338 }
10339
10340 ASSERT(woffs + size <= buf->dtb_size);
10341 ASSERT(size != 0);
10342
10343 if (woffs + size == buf->dtb_size) {
10344 /*
10345 * We've reached the end of the buffer; we want
10346 * to set the wrapped offset to 0 and break
10347 * out. However, if the offs is 0, then we're
10348 * in a strange edge-condition: the amount of
10349 * space that we want to reserve plus the size
10350 * of the record that we're overwriting is
10351 * greater than the size of the buffer. This
10352 * is problematic because if we reserve the
10353 * space but subsequently don't consume it (due
10354 * to a failed predicate or error) the wrapped
10355 * offset will be 0 -- yet the EPID at offset 0
10356 * will not be committed. This situation is
10357 * relatively easy to deal with: if we're in
10358 * this case, the buffer is indistinguishable
10359 * from one that hasn't wrapped; we need only
10360 * finish the job by clearing the wrapped bit,
10361 * explicitly setting the offset to be 0, and
10362 * zero'ing out the old data in the buffer.
10363 */
10364 if (offs == 0) {
10365 buf->dtb_flags &= ~DTRACEBUF_WRAPPED;
10366 buf->dtb_offset = 0;
10367 woffs = total;
10368
10369 while (woffs < buf->dtb_size)
10370 tomax[woffs++] = 0;
10371 }
10372
10373 woffs = 0;
10374 break;
10375 }
10376
10377 woffs += size;
10378 }
10379
10380 /*
10381 * We have a wrapped offset. It may be that the wrapped offset
10382 * has become zero -- that's okay.
10383 */
10384 buf->dtb_xamot_offset = woffs;
10385 }
10386
10387 out:
10388 /*
10389 * Now we can plow the buffer with any necessary padding.
10390 */
10391 while (offs & (align - 1)) {
10392 /*
10393 * Assert that our alignment is off by a number which
10394 * is itself sizeof (uint32_t) aligned.
10395 */
10396 ASSERT(!((align - (offs & (align - 1))) &
10397 (sizeof (uint32_t) - 1)));
10398 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE);
10399 offs += sizeof (uint32_t);
10400 }
10401
10402 if (buf->dtb_flags & DTRACEBUF_FILL) {
10403 if (offs + needed > buf->dtb_size - state->dts_reserve) {
10404 buf->dtb_flags |= DTRACEBUF_FULL;
10405 return (-1);
10406 }
10407 }
10408
10409 if (mstate == NULL)
10410 return (offs);
10411
10412 /*
10413 * For ring buffers and fill buffers, the scratch space is always
10414 * the inactive buffer.
10415 */
10416 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot;
10417 mstate->dtms_scratch_size = buf->dtb_size;
10418 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base;
10419
10420 return (offs);
10421 }
10422
10423 static void
10424 dtrace_buffer_polish(dtrace_buffer_t *buf)
10425 {
10426 ASSERT(buf->dtb_flags & DTRACEBUF_RING);
10427 ASSERT(MUTEX_HELD(&dtrace_lock));
10428
10429 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED))
10430 return;
10431
10432 /*
10433 * We need to polish the ring buffer. There are three cases:
10434 *
10435 * - The first (and presumably most common) is that there is no gap
10436 * between the buffer offset and the wrapped offset. In this case,
10437 * there is nothing in the buffer that isn't valid data; we can
10438 * mark the buffer as polished and return.
10439 *
10440 * - The second (less common than the first but still more common
10441 * than the third) is that there is a gap between the buffer offset
10442 * and the wrapped offset, and the wrapped offset is larger than the
10443 * buffer offset. This can happen because of an alignment issue, or
10444 * can happen because of a call to dtrace_buffer_reserve() that
10445 * didn't subsequently consume the buffer space. In this case,
10446 * we need to zero the data from the buffer offset to the wrapped
10447 * offset.
10448 *
10449 * - The third (and least common) is that there is a gap between the
10450 * buffer offset and the wrapped offset, but the wrapped offset is
10451 * _less_ than the buffer offset. This can only happen because a
10452 * call to dtrace_buffer_reserve() induced a wrap, but the space
10453 * was not subsequently consumed. In this case, we need to zero the
10454 * space from the offset to the end of the buffer _and_ from the
10455 * top of the buffer to the wrapped offset.
10456 */
10457 if (buf->dtb_offset < buf->dtb_xamot_offset) {
10458 bzero(buf->dtb_tomax + buf->dtb_offset,
10459 buf->dtb_xamot_offset - buf->dtb_offset);
10460 }
10461
10462 if (buf->dtb_offset > buf->dtb_xamot_offset) {
10463 bzero(buf->dtb_tomax + buf->dtb_offset,
10464 buf->dtb_size - buf->dtb_offset);
10465 bzero(buf->dtb_tomax, buf->dtb_xamot_offset);
10466 }
10467 }
10468
10469 static void
10470 dtrace_buffer_free(dtrace_buffer_t *bufs)
10471 {
10472 int i;
10473
10474 for (i = 0; i < NCPU; i++) {
10475 dtrace_buffer_t *buf = &bufs[i];
10476
10477 if (buf->dtb_tomax == NULL) {
10478 ASSERT(buf->dtb_xamot == NULL);
10479 ASSERT(buf->dtb_size == 0);
10480 continue;
10481 }
10482
10483 if (buf->dtb_xamot != NULL) {
10484 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
10485 kmem_free(buf->dtb_xamot, buf->dtb_size);
10486 }
10487
10488 kmem_free(buf->dtb_tomax, buf->dtb_size);
10489 buf->dtb_size = 0;
10490 buf->dtb_tomax = NULL;
10491 buf->dtb_xamot = NULL;
10492 }
10493 }
10494
10495 /*
10496 * DTrace Enabling Functions
10497 */
10498 static dtrace_enabling_t *
10499 dtrace_enabling_create(dtrace_vstate_t *vstate)
10500 {
10501 dtrace_enabling_t *enab;
10502
10503 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP);
10504 enab->dten_vstate = vstate;
10505
10506 return (enab);
10507 }
10508
10509 static void
10510 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb)
10511 {
10512 dtrace_ecbdesc_t **ndesc;
10513 size_t osize, nsize;
10514
10515 /*
10516 * We can't add to enablings after we've enabled them, or after we've
10517 * retained them.
10518 */
10519 ASSERT(enab->dten_probegen == 0);
10520 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
10521
10522 if (enab->dten_ndesc < enab->dten_maxdesc) {
10523 enab->dten_desc[enab->dten_ndesc++] = ecb;
10524 return;
10525 }
10526
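/*
 * Grow the descriptor array geometrically:  double dten_maxdesc (starting
 * at one), allocate a new array, and copy the existing descriptors into it.
 */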
10527 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
10528
10529 if (enab->dten_maxdesc == 0) {
10530 enab->dten_maxdesc = 1;
10531 } else {
10532 enab->dten_maxdesc <<= 1;
10533 }
10534
10535 ASSERT(enab->dten_ndesc < enab->dten_maxdesc);
10536
10537 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *);
10538 ndesc = kmem_zalloc(nsize, KM_SLEEP);
10539 bcopy(enab->dten_desc, ndesc, osize);
10540 kmem_free(enab->dten_desc, osize);
10541
10542 enab->dten_desc = ndesc;
10543 enab->dten_desc[enab->dten_ndesc++] = ecb;
10544 }
10545
10546 static void
10547 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb,
10548 dtrace_probedesc_t *pd)
10549 {
10550 dtrace_ecbdesc_t *new;
10551 dtrace_predicate_t *pred;
10552 dtrace_actdesc_t *act;
10553
10554 /*
10555 * We're going to create a new ECB description that matches the
10556 * specified ECB in every way, but has the specified probe description.
10557 */
10558 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
10559
10560 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL)
10561 dtrace_predicate_hold(pred);
10562
10563 for (act = ecb->dted_action; act != NULL; act = act->dtad_next)
10564 dtrace_actdesc_hold(act);
10565
10566 new->dted_action = ecb->dted_action;
10567 new->dted_pred = ecb->dted_pred;
10568 new->dted_probe = *pd;
10569 new->dted_uarg = ecb->dted_uarg;
10570
10571 dtrace_enabling_add(enab, new);
10572 }
10573
10574 static void
10575 dtrace_enabling_dump(dtrace_enabling_t *enab)
10576 {
10577 int i;
10578
10579 for (i = 0; i < enab->dten_ndesc; i++) {
10580 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe;
10581
10582 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i,
10583 desc->dtpd_provider, desc->dtpd_mod,
10584 desc->dtpd_func, desc->dtpd_name);
10585 }
10586 }
10587
10588 static void
10589 dtrace_enabling_destroy(dtrace_enabling_t *enab)
10590 {
10591 int i;
10592 dtrace_ecbdesc_t *ep;
10593 dtrace_vstate_t *vstate = enab->dten_vstate;
10594
10595 ASSERT(MUTEX_HELD(&dtrace_lock));
10596
10597 for (i = 0; i < enab->dten_ndesc; i++) {
10598 dtrace_actdesc_t *act, *next;
10599 dtrace_predicate_t *pred;
10600
10601 ep = enab->dten_desc[i];
10602
10603 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL)
10604 dtrace_predicate_release(pred, vstate);
10605
10606 for (act = ep->dted_action; act != NULL; act = next) {
10607 next = act->dtad_next;
10608 dtrace_actdesc_release(act, vstate);
10609 }
10610
10611 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
10612 }
10613
10614 kmem_free(enab->dten_desc,
10615 enab->dten_maxdesc * sizeof (dtrace_enabling_t *));
10616
10617 /*
10618 * If this was a retained enabling, decrement the dts_nretained count
10619 * and take it off of the dtrace_retained list.
10620 */
10621 if (enab->dten_prev != NULL || enab->dten_next != NULL ||
10622 dtrace_retained == enab) {
10623 ASSERT(enab->dten_vstate->dtvs_state != NULL);
10624 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0);
10625 enab->dten_vstate->dtvs_state->dts_nretained--;
10626 dtrace_retained_gen++;
10627 }
10628
10629 if (enab->dten_prev == NULL) {
10630 if (dtrace_retained == enab) {
10631 dtrace_retained = enab->dten_next;
10632
10633 if (dtrace_retained != NULL)
10634 dtrace_retained->dten_prev = NULL;
10635 }
10636 } else {
10637 ASSERT(enab != dtrace_retained);
10638 ASSERT(dtrace_retained != NULL);
10639 enab->dten_prev->dten_next = enab->dten_next;
10640 }
10641
10642 if (enab->dten_next != NULL) {
10643 ASSERT(dtrace_retained != NULL);
10644 enab->dten_next->dten_prev = enab->dten_prev;
10645 }
10646
10647 kmem_free(enab, sizeof (dtrace_enabling_t));
10648 }
10649
10650 static int
10651 dtrace_enabling_retain(dtrace_enabling_t *enab)
10652 {
10653 dtrace_state_t *state;
10654
10655 ASSERT(MUTEX_HELD(&dtrace_lock));
10656 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL);
10657 ASSERT(enab->dten_vstate != NULL);
10658
10659 state = enab->dten_vstate->dtvs_state;
10660 ASSERT(state != NULL);
10661
10662 /*
10663 * We only allow each state to retain dtrace_retain_max enablings.
10664 */
10665 if (state->dts_nretained >= dtrace_retain_max)
10666 return (ENOSPC);
10667
10668 state->dts_nretained++;
10669 dtrace_retained_gen++;
10670
10671 if (dtrace_retained == NULL) {
10672 dtrace_retained = enab;
10673 return (0);
10674 }
10675
10676 enab->dten_next = dtrace_retained;
10677 dtrace_retained->dten_prev = enab;
10678 dtrace_retained = enab;
10679
10680 return (0);
10681 }
10682
10683 static int
10684 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match,
10685 dtrace_probedesc_t *create)
10686 {
10687 dtrace_enabling_t *new, *enab;
10688 int found = 0, err = ENOENT;
10689
10690 ASSERT(MUTEX_HELD(&dtrace_lock));
10691 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN);
10692 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN);
10693 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN);
10694 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN);
10695
10696 new = dtrace_enabling_create(&state->dts_vstate);
10697
10698 /*
10699 * Iterate over all retained enablings, looking for enablings that
10700 * match the specified state.
10701 */
10702 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
10703 int i;
10704
10705 /*
10706 * dtvs_state can only be NULL for helper enablings -- and
10707 * helper enablings can't be retained.
10708 */
10709 ASSERT(enab->dten_vstate->dtvs_state != NULL);
10710
10711 if (enab->dten_vstate->dtvs_state != state)
10712 continue;
10713
10714 /*
10715 * Now iterate over each probe description; we're looking for
10716 * an exact match to the specified probe description.
10717 */
10718 for (i = 0; i < enab->dten_ndesc; i++) {
10719 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
10720 dtrace_probedesc_t *pd = &ep->dted_probe;
10721
10722 if (strcmp(pd->dtpd_provider, match->dtpd_provider))
10723 continue;
10724
10725 if (strcmp(pd->dtpd_mod, match->dtpd_mod))
10726 continue;
10727
10728 if (strcmp(pd->dtpd_func, match->dtpd_func))
10729 continue;
10730
10731 if (strcmp(pd->dtpd_name, match->dtpd_name))
10732 continue;
10733
10734 /*
10735 * We have a winning probe! Add it to our growing
10736 * enabling.
10737 */
10738 found = 1;
10739 dtrace_enabling_addlike(new, ep, create);
10740 }
10741 }
10742
10743 if (!found || (err = dtrace_enabling_retain(new)) != 0) {
10744 dtrace_enabling_destroy(new);
10745 return (err);
10746 }
10747
10748 return (0);
10749 }
10750
10751 static void
10752 dtrace_enabling_retract(dtrace_state_t *state)
10753 {
10754 dtrace_enabling_t *enab, *next;
10755
10756 ASSERT(MUTEX_HELD(&dtrace_lock));
10757
10758 /*
10759 * Iterate over all retained enablings, destroy the enablings retained
10760 * for the specified state.
10761 */
10762 for (enab = dtrace_retained; enab != NULL; enab = next) {
10763 next = enab->dten_next;
10764
10765 /*
10766 * dtvs_state can only be NULL for helper enablings -- and
10767 * helper enablings can't be retained.
10768 */
10769 ASSERT(enab->dten_vstate->dtvs_state != NULL);
10770
10771 if (enab->dten_vstate->dtvs_state == state) {
10772 ASSERT(state->dts_nretained > 0);
10773 dtrace_enabling_destroy(enab);
10774 }
10775 }
10776
10777 ASSERT(state->dts_nretained == 0);
10778 }
10779
10780 static int
10781 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched)
10782 {
10783 int i = 0;
10784 int total_matched = 0, matched = 0;
10785
10786 ASSERT(MUTEX_HELD(&cpu_lock));
10787 ASSERT(MUTEX_HELD(&dtrace_lock));
10788
10789 for (i = 0; i < enab->dten_ndesc; i++) {
10790 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
10791
10792 enab->dten_current = ep;
10793 enab->dten_error = 0;
10794
10795 /*
10796 * If a provider failed to enable a probe then get out and
10797 * let the consumer know we failed.
10798 */
10799 if ((matched = dtrace_probe_enable(&ep->dted_probe, enab)) < 0)
10800 return (EBUSY);
10801
10802 total_matched += matched;
10803
10804 if (enab->dten_error != 0) {
10805 /*
10806 * If we get an error half-way through enabling the
10807 * probes, we kick out -- perhaps with some number of
10808 * them enabled. Leaving enabled probes enabled may
10809 * be slightly confusing for user-level, but we expect
10810 * that no one will attempt to actually drive on in
10811 * the face of such errors. If this is an anonymous
10812 * enabling (indicated with a NULL nmatched pointer),
10813 * we cmn_err() a message. We aren't expecting to
10814 * get such an error -- to the extent that it can exist at all,
10815 * it would be the result of corrupted DOF in the driver
10816 * properties.
10817 */
10818 if (nmatched == NULL) {
10819 cmn_err(CE_WARN, "dtrace_enabling_match() "
10820 "error on %p: %d", (void *)ep,
10821 enab->dten_error);
10822 }
10823
10824 return (enab->dten_error);
10825 }
10826 }
10827
10828 enab->dten_probegen = dtrace_probegen;
10829 if (nmatched != NULL)
10830 *nmatched = total_matched;
10831
10832 return (0);
10833 }
10834
10835 static void
10836 dtrace_enabling_matchall(void)
10837 {
10838 dtrace_enabling_t *enab;
10839
10840 mutex_enter(&cpu_lock);
10841 mutex_enter(&dtrace_lock);
10842
10843 /*
10844 * Iterate over all retained enablings to see if any probes match
10845 * against them. We only perform this operation on enablings for which
10846 * we have sufficient permissions by virtue of being in the global zone
10847 * or in the same zone as the DTrace client. Because we can be called
10848 * after dtrace_detach() has been called, we cannot assert that there
10849 * are retained enablings. We can safely load from dtrace_retained,
10850 * however: the taskq_destroy() at the end of dtrace_detach() will
10851 * block pending our completion.
10852 */
10853 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
10854 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred;
10855
10856 if (INGLOBALZONE(curproc) ||
10857 (cr != NULL && getzoneid() == crgetzoneid(cr)))
10858 (void) dtrace_enabling_match(enab, NULL);
10859 }
10860
10861 mutex_exit(&dtrace_lock);
10862 mutex_exit(&cpu_lock);
10863 }
10864
10865 /*
10866 * If an enabling is to be enabled without having matched probes (that is, if
10867 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the
10868 * enabling must be _primed_ by creating an ECB for every ECB description.
10869 * This must be done to assure that we know the number of speculations, the
10870 * number of aggregations, the minimum buffer size needed, etc. before we
10871 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually
10872 * enabling any probes, we create ECBs for every ECB description, but with a
10873 * NULL probe -- which is exactly what this function does.
10874 */
10875 static void
10876 dtrace_enabling_prime(dtrace_state_t *state)
10877 {
10878 dtrace_enabling_t *enab;
10879 int i;
10880
10881 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) {
10882 ASSERT(enab->dten_vstate->dtvs_state != NULL);
10883
10884 if (enab->dten_vstate->dtvs_state != state)
10885 continue;
10886
10887 /*
10888 * We don't want to prime an enabling more than once, lest
10889 * we allow a malicious user to induce resource exhaustion.
10890 * (The ECBs that result from priming an enabling aren't
10891 * leaked -- but they also aren't deallocated until the
10892 * consumer state is destroyed.)
10893 */
10894 if (enab->dten_primed)
10895 continue;
10896
10897 for (i = 0; i < enab->dten_ndesc; i++) {
10898 enab->dten_current = enab->dten_desc[i];
10899 (void) dtrace_probe_enable(NULL, enab);
10900 }
10901
10902 enab->dten_primed = 1;
10903 }
10904 }
10905
10906 /*
10907 * Called to indicate that probes should be provided due to retained
10908 * enablings. This is implemented in terms of dtrace_probe_provide(), but it
10909 * must take an initial lap through the enabling calling the dtps_provide()
10910 * entry point explicitly to allow for autocreated probes.
10911 */
10912 static void
10913 dtrace_enabling_provide(dtrace_provider_t *prv)
10914 {
10915 int i, all = 0;
10916 dtrace_probedesc_t desc;
10917 dtrace_genid_t gen;
10918
10919 ASSERT(MUTEX_HELD(&dtrace_lock));
10920 ASSERT(MUTEX_HELD(&dtrace_provider_lock));
10921
10922 if (prv == NULL) {
10923 all = 1;
10924 prv = dtrace_provider;
10925 }
10926
10927 do {
10928 dtrace_enabling_t *enab;
10929 void *parg = prv->dtpv_arg;
10930
10931 retry:
10932 gen = dtrace_retained_gen;
10933 for (enab = dtrace_retained; enab != NULL;
10934 enab = enab->dten_next) {
10935 for (i = 0; i < enab->dten_ndesc; i++) {
10936 desc = enab->dten_desc[i]->dted_probe;
10937 mutex_exit(&dtrace_lock);
10938 prv->dtpv_pops.dtps_provide(parg, &desc);
10939 mutex_enter(&dtrace_lock);
10940 /*
10941 * Process the retained enablings again if
10942 * they have changed while we weren't holding
10943 * dtrace_lock.
10944 */
10945 if (gen != dtrace_retained_gen)
10946 goto retry;
10947 }
10948 }
10949 } while (all && (prv = prv->dtpv_next) != NULL);
10950
10951 mutex_exit(&dtrace_lock);
10952 dtrace_probe_provide(NULL, all ? NULL : prv);
10953 mutex_enter(&dtrace_lock);
10954 }
10955
10956 /*
10957 * DTrace DOF Functions
10958 */
10959 /*ARGSUSED*/
10960 static void
10961 dtrace_dof_error(dof_hdr_t *dof, const char *str)
10962 {
10963 if (dtrace_err_verbose)
10964 cmn_err(CE_WARN, "failed to process DOF: %s", str);
10965
10966 #ifdef DTRACE_ERRDEBUG
10967 dtrace_errdebug(str);
10968 #endif
10969 }
10970
10971 /*
10972 * Create DOF out of a currently enabled state. Right now, we only create
10973 * DOF containing the run-time options -- but this could be expanded to create
10974 * complete DOF representing the enabled state.
10975 */
10976 static dof_hdr_t *
10977 dtrace_dof_create(dtrace_state_t *state)
10978 {
10979 dof_hdr_t *dof;
10980 dof_sec_t *sec;
10981 dof_optdesc_t *opt;
10982 int i, len = sizeof (dof_hdr_t) +
10983 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) +
10984 sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
10985
10986 ASSERT(MUTEX_HELD(&dtrace_lock));
10987
10988 dof = kmem_zalloc(len, KM_SLEEP);
10989 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0;
10990 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1;
10991 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2;
10992 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3;
10993
10994 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE;
10995 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE;
10996 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION;
10997 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION;
10998 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS;
10999 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS;
11000
11001 dof->dofh_flags = 0;
11002 dof->dofh_hdrsize = sizeof (dof_hdr_t);
11003 dof->dofh_secsize = sizeof (dof_sec_t);
11004 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */
11005 dof->dofh_secoff = sizeof (dof_hdr_t);
11006 dof->dofh_loadsz = len;
11007 dof->dofh_filesz = len;
11008 dof->dofh_pad = 0;
11009
11010 /*
11011 * Fill in the option section header...
11012 */
11013 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t));
11014 sec->dofs_type = DOF_SECT_OPTDESC;
11015 sec->dofs_align = sizeof (uint64_t);
11016 sec->dofs_flags = DOF_SECF_LOAD;
11017 sec->dofs_entsize = sizeof (dof_optdesc_t);
11018
11019 opt = (dof_optdesc_t *)((uintptr_t)sec +
11020 roundup(sizeof (dof_sec_t), sizeof (uint64_t)));
11021
11022 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof;
11023 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX;
11024
11025 for (i = 0; i < DTRACEOPT_MAX; i++) {
11026 opt[i].dofo_option = i;
11027 opt[i].dofo_strtab = DOF_SECIDX_NONE;
11028 opt[i].dofo_value = state->dts_options[i];
11029 }
11030
11031 return (dof);
11032 }
11033
11034 static dof_hdr_t *
11035 dtrace_dof_copyin(uintptr_t uarg, int *errp)
11036 {
11037 dof_hdr_t hdr, *dof;
11038
11039 ASSERT(!MUTEX_HELD(&dtrace_lock));
11040
11041 /*
11042 * First, we're going to copyin() the sizeof (dof_hdr_t).
11043 */
11044 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) {
11045 dtrace_dof_error(NULL, "failed to copyin DOF header");
11046 *errp = EFAULT;
11047 return (NULL);
11048 }
11049
11050 /*
11051 * Now we'll allocate the entire DOF and copy it in -- provided
11052 * that the length isn't outrageous.
11053 */
11054 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) {
11055 dtrace_dof_error(&hdr, "load size exceeds maximum");
11056 *errp = E2BIG;
11057 return (NULL);
11058 }
11059
11060 if (hdr.dofh_loadsz < sizeof (hdr)) {
11061 dtrace_dof_error(&hdr, "invalid load size");
11062 *errp = EINVAL;
11063 return (NULL);
11064 }
11065
11066 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP);
11067
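/*
 * Copy in the full object and re-verify the load size:  if the header
 * changed between the two copyin()s, fail with EFAULT rather than trusting
 * the new value.
 */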
11068 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0 ||
11069 dof->dofh_loadsz != hdr.dofh_loadsz) {
11070 kmem_free(dof, hdr.dofh_loadsz);
11071 *errp = EFAULT;
11072 return (NULL);
11073 }
11074
11075 return (dof);
11076 }
11077
11078 static dof_hdr_t *
11079 dtrace_dof_property(const char *name)
11080 {
11081 uchar_t *buf;
11082 uint64_t loadsz;
11083 unsigned int len, i;
11084 dof_hdr_t *dof;
11085
11086 /*
11087 * Unfortunately, arrays of values in .conf files are always (and
11088 * only) interpreted to be integer arrays. We must read our DOF
11089 * as an integer array, and then squeeze it into a byte array.
11090 */
11091 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0,
11092 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS)
11093 return (NULL);
11094
11095 for (i = 0; i < len; i++)
11096 buf[i] = (uchar_t)(((int *)buf)[i]);
11097
11098 if (len < sizeof (dof_hdr_t)) {
11099 ddi_prop_free(buf);
11100 dtrace_dof_error(NULL, "truncated header");
11101 return (NULL);
11102 }
11103
11104 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) {
11105 ddi_prop_free(buf);
11106 dtrace_dof_error(NULL, "truncated DOF");
11107 return (NULL);
11108 }
11109
11110 if (loadsz >= dtrace_dof_maxsize) {
11111 ddi_prop_free(buf);
11112 dtrace_dof_error(NULL, "oversized DOF");
11113 return (NULL);
11114 }
11115
11116 dof = kmem_alloc(loadsz, KM_SLEEP);
11117 bcopy(buf, dof, loadsz);
11118 ddi_prop_free(buf);
11119
11120 return (dof);
11121 }
11122
11123 static void
11124 dtrace_dof_destroy(dof_hdr_t *dof)
11125 {
11126 kmem_free(dof, dof->dofh_loadsz);
11127 }
11128
11129 /*
11130 * Return the dof_sec_t pointer corresponding to a given section index. If the
11131 * index is not valid, dtrace_dof_error() is called and NULL is returned. If
11132 * a type other than DOF_SECT_NONE is specified, the header is checked against
11133 * this type and NULL is returned if the types do not match.
11134 */
11135 static dof_sec_t *
11136 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i)
11137 {
11138 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)
11139 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize);
11140
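/*
 * Note that the section pointer is computed above but is not dereferenced
 * until the index and flag checks below have passed.
 */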
11141 if (i >= dof->dofh_secnum) {
11142 dtrace_dof_error(dof, "referenced section index is invalid");
11143 return (NULL);
11144 }
11145
11146 if (!(sec->dofs_flags & DOF_SECF_LOAD)) {
11147 dtrace_dof_error(dof, "referenced section is not loadable");
11148 return (NULL);
11149 }
11150
11151 if (type != DOF_SECT_NONE && type != sec->dofs_type) {
11152 dtrace_dof_error(dof, "referenced section is the wrong type");
11153 return (NULL);
11154 }
11155
11156 return (sec);
11157 }
11158
11159 static dtrace_probedesc_t *
11160 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc)
11161 {
11162 dof_probedesc_t *probe;
11163 dof_sec_t *strtab;
11164 uintptr_t daddr = (uintptr_t)dof;
11165 uintptr_t str;
11166 size_t size;
11167
11168 if (sec->dofs_type != DOF_SECT_PROBEDESC) {
11169 dtrace_dof_error(dof, "invalid probe section");
11170 return (NULL);
11171 }
11172
11173 if (sec->dofs_align != sizeof (dof_secidx_t)) {
11174 dtrace_dof_error(dof, "bad alignment in probe description");
11175 return (NULL);
11176 }
11177
11178 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) {
11179 dtrace_dof_error(dof, "truncated probe description");
11180 return (NULL);
11181 }
11182
11183 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset);
11184 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab);
11185
11186 if (strtab == NULL)
11187 return (NULL);
11188
11189 str = daddr + strtab->dofs_offset;
11190 size = strtab->dofs_size;
11191
11192 if (probe->dofp_provider >= strtab->dofs_size) {
11193 dtrace_dof_error(dof, "corrupt probe provider");
11194 return (NULL);
11195 }
11196
11197 (void) strncpy(desc->dtpd_provider,
11198 (char *)(str + probe->dofp_provider),
11199 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider));
11200
11201 if (probe->dofp_mod >= strtab->dofs_size) {
11202 dtrace_dof_error(dof, "corrupt probe module");
11203 return (NULL);
11204 }
11205
11206 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod),
11207 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod));
11208
11209 if (probe->dofp_func >= strtab->dofs_size) {
11210 dtrace_dof_error(dof, "corrupt probe function");
11211 return (NULL);
11212 }
11213
11214 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func),
11215 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func));
11216
11217 if (probe->dofp_name >= strtab->dofs_size) {
11218 dtrace_dof_error(dof, "corrupt probe name");
11219 return (NULL);
11220 }
11221
11222 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name),
11223 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name));
11224
11225 return (desc);
11226 }
11227
11228 static dtrace_difo_t *
11229 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11230 cred_t *cr)
11231 {
11232 dtrace_difo_t *dp;
11233 size_t ttl = 0;
11234 dof_difohdr_t *dofd;
11235 uintptr_t daddr = (uintptr_t)dof;
11236 size_t max = dtrace_difo_maxsize;
11237 int i, l, n;
11238
11239 static const struct {
11240 int section;
11241 int bufoffs;
11242 int lenoffs;
11243 int entsize;
11244 int align;
11245 const char *msg;
11246 } difo[] = {
11247 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf),
11248 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t),
11249 sizeof (dif_instr_t), "multiple DIF sections" },
11250
11251 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab),
11252 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t),
11253 sizeof (uint64_t), "multiple integer tables" },
11254
11255 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab),
11256 offsetof(dtrace_difo_t, dtdo_strlen), 0,
11257 sizeof (char), "multiple string tables" },
11258
11259 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab),
11260 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t),
11261 sizeof (uint_t), "multiple variable tables" },
11262
11263 { DOF_SECT_NONE, 0, 0, 0, NULL }
11264 };
11265
11266 if (sec->dofs_type != DOF_SECT_DIFOHDR) {
11267 dtrace_dof_error(dof, "invalid DIFO header section");
11268 return (NULL);
11269 }
11270
11271 if (sec->dofs_align != sizeof (dof_secidx_t)) {
11272 dtrace_dof_error(dof, "bad alignment in DIFO header");
11273 return (NULL);
11274 }
11275
11276 if (sec->dofs_size < sizeof (dof_difohdr_t) ||
11277 sec->dofs_size % sizeof (dof_secidx_t)) {
11278 dtrace_dof_error(dof, "bad size in DIFO header");
11279 return (NULL);
11280 }
11281
11282 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
11283 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1;
11284
11285 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP);
11286 dp->dtdo_rtype = dofd->dofd_rtype;
11287
11288 for (l = 0; l < n; l++) {
11289 dof_sec_t *subsec;
11290 void **bufp;
11291 uint32_t *lenp;
11292
11293 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE,
11294 dofd->dofd_links[l])) == NULL)
11295 goto err; /* invalid section link */
11296
11297 if (ttl + subsec->dofs_size > max) {
11298 dtrace_dof_error(dof, "exceeds maximum size");
11299 goto err;
11300 }
11301
11302 ttl += subsec->dofs_size;
11303
11304 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) {
11305 if (subsec->dofs_type != difo[i].section)
11306 continue;
11307
11308 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) {
11309 dtrace_dof_error(dof, "section not loaded");
11310 goto err;
11311 }
11312
11313 if (subsec->dofs_align != difo[i].align) {
11314 dtrace_dof_error(dof, "bad alignment");
11315 goto err;
11316 }
11317
11318 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs);
11319 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs);
11320
11321 if (*bufp != NULL) {
11322 dtrace_dof_error(dof, difo[i].msg);
11323 goto err;
11324 }
11325
11326 if (difo[i].entsize != subsec->dofs_entsize) {
11327 dtrace_dof_error(dof, "entry size mismatch");
11328 goto err;
11329 }
11330
11331 if (subsec->dofs_entsize != 0 &&
11332 (subsec->dofs_size % subsec->dofs_entsize) != 0) {
11333 dtrace_dof_error(dof, "corrupt entry size");
11334 goto err;
11335 }
11336
11337 *lenp = subsec->dofs_size;
11338 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP);
11339 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset),
11340 *bufp, subsec->dofs_size);
11341
11342 if (subsec->dofs_entsize != 0)
11343 *lenp /= subsec->dofs_entsize;
11344
11345 break;
11346 }
11347
11348 /*
11349 * If we encounter a loadable DIFO sub-section that is not
11350 * known to us, assume this is a broken program and fail.
11351 */
11352 if (difo[i].section == DOF_SECT_NONE &&
11353 (subsec->dofs_flags & DOF_SECF_LOAD)) {
11354 dtrace_dof_error(dof, "unrecognized DIFO subsection");
11355 goto err;
11356 }
11357 }
11358
11359 if (dp->dtdo_buf == NULL) {
11360 /*
11361 * We can't have a DIF object without DIF text.
11362 */
11363 dtrace_dof_error(dof, "missing DIF text");
11364 goto err;
11365 }
11366
11367 /*
11368 * Before we validate the DIF object, run through the variable table
11369 * looking for string variables -- if any of them has a size of zero,
11370 * we'll set its size to the system-wide default string size. Note that
11371 * this should _not_ happen if the "strsize" option has been set --
11372 * in this case, the compiler should have set the size to reflect the
11373 * setting of the option.
11374 */
11375 for (i = 0; i < dp->dtdo_varlen; i++) {
11376 dtrace_difv_t *v = &dp->dtdo_vartab[i];
11377 dtrace_diftype_t *t = &v->dtdv_type;
11378
11379 if (v->dtdv_id < DIF_VAR_OTHER_UBASE)
11380 continue;
11381
11382 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0)
11383 t->dtdt_size = dtrace_strsize_default;
11384 }
11385
11386 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0)
11387 goto err;
11388
11389 dtrace_difo_init(dp, vstate);
11390 return (dp);
11391
11392 err:
11393 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t));
11394 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t));
11395 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen);
11396 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t));
11397
11398 kmem_free(dp, sizeof (dtrace_difo_t));
11399 return (NULL);
11400 }
11401
11402 static dtrace_predicate_t *
11403 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11404 cred_t *cr)
11405 {
11406 dtrace_difo_t *dp;
11407
11408 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL)
11409 return (NULL);
11410
11411 return (dtrace_predicate_create(dp));
11412 }
11413
11414 static dtrace_actdesc_t *
11415 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11416 cred_t *cr)
11417 {
11418 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next;
11419 dof_actdesc_t *desc;
11420 dof_sec_t *difosec;
11421 size_t offs;
11422 uintptr_t daddr = (uintptr_t)dof;
11423 uint64_t arg;
11424 dtrace_actkind_t kind;
11425
11426 if (sec->dofs_type != DOF_SECT_ACTDESC) {
11427 dtrace_dof_error(dof, "invalid action section");
11428 return (NULL);
11429 }
11430
11431 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) {
11432 dtrace_dof_error(dof, "truncated action description");
11433 return (NULL);
11434 }
11435
11436 if (sec->dofs_align != sizeof (uint64_t)) {
11437 dtrace_dof_error(dof, "bad alignment in action description");
11438 return (NULL);
11439 }
11440
11441 if (sec->dofs_size < sec->dofs_entsize) {
11442 dtrace_dof_error(dof, "section entry size exceeds total size");
11443 return (NULL);
11444 }
11445
11446 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) {
11447 dtrace_dof_error(dof, "bad entry size in action description");
11448 return (NULL);
11449 }
11450
11451 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) {
11452 dtrace_dof_error(dof, "actions exceed dtrace_actions_max");
11453 return (NULL);
11454 }
11455
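/*
 * Walk each action descriptor in the section, building a chain of
 * dtrace_actdesc_t structures.  printf()-like actions must carry a format
 * string, which is copied out of the string table here.
 */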
11456 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) {
11457 desc = (dof_actdesc_t *)(daddr +
11458 (uintptr_t)sec->dofs_offset + offs);
11459 kind = (dtrace_actkind_t)desc->dofa_kind;
11460
11461 if (DTRACEACT_ISPRINTFLIKE(kind) &&
11462 (kind != DTRACEACT_PRINTA ||
11463 desc->dofa_strtab != DOF_SECIDX_NONE)) {
11464 dof_sec_t *strtab;
11465 char *str, *fmt;
11466 uint64_t i;
11467
11468 /*
11469 * printf()-like actions must have a format string.
11470 */
11471 if ((strtab = dtrace_dof_sect(dof,
11472 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL)
11473 goto err;
11474
11475 str = (char *)((uintptr_t)dof +
11476 (uintptr_t)strtab->dofs_offset);
11477
11478 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) {
11479 if (str[i] == '\0')
11480 break;
11481 }
11482
11483 if (i >= strtab->dofs_size) {
11484 dtrace_dof_error(dof, "bogus format string");
11485 goto err;
11486 }
11487
11488 if (i == desc->dofa_arg) {
11489 dtrace_dof_error(dof, "empty format string");
11490 goto err;
11491 }
11492
11493 i -= desc->dofa_arg;
11494 fmt = kmem_alloc(i + 1, KM_SLEEP);
11495 bcopy(&str[desc->dofa_arg], fmt, i + 1);
11496 arg = (uint64_t)(uintptr_t)fmt;
11497 } else {
11498 if (kind == DTRACEACT_PRINTA) {
11499 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE);
11500 arg = 0;
11501 } else {
11502 arg = desc->dofa_arg;
11503 }
11504 }
11505
11506 act = dtrace_actdesc_create(kind, desc->dofa_ntuple,
11507 desc->dofa_uarg, arg);
11508
11509 if (last != NULL) {
11510 last->dtad_next = act;
11511 } else {
11512 first = act;
11513 }
11514
11515 last = act;
11516
11517 if (desc->dofa_difo == DOF_SECIDX_NONE)
11518 continue;
11519
11520 if ((difosec = dtrace_dof_sect(dof,
11521 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL)
11522 goto err;
11523
11524 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr);
11525
11526 if (act->dtad_difo == NULL)
11527 goto err;
11528 }
11529
11530 ASSERT(first != NULL);
11531 return (first);
11532
11533 err:
11534 for (act = first; act != NULL; act = next) {
11535 next = act->dtad_next;
11536 dtrace_actdesc_release(act, vstate);
11537 }
11538
11539 return (NULL);
11540 }
11541
11542 static dtrace_ecbdesc_t *
11543 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate,
11544 cred_t *cr)
11545 {
11546 dtrace_ecbdesc_t *ep;
11547 dof_ecbdesc_t *ecb;
11548 dtrace_probedesc_t *desc;
11549 dtrace_predicate_t *pred = NULL;
11550
11551 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) {
11552 dtrace_dof_error(dof, "truncated ECB description");
11553 return (NULL);
11554 }
11555
11556 if (sec->dofs_align != sizeof (uint64_t)) {
11557 dtrace_dof_error(dof, "bad alignment in ECB description");
11558 return (NULL);
11559 }
11560
11561 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset);
11562 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes);
11563
11564 if (sec == NULL)
11565 return (NULL);
11566
11567 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP);
11568 ep->dted_uarg = ecb->dofe_uarg;
11569 desc = &ep->dted_probe;
11570
11571 if (dtrace_dof_probedesc(dof, sec, desc) == NULL)
11572 goto err;
11573
11574 if (ecb->dofe_pred != DOF_SECIDX_NONE) {
11575 if ((sec = dtrace_dof_sect(dof,
11576 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL)
11577 goto err;
11578
11579 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL)
11580 goto err;
11581
11582 ep->dted_pred.dtpdd_predicate = pred;
11583 }
11584
11585 if (ecb->dofe_actions != DOF_SECIDX_NONE) {
11586 if ((sec = dtrace_dof_sect(dof,
11587 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL)
11588 goto err;
11589
11590 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr);
11591
11592 if (ep->dted_action == NULL)
11593 goto err;
11594 }
11595
11596 return (ep);
11597
11598 err:
11599 if (pred != NULL)
11600 dtrace_predicate_release(pred, vstate);
11601 kmem_free(ep, sizeof (dtrace_ecbdesc_t));
11602 return (NULL);
11603 }
11604
11605 /*
11606 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the
11607 * specified DOF. At present, this amounts to simply adding 'ubase' to the
11608  * site of any user SETX relocations to account for the load object base
11608  * address.
11609 * In the future, if we need other relocations, this function can be extended.
11610 */
11611 static int
11612 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase)
11613 {
11614 uintptr_t daddr = (uintptr_t)dof;
11615 dof_relohdr_t *dofr =
11616 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset);
11617 dof_sec_t *ss, *rs, *ts;
11618 dof_relodesc_t *r;
11619 uint_t i, n;
11620
11621 if (sec->dofs_size < sizeof (dof_relohdr_t) ||
11622 sec->dofs_align != sizeof (dof_secidx_t)) {
11623 dtrace_dof_error(dof, "invalid relocation header");
11624 return (-1);
11625 }
11626
11627 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab);
11628 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec);
11629 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec);
11630
11631 if (ss == NULL || rs == NULL || ts == NULL)
11632 return (-1); /* dtrace_dof_error() has been called already */
11633
11634 if (rs->dofs_entsize < sizeof (dof_relodesc_t) ||
11635 rs->dofs_align != sizeof (uint64_t)) {
11636 dtrace_dof_error(dof, "invalid relocation section");
11637 return (-1);
11638 }
11639
11640 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset);
11641 n = rs->dofs_size / rs->dofs_entsize;
11642
11643 for (i = 0; i < n; i++) {
11644 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset;
11645
11646 switch (r->dofr_type) {
11647 case DOF_RELO_NONE:
11648 break;
11649 case DOF_RELO_SETX:
11650 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset +
11651 sizeof (uint64_t) > ts->dofs_size) {
11652 dtrace_dof_error(dof, "bad relocation offset");
11653 return (-1);
11654 }
11655
11656 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) {
11657 dtrace_dof_error(dof, "misaligned setx relo");
11658 return (-1);
11659 }
11660
11661 *(uint64_t *)taddr += ubase;
11662 break;
11663 default:
11664 dtrace_dof_error(dof, "invalid relocation type");
11665 return (-1);
11666 }
11667
11668 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize);
11669 }
11670
11671 return (0);
11672 }
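
/*
 * To illustrate what a DOF_RELO_SETX entry does: it simply adds the
 * load-object base address to a 64-bit slot in the target section.
 * A minimal user-level sketch over an array of hypothetical relocation
 * records (names and the struct are illustrative only; the real
 * records are the dof_relodesc_t entries validated above, and the real
 * code insists on alignment rather than using byte copies):
 *
 *	struct relo {
 *		uint64_t offset;
 *	};
 *
 *	static void
 *	apply_setx(char *target, size_t targetlen, const struct relo *r,
 *	    size_t nrelo, uint64_t ubase)
 *	{
 *		uint64_t v;
 *		size_t i;
 *
 *		for (i = 0; i < nrelo; i++) {
 *			if (r[i].offset > targetlen ||
 *			    r[i].offset + sizeof (v) > targetlen)
 *				continue;
 *			memcpy(&v, target + r[i].offset, sizeof (v));
 *			v += ubase;
 *			memcpy(target + r[i].offset, &v, sizeof (v));
 *		}
 *	}
 */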
11673
11674 /*
11675 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated
11676 * header: it should be at the front of a memory region that is at least
11677 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in
11678 * size. It need not be validated in any other way.
11679 */
11680 static int
11681 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr,
11682 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes)
11683 {
11684 uint64_t len = dof->dofh_loadsz, seclen;
11685 uintptr_t daddr = (uintptr_t)dof;
11686 dtrace_ecbdesc_t *ep;
11687 dtrace_enabling_t *enab;
11688 uint_t i;
11689
11690 ASSERT(MUTEX_HELD(&dtrace_lock));
11691 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t));
11692
11693 /*
11694 * Check the DOF header identification bytes. In addition to checking
11695 * valid settings, we also verify that unused bits/bytes are zeroed so
11696 * we can use them later without fear of regressing existing binaries.
11697 */
11698 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0],
11699 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) {
11700 dtrace_dof_error(dof, "DOF magic string mismatch");
11701 return (-1);
11702 }
11703
11704 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 &&
11705 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) {
11706 dtrace_dof_error(dof, "DOF has invalid data model");
11707 return (-1);
11708 }
11709
11710 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) {
11711 dtrace_dof_error(dof, "DOF encoding mismatch");
11712 return (-1);
11713 }
11714
11715 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
11716 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) {
11717 dtrace_dof_error(dof, "DOF version mismatch");
11718 return (-1);
11719 }
11720
11721 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) {
11722 dtrace_dof_error(dof, "DOF uses unsupported instruction set");
11723 return (-1);
11724 }
11725
11726 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) {
11727 dtrace_dof_error(dof, "DOF uses too many integer registers");
11728 return (-1);
11729 }
11730
11731 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) {
11732 dtrace_dof_error(dof, "DOF uses too many tuple registers");
11733 return (-1);
11734 }
11735
11736 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) {
11737 if (dof->dofh_ident[i] != 0) {
11738 dtrace_dof_error(dof, "DOF has invalid ident byte set");
11739 return (-1);
11740 }
11741 }
11742
11743 if (dof->dofh_flags & ~DOF_FL_VALID) {
11744 dtrace_dof_error(dof, "DOF has invalid flag bits set");
11745 return (-1);
11746 }
11747
11748 if (dof->dofh_secsize == 0) {
11749 dtrace_dof_error(dof, "zero section header size");
11750 return (-1);
11751 }
11752
11753 /*
11754 * Check that the section headers don't exceed the amount of DOF
11755 * data. Note that we cast the section size and number of sections
11756 * to uint64_t's to prevent possible overflow in the multiplication.
11757 */
11758 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize;
11759
11760 if (dof->dofh_secoff > len || seclen > len ||
11761 dof->dofh_secoff + seclen > len) {
11762 dtrace_dof_error(dof, "truncated section headers");
11763 return (-1);
11764 }
11765
11766 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) {
11767 dtrace_dof_error(dof, "misaligned section headers");
11768 return (-1);
11769 }
11770
11771 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) {
11772 dtrace_dof_error(dof, "misaligned section size");
11773 return (-1);
11774 }
11775
11776 /*
11777 * Take an initial pass through the section headers to be sure that
11778 * the headers don't have stray offsets. If the 'noprobes' flag is
11779 * set, do not permit sections relating to providers, probes, or args.
11780 */
11781 for (i = 0; i < dof->dofh_secnum; i++) {
11782 dof_sec_t *sec = (dof_sec_t *)(daddr +
11783 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
11784
11785 if (noprobes) {
11786 switch (sec->dofs_type) {
11787 case DOF_SECT_PROVIDER:
11788 case DOF_SECT_PROBES:
11789 case DOF_SECT_PRARGS:
11790 case DOF_SECT_PROFFS:
11791 dtrace_dof_error(dof, "illegal sections "
11792 "for enabling");
11793 return (-1);
11794 }
11795 }
11796
11797 if (DOF_SEC_ISLOADABLE(sec->dofs_type) &&
11798 !(sec->dofs_flags & DOF_SECF_LOAD)) {
11799 dtrace_dof_error(dof, "loadable section with load "
11800 "flag unset");
11801 return (-1);
11802 }
11803
11804 if (!(sec->dofs_flags & DOF_SECF_LOAD))
11805 continue; /* just ignore non-loadable sections */
11806
11807 if (sec->dofs_align & (sec->dofs_align - 1)) {
11808 dtrace_dof_error(dof, "bad section alignment");
11809 return (-1);
11810 }
11811
11812 if (sec->dofs_offset & (sec->dofs_align - 1)) {
11813 dtrace_dof_error(dof, "misaligned section");
11814 return (-1);
11815 }
11816
11817 if (sec->dofs_offset > len || sec->dofs_size > len ||
11818 sec->dofs_offset + sec->dofs_size > len) {
11819 dtrace_dof_error(dof, "corrupt section header");
11820 return (-1);
11821 }
11822
11823 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr +
11824 sec->dofs_offset + sec->dofs_size - 1) != '\0') {
11825 dtrace_dof_error(dof, "non-terminating string table");
11826 return (-1);
11827 }
11828 }
11829
11830 /*
11831 * Take a second pass through the sections and locate and perform any
11832 * relocations that are present. We do this after the first pass to
11833 * be sure that all sections have had their headers validated.
11834 */
11835 for (i = 0; i < dof->dofh_secnum; i++) {
11836 dof_sec_t *sec = (dof_sec_t *)(daddr +
11837 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
11838
11839 if (!(sec->dofs_flags & DOF_SECF_LOAD))
11840 continue; /* skip sections that are not loadable */
11841
11842 switch (sec->dofs_type) {
11843 case DOF_SECT_URELHDR:
11844 if (dtrace_dof_relocate(dof, sec, ubase) != 0)
11845 return (-1);
11846 break;
11847 }
11848 }
11849
11850 if ((enab = *enabp) == NULL)
11851 enab = *enabp = dtrace_enabling_create(vstate);
11852
11853 for (i = 0; i < dof->dofh_secnum; i++) {
11854 dof_sec_t *sec = (dof_sec_t *)(daddr +
11855 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
11856
11857 if (sec->dofs_type != DOF_SECT_ECBDESC)
11858 continue;
11859
11860 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) {
11861 dtrace_enabling_destroy(enab);
11862 *enabp = NULL;
11863 return (-1);
11864 }
11865
11866 dtrace_enabling_add(enab, ep);
11867 }
11868
11869 return (0);
11870 }
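
/*
 * A note on the section-header bounds check above: both the section
 * count and the header size are widened to uint64_t before the
 * multiplication, and the offset, the table length, and their sum are
 * each compared against the load size separately, so the 32-bit
 * multiplication cannot overflow into an apparently in-bounds value.
 * The same idiom, reduced to a standalone sketch with hypothetical
 * names:
 *
 *	static int
 *	table_fits(uint64_t len, uint32_t off, uint32_t nent, uint32_t entsz)
 *	{
 *		uint64_t tablen = (uint64_t)nent * (uint64_t)entsz;
 *
 *		return (off <= len && tablen <= len && off + tablen <= len);
 *	}
 */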
11871
11872 /*
11873 * Process DOF for any options. This routine assumes that the DOF has been
11874 * at least processed by dtrace_dof_slurp().
11875 */
11876 static int
11877 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state)
11878 {
11879 int i, rval;
11880 uint32_t entsize;
11881 size_t offs;
11882 dof_optdesc_t *desc;
11883
11884 for (i = 0; i < dof->dofh_secnum; i++) {
11885 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof +
11886 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize);
11887
11888 if (sec->dofs_type != DOF_SECT_OPTDESC)
11889 continue;
11890
11891 if (sec->dofs_align != sizeof (uint64_t)) {
11892 dtrace_dof_error(dof, "bad alignment in "
11893 "option description");
11894 return (EINVAL);
11895 }
11896
11897 if ((entsize = sec->dofs_entsize) == 0) {
11898 dtrace_dof_error(dof, "zeroed option entry size");
11899 return (EINVAL);
11900 }
11901
11902 if (entsize < sizeof (dof_optdesc_t)) {
11903 dtrace_dof_error(dof, "bad option entry size");
11904 return (EINVAL);
11905 }
11906
11907 for (offs = 0; offs < sec->dofs_size; offs += entsize) {
11908 desc = (dof_optdesc_t *)((uintptr_t)dof +
11909 (uintptr_t)sec->dofs_offset + offs);
11910
11911 if (desc->dofo_strtab != DOF_SECIDX_NONE) {
11912 dtrace_dof_error(dof, "non-zero option string");
11913 return (EINVAL);
11914 }
11915
11916 if (desc->dofo_value == DTRACEOPT_UNSET) {
11917 dtrace_dof_error(dof, "unset option");
11918 return (EINVAL);
11919 }
11920
11921 if ((rval = dtrace_state_option(state,
11922 desc->dofo_option, desc->dofo_value)) != 0) {
11923 dtrace_dof_error(dof, "rejected option");
11924 return (rval);
11925 }
11926 }
11927 }
11928
11929 return (0);
11930 }
11931
11932 /*
11933 * DTrace Consumer State Functions
11934 */
11935 int
11936 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size)
11937 {
11938 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize;
11939 void *base;
11940 uintptr_t limit;
11941 dtrace_dynvar_t *dvar, *next, *start;
11942 int i;
11943
11944 ASSERT(MUTEX_HELD(&dtrace_lock));
11945 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL);
11946
11947 bzero(dstate, sizeof (dtrace_dstate_t));
11948
11949 if ((dstate->dtds_chunksize = chunksize) == 0)
11950 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE;
11951
11952 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)))
11953 size = min;
11954
11955 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL)
11956 return (ENOMEM);
11957
11958 dstate->dtds_size = size;
11959 dstate->dtds_base = base;
11960 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP);
11961 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t));
11962
11963 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t));
11964
11965 if (hashsize != 1 && (hashsize & 1))
11966 hashsize--;
11967
11968 dstate->dtds_hashsize = hashsize;
11969 dstate->dtds_hash = dstate->dtds_base;
11970
11971 /*
11972 * Set all of our hash buckets to point to the single sink, and (if
11973 * it hasn't already been set), set the sink's hash value to be the
11974 * sink sentinel value. The sink is needed for dynamic variable
11975 * lookups to know that they have iterated over an entire, valid hash
11976 * chain.
11977 */
11978 for (i = 0; i < hashsize; i++)
11979 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink;
11980
11981 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK)
11982 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK;
11983
11984 /*
11985 * Determine number of active CPUs. Divide free list evenly among
11986 * active CPUs.
11987 */
11988 start = (dtrace_dynvar_t *)
11989 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t));
11990 limit = (uintptr_t)base + size;
11991
11992 maxper = (limit - (uintptr_t)start) / NCPU;
11993 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize;
11994
11995 for (i = 0; i < NCPU; i++) {
11996 dstate->dtds_percpu[i].dtdsc_free = dvar = start;
11997
11998 /*
11999 * If we don't even have enough chunks to make it once through
12000 * NCPUs, we're just going to allocate everything to the first
12001 * CPU. And if we're on the last CPU, we're going to allocate
12002 * whatever is left over. In either case, we set the limit to
12003 * be the limit of the dynamic variable space.
12004 */
12005 if (maxper == 0 || i == NCPU - 1) {
12006 limit = (uintptr_t)base + size;
12007 start = NULL;
12008 } else {
12009 limit = (uintptr_t)start + maxper;
12010 start = (dtrace_dynvar_t *)limit;
12011 }
12012
12013 ASSERT(limit <= (uintptr_t)base + size);
12014
12015 for (;;) {
12016 next = (dtrace_dynvar_t *)((uintptr_t)dvar +
12017 dstate->dtds_chunksize);
12018
12019 if ((uintptr_t)next + dstate->dtds_chunksize >= limit)
12020 break;
12021
12022 dvar->dtdv_next = next;
12023 dvar = next;
12024 }
12025
12026 if (maxper == 0)
12027 break;
12028 }
12029
12030 return (0);
12031 }
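
/*
 * To make the per-CPU split above concrete with purely hypothetical
 * numbers: if 1,000,000 bytes remain after the hash buckets, NCPU is 4
 * and the chunk size is 384 bytes, then
 *
 *	maxper = 1000000 / 4 = 250000
 *	maxper = (250000 / 384) * 384 = 249984
 *
 * so CPUs 0 through 2 each receive a 249,984-byte run (a whole number
 * of chunks) and the last CPU's run extends to the end of the space,
 * absorbing the rounding remainder.  If maxper works out to zero (the
 * space is smaller than NCPU chunks), the entire space is instead
 * handed to CPU 0, per the loop above.
 */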
12032
12033 void
12034 dtrace_dstate_fini(dtrace_dstate_t *dstate)
12035 {
12036 ASSERT(MUTEX_HELD(&cpu_lock));
12037
12038 if (dstate->dtds_base == NULL)
12039 return;
12040
12041 kmem_free(dstate->dtds_base, dstate->dtds_size);
12042 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu);
12043 }
12044
12045 static void
12046 dtrace_vstate_fini(dtrace_vstate_t *vstate)
12047 {
12048 /*
12049 * Logical XOR, where are you?
12050 */
12051 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL));
12052
12053 if (vstate->dtvs_nglobals > 0) {
12054 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals *
12055 sizeof (dtrace_statvar_t *));
12056 }
12057
12058 if (vstate->dtvs_ntlocals > 0) {
12059 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals *
12060 sizeof (dtrace_difv_t));
12061 }
12062
12063 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL));
12064
12065 if (vstate->dtvs_nlocals > 0) {
12066 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals *
12067 sizeof (dtrace_statvar_t *));
12068 }
12069 }
12070
12071 static void
12072 dtrace_state_clean(dtrace_state_t *state)
12073 {
12074 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE)
12075 return;
12076
12077 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars);
12078 dtrace_speculation_clean(state);
12079 }
12080
12081 static void
12082 dtrace_state_deadman(dtrace_state_t *state)
12083 {
12084 hrtime_t now;
12085
12086 dtrace_sync();
12087
12088 now = dtrace_gethrtime();
12089
12090 if (state != dtrace_anon.dta_state &&
12091 now - state->dts_laststatus >= dtrace_deadman_user)
12092 return;
12093
12094 /*
12095 * We must be sure that dts_alive never appears to be less than the
12096 * value upon entry to dtrace_state_deadman(), and because we lack a
12097 * dtrace_cas64(), we cannot store to it atomically. We thus instead
12098 * store INT64_MAX to it, followed by a memory barrier, followed by
12099 * the new value. This assures that dts_alive never appears to be
12100 * less than its true value, regardless of the order in which the
12101 * stores to the underlying storage are issued.
12102 */
12103 state->dts_alive = INT64_MAX;
12104 dtrace_membar_producer();
12105 state->dts_alive = now;
12106 }
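
/*
 * The dts_alive update above is a small lock-free idiom worth spelling
 * out: when a 64-bit value must never appear to move backwards but no
 * 64-bit compare-and-swap is available, first store an impossibly
 * large sentinel, then issue a producer barrier so the sentinel is
 * globally visible, and only then store the real value.  A sketch of
 * the same pattern for an arbitrary timestamp (hypothetical name;
 * relies on the same dtrace_membar_producer() used above):
 *
 *	static void
 *	timestamp_update(volatile hrtime_t *tsp, hrtime_t now)
 *	{
 *		*tsp = INT64_MAX;
 *		dtrace_membar_producer();
 *		*tsp = now;
 *	}
 *
 * Any observer racing with the update sees either the old value, the
 * sentinel, or the new value -- never something smaller than the value
 * that was current on entry.
 */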
12107
12108 dtrace_state_t *
12109 dtrace_state_create(dev_t *devp, cred_t *cr)
12110 {
12111 minor_t minor;
12112 major_t major;
12113 char c[30];
12114 dtrace_state_t *state;
12115 dtrace_optval_t *opt;
12116 int bufsize = NCPU * sizeof (dtrace_buffer_t), i;
12117
12118 ASSERT(MUTEX_HELD(&dtrace_lock));
12119 ASSERT(MUTEX_HELD(&cpu_lock));
12120
12121 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1,
12122 VM_BESTFIT | VM_SLEEP);
12123
12124 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) {
12125 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
12126 return (NULL);
12127 }
12128
12129 state = ddi_get_soft_state(dtrace_softstate, minor);
12130 state->dts_epid = DTRACE_EPIDNONE + 1;
12131
12132 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", minor);
12133 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1,
12134 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
12135
12136 if (devp != NULL) {
12137 major = getemajor(*devp);
12138 } else {
12139 major = ddi_driver_major(dtrace_devi);
12140 }
12141
12142 state->dts_dev = makedevice(major, minor);
12143
12144 if (devp != NULL)
12145 *devp = state->dts_dev;
12146
12147 /*
12148 * We allocate NCPU buffers. On the one hand, this can be quite
12149 * a bit of memory per instance (nearly 36K on a Starcat). On the
12150 * other hand, it saves an additional memory reference in the probe
12151 * path.
12152 */
12153 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP);
12154 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP);
12155 state->dts_cleaner = CYCLIC_NONE;
12156 state->dts_deadman = CYCLIC_NONE;
12157 state->dts_vstate.dtvs_state = state;
12158
12159 for (i = 0; i < DTRACEOPT_MAX; i++)
12160 state->dts_options[i] = DTRACEOPT_UNSET;
12161
12162 /*
12163 * Set the default options.
12164 */
12165 opt = state->dts_options;
12166 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH;
12167 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO;
12168 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default;
12169 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default;
12170 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL;
12171 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default;
12172 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default;
12173 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default;
12174 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default;
12175 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default;
12176 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default;
12177 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default;
12178 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default;
12179 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default;
12180
12181 state->dts_activity = DTRACE_ACTIVITY_INACTIVE;
12182
12183 /*
12184 * Depending on the user credentials, we set flag bits which alter probe
12185 * visibility or the amount of destructiveness allowed. In the case of
12186 * actual anonymous tracing, or the possession of all privileges, all of
12187 * the normal checks are bypassed.
12188 */
12189 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) {
12190 state->dts_cred.dcr_visible = DTRACE_CRV_ALL;
12191 state->dts_cred.dcr_action = DTRACE_CRA_ALL;
12192 } else {
12193 /*
12194 * Set up the credentials for this instantiation. We take a
12195 * hold on the credential to prevent it from disappearing on
12196 * us; this in turn prevents the zone_t referenced by this
12197 * credential from disappearing. This means that we can
12198 * examine the credential and the zone from probe context.
12199 */
12200 crhold(cr);
12201 state->dts_cred.dcr_cred = cr;
12202
12203 /*
12204 * CRA_PROC means "we have *some* privilege for dtrace" and
12205 * unlocks the use of variables like pid, zonename, etc.
12206 */
12207 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) ||
12208 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
12209 state->dts_cred.dcr_action |= DTRACE_CRA_PROC;
12210 }
12211
12212 /*
12213 * dtrace_user allows use of syscall and profile providers.
12214 * If the user also has proc_owner and/or proc_zone, we
12215 * extend the scope to include additional visibility and
12216 * destructive power.
12217 */
12218 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) {
12219 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) {
12220 state->dts_cred.dcr_visible |=
12221 DTRACE_CRV_ALLPROC;
12222
12223 state->dts_cred.dcr_action |=
12224 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12225 }
12226
12227 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) {
12228 state->dts_cred.dcr_visible |=
12229 DTRACE_CRV_ALLZONE;
12230
12231 state->dts_cred.dcr_action |=
12232 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12233 }
12234
12235 /*
12236 * If we have all privs in whatever zone this is,
12237 * we can do destructive things to processes which
12238 * have altered credentials.
12239 */
12240 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
12241 cr->cr_zone->zone_privset)) {
12242 state->dts_cred.dcr_action |=
12243 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
12244 }
12245 }
12246
12247 /*
12248 * Holding the dtrace_kernel privilege also implies that
12249 * the user has the dtrace_user privilege from a visibility
12250 * perspective. But without further privileges, some
12251 * destructive actions are not available.
12252 */
12253 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) {
12254 /*
12255 * Make all probes in all zones visible. However,
12256 * this doesn't mean that all actions become available
12257 * to all zones.
12258 */
12259 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL |
12260 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE;
12261
12262 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL |
12263 DTRACE_CRA_PROC;
12264 /*
12265 * Holding proc_owner means that destructive actions
12266 * for *this* zone are allowed.
12267 */
12268 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
12269 state->dts_cred.dcr_action |=
12270 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12271
12272 /*
12273 * Holding proc_zone means that destructive actions
12274 		 * for this user/group ID in all zones are allowed.
12275 */
12276 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
12277 state->dts_cred.dcr_action |=
12278 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12279
12280 /*
12281 * If we have all privs in whatever zone this is,
12282 * we can do destructive things to processes which
12283 * have altered credentials.
12284 */
12285 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE),
12286 cr->cr_zone->zone_privset)) {
12287 state->dts_cred.dcr_action |=
12288 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG;
12289 }
12290 }
12291
12292 /*
12293 * Holding the dtrace_proc privilege gives control over fasttrap
12294 * and pid providers. We need to grant wider destructive
12295 * privileges in the event that the user has proc_owner and/or
12296 * proc_zone.
12297 */
12298 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) {
12299 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE))
12300 state->dts_cred.dcr_action |=
12301 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER;
12302
12303 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE))
12304 state->dts_cred.dcr_action |=
12305 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE;
12306 }
12307 }
12308
12309 return (state);
12310 }
12311
12312 static int
12313 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which)
12314 {
12315 dtrace_optval_t *opt = state->dts_options, size;
12316 processorid_t cpu;
12317 int flags = 0, rval;
12318
12319 ASSERT(MUTEX_HELD(&dtrace_lock));
12320 ASSERT(MUTEX_HELD(&cpu_lock));
12321 ASSERT(which < DTRACEOPT_MAX);
12322 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE ||
12323 (state == dtrace_anon.dta_state &&
12324 state->dts_activity == DTRACE_ACTIVITY_ACTIVE));
12325
12326 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0)
12327 return (0);
12328
12329 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET)
12330 cpu = opt[DTRACEOPT_CPU];
12331
12332 if (which == DTRACEOPT_SPECSIZE)
12333 flags |= DTRACEBUF_NOSWITCH;
12334
12335 if (which == DTRACEOPT_BUFSIZE) {
12336 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING)
12337 flags |= DTRACEBUF_RING;
12338
12339 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL)
12340 flags |= DTRACEBUF_FILL;
12341
12342 if (state != dtrace_anon.dta_state ||
12343 state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
12344 flags |= DTRACEBUF_INACTIVE;
12345 }
12346
12347 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) {
12348 /*
12349 * The size must be 8-byte aligned. If the size is not 8-byte
12350 * aligned, drop it down by the difference.
12351 */
12352 if (size & (sizeof (uint64_t) - 1))
12353 size -= size & (sizeof (uint64_t) - 1);
12354
12355 if (size < state->dts_reserve) {
12356 /*
12357 * Buffers always must be large enough to accommodate
12358 * their prereserved space. We return E2BIG instead
12359 * of ENOMEM in this case to allow for user-level
12360 * software to differentiate the cases.
12361 */
12362 return (E2BIG);
12363 }
12364
12365 rval = dtrace_buffer_alloc(buf, size, flags, cpu);
12366
12367 if (rval != ENOMEM) {
12368 opt[which] = size;
12369 return (rval);
12370 }
12371
12372 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
12373 return (rval);
12374 }
12375
12376 return (ENOMEM);
12377 }
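
/*
 * The allocation loop above implements a simple policy: try the
 * requested size and, on ENOMEM, halve it and retry (unless the
 * consumer asked for manual buffer resizing), failing outright only
 * once the size can no longer cover the prereserved space.  The same
 * retry shape, reduced to a sketch in which the real per-CPU buffer
 * allocator is replaced by a hypothetical try_alloc() and the policy
 * and bound names are illustrative:
 *
 *	for (size = request; size >= sizeof (uint64_t); size >>= 1) {
 *		size &= ~(sizeof (uint64_t) - 1);
 *		if (size < reserve)
 *			return (E2BIG);
 *		if ((rv = try_alloc(size)) != ENOMEM)
 *			return (rv);
 *		if (policy == RESIZE_MANUAL)
 *			return (ENOMEM);
 *	}
 *	return (ENOMEM);
 */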
12378
12379 static int
12380 dtrace_state_buffers(dtrace_state_t *state)
12381 {
12382 dtrace_speculation_t *spec = state->dts_speculations;
12383 int rval, i;
12384
12385 if ((rval = dtrace_state_buffer(state, state->dts_buffer,
12386 DTRACEOPT_BUFSIZE)) != 0)
12387 return (rval);
12388
12389 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer,
12390 DTRACEOPT_AGGSIZE)) != 0)
12391 return (rval);
12392
12393 for (i = 0; i < state->dts_nspeculations; i++) {
12394 if ((rval = dtrace_state_buffer(state,
12395 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0)
12396 return (rval);
12397 }
12398
12399 return (0);
12400 }
12401
12402 static void
12403 dtrace_state_prereserve(dtrace_state_t *state)
12404 {
12405 dtrace_ecb_t *ecb;
12406 dtrace_probe_t *probe;
12407
12408 state->dts_reserve = 0;
12409
12410 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL)
12411 return;
12412
12413 /*
12414 * If our buffer policy is a "fill" buffer policy, we need to set the
12415 * prereserved space to be the space required by the END probes.
12416 */
12417 probe = dtrace_probes[dtrace_probeid_end - 1];
12418 ASSERT(probe != NULL);
12419
12420 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) {
12421 if (ecb->dte_state != state)
12422 continue;
12423
12424 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment;
12425 }
12426 }
12427
12428 static int
12429 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu)
12430 {
12431 dtrace_optval_t *opt = state->dts_options, sz, nspec;
12432 dtrace_speculation_t *spec;
12433 dtrace_buffer_t *buf;
12434 cyc_handler_t hdlr;
12435 cyc_time_t when;
12436 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t);
12437 dtrace_icookie_t cookie;
12438
12439 mutex_enter(&cpu_lock);
12440 mutex_enter(&dtrace_lock);
12441
12442 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
12443 rval = EBUSY;
12444 goto out;
12445 }
12446
12447 /*
12448 * Before we can perform any checks, we must prime all of the
12449 * retained enablings that correspond to this state.
12450 */
12451 dtrace_enabling_prime(state);
12452
12453 if (state->dts_destructive && !state->dts_cred.dcr_destructive) {
12454 rval = EACCES;
12455 goto out;
12456 }
12457
12458 dtrace_state_prereserve(state);
12459
12460 /*
12461 	 * Now what we want to do is try to allocate our speculations.
12462 * We do not automatically resize the number of speculations; if
12463 * this fails, we will fail the operation.
12464 */
12465 nspec = opt[DTRACEOPT_NSPEC];
12466 ASSERT(nspec != DTRACEOPT_UNSET);
12467
12468 if (nspec > INT_MAX) {
12469 rval = ENOMEM;
12470 goto out;
12471 }
12472
12473 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP);
12474
12475 if (spec == NULL) {
12476 rval = ENOMEM;
12477 goto out;
12478 }
12479
12480 state->dts_speculations = spec;
12481 state->dts_nspeculations = (int)nspec;
12482
12483 for (i = 0; i < nspec; i++) {
12484 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) {
12485 rval = ENOMEM;
12486 goto err;
12487 }
12488
12489 spec[i].dtsp_buffer = buf;
12490 }
12491
12492 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) {
12493 if (dtrace_anon.dta_state == NULL) {
12494 rval = ENOENT;
12495 goto out;
12496 }
12497
12498 if (state->dts_necbs != 0) {
12499 rval = EALREADY;
12500 goto out;
12501 }
12502
12503 state->dts_anon = dtrace_anon_grab();
12504 ASSERT(state->dts_anon != NULL);
12505 state = state->dts_anon;
12506
12507 /*
12508 * We want "grabanon" to be set in the grabbed state, so we'll
12509 * copy that option value from the grabbing state into the
12510 * grabbed state.
12511 */
12512 state->dts_options[DTRACEOPT_GRABANON] =
12513 opt[DTRACEOPT_GRABANON];
12514
12515 *cpu = dtrace_anon.dta_beganon;
12516
12517 /*
12518 * If the anonymous state is active (as it almost certainly
12519 * is if the anonymous enabling ultimately matched anything),
12520 * we don't allow any further option processing -- but we
12521 * don't return failure.
12522 */
12523 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
12524 goto out;
12525 }
12526
12527 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET &&
12528 opt[DTRACEOPT_AGGSIZE] != 0) {
12529 if (state->dts_aggregations == NULL) {
12530 /*
12531 * We're not going to create an aggregation buffer
12532 * because we don't have any ECBs that contain
12533 * aggregations -- set this option to 0.
12534 */
12535 opt[DTRACEOPT_AGGSIZE] = 0;
12536 } else {
12537 /*
12538 * If we have an aggregation buffer, we must also have
12539 * a buffer to use as scratch.
12540 */
12541 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET ||
12542 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) {
12543 opt[DTRACEOPT_BUFSIZE] = state->dts_needed;
12544 }
12545 }
12546 }
12547
12548 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET &&
12549 opt[DTRACEOPT_SPECSIZE] != 0) {
12550 if (!state->dts_speculates) {
12551 /*
12552 * We're not going to create speculation buffers
12553 * because we don't have any ECBs that actually
12554 * speculate -- set the speculation size to 0.
12555 */
12556 opt[DTRACEOPT_SPECSIZE] = 0;
12557 }
12558 }
12559
12560 /*
12561 * The bare minimum size for any buffer that we're actually going to
12562 * do anything to is sizeof (uint64_t).
12563 */
12564 sz = sizeof (uint64_t);
12565
12566 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) ||
12567 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) ||
12568 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) {
12569 /*
12570 * A buffer size has been explicitly set to 0 (or to a size
12571 * that will be adjusted to 0) and we need the space -- we
12572 * need to return failure. We return ENOSPC to differentiate
12573 * it from failing to allocate a buffer due to failure to meet
12574 * the reserve (for which we return E2BIG).
12575 */
12576 rval = ENOSPC;
12577 goto out;
12578 }
12579
12580 if ((rval = dtrace_state_buffers(state)) != 0)
12581 goto err;
12582
12583 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET)
12584 sz = dtrace_dstate_defsize;
12585
12586 do {
12587 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz);
12588
12589 if (rval == 0)
12590 break;
12591
12592 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL)
12593 goto err;
12594 } while (sz >>= 1);
12595
12596 opt[DTRACEOPT_DYNVARSIZE] = sz;
12597
12598 if (rval != 0)
12599 goto err;
12600
12601 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max)
12602 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max;
12603
12604 if (opt[DTRACEOPT_CLEANRATE] == 0)
12605 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
12606
12607 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min)
12608 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min;
12609
12610 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max)
12611 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max;
12612
12613 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean;
12614 hdlr.cyh_arg = state;
12615 hdlr.cyh_level = CY_LOW_LEVEL;
12616
12617 when.cyt_when = 0;
12618 when.cyt_interval = opt[DTRACEOPT_CLEANRATE];
12619
12620 state->dts_cleaner = cyclic_add(&hdlr, &when);
12621
12622 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman;
12623 hdlr.cyh_arg = state;
12624 hdlr.cyh_level = CY_LOW_LEVEL;
12625
12626 when.cyt_when = 0;
12627 when.cyt_interval = dtrace_deadman_interval;
12628
12629 state->dts_alive = state->dts_laststatus = dtrace_gethrtime();
12630 state->dts_deadman = cyclic_add(&hdlr, &when);
12631
12632 state->dts_activity = DTRACE_ACTIVITY_WARMUP;
12633
12634 /*
12635 * Now it's time to actually fire the BEGIN probe. We need to disable
12636 * interrupts here both to record the CPU on which we fired the BEGIN
12637 * probe (the data from this CPU will be processed first at user
12638 * level) and to manually activate the buffer for this CPU.
12639 */
12640 cookie = dtrace_interrupt_disable();
12641 *cpu = CPU->cpu_id;
12642 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE);
12643 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE;
12644
12645 dtrace_probe(dtrace_probeid_begin,
12646 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
12647 dtrace_interrupt_enable(cookie);
12648 /*
12649 * We may have had an exit action from a BEGIN probe; only change our
12650 * state to ACTIVE if we're still in WARMUP.
12651 */
12652 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP ||
12653 state->dts_activity == DTRACE_ACTIVITY_DRAINING);
12654
12655 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP)
12656 state->dts_activity = DTRACE_ACTIVITY_ACTIVE;
12657
12658 /*
12659 	 * Regardless of whether we're now in ACTIVE or DRAINING, we
12660 * want each CPU to transition its principal buffer out of the
12661 * INACTIVE state. Doing this assures that no CPU will suddenly begin
12662 * processing an ECB halfway down a probe's ECB chain; all CPUs will
12663 * atomically transition from processing none of a state's ECBs to
12664 * processing all of them.
12665 */
12666 dtrace_xcall(DTRACE_CPUALL,
12667 (dtrace_xcall_t)dtrace_buffer_activate, state);
12668 goto out;
12669
12670 err:
12671 dtrace_buffer_free(state->dts_buffer);
12672 dtrace_buffer_free(state->dts_aggbuffer);
12673
12674 if ((nspec = state->dts_nspeculations) == 0) {
12675 ASSERT(state->dts_speculations == NULL);
12676 goto out;
12677 }
12678
12679 spec = state->dts_speculations;
12680 ASSERT(spec != NULL);
12681
12682 for (i = 0; i < state->dts_nspeculations; i++) {
12683 if ((buf = spec[i].dtsp_buffer) == NULL)
12684 break;
12685
12686 dtrace_buffer_free(buf);
12687 kmem_free(buf, bufsize);
12688 }
12689
12690 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
12691 state->dts_nspeculations = 0;
12692 state->dts_speculations = NULL;
12693
12694 out:
12695 mutex_exit(&dtrace_lock);
12696 mutex_exit(&cpu_lock);
12697
12698 return (rval);
12699 }
12700
12701 static int
12702 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu)
12703 {
12704 dtrace_icookie_t cookie;
12705
12706 ASSERT(MUTEX_HELD(&dtrace_lock));
12707
12708 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE &&
12709 state->dts_activity != DTRACE_ACTIVITY_DRAINING)
12710 return (EINVAL);
12711
12712 /*
12713 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync
12714 * to be sure that every CPU has seen it. See below for the details
12715 * on why this is done.
12716 */
12717 state->dts_activity = DTRACE_ACTIVITY_DRAINING;
12718 dtrace_sync();
12719
12720 /*
12721 * By this point, it is impossible for any CPU to be still processing
12722 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to
12723 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any
12724 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe()
12725 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN
12726 * iff we're in the END probe.
12727 */
12728 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN;
12729 dtrace_sync();
12730 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN);
12731
12732 /*
12733 * Finally, we can release the reserve and call the END probe. We
12734 * disable interrupts across calling the END probe to allow us to
12735 * return the CPU on which we actually called the END probe. This
12736 * allows user-land to be sure that this CPU's principal buffer is
12737 * processed last.
12738 */
12739 state->dts_reserve = 0;
12740
12741 cookie = dtrace_interrupt_disable();
12742 *cpu = CPU->cpu_id;
12743 dtrace_probe(dtrace_probeid_end,
12744 (uint64_t)(uintptr_t)state, 0, 0, 0, 0);
12745 dtrace_interrupt_enable(cookie);
12746
12747 state->dts_activity = DTRACE_ACTIVITY_STOPPED;
12748 dtrace_sync();
12749
12750 return (0);
12751 }
12752
12753 static int
12754 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option,
12755 dtrace_optval_t val)
12756 {
12757 ASSERT(MUTEX_HELD(&dtrace_lock));
12758
12759 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE)
12760 return (EBUSY);
12761
12762 if (option >= DTRACEOPT_MAX)
12763 return (EINVAL);
12764
12765 if (option != DTRACEOPT_CPU && val < 0)
12766 return (EINVAL);
12767
12768 switch (option) {
12769 case DTRACEOPT_DESTRUCTIVE:
12770 if (dtrace_destructive_disallow)
12771 return (EACCES);
12772
12773 state->dts_cred.dcr_destructive = 1;
12774 break;
12775
12776 case DTRACEOPT_BUFSIZE:
12777 case DTRACEOPT_DYNVARSIZE:
12778 case DTRACEOPT_AGGSIZE:
12779 case DTRACEOPT_SPECSIZE:
12780 case DTRACEOPT_STRSIZE:
12781 if (val < 0)
12782 return (EINVAL);
12783
12784 if (val >= LONG_MAX) {
12785 /*
12786 * If this is an otherwise negative value, set it to
12787 * the highest multiple of 128m less than LONG_MAX.
12788 * Technically, we're adjusting the size without
12789 * regard to the buffer resizing policy, but in fact,
12790 * this has no effect -- if we set the buffer size to
12791 * ~LONG_MAX and the buffer policy is ultimately set to
12792 * be "manual", the buffer allocation is guaranteed to
12793 * fail, if only because the allocation requires two
12794 			 * buffers. (We set the size to the highest
12795 * multiple of 128m because it ensures that the size
12796 * will remain a multiple of a megabyte when
12797 * repeatedly halved -- all the way down to 15m.)
12798 */
12799 val = LONG_MAX - (1 << 27) + 1;
12800 }
12801 }
12802
12803 state->dts_options[option] = val;
12804
12805 return (0);
12806 }
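
/*
 * To see why the clamp above lands on the highest multiple of 128m
 * below LONG_MAX, take the ILP32 case, where LONG_MAX is 2^31 - 1:
 *
 *	LONG_MAX - (1 << 27) + 1 = 2^31 - 2^27 = 2013265920 = 1920m
 *
 * and repeated halving gives 960m, 480m, 240m, 120m, 60m, 30m and
 * finally 15m, every step remaining a whole number of megabytes --
 * the property the comment above relies on.
 */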
12807
12808 static void
12809 dtrace_state_destroy(dtrace_state_t *state)
12810 {
12811 dtrace_ecb_t *ecb;
12812 dtrace_vstate_t *vstate = &state->dts_vstate;
12813 minor_t minor = getminor(state->dts_dev);
12814 int i, bufsize = NCPU * sizeof (dtrace_buffer_t);
12815 dtrace_speculation_t *spec = state->dts_speculations;
12816 int nspec = state->dts_nspeculations;
12817 uint32_t match;
12818
12819 ASSERT(MUTEX_HELD(&dtrace_lock));
12820 ASSERT(MUTEX_HELD(&cpu_lock));
12821
12822 /*
12823 * First, retract any retained enablings for this state.
12824 */
12825 dtrace_enabling_retract(state);
12826 ASSERT(state->dts_nretained == 0);
12827
12828 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE ||
12829 state->dts_activity == DTRACE_ACTIVITY_DRAINING) {
12830 /*
12831 * We have managed to come into dtrace_state_destroy() on a
12832 * hot enabling -- almost certainly because of a disorderly
12833 * shutdown of a consumer. (That is, a consumer that is
12834 * exiting without having called dtrace_stop().) In this case,
12835 * we're going to set our activity to be KILLED, and then
12836 * issue a sync to be sure that everyone is out of probe
12837 * context before we start blowing away ECBs.
12838 */
12839 state->dts_activity = DTRACE_ACTIVITY_KILLED;
12840 dtrace_sync();
12841 }
12842
12843 /*
12844 * Release the credential hold we took in dtrace_state_create().
12845 */
12846 if (state->dts_cred.dcr_cred != NULL)
12847 crfree(state->dts_cred.dcr_cred);
12848
12849 /*
12850 * Now we can safely disable and destroy any enabled probes. Because
12851 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress
12852 * (especially if they're all enabled), we take two passes through the
12853 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and
12854 * in the second we disable whatever is left over.
12855 */
12856 for (match = DTRACE_PRIV_KERNEL; ; match = 0) {
12857 for (i = 0; i < state->dts_necbs; i++) {
12858 if ((ecb = state->dts_ecbs[i]) == NULL)
12859 continue;
12860
12861 if (match && ecb->dte_probe != NULL) {
12862 dtrace_probe_t *probe = ecb->dte_probe;
12863 dtrace_provider_t *prov = probe->dtpr_provider;
12864
12865 if (!(prov->dtpv_priv.dtpp_flags & match))
12866 continue;
12867 }
12868
12869 dtrace_ecb_disable(ecb);
12870 dtrace_ecb_destroy(ecb);
12871 }
12872
12873 if (!match)
12874 break;
12875 }
12876
12877 /*
12878 * Before we free the buffers, perform one more sync to assure that
12879 * every CPU is out of probe context.
12880 */
12881 dtrace_sync();
12882
12883 dtrace_buffer_free(state->dts_buffer);
12884 dtrace_buffer_free(state->dts_aggbuffer);
12885
12886 for (i = 0; i < nspec; i++)
12887 dtrace_buffer_free(spec[i].dtsp_buffer);
12888
12889 if (state->dts_cleaner != CYCLIC_NONE)
12890 cyclic_remove(state->dts_cleaner);
12891
12892 if (state->dts_deadman != CYCLIC_NONE)
12893 cyclic_remove(state->dts_deadman);
12894
12895 dtrace_dstate_fini(&vstate->dtvs_dynvars);
12896 dtrace_vstate_fini(vstate);
12897 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *));
12898
12899 if (state->dts_aggregations != NULL) {
12900 #ifdef DEBUG
12901 for (i = 0; i < state->dts_naggregations; i++)
12902 ASSERT(state->dts_aggregations[i] == NULL);
12903 #endif
12904 ASSERT(state->dts_naggregations > 0);
12905 kmem_free(state->dts_aggregations,
12906 state->dts_naggregations * sizeof (dtrace_aggregation_t *));
12907 }
12908
12909 kmem_free(state->dts_buffer, bufsize);
12910 kmem_free(state->dts_aggbuffer, bufsize);
12911
12912 for (i = 0; i < nspec; i++)
12913 kmem_free(spec[i].dtsp_buffer, bufsize);
12914
12915 kmem_free(spec, nspec * sizeof (dtrace_speculation_t));
12916
12917 dtrace_format_destroy(state);
12918
12919 vmem_destroy(state->dts_aggid_arena);
12920 ddi_soft_state_free(dtrace_softstate, minor);
12921 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1);
12922 }
12923
12924 /*
12925 * DTrace Anonymous Enabling Functions
12926 */
12927 static dtrace_state_t *
12928 dtrace_anon_grab(void)
12929 {
12930 dtrace_state_t *state;
12931
12932 ASSERT(MUTEX_HELD(&dtrace_lock));
12933
12934 if ((state = dtrace_anon.dta_state) == NULL) {
12935 ASSERT(dtrace_anon.dta_enabling == NULL);
12936 return (NULL);
12937 }
12938
12939 ASSERT(dtrace_anon.dta_enabling != NULL);
12940 ASSERT(dtrace_retained != NULL);
12941
12942 dtrace_enabling_destroy(dtrace_anon.dta_enabling);
12943 dtrace_anon.dta_enabling = NULL;
12944 dtrace_anon.dta_state = NULL;
12945
12946 return (state);
12947 }
12948
12949 static void
12950 dtrace_anon_property(void)
12951 {
12952 int i, rv;
12953 dtrace_state_t *state;
12954 dof_hdr_t *dof;
12955 char c[32]; /* enough for "dof-data-" + digits */
12956
12957 ASSERT(MUTEX_HELD(&dtrace_lock));
12958 ASSERT(MUTEX_HELD(&cpu_lock));
12959
12960 for (i = 0; ; i++) {
12961 (void) snprintf(c, sizeof (c), "dof-data-%d", i);
12962
12963 dtrace_err_verbose = 1;
12964
12965 if ((dof = dtrace_dof_property(c)) == NULL) {
12966 dtrace_err_verbose = 0;
12967 break;
12968 }
12969
12970 /*
12971 * We want to create anonymous state, so we need to transition
12972 * the kernel debugger to indicate that DTrace is active. If
12973 * this fails (e.g. because the debugger has modified text in
12974 * some way), we won't continue with the processing.
12975 */
12976 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
12977 cmn_err(CE_NOTE, "kernel debugger active; anonymous "
12978 "enabling ignored.");
12979 dtrace_dof_destroy(dof);
12980 break;
12981 }
12982
12983 /*
12984 * If we haven't allocated an anonymous state, we'll do so now.
12985 */
12986 if ((state = dtrace_anon.dta_state) == NULL) {
12987 state = dtrace_state_create(NULL, NULL);
12988 dtrace_anon.dta_state = state;
12989
12990 if (state == NULL) {
12991 /*
12992 * This basically shouldn't happen: the only
12993 * failure mode from dtrace_state_create() is a
12994 * failure of ddi_soft_state_zalloc() that
12995 * itself should never happen. Still, the
12996 * interface allows for a failure mode, and
12997 * we want to fail as gracefully as possible:
12998 * we'll emit an error message and cease
12999 * processing anonymous state in this case.
13000 */
13001 cmn_err(CE_WARN, "failed to create "
13002 "anonymous state");
13003 dtrace_dof_destroy(dof);
13004 break;
13005 }
13006 }
13007
13008 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(),
13009 &dtrace_anon.dta_enabling, 0, B_TRUE);
13010
13011 if (rv == 0)
13012 rv = dtrace_dof_options(dof, state);
13013
13014 dtrace_err_verbose = 0;
13015 dtrace_dof_destroy(dof);
13016
13017 if (rv != 0) {
13018 /*
13019 * This is malformed DOF; chuck any anonymous state
13020 * that we created.
13021 */
13022 ASSERT(dtrace_anon.dta_enabling == NULL);
13023 dtrace_state_destroy(state);
13024 dtrace_anon.dta_state = NULL;
13025 break;
13026 }
13027
13028 ASSERT(dtrace_anon.dta_enabling != NULL);
13029 }
13030
13031 if (dtrace_anon.dta_enabling != NULL) {
13032 int rval;
13033
13034 /*
13035 * dtrace_enabling_retain() can only fail because we are
13036 * trying to retain more enablings than are allowed -- but
13037 * we only have one anonymous enabling, and we are guaranteed
13038 * to be allowed at least one retained enabling; we assert
13039 * that dtrace_enabling_retain() returns success.
13040 */
13041 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling);
13042 ASSERT(rval == 0);
13043
13044 dtrace_enabling_dump(dtrace_anon.dta_enabling);
13045 }
13046 }
13047
13048 /*
13049 * DTrace Helper Functions
13050 */
13051 static void
13052 dtrace_helper_trace(dtrace_helper_action_t *helper,
13053 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where)
13054 {
13055 uint32_t size, next, nnext, i;
13056 dtrace_helptrace_t *ent;
13057 uint16_t flags = cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
13058
13059 if (!dtrace_helptrace_enabled)
13060 return;
13061
13062 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals);
13063
13064 /*
13065 * What would a tracing framework be without its own tracing
13066 * framework? (Well, a hell of a lot simpler, for starters...)
13067 */
13068 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals *
13069 sizeof (uint64_t) - sizeof (uint64_t);
13070
13071 /*
13072 * Iterate until we can allocate a slot in the trace buffer.
13073 */
13074 do {
13075 next = dtrace_helptrace_next;
13076
13077 if (next + size < dtrace_helptrace_bufsize) {
13078 nnext = next + size;
13079 } else {
13080 nnext = size;
13081 }
13082 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next);
13083
13084 /*
13085 * We have our slot; fill it in.
13086 */
13087 if (nnext == size)
13088 next = 0;
13089
13090 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next];
13091 ent->dtht_helper = helper;
13092 ent->dtht_where = where;
13093 ent->dtht_nlocals = vstate->dtvs_nlocals;
13094
13095 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ?
13096 mstate->dtms_fltoffs : -1;
13097 ent->dtht_fault = DTRACE_FLAGS2FLT(flags);
13098 ent->dtht_illval = cpu_core[CPU->cpu_id].cpuc_dtrace_illval;
13099
13100 for (i = 0; i < vstate->dtvs_nlocals; i++) {
13101 dtrace_statvar_t *svar;
13102
13103 if ((svar = vstate->dtvs_locals[i]) == NULL)
13104 continue;
13105
13106 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t));
13107 ent->dtht_locals[i] =
13108 ((uint64_t *)(uintptr_t)svar->dtsv_data)[CPU->cpu_id];
13109 }
13110 }
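
/*
 * The slot reservation above is a standard lock-free pattern: compute
 * the new cursor position from a snapshot and attempt to install it
 * with a compare-and-swap, retrying if another CPU advanced the cursor
 * in the meantime.  The same shape as a minimal user-level sketch
 * using C11 atomics (hypothetical names; the code above uses
 * dtrace_cas32() on dtrace_helptrace_next directly):
 *
 *	#include <stdatomic.h>
 *	#include <stdint.h>
 *
 *	static _Atomic uint32_t cursor;
 *
 *	static uint32_t
 *	reserve_slot(uint32_t size, uint32_t bufsize)
 *	{
 *		uint32_t next, nnext;
 *
 *		do {
 *			next = atomic_load(&cursor);
 *			nnext = (next + size < bufsize) ? next + size : size;
 *		} while (!atomic_compare_exchange_weak(&cursor, &next, nnext));
 *
 *		return (nnext == size ? 0 : next);
 *	}
 */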
13111
13112 static uint64_t
13113 dtrace_helper(int which, dtrace_mstate_t *mstate,
13114 dtrace_state_t *state, uint64_t arg0, uint64_t arg1)
13115 {
13116 uint16_t *flags = &cpu_core[CPU->cpu_id].cpuc_dtrace_flags;
13117 uint64_t sarg0 = mstate->dtms_arg[0];
13118 uint64_t sarg1 = mstate->dtms_arg[1];
13119 uint64_t rval;
13120 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers;
13121 dtrace_helper_action_t *helper;
13122 dtrace_vstate_t *vstate;
13123 dtrace_difo_t *pred;
13124 int i, trace = dtrace_helptrace_enabled;
13125
13126 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS);
13127
13128 if (helpers == NULL)
13129 return (0);
13130
13131 if ((helper = helpers->dthps_actions[which]) == NULL)
13132 return (0);
13133
13134 vstate = &helpers->dthps_vstate;
13135 mstate->dtms_arg[0] = arg0;
13136 mstate->dtms_arg[1] = arg1;
13137
13138 /*
13139 * Now iterate over each helper. If its predicate evaluates to 'true',
13140 * we'll call the corresponding actions. Note that the below calls
13141 * to dtrace_dif_emulate() may set faults in machine state. This is
13142 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow
13143 * the stored DIF offset with its own (which is the desired behavior).
13144 * Also, note the calls to dtrace_dif_emulate() may allocate scratch
13145 * from machine state; this is okay, too.
13146 */
13147 for (; helper != NULL; helper = helper->dtha_next) {
13148 if ((pred = helper->dtha_predicate) != NULL) {
13149 if (trace)
13150 dtrace_helper_trace(helper, mstate, vstate, 0);
13151
13152 if (!dtrace_dif_emulate(pred, mstate, vstate, state))
13153 goto next;
13154
13155 if (*flags & CPU_DTRACE_FAULT)
13156 goto err;
13157 }
13158
13159 for (i = 0; i < helper->dtha_nactions; i++) {
13160 if (trace)
13161 dtrace_helper_trace(helper,
13162 mstate, vstate, i + 1);
13163
13164 rval = dtrace_dif_emulate(helper->dtha_actions[i],
13165 mstate, vstate, state);
13166
13167 if (*flags & CPU_DTRACE_FAULT)
13168 goto err;
13169 }
13170
13171 next:
13172 if (trace)
13173 dtrace_helper_trace(helper, mstate, vstate,
13174 DTRACE_HELPTRACE_NEXT);
13175 }
13176
13177 if (trace)
13178 dtrace_helper_trace(helper, mstate, vstate,
13179 DTRACE_HELPTRACE_DONE);
13180
13181 /*
13182 * Restore the arg0 that we saved upon entry.
13183 */
13184 mstate->dtms_arg[0] = sarg0;
13185 mstate->dtms_arg[1] = sarg1;
13186
13187 return (rval);
13188
13189 err:
13190 if (trace)
13191 dtrace_helper_trace(helper, mstate, vstate,
13192 DTRACE_HELPTRACE_ERR);
13193
13194 /*
13195 * Restore the arg0 that we saved upon entry.
13196 */
13197 mstate->dtms_arg[0] = sarg0;
13198 mstate->dtms_arg[1] = sarg1;
13199
13200 	return (0);
13201 }
13202
13203 static void
13204 dtrace_helper_action_destroy(dtrace_helper_action_t *helper,
13205 dtrace_vstate_t *vstate)
13206 {
13207 int i;
13208
13209 if (helper->dtha_predicate != NULL)
13210 dtrace_difo_release(helper->dtha_predicate, vstate);
13211
13212 for (i = 0; i < helper->dtha_nactions; i++) {
13213 ASSERT(helper->dtha_actions[i] != NULL);
13214 dtrace_difo_release(helper->dtha_actions[i], vstate);
13215 }
13216
13217 kmem_free(helper->dtha_actions,
13218 helper->dtha_nactions * sizeof (dtrace_difo_t *));
13219 kmem_free(helper, sizeof (dtrace_helper_action_t));
13220 }
13221
13222 static int
13223 dtrace_helper_destroygen(int gen)
13224 {
13225 proc_t *p = curproc;
13226 dtrace_helpers_t *help = p->p_dtrace_helpers;
13227 dtrace_vstate_t *vstate;
13228 int i;
13229
13230 ASSERT(MUTEX_HELD(&dtrace_lock));
13231
13232 if (help == NULL || gen > help->dthps_generation)
13233 return (EINVAL);
13234
13235 vstate = &help->dthps_vstate;
13236
13237 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
13238 dtrace_helper_action_t *last = NULL, *h, *next;
13239
13240 for (h = help->dthps_actions[i]; h != NULL; h = next) {
13241 next = h->dtha_next;
13242
13243 if (h->dtha_generation == gen) {
13244 if (last != NULL) {
13245 last->dtha_next = next;
13246 } else {
13247 help->dthps_actions[i] = next;
13248 }
13249
13250 dtrace_helper_action_destroy(h, vstate);
13251 } else {
13252 last = h;
13253 }
13254 }
13255 }
13256
13257 /*
13258 	 * Iterate until we've cleared out all helper providers with the
13259 * given generation number.
13260 */
13261 for (;;) {
13262 dtrace_helper_provider_t *prov;
13263
13264 /*
13265 * Look for a helper provider with the right generation. We
13266 * have to start back at the beginning of the list each time
13267 * because we drop dtrace_lock. It's unlikely that we'll make
13268 * more than two passes.
13269 */
13270 for (i = 0; i < help->dthps_nprovs; i++) {
13271 prov = help->dthps_provs[i];
13272
13273 if (prov->dthp_generation == gen)
13274 break;
13275 }
13276
13277 /*
13278 * If there were no matches, we're done.
13279 */
13280 if (i == help->dthps_nprovs)
13281 break;
13282
13283 /*
13284 * Move the last helper provider into this slot.
13285 */
13286 help->dthps_nprovs--;
13287 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs];
13288 help->dthps_provs[help->dthps_nprovs] = NULL;
13289
13290 mutex_exit(&dtrace_lock);
13291
13292 /*
13293 * If we have a meta provider, remove this helper provider.
13294 */
13295 mutex_enter(&dtrace_meta_lock);
13296 if (dtrace_meta_pid != NULL) {
13297 ASSERT(dtrace_deferred_pid == NULL);
13298 dtrace_helper_provider_remove(&prov->dthp_prov,
13299 p->p_pid);
13300 }
13301 mutex_exit(&dtrace_meta_lock);
13302
13303 dtrace_helper_provider_destroy(prov);
13304
13305 mutex_enter(&dtrace_lock);
13306 }
13307
13308 return (0);
13309 }
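
/*
 * The provider removal above uses the usual unordered-array trick:
 * rather than shifting the tail of the array down, the last element is
 * moved into the vacated slot and the count is decremented, making
 * each removal O(1).  A generic sketch (hypothetical helper):
 *
 *	static void
 *	unordered_remove(void **arr, int *countp, int i)
 *	{
 *		(*countp)--;
 *		arr[i] = arr[*countp];
 *		arr[*countp] = NULL;
 *	}
 *
 * Element order is not preserved, which is harmless here: the search
 * loop above restarts from the beginning anyway, because dtrace_lock
 * is dropped and reacquired between passes.
 */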
13310
13311 static int
13312 dtrace_helper_validate(dtrace_helper_action_t *helper)
13313 {
13314 int err = 0, i;
13315 dtrace_difo_t *dp;
13316
13317 if ((dp = helper->dtha_predicate) != NULL)
13318 err += dtrace_difo_validate_helper(dp);
13319
13320 for (i = 0; i < helper->dtha_nactions; i++)
13321 err += dtrace_difo_validate_helper(helper->dtha_actions[i]);
13322
13323 return (err == 0);
13324 }
13325
13326 static int
13327 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep)
13328 {
13329 dtrace_helpers_t *help;
13330 dtrace_helper_action_t *helper, *last;
13331 dtrace_actdesc_t *act;
13332 dtrace_vstate_t *vstate;
13333 dtrace_predicate_t *pred;
13334 int count = 0, nactions = 0, i;
13335
13336 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS)
13337 return (EINVAL);
13338
13339 help = curproc->p_dtrace_helpers;
13340 last = help->dthps_actions[which];
13341 vstate = &help->dthps_vstate;
13342
13343 for (count = 0; last != NULL; last = last->dtha_next) {
13344 count++;
13345 if (last->dtha_next == NULL)
13346 break;
13347 }
13348
13349 /*
13350 * If we already have dtrace_helper_actions_max helper actions for this
13351 * helper action type, we'll refuse to add a new one.
13352 */
13353 if (count >= dtrace_helper_actions_max)
13354 return (ENOSPC);
13355
13356 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP);
13357 helper->dtha_generation = help->dthps_generation;
13358
13359 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) {
13360 ASSERT(pred->dtp_difo != NULL);
13361 dtrace_difo_hold(pred->dtp_difo);
13362 helper->dtha_predicate = pred->dtp_difo;
13363 }
13364
13365 for (act = ep->dted_action; act != NULL; act = act->dtad_next) {
13366 if (act->dtad_kind != DTRACEACT_DIFEXPR)
13367 goto err;
13368
13369 if (act->dtad_difo == NULL)
13370 goto err;
13371
13372 nactions++;
13373 }
13374
13375 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) *
13376 (helper->dtha_nactions = nactions), KM_SLEEP);
13377
13378 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) {
13379 dtrace_difo_hold(act->dtad_difo);
13380 helper->dtha_actions[i++] = act->dtad_difo;
13381 }
13382
13383 if (!dtrace_helper_validate(helper))
13384 goto err;
13385
13386 if (last == NULL) {
13387 help->dthps_actions[which] = helper;
13388 } else {
13389 last->dtha_next = helper;
13390 }
13391
13392 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) {
13393 dtrace_helptrace_nlocals = vstate->dtvs_nlocals;
13394 dtrace_helptrace_next = 0;
13395 }
13396
13397 return (0);
13398 err:
13399 dtrace_helper_action_destroy(helper, vstate);
13400 return (EINVAL);
13401 }
13402
13403 static void
13404 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help,
13405 dof_helper_t *dofhp)
13406 {
13407 ASSERT(MUTEX_NOT_HELD(&dtrace_lock));
13408
13409 mutex_enter(&dtrace_meta_lock);
13410 mutex_enter(&dtrace_lock);
13411
13412 if (!dtrace_attached() || dtrace_meta_pid == NULL) {
13413 /*
13414 * If the dtrace module is loaded but not attached, or if
13415 		 * there isn't a meta provider registered to deal with
13416 * these provider descriptions, we need to postpone creating
13417 * the actual providers until later.
13418 */
13419
13420 if (help->dthps_next == NULL && help->dthps_prev == NULL &&
13421 dtrace_deferred_pid != help) {
13422 help->dthps_deferred = 1;
13423 help->dthps_pid = p->p_pid;
13424 help->dthps_next = dtrace_deferred_pid;
13425 help->dthps_prev = NULL;
13426 if (dtrace_deferred_pid != NULL)
13427 dtrace_deferred_pid->dthps_prev = help;
13428 dtrace_deferred_pid = help;
13429 }
13430
13431 mutex_exit(&dtrace_lock);
13432
13433 } else if (dofhp != NULL) {
13434 /*
13435 * If the dtrace module is loaded and we have a particular
13436 * helper provider description, pass that off to the
13437 * meta provider.
13438 */
13439
13440 mutex_exit(&dtrace_lock);
13441
13442 dtrace_helper_provide(dofhp, p->p_pid);
13443
13444 } else {
13445 /*
13446 * Otherwise, just pass all the helper provider descriptions
13447 * off to the meta provider.
13448 */
13449
13450 int i;
13451 mutex_exit(&dtrace_lock);
13452
13453 for (i = 0; i < help->dthps_nprovs; i++) {
13454 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov,
13455 p->p_pid);
13456 }
13457 }
13458
13459 mutex_exit(&dtrace_meta_lock);
13460 }
13461
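/*
 * Record a new helper provider description for the current process,
 * rejecting duplicates (by DOF address) and growing the provider table as
 * needed.  Registering the provider with the meta provider is left to the
 * caller.
 */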
13462 static int
13463 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen)
13464 {
13465 dtrace_helpers_t *help;
13466 dtrace_helper_provider_t *hprov, **tmp_provs;
13467 uint_t tmp_maxprovs, i;
13468
13469 ASSERT(MUTEX_HELD(&dtrace_lock));
13470
13471 help = curproc->p_dtrace_helpers;
13472 ASSERT(help != NULL);
13473
13474 /*
13475 * If we already have dtrace_helper_providers_max helper providers,
13476 	 * we refuse to add a new one.
13477 */
13478 if (help->dthps_nprovs >= dtrace_helper_providers_max)
13479 return (ENOSPC);
13480
13481 /*
13482 * Check to make sure this isn't a duplicate.
13483 */
13484 for (i = 0; i < help->dthps_nprovs; i++) {
13485 if (dofhp->dofhp_addr ==
13486 help->dthps_provs[i]->dthp_prov.dofhp_addr)
13487 return (EALREADY);
13488 }
13489
13490 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP);
13491 hprov->dthp_prov = *dofhp;
13492 hprov->dthp_ref = 1;
13493 hprov->dthp_generation = gen;
13494
13495 /*
13496 * Allocate a bigger table for helper providers if it's already full.
13497 */
13498 if (help->dthps_maxprovs == help->dthps_nprovs) {
13499 tmp_maxprovs = help->dthps_maxprovs;
13500 tmp_provs = help->dthps_provs;
13501
13502 if (help->dthps_maxprovs == 0)
13503 help->dthps_maxprovs = 2;
13504 else
13505 help->dthps_maxprovs *= 2;
13506 if (help->dthps_maxprovs > dtrace_helper_providers_max)
13507 help->dthps_maxprovs = dtrace_helper_providers_max;
13508
13509 ASSERT(tmp_maxprovs < help->dthps_maxprovs);
13510
13511 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs *
13512 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
13513
13514 if (tmp_provs != NULL) {
13515 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs *
13516 sizeof (dtrace_helper_provider_t *));
13517 kmem_free(tmp_provs, tmp_maxprovs *
13518 sizeof (dtrace_helper_provider_t *));
13519 }
13520 }
13521
13522 help->dthps_provs[help->dthps_nprovs] = hprov;
13523 help->dthps_nprovs++;
13524
13525 return (0);
13526 }
13527
13528 static void
13529 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov)
13530 {
13531 mutex_enter(&dtrace_lock);
13532
13533 if (--hprov->dthp_ref == 0) {
13534 dof_hdr_t *dof;
13535 mutex_exit(&dtrace_lock);
13536 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof;
13537 dtrace_dof_destroy(dof);
13538 kmem_free(hprov, sizeof (dtrace_helper_provider_t));
13539 } else {
13540 mutex_exit(&dtrace_lock);
13541 }
13542 }
13543
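/*
 * Validate a DOF_SECT_PROVIDER section and the sections it refers to (the
 * string table, probes, argument mappings, offsets and -- for DOF versions
 * later than 1 -- is-enabled offsets), checking alignment, entry sizes,
 * string indices and per-probe offset/argument ranges.  Returns 0 if the
 * provider description is well-formed and -1 otherwise.
 */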
13544 static int
13545 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec)
13546 {
13547 uintptr_t daddr = (uintptr_t)dof;
13548 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec;
13549 dof_provider_t *provider;
13550 dof_probe_t *probe;
13551 uint8_t *arg;
13552 char *strtab, *typestr;
13553 dof_stridx_t typeidx;
13554 size_t typesz;
13555 uint_t nprobes, j, k;
13556
13557 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER);
13558
13559 if (sec->dofs_offset & (sizeof (uint_t) - 1)) {
13560 dtrace_dof_error(dof, "misaligned section offset");
13561 return (-1);
13562 }
13563
13564 /*
13565 * The section needs to be large enough to contain the DOF provider
13566 * structure appropriate for the given version.
13567 */
13568 if (sec->dofs_size <
13569 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ?
13570 offsetof(dof_provider_t, dofpv_prenoffs) :
13571 sizeof (dof_provider_t))) {
13572 dtrace_dof_error(dof, "provider section too small");
13573 return (-1);
13574 }
13575
13576 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset);
13577 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab);
13578 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes);
13579 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs);
13580 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs);
13581
13582 if (str_sec == NULL || prb_sec == NULL ||
13583 arg_sec == NULL || off_sec == NULL)
13584 return (-1);
13585
13586 enoff_sec = NULL;
13587
13588 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 &&
13589 provider->dofpv_prenoffs != DOF_SECT_NONE &&
13590 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS,
13591 provider->dofpv_prenoffs)) == NULL)
13592 return (-1);
13593
13594 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset);
13595
13596 if (provider->dofpv_name >= str_sec->dofs_size ||
13597 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) {
13598 dtrace_dof_error(dof, "invalid provider name");
13599 return (-1);
13600 }
13601
13602 if (prb_sec->dofs_entsize == 0 ||
13603 prb_sec->dofs_entsize > prb_sec->dofs_size) {
13604 dtrace_dof_error(dof, "invalid entry size");
13605 return (-1);
13606 }
13607
13608 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) {
13609 dtrace_dof_error(dof, "misaligned entry size");
13610 return (-1);
13611 }
13612
13613 if (off_sec->dofs_entsize != sizeof (uint32_t)) {
13614 dtrace_dof_error(dof, "invalid entry size");
13615 return (-1);
13616 }
13617
13618 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) {
13619 dtrace_dof_error(dof, "misaligned section offset");
13620 return (-1);
13621 }
13622
13623 if (arg_sec->dofs_entsize != sizeof (uint8_t)) {
13624 dtrace_dof_error(dof, "invalid entry size");
13625 return (-1);
13626 }
13627
13628 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset);
13629
13630 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize;
13631
13632 /*
13633 * Take a pass through the probes to check for errors.
13634 */
13635 for (j = 0; j < nprobes; j++) {
13636 probe = (dof_probe_t *)(uintptr_t)(daddr +
13637 prb_sec->dofs_offset + j * prb_sec->dofs_entsize);
13638
13639 if (probe->dofpr_func >= str_sec->dofs_size) {
13640 dtrace_dof_error(dof, "invalid function name");
13641 return (-1);
13642 }
13643
13644 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) {
13645 dtrace_dof_error(dof, "function name too long");
13646 return (-1);
13647 }
13648
13649 if (probe->dofpr_name >= str_sec->dofs_size ||
13650 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) {
13651 dtrace_dof_error(dof, "invalid probe name");
13652 return (-1);
13653 }
13654
13655 /*
13656 * The offset count must not wrap the index, and the offsets
13657 * must also not overflow the section's data.
13658 */
13659 if (probe->dofpr_offidx + probe->dofpr_noffs <
13660 probe->dofpr_offidx ||
13661 (probe->dofpr_offidx + probe->dofpr_noffs) *
13662 off_sec->dofs_entsize > off_sec->dofs_size) {
13663 dtrace_dof_error(dof, "invalid probe offset");
13664 return (-1);
13665 }
13666
13667 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) {
13668 /*
13669 * If there's no is-enabled offset section, make sure
13670 * there aren't any is-enabled offsets. Otherwise
13671 * perform the same checks as for probe offsets
13672 * (immediately above).
13673 */
13674 if (enoff_sec == NULL) {
13675 if (probe->dofpr_enoffidx != 0 ||
13676 probe->dofpr_nenoffs != 0) {
13677 dtrace_dof_error(dof, "is-enabled "
13678 "offsets with null section");
13679 return (-1);
13680 }
13681 } else if (probe->dofpr_enoffidx +
13682 probe->dofpr_nenoffs < probe->dofpr_enoffidx ||
13683 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) *
13684 enoff_sec->dofs_entsize > enoff_sec->dofs_size) {
13685 dtrace_dof_error(dof, "invalid is-enabled "
13686 "offset");
13687 return (-1);
13688 }
13689
13690 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) {
13691 dtrace_dof_error(dof, "zero probe and "
13692 "is-enabled offsets");
13693 return (-1);
13694 }
13695 } else if (probe->dofpr_noffs == 0) {
13696 dtrace_dof_error(dof, "zero probe offsets");
13697 return (-1);
13698 }
13699
13700 if (probe->dofpr_argidx + probe->dofpr_xargc <
13701 probe->dofpr_argidx ||
13702 (probe->dofpr_argidx + probe->dofpr_xargc) *
13703 arg_sec->dofs_entsize > arg_sec->dofs_size) {
13704 dtrace_dof_error(dof, "invalid args");
13705 return (-1);
13706 }
13707
13708 typeidx = probe->dofpr_nargv;
13709 typestr = strtab + probe->dofpr_nargv;
13710 for (k = 0; k < probe->dofpr_nargc; k++) {
13711 if (typeidx >= str_sec->dofs_size) {
13712 dtrace_dof_error(dof, "bad "
13713 "native argument type");
13714 return (-1);
13715 }
13716
13717 typesz = strlen(typestr) + 1;
13718 if (typesz > DTRACE_ARGTYPELEN) {
13719 dtrace_dof_error(dof, "native "
13720 "argument type too long");
13721 return (-1);
13722 }
13723 typeidx += typesz;
13724 typestr += typesz;
13725 }
13726
13727 typeidx = probe->dofpr_xargv;
13728 typestr = strtab + probe->dofpr_xargv;
13729 for (k = 0; k < probe->dofpr_xargc; k++) {
13730 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) {
13731 dtrace_dof_error(dof, "bad "
13732 "native argument index");
13733 return (-1);
13734 }
13735
13736 if (typeidx >= str_sec->dofs_size) {
13737 dtrace_dof_error(dof, "bad "
13738 "translated argument type");
13739 return (-1);
13740 }
13741
13742 typesz = strlen(typestr) + 1;
13743 if (typesz > DTRACE_ARGTYPELEN) {
13744 dtrace_dof_error(dof, "translated argument "
13745 "type too long");
13746 return (-1);
13747 }
13748
13749 typeidx += typesz;
13750 typestr += typesz;
13751 }
13752 }
13753
13754 return (0);
13755 }
13756
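/*
 * Consume a DOF object containing helper actions and/or helper provider
 * descriptions on behalf of the current process.  On success, the
 * generation number assigned to the new helpers is returned.  The DOF is
 * consumed here:  it is either destroyed or retained on behalf of a
 * helper provider.
 */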
13757 static int
13758 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp)
13759 {
13760 dtrace_helpers_t *help;
13761 dtrace_vstate_t *vstate;
13762 dtrace_enabling_t *enab = NULL;
13763 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1;
13764 uintptr_t daddr = (uintptr_t)dof;
13765
13766 ASSERT(MUTEX_HELD(&dtrace_lock));
13767
13768 if ((help = curproc->p_dtrace_helpers) == NULL)
13769 help = dtrace_helpers_create(curproc);
13770
13771 vstate = &help->dthps_vstate;
13772
13773 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab,
13774 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) {
13775 dtrace_dof_destroy(dof);
13776 return (rv);
13777 }
13778
13779 /*
13780 * Look for helper providers and validate their descriptions.
13781 */
13782 if (dhp != NULL) {
13783 for (i = 0; i < dof->dofh_secnum; i++) {
13784 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr +
13785 dof->dofh_secoff + i * dof->dofh_secsize);
13786
13787 if (sec->dofs_type != DOF_SECT_PROVIDER)
13788 continue;
13789
13790 if (dtrace_helper_provider_validate(dof, sec) != 0) {
13791 dtrace_enabling_destroy(enab);
13792 dtrace_dof_destroy(dof);
13793 return (-1);
13794 }
13795
13796 nprovs++;
13797 }
13798 }
13799
13800 /*
13801 * Now we need to walk through the ECB descriptions in the enabling.
13802 */
13803 for (i = 0; i < enab->dten_ndesc; i++) {
13804 dtrace_ecbdesc_t *ep = enab->dten_desc[i];
13805 dtrace_probedesc_t *desc = &ep->dted_probe;
13806
13807 if (strcmp(desc->dtpd_provider, "dtrace") != 0)
13808 continue;
13809
13810 if (strcmp(desc->dtpd_mod, "helper") != 0)
13811 continue;
13812
13813 if (strcmp(desc->dtpd_func, "ustack") != 0)
13814 continue;
13815
13816 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK,
13817 ep)) != 0) {
13818 /*
13819 * Adding this helper action failed -- we are now going
13820 * to rip out the entire generation and return failure.
13821 */
13822 (void) dtrace_helper_destroygen(help->dthps_generation);
13823 dtrace_enabling_destroy(enab);
13824 dtrace_dof_destroy(dof);
13825 return (-1);
13826 }
13827
13828 nhelpers++;
13829 }
13830
13831 if (nhelpers < enab->dten_ndesc)
13832 dtrace_dof_error(dof, "unmatched helpers");
13833
13834 gen = help->dthps_generation++;
13835 dtrace_enabling_destroy(enab);
13836
13837 if (dhp != NULL && nprovs > 0) {
13838 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof;
13839 if (dtrace_helper_provider_add(dhp, gen) == 0) {
13840 mutex_exit(&dtrace_lock);
13841 dtrace_helper_provider_register(curproc, help, dhp);
13842 mutex_enter(&dtrace_lock);
13843
13844 destroy = 0;
13845 }
13846 }
13847
13848 if (destroy)
13849 dtrace_dof_destroy(dof);
13850
13851 return (gen);
13852 }
13853
13854 static dtrace_helpers_t *
13855 dtrace_helpers_create(proc_t *p)
13856 {
13857 dtrace_helpers_t *help;
13858
13859 ASSERT(MUTEX_HELD(&dtrace_lock));
13860 ASSERT(p->p_dtrace_helpers == NULL);
13861
13862 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP);
13863 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) *
13864 DTRACE_NHELPER_ACTIONS, KM_SLEEP);
13865
13866 p->p_dtrace_helpers = help;
13867 dtrace_helpers++;
13868
13869 return (help);
13870 }
13871
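/*
 * Destroy all of the helpers belonging to the current process:  tear down
 * the helper actions, remove any helper providers from the meta provider
 * (or from the deferred list), and free the associated state.
 */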
13872 static void
13873 dtrace_helpers_destroy(void)
13874 {
13875 dtrace_helpers_t *help;
13876 dtrace_vstate_t *vstate;
13877 proc_t *p = curproc;
13878 int i;
13879
13880 mutex_enter(&dtrace_lock);
13881
13882 ASSERT(p->p_dtrace_helpers != NULL);
13883 ASSERT(dtrace_helpers > 0);
13884
13885 help = p->p_dtrace_helpers;
13886 vstate = &help->dthps_vstate;
13887
13888 /*
13889 * We're now going to lose the help from this process.
13890 */
13891 p->p_dtrace_helpers = NULL;
13892 dtrace_sync();
13893
13894 /*
13895 	 * Destroy the helper actions.
13896 */
13897 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
13898 dtrace_helper_action_t *h, *next;
13899
13900 for (h = help->dthps_actions[i]; h != NULL; h = next) {
13901 next = h->dtha_next;
13902 dtrace_helper_action_destroy(h, vstate);
13903 h = next;
13904 }
13905 }
13906
13907 mutex_exit(&dtrace_lock);
13908
13909 /*
13910 * Destroy the helper providers.
13911 */
13912 if (help->dthps_maxprovs > 0) {
13913 mutex_enter(&dtrace_meta_lock);
13914 if (dtrace_meta_pid != NULL) {
13915 ASSERT(dtrace_deferred_pid == NULL);
13916
13917 for (i = 0; i < help->dthps_nprovs; i++) {
13918 dtrace_helper_provider_remove(
13919 &help->dthps_provs[i]->dthp_prov, p->p_pid);
13920 }
13921 } else {
13922 mutex_enter(&dtrace_lock);
13923 ASSERT(help->dthps_deferred == 0 ||
13924 help->dthps_next != NULL ||
13925 help->dthps_prev != NULL ||
13926 help == dtrace_deferred_pid);
13927
13928 /*
13929 * Remove the helper from the deferred list.
13930 */
13931 if (help->dthps_next != NULL)
13932 help->dthps_next->dthps_prev = help->dthps_prev;
13933 if (help->dthps_prev != NULL)
13934 help->dthps_prev->dthps_next = help->dthps_next;
13935 if (dtrace_deferred_pid == help) {
13936 dtrace_deferred_pid = help->dthps_next;
13937 ASSERT(help->dthps_prev == NULL);
13938 }
13939
13940 mutex_exit(&dtrace_lock);
13941 }
13942
13943 mutex_exit(&dtrace_meta_lock);
13944
13945 for (i = 0; i < help->dthps_nprovs; i++) {
13946 dtrace_helper_provider_destroy(help->dthps_provs[i]);
13947 }
13948
13949 kmem_free(help->dthps_provs, help->dthps_maxprovs *
13950 sizeof (dtrace_helper_provider_t *));
13951 }
13952
13953 mutex_enter(&dtrace_lock);
13954
13955 dtrace_vstate_fini(&help->dthps_vstate);
13956 kmem_free(help->dthps_actions,
13957 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS);
13958 kmem_free(help, sizeof (dtrace_helpers_t));
13959
13960 --dtrace_helpers;
13961 mutex_exit(&dtrace_lock);
13962 }
13963
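/*
 * Duplicate the helpers of one process into another (on fork()):  the
 * helper actions are copied (duplicating their DIF objects), while the
 * helper providers are shared by reference and re-registered on behalf of
 * the new process.
 */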
13964 static void
13965 dtrace_helpers_duplicate(proc_t *from, proc_t *to)
13966 {
13967 dtrace_helpers_t *help, *newhelp;
13968 dtrace_helper_action_t *helper, *new, *last;
13969 dtrace_difo_t *dp;
13970 dtrace_vstate_t *vstate;
13971 int i, j, sz, hasprovs = 0;
13972
13973 mutex_enter(&dtrace_lock);
13974 ASSERT(from->p_dtrace_helpers != NULL);
13975 ASSERT(dtrace_helpers > 0);
13976
13977 help = from->p_dtrace_helpers;
13978 newhelp = dtrace_helpers_create(to);
13979 ASSERT(to->p_dtrace_helpers != NULL);
13980
13981 newhelp->dthps_generation = help->dthps_generation;
13982 vstate = &newhelp->dthps_vstate;
13983
13984 /*
13985 * Duplicate the helper actions.
13986 */
13987 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) {
13988 if ((helper = help->dthps_actions[i]) == NULL)
13989 continue;
13990
13991 for (last = NULL; helper != NULL; helper = helper->dtha_next) {
13992 new = kmem_zalloc(sizeof (dtrace_helper_action_t),
13993 KM_SLEEP);
13994 new->dtha_generation = helper->dtha_generation;
13995
13996 if ((dp = helper->dtha_predicate) != NULL) {
13997 dp = dtrace_difo_duplicate(dp, vstate);
13998 new->dtha_predicate = dp;
13999 }
14000
14001 new->dtha_nactions = helper->dtha_nactions;
14002 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions;
14003 new->dtha_actions = kmem_alloc(sz, KM_SLEEP);
14004
14005 for (j = 0; j < new->dtha_nactions; j++) {
14006 dtrace_difo_t *dp = helper->dtha_actions[j];
14007
14008 ASSERT(dp != NULL);
14009 dp = dtrace_difo_duplicate(dp, vstate);
14010 new->dtha_actions[j] = dp;
14011 }
14012
14013 if (last != NULL) {
14014 last->dtha_next = new;
14015 } else {
14016 newhelp->dthps_actions[i] = new;
14017 }
14018
14019 last = new;
14020 }
14021 }
14022
14023 /*
14024 * Duplicate the helper providers and register them with the
14025 * DTrace framework.
14026 */
14027 if (help->dthps_nprovs > 0) {
14028 newhelp->dthps_nprovs = help->dthps_nprovs;
14029 newhelp->dthps_maxprovs = help->dthps_nprovs;
14030 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs *
14031 sizeof (dtrace_helper_provider_t *), KM_SLEEP);
14032 for (i = 0; i < newhelp->dthps_nprovs; i++) {
14033 newhelp->dthps_provs[i] = help->dthps_provs[i];
14034 newhelp->dthps_provs[i]->dthp_ref++;
14035 }
14036
14037 hasprovs = 1;
14038 }
14039
14040 mutex_exit(&dtrace_lock);
14041
14042 if (hasprovs)
14043 dtrace_helper_provider_register(to, newhelp, NULL);
14044 }
14045
14046 /*
14047 * DTrace Hook Functions
14048 */
14049 static void
14050 dtrace_module_loaded(struct modctl *ctl)
14051 {
14052 dtrace_provider_t *prv;
14053
14054 mutex_enter(&dtrace_provider_lock);
14055 mutex_enter(&mod_lock);
14056
14057 ASSERT(ctl->mod_busy);
14058
14059 /*
14060 	 * We're going to call each provider's per-module provide operation
14061 * specifying only this module.
14062 */
14063 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next)
14064 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl);
14065
14066 mutex_exit(&mod_lock);
14067 mutex_exit(&dtrace_provider_lock);
14068
14069 /*
14070 * If we have any retained enablings, we need to match against them.
14071 * Enabling probes requires that cpu_lock be held, and we cannot hold
14072 * cpu_lock here -- it is legal for cpu_lock to be held when loading a
14073 * module. (In particular, this happens when loading scheduling
14074 * classes.) So if we have any retained enablings, we need to dispatch
14075 * our task queue to do the match for us.
14076 */
14077 mutex_enter(&dtrace_lock);
14078
14079 if (dtrace_retained == NULL) {
14080 mutex_exit(&dtrace_lock);
14081 return;
14082 }
14083
14084 (void) taskq_dispatch(dtrace_taskq,
14085 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP);
14086
14087 mutex_exit(&dtrace_lock);
14088
14089 /*
14090 * And now, for a little heuristic sleaze: in general, we want to
14091 * match modules as soon as they load. However, we cannot guarantee
14092 * this, because it would lead us to the lock ordering violation
14093 * outlined above. The common case, of course, is that cpu_lock is
14094 * _not_ held -- so we delay here for a clock tick, hoping that that's
14095 * long enough for the task queue to do its work. If it's not, it's
14096 * not a serious problem -- it just means that the module that we
14097 * just loaded may not be immediately instrumentable.
14098 */
14099 delay(1);
14100 }
14101
14102 static void
14103 dtrace_module_unloaded(struct modctl *ctl)
14104 {
14105 dtrace_probe_t template, *probe, *first, *next;
14106 dtrace_provider_t *prov;
14107
14108 template.dtpr_mod = ctl->mod_modname;
14109
14110 mutex_enter(&dtrace_provider_lock);
14111 mutex_enter(&mod_lock);
14112 mutex_enter(&dtrace_lock);
14113
14114 if (dtrace_bymod == NULL) {
14115 /*
14116 * The DTrace module is loaded (obviously) but not attached;
14117 * we don't have any work to do.
14118 */
14119 mutex_exit(&dtrace_provider_lock);
14120 mutex_exit(&mod_lock);
14121 mutex_exit(&dtrace_lock);
14122 return;
14123 }
14124
14125 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template);
14126 probe != NULL; probe = probe->dtpr_nextmod) {
14127 if (probe->dtpr_ecb != NULL) {
14128 mutex_exit(&dtrace_provider_lock);
14129 mutex_exit(&mod_lock);
14130 mutex_exit(&dtrace_lock);
14131
14132 /*
14133 * This shouldn't _actually_ be possible -- we're
14134 * unloading a module that has an enabled probe in it.
14135 * (It's normally up to the provider to make sure that
14136 * this can't happen.) However, because dtps_enable()
14137 * doesn't have a failure mode, there can be an
14138 * enable/unload race. Upshot: we don't want to
14139 * assert, but we're not going to disable the
14140 * probe, either.
14141 */
14142 if (dtrace_err_verbose) {
14143 cmn_err(CE_WARN, "unloaded module '%s' had "
14144 "enabled probes", ctl->mod_modname);
14145 }
14146
14147 return;
14148 }
14149 }
14150
14151 probe = first;
14152
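	/*
	 * Remove each of the module's probes from the probe array and from
	 * the by-module, by-function and by-name hashes, rebuilding the list
	 * of probes (in reverse order) so that they can be destroyed after
	 * the dtrace_sync() below.
	 */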
14153 for (first = NULL; probe != NULL; probe = next) {
14154 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe);
14155
14156 dtrace_probes[probe->dtpr_id - 1] = NULL;
14157
14158 next = probe->dtpr_nextmod;
14159 dtrace_hash_remove(dtrace_bymod, probe);
14160 dtrace_hash_remove(dtrace_byfunc, probe);
14161 dtrace_hash_remove(dtrace_byname, probe);
14162
14163 if (first == NULL) {
14164 first = probe;
14165 probe->dtpr_nextmod = NULL;
14166 } else {
14167 probe->dtpr_nextmod = first;
14168 first = probe;
14169 }
14170 }
14171
14172 /*
14173 * We've removed all of the module's probes from the hash chains and
14174 * from the probe array. Now issue a dtrace_sync() to be sure that
14175 * everyone has cleared out from any probe array processing.
14176 */
14177 dtrace_sync();
14178
14179 for (probe = first; probe != NULL; probe = first) {
14180 first = probe->dtpr_nextmod;
14181 prov = probe->dtpr_provider;
14182 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id,
14183 probe->dtpr_arg);
14184 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1);
14185 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1);
14186 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1);
14187 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1);
14188 kmem_free(probe, sizeof (dtrace_probe_t));
14189 }
14190
14191 mutex_exit(&dtrace_lock);
14192 mutex_exit(&mod_lock);
14193 mutex_exit(&dtrace_provider_lock);
14194 }
14195
14196 void
14197 dtrace_suspend(void)
14198 {
14199 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend));
14200 }
14201
14202 void
14203 dtrace_resume(void)
14204 {
14205 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume));
14206 }
14207
14208 static int
14209 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu)
14210 {
14211 ASSERT(MUTEX_HELD(&cpu_lock));
14212 mutex_enter(&dtrace_lock);
14213
14214 switch (what) {
14215 case CPU_CONFIG: {
14216 dtrace_state_t *state;
14217 dtrace_optval_t *opt, rs, c;
14218
14219 /*
14220 * For now, we only allocate a new buffer for anonymous state.
14221 */
14222 if ((state = dtrace_anon.dta_state) == NULL)
14223 break;
14224
14225 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE)
14226 break;
14227
14228 opt = state->dts_options;
14229 c = opt[DTRACEOPT_CPU];
14230
14231 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu)
14232 break;
14233
14234 /*
14235 * Regardless of what the actual policy is, we're going to
14236 * temporarily set our resize policy to be manual. We're
14237 * also going to temporarily set our CPU option to denote
14238 * the newly configured CPU.
14239 */
14240 rs = opt[DTRACEOPT_BUFRESIZE];
14241 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL;
14242 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu;
14243
14244 (void) dtrace_state_buffers(state);
14245
14246 opt[DTRACEOPT_BUFRESIZE] = rs;
14247 opt[DTRACEOPT_CPU] = c;
14248
14249 break;
14250 }
14251
14252 case CPU_UNCONFIG:
14253 /*
14254 * We don't free the buffer in the CPU_UNCONFIG case. (The
14255 * buffer will be freed when the consumer exits.)
14256 */
14257 break;
14258
14259 default:
14260 break;
14261 }
14262
14263 mutex_exit(&dtrace_lock);
14264 return (0);
14265 }
14266
14267 static void
14268 dtrace_cpu_setup_initial(processorid_t cpu)
14269 {
14270 (void) dtrace_cpu_setup(CPU_CONFIG, cpu);
14271 }
14272
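/*
 * Add a range of toxic addresses -- addresses from which DTrace must never
 * load -- growing the toxic range table (by doubling its size) as needed.
 */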
14273 static void
14274 dtrace_toxrange_add(uintptr_t base, uintptr_t limit)
14275 {
14276 if (dtrace_toxranges >= dtrace_toxranges_max) {
14277 int osize, nsize;
14278 dtrace_toxrange_t *range;
14279
14280 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
14281
14282 if (osize == 0) {
14283 ASSERT(dtrace_toxrange == NULL);
14284 ASSERT(dtrace_toxranges_max == 0);
14285 dtrace_toxranges_max = 1;
14286 } else {
14287 dtrace_toxranges_max <<= 1;
14288 }
14289
14290 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t);
14291 range = kmem_zalloc(nsize, KM_SLEEP);
14292
14293 if (dtrace_toxrange != NULL) {
14294 ASSERT(osize != 0);
14295 bcopy(dtrace_toxrange, range, osize);
14296 kmem_free(dtrace_toxrange, osize);
14297 }
14298
14299 dtrace_toxrange = range;
14300 }
14301
14302 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == NULL);
14303 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == NULL);
14304
14305 dtrace_toxrange[dtrace_toxranges].dtt_base = base;
14306 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit;
14307 dtrace_toxranges++;
14308 }
14309
14310 /*
14311 * DTrace Driver Cookbook Functions
14312 */
14313 /*ARGSUSED*/
14314 static int
14315 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
14316 {
14317 dtrace_provider_id_t id;
14318 dtrace_state_t *state = NULL;
14319 dtrace_enabling_t *enab;
14320
14321 mutex_enter(&cpu_lock);
14322 mutex_enter(&dtrace_provider_lock);
14323 mutex_enter(&dtrace_lock);
14324
14325 if (ddi_soft_state_init(&dtrace_softstate,
14326 sizeof (dtrace_state_t), 0) != 0) {
14327 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state");
14328 mutex_exit(&cpu_lock);
14329 mutex_exit(&dtrace_provider_lock);
14330 mutex_exit(&dtrace_lock);
14331 return (DDI_FAILURE);
14332 }
14333
14334 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR,
14335 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE ||
14336 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR,
14337 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) {
14338 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes");
14339 ddi_remove_minor_node(devi, NULL);
14340 ddi_soft_state_fini(&dtrace_softstate);
14341 mutex_exit(&cpu_lock);
14342 mutex_exit(&dtrace_provider_lock);
14343 mutex_exit(&dtrace_lock);
14344 return (DDI_FAILURE);
14345 }
14346
14347 ddi_report_dev(devi);
14348 dtrace_devi = devi;
14349
14350 dtrace_modload = dtrace_module_loaded;
14351 dtrace_modunload = dtrace_module_unloaded;
14352 dtrace_cpu_init = dtrace_cpu_setup_initial;
14353 dtrace_helpers_cleanup = dtrace_helpers_destroy;
14354 dtrace_helpers_fork = dtrace_helpers_duplicate;
14355 dtrace_cpustart_init = dtrace_suspend;
14356 dtrace_cpustart_fini = dtrace_resume;
14357 dtrace_debugger_init = dtrace_suspend;
14358 dtrace_debugger_fini = dtrace_resume;
14359
14360 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
14361
14362 ASSERT(MUTEX_HELD(&cpu_lock));
14363
14364 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1,
14365 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER);
14366 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE,
14367 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0,
14368 VM_SLEEP | VMC_IDENTIFIER);
14369 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri,
14370 1, INT_MAX, 0);
14371
14372 dtrace_state_cache = kmem_cache_create("dtrace_state_cache",
14373 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN,
14374 NULL, NULL, NULL, NULL, NULL, 0);
14375
14376 ASSERT(MUTEX_HELD(&cpu_lock));
14377 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod),
14378 offsetof(dtrace_probe_t, dtpr_nextmod),
14379 offsetof(dtrace_probe_t, dtpr_prevmod));
14380
14381 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func),
14382 offsetof(dtrace_probe_t, dtpr_nextfunc),
14383 offsetof(dtrace_probe_t, dtpr_prevfunc));
14384
14385 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name),
14386 offsetof(dtrace_probe_t, dtpr_nextname),
14387 offsetof(dtrace_probe_t, dtpr_prevname));
14388
14389 if (dtrace_retain_max < 1) {
14390 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; "
14391 "setting to 1", dtrace_retain_max);
14392 dtrace_retain_max = 1;
14393 }
14394
14395 /*
14396 * Now discover our toxic ranges.
14397 */
14398 dtrace_toxic_ranges(dtrace_toxrange_add);
14399
14400 /*
14401 * Before we register ourselves as a provider to our own framework,
14402 * we would like to assert that dtrace_provider is NULL -- but that's
14403 * not true if we were loaded as a dependency of a DTrace provider.
14404 * Once we've registered, we can assert that dtrace_provider is our
14405 * pseudo provider.
14406 */
14407 (void) dtrace_register("dtrace", &dtrace_provider_attr,
14408 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id);
14409
14410 ASSERT(dtrace_provider != NULL);
14411 ASSERT((dtrace_provider_id_t)dtrace_provider == id);
14412
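	/*
	 * Create the framework's own BEGIN, END and ERROR probes.
	 */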
14413 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t)
14414 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL);
14415 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t)
14416 dtrace_provider, NULL, NULL, "END", 0, NULL);
14417 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t)
14418 dtrace_provider, NULL, NULL, "ERROR", 1, NULL);
14419
14420 dtrace_anon_property();
14421 mutex_exit(&cpu_lock);
14422
14423 /*
14424 * If DTrace helper tracing is enabled, we need to allocate the
14425 * trace buffer and initialize the values.
14426 */
14427 if (dtrace_helptrace_enabled) {
14428 ASSERT(dtrace_helptrace_buffer == NULL);
14429 dtrace_helptrace_buffer =
14430 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP);
14431 dtrace_helptrace_next = 0;
14432 }
14433
14434 /*
14435 * If there are already providers, we must ask them to provide their
14436 * probes, and then match any anonymous enabling against them. Note
14437 * that there should be no other retained enablings at this time:
14438 	 * the only retained enabling should be the anonymous
14439 * enabling.
14440 */
14441 if (dtrace_anon.dta_enabling != NULL) {
14442 ASSERT(dtrace_retained == dtrace_anon.dta_enabling);
14443
14444 dtrace_enabling_provide(NULL);
14445 state = dtrace_anon.dta_state;
14446
14447 /*
14448 * We couldn't hold cpu_lock across the above call to
14449 * dtrace_enabling_provide(), but we must hold it to actually
14450 * enable the probes. We have to drop all of our locks, pick
14451 * up cpu_lock, and regain our locks before matching the
14452 * retained anonymous enabling.
14453 */
14454 mutex_exit(&dtrace_lock);
14455 mutex_exit(&dtrace_provider_lock);
14456
14457 mutex_enter(&cpu_lock);
14458 mutex_enter(&dtrace_provider_lock);
14459 mutex_enter(&dtrace_lock);
14460
14461 if ((enab = dtrace_anon.dta_enabling) != NULL)
14462 (void) dtrace_enabling_match(enab, NULL);
14463
14464 mutex_exit(&cpu_lock);
14465 }
14466
14467 mutex_exit(&dtrace_lock);
14468 mutex_exit(&dtrace_provider_lock);
14469
14470 if (state != NULL) {
14471 /*
14472 * If we created any anonymous state, set it going now.
14473 */
14474 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon);
14475 }
14476
14477 return (DDI_SUCCESS);
14478 }
14479
14480 /*ARGSUSED*/
14481 static int
14482 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
14483 {
14484 dtrace_state_t *state;
14485 uint32_t priv;
14486 uid_t uid;
14487 zoneid_t zoneid;
14488
14489 if (getminor(*devp) == DTRACEMNRN_HELPER)
14490 return (0);
14491
14492 /*
14493 * If this wasn't an open with the "helper" minor, then it must be
14494 * the "dtrace" minor.
14495 */
14496 if (getminor(*devp) != DTRACEMNRN_DTRACE)
14497 return (ENXIO);
14498
14499 /*
14500 * If no DTRACE_PRIV_* bits are set in the credential, then the
14501 * caller lacks sufficient permission to do anything with DTrace.
14502 */
14503 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid);
14504 if (priv == DTRACE_PRIV_NONE)
14505 return (EACCES);
14506
14507 /*
14508 * Ask all providers to provide all their probes.
14509 */
14510 mutex_enter(&dtrace_provider_lock);
14511 dtrace_probe_provide(NULL, NULL);
14512 mutex_exit(&dtrace_provider_lock);
14513
14514 mutex_enter(&cpu_lock);
14515 mutex_enter(&dtrace_lock);
14516 dtrace_opens++;
14517 dtrace_membar_producer();
14518
14519 /*
14520 * If the kernel debugger is active (that is, if the kernel debugger
14521 * modified text in some way), we won't allow the open.
14522 */
14523 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) {
14524 dtrace_opens--;
14525 mutex_exit(&cpu_lock);
14526 mutex_exit(&dtrace_lock);
14527 return (EBUSY);
14528 }
14529
14530 state = dtrace_state_create(devp, cred_p);
14531 mutex_exit(&cpu_lock);
14532
14533 if (state == NULL) {
14534 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
14535 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
14536 mutex_exit(&dtrace_lock);
14537 return (EAGAIN);
14538 }
14539
14540 mutex_exit(&dtrace_lock);
14541
14542 return (0);
14543 }
14544
14545 /*ARGSUSED*/
14546 static int
14547 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
14548 {
14549 minor_t minor = getminor(dev);
14550 dtrace_state_t *state;
14551
14552 if (minor == DTRACEMNRN_HELPER)
14553 return (0);
14554
14555 state = ddi_get_soft_state(dtrace_softstate, minor);
14556
14557 mutex_enter(&cpu_lock);
14558 mutex_enter(&dtrace_lock);
14559
14560 if (state->dts_anon) {
14561 /*
14562 * There is anonymous state. Destroy that first.
14563 */
14564 ASSERT(dtrace_anon.dta_state == NULL);
14565 dtrace_state_destroy(state->dts_anon);
14566 }
14567
14568 dtrace_state_destroy(state);
14569 ASSERT(dtrace_opens > 0);
14570
14571 /*
14572 * Only relinquish control of the kernel debugger interface when there
14573 * are no consumers and no anonymous enablings.
14574 */
14575 if (--dtrace_opens == 0 && dtrace_anon.dta_enabling == NULL)
14576 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
14577
14578 mutex_exit(&dtrace_lock);
14579 mutex_exit(&cpu_lock);
14580
14581 return (0);
14582 }
14583
14584 /*ARGSUSED*/
14585 static int
14586 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv)
14587 {
14588 int rval;
14589 dof_helper_t help, *dhp = NULL;
14590
14591 switch (cmd) {
14592 case DTRACEHIOC_ADDDOF:
14593 if (copyin((void *)arg, &help, sizeof (help)) != 0) {
14594 dtrace_dof_error(NULL, "failed to copyin DOF helper");
14595 return (EFAULT);
14596 }
14597
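		/*
		 * Point dhp at the copied-in helper description and reset
		 * arg to the user address of the embedded DOF before falling
		 * through to the DTRACEHIOC_ADD handling below.
		 */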
14598 dhp = &help;
14599 arg = (intptr_t)help.dofhp_dof;
14600 /*FALLTHROUGH*/
14601
14602 case DTRACEHIOC_ADD: {
14603 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval);
14604
14605 if (dof == NULL)
14606 return (rval);
14607
14608 mutex_enter(&dtrace_lock);
14609
14610 /*
14611 * dtrace_helper_slurp() takes responsibility for the dof --
14612 * it may free it now or it may save it and free it later.
14613 */
14614 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) {
14615 *rv = rval;
14616 rval = 0;
14617 } else {
14618 rval = EINVAL;
14619 }
14620
14621 mutex_exit(&dtrace_lock);
14622 return (rval);
14623 }
14624
14625 case DTRACEHIOC_REMOVE: {
14626 mutex_enter(&dtrace_lock);
14627 rval = dtrace_helper_destroygen(arg);
14628 mutex_exit(&dtrace_lock);
14629
14630 return (rval);
14631 }
14632
14633 default:
14634 break;
14635 }
14636
14637 return (ENOTTY);
14638 }
14639
14640 /*ARGSUSED*/
14641 static int
14642 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
14643 {
14644 minor_t minor = getminor(dev);
14645 dtrace_state_t *state;
14646 int rval;
14647
14648 if (minor == DTRACEMNRN_HELPER)
14649 return (dtrace_ioctl_helper(cmd, arg, rv));
14650
14651 state = ddi_get_soft_state(dtrace_softstate, minor);
14652
14653 if (state->dts_anon) {
14654 ASSERT(dtrace_anon.dta_state == NULL);
14655 state = state->dts_anon;
14656 }
14657
14658 switch (cmd) {
14659 case DTRACEIOC_PROVIDER: {
14660 dtrace_providerdesc_t pvd;
14661 dtrace_provider_t *pvp;
14662
14663 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0)
14664 return (EFAULT);
14665
14666 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0';
14667 mutex_enter(&dtrace_provider_lock);
14668
14669 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) {
14670 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0)
14671 break;
14672 }
14673
14674 mutex_exit(&dtrace_provider_lock);
14675
14676 if (pvp == NULL)
14677 return (ESRCH);
14678
14679 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t));
14680 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t));
14681 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0)
14682 return (EFAULT);
14683
14684 return (0);
14685 }
14686
14687 case DTRACEIOC_EPROBE: {
14688 dtrace_eprobedesc_t epdesc;
14689 dtrace_ecb_t *ecb;
14690 dtrace_action_t *act;
14691 void *buf;
14692 size_t size;
14693 uintptr_t dest;
14694 int nrecs;
14695
14696 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0)
14697 return (EFAULT);
14698
14699 mutex_enter(&dtrace_lock);
14700
14701 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) {
14702 mutex_exit(&dtrace_lock);
14703 return (EINVAL);
14704 }
14705
14706 if (ecb->dte_probe == NULL) {
14707 mutex_exit(&dtrace_lock);
14708 return (EINVAL);
14709 }
14710
14711 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id;
14712 epdesc.dtepd_uarg = ecb->dte_uarg;
14713 epdesc.dtepd_size = ecb->dte_size;
14714
14715 nrecs = epdesc.dtepd_nrecs;
14716 epdesc.dtepd_nrecs = 0;
14717 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
14718 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
14719 continue;
14720
14721 epdesc.dtepd_nrecs++;
14722 }
14723
14724 /*
14725 * Now that we have the size, we need to allocate a temporary
14726 * buffer in which to store the complete description. We need
14727 * the temporary buffer to be able to drop dtrace_lock()
14728 * across the copyout(), below.
14729 */
14730 size = sizeof (dtrace_eprobedesc_t) +
14731 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t));
14732
14733 buf = kmem_alloc(size, KM_SLEEP);
14734 dest = (uintptr_t)buf;
14735
14736 bcopy(&epdesc, (void *)dest, sizeof (epdesc));
14737 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]);
14738
14739 for (act = ecb->dte_action; act != NULL; act = act->dta_next) {
14740 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple)
14741 continue;
14742
14743 if (nrecs-- == 0)
14744 break;
14745
14746 bcopy(&act->dta_rec, (void *)dest,
14747 sizeof (dtrace_recdesc_t));
14748 dest += sizeof (dtrace_recdesc_t);
14749 }
14750
14751 mutex_exit(&dtrace_lock);
14752
14753 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
14754 kmem_free(buf, size);
14755 return (EFAULT);
14756 }
14757
14758 kmem_free(buf, size);
14759 return (0);
14760 }
14761
14762 case DTRACEIOC_AGGDESC: {
14763 dtrace_aggdesc_t aggdesc;
14764 dtrace_action_t *act;
14765 dtrace_aggregation_t *agg;
14766 int nrecs;
14767 uint32_t offs;
14768 dtrace_recdesc_t *lrec;
14769 void *buf;
14770 size_t size;
14771 uintptr_t dest;
14772
14773 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0)
14774 return (EFAULT);
14775
14776 mutex_enter(&dtrace_lock);
14777
14778 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) {
14779 mutex_exit(&dtrace_lock);
14780 return (EINVAL);
14781 }
14782
14783 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid;
14784
14785 nrecs = aggdesc.dtagd_nrecs;
14786 aggdesc.dtagd_nrecs = 0;
14787
14788 offs = agg->dtag_base;
14789 lrec = &agg->dtag_action.dta_rec;
14790 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs;
14791
14792 for (act = agg->dtag_first; ; act = act->dta_next) {
14793 ASSERT(act->dta_intuple ||
14794 DTRACEACT_ISAGG(act->dta_kind));
14795
14796 /*
14797 * If this action has a record size of zero, it
14798 * denotes an argument to the aggregating action.
14799 * Because the presence of this record doesn't (or
14800 * shouldn't) affect the way the data is interpreted,
14801 * we don't copy it out to save user-level the
14802 * confusion of dealing with a zero-length record.
14803 */
14804 if (act->dta_rec.dtrd_size == 0) {
14805 ASSERT(agg->dtag_hasarg);
14806 continue;
14807 }
14808
14809 aggdesc.dtagd_nrecs++;
14810
14811 if (act == &agg->dtag_action)
14812 break;
14813 }
14814
14815 /*
14816 * Now that we have the size, we need to allocate a temporary
14817 * buffer in which to store the complete description. We need
14818 * the temporary buffer to be able to drop dtrace_lock()
14819 * across the copyout(), below.
14820 */
14821 size = sizeof (dtrace_aggdesc_t) +
14822 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t));
14823
14824 buf = kmem_alloc(size, KM_SLEEP);
14825 dest = (uintptr_t)buf;
14826
14827 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc));
14828 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]);
14829
14830 for (act = agg->dtag_first; ; act = act->dta_next) {
14831 dtrace_recdesc_t rec = act->dta_rec;
14832
14833 /*
14834 * See the comment in the above loop for why we pass
14835 * over zero-length records.
14836 */
14837 if (rec.dtrd_size == 0) {
14838 ASSERT(agg->dtag_hasarg);
14839 continue;
14840 }
14841
14842 if (nrecs-- == 0)
14843 break;
14844
14845 rec.dtrd_offset -= offs;
14846 bcopy(&rec, (void *)dest, sizeof (rec));
14847 dest += sizeof (dtrace_recdesc_t);
14848
14849 if (act == &agg->dtag_action)
14850 break;
14851 }
14852
14853 mutex_exit(&dtrace_lock);
14854
14855 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) {
14856 kmem_free(buf, size);
14857 return (EFAULT);
14858 }
14859
14860 kmem_free(buf, size);
14861 return (0);
14862 }
14863
14864 case DTRACEIOC_ENABLE: {
14865 dof_hdr_t *dof;
14866 dtrace_enabling_t *enab = NULL;
14867 dtrace_vstate_t *vstate;
14868 int err = 0;
14869
14870 *rv = 0;
14871
14872 /*
14873 * If a NULL argument has been passed, we take this as our
14874 * cue to reevaluate our enablings.
14875 */
14876 if (arg == NULL) {
14877 dtrace_enabling_matchall();
14878
14879 return (0);
14880 }
14881
14882 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL)
14883 return (rval);
14884
14885 mutex_enter(&cpu_lock);
14886 mutex_enter(&dtrace_lock);
14887 vstate = &state->dts_vstate;
14888
14889 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) {
14890 mutex_exit(&dtrace_lock);
14891 mutex_exit(&cpu_lock);
14892 dtrace_dof_destroy(dof);
14893 return (EBUSY);
14894 }
14895
14896 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) {
14897 mutex_exit(&dtrace_lock);
14898 mutex_exit(&cpu_lock);
14899 dtrace_dof_destroy(dof);
14900 return (EINVAL);
14901 }
14902
14903 if ((rval = dtrace_dof_options(dof, state)) != 0) {
14904 dtrace_enabling_destroy(enab);
14905 mutex_exit(&dtrace_lock);
14906 mutex_exit(&cpu_lock);
14907 dtrace_dof_destroy(dof);
14908 return (rval);
14909 }
14910
14911 if ((err = dtrace_enabling_match(enab, rv)) == 0) {
14912 err = dtrace_enabling_retain(enab);
14913 } else {
14914 dtrace_enabling_destroy(enab);
14915 }
14916
14917 mutex_exit(&cpu_lock);
14918 mutex_exit(&dtrace_lock);
14919 dtrace_dof_destroy(dof);
14920
14921 return (err);
14922 }
14923
14924 case DTRACEIOC_REPLICATE: {
14925 dtrace_repldesc_t desc;
14926 dtrace_probedesc_t *match = &desc.dtrpd_match;
14927 dtrace_probedesc_t *create = &desc.dtrpd_create;
14928 int err;
14929
14930 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
14931 return (EFAULT);
14932
14933 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
14934 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
14935 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
14936 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
14937
14938 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
14939 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
14940 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
14941 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';
14942
14943 mutex_enter(&dtrace_lock);
14944 err = dtrace_enabling_replicate(state, match, create);
14945 mutex_exit(&dtrace_lock);
14946
14947 return (err);
14948 }
14949
14950 case DTRACEIOC_PROBEMATCH:
14951 case DTRACEIOC_PROBES: {
14952 dtrace_probe_t *probe = NULL;
14953 dtrace_probedesc_t desc;
14954 dtrace_probekey_t pkey;
14955 dtrace_id_t i;
14956 int m = 0;
14957 uint32_t priv;
14958 uid_t uid;
14959 zoneid_t zoneid;
14960
14961 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
14962 return (EFAULT);
14963
14964 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
14965 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
14966 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
14967 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';
14968
14969 /*
14970 * Before we attempt to match this probe, we want to give
14971 * all providers the opportunity to provide it.
14972 */
14973 if (desc.dtpd_id == DTRACE_IDNONE) {
14974 mutex_enter(&dtrace_provider_lock);
14975 dtrace_probe_provide(&desc, NULL);
14976 mutex_exit(&dtrace_provider_lock);
14977 desc.dtpd_id++;
14978 }
14979
14980 if (cmd == DTRACEIOC_PROBEMATCH) {
14981 dtrace_probekey(&desc, &pkey);
14982 pkey.dtpk_id = DTRACE_IDNONE;
14983 }
14984
14985 dtrace_cred2priv(cr, &priv, &uid, &zoneid);
14986
14987 mutex_enter(&dtrace_lock);
14988
14989 if (cmd == DTRACEIOC_PROBEMATCH) {
14990 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
14991 if ((probe = dtrace_probes[i - 1]) != NULL &&
14992 (m = dtrace_match_probe(probe, &pkey,
14993 priv, uid, zoneid)) != 0)
14994 break;
14995 }
14996
14997 if (m < 0) {
14998 mutex_exit(&dtrace_lock);
14999 return (EINVAL);
15000 }
15001
15002 } else {
15003 for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
15004 if ((probe = dtrace_probes[i - 1]) != NULL &&
15005 dtrace_match_priv(probe, priv, uid, zoneid))
15006 break;
15007 }
15008 }
15009
15010 if (probe == NULL) {
15011 mutex_exit(&dtrace_lock);
15012 return (ESRCH);
15013 }
15014
15015 dtrace_probe_description(probe, &desc);
15016 mutex_exit(&dtrace_lock);
15017
15018 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15019 return (EFAULT);
15020
15021 return (0);
15022 }
15023
15024 case DTRACEIOC_PROBEARG: {
15025 dtrace_argdesc_t desc;
15026 dtrace_probe_t *probe;
15027 dtrace_provider_t *prov;
15028
15029 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15030 return (EFAULT);
15031
15032 if (desc.dtargd_id == DTRACE_IDNONE)
15033 return (EINVAL);
15034
15035 if (desc.dtargd_ndx == DTRACE_ARGNONE)
15036 return (EINVAL);
15037
15038 mutex_enter(&dtrace_provider_lock);
15039 mutex_enter(&mod_lock);
15040 mutex_enter(&dtrace_lock);
15041
15042 if (desc.dtargd_id > dtrace_nprobes) {
15043 mutex_exit(&dtrace_lock);
15044 mutex_exit(&mod_lock);
15045 mutex_exit(&dtrace_provider_lock);
15046 return (EINVAL);
15047 }
15048
15049 if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
15050 mutex_exit(&dtrace_lock);
15051 mutex_exit(&mod_lock);
15052 mutex_exit(&dtrace_provider_lock);
15053 return (EINVAL);
15054 }
15055
15056 mutex_exit(&dtrace_lock);
15057
15058 prov = probe->dtpr_provider;
15059
15060 if (prov->dtpv_pops.dtps_getargdesc == NULL) {
15061 /*
15062 * There isn't any typed information for this probe.
15063 * Set the argument number to DTRACE_ARGNONE.
15064 */
15065 desc.dtargd_ndx = DTRACE_ARGNONE;
15066 } else {
15067 desc.dtargd_native[0] = '\0';
15068 desc.dtargd_xlate[0] = '\0';
15069 desc.dtargd_mapping = desc.dtargd_ndx;
15070
15071 prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
15072 probe->dtpr_id, probe->dtpr_arg, &desc);
15073 }
15074
15075 mutex_exit(&mod_lock);
15076 mutex_exit(&dtrace_provider_lock);
15077
15078 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15079 return (EFAULT);
15080
15081 return (0);
15082 }
15083
15084 case DTRACEIOC_GO: {
15085 processorid_t cpuid;
15086 rval = dtrace_state_go(state, &cpuid);
15087
15088 if (rval != 0)
15089 return (rval);
15090
15091 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
15092 return (EFAULT);
15093
15094 return (0);
15095 }
15096
15097 case DTRACEIOC_STOP: {
15098 processorid_t cpuid;
15099
15100 mutex_enter(&dtrace_lock);
15101 rval = dtrace_state_stop(state, &cpuid);
15102 mutex_exit(&dtrace_lock);
15103
15104 if (rval != 0)
15105 return (rval);
15106
15107 if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
15108 return (EFAULT);
15109
15110 return (0);
15111 }
15112
15113 case DTRACEIOC_DOFGET: {
15114 dof_hdr_t hdr, *dof;
15115 uint64_t len;
15116
15117 if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
15118 return (EFAULT);
15119
15120 mutex_enter(&dtrace_lock);
15121 dof = dtrace_dof_create(state);
15122 mutex_exit(&dtrace_lock);
15123
15124 len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
15125 rval = copyout(dof, (void *)arg, len);
15126 dtrace_dof_destroy(dof);
15127
15128 return (rval == 0 ? 0 : EFAULT);
15129 }
15130
15131 case DTRACEIOC_AGGSNAP:
15132 case DTRACEIOC_BUFSNAP: {
15133 dtrace_bufdesc_t desc;
15134 caddr_t cached;
15135 dtrace_buffer_t *buf;
15136
15137 if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
15138 return (EFAULT);
15139
15140 if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
15141 return (EINVAL);
15142
15143 mutex_enter(&dtrace_lock);
15144
15145 if (cmd == DTRACEIOC_BUFSNAP) {
15146 buf = &state->dts_buffer[desc.dtbd_cpu];
15147 } else {
15148 buf = &state->dts_aggbuffer[desc.dtbd_cpu];
15149 }
15150
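		/*
		 * Ring and fill buffers are not switched; they can only be
		 * snapshotted in place, and only once tracing has stopped.
		 */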
15151 if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
15152 size_t sz = buf->dtb_offset;
15153
15154 if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
15155 mutex_exit(&dtrace_lock);
15156 return (EBUSY);
15157 }
15158
15159 /*
15160 * If this buffer has already been consumed, we're
15161 * going to indicate that there's nothing left here
15162 * to consume.
15163 */
15164 if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
15165 mutex_exit(&dtrace_lock);
15166
15167 desc.dtbd_size = 0;
15168 desc.dtbd_drops = 0;
15169 desc.dtbd_errors = 0;
15170 desc.dtbd_oldest = 0;
15171 sz = sizeof (desc);
15172
15173 if (copyout(&desc, (void *)arg, sz) != 0)
15174 return (EFAULT);
15175
15176 return (0);
15177 }
15178
15179 /*
15180 * If this is a ring buffer that has wrapped, we want
15181 * to copy the whole thing out.
15182 */
15183 if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
15184 dtrace_buffer_polish(buf);
15185 sz = buf->dtb_size;
15186 }
15187
15188 if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
15189 mutex_exit(&dtrace_lock);
15190 return (EFAULT);
15191 }
15192
15193 desc.dtbd_size = sz;
15194 desc.dtbd_drops = buf->dtb_drops;
15195 desc.dtbd_errors = buf->dtb_errors;
15196 desc.dtbd_oldest = buf->dtb_xamot_offset;
15197
15198 mutex_exit(&dtrace_lock);
15199
15200 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15201 return (EFAULT);
15202
15203 buf->dtb_flags |= DTRACEBUF_CONSUMED;
15204
15205 return (0);
15206 }
15207
15208 if (buf->dtb_tomax == NULL) {
15209 ASSERT(buf->dtb_xamot == NULL);
15210 mutex_exit(&dtrace_lock);
15211 return (ENOENT);
15212 }
15213
15214 cached = buf->dtb_tomax;
15215 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));
15216
15217 dtrace_xcall(desc.dtbd_cpu,
15218 (dtrace_xcall_t)dtrace_buffer_switch, buf);
15219
15220 state->dts_errors += buf->dtb_xamot_errors;
15221
15222 /*
15223 * If the buffers did not actually switch, then the cross call
15224 * did not take place -- presumably because the given CPU is
15225 * not in the ready set. If this is the case, we'll return
15226 * ENOENT.
15227 */
15228 if (buf->dtb_tomax == cached) {
15229 ASSERT(buf->dtb_xamot != cached);
15230 mutex_exit(&dtrace_lock);
15231 return (ENOENT);
15232 }
15233
15234 ASSERT(cached == buf->dtb_xamot);
15235
15236 /*
15237 * We have our snapshot; now copy it out.
15238 */
15239 if (copyout(buf->dtb_xamot, desc.dtbd_data,
15240 buf->dtb_xamot_offset) != 0) {
15241 mutex_exit(&dtrace_lock);
15242 return (EFAULT);
15243 }
15244
15245 desc.dtbd_size = buf->dtb_xamot_offset;
15246 desc.dtbd_drops = buf->dtb_xamot_drops;
15247 desc.dtbd_errors = buf->dtb_xamot_errors;
15248 desc.dtbd_oldest = 0;
15249
15250 mutex_exit(&dtrace_lock);
15251
15252 /*
15253 * Finally, copy out the buffer description.
15254 */
15255 if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
15256 return (EFAULT);
15257
15258 return (0);
15259 }
15260
15261 case DTRACEIOC_CONF: {
15262 dtrace_conf_t conf;
15263
15264 bzero(&conf, sizeof (conf));
15265 conf.dtc_difversion = DIF_VERSION;
15266 conf.dtc_difintregs = DIF_DIR_NREGS;
15267 conf.dtc_diftupregs = DIF_DTR_NREGS;
15268 conf.dtc_ctfmodel = CTF_MODEL_NATIVE;
15269
15270 if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
15271 return (EFAULT);
15272
15273 return (0);
15274 }
15275
15276 case DTRACEIOC_STATUS: {
15277 dtrace_status_t stat;
15278 dtrace_dstate_t *dstate;
15279 int i, j;
15280 uint64_t nerrs;
15281
15282 /*
15283 * See the comment in dtrace_state_deadman() for the reason
15284 * for setting dts_laststatus to INT64_MAX before setting
15285 * it to the correct value.
15286 */
15287 state->dts_laststatus = INT64_MAX;
15288 dtrace_membar_producer();
15289 state->dts_laststatus = dtrace_gethrtime();
15290
15291 bzero(&stat, sizeof (stat));
15292
15293 mutex_enter(&dtrace_lock);
15294
15295 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
15296 mutex_exit(&dtrace_lock);
15297 return (ENOENT);
15298 }
15299
15300 if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
15301 stat.dtst_exiting = 1;
15302
15303 nerrs = state->dts_errors;
15304 dstate = &state->dts_vstate.dtvs_dynvars;
15305
15306 for (i = 0; i < NCPU; i++) {
15307 dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];
15308
15309 stat.dtst_dyndrops += dcpu->dtdsc_drops;
15310 stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
15311 stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;
15312
15313 if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
15314 stat.dtst_filled++;
15315
15316 nerrs += state->dts_buffer[i].dtb_errors;
15317
15318 for (j = 0; j < state->dts_nspeculations; j++) {
15319 dtrace_speculation_t *spec;
15320 dtrace_buffer_t *buf;
15321
15322 spec = &state->dts_speculations[j];
15323 buf = &spec->dtsp_buffer[i];
15324 stat.dtst_specdrops += buf->dtb_xamot_drops;
15325 }
15326 }
15327
15328 stat.dtst_specdrops_busy = state->dts_speculations_busy;
15329 stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
15330 stat.dtst_stkstroverflows = state->dts_stkstroverflows;
15331 stat.dtst_dblerrors = state->dts_dblerrors;
15332 stat.dtst_killed =
15333 (state->dts_activity == DTRACE_ACTIVITY_KILLED);
15334 stat.dtst_errors = nerrs;
15335
15336 mutex_exit(&dtrace_lock);
15337
15338 if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
15339 return (EFAULT);
15340
15341 return (0);
15342 }
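	/*
	 * (DTRACEIOC_STATUS is what a consumer is expected to issue on each
	 * pass of its work loop; the update to dts_laststatus above is also
	 * what keeps the deadman timer from concluding that the consumer
	 * has died.)
	 */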
15343
15344 case DTRACEIOC_FORMAT: {
15345 dtrace_fmtdesc_t fmt;
15346 char *str;
15347 int len;
15348
15349 if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
15350 return (EFAULT);
15351
15352 mutex_enter(&dtrace_lock);
15353
15354 if (fmt.dtfd_format == 0 ||
15355 fmt.dtfd_format > state->dts_nformats) {
15356 mutex_exit(&dtrace_lock);
15357 return (EINVAL);
15358 }
15359
15360 /*
15361 * Format strings are allocated contiguously and they are
15362 * never freed; if a format index is less than the number
15363 * of formats, we can assert that the format map is non-NULL
15364 * and that the format for the specified index is non-NULL.
15365 */
15366 ASSERT(state->dts_formats != NULL);
15367 str = state->dts_formats[fmt.dtfd_format - 1];
15368 ASSERT(str != NULL);
15369
15370 len = strlen(str) + 1;
15371
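		/*
		 * Note the contract implemented below:  if the caller's
		 * buffer is too small, the required length is copied back
		 * out and the ioctl still succeeds; the caller is expected
		 * to reissue it with dtfd_length of at least "len" and a
		 * correspondingly sized dtfd_string.
		 */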
15372 if (len > fmt.dtfd_length) {
15373 fmt.dtfd_length = len;
15374
15375 if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
15376 mutex_exit(&dtrace_lock);
15377 return (EINVAL);
15378 }
15379 } else {
15380 if (copyout(str, fmt.dtfd_string, len) != 0) {
15381 mutex_exit(&dtrace_lock);
15382 return (EINVAL);
15383 }
15384 }
15385
15386 mutex_exit(&dtrace_lock);
15387 return (0);
15388 }
15389
15390 default:
15391 break;
15392 }
15393
15394 return (ENOTTY);
15395 }
15396
15397 /*ARGSUSED*/
15398 static int
15399 dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
15400 {
15401 dtrace_state_t *state;
15402
15403 switch (cmd) {
15404 case DDI_DETACH:
15405 break;
15406
15407 case DDI_SUSPEND:
15408 return (DDI_SUCCESS);
15409
15410 default:
15411 return (DDI_FAILURE);
15412 }
15413
15414 mutex_enter(&cpu_lock);
15415 mutex_enter(&dtrace_provider_lock);
15416 mutex_enter(&dtrace_lock);
15417
15418 ASSERT(dtrace_opens == 0);
15419
15420 if (dtrace_helpers > 0) {
15421 mutex_exit(&dtrace_provider_lock);
15422 mutex_exit(&dtrace_lock);
15423 mutex_exit(&cpu_lock);
15424 return (DDI_FAILURE);
15425 }
15426
15427 if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
15428 mutex_exit(&dtrace_provider_lock);
15429 mutex_exit(&dtrace_lock);
15430 mutex_exit(&cpu_lock);
15431 return (DDI_FAILURE);
15432 }
15433
15434 dtrace_provider = NULL;
15435
15436 if ((state = dtrace_anon_grab()) != NULL) {
15437 /*
15438 	 * If there were ECBs on this state, the provider should
15439 	 * not have been allowed to detach; assert that there are
15440 	 * none.
15441 */
15442 ASSERT(state->dts_necbs == 0);
15443 dtrace_state_destroy(state);
15444
15445 /*
15446 * If we're being detached with anonymous state, we need to
15447 * indicate to the kernel debugger that DTrace is now inactive.
15448 */
15449 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
15450 }
15451
15452 bzero(&dtrace_anon, sizeof (dtrace_anon_t));
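	/*
	 * (These are the hook points that dtrace_attach() installed for the
	 * CPU setup, kernel debugger, module load/unload and helper
	 * subsystems; with the provider unregistered and no consumers open,
	 * they can simply be cleared.)
	 */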
15453 unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
15454 dtrace_cpu_init = NULL;
15455 dtrace_helpers_cleanup = NULL;
15456 dtrace_helpers_fork = NULL;
15457 dtrace_cpustart_init = NULL;
15458 dtrace_cpustart_fini = NULL;
15459 dtrace_debugger_init = NULL;
15460 dtrace_debugger_fini = NULL;
15461 dtrace_modload = NULL;
15462 dtrace_modunload = NULL;
15463
15464 mutex_exit(&cpu_lock);
15465
15466 if (dtrace_helptrace_enabled) {
15467 kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
15468 dtrace_helptrace_buffer = NULL;
15469 }
15470
15471 kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
15472 dtrace_probes = NULL;
15473 dtrace_nprobes = 0;
15474
15475 dtrace_hash_destroy(dtrace_bymod);
15476 dtrace_hash_destroy(dtrace_byfunc);
15477 dtrace_hash_destroy(dtrace_byname);
15478 dtrace_bymod = NULL;
15479 dtrace_byfunc = NULL;
15480 dtrace_byname = NULL;
15481
15482 kmem_cache_destroy(dtrace_state_cache);
15483 vmem_destroy(dtrace_minor);
15484 vmem_destroy(dtrace_arena);
15485
15486 if (dtrace_toxrange != NULL) {
15487 kmem_free(dtrace_toxrange,
15488 dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
15489 dtrace_toxrange = NULL;
15490 dtrace_toxranges = 0;
15491 dtrace_toxranges_max = 0;
15492 }
15493
15494 ddi_remove_minor_node(dtrace_devi, NULL);
15495 dtrace_devi = NULL;
15496
15497 ddi_soft_state_fini(&dtrace_softstate);
15498
15499 ASSERT(dtrace_vtime_references == 0);
15500 ASSERT(dtrace_opens == 0);
15501 ASSERT(dtrace_retained == NULL);
15502
15503 mutex_exit(&dtrace_lock);
15504 mutex_exit(&dtrace_provider_lock);
15505
15506 /*
15507 * We don't destroy the task queue until after we have dropped our
15508 * locks (taskq_destroy() may block on running tasks). To prevent
15509 * attempting to do work after we have effectively detached but before
15510 * the task queue has been destroyed, all tasks dispatched via the
15511 * task queue must check that DTrace is still attached before
15512 * performing any operation.
15513 */
15514 taskq_destroy(dtrace_taskq);
15515 dtrace_taskq = NULL;
15516
15517 return (DDI_SUCCESS);
15518 }
15519
15520 /*ARGSUSED*/
15521 static int
15522 dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
15523 {
15524 int error;
15525
15526 switch (infocmd) {
15527 case DDI_INFO_DEVT2DEVINFO:
15528 *result = (void *)dtrace_devi;
15529 error = DDI_SUCCESS;
15530 break;
15531 case DDI_INFO_DEVT2INSTANCE:
15532 *result = (void *)0;
15533 error = DDI_SUCCESS;
15534 break;
15535 default:
15536 error = DDI_FAILURE;
15537 }
15538 return (error);
15539 }
15540
15541 static struct cb_ops dtrace_cb_ops = {
15542 dtrace_open, /* open */
15543 dtrace_close, /* close */
15544 nulldev, /* strategy */
15545 nulldev, /* print */
15546 nodev, /* dump */
15547 nodev, /* read */
15548 nodev, /* write */
15549 dtrace_ioctl, /* ioctl */
15550 nodev, /* devmap */
15551 nodev, /* mmap */
15552 nodev, /* segmap */
15553 nochpoll, /* poll */
15554 ddi_prop_op, /* cb_prop_op */
15555 0, /* streamtab */
15556 D_NEW | D_MP /* Driver compatibility flag */
15557 };
15558
15559 static struct dev_ops dtrace_ops = {
15560 DEVO_REV, /* devo_rev */
15561 0, /* refcnt */
15562 dtrace_info, /* get_dev_info */
15563 nulldev, /* identify */
15564 nulldev, /* probe */
15565 dtrace_attach, /* attach */
15566 dtrace_detach, /* detach */
15567 nodev, /* reset */
15568 &dtrace_cb_ops, /* driver operations */
15569 NULL, /* bus operations */
15570 nodev, /* dev power */
15571 ddi_quiesce_not_needed, /* quiesce */
15572 };
15573
15574 static struct modldrv modldrv = {
15575 &mod_driverops, /* module type (this is a pseudo driver) */
15576 "Dynamic Tracing", /* name of module */
15577 &dtrace_ops, /* driver ops */
15578 };
15579
15580 static struct modlinkage modlinkage = {
15581 MODREV_1,
15582 (void *)&modldrv,
15583 NULL
15584 };
15585
15586 int
15587 _init(void)
15588 {
15589 return (mod_install(&modlinkage));
15590 }
15591
15592 int
15593 _info(struct modinfo *modinfop)
15594 {
15595 return (mod_info(&modlinkage, modinfop));
15596 }
15597
15598 int
15599 _fini(void)
15600 {
15601 return (mod_remove(&modlinkage));
15602 }
15603