/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 *
 * $FreeBSD$
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DTrace - Dynamic Tracing for Solaris
 *
 * This is the implementation of the Solaris Dynamic Tracing framework
 * (DTrace).  The user-visible interface to DTrace is described at length in
 * the "Solaris Dynamic Tracing Guide".  The interfaces between the libdtrace
 * library, the in-kernel DTrace framework, and the DTrace providers are
 * described in the block comments in the <sys/dtrace.h> header file.  The
 * internal architecture of DTrace is described in the block comments in the
 * <sys/dtrace_impl.h> header file.  The comments contained within the DTrace
 * implementation very much assume mastery of all of these sources; if one has
 * an unanswered question about the implementation, one should consult them
 * first.
 *
 * The functions here are ordered roughly as follows:
 *
 *   - Probe context functions
 *   - Probe hashing functions
 *   - Non-probe context utility functions
 *   - Matching functions
 *   - Provider-to-Framework API functions
 *   - Probe management functions
 *   - DIF object functions
 *   - Format functions
 *   - Predicate functions
 *   - ECB functions
 *   - Buffer functions
 *   - Enabling functions
 *   - DOF functions
 *   - Anonymous enabling functions
 *   - Consumer state functions
 *   - Helper functions
 *   - Hook functions
 *   - Driver cookbook functions
 *
 * Each group of functions begins with a block comment labelled the
 * "DTrace [Group] Functions", allowing one to find each block by searching
 * forward on capital-f functions.
 */
#include <sys/errno.h>
#if !defined(sun)
#include <sys/time.h>
#endif
#include <sys/stat.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/systm.h>
#if defined(sun)
#include <sys/ddi.h>
#include <sys/sunddi.h>
#endif
#include <sys/cpuvar.h>
#include <sys/kmem.h>
#if defined(sun)
#include <sys/strsubr.h>
#endif
#include <sys/sysmacros.h>
#include <sys/dtrace_impl.h>
#include <sys/atomic.h>
#include <sys/cmn_err.h>
#if defined(sun)
#include <sys/mutex_impl.h>
#include <sys/rwlock_impl.h>
#endif
#include <sys/ctf_api.h>
#if defined(sun)
#include <sys/panic.h>
#include <sys/priv_impl.h>
#endif
#include <sys/policy.h>
#if defined(sun)
#include <sys/cred_impl.h>
#include <sys/procfs_isa.h>
#endif
#include <sys/taskq.h>
#if defined(sun)
#include <sys/mkdev.h>
#include <sys/kdi.h>
#endif
#include <sys/zone.h>
#include <sys/socket.h>
#include <netinet/in.h>

/* FreeBSD includes: */
#if !defined(sun)
#include <sys/callout.h>
#include <sys/ctype.h>
#include <sys/limits.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rwlock.h>
#include <sys/sx.h>
#include <sys/dtrace_bsd.h>
#include <netinet/in.h>
#include "dtrace_cddl.h"
#include "dtrace_debug.c"
#endif

/*
 * DTrace Tunable Variables
 *
 * The following variables may be tuned by adding a line to /etc/system that
 * includes both the name of the DTrace module ("dtrace") and the name of the
 * variable.  For example:
 *
 *   set dtrace:dtrace_destructive_disallow = 1
 *
 * In general, the only variables that one should be tuning this way are those
 * that affect system-wide DTrace behavior, and for which the default behavior
 * is undesirable.  Most of these variables are tunable on a per-consumer
 * basis using DTrace options, and need not be tuned on a system-wide basis.
 * When tuning these variables, avoid pathological values; while some attempt
 * is made to verify the integrity of these variables, they are not considered
 * part of the supported interface to DTrace, and they are therefore not
 * checked comprehensively.  Further, these variables should not be tuned
 * dynamically via "mdb -kw" or other means; they should only be tuned via
 * /etc/system.
 */
int		dtrace_destructive_disallow = 0;
dtrace_optval_t	dtrace_nonroot_maxsize = (16 * 1024 * 1024);
size_t		dtrace_difo_maxsize = (256 * 1024);
dtrace_optval_t	dtrace_dof_maxsize = (256 * 1024);
size_t		dtrace_global_maxsize = (16 * 1024);
size_t		dtrace_actions_max = (16 * 1024);
size_t		dtrace_retain_max = 1024;
dtrace_optval_t	dtrace_helper_actions_max = 128;
dtrace_optval_t	dtrace_helper_providers_max = 32;
dtrace_optval_t	dtrace_dstate_defsize = (1 * 1024 * 1024);
size_t		dtrace_strsize_default = 256;
dtrace_optval_t	dtrace_cleanrate_default = 9900990;		/* 101 hz */
dtrace_optval_t	dtrace_cleanrate_min = 200000;			/* 5000 hz */
dtrace_optval_t	dtrace_cleanrate_max = (uint64_t)60 * NANOSEC;	/* 1/minute */
dtrace_optval_t	dtrace_aggrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_statusrate_max = (hrtime_t)10 * NANOSEC;	/* 6/minute */
dtrace_optval_t	dtrace_switchrate_default = NANOSEC;		/* 1 hz */
dtrace_optval_t	dtrace_nspec_default = 1;
dtrace_optval_t	dtrace_specsize_default = 32 * 1024;
dtrace_optval_t	dtrace_stackframes_default = 20;
dtrace_optval_t	dtrace_ustackframes_default = 20;
dtrace_optval_t	dtrace_jstackframes_default = 50;
dtrace_optval_t	dtrace_jstackstrsize_default = 512;
int		dtrace_msgdsize_max = 128;
hrtime_t	dtrace_chill_max = 500 * (NANOSEC / MILLISEC);	/* 500 ms */
hrtime_t	dtrace_chill_interval = NANOSEC;		/* 1000 ms */
int		dtrace_devdepth_max = 32;
int		dtrace_err_verbose;
hrtime_t	dtrace_deadman_interval = NANOSEC;
hrtime_t	dtrace_deadman_timeout = (hrtime_t)10 * NANOSEC;
hrtime_t	dtrace_deadman_user = (hrtime_t)30 * NANOSEC;
hrtime_t	dtrace_unregister_defunct_reap = (hrtime_t)60 * NANOSEC;

/*
 * DTrace External Variables
 *
 * As dtrace(7D) is a kernel module, any DTrace variables are obviously
 * available to DTrace consumers via the backtick (`) syntax.  One of these,
 * dtrace_zero, is made deliberately so:  it is provided as a source of
 * well-known, zero-filled memory.  While this variable is not documented,
 * it is used by some translators as an implementation detail.
 */
const char	dtrace_zero[256] = { 0 };	/* zero-filled memory */

/*
 * DTrace Internal Variables
 */
#if defined(sun)
static dev_info_t	*dtrace_devi;		/* device info */
#endif
#if defined(sun)
static vmem_t		*dtrace_arena;		/* probe ID arena */
static vmem_t		*dtrace_minor;		/* minor number arena */
#else
static taskq_t		*dtrace_taskq;		/* task queue */
static struct unrhdr	*dtrace_arena;		/* Probe ID number. */
#endif
static dtrace_probe_t	**dtrace_probes;	/* array of all probes */
static int		dtrace_nprobes;		/* number of probes */
static dtrace_provider_t *dtrace_provider;	/* provider list */
static dtrace_meta_t	*dtrace_meta_pid;	/* user-land meta provider */
static int		dtrace_opens;		/* number of opens */
static int		dtrace_helpers;		/* number of helpers */
#if defined(sun)
static void		*dtrace_softstate;	/* softstate pointer */
#endif
static dtrace_hash_t	*dtrace_bymod;		/* probes hashed by module */
static dtrace_hash_t	*dtrace_byfunc;		/* probes hashed by function */
static dtrace_hash_t	*dtrace_byname;		/* probes hashed by name */
static dtrace_toxrange_t *dtrace_toxrange;	/* toxic range array */
static int		dtrace_toxranges;	/* number of toxic ranges */
static int		dtrace_toxranges_max;	/* size of toxic range array */
static dtrace_anon_t	dtrace_anon;		/* anonymous enabling */
static kmem_cache_t	*dtrace_state_cache;	/* cache for dynamic state */
static uint64_t		dtrace_vtime_references; /* number of vtimestamp refs */
static kthread_t	*dtrace_panicked;	/* panicking thread */
static dtrace_ecb_t	*dtrace_ecb_create_cache; /* cached created ECB */
static dtrace_genid_t	dtrace_probegen;	/* current probe generation */
static dtrace_helpers_t *dtrace_deferred_pid;	/* deferred helper list */
static dtrace_enabling_t *dtrace_retained;	/* list of retained enablings */
static dtrace_dynvar_t	dtrace_dynhash_sink;	/* end of dynamic hash chains */
#if !defined(sun)
static struct mtx	dtrace_unr_mtx;
MTX_SYSINIT(dtrace_unr_mtx, &dtrace_unr_mtx, "Unique resource identifier", MTX_DEF);
int		dtrace_in_probe;	/* non-zero if executing a probe */
#if defined(__i386__) || defined(__amd64__) || defined(__mips__) || defined(__powerpc__)
uintptr_t	dtrace_in_probe_addr;	/* Address of invop when already in probe */
#endif
#endif

/*
 * DTrace Locking
 * DTrace is protected by three (relatively coarse-grained) locks:
 *
 * (1) dtrace_lock is required to manipulate essentially any DTrace state,
 *     including enabling state, probes, ECBs, consumer state, helper state,
 *     etc.  Importantly, dtrace_lock is _not_ required when in probe context;
 *     probe context is lock-free -- synchronization is handled via the
 *     dtrace_sync() cross call mechanism.
 *
 * (2) dtrace_provider_lock is required when manipulating provider state, or
 *     when provider state must be held constant.
 *
 * (3) dtrace_meta_lock is required when manipulating meta provider state, or
 *     when meta provider state must be held constant.
 *
 * The lock ordering between these three locks is dtrace_meta_lock before
 * dtrace_provider_lock before dtrace_lock.  (In particular, there are
 * several places where dtrace_provider_lock is held by the framework as it
 * calls into the providers -- which then call back into the framework,
 * grabbing dtrace_lock.)
 *
 * There are two other locks in the mix:  mod_lock and cpu_lock.  With respect
 * to dtrace_provider_lock and dtrace_lock, cpu_lock continues its historical
 * role as a coarse-grained lock; it is acquired before both of these locks.
 * With respect to dtrace_meta_lock, its behavior is stranger:  cpu_lock must
 * be acquired _between_ dtrace_meta_lock and any other DTrace locks.
 * mod_lock is similar with respect to dtrace_provider_lock in that it must be
 * acquired _between_ dtrace_provider_lock and dtrace_lock.
 */
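/*
 * Taken together, the constraints above admit a single total acquisition
 * order (a summary of the rules stated above, not a new policy):
 *
 *	dtrace_meta_lock -> cpu_lock -> dtrace_provider_lock -> mod_lock ->
 *	    dtrace_lock
 *
 * For illustration, a caller that needed both provider state and probe
 * state held constant would acquire the locks in this order:
 *
 *	mutex_enter(&dtrace_provider_lock);
 *	mutex_enter(&dtrace_lock);
 *	...
 *	mutex_exit(&dtrace_lock);
 *	mutex_exit(&dtrace_provider_lock);
 */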
static kmutex_t		dtrace_lock;		/* probe state lock */
static kmutex_t		dtrace_provider_lock;	/* provider state lock */
static kmutex_t		dtrace_meta_lock;	/* meta-provider state lock */

#if !defined(sun)
/* XXX FreeBSD hacks. */
#define cr_suid		cr_svuid
#define cr_sgid		cr_svgid
#define	ipaddr_t	in_addr_t
#define mod_modname	pathname
#define vuprintf	vprintf
#define ttoproc(_a)	((_a)->td_proc)
#define crgetzoneid(_a)	0
#define	NCPU		MAXCPU
#define SNOCD		0
#define CPU_ON_INTR(_a)	0

#define PRIV_EFFECTIVE		(1 << 0)
#define PRIV_DTRACE_KERNEL	(1 << 1)
#define PRIV_DTRACE_PROC	(1 << 2)
#define PRIV_DTRACE_USER	(1 << 3)
#define PRIV_PROC_OWNER		(1 << 4)
#define PRIV_PROC_ZONE		(1 << 5)
#define PRIV_ALL		~0

SYSCTL_NODE(_debug, OID_AUTO, dtrace, CTLFLAG_RD, 0, "DTrace Information");
#endif

#if defined(sun)
#define curcpu	CPU->cpu_id
#endif


/*
 * DTrace Provider Variables
 *
 * These are the variables relating to DTrace as a provider (that is, the
 * provider of the BEGIN, END, and ERROR probes).
 */
static dtrace_pattr_t	dtrace_provider_attr = {
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_PRIVATE, DTRACE_STABILITY_PRIVATE, DTRACE_CLASS_UNKNOWN },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
{ DTRACE_STABILITY_STABLE, DTRACE_STABILITY_STABLE, DTRACE_CLASS_COMMON },
};

static void
dtrace_nullop(void)
{}

static dtrace_pops_t	dtrace_provider_ops = {
	(void (*)(void *, dtrace_probedesc_t *))dtrace_nullop, /* dtps_provide */
	(void (*)(void *, modctl_t *))dtrace_nullop,	/* dtps_provide_module */
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,	/* dtps_enable */
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,	/* dtps_disable */
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,	/* dtps_suspend */
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop,	/* dtps_resume */
	NULL,						/* dtps_getargdesc */
	NULL,						/* dtps_getargval */
	NULL,						/* dtps_usermode */
	(void (*)(void *, dtrace_id_t, void *))dtrace_nullop	/* dtps_destroy */
};

static dtrace_id_t	dtrace_probeid_begin;	/* special BEGIN probe */
static dtrace_id_t	dtrace_probeid_end;	/* special END probe */
dtrace_id_t		dtrace_probeid_error;	/* special ERROR probe */

/*
 * DTrace Helper Tracing Variables
 */
uint32_t dtrace_helptrace_next = 0;
uint32_t dtrace_helptrace_nlocals;
char	*dtrace_helptrace_buffer;
int	dtrace_helptrace_bufsize = 512 * 1024;

#ifdef DEBUG
int	dtrace_helptrace_enabled = 1;
#else
int	dtrace_helptrace_enabled = 0;
#endif
/*
 * DTrace Error Hashing
 *
 * On DEBUG kernels, DTrace will track the errors that it has seen in a hash
 * table.  This is very useful for checking coverage of tests that are
 * expected to induce DIF or DOF processing errors, and may be useful for
 * debugging problems in the DIF code generator or in DOF generation.  The
 * error hash may be examined with the ::dtrace_errhash MDB dcmd.
 */
#ifdef DEBUG
static dtrace_errhash_t	dtrace_errhash[DTRACE_ERRHASHSZ];
static const char *dtrace_errlast;
static kthread_t *dtrace_errthread;
static kmutex_t dtrace_errlock;
#endif

/*
 * DTrace Macros and Constants
 *
 * These are various macros that are useful in various spots in the
 * implementation, along with a few random constants that have no meaning
 * outside of the implementation.  There is no real structure to this cpp
 * mishmash -- but is there ever?
 */
#define	DTRACE_HASHSTR(hash, probe)	\
	dtrace_hash_str(*((char **)((uintptr_t)(probe) + (hash)->dth_stroffs)))

#define	DTRACE_HASHNEXT(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_nextoffs)

#define	DTRACE_HASHPREV(hash, probe)	\
	(dtrace_probe_t **)((uintptr_t)(probe) + (hash)->dth_prevoffs)

#define	DTRACE_HASHEQ(hash, lhs, rhs)	\
	(strcmp(*((char **)((uintptr_t)(lhs) + (hash)->dth_stroffs)), \
	    *((char **)((uintptr_t)(rhs) + (hash)->dth_stroffs))) == 0)

#define	DTRACE_AGGHASHSIZE_SLEW		17

#define	DTRACE_V4MAPPED_OFFSET		(sizeof (uint32_t) * 3)

/*
 * The key for a thread-local variable consists of the lower 61 bits of the
 * t_did, plus the 3 bits of the highest active interrupt above LOCK_LEVEL.
 * We add DIF_VARIABLE_MAX to t_did to assure that the thread key is never
 * equal to a variable identifier.  This is necessary (but not sufficient) to
 * assure that global associative arrays never collide with thread-local
 * variables.  To guarantee that they cannot collide, we must also define the
 * order for keying dynamic variables.  That order is:
 *
 *   [ key0 ] ... [ keyn ] [ variable-key ] [ tls-key ]
 *
 * Because the variable-key and the tls-key are in orthogonal spaces, there is
 * no way for a global variable key signature to match a thread-local key
 * signature.
 */
#if defined(sun)
#define	DTRACE_TLS_THRKEY(where) { \
	uint_t intr = 0; \
	uint_t actv = CPU->cpu_intr_actv >> (LOCK_LEVEL + 1); \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->t_did + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#else
#define	DTRACE_TLS_THRKEY(where) { \
	solaris_cpu_t *_c = &solaris_cpu[curcpu]; \
	uint_t intr = 0; \
	uint_t actv = _c->cpu_intr_actv; \
	for (; actv; actv >>= 1) \
		intr++; \
	ASSERT(intr < (1 << 3)); \
	(where) = ((curthread->td_tid + DIF_VARIABLE_MAX) & \
	    (((uint64_t)1 << 61) - 1)) | ((uint64_t)intr << 61); \
}
#endif
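/*
 * For illustration, the 64-bit thread key produced above is laid out as
 * follows (a sketch of the macro's result, not a separate interface):
 *
 *	 63          61 60                                             0
 *	+--------------+-----------------------------------------------+
 *	|   intr (3)   |  (thread ID + DIF_VARIABLE_MAX) & (2^61 - 1)  |
 *	+--------------+-----------------------------------------------+
 *
 * e.g., a thread with ID 42 interrupted at the second interrupt level
 * above LOCK_LEVEL (so that actv is 2 and the loop runs twice) yields
 * intr == 2 and a key of ((uint64_t)2 << 61) | (42 + DIF_VARIABLE_MAX).
 */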
/*
 * Byte-swap macros:  e.g., DT_BSWAP_16(0x1234) evaluates to 0x3412.
 */
#define	DT_BSWAP_8(x)	((x) & 0xff)
#define	DT_BSWAP_16(x)	((DT_BSWAP_8(x) << 8) | DT_BSWAP_8((x) >> 8))
#define	DT_BSWAP_32(x)	((DT_BSWAP_16(x) << 16) | DT_BSWAP_16((x) >> 16))
#define	DT_BSWAP_64(x)	((DT_BSWAP_32(x) << 32) | DT_BSWAP_32((x) >> 32))

#define	DT_MASK_LO 0x00000000FFFFFFFFULL

#define	DTRACE_STORE(type, tomax, offset, what) \
	*((type *)((uintptr_t)(tomax) + (uintptr_t)offset)) = (type)(what);

#ifndef __x86
#define	DTRACE_ALIGNCHECK(addr, size, flags)				\
	if (addr & (size - 1)) {					\
		*flags |= CPU_DTRACE_BADALIGN;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}
#else
#define	DTRACE_ALIGNCHECK(addr, size, flags)
#endif

/*
 * Test whether a range of memory starting at testaddr of size testsz falls
 * within the range of memory described by addr, sz.  We take care to avoid
 * problems with overflow and underflow of the unsigned quantities, and
 * disallow all negative sizes.  Ranges of size 0 are allowed.
 */
#define	DTRACE_INRANGE(testaddr, testsz, baseaddr, basesz) \
	((testaddr) - (baseaddr) < (basesz) && \
	(testaddr) + (testsz) - (baseaddr) <= (basesz) && \
	(testaddr) + (testsz) >= (testaddr))
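/*
 * For illustration, consider a base range of [0x1000, 0x1100) (baseaddr
 * 0x1000, basesz 0x100) and a candidate range with testaddr 0x2000 and a
 * "negative" testsz of 0xffffffffffffffff.  A naive bounds check would
 * wrap around and appear to be in range; here the third clause,
 * (testaddr) + (testsz) >= (testaddr), fails on the wrap, so the range is
 * rejected.  Likewise, a testaddr below baseaddr wraps to a huge value in
 * the first clause's unsigned subtraction and fails the basesz comparison.
 */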
/*
 * Test whether alloc_sz bytes will fit in the scratch region.  We isolate
 * alloc_sz on the righthand side of the comparison in order to avoid overflow
 * or underflow in the comparison with it.  This is simpler than the INRANGE
 * check above, because we know that the dtms_scratch_ptr is valid in the
 * range.  Allocations of size zero are allowed.
 */
#define	DTRACE_INSCRATCH(mstate, alloc_sz) \
	((mstate)->dtms_scratch_base + (mstate)->dtms_scratch_size - \
	(mstate)->dtms_scratch_ptr >= (alloc_sz))
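/*
 * For illustration:  with a 64-byte scratch region based at 0x1000 and
 * dtms_scratch_ptr sitting at 0x1020, 0x20 bytes remain, so
 * DTRACE_INSCRATCH(mstate, 0x20) holds while DTRACE_INSCRATCH(mstate, 0x21)
 * fails.  Because alloc_sz stands alone on the right-hand side of the
 * comparison, even an enormous ("negative") alloc_sz cannot wrap the
 * arithmetic on the left and falsely pass the check.
 */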
#define	DTRACE_LOADFUNC(bits)						\
/*CSTYLED*/								\
uint##bits##_t								\
dtrace_load##bits(uintptr_t addr)					\
{									\
	size_t size = bits / NBBY;					\
	/*CSTYLED*/							\
	uint##bits##_t rval;						\
	int i;								\
	volatile uint16_t *flags = (volatile uint16_t *)		\
	    &cpu_core[curcpu].cpuc_dtrace_flags;			\
									\
	DTRACE_ALIGNCHECK(addr, size, flags);				\
									\
	for (i = 0; i < dtrace_toxranges; i++) {			\
		if (addr >= dtrace_toxrange[i].dtt_limit)		\
			continue;					\
									\
		if (addr + size <= dtrace_toxrange[i].dtt_base)		\
			continue;					\
									\
		/*							\
		 * This address falls within a toxic region; return 0.	\
		 */							\
		*flags |= CPU_DTRACE_BADADDR;				\
		cpu_core[curcpu].cpuc_dtrace_illval = addr;		\
		return (0);						\
	}								\
									\
	*flags |= CPU_DTRACE_NOFAULT;					\
	/*CSTYLED*/							\
	rval = *((volatile uint##bits##_t *)addr);			\
	*flags &= ~CPU_DTRACE_NOFAULT;					\
									\
	return (!(*flags & CPU_DTRACE_FAULT) ? rval : 0);		\
}

#ifdef _LP64
#define	dtrace_loadptr	dtrace_load64
#else
#define	dtrace_loadptr	dtrace_load32
#endif

#define	DTRACE_DYNHASH_FREE	0
#define	DTRACE_DYNHASH_SINK	1
#define	DTRACE_DYNHASH_VALID	2

#define	DTRACE_MATCH_NEXT	0
#define	DTRACE_MATCH_DONE	1
#define	DTRACE_ANCHORED(probe)	((probe)->dtpr_func[0] != '\0')
#define	DTRACE_STATE_ALIGN	64

#define	DTRACE_FLAGS2FLT(flags)						\
	(((flags) & CPU_DTRACE_BADADDR) ? DTRACEFLT_BADADDR :		\
	((flags) & CPU_DTRACE_ILLOP) ? DTRACEFLT_ILLOP :		\
	((flags) & CPU_DTRACE_DIVZERO) ? DTRACEFLT_DIVZERO :		\
	((flags) & CPU_DTRACE_KPRIV) ? DTRACEFLT_KPRIV :		\
	((flags) & CPU_DTRACE_UPRIV) ? DTRACEFLT_UPRIV :		\
	((flags) & CPU_DTRACE_TUPOFLOW) ? DTRACEFLT_TUPOFLOW :		\
	((flags) & CPU_DTRACE_BADALIGN) ? DTRACEFLT_BADALIGN :		\
	((flags) & CPU_DTRACE_NOSCRATCH) ? DTRACEFLT_NOSCRATCH :	\
	((flags) & CPU_DTRACE_BADSTACK) ? DTRACEFLT_BADSTACK :		\
	DTRACEFLT_UNKNOWN)

#define	DTRACEACT_ISSTRING(act)						\
	((act)->dta_kind == DTRACEACT_DIFEXPR &&			\
	(act)->dta_difo->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING)

/* Function prototype definitions: */
static size_t dtrace_strlen(const char *, size_t);
static dtrace_probe_t *dtrace_probe_lookup_id(dtrace_id_t id);
static void dtrace_enabling_provide(dtrace_provider_t *);
static int dtrace_enabling_match(dtrace_enabling_t *, int *);
static void dtrace_enabling_matchall(void);
static void dtrace_enabling_reap(void);
static dtrace_state_t *dtrace_anon_grab(void);
static uint64_t dtrace_helper(int, dtrace_mstate_t *,
    dtrace_state_t *, uint64_t, uint64_t);
static dtrace_helpers_t *dtrace_helpers_create(proc_t *);
static void dtrace_buffer_drop(dtrace_buffer_t *);
static int dtrace_buffer_consumed(dtrace_buffer_t *, hrtime_t when);
static intptr_t dtrace_buffer_reserve(dtrace_buffer_t *, size_t, size_t,
    dtrace_state_t *, dtrace_mstate_t *);
static int dtrace_state_option(dtrace_state_t *, dtrace_optid_t,
    dtrace_optval_t);
static int dtrace_ecb_create_enable(dtrace_probe_t *, void *);
static void dtrace_helper_provider_destroy(dtrace_helper_provider_t *);
uint16_t dtrace_load16(uintptr_t);
uint32_t dtrace_load32(uintptr_t);
uint64_t dtrace_load64(uintptr_t);
uint8_t dtrace_load8(uintptr_t);
void dtrace_dynvar_clean(dtrace_dstate_t *);
dtrace_dynvar_t *dtrace_dynvar(dtrace_dstate_t *, uint_t, dtrace_key_t *,
    size_t, dtrace_dynvar_op_t, dtrace_mstate_t *, dtrace_vstate_t *);
uintptr_t dtrace_dif_varstr(uintptr_t, dtrace_state_t *, dtrace_mstate_t *);

/*
 * DTrace Probe Context Functions
 *
 * These functions are called from probe context.  Because probe context is
 * any context in which C may be called, arbitrary locks may be held,
 * interrupts may be disabled, we may be in arbitrary dispatched state, etc.
 * As a result, functions called from probe context may only call other DTrace
 * support functions -- they may not interact at all with the system at large.
 * (Note that the ASSERT macro is made probe-context safe by redefining it in
 * terms of dtrace_assfail(), a probe-context safe function.)  If arbitrary
 * loads are to be performed from probe context, they _must_ be in terms of
 * the safe dtrace_load*() variants.
 *
 * Some functions in this block are not actually called from probe context;
 * for these functions, there will be a comment above the function reading
 * "Note:  not called from probe context."
 */
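/*
 * For illustration, a support function that needed to follow one link in
 * an arbitrary kernel structure (a hypothetical sketch; "next_off" is the
 * byte offset of the link field) would perform each dereference via the
 * safe loads defined above rather than a raw pointer access:
 *
 *	uintptr_t node = dtrace_loadptr(addr);
 *	uintptr_t next = dtrace_loadptr(node + next_off);
 *
 * If either load faults or touches a toxic range, the fault is noted in
 * the per-CPU flags and zero is returned instead of taking a fatal fault.
 */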
void
dtrace_panic(const char *format, ...)
{
	va_list alist;

	va_start(alist, format);
	dtrace_vpanic(format, alist);
	va_end(alist);
}

int
dtrace_assfail(const char *a, const char *f, int l)
{
	dtrace_panic("assertion failed: %s, file: %s, line: %d", a, f, l);

	/*
	 * We just need something here that even the most clever compiler
	 * cannot optimize away.
	 */
	return (a[(uintptr_t)f]);
}

/*
 * Atomically increment a specified error counter from probe context.
 */
static void
dtrace_error(uint32_t *counter)
{
	/*
	 * Most counters stored to in probe context are per-CPU counters.
	 * However, there are some error conditions that are sufficiently
	 * arcane that they don't merit per-CPU storage.  If these counters
	 * are incremented concurrently on different CPUs, scalability will be
	 * adversely affected -- but we don't expect them to be white-hot in a
	 * correctly constructed enabling...
	 */
	uint32_t oval, nval;

	do {
		oval = *counter;

		if ((nval = oval + 1) == 0) {
			/*
			 * If the counter would wrap, set it to 1 -- assuring
			 * that the counter is never zero when we have seen
			 * errors.  (The counter must be 32-bits because we
			 * aren't guaranteed a 64-bit compare&swap operation.)
			 * To save this code both the infamy of being fingered
			 * by a priggish news story and the indignity of being
			 * the target of a neo-puritan witch trial, we're
			 * carefully avoiding any colorful description of the
			 * likelihood of this condition -- but suffice it to
			 * say that it is only slightly more likely than the
			 * overflow of predicate cache IDs, as discussed in
			 * dtrace_predicate_create().
			 */
			nval = 1;
		}
	} while (dtrace_cas32(counter, oval, nval) != oval);
}

/*
 * Use the DTRACE_LOADFUNC macro to define functions for each of loading a
 * uint8_t, a uint16_t, a uint32_t and a uint64_t.
 */
DTRACE_LOADFUNC(8)
DTRACE_LOADFUNC(16)
DTRACE_LOADFUNC(32)
DTRACE_LOADFUNC(64)

static int
dtrace_inscratch(uintptr_t dest, size_t size, dtrace_mstate_t *mstate)
{
	if (dest < mstate->dtms_scratch_base)
		return (0);

	if (dest + size < dest)
		return (0);

	if (dest + size > mstate->dtms_scratch_ptr)
		return (0);

	return (1);
}

static int
dtrace_canstore_statvar(uint64_t addr, size_t sz,
    dtrace_statvar_t **svars, int nsvars)
{
	int i;

	for (i = 0; i < nsvars; i++) {
		dtrace_statvar_t *svar = svars[i];

		if (svar == NULL || svar->dtsv_size == 0)
			continue;

		if (DTRACE_INRANGE(addr, sz, svar->dtsv_data, svar->dtsv_size))
			return (1);
	}

	return (0);
}

/*
 * Check to see if the address is within a memory region to which a store may
 * be issued.  This includes the DTrace scratch areas, and any DTrace variable
 * region.  The caller of dtrace_canstore() is responsible for performing any
 * alignment checks that are needed before stores are actually executed.
 */
static int
dtrace_canstore(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	/*
	 * First, check to see if the address is in scratch space...
	 */
	if (DTRACE_INRANGE(addr, sz, mstate->dtms_scratch_base,
	    mstate->dtms_scratch_size))
		return (1);

	/*
	 * Now check to see if it's a dynamic variable.  This check will pick
	 * up both thread-local variables and any global dynamically-allocated
	 * variables.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)vstate->dtvs_dynvars.dtds_base,
	    vstate->dtvs_dynvars.dtds_size)) {
		dtrace_dstate_t *dstate = &vstate->dtvs_dynvars;
		uintptr_t base = (uintptr_t)dstate->dtds_base +
		    (dstate->dtds_hashsize * sizeof (dtrace_dynhash_t));
		uintptr_t chunkoffs;

		/*
		 * Before we assume that we can store here, we need to make
		 * sure that it isn't in our metadata -- storing to our
		 * dynamic variable metadata would corrupt our state.  For
		 * the range to not include any dynamic variable metadata,
		 * it must:
		 *
		 *	(1) Start above the hash table that is at the base of
		 *	the dynamic variable space
		 *
		 *	(2) Have a starting chunk offset that is beyond the
		 *	dtrace_dynvar_t that is at the base of every chunk
		 *
		 *	(3) Not span a chunk boundary
		 *
		 * (A worked example follows this function.)
		 */
		if (addr < base)
			return (0);

		chunkoffs = (addr - base) % dstate->dtds_chunksize;

		if (chunkoffs < sizeof (dtrace_dynvar_t))
			return (0);

		if (chunkoffs + sz > dstate->dtds_chunksize)
			return (0);

		return (1);
	}

	/*
	 * Finally, check the static local and global variables.  These checks
	 * take the longest, so we perform them last.
	 */
	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_locals, vstate->dtvs_nlocals))
		return (1);

	if (dtrace_canstore_statvar(addr, sz,
	    vstate->dtvs_globals, vstate->dtvs_nglobals))
		return (1);

	return (0);
}
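/*
 * A worked example of the metadata checks above (all numbers hypothetical):
 * suppose dtds_base is 0x1000, the hash table occupies 0x400 bytes, and
 * dtds_chunksize is 0x100.  Then base is 0x1400, and a store to
 * [0x1520, 0x1528) has chunkoffs 0x20 -- permitted only if 0x20 is at least
 * sizeof (dtrace_dynvar_t) (condition (2)).  A store at chunkoffs 0xf8 of
 * size 0x10 is rejected outright, since 0xf8 + 0x10 exceeds the 0x100
 * chunk size (condition (3)).
 */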
/*
 * Convenience routine to check to see if the address is within a memory
 * region in which a load may be issued given the user's privilege level;
 * if not, it sets the appropriate error flags and loads 'addr' into the
 * illegal value slot.
 *
 * DTrace subroutines (DIF_SUBR_*) should use this helper to implement
 * appropriate memory access protection.
 */
static int
dtrace_canload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	/*
	 * You can obviously read that which you can store.
	 */
	if (dtrace_canstore(addr, sz, mstate, vstate))
		return (1);

	/*
	 * We're allowed to read from our own string table.
	 */
	if (DTRACE_INRANGE(addr, sz, (uintptr_t)mstate->dtms_difo->dtdo_strtab,
	    mstate->dtms_difo->dtdo_strlen))
		return (1);

	DTRACE_CPUFLAG_SET(CPU_DTRACE_KPRIV);
	*illval = addr;
	return (0);
}

/*
 * Convenience routine to check to see if a given string is within a memory
 * region in which a load may be issued given the user's privilege level;
 * this exists so that we don't need to issue unnecessary dtrace_strlen()
 * calls in the event that the user has all privileges.
 */
static int
dtrace_strcanload(uint64_t addr, size_t sz, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t strsz;

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	strsz = 1 + dtrace_strlen((char *)(uintptr_t)addr, sz);
	if (dtrace_canload(addr, strsz, mstate, vstate))
		return (1);

	return (0);
}

/*
 * Convenience routine to check to see if a given variable is within a memory
 * region in which a load may be issued given the user's privilege level.
 */
static int
dtrace_vcanload(void *src, dtrace_diftype_t *type, dtrace_mstate_t *mstate,
    dtrace_vstate_t *vstate)
{
	size_t sz;
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	/*
	 * If we hold the privilege to read from kernel memory, then
	 * everything is readable.
	 */
	if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0)
		return (1);

	if (type->dtdt_kind == DIF_TYPE_STRING)
		sz = dtrace_strlen(src,
		    vstate->dtvs_state->dts_options[DTRACEOPT_STRSIZE]) + 1;
	else
		sz = type->dtdt_size;

	return (dtrace_canload((uintptr_t)src, sz, mstate, vstate));
}

/*
 * Compare two strings using safe loads.
 */
static int
dtrace_strncmp(char *s1, char *s2, size_t limit)
{
	uint8_t c1, c2;
	volatile uint16_t *flags;

	if (s1 == s2 || limit == 0)
		return (0);

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	do {
		if (s1 == NULL) {
			c1 = '\0';
		} else {
			c1 = dtrace_load8((uintptr_t)s1++);
		}

		if (s2 == NULL) {
			c2 = '\0';
		} else {
			c2 = dtrace_load8((uintptr_t)s2++);
		}

		if (c1 != c2)
			return (c1 - c2);
	} while (--limit && c1 != '\0' && !(*flags & CPU_DTRACE_FAULT));

	return (0);
}

/*
 * Compute strlen(s) for a string using safe memory accesses.  The additional
 * len parameter is used to specify a maximum length to ensure completion.
 */
static size_t
dtrace_strlen(const char *s, size_t lim)
{
	uint_t len;

	for (len = 0; len != lim; len++) {
		if (dtrace_load8((uintptr_t)s++) == '\0')
			break;
	}

	return (len);
}

/*
 * Check if an address falls within a toxic region.
 */
static int
dtrace_istoxic(uintptr_t kaddr, size_t size)
{
	uintptr_t taddr, tsize;
	int i;

	for (i = 0; i < dtrace_toxranges; i++) {
		taddr = dtrace_toxrange[i].dtt_base;
		tsize = dtrace_toxrange[i].dtt_limit - taddr;

		if (kaddr - taddr < tsize) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = kaddr;
			return (1);
		}

		if (taddr - kaddr < size) {
			DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR);
			cpu_core[curcpu].cpuc_dtrace_illval = taddr;
			return (1);
		}
	}

	return (0);
}

/*
 * Copy src to dst using safe memory accesses.  The src is assumed to be unsafe
 * memory specified by the DIF program.  The dst is assumed to be safe memory
 * that we can store to directly because it is managed by DTrace.  As with
 * standard bcopy, overlapping copies are handled properly.
 */
static void
dtrace_bcopy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst;
		const uint8_t *s2 = src;

		if (s1 <= s2) {
			do {
				*s1++ = dtrace_load8((uintptr_t)s2++);
			} while (--len != 0);
		} else {
			s2 += len;
			s1 += len;

			do {
				*--s1 = dtrace_load8((uintptr_t)--s2);
			} while (--len != 0);
		}
	}
}

/*
 * Copy src to dst using safe memory accesses, up to either the specified
 * length, or the point that a nul byte is encountered.  The src is assumed to
 * be unsafe memory specified by the DIF program.  The dst is assumed to be
 * safe memory that we can store to directly because it is managed by DTrace.
 * Unlike dtrace_bcopy(), overlapping regions are not handled.
 */
static void
dtrace_strcpy(const void *src, void *dst, size_t len)
{
	if (len != 0) {
		uint8_t *s1 = dst, c;
		const uint8_t *s2 = src;

		do {
			*s1++ = c = dtrace_load8((uintptr_t)s2++);
		} while (--len != 0 && c != '\0');
	}
}

/*
 * Copy src to dst, deriving the size and type from the specified (BYREF)
 * variable type.  The src is assumed to be unsafe memory specified by the DIF
 * program.  The dst is assumed to be DTrace variable memory that is of the
 * specified type; we assume that we can store to it directly.
 */
static void
dtrace_vcopy(void *src, void *dst, dtrace_diftype_t *type)
{
	ASSERT(type->dtdt_flags & DIF_TF_BYREF);

	if (type->dtdt_kind == DIF_TYPE_STRING) {
		dtrace_strcpy(src, dst, type->dtdt_size);
	} else {
		dtrace_bcopy(src, dst, type->dtdt_size);
	}
}

/*
 * Compare s1 to s2 using safe memory accesses.  The s1 data is assumed to be
 * unsafe memory specified by the DIF program.  The s2 data is assumed to be
 * safe memory that we can access directly because it is managed by DTrace.
 */
static int
dtrace_bcmp(const void *s1, const void *s2, size_t len)
{
	volatile uint16_t *flags;

	flags = (volatile uint16_t *)&cpu_core[curcpu].cpuc_dtrace_flags;

	if (s1 == s2)
		return (0);

	if (s1 == NULL || s2 == NULL)
		return (1);

	if (s1 != s2 && len != 0) {
		const uint8_t *ps1 = s1;
		const uint8_t *ps2 = s2;

		do {
			if (dtrace_load8((uintptr_t)ps1++) != *ps2++)
				return (1);
		} while (--len != 0 && !(*flags & CPU_DTRACE_FAULT));
	}
	return (0);
}

/*
 * Zero the specified region using a simple byte-by-byte loop.  Note that this
 * is for safe DTrace-managed memory only.
 */
static void
dtrace_bzero(void *dst, size_t len)
{
	uchar_t *cp;

	for (cp = dst; len != 0; len--)
		*cp++ = 0;
}

static void
dtrace_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
	uint64_t result[2];

	result[0] = addend1[0] + addend2[0];
	result[1] = addend1[1] + addend2[1] +
	    (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

	sum[0] = result[0];
	sum[1] = result[1];
}

/*
 * Shift the 128-bit value in a by b.  If b is positive, shift left.
 * If b is negative, shift right.
 */
static void
dtrace_shift_128(uint64_t *a, int b)
{
	uint64_t mask;

	if (b == 0)
		return;

	if (b < 0) {
		b = -b;
		if (b >= 64) {
			a[0] = a[1] >> (b - 64);
			a[1] = 0;
		} else {
			a[0] >>= b;
			mask = 1LL << (64 - b);
			mask -= 1;
			a[0] |= ((a[1] & mask) << (64 - b));
			a[1] >>= b;
		}
	} else {
		if (b >= 64) {
			a[1] = a[0] << (b - 64);
			a[0] = 0;
		} else {
			a[1] <<= b;
			mask = a[0] >> (64 - b);
			a[1] |= mask;
			a[0] <<= b;
		}
	}
}

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dtrace_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
	uint64_t hi1, hi2, lo1, lo2;
	uint64_t tmp[2];

	hi1 = factor1 >> 32;
	hi2 = factor2 >> 32;

	lo1 = factor1 & DT_MASK_LO;
	lo2 = factor2 & DT_MASK_LO;

	product[0] = lo1 * lo2;
	product[1] = hi1 * hi2;

	tmp[0] = hi1 * lo2;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);

	tmp[0] = hi2 * lo1;
	tmp[1] = 0;
	dtrace_shift_128(tmp, 32);
	dtrace_add_128(product, tmp, product);
}
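/*
 * For illustration, with 32-bit digits (hi1, lo1) and (hi2, lo2), the code
 * above computes the product exactly as in longhand multiplication:
 * e.g., factor1 = factor2 = 2^32 + 1 (hi = lo = 1) yields product[1] = 1
 * and product[0] = 2^33 + 1, i.e. 2^64 + 2^33 + 1, which is indeed
 * (2^32 + 1)^2.  The dtrace_add_128() carry (detected when an unsigned sum
 * is less than an addend) propagates any overflow of the cross terms from
 * product[0] into product[1].
 */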
/*
 * This privilege check should be used by actions and subroutines to
 * verify that the user credentials of the process that enabled the
 * invoking ECB match the target credentials.
 */
static int
dtrace_priv_proc_common_user(dtrace_state_t *state)
{
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_uid == cr->cr_uid &&
	    s_cr->cr_uid == cr->cr_ruid &&
	    s_cr->cr_uid == cr->cr_suid &&
	    s_cr->cr_gid == cr->cr_gid &&
	    s_cr->cr_gid == cr->cr_rgid &&
	    s_cr->cr_gid == cr->cr_sgid)
		return (1);

	return (0);
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the zone of the process that enabled the invoking ECB
 * matches the target credentials.
 */
static int
dtrace_priv_proc_common_zone(dtrace_state_t *state)
{
#if defined(sun)
	cred_t *cr, *s_cr = state->dts_cred.dcr_cred;

	/*
	 * We should always have a non-NULL state cred here, since if cred
	 * is null (anonymous tracing), we fast-path bypass this routine.
	 */
	ASSERT(s_cr != NULL);

	if ((cr = CRED()) != NULL &&
	    s_cr->cr_zone == cr->cr_zone)
		return (1);

	return (0);
#else
	return (1);
#endif
}

/*
 * This privilege check should be used by actions and subroutines to
 * verify that the process has not setuid or changed credentials.
 */
static int
dtrace_priv_proc_common_nocd(void)
{
	proc_t *proc;

	if ((proc = ttoproc(curthread)) != NULL &&
	    !(proc->p_flag & SNOCD))
		return (1);

	return (0);
}

static int
dtrace_priv_proc_destructive(dtrace_state_t *state)
{
	int action = state->dts_cred.dcr_action;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE) == 0) &&
	    dtrace_priv_proc_common_zone(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER) == 0) &&
	    dtrace_priv_proc_common_user(state) == 0)
		goto bad;

	if (((action & DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG) == 0) &&
	    dtrace_priv_proc_common_nocd() == 0)
		goto bad;

	return (1);

bad:
	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc_control(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC_CONTROL)
		return (1);

	if (dtrace_priv_proc_common_zone(state) &&
	    dtrace_priv_proc_common_user(state) &&
	    dtrace_priv_proc_common_nocd())
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_proc(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_PROC)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_UPRIV;

	return (0);
}

static int
dtrace_priv_kernel(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

static int
dtrace_priv_kernel_destructive(dtrace_state_t *state)
{
	if (state->dts_cred.dcr_action & DTRACE_CRA_KERNEL_DESTRUCTIVE)
		return (1);

	cpu_core[curcpu].cpuc_dtrace_flags |= CPU_DTRACE_KPRIV;

	return (0);
}

/*
 * Note:  not called from probe context.  This function is called
 * asynchronously (and at a regular interval) from outside of probe context to
 * clean the dirty dynamic variable lists on all CPUs.  Dynamic variable
 * cleaning is explained in detail in <sys/dtrace_impl.h>.
 */
void
dtrace_dynvar_clean(dtrace_dstate_t *dstate)
{
	dtrace_dynvar_t *dirty;
	dtrace_dstate_percpu_t *dcpu;
	int i, work = 0;

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		ASSERT(dcpu->dtdsc_rinsing == NULL);

		/*
		 * If the dirty list is NULL, there is no dirty work to do.
		 */
		if (dcpu->dtdsc_dirty == NULL)
			continue;

		/*
		 * If the clean list is non-NULL, then we're not going to do
		 * any work for this CPU -- it means that there has not been
		 * a dtrace_dynvar() allocation on this CPU (or from this CPU)
		 * since the last time we cleaned house.
		 */
		if (dcpu->dtdsc_clean != NULL)
			continue;

		work = 1;

		/*
		 * Atomically move the dirty list aside.
		 */
		do {
			dirty = dcpu->dtdsc_dirty;

			/*
			 * Before we zap the dirty list, set the rinsing list.
			 * (This allows for a potential assertion in
			 * dtrace_dynvar():  if a free dynamic variable appears
			 * on a hash chain, either the dirty list or the
			 * rinsing list for some CPU must be non-NULL.)
			 */
			dcpu->dtdsc_rinsing = dirty;
			dtrace_membar_producer();
		} while (dtrace_casptr(&dcpu->dtdsc_dirty,
		    dirty, NULL) != dirty);
	}

	if (!work) {
		/*
		 * We have no work to do; we can simply return.
		 */
		return;
	}

	dtrace_sync();

	for (i = 0; i < NCPU; i++) {
		dcpu = &dstate->dtds_percpu[i];

		if (dcpu->dtdsc_rinsing == NULL)
			continue;

		/*
		 * We are now guaranteed that no hash chain contains a pointer
		 * into this dirty list; we can make it clean.
		 */
		ASSERT(dcpu->dtdsc_clean == NULL);
		dcpu->dtdsc_clean = dcpu->dtdsc_rinsing;
		dcpu->dtdsc_rinsing = NULL;
	}

	/*
	 * Before we actually set the state to be DTRACE_DSTATE_CLEAN, make
	 * sure that all CPUs have seen all of the dtdsc_clean pointers.
	 * This prevents a race whereby a CPU incorrectly decides that
	 * the state should be something other than DTRACE_DSTATE_CLEAN
	 * after dtrace_dynvar_clean() has completed.
	 */
	dtrace_sync();

	dstate->dtds_state = DTRACE_DSTATE_CLEAN;
}
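/*
 * To summarize the cleaning protocol above:  free dynamic variables move
 * through the per-CPU lists as
 *
 *	dirty -> rinsing -> clean -> free
 *
 * The dirty-to-rinsing and rinsing-to-clean transitions are both made here,
 * separated by a dtrace_sync() so that no CPU can still hold a reference
 * into the moved list; the clean-to-free transition is made by the
 * allocation path in dtrace_dynvar().  The full treatment of this scheme
 * is in <sys/dtrace_impl.h>.
 */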
/*
 * Depending on the value of the op parameter, this function looks-up,
 * allocates or deallocates an arbitrarily-keyed dynamic variable.  If an
 * allocation is requested, this function will return a pointer to a
 * dtrace_dynvar_t corresponding to the allocated variable -- or NULL if no
 * variable can be allocated.  If NULL is returned, the appropriate counter
 * will be incremented.
 */
dtrace_dynvar_t *
dtrace_dynvar(dtrace_dstate_t *dstate, uint_t nkeys,
    dtrace_key_t *key, size_t dsize, dtrace_dynvar_op_t op,
    dtrace_mstate_t *mstate, dtrace_vstate_t *vstate)
{
	uint64_t hashval = DTRACE_DYNHASH_VALID;
	dtrace_dynhash_t *hash = dstate->dtds_hash;
	dtrace_dynvar_t *free, *new_free, *next, *dvar, *start, *prev = NULL;
	processorid_t me = curcpu, cpu = me;
	dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[me];
	size_t bucket, ksize;
	size_t chunksize = dstate->dtds_chunksize;
	uintptr_t kdata, lock, nstate;
	uint_t i;

	ASSERT(nkeys != 0);

	/*
	 * Hash the key.  As with aggregations, we use Jenkins' "One-at-a-time"
	 * algorithm.  For the by-value portions, we perform the algorithm in
	 * 16-bit chunks (as opposed to 8-bit chunks).  This speeds things up a
	 * bit, and seems to have only a minute effect on distribution.  For
	 * the by-reference data, we perform "One-at-a-time" iterating (safely)
	 * over each referenced byte.  It's painful to do this, but it's much
	 * better than pathological hash distribution.  The efficacy of the
	 * hashing algorithm (and a comparison with other algorithms) may be
	 * found by running the ::dtrace_dynstat MDB dcmd.
	 */
	for (i = 0; i < nkeys; i++) {
		if (key[i].dttk_size == 0) {
			uint64_t val = key[i].dttk_value;

			hashval += (val >> 48) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 32) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += (val >> 16) & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);

			hashval += val & 0xffff;
			hashval += (hashval << 10);
			hashval ^= (hashval >> 6);
		} else {
			/*
			 * This is incredibly painful, but it beats the hell
			 * out of the alternative.
			 */
			uint64_t j, size = key[i].dttk_size;
			uintptr_t base = (uintptr_t)key[i].dttk_value;

			if (!dtrace_canload(base, size, mstate, vstate))
				break;

			for (j = 0; j < size; j++) {
				hashval += dtrace_load8(base + j);
				hashval += (hashval << 10);
				hashval ^= (hashval >> 6);
			}
		}
	}

	if (DTRACE_CPUFLAG_ISSET(CPU_DTRACE_FAULT))
		return (NULL);

	hashval += (hashval << 3);
	hashval ^= (hashval >> 11);
	hashval += (hashval << 15);

	/*
	 * There is a remote chance (ideally, 1 in 2^31) that our hashval
	 * comes out to be one of our two sentinel hash values.  If this
	 * actually happens, we set the hashval to be a value known to be a
	 * non-sentinel value.
	 */
	if (hashval == DTRACE_DYNHASH_FREE || hashval == DTRACE_DYNHASH_SINK)
		hashval = DTRACE_DYNHASH_VALID;

	/*
	 * Yes, it's painful to do a divide here.  If the cycle count becomes
	 * important here, tricks can be pulled to reduce it.  (However, it's
	 * critical that hash collisions be kept to an absolute minimum;
	 * they're much more painful than a divide.)  It's better to have a
	 * solution that generates few collisions and still keeps things
	 * relatively simple.
	 */
	bucket = hashval % dstate->dtds_hashsize;

	if (op == DTRACE_DYNVAR_DEALLOC) {
		volatile uintptr_t *lockp = &hash[bucket].dtdh_lock;

		for (;;) {
			while ((lock = *lockp) & 1)
				continue;

			if (dtrace_casptr((volatile void *)lockp,
			    (volatile void *)lock,
			    (volatile void *)(lock + 1)) == (void *)lock)
				break;
		}

		dtrace_membar_producer();
	}
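	/*
	 * A note on the bucket lock manipulated above and below:  dtdh_lock
	 * is a word, not a mutex.  An even value means the bucket is
	 * unlocked; the deallocation path spins until the low bit is clear
	 * and then uses a compare-and-swap to increment the word to an odd
	 * value, locking the bucket.  Incrementing it again (back to even)
	 * unlocks it.  Lookups never take the lock -- they instead reread
	 * dtdh_lock after walking the chain and retry if it has changed.
	 */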
top:
	prev = NULL;
	lock = hash[bucket].dtdh_lock;

	dtrace_membar_consumer();

	start = hash[bucket].dtdh_chain;
	ASSERT(start != NULL && (start->dtdv_hashval == DTRACE_DYNHASH_SINK ||
	    start->dtdv_hashval != DTRACE_DYNHASH_FREE ||
	    op != DTRACE_DYNVAR_DEALLOC));

	for (dvar = start; dvar != NULL; dvar = dvar->dtdv_next) {
		dtrace_tuple_t *dtuple = &dvar->dtdv_tuple;
		dtrace_key_t *dkey = &dtuple->dtt_key[0];

		if (dvar->dtdv_hashval != hashval) {
			if (dvar->dtdv_hashval == DTRACE_DYNHASH_SINK) {
				/*
				 * We've reached the sink, and therefore the
				 * end of the hash chain; we can kick out of
				 * the loop knowing that we have seen a valid
				 * snapshot of state.
				 */
				ASSERT(dvar->dtdv_next == NULL);
				ASSERT(dvar == &dtrace_dynhash_sink);
				break;
			}

			if (dvar->dtdv_hashval == DTRACE_DYNHASH_FREE) {
				/*
				 * We've gone off the rails:  somewhere along
				 * the line, one of the members of this hash
				 * chain was deleted.  Note that we could also
				 * detect this by simply letting this loop run
				 * to completion, as we would eventually hit
				 * the end of the dirty list.  However, we
				 * want to avoid running the length of the
				 * dirty list unnecessarily (it might be quite
				 * long), so we catch this as early as
				 * possible by detecting the hash marker.  In
				 * this case, we simply set dvar to NULL and
				 * break; the conditional after the loop will
				 * send us back to top.
				 */
				dvar = NULL;
				break;
			}

			goto next;
		}

		if (dtuple->dtt_nkeys != nkeys)
			goto next;

		for (i = 0; i < nkeys; i++, dkey++) {
			if (dkey->dttk_size != key[i].dttk_size)
				goto next; /* size or type mismatch */

			if (dkey->dttk_size != 0) {
				if (dtrace_bcmp(
				    (void *)(uintptr_t)key[i].dttk_value,
				    (void *)(uintptr_t)dkey->dttk_value,
				    dkey->dttk_size))
					goto next;
			} else {
				if (dkey->dttk_value != key[i].dttk_value)
					goto next;
			}
		}
		if (op != DTRACE_DYNVAR_DEALLOC)
			return (dvar);

		ASSERT(dvar->dtdv_next == NULL ||
		    dvar->dtdv_next->dtdv_hashval != DTRACE_DYNHASH_FREE);

		if (prev != NULL) {
			ASSERT(hash[bucket].dtdh_chain != dvar);
			ASSERT(start != dvar);
			ASSERT(prev->dtdv_next == dvar);
			prev->dtdv_next = dvar->dtdv_next;
		} else {
			if (dtrace_casptr(&hash[bucket].dtdh_chain,
			    start, dvar->dtdv_next) != start) {
				/*
				 * We have failed to atomically swing the
				 * hash table head pointer, presumably because
				 * of a conflicting allocation on another CPU.
				 * We need to reread the hash chain and try
				 * again.
				 */
				goto top;
			}
		}

		dtrace_membar_producer();

		/*
		 * Now set the hash value to indicate that it's free.
		 */
		ASSERT(hash[bucket].dtdh_chain != dvar);
		dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

		dtrace_membar_producer();

		/*
		 * Set the next pointer to point at the dirty list, and
		 * atomically swing the dirty pointer to the newly freed dvar.
		 */
		do {
			next = dcpu->dtdsc_dirty;
			dvar->dtdv_next = next;
		} while (dtrace_casptr(&dcpu->dtdsc_dirty, next, dvar) != next);

		/*
		 * Finally, unlock this hash bucket.
		 */
		ASSERT(hash[bucket].dtdh_lock == lock);
		ASSERT(lock & 1);
		hash[bucket].dtdh_lock++;

		return (NULL);
next:
		prev = dvar;
		continue;
	}
	if (dvar == NULL) {
		/*
		 * If dvar is NULL, it is because we went off the rails:
		 * one of the elements that we traversed in the hash chain
		 * was deleted while we were traversing it.  In this case,
		 * we assert that we aren't doing a dealloc (deallocs lock
		 * the hash bucket to prevent themselves from racing with
		 * one another), and retry the hash chain traversal.
		 */
		ASSERT(op != DTRACE_DYNVAR_DEALLOC);
		goto top;
	}

	if (op != DTRACE_DYNVAR_ALLOC) {
		/*
		 * If we are not to allocate a new variable, we want to
		 * return NULL now.  Before we return, check that the value
		 * of the lock word hasn't changed.  If it has, we may have
		 * seen an inconsistent snapshot.
		 */
		if (op == DTRACE_DYNVAR_NOALLOC) {
			if (hash[bucket].dtdh_lock != lock)
				goto top;
		} else {
			ASSERT(op == DTRACE_DYNVAR_DEALLOC);
			ASSERT(hash[bucket].dtdh_lock == lock);
			ASSERT(lock & 1);
			hash[bucket].dtdh_lock++;
		}

		return (NULL);
	}
	/*
	 * We need to allocate a new dynamic variable.  The size we need is the
	 * size of dtrace_dynvar plus the size of nkeys dtrace_key_t's plus the
	 * size of any auxiliary key data (rounded up to 8-byte alignment) plus
	 * the size of any referred-to data (dsize).  We then round the final
	 * size up to the chunksize for allocation.
	 */
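	/*
	 * For illustration (the sizes are hypothetical):  a tuple with one
	 * by-value key (dttk_size == 0) and one 7-byte by-reference key
	 * gives ksize = 0 + P2ROUNDUP(7, 8) = 8.  With nkeys == 2, the chunk
	 * must then hold sizeof (dtrace_dynvar_t) plus sizeof (dtrace_key_t)
	 * plus those 8 bytes of key data plus dsize bytes of value data; if
	 * that total exceeds dtds_chunksize, the allocation is counted as a
	 * drop rather than attempted.
	 */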
	for (ksize = 0, i = 0; i < nkeys; i++)
		ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t));

	/*
	 * This should be pretty much impossible, but could happen if, say,
	 * strange DIF specified the tuple.  Ideally, this should be an
	 * assertion and not an error condition -- but that requires that the
	 * chunksize calculation in dtrace_difo_chunksize() be absolutely
	 * bullet-proof.  (That is, it must not be able to be fooled by
	 * malicious DIF.)  Given the lack of backwards branches in DIF,
	 * solving this would presumably not amount to solving the Halting
	 * Problem -- but it still seems awfully hard.
	 */
	if (sizeof (dtrace_dynvar_t) + sizeof (dtrace_key_t) * (nkeys - 1) +
	    ksize + dsize > chunksize) {
		dcpu->dtdsc_drops++;
		return (NULL);
	}

	nstate = DTRACE_DSTATE_EMPTY;

	do {
retry:
		free = dcpu->dtdsc_free;

		if (free == NULL) {
			dtrace_dynvar_t *clean = dcpu->dtdsc_clean;
			void *rval;

			if (clean == NULL) {
				/*
				 * We're out of dynamic variable space on
				 * this CPU.  Unless we have tried all CPUs,
				 * we'll try to allocate from a different
				 * CPU.
				 */
				switch (dstate->dtds_state) {
				case DTRACE_DSTATE_CLEAN: {
					void *sp = &dstate->dtds_state;

					if (++cpu >= NCPU)
						cpu = 0;

					if (dcpu->dtdsc_dirty != NULL &&
					    nstate == DTRACE_DSTATE_EMPTY)
						nstate = DTRACE_DSTATE_DIRTY;

					if (dcpu->dtdsc_rinsing != NULL)
						nstate = DTRACE_DSTATE_RINSING;

					dcpu = &dstate->dtds_percpu[cpu];

					if (cpu != me)
						goto retry;

					(void) dtrace_cas32(sp,
					    DTRACE_DSTATE_CLEAN, nstate);

					/*
					 * To increment the correct bean
					 * counter, take another lap.
					 */
					goto retry;
				}

				case DTRACE_DSTATE_DIRTY:
					dcpu->dtdsc_dirty_drops++;
					break;

				case DTRACE_DSTATE_RINSING:
					dcpu->dtdsc_rinsing_drops++;
					break;

				case DTRACE_DSTATE_EMPTY:
					dcpu->dtdsc_drops++;
					break;
				}

				DTRACE_CPUFLAG_SET(CPU_DTRACE_DROP);
				return (NULL);
			}

			/*
			 * The clean list appears to be non-empty.  We want to
			 * move the clean list to the free list; we start by
			 * moving the clean pointer aside.
			 */
			if (dtrace_casptr(&dcpu->dtdsc_clean,
			    clean, NULL) != clean) {
				/*
				 * We are in one of two situations:
				 *
				 *  (a)	The clean list was switched to the
				 *	free list by another CPU.
				 *
				 *  (b)	The clean list was added to by the
				 *	cleansing cyclic.
				 *
				 * In either of these situations, we can
				 * just reattempt the free list allocation.
				 */
				goto retry;
			}

			ASSERT(clean->dtdv_hashval == DTRACE_DYNHASH_FREE);

			/*
			 * Now we'll move the clean list to the free list.
			 * It's impossible for this to fail:  the only way
			 * the free list can be updated is through this
			 * code path, and only one CPU can own the clean list.
			 * Thus, it would only be possible for this to fail if
			 * this code were racing with dtrace_dynvar_clean().
			 * (That is, if dtrace_dynvar_clean() updated the clean
			 * list, and we ended up racing to update the free
			 * list.)  This race is prevented by the dtrace_sync()
			 * in dtrace_dynvar_clean() -- which flushes the
			 * owners of the clean lists out before resetting
			 * the clean lists.
			 */
			rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean);
			ASSERT(rval == NULL);
			goto retry;
		}

		dvar = free;
		new_free = dvar->dtdv_next;
	} while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free);

	/*
	 * We have now allocated a new chunk.  We copy the tuple keys into the
	 * tuple array and copy any referenced key data into the data space
	 * following the tuple array.  As we do this, we relocate dttk_value
	 * in the final tuple to point to the key data address in the chunk.
	 */
	kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys];
	dvar->dtdv_data = (void *)(kdata + ksize);
	dvar->dtdv_tuple.dtt_nkeys = nkeys;

	for (i = 0; i < nkeys; i++) {
		dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i];
		size_t kesize = key[i].dttk_size;

		if (kesize != 0) {
			dtrace_bcopy(
			    (const void *)(uintptr_t)key[i].dttk_value,
			    (void *)kdata, kesize);
			dkey->dttk_value = kdata;
			kdata += P2ROUNDUP(kesize, sizeof (uint64_t));
		} else {
			dkey->dttk_value = key[i].dttk_value;
		}

		dkey->dttk_size = kesize;
	}

	ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE);
	dvar->dtdv_hashval = hashval;
	dvar->dtdv_next = start;

	if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start)
		return (dvar);

	/*
	 * The cas has failed.  Either another CPU is adding an element to
	 * this hash chain, or another CPU is deleting an element from this
	 * hash chain.  The simplest way to deal with both of these cases
	 * (though not necessarily the most efficient) is to free our
	 * allocated block and tail-call ourselves.  Note that the free is
	 * to the dirty list and _not_ to the free list.  This is to prevent
	 * races with allocators, above.
	 */
	dvar->dtdv_hashval = DTRACE_DYNHASH_FREE;

	dtrace_membar_producer();

	do {
		free = dcpu->dtdsc_dirty;
		dvar->dtdv_next = free;
	} while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free);

	return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate));
}

/*ARGSUSED*/
static void
dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	if ((int64_t)nval < (int64_t)*oval)
		*oval = nval;
}

/*ARGSUSED*/
static void
dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg)
{
	if ((int64_t)nval > (int64_t)*oval)
		*oval = nval;
}

static void
dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr)
{
	int i, zero = DTRACE_QUANTIZE_ZEROBUCKET;
	int64_t val = (int64_t)nval;

	if (val < 0) {
		for (i = 0; i < zero; i++) {
			if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i] += incr;
				return;
			}
		}
	} else {
		for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) {
			if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) {
				quanta[i - 1] += incr;
				return;
			}
		}

		quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr;
		return;
	}

	ASSERT(0);
}
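/*
 * For illustration (assuming the usual power-of-two bucket values supplied
 * by DTRACE_QUANTIZE_BUCKETVAL()):  a positive nval of 7 is credited to the
 * bucket whose value is 4, because the scan above stops at the first bucket
 * value (8) that is strictly greater than the value and increments the
 * bucket just before it.  Negative values scan from the most negative
 * bucket toward zero, and values beyond the largest bucket accumulate in
 * quanta[DTRACE_QUANTIZE_NBUCKETS - 1].
 */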
1776 */ 1777 rval = dtrace_casptr(&dcpu->dtdsc_free, NULL, clean); 1778 ASSERT(rval == NULL); 1779 goto retry; 1780 } 1781 1782 dvar = free; 1783 new_free = dvar->dtdv_next; 1784 } while (dtrace_casptr(&dcpu->dtdsc_free, free, new_free) != free); 1785 1786 /* 1787 * We have now allocated a new chunk. We copy the tuple keys into the 1788 * tuple array and copy any referenced key data into the data space 1789 * following the tuple array. As we do this, we relocate dttk_value 1790 * in the final tuple to point to the key data address in the chunk. 1791 */ 1792 kdata = (uintptr_t)&dvar->dtdv_tuple.dtt_key[nkeys]; 1793 dvar->dtdv_data = (void *)(kdata + ksize); 1794 dvar->dtdv_tuple.dtt_nkeys = nkeys; 1795 1796 for (i = 0; i < nkeys; i++) { 1797 dtrace_key_t *dkey = &dvar->dtdv_tuple.dtt_key[i]; 1798 size_t kesize = key[i].dttk_size; 1799 1800 if (kesize != 0) { 1801 dtrace_bcopy( 1802 (const void *)(uintptr_t)key[i].dttk_value, 1803 (void *)kdata, kesize); 1804 dkey->dttk_value = kdata; 1805 kdata += P2ROUNDUP(kesize, sizeof (uint64_t)); 1806 } else { 1807 dkey->dttk_value = key[i].dttk_value; 1808 } 1809 1810 dkey->dttk_size = kesize; 1811 } 1812 1813 ASSERT(dvar->dtdv_hashval == DTRACE_DYNHASH_FREE); 1814 dvar->dtdv_hashval = hashval; 1815 dvar->dtdv_next = start; 1816 1817 if (dtrace_casptr(&hash[bucket].dtdh_chain, start, dvar) == start) 1818 return (dvar); 1819 1820 /* 1821 * The cas has failed. Either another CPU is adding an element to 1822 * this hash chain, or another CPU is deleting an element from this 1823 * hash chain. The simplest way to deal with both of these cases 1824 * (though not necessarily the most efficient) is to free our 1825 * allocated block and tail-call ourselves. Note that the free is 1826 * to the dirty list and _not_ to the free list. This is to prevent 1827 * races with allocators, above. 
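 * (Were the chunk pushed straight back onto dtdsc_free, we would court
 * a classic ABA problem: an allocator that had already read this dvar
 * as the free list head -- along with its now-stale dtdv_next -- could
 * still CAS successfully and corrupt the list.  The dirty list only
 * migrates toward the free list via the cleaning path, which performs
 * a dtrace_sync() first, so no such stale reader can remain by the
 * time the chunk becomes allocatable again.)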
1828 */ 1829 dvar->dtdv_hashval = DTRACE_DYNHASH_FREE; 1830 1831 dtrace_membar_producer(); 1832 1833 do { 1834 free = dcpu->dtdsc_dirty; 1835 dvar->dtdv_next = free; 1836 } while (dtrace_casptr(&dcpu->dtdsc_dirty, free, dvar) != free); 1837 1838 return (dtrace_dynvar(dstate, nkeys, key, dsize, op, mstate, vstate)); 1839 } 1840 1841 /*ARGSUSED*/ 1842 static void 1843 dtrace_aggregate_min(uint64_t *oval, uint64_t nval, uint64_t arg) 1844 { 1845 if ((int64_t)nval < (int64_t)*oval) 1846 *oval = nval; 1847 } 1848 1849 /*ARGSUSED*/ 1850 static void 1851 dtrace_aggregate_max(uint64_t *oval, uint64_t nval, uint64_t arg) 1852 { 1853 if ((int64_t)nval > (int64_t)*oval) 1854 *oval = nval; 1855 } 1856 1857 static void 1858 dtrace_aggregate_quantize(uint64_t *quanta, uint64_t nval, uint64_t incr) 1859 { 1860 int i, zero = DTRACE_QUANTIZE_ZEROBUCKET; 1861 int64_t val = (int64_t)nval; 1862 1863 if (val < 0) { 1864 for (i = 0; i < zero; i++) { 1865 if (val <= DTRACE_QUANTIZE_BUCKETVAL(i)) { 1866 quanta[i] += incr; 1867 return; 1868 } 1869 } 1870 } else { 1871 for (i = zero + 1; i < DTRACE_QUANTIZE_NBUCKETS; i++) { 1872 if (val < DTRACE_QUANTIZE_BUCKETVAL(i)) { 1873 quanta[i - 1] += incr; 1874 return; 1875 } 1876 } 1877 1878 quanta[DTRACE_QUANTIZE_NBUCKETS - 1] += incr; 1879 return; 1880 } 1881 1882 ASSERT(0); 1883 } 1884 1885 static void 1886 dtrace_aggregate_lquantize(uint64_t *lquanta, uint64_t nval, uint64_t incr) 1887 { 1888 uint64_t arg = *lquanta++; 1889 int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1890 uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1891 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1892 int32_t val = (int32_t)nval, level; 1893 1894 ASSERT(step != 0); 1895 ASSERT(levels != 0); 1896 1897 if (val < base) { 1898 /* 1899 * This is an underflow. 1900 */ 1901 lquanta[0] += incr; 1902 return; 1903 } 1904 1905 level = (val - base) / step; 1906 1907 if (level < levels) { 1908 lquanta[level + 1] += incr; 1909 return; 1910 } 1911 1912 /* 1913 * This is an overflow. 1914 */ 1915 lquanta[levels + 1] += incr; 1916 } 1917 1918 static int 1919 dtrace_aggregate_llquantize_bucket(uint16_t factor, uint16_t low, 1920 uint16_t high, uint16_t nsteps, int64_t value) 1921 { 1922 int64_t this = 1, last, next; 1923 int base = 1, order; 1924 1925 ASSERT(factor <= nsteps); 1926 ASSERT(nsteps % factor == 0); 1927 1928 for (order = 0; order < low; order++) 1929 this *= factor; 1930 1931 /* 1932 * If our value is less than our factor taken to the power of the 1933 * low order of magnitude, it goes into the zeroth bucket. 1934 */ 1935 if (value < (last = this)) 1936 return (0); 1937 1938 for (this *= factor; order <= high; order++) { 1939 int nbuckets = this > nsteps ? nsteps : this; 1940 1941 if ((next = this * factor) < this) { 1942 /* 1943 * We should not generally get log/linear quantizations 1944 * with a high magnitude that allows 64-bits to 1945 * overflow, but we nonetheless protect against this 1946 * by explicitly checking for overflow, and clamping 1947 * our value accordingly. 1948 */ 1949 value = this - 1; 1950 } 1951 1952 if (value < this) { 1953 /* 1954 * If our value lies within this order of magnitude, 1955 * determine its position by taking the offset within 1956 * the order of magnitude, dividing by the bucket 1957 * width, and adding to our (accumulated) base. 
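 * As a concrete example: with factor 10, low 0, high 2 and nsteps 10,
 * the value 42 falls in the order of magnitude [10, 100), where last =
 * 10, this = 100, nbuckets = 10 and base = 10 -- so we return
 * 10 + (42 - 10) / (100 / 10) = 13.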
1958 */ 1959 return (base + (value - last) / (this / nbuckets)); 1960 } 1961 1962 base += nbuckets - (nbuckets / factor); 1963 last = this; 1964 this = next; 1965 } 1966 1967 /* 1968 * Our value is greater than or equal to our factor taken to the 1969 * power of one plus the high magnitude -- return the top bucket. 1970 */ 1971 return (base); 1972 } 1973 1974 static void 1975 dtrace_aggregate_llquantize(uint64_t *llquanta, uint64_t nval, uint64_t incr) 1976 { 1977 uint64_t arg = *llquanta++; 1978 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg); 1979 uint16_t low = DTRACE_LLQUANTIZE_LOW(arg); 1980 uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg); 1981 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 1982 1983 llquanta[dtrace_aggregate_llquantize_bucket(factor, 1984 low, high, nsteps, nval)] += incr; 1985 } 1986 1987 /*ARGSUSED*/ 1988 static void 1989 dtrace_aggregate_avg(uint64_t *data, uint64_t nval, uint64_t arg) 1990 { 1991 data[0]++; 1992 data[1] += nval; 1993 } 1994 1995 /*ARGSUSED*/ 1996 static void 1997 dtrace_aggregate_stddev(uint64_t *data, uint64_t nval, uint64_t arg) 1998 { 1999 int64_t snval = (int64_t)nval; 2000 uint64_t tmp[2]; 2001 2002 data[0]++; 2003 data[1] += nval; 2004 2005 /* 2006 * What we want to say here is: 2007 * 2008 * data[2] += nval * nval; 2009 * 2010 * But given that nval is 64-bit, we could easily overflow, so 2011 * we do this as 128-bit arithmetic. 2012 */ 2013 if (snval < 0) 2014 snval = -snval; 2015 2016 dtrace_multiply_128((uint64_t)snval, (uint64_t)snval, tmp); 2017 dtrace_add_128(data + 2, tmp, data + 2); 2018 } 2019 2020 /*ARGSUSED*/ 2021 static void 2022 dtrace_aggregate_count(uint64_t *oval, uint64_t nval, uint64_t arg) 2023 { 2024 *oval = *oval + 1; 2025 } 2026 2027 /*ARGSUSED*/ 2028 static void 2029 dtrace_aggregate_sum(uint64_t *oval, uint64_t nval, uint64_t arg) 2030 { 2031 *oval += nval; 2032 } 2033 2034 /* 2035 * Aggregate given the tuple in the principal data buffer, and the aggregating 2036 * action denoted by the specified dtrace_aggregation_t. The aggregation 2037 * buffer is specified as the buf parameter. This routine does not return 2038 * failure; if there is no space in the aggregation buffer, the data will be 2039 * dropped, and a corresponding counter incremented. 2040 */ 2041 static void 2042 dtrace_aggregate(dtrace_aggregation_t *agg, dtrace_buffer_t *dbuf, 2043 intptr_t offset, dtrace_buffer_t *buf, uint64_t expr, uint64_t arg) 2044 { 2045 dtrace_recdesc_t *rec = &agg->dtag_action.dta_rec; 2046 uint32_t i, ndx, size, fsize; 2047 uint32_t align = sizeof (uint64_t) - 1; 2048 dtrace_aggbuffer_t *agb; 2049 dtrace_aggkey_t *key; 2050 uint32_t hashval = 0, limit, isstr; 2051 caddr_t tomax, data, kdata; 2052 dtrace_actkind_t action; 2053 dtrace_action_t *act; 2054 uintptr_t offs; 2055 2056 if (buf == NULL) 2057 return; 2058 2059 if (!agg->dtag_hasarg) { 2060 /* 2061 * Currently, only quantize() and lquantize() take additional 2062 * arguments, and they have the same semantics: an increment 2063 * value that defaults to 1 when not present. If additional 2064 * aggregating actions take arguments, the setting of the 2065 * default argument value will presumably have to become more 2066 * sophisticated... 
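 * (From D, for example, @io = quantize(arg0, arg0) increments each
 * chosen bucket by the I/O size in arg0, whereas plain quantize(arg0)
 * increments it by this default of 1.)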
2067 */ 2068 arg = 1; 2069 } 2070 2071 action = agg->dtag_action.dta_kind - DTRACEACT_AGGREGATION; 2072 size = rec->dtrd_offset - agg->dtag_base; 2073 fsize = size + rec->dtrd_size; 2074 2075 ASSERT(dbuf->dtb_tomax != NULL); 2076 data = dbuf->dtb_tomax + offset + agg->dtag_base; 2077 2078 if ((tomax = buf->dtb_tomax) == NULL) { 2079 dtrace_buffer_drop(buf); 2080 return; 2081 } 2082 2083 /* 2084 * The metastructure is always at the bottom of the buffer. 2085 */ 2086 agb = (dtrace_aggbuffer_t *)(tomax + buf->dtb_size - 2087 sizeof (dtrace_aggbuffer_t)); 2088 2089 if (buf->dtb_offset == 0) { 2090 /* 2091 * We just kludge up approximately 1/8th of the size to be 2092 * buckets. If this guess ends up being routinely 2093 * off-the-mark, we may need to dynamically readjust this 2094 * based on past performance. 2095 */ 2096 uintptr_t hashsize = (buf->dtb_size >> 3) / sizeof (uintptr_t); 2097 2098 if ((uintptr_t)agb - hashsize * sizeof (dtrace_aggkey_t *) < 2099 (uintptr_t)tomax || hashsize == 0) { 2100 /* 2101 * We've been given a ludicrously small buffer; 2102 * increment our drop count and leave. 2103 */ 2104 dtrace_buffer_drop(buf); 2105 return; 2106 } 2107 2108 /* 2109 * And now, a pathetic attempt to try to get an odd (or 2110 * perchance, a prime) hash size for better hash distribution. 2111 */ 2112 if (hashsize > (DTRACE_AGGHASHSIZE_SLEW << 3)) 2113 hashsize -= DTRACE_AGGHASHSIZE_SLEW; 2114 2115 agb->dtagb_hashsize = hashsize; 2116 agb->dtagb_hash = (dtrace_aggkey_t **)((uintptr_t)agb - 2117 agb->dtagb_hashsize * sizeof (dtrace_aggkey_t *)); 2118 agb->dtagb_free = (uintptr_t)agb->dtagb_hash; 2119 2120 for (i = 0; i < agb->dtagb_hashsize; i++) 2121 agb->dtagb_hash[i] = NULL; 2122 } 2123 2124 ASSERT(agg->dtag_first != NULL); 2125 ASSERT(agg->dtag_first->dta_intuple); 2126 2127 /* 2128 * Calculate the hash value based on the key. Note that we _don't_ 2129 * include the aggid in the hashing (but we will store it as part of 2130 * the key). The hashing algorithm is Bob Jenkins' "One-at-a-time" 2131 * algorithm: a simple, quick algorithm that has no known funnels, and 2132 * gets good distribution in practice. The efficacy of the hashing 2133 * algorithm (and a comparison with other algorithms) may be found by 2134 * running the ::dtrace_aggstat MDB dcmd. 2135 */ 2136 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2137 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2138 limit = i + act->dta_rec.dtrd_size; 2139 ASSERT(limit <= size); 2140 isstr = DTRACEACT_ISSTRING(act); 2141 2142 for (; i < limit; i++) { 2143 hashval += data[i]; 2144 hashval += (hashval << 10); 2145 hashval ^= (hashval >> 6); 2146 2147 if (isstr && data[i] == '\0') 2148 break; 2149 } 2150 } 2151 2152 hashval += (hashval << 3); 2153 hashval ^= (hashval >> 11); 2154 hashval += (hashval << 15); 2155 2156 /* 2157 * Yes, the divide here is expensive -- but it's generally the least 2158 * of the performance issues given the amount of data that we iterate 2159 * over to compute hash values, compare data, etc.
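 * For reference, the hash computed above is, restated as a standalone
 * (userland) sketch over a flat key:
 *
 *	uint32_t
 *	oaat_hash(const uint8_t *key, size_t len)
 *	{
 *		uint32_t h = 0;
 *		size_t i;
 *
 *		for (i = 0; i < len; i++) {
 *			h += key[i];
 *			h += (h << 10);
 *			h ^= (h >> 6);
 *		}
 *
 *		h += (h << 3);
 *		h ^= (h >> 11);
 *		h += (h << 15);
 *
 *		return (h);
 *	}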
2160 */ 2161 ndx = hashval % agb->dtagb_hashsize; 2162 2163 for (key = agb->dtagb_hash[ndx]; key != NULL; key = key->dtak_next) { 2164 ASSERT((caddr_t)key >= tomax); 2165 ASSERT((caddr_t)key < tomax + buf->dtb_size); 2166 2167 if (hashval != key->dtak_hashval || key->dtak_size != size) 2168 continue; 2169 2170 kdata = key->dtak_data; 2171 ASSERT(kdata >= tomax && kdata < tomax + buf->dtb_size); 2172 2173 for (act = agg->dtag_first; act->dta_intuple; 2174 act = act->dta_next) { 2175 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2176 limit = i + act->dta_rec.dtrd_size; 2177 ASSERT(limit <= size); 2178 isstr = DTRACEACT_ISSTRING(act); 2179 2180 for (; i < limit; i++) { 2181 if (kdata[i] != data[i]) 2182 goto next; 2183 2184 if (isstr && data[i] == '\0') 2185 break; 2186 } 2187 } 2188 2189 if (action != key->dtak_action) { 2190 /* 2191 * We are aggregating on the same value in the same 2192 * aggregation with two different aggregating actions. 2193 * (This should have been picked up in the compiler, 2194 * so we may be dealing with errant or devious DIF.) 2195 * This is an error condition; we indicate as much, 2196 * and return. 2197 */ 2198 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 2199 return; 2200 } 2201 2202 /* 2203 * This is a hit: we need to apply the aggregator to 2204 * the value at this key. 2205 */ 2206 agg->dtag_aggregate((uint64_t *)(kdata + size), expr, arg); 2207 return; 2208 next: 2209 continue; 2210 } 2211 2212 /* 2213 * We didn't find it. We need to allocate some zero-filled space, 2214 * link it into the hash table appropriately, and apply the aggregator 2215 * to the (zero-filled) value. 2216 */ 2217 offs = buf->dtb_offset; 2218 while (offs & (align - 1)) 2219 offs += sizeof (uint32_t); 2220 2221 /* 2222 * If we don't have enough room to both allocate a new key _and_ 2223 * its associated data, increment the drop count and return. 2224 */ 2225 if ((uintptr_t)tomax + offs + fsize > 2226 agb->dtagb_free - sizeof (dtrace_aggkey_t)) { 2227 dtrace_buffer_drop(buf); 2228 return; 2229 } 2230 2231 /*CONSTCOND*/ 2232 ASSERT(!(sizeof (dtrace_aggkey_t) & (sizeof (uintptr_t) - 1))); 2233 key = (dtrace_aggkey_t *)(agb->dtagb_free - sizeof (dtrace_aggkey_t)); 2234 agb->dtagb_free -= sizeof (dtrace_aggkey_t); 2235 2236 key->dtak_data = kdata = tomax + offs; 2237 buf->dtb_offset = offs + fsize; 2238 2239 /* 2240 * Now copy the data across. 2241 */ 2242 *((dtrace_aggid_t *)kdata) = agg->dtag_id; 2243 2244 for (i = sizeof (dtrace_aggid_t); i < size; i++) 2245 kdata[i] = data[i]; 2246 2247 /* 2248 * Because strings are not zeroed out by default, we need to iterate 2249 * looking for actions that store strings, and we need to explicitly 2250 * pad these strings out with zeroes. 2251 */ 2252 for (act = agg->dtag_first; act->dta_intuple; act = act->dta_next) { 2253 int nul; 2254 2255 if (!DTRACEACT_ISSTRING(act)) 2256 continue; 2257 2258 i = act->dta_rec.dtrd_offset - agg->dtag_base; 2259 limit = i + act->dta_rec.dtrd_size; 2260 ASSERT(limit <= size); 2261 2262 for (nul = 0; i < limit; i++) { 2263 if (nul) { 2264 kdata[i] = '\0'; 2265 continue; 2266 } 2267 2268 if (data[i] != '\0') 2269 continue; 2270 2271 nul = 1; 2272 } 2273 } 2274 2275 for (i = size; i < fsize; i++) 2276 kdata[i] = 0; 2277 2278 key->dtak_hashval = hashval; 2279 key->dtak_size = size; 2280 key->dtak_action = action; 2281 key->dtak_next = agb->dtagb_hash[ndx]; 2282 agb->dtagb_hash[ndx] = key; 2283 2284 /* 2285 * Finally, apply the aggregator. 
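 * For a miss, the new slot is first seeded with the action's initial
 * value below -- 0 for count() and sum(), and (one would expect)
 * INT64_MAX/INT64_MIN for min()/max(), so that any first sample
 * replaces it -- and is then updated exactly as on the hit path above.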
2286 */ 2287 *((uint64_t *)(key->dtak_data + size)) = agg->dtag_initial; 2288 agg->dtag_aggregate((uint64_t *)(key->dtak_data + size), expr, arg); 2289 } 2290 2291 /* 2292 * Given consumer state, this routine finds a speculation in the INACTIVE 2293 * state and transitions it into the ACTIVE state. If there is no speculation 2294 * in the INACTIVE state, 0 is returned. In this case, no error counter is 2295 * incremented -- it is up to the caller to take appropriate action. 2296 */ 2297 static int 2298 dtrace_speculation(dtrace_state_t *state) 2299 { 2300 int i = 0; 2301 dtrace_speculation_state_t current; 2302 uint32_t *stat = &state->dts_speculations_unavail, count; 2303 2304 while (i < state->dts_nspeculations) { 2305 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2306 2307 current = spec->dtsp_state; 2308 2309 if (current != DTRACESPEC_INACTIVE) { 2310 if (current == DTRACESPEC_COMMITTINGMANY || 2311 current == DTRACESPEC_COMMITTING || 2312 current == DTRACESPEC_DISCARDING) 2313 stat = &state->dts_speculations_busy; 2314 i++; 2315 continue; 2316 } 2317 2318 if (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2319 current, DTRACESPEC_ACTIVE) == current) 2320 return (i + 1); 2321 } 2322 2323 /* 2324 * We couldn't find a speculation. If we found as much as a single 2325 * busy speculation buffer, we'll attribute this failure as "busy" 2326 * instead of "unavail". 2327 */ 2328 do { 2329 count = *stat; 2330 } while (dtrace_cas32(stat, count, count + 1) != count); 2331 2332 return (0); 2333 } 2334 2335 /* 2336 * This routine commits an active speculation. If the specified speculation 2337 * is not in a valid state to perform a commit(), this routine will silently do 2338 * nothing. The state of the specified speculation is transitioned according 2339 * to the state transition diagram outlined in <sys/dtrace_impl.h> 2340 */ 2341 static void 2342 dtrace_speculation_commit(dtrace_state_t *state, processorid_t cpu, 2343 dtrace_specid_t which) 2344 { 2345 dtrace_speculation_t *spec; 2346 dtrace_buffer_t *src, *dest; 2347 uintptr_t daddr, saddr, dlimit, slimit; 2348 dtrace_speculation_state_t current, new = 0; 2349 intptr_t offs; 2350 uint64_t timestamp; 2351 2352 if (which == 0) 2353 return; 2354 2355 if (which > state->dts_nspeculations) { 2356 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2357 return; 2358 } 2359 2360 spec = &state->dts_speculations[which - 1]; 2361 src = &spec->dtsp_buffer[cpu]; 2362 dest = &state->dts_buffer[cpu]; 2363 2364 do { 2365 current = spec->dtsp_state; 2366 2367 if (current == DTRACESPEC_COMMITTINGMANY) 2368 break; 2369 2370 switch (current) { 2371 case DTRACESPEC_INACTIVE: 2372 case DTRACESPEC_DISCARDING: 2373 return; 2374 2375 case DTRACESPEC_COMMITTING: 2376 /* 2377 * This is only possible if we are (a) commit()'ing 2378 * without having done a prior speculate() on this CPU 2379 * and (b) racing with another commit() on a different 2380 * CPU. There's nothing to do -- we just assert that 2381 * our offset is 0. 2382 */ 2383 ASSERT(src->dtb_offset == 0); 2384 return; 2385 2386 case DTRACESPEC_ACTIVE: 2387 new = DTRACESPEC_COMMITTING; 2388 break; 2389 2390 case DTRACESPEC_ACTIVEONE: 2391 /* 2392 * This speculation is active on one CPU. If our 2393 * buffer offset is non-zero, we know that the one CPU 2394 * must be us. Otherwise, we are committing on a 2395 * different CPU from the speculate(), and we must 2396 * rely on being asynchronously cleaned. 
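 * In sum, the transitions effected by this loop are: ACTIVE ->
 * COMMITTING; ACTIVEONE -> COMMITTING (committing from the speculating
 * CPU) or COMMITTINGMANY (committing from another CPU); ACTIVEMANY ->
 * COMMITTINGMANY.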
2397 */ 2398 if (src->dtb_offset != 0) { 2399 new = DTRACESPEC_COMMITTING; 2400 break; 2401 } 2402 /*FALLTHROUGH*/ 2403 2404 case DTRACESPEC_ACTIVEMANY: 2405 new = DTRACESPEC_COMMITTINGMANY; 2406 break; 2407 2408 default: 2409 ASSERT(0); 2410 } 2411 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2412 current, new) != current); 2413 2414 /* 2415 * We have set the state to indicate that we are committing this 2416 * speculation. Now reserve the necessary space in the destination 2417 * buffer. 2418 */ 2419 if ((offs = dtrace_buffer_reserve(dest, src->dtb_offset, 2420 sizeof (uint64_t), state, NULL)) < 0) { 2421 dtrace_buffer_drop(dest); 2422 goto out; 2423 } 2424 2425 /* 2426 * We have sufficient space to copy the speculative buffer into the 2427 * primary buffer. First, modify the speculative buffer, filling 2428 * in the timestamp of all entries with the current time. The data 2429 * must have the commit() time rather than the time it was traced, 2430 * so that all entries in the primary buffer are in timestamp order. 2431 */ 2432 timestamp = dtrace_gethrtime(); 2433 saddr = (uintptr_t)src->dtb_tomax; 2434 slimit = saddr + src->dtb_offset; 2435 while (saddr < slimit) { 2436 size_t size; 2437 dtrace_rechdr_t *dtrh = (dtrace_rechdr_t *)saddr; 2438 2439 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) { 2440 saddr += sizeof (dtrace_epid_t); 2441 continue; 2442 } 2443 ASSERT3U(dtrh->dtrh_epid, <=, state->dts_necbs); 2444 size = state->dts_ecbs[dtrh->dtrh_epid - 1]->dte_size; 2445 2446 ASSERT3U(saddr + size, <=, slimit); 2447 ASSERT3U(size, >=, sizeof (dtrace_rechdr_t)); 2448 ASSERT3U(DTRACE_RECORD_LOAD_TIMESTAMP(dtrh), ==, UINT64_MAX); 2449 2450 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, timestamp); 2451 2452 saddr += size; 2453 } 2454 2455 /* 2456 * Copy the buffer across. (Note that this is a 2457 * highly suboptimal bcopy(); in the unlikely event that this becomes 2458 * a serious performance issue, a high-performance DTrace-specific 2459 * bcopy() should obviously be invented.) 2460 */ 2461 daddr = (uintptr_t)dest->dtb_tomax + offs; 2462 dlimit = daddr + src->dtb_offset; 2463 saddr = (uintptr_t)src->dtb_tomax; 2464 2465 /* 2466 * First, the aligned portion. 2467 */ 2468 while (dlimit - daddr >= sizeof (uint64_t)) { 2469 *((uint64_t *)daddr) = *((uint64_t *)saddr); 2470 2471 daddr += sizeof (uint64_t); 2472 saddr += sizeof (uint64_t); 2473 } 2474 2475 /* 2476 * Now any left-over bit... 2477 */ 2478 while (dlimit - daddr) 2479 *((uint8_t *)daddr++) = *((uint8_t *)saddr++); 2480 2481 /* 2482 * Finally, commit the reserved space in the destination buffer. 2483 */ 2484 dest->dtb_offset = offs + src->dtb_offset; 2485 2486 out: 2487 /* 2488 * If we're lucky enough to be the only active CPU on this speculation 2489 * buffer, we can just set the state back to DTRACESPEC_INACTIVE. 2490 */ 2491 if (current == DTRACESPEC_ACTIVE || 2492 (current == DTRACESPEC_ACTIVEONE && new == DTRACESPEC_COMMITTING)) { 2493 uint32_t rval = dtrace_cas32((uint32_t *)&spec->dtsp_state, 2494 DTRACESPEC_COMMITTING, DTRACESPEC_INACTIVE); 2495 2496 ASSERT(rval == DTRACESPEC_COMMITTING); 2497 } 2498 2499 src->dtb_offset = 0; 2500 src->dtb_xamot_drops += src->dtb_drops; 2501 src->dtb_drops = 0; 2502 } 2503 2504 /* 2505 * This routine discards an active speculation. If the specified speculation 2506 * is not in a valid state to perform a discard(), this routine will silently 2507 * do nothing.
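 * (Unlike a commit(), a discard() never copies any data; an ACTIVEONE
 * speculation whose data lives in this CPU's buffer can therefore be
 * returned directly to INACTIVE, with the buffer simply reset below.)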
The state of the specified speculation is transitioned 2508 * according to the state transition diagram outlined in <sys/dtrace_impl.h> 2509 */ 2510 static void 2511 dtrace_speculation_discard(dtrace_state_t *state, processorid_t cpu, 2512 dtrace_specid_t which) 2513 { 2514 dtrace_speculation_t *spec; 2515 dtrace_speculation_state_t current, new = 0; 2516 dtrace_buffer_t *buf; 2517 2518 if (which == 0) 2519 return; 2520 2521 if (which > state->dts_nspeculations) { 2522 cpu_core[cpu].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2523 return; 2524 } 2525 2526 spec = &state->dts_speculations[which - 1]; 2527 buf = &spec->dtsp_buffer[cpu]; 2528 2529 do { 2530 current = spec->dtsp_state; 2531 2532 switch (current) { 2533 case DTRACESPEC_INACTIVE: 2534 case DTRACESPEC_COMMITTINGMANY: 2535 case DTRACESPEC_COMMITTING: 2536 case DTRACESPEC_DISCARDING: 2537 return; 2538 2539 case DTRACESPEC_ACTIVE: 2540 case DTRACESPEC_ACTIVEMANY: 2541 new = DTRACESPEC_DISCARDING; 2542 break; 2543 2544 case DTRACESPEC_ACTIVEONE: 2545 if (buf->dtb_offset != 0) { 2546 new = DTRACESPEC_INACTIVE; 2547 } else { 2548 new = DTRACESPEC_DISCARDING; 2549 } 2550 break; 2551 2552 default: 2553 ASSERT(0); 2554 } 2555 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2556 current, new) != current); 2557 2558 buf->dtb_offset = 0; 2559 buf->dtb_drops = 0; 2560 } 2561 2562 /* 2563 * Note: not called from probe context. This function is called 2564 * asynchronously from cross call context to clean any speculations that are 2565 * in the COMMITTINGMANY or DISCARDING states. These speculations may not be 2566 * transitioned back to the INACTIVE state until all CPUs have cleaned the 2567 * speculation. 2568 */ 2569 static void 2570 dtrace_speculation_clean_here(dtrace_state_t *state) 2571 { 2572 dtrace_icookie_t cookie; 2573 processorid_t cpu = curcpu; 2574 dtrace_buffer_t *dest = &state->dts_buffer[cpu]; 2575 dtrace_specid_t i; 2576 2577 cookie = dtrace_interrupt_disable(); 2578 2579 if (dest->dtb_tomax == NULL) { 2580 dtrace_interrupt_enable(cookie); 2581 return; 2582 } 2583 2584 for (i = 0; i < state->dts_nspeculations; i++) { 2585 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2586 dtrace_buffer_t *src = &spec->dtsp_buffer[cpu]; 2587 2588 if (src->dtb_tomax == NULL) 2589 continue; 2590 2591 if (spec->dtsp_state == DTRACESPEC_DISCARDING) { 2592 src->dtb_offset = 0; 2593 continue; 2594 } 2595 2596 if (spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2597 continue; 2598 2599 if (src->dtb_offset == 0) 2600 continue; 2601 2602 dtrace_speculation_commit(state, cpu, i + 1); 2603 } 2604 2605 dtrace_interrupt_enable(cookie); 2606 } 2607 2608 /* 2609 * Note: not called from probe context. This function is called 2610 * asynchronously (and at a regular interval) to clean any speculations that 2611 * are in the COMMITTINGMANY or DISCARDING states. If it discovers that there 2612 * is work to be done, it cross calls all CPUs to perform that work; 2613 * COMMITTINGMANY and DISCARDING speculations may not be transitioned back to 2614 * the INACTIVE state until they have been cleaned by all CPUs.
2615 */ 2616 static void 2617 dtrace_speculation_clean(dtrace_state_t *state) 2618 { 2619 int work = 0, rv; 2620 dtrace_specid_t i; 2621 2622 for (i = 0; i < state->dts_nspeculations; i++) { 2623 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2624 2625 ASSERT(!spec->dtsp_cleaning); 2626 2627 if (spec->dtsp_state != DTRACESPEC_DISCARDING && 2628 spec->dtsp_state != DTRACESPEC_COMMITTINGMANY) 2629 continue; 2630 2631 work++; 2632 spec->dtsp_cleaning = 1; 2633 } 2634 2635 if (!work) 2636 return; 2637 2638 dtrace_xcall(DTRACE_CPUALL, 2639 (dtrace_xcall_t)dtrace_speculation_clean_here, state); 2640 2641 /* 2642 * We now know that all CPUs have committed or discarded their 2643 * speculation buffers, as appropriate. We can now set the state 2644 * to inactive. 2645 */ 2646 for (i = 0; i < state->dts_nspeculations; i++) { 2647 dtrace_speculation_t *spec = &state->dts_speculations[i]; 2648 dtrace_speculation_state_t current, new; 2649 2650 if (!spec->dtsp_cleaning) 2651 continue; 2652 2653 current = spec->dtsp_state; 2654 ASSERT(current == DTRACESPEC_DISCARDING || 2655 current == DTRACESPEC_COMMITTINGMANY); 2656 2657 new = DTRACESPEC_INACTIVE; 2658 2659 rv = dtrace_cas32((uint32_t *)&spec->dtsp_state, current, new); 2660 ASSERT(rv == current); 2661 spec->dtsp_cleaning = 0; 2662 } 2663 } 2664 2665 /* 2666 * Called as part of a speculate() to get the speculative buffer associated 2667 * with a given speculation. Returns NULL if the specified speculation is not 2668 * in an ACTIVE state. If the speculation is in the ACTIVEONE state -- and 2669 * the active CPU is not the specified CPU -- the speculation will be 2670 * atomically transitioned into the ACTIVEMANY state. 2671 */ 2672 static dtrace_buffer_t * 2673 dtrace_speculation_buffer(dtrace_state_t *state, processorid_t cpuid, 2674 dtrace_specid_t which) 2675 { 2676 dtrace_speculation_t *spec; 2677 dtrace_speculation_state_t current, new = 0; 2678 dtrace_buffer_t *buf; 2679 2680 if (which == 0) 2681 return (NULL); 2682 2683 if (which > state->dts_nspeculations) { 2684 cpu_core[cpuid].cpuc_dtrace_flags |= CPU_DTRACE_ILLOP; 2685 return (NULL); 2686 } 2687 2688 spec = &state->dts_speculations[which - 1]; 2689 buf = &spec->dtsp_buffer[cpuid]; 2690 2691 do { 2692 current = spec->dtsp_state; 2693 2694 switch (current) { 2695 case DTRACESPEC_INACTIVE: 2696 case DTRACESPEC_COMMITTINGMANY: 2697 case DTRACESPEC_DISCARDING: 2698 return (NULL); 2699 2700 case DTRACESPEC_COMMITTING: 2701 ASSERT(buf->dtb_offset == 0); 2702 return (NULL); 2703 2704 case DTRACESPEC_ACTIVEONE: 2705 /* 2706 * This speculation is currently active on one CPU. 2707 * Check the offset in the buffer; if it's non-zero, 2708 * that CPU must be us (and we leave the state alone). 2709 * If it's zero, assume that we're starting on a new 2710 * CPU -- and change the state to indicate that the 2711 * speculation is active on more than one CPU. 2712 */ 2713 if (buf->dtb_offset != 0) 2714 return (buf); 2715 2716 new = DTRACESPEC_ACTIVEMANY; 2717 break; 2718 2719 case DTRACESPEC_ACTIVEMANY: 2720 return (buf); 2721 2722 case DTRACESPEC_ACTIVE: 2723 new = DTRACESPEC_ACTIVEONE; 2724 break; 2725 2726 default: 2727 ASSERT(0); 2728 } 2729 } while (dtrace_cas32((uint32_t *)&spec->dtsp_state, 2730 current, new) != current); 2731 2732 ASSERT(new == DTRACESPEC_ACTIVEONE || new == DTRACESPEC_ACTIVEMANY); 2733 return (buf); 2734 } 2735 2736 /* 2737 * Return a string. 
In the event that the user lacks the privilege to access 2738 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2739 * don't fail access checking. 2740 * 2741 * dtrace_dif_variable() uses this routine as a helper for various 2742 * builtin values such as 'execname' and 'probefunc.' 2743 */ 2744 uintptr_t 2745 dtrace_dif_varstr(uintptr_t addr, dtrace_state_t *state, 2746 dtrace_mstate_t *mstate) 2747 { 2748 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 2749 uintptr_t ret; 2750 size_t strsz; 2751 2752 /* 2753 * The easy case: this probe is allowed to read all of memory, so 2754 * we can just return this as a vanilla pointer. 2755 */ 2756 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) != 0) 2757 return (addr); 2758 2759 /* 2760 * This is the tougher case: we copy the string in question from 2761 * kernel memory into scratch memory and return it that way: this 2762 * ensures that we won't trip up when access checking tests the 2763 * BYREF return value. 2764 */ 2765 strsz = dtrace_strlen((char *)addr, size) + 1; 2766 2767 if (mstate->dtms_scratch_ptr + strsz > 2768 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2769 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2770 return (0); 2771 } 2772 2773 dtrace_strcpy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2774 strsz); 2775 ret = mstate->dtms_scratch_ptr; 2776 mstate->dtms_scratch_ptr += strsz; 2777 return (ret); 2778 } 2779 2780 /* 2781 * Return a string from a memory address which is known to have one or 2782 * more concatenated, individually zero-terminated sub-strings. 2783 * In the event that the user lacks the privilege to access 2784 * arbitrary kernel memory, we copy the string out to scratch memory so that we 2785 * don't fail access checking. 2786 * 2787 * dtrace_dif_variable() uses this routine as a helper for various 2788 * builtin values such as 'execargs'. 2789 */ 2790 static uintptr_t 2791 dtrace_dif_varstrz(uintptr_t addr, size_t strsz, dtrace_state_t *state, 2792 dtrace_mstate_t *mstate) 2793 { 2794 char *p; 2795 size_t i; 2796 uintptr_t ret; 2797 2798 if (mstate->dtms_scratch_ptr + strsz > 2799 mstate->dtms_scratch_base + mstate->dtms_scratch_size) { 2800 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 2801 return (0); 2802 } 2803 2804 dtrace_bcopy((const void *)addr, (void *)mstate->dtms_scratch_ptr, 2805 strsz); 2806 2807 /* Replace sub-string termination characters with a space. */ 2808 for (p = (char *)mstate->dtms_scratch_ptr, i = 0; i < strsz - 1; 2809 p++, i++) 2810 if (*p == '\0') 2811 *p = ' '; 2812 2813 ret = mstate->dtms_scratch_ptr; 2814 mstate->dtms_scratch_ptr += strsz; 2815 return (ret); 2816 } 2817 2818 /* 2819 * This function implements the DIF emulator's variable lookups. The emulator 2820 * passes a reserved variable identifier and optional built-in array index. 2821 */ 2822 static uint64_t 2823 dtrace_dif_variable(dtrace_mstate_t *mstate, dtrace_state_t *state, uint64_t v, 2824 uint64_t ndx) 2825 { 2826 /* 2827 * If we're accessing one of the uncached arguments, we'll turn this 2828 * into a reference in the args array.
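 * For example, a D reference to arg3 arrives here as v = DIF_VAR_ARG0 + 3
 * and is rewritten below to v = DIF_VAR_ARGS with ndx = 3.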
2829 */ 2830 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) { 2831 ndx = v - DIF_VAR_ARG0; 2832 v = DIF_VAR_ARGS; 2833 } 2834 2835 switch (v) { 2836 case DIF_VAR_ARGS: 2837 ASSERT(mstate->dtms_present & DTRACE_MSTATE_ARGS); 2838 if (ndx >= sizeof (mstate->dtms_arg) / 2839 sizeof (mstate->dtms_arg[0])) { 2840 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2841 dtrace_provider_t *pv; 2842 uint64_t val; 2843 2844 pv = mstate->dtms_probe->dtpr_provider; 2845 if (pv->dtpv_pops.dtps_getargval != NULL) 2846 val = pv->dtpv_pops.dtps_getargval(pv->dtpv_arg, 2847 mstate->dtms_probe->dtpr_id, 2848 mstate->dtms_probe->dtpr_arg, ndx, aframes); 2849 else 2850 val = dtrace_getarg(ndx, aframes); 2851 2852 /* 2853 * This is regrettably required to keep the compiler 2854 * from tail-optimizing the call to dtrace_getarg(). 2855 * The condition always evaluates to true, but the 2856 * compiler has no way of figuring that out a priori. 2857 * (None of this would be necessary if the compiler 2858 * could be relied upon to _always_ tail-optimize 2859 * the call to dtrace_getarg() -- but it can't.) 2860 */ 2861 if (mstate->dtms_probe != NULL) 2862 return (val); 2863 2864 ASSERT(0); 2865 } 2866 2867 return (mstate->dtms_arg[ndx]); 2868 2869 #if defined(sun) 2870 case DIF_VAR_UREGS: { 2871 klwp_t *lwp; 2872 2873 if (!dtrace_priv_proc(state)) 2874 return (0); 2875 2876 if ((lwp = curthread->t_lwp) == NULL) { 2877 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2878 cpu_core[curcpu].cpuc_dtrace_illval = 0; 2879 return (0); 2880 } 2881 2882 return (dtrace_getreg(lwp->lwp_regs, ndx)); 2884 } 2885 #else 2886 case DIF_VAR_UREGS: { 2887 struct trapframe *tframe; 2888 2889 if (!dtrace_priv_proc(state)) 2890 return (0); 2891 2892 if ((tframe = curthread->td_frame) == NULL) { 2893 DTRACE_CPUFLAG_SET(CPU_DTRACE_BADADDR); 2894 cpu_core[curcpu].cpuc_dtrace_illval = 0; 2895 return (0); 2896 } 2897 2898 return (dtrace_getreg(tframe, ndx)); 2899 } 2900 #endif 2901 2902 case DIF_VAR_CURTHREAD: 2903 if (!dtrace_priv_kernel(state)) 2904 return (0); 2905 return ((uint64_t)(uintptr_t)curthread); 2906 2907 case DIF_VAR_TIMESTAMP: 2908 if (!(mstate->dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 2909 mstate->dtms_timestamp = dtrace_gethrtime(); 2910 mstate->dtms_present |= DTRACE_MSTATE_TIMESTAMP; 2911 } 2912 return (mstate->dtms_timestamp); 2913 2914 case DIF_VAR_VTIMESTAMP: 2915 ASSERT(dtrace_vtime_references != 0); 2916 return (curthread->t_dtrace_vtime); 2917 2918 case DIF_VAR_WALLTIMESTAMP: 2919 if (!(mstate->dtms_present & DTRACE_MSTATE_WALLTIMESTAMP)) { 2920 mstate->dtms_walltimestamp = dtrace_gethrestime(); 2921 mstate->dtms_present |= DTRACE_MSTATE_WALLTIMESTAMP; 2922 } 2923 return (mstate->dtms_walltimestamp); 2924 2925 #if defined(sun) 2926 case DIF_VAR_IPL: 2927 if (!dtrace_priv_kernel(state)) 2928 return (0); 2929 if (!(mstate->dtms_present & DTRACE_MSTATE_IPL)) { 2930 mstate->dtms_ipl = dtrace_getipl(); 2931 mstate->dtms_present |= DTRACE_MSTATE_IPL; 2932 } 2933 return (mstate->dtms_ipl); 2934 #endif 2935 2936 case DIF_VAR_EPID: 2937 ASSERT(mstate->dtms_present & DTRACE_MSTATE_EPID); 2938 return (mstate->dtms_epid); 2939 2940 case DIF_VAR_ID: 2941 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 2942 return (mstate->dtms_probe->dtpr_id); 2943 2944 case DIF_VAR_STACKDEPTH: 2945 if (!dtrace_priv_kernel(state)) 2946 return (0); 2947 if (!(mstate->dtms_present & DTRACE_MSTATE_STACKDEPTH)) { 2948 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2949 2950 mstate->dtms_stackdepth = dtrace_getstackdepth(aframes);
2951 mstate->dtms_present |= DTRACE_MSTATE_STACKDEPTH; 2952 } 2953 return (mstate->dtms_stackdepth); 2954 2955 case DIF_VAR_USTACKDEPTH: 2956 if (!dtrace_priv_proc(state)) 2957 return (0); 2958 if (!(mstate->dtms_present & DTRACE_MSTATE_USTACKDEPTH)) { 2959 /* 2960 * See comment in DIF_VAR_PID. 2961 */ 2962 if (DTRACE_ANCHORED(mstate->dtms_probe) && 2963 CPU_ON_INTR(CPU)) { 2964 mstate->dtms_ustackdepth = 0; 2965 } else { 2966 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 2967 mstate->dtms_ustackdepth = 2968 dtrace_getustackdepth(); 2969 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 2970 } 2971 mstate->dtms_present |= DTRACE_MSTATE_USTACKDEPTH; 2972 } 2973 return (mstate->dtms_ustackdepth); 2974 2975 case DIF_VAR_CALLER: 2976 if (!dtrace_priv_kernel(state)) 2977 return (0); 2978 if (!(mstate->dtms_present & DTRACE_MSTATE_CALLER)) { 2979 int aframes = mstate->dtms_probe->dtpr_aframes + 2; 2980 2981 if (!DTRACE_ANCHORED(mstate->dtms_probe)) { 2982 /* 2983 * If this is an unanchored probe, we are 2984 * required to go through the slow path: 2985 * dtrace_caller() only guarantees correct 2986 * results for anchored probes. 2987 */ 2988 pc_t caller[2] = {0, 0}; 2989 2990 dtrace_getpcstack(caller, 2, aframes, 2991 (uint32_t *)(uintptr_t)mstate->dtms_arg[0]); 2992 mstate->dtms_caller = caller[1]; 2993 } else if ((mstate->dtms_caller = 2994 dtrace_caller(aframes)) == -1) { 2995 /* 2996 * We have failed to do this the quick way; 2997 * we must resort to the slower approach of 2998 * calling dtrace_getpcstack(). 2999 */ 3000 pc_t caller = 0; 3001 3002 dtrace_getpcstack(&caller, 1, aframes, NULL); 3003 mstate->dtms_caller = caller; 3004 } 3005 3006 mstate->dtms_present |= DTRACE_MSTATE_CALLER; 3007 } 3008 return (mstate->dtms_caller); 3009 3010 case DIF_VAR_UCALLER: 3011 if (!dtrace_priv_proc(state)) 3012 return (0); 3013 3014 if (!(mstate->dtms_present & DTRACE_MSTATE_UCALLER)) { 3015 uint64_t ustack[3]; 3016 3017 /* 3018 * dtrace_getupcstack() fills in the first uint64_t 3019 * with the current PID. The second uint64_t will 3020 * be the program counter at user-level. The third 3021 * uint64_t will contain the caller, which is what 3022 * we're after. 3023 */ 3024 ustack[2] = 0; 3025 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3026 dtrace_getupcstack(ustack, 3); 3027 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3028 mstate->dtms_ucaller = ustack[2]; 3029 mstate->dtms_present |= DTRACE_MSTATE_UCALLER; 3030 } 3031 3032 return (mstate->dtms_ucaller); 3033 3034 case DIF_VAR_PROBEPROV: 3035 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3036 return (dtrace_dif_varstr( 3037 (uintptr_t)mstate->dtms_probe->dtpr_provider->dtpv_name, 3038 state, mstate)); 3039 3040 case DIF_VAR_PROBEMOD: 3041 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3042 return (dtrace_dif_varstr( 3043 (uintptr_t)mstate->dtms_probe->dtpr_mod, 3044 state, mstate)); 3045 3046 case DIF_VAR_PROBEFUNC: 3047 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3048 return (dtrace_dif_varstr( 3049 (uintptr_t)mstate->dtms_probe->dtpr_func, 3050 state, mstate)); 3051 3052 case DIF_VAR_PROBENAME: 3053 ASSERT(mstate->dtms_present & DTRACE_MSTATE_PROBE); 3054 return (dtrace_dif_varstr( 3055 (uintptr_t)mstate->dtms_probe->dtpr_name, 3056 state, mstate)); 3057 3058 case DIF_VAR_PID: 3059 if (!dtrace_priv_proc(state)) 3060 return (0); 3061 3062 #if defined(sun) 3063 /* 3064 * Note that we are assuming that an unanchored probe is 3065 * always due to a high-level interrupt. (And we're assuming 3066 * that there is only a single high level interrupt.) 
3067 */ 3068 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3069 return (pid0.pid_id); 3070 3071 /* 3072 * It is always safe to dereference one's own t_procp pointer: 3073 * it always points to a valid, allocated proc structure. 3074 * Further, it is always safe to dereference the p_pidp member 3075 * of one's own proc structure. (These are truisms because 3076 * threads and processes don't clean up their own state -- 3077 * they leave that task to whomever reaps them.) 3078 */ 3079 return ((uint64_t)curthread->t_procp->p_pidp->pid_id); 3080 #else 3081 return ((uint64_t)curproc->p_pid); 3082 #endif 3083 3084 case DIF_VAR_PPID: 3085 if (!dtrace_priv_proc(state)) 3086 return (0); 3087 3088 #if defined(sun) 3089 /* 3090 * See comment in DIF_VAR_PID. 3091 */ 3092 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3093 return (pid0.pid_id); 3094 3095 /* 3096 * It is always safe to dereference one's own t_procp pointer: 3097 * it always points to a valid, allocated proc structure. 3098 * (This is true because threads don't clean up their own 3099 * state -- they leave that task to whomever reaps them.) 3100 */ 3101 return ((uint64_t)curthread->t_procp->p_ppid); 3102 #else 3103 return ((uint64_t)curproc->p_pptr->p_pid); 3104 #endif 3105 3106 case DIF_VAR_TID: 3107 #if defined(sun) 3108 /* 3109 * See comment in DIF_VAR_PID. 3110 */ 3111 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3112 return (0); 3113 #endif 3114 3115 return ((uint64_t)curthread->t_tid); 3116 3117 case DIF_VAR_EXECARGS: { 3118 struct pargs *p_args = curthread->td_proc->p_args; 3119 3120 if (p_args == NULL) 3121 return (0); 3122 3123 return (dtrace_dif_varstrz( 3124 (uintptr_t) p_args->ar_args, p_args->ar_length, state, mstate)); 3125 } 3126 3127 case DIF_VAR_EXECNAME: 3128 #if defined(sun) 3129 if (!dtrace_priv_proc(state)) 3130 return (0); 3131 3132 /* 3133 * See comment in DIF_VAR_PID. 3134 */ 3135 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3136 return ((uint64_t)(uintptr_t)p0.p_user.u_comm); 3137 3138 /* 3139 * It is always safe to dereference one's own t_procp pointer: 3140 * it always points to a valid, allocated proc structure. 3141 * (This is true because threads don't clean up their own 3142 * state -- they leave that task to whomever reaps them.) 3143 */ 3144 return (dtrace_dif_varstr( 3145 (uintptr_t)curthread->t_procp->p_user.u_comm, 3146 state, mstate)); 3147 #else 3148 return (dtrace_dif_varstr( 3149 (uintptr_t) curthread->td_proc->p_comm, state, mstate)); 3150 #endif 3151 3152 case DIF_VAR_ZONENAME: 3153 #if defined(sun) 3154 if (!dtrace_priv_proc(state)) 3155 return (0); 3156 3157 /* 3158 * See comment in DIF_VAR_PID. 3159 */ 3160 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3161 return ((uint64_t)(uintptr_t)p0.p_zone->zone_name); 3162 3163 /* 3164 * It is always safe to dereference one's own t_procp pointer: 3165 * it always points to a valid, allocated proc structure. 3166 * (This is true because threads don't clean up their own 3167 * state -- they leave that task to whomever reaps them.) 3168 */ 3169 return (dtrace_dif_varstr( 3170 (uintptr_t)curthread->t_procp->p_zone->zone_name, 3171 state, mstate)); 3172 #else 3173 return (0); 3174 #endif 3175 3176 case DIF_VAR_UID: 3177 if (!dtrace_priv_proc(state)) 3178 return (0); 3179 3180 #if defined(sun) 3181 /* 3182 * See comment in DIF_VAR_PID.
3183 */ 3184 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3185 return ((uint64_t)p0.p_cred->cr_uid); 3186 #endif 3187 3188 /* 3189 * It is always safe to dereference one's own t_procp pointer: 3190 * it always points to a valid, allocated proc structure. 3191 * (This is true because threads don't clean up their own 3192 * state -- they leave that task to whomever reaps them.) 3193 * 3194 * Additionally, it is safe to dereference one's own process 3195 * credential, since this is never NULL after process birth. 3196 */ 3197 return ((uint64_t)curthread->t_procp->p_cred->cr_uid); 3198 3199 case DIF_VAR_GID: 3200 if (!dtrace_priv_proc(state)) 3201 return (0); 3202 3203 #if defined(sun) 3204 /* 3205 * See comment in DIF_VAR_PID. 3206 */ 3207 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3208 return ((uint64_t)p0.p_cred->cr_gid); 3209 #endif 3210 3211 /* 3212 * It is always safe to dereference one's own t_procp pointer: 3213 * it always points to a valid, allocated proc structure. 3214 * (This is true because threads don't clean up their own 3215 * state -- they leave that task to whomever reaps them.) 3216 * 3217 * Additionally, it is safe to dereference one's own process 3218 * credential, since this is never NULL after process birth. 3219 */ 3220 return ((uint64_t)curthread->t_procp->p_cred->cr_gid); 3221 3222 case DIF_VAR_ERRNO: { 3223 #if defined(sun) 3224 klwp_t *lwp; 3225 if (!dtrace_priv_proc(state)) 3226 return (0); 3227 3228 /* 3229 * See comment in DIF_VAR_PID. 3230 */ 3231 if (DTRACE_ANCHORED(mstate->dtms_probe) && CPU_ON_INTR(CPU)) 3232 return (0); 3233 3234 /* 3235 * It is always safe to dereference one's own t_lwp pointer in 3236 * the event that this pointer is non-NULL. (This is true 3237 * because threads and lwps don't clean up their own state -- 3238 * they leave that task to whomever reaps them.) 3239 */ 3240 if ((lwp = curthread->t_lwp) == NULL) 3241 return (0); 3242 3243 return ((uint64_t)lwp->lwp_errno); 3244 #else 3245 return (curthread->td_errno); 3246 #endif 3247 } 3248 #if !defined(sun) 3249 case DIF_VAR_CPU: { 3250 return curcpu; 3251 } 3252 #endif 3253 default: 3254 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 3255 return (0); 3256 } 3257 } 3258 3259 /* 3260 * Emulate the execution of DTrace ID subroutines invoked by the call opcode. 3261 * Notice that we don't bother validating the proper number of arguments or 3262 * their types in the tuple stack. This isn't needed because all argument 3263 * interpretation is safe because of our load safety -- the worst that can 3264 * happen is that a bogus program can obtain bogus results. 
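 * For example, a garbage pointer handed to strlen() is simply read
 * byte-by-byte via dtrace_load8(); any fault on such a load is
 * recorded as a CPU_DTRACE_FAULT condition in the CPU's DTrace flags
 * rather than affecting the kernel at large.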
3265 */ 3266 static void 3267 dtrace_dif_subr(uint_t subr, uint_t rd, uint64_t *regs, 3268 dtrace_key_t *tupregs, int nargs, 3269 dtrace_mstate_t *mstate, dtrace_state_t *state) 3270 { 3271 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 3272 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 3273 dtrace_vstate_t *vstate = &state->dts_vstate; 3274 3275 #if defined(sun) 3276 union { 3277 mutex_impl_t mi; 3278 uint64_t mx; 3279 } m; 3280 3281 union { 3282 krwlock_t ri; 3283 uintptr_t rw; 3284 } r; 3285 #else 3286 struct thread *lowner; 3287 union { 3288 struct lock_object *li; 3289 uintptr_t lx; 3290 } l; 3291 #endif 3292 3293 switch (subr) { 3294 case DIF_SUBR_RAND: 3295 regs[rd] = (dtrace_gethrtime() * 2416 + 374441) % 1771875; 3296 break; 3297 3298 #if defined(sun) 3299 case DIF_SUBR_MUTEX_OWNED: 3300 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3301 mstate, vstate)) { 3302 regs[rd] = 0; 3303 break; 3304 } 3305 3306 m.mx = dtrace_load64(tupregs[0].dttk_value); 3307 if (MUTEX_TYPE_ADAPTIVE(&m.mi)) 3308 regs[rd] = MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER; 3309 else 3310 regs[rd] = LOCK_HELD(&m.mi.m_spin.m_spinlock); 3311 break; 3312 3313 case DIF_SUBR_MUTEX_OWNER: 3314 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3315 mstate, vstate)) { 3316 regs[rd] = 0; 3317 break; 3318 } 3319 3320 m.mx = dtrace_load64(tupregs[0].dttk_value); 3321 if (MUTEX_TYPE_ADAPTIVE(&m.mi) && 3322 MUTEX_OWNER(&m.mi) != MUTEX_NO_OWNER) 3323 regs[rd] = (uintptr_t)MUTEX_OWNER(&m.mi); 3324 else 3325 regs[rd] = 0; 3326 break; 3327 3328 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3329 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3330 mstate, vstate)) { 3331 regs[rd] = 0; 3332 break; 3333 } 3334 3335 m.mx = dtrace_load64(tupregs[0].dttk_value); 3336 regs[rd] = MUTEX_TYPE_ADAPTIVE(&m.mi); 3337 break; 3338 3339 case DIF_SUBR_MUTEX_TYPE_SPIN: 3340 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (kmutex_t), 3341 mstate, vstate)) { 3342 regs[rd] = 0; 3343 break; 3344 } 3345 3346 m.mx = dtrace_load64(tupregs[0].dttk_value); 3347 regs[rd] = MUTEX_TYPE_SPIN(&m.mi); 3348 break; 3349 3350 case DIF_SUBR_RW_READ_HELD: { 3351 uintptr_t tmp; 3352 3353 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3354 mstate, vstate)) { 3355 regs[rd] = 0; 3356 break; 3357 } 3358 3359 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3360 regs[rd] = _RW_READ_HELD(&r.ri, tmp); 3361 break; 3362 } 3363 3364 case DIF_SUBR_RW_WRITE_HELD: 3365 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3366 mstate, vstate)) { 3367 regs[rd] = 0; 3368 break; 3369 } 3370 3371 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3372 regs[rd] = _RW_WRITE_HELD(&r.ri); 3373 break; 3374 3375 case DIF_SUBR_RW_ISWRITER: 3376 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (krwlock_t), 3377 mstate, vstate)) { 3378 regs[rd] = 0; 3379 break; 3380 } 3381 3382 r.rw = dtrace_loadptr(tupregs[0].dttk_value); 3383 regs[rd] = _RW_ISWRITER(&r.ri); 3384 break; 3385 3386 #else 3387 case DIF_SUBR_MUTEX_OWNED: 3388 if (!dtrace_canload(tupregs[0].dttk_value, 3389 sizeof (struct lock_object), mstate, vstate)) { 3390 regs[rd] = 0; 3391 break; 3392 } 3393 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3394 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3395 break; 3396 3397 case DIF_SUBR_MUTEX_OWNER: 3398 if (!dtrace_canload(tupregs[0].dttk_value, 3399 sizeof (struct lock_object), mstate, vstate)) { 3400 regs[rd] = 0; 3401 break; 3402 } 3403 l.lx = 
dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3404 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3405 regs[rd] = (uintptr_t)lowner; 3406 break; 3407 3408 case DIF_SUBR_MUTEX_TYPE_ADAPTIVE: 3409 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3410 mstate, vstate)) { 3411 regs[rd] = 0; 3412 break; 3413 } 3414 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3415 /* XXX - should be only LC_SLEEPABLE? */ 3416 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & 3417 (LC_SLEEPLOCK | LC_SLEEPABLE)) != 0; 3418 break; 3419 3420 case DIF_SUBR_MUTEX_TYPE_SPIN: 3421 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (struct mtx), 3422 mstate, vstate)) { 3423 regs[rd] = 0; 3424 break; 3425 } 3426 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3427 regs[rd] = (LOCK_CLASS(l.li)->lc_flags & LC_SPINLOCK) != 0; 3428 break; 3429 3430 case DIF_SUBR_RW_READ_HELD: 3431 case DIF_SUBR_SX_SHARED_HELD: 3432 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3433 mstate, vstate)) { 3434 regs[rd] = 0; 3435 break; 3436 } 3437 l.lx = dtrace_loadptr((uintptr_t)&tupregs[0].dttk_value); 3438 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3439 lowner == NULL; 3440 break; 3441 3442 case DIF_SUBR_RW_WRITE_HELD: 3443 case DIF_SUBR_SX_EXCLUSIVE_HELD: 3444 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3445 mstate, vstate)) { 3446 regs[rd] = 0; 3447 break; 3448 } 3449 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3450 LOCK_CLASS(l.li)->lc_owner(l.li, &lowner); 3451 regs[rd] = (lowner == curthread); 3452 break; 3453 3454 case DIF_SUBR_RW_ISWRITER: 3455 case DIF_SUBR_SX_ISEXCLUSIVE: 3456 if (!dtrace_canload(tupregs[0].dttk_value, sizeof (uintptr_t), 3457 mstate, vstate)) { 3458 regs[rd] = 0; 3459 break; 3460 } 3461 l.lx = dtrace_loadptr(tupregs[0].dttk_value); 3462 regs[rd] = LOCK_CLASS(l.li)->lc_owner(l.li, &lowner) && 3463 lowner != NULL; 3464 break; 3465 #endif /* ! defined(sun) */ 3466 3467 case DIF_SUBR_BCOPY: { 3468 /* 3469 * We need to be sure that the destination is in the scratch 3470 * region -- no other region is allowed. 3471 */ 3472 uintptr_t src = tupregs[0].dttk_value; 3473 uintptr_t dest = tupregs[1].dttk_value; 3474 size_t size = tupregs[2].dttk_value; 3475 3476 if (!dtrace_inscratch(dest, size, mstate)) { 3477 *flags |= CPU_DTRACE_BADADDR; 3478 *illval = regs[rd]; 3479 break; 3480 } 3481 3482 if (!dtrace_canload(src, size, mstate, vstate)) { 3483 regs[rd] = 0; 3484 break; 3485 } 3486 3487 dtrace_bcopy((void *)src, (void *)dest, size); 3488 break; 3489 } 3490 3491 case DIF_SUBR_ALLOCA: 3492 case DIF_SUBR_COPYIN: { 3493 uintptr_t dest = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 3494 uint64_t size = 3495 tupregs[subr == DIF_SUBR_ALLOCA ? 0 : 1].dttk_value; 3496 size_t scratch_size = (dest - mstate->dtms_scratch_ptr) + size; 3497 3498 /* 3499 * This action doesn't require any credential checks since 3500 * probes will not activate in user contexts to which the 3501 * enabling user does not have permissions. 3502 */ 3503 3504 /* 3505 * Rounding up the user allocation size could have overflowed 3506 * a large, bogus allocation (like -1ULL) to 0. 
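 * For example, with size = -1ULL and a scratch pointer four bytes shy
 * of 8-byte alignment, scratch_size wraps to 4 + 0xffffffffffffffff =
 * 3; the scratch_size < size test below catches exactly this.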
3507 */ 3508 if (scratch_size < size || 3509 !DTRACE_INSCRATCH(mstate, scratch_size)) { 3510 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3511 regs[rd] = 0; 3512 break; 3513 } 3514 3515 if (subr == DIF_SUBR_COPYIN) { 3516 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3517 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3518 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3519 } 3520 3521 mstate->dtms_scratch_ptr += scratch_size; 3522 regs[rd] = dest; 3523 break; 3524 } 3525 3526 case DIF_SUBR_COPYINTO: { 3527 uint64_t size = tupregs[1].dttk_value; 3528 uintptr_t dest = tupregs[2].dttk_value; 3529 3530 /* 3531 * This action doesn't require any credential checks since 3532 * probes will not activate in user contexts to which the 3533 * enabling user does not have permissions. 3534 */ 3535 if (!dtrace_inscratch(dest, size, mstate)) { 3536 *flags |= CPU_DTRACE_BADADDR; 3537 *illval = regs[rd]; 3538 break; 3539 } 3540 3541 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3542 dtrace_copyin(tupregs[0].dttk_value, dest, size, flags); 3543 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3544 break; 3545 } 3546 3547 case DIF_SUBR_COPYINSTR: { 3548 uintptr_t dest = mstate->dtms_scratch_ptr; 3549 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3550 3551 if (nargs > 1 && tupregs[1].dttk_value < size) 3552 size = tupregs[1].dttk_value + 1; 3553 3554 /* 3555 * This action doesn't require any credential checks since 3556 * probes will not activate in user contexts to which the 3557 * enabling user does not have permissions. 3558 */ 3559 if (!DTRACE_INSCRATCH(mstate, size)) { 3560 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3561 regs[rd] = 0; 3562 break; 3563 } 3564 3565 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3566 dtrace_copyinstr(tupregs[0].dttk_value, dest, size, flags); 3567 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3568 3569 ((char *)dest)[size - 1] = '\0'; 3570 mstate->dtms_scratch_ptr += size; 3571 regs[rd] = dest; 3572 break; 3573 } 3574 3575 #if defined(sun) 3576 case DIF_SUBR_MSGSIZE: 3577 case DIF_SUBR_MSGDSIZE: { 3578 uintptr_t baddr = tupregs[0].dttk_value, daddr; 3579 uintptr_t wptr, rptr; 3580 size_t count = 0; 3581 int cont = 0; 3582 3583 while (baddr != 0 && !(*flags & CPU_DTRACE_FAULT)) { 3584 3585 if (!dtrace_canload(baddr, sizeof (mblk_t), mstate, 3586 vstate)) { 3587 regs[rd] = 0; 3588 break; 3589 } 3590 3591 wptr = dtrace_loadptr(baddr + 3592 offsetof(mblk_t, b_wptr)); 3593 3594 rptr = dtrace_loadptr(baddr + 3595 offsetof(mblk_t, b_rptr)); 3596 3597 if (wptr < rptr) { 3598 *flags |= CPU_DTRACE_BADADDR; 3599 *illval = tupregs[0].dttk_value; 3600 break; 3601 } 3602 3603 daddr = dtrace_loadptr(baddr + 3604 offsetof(mblk_t, b_datap)); 3605 3606 baddr = dtrace_loadptr(baddr + 3607 offsetof(mblk_t, b_cont)); 3608 3609 /* 3610 * We want to guard against denial-of-service here, 3611 * so we're only going to search the list for 3612 * dtrace_msgdsize_max mblks.
3613 */ 3614 if (cont++ > dtrace_msgdsize_max) { 3615 *flags |= CPU_DTRACE_ILLOP; 3616 break; 3617 } 3618 3619 if (subr == DIF_SUBR_MSGDSIZE) { 3620 if (dtrace_load8(daddr + 3621 offsetof(dblk_t, db_type)) != M_DATA) 3622 continue; 3623 } 3624 3625 count += wptr - rptr; 3626 } 3627 3628 if (!(*flags & CPU_DTRACE_FAULT)) 3629 regs[rd] = count; 3630 3631 break; 3632 } 3633 #endif 3634 3635 case DIF_SUBR_PROGENYOF: { 3636 pid_t pid = tupregs[0].dttk_value; 3637 proc_t *p; 3638 int rval = 0; 3639 3640 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3641 3642 for (p = curthread->t_procp; p != NULL; p = p->p_parent) { 3643 #if defined(sun) 3644 if (p->p_pidp->pid_id == pid) { 3645 #else 3646 if (p->p_pid == pid) { 3647 #endif 3648 rval = 1; 3649 break; 3650 } 3651 } 3652 3653 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3654 3655 regs[rd] = rval; 3656 break; 3657 } 3658 3659 case DIF_SUBR_SPECULATION: 3660 regs[rd] = dtrace_speculation(state); 3661 break; 3662 3663 case DIF_SUBR_COPYOUT: { 3664 uintptr_t kaddr = tupregs[0].dttk_value; 3665 uintptr_t uaddr = tupregs[1].dttk_value; 3666 uint64_t size = tupregs[2].dttk_value; 3667 3668 if (!dtrace_destructive_disallow && 3669 dtrace_priv_proc_control(state) && 3670 !dtrace_istoxic(kaddr, size)) { 3671 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3672 dtrace_copyout(kaddr, uaddr, size, flags); 3673 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3674 } 3675 break; 3676 } 3677 3678 case DIF_SUBR_COPYOUTSTR: { 3679 uintptr_t kaddr = tupregs[0].dttk_value; 3680 uintptr_t uaddr = tupregs[1].dttk_value; 3681 uint64_t size = tupregs[2].dttk_value; 3682 3683 if (!dtrace_destructive_disallow && 3684 dtrace_priv_proc_control(state) && 3685 !dtrace_istoxic(kaddr, size)) { 3686 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 3687 dtrace_copyoutstr(kaddr, uaddr, size, flags); 3688 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 3689 } 3690 break; 3691 } 3692 3693 case DIF_SUBR_STRLEN: { 3694 size_t sz; 3695 uintptr_t addr = (uintptr_t)tupregs[0].dttk_value; 3696 sz = dtrace_strlen((char *)addr, 3697 state->dts_options[DTRACEOPT_STRSIZE]); 3698 3699 if (!dtrace_canload(addr, sz + 1, mstate, vstate)) { 3700 regs[rd] = 0; 3701 break; 3702 } 3703 3704 regs[rd] = sz; 3705 3706 break; 3707 } 3708 3709 case DIF_SUBR_STRCHR: 3710 case DIF_SUBR_STRRCHR: { 3711 /* 3712 * We're going to iterate over the string looking for the 3713 * specified character. We will iterate until we have reached 3714 * the string length or we have found the character. If this 3715 * is DIF_SUBR_STRRCHR, we will look for the last occurrence 3716 * of the specified character instead of the first. 3717 */ 3718 uintptr_t saddr = tupregs[0].dttk_value; 3719 uintptr_t addr = tupregs[0].dttk_value; 3720 uintptr_t limit = addr + state->dts_options[DTRACEOPT_STRSIZE]; 3721 char c, target = (char)tupregs[1].dttk_value; 3722 3723 for (regs[rd] = 0; addr < limit; addr++) { 3724 if ((c = dtrace_load8(addr)) == target) { 3725 regs[rd] = addr; 3726 3727 if (subr == DIF_SUBR_STRCHR) 3728 break; 3729 } 3730 3731 if (c == '\0') 3732 break; 3733 } 3734 3735 if (!dtrace_canload(saddr, addr - saddr, mstate, vstate)) { 3736 regs[rd] = 0; 3737 break; 3738 } 3739 3740 break; 3741 } 3742 3743 case DIF_SUBR_STRSTR: 3744 case DIF_SUBR_INDEX: 3745 case DIF_SUBR_RINDEX: { 3746 /* 3747 * We're going to iterate over the string looking for the 3748 * specified string. We will iterate until we have reached 3749 * the string length or we have found the string. 
(Yes, this 3750 * is done in the most naive way possible -- but considering 3751 * that the string we're searching for is likely to be 3752 * relatively short, the complexity of Rabin-Karp or similar 3753 * hardly seems merited.) 3754 */ 3755 char *addr = (char *)(uintptr_t)tupregs[0].dttk_value; 3756 char *substr = (char *)(uintptr_t)tupregs[1].dttk_value; 3757 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3758 size_t len = dtrace_strlen(addr, size); 3759 size_t sublen = dtrace_strlen(substr, size); 3760 char *limit = addr + len, *orig = addr; 3761 int notfound = subr == DIF_SUBR_STRSTR ? 0 : -1; 3762 int inc = 1; 3763 3764 regs[rd] = notfound; 3765 3766 if (!dtrace_canload((uintptr_t)addr, len + 1, mstate, vstate)) { 3767 regs[rd] = 0; 3768 break; 3769 } 3770 3771 if (!dtrace_canload((uintptr_t)substr, sublen + 1, mstate, 3772 vstate)) { 3773 regs[rd] = 0; 3774 break; 3775 } 3776 3777 /* 3778 * strstr() and index()/rindex() have similar semantics if 3779 * both strings are the empty string: strstr() returns a 3780 * pointer to the (empty) string, and index() and rindex() 3781 * both return index 0 (regardless of any position argument). 3782 */ 3783 if (sublen == 0 && len == 0) { 3784 if (subr == DIF_SUBR_STRSTR) 3785 regs[rd] = (uintptr_t)addr; 3786 else 3787 regs[rd] = 0; 3788 break; 3789 } 3790 3791 if (subr != DIF_SUBR_STRSTR) { 3792 if (subr == DIF_SUBR_RINDEX) { 3793 limit = orig - 1; 3794 addr += len; 3795 inc = -1; 3796 } 3797 3798 /* 3799 * Both index() and rindex() take an optional position 3800 * argument that denotes the starting position. 3801 */ 3802 if (nargs == 3) { 3803 int64_t pos = (int64_t)tupregs[2].dttk_value; 3804 3805 /* 3806 * If the position argument to index() is 3807 * negative, Perl implicitly clamps it at 3808 * zero. This semantic is a little surprising 3809 * given the special meaning of negative 3810 * positions to similar Perl functions like 3811 * substr(), but it appears to reflect a 3812 * notion that index() can start from a 3813 * negative index and increment its way up to 3814 * the string. Given this notion, Perl's 3815 * rindex() is at least self-consistent in 3816 * that it implicitly clamps positions greater 3817 * than the string length to be the string 3818 * length. Where Perl completely loses 3819 * coherence, however, is when the specified 3820 * substring is the empty string (""). In 3821 * this case, even if the position is 3822 * negative, rindex() returns 0 -- and even if 3823 * the position is greater than the length, 3824 * index() returns the string length. These 3825 * semantics violate the notion that index() 3826 * should never return a value less than the 3827 * specified position and that rindex() should 3828 * never return a value greater than the 3829 * specified position. (One assumes that 3830 * these semantics are artifacts of Perl's 3831 * implementation and not the results of 3832 * deliberate design -- it beggars belief that 3833 * even Larry Wall could desire such oddness.) 3834 * While in the abstract one would wish for 3835 * consistent position semantics across 3836 * substr(), index() and rindex() -- or at the 3837 * very least self-consistent position 3838 * semantics for index() and rindex() -- we 3839 * instead opt to keep with the extant Perl 3840 * semantics, in all their broken glory. (Do 3841 * we have more desire to maintain Perl's 3842 * semantics than Perl does? Probably.) 
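 *
 * To make the adopted semantics concrete (values illustrate the
 * Perl behavior described above, as implemented below):
 *
 *	index("foo", "", -2)	returns 0 (pos clamped to zero)
 *	index("foo", "", 42)	returns 3 (i.e., strlen("foo"))
 *	rindex("foo", "", -2)	returns 0 (despite pos < 0)
 *	rindex("foo", "", 42)	returns 3 (pos clamped to strlen)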
3843 */ 3844 if (subr == DIF_SUBR_RINDEX) { 3845 if (pos < 0) { 3846 if (sublen == 0) 3847 regs[rd] = 0; 3848 break; 3849 } 3850 3851 if (pos > len) 3852 pos = len; 3853 } else { 3854 if (pos < 0) 3855 pos = 0; 3856 3857 if (pos >= len) { 3858 if (sublen == 0) 3859 regs[rd] = len; 3860 break; 3861 } 3862 } 3863 3864 addr = orig + pos; 3865 } 3866 } 3867 3868 for (regs[rd] = notfound; addr != limit; addr += inc) { 3869 if (dtrace_strncmp(addr, substr, sublen) == 0) { 3870 if (subr != DIF_SUBR_STRSTR) { 3871 /* 3872 * As D index() and rindex() are 3873 * modeled on Perl (and not on awk), 3874 * we return a zero-based (and not a 3875 * one-based) index. (For you Perl 3876 * weenies: no, we're not going to add 3877 * $[ -- and shouldn't you be at a con 3878 * or something?) 3879 */ 3880 regs[rd] = (uintptr_t)(addr - orig); 3881 break; 3882 } 3883 3884 ASSERT(subr == DIF_SUBR_STRSTR); 3885 regs[rd] = (uintptr_t)addr; 3886 break; 3887 } 3888 } 3889 3890 break; 3891 } 3892 3893 case DIF_SUBR_STRTOK: { 3894 uintptr_t addr = tupregs[0].dttk_value; 3895 uintptr_t tokaddr = tupregs[1].dttk_value; 3896 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 3897 uintptr_t limit, toklimit = tokaddr + size; 3898 uint8_t c = 0, tokmap[32]; /* 256 / 8 */ 3899 char *dest = (char *)mstate->dtms_scratch_ptr; 3900 int i; 3901 3902 /* 3903 * Check both the token buffer and (later) the input buffer, 3904 * since both could be non-scratch addresses. 3905 */ 3906 if (!dtrace_strcanload(tokaddr, size, mstate, vstate)) { 3907 regs[rd] = 0; 3908 break; 3909 } 3910 3911 if (!DTRACE_INSCRATCH(mstate, size)) { 3912 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 3913 regs[rd] = 0; 3914 break; 3915 } 3916 3917 if (addr == 0) { 3918 /* 3919 * If the address specified is NULL, we use our saved 3920 * strtok pointer from the mstate. Note that this 3921 * means that the saved strtok pointer is _only_ 3922 * valid within multiple enablings of the same probe -- 3923 * it behaves like an implicit clause-local variable. 3924 */ 3925 addr = mstate->dtms_strtok; 3926 } else { 3927 /* 3928 * If the user-specified address is non-NULL we must 3929 * access check it. This is the only time we have 3930 * a chance to do so, since this address may reside 3931 * in the string table of this clause -- future calls 3932 * (when we fetch addr from mstate->dtms_strtok) 3933 * would fail this access check. 3934 */ 3935 if (!dtrace_strcanload(addr, size, mstate, vstate)) { 3936 regs[rd] = 0; 3937 break; 3938 } 3939 } 3940 3941 /* 3942 * First, zero the token map, and then process the token 3943 * string -- setting a bit in the map for every character 3944 * found in the token string. 3945 */ 3946 for (i = 0; i < sizeof (tokmap); i++) 3947 tokmap[i] = 0; 3948 3949 for (; tokaddr < toklimit; tokaddr++) { 3950 if ((c = dtrace_load8(tokaddr)) == '\0') 3951 break; 3952 3953 ASSERT((c >> 3) < sizeof (tokmap)); 3954 tokmap[c >> 3] |= (1 << (c & 0x7)); 3955 } 3956 3957 for (limit = addr + size; addr < limit; addr++) { 3958 /* 3959 * We're looking for a character that is _not_ contained 3960 * in the token string. 3961 */ 3962 if ((c = dtrace_load8(addr)) == '\0') 3963 break; 3964 3965 if (!(tokmap[c >> 3] & (1 << (c & 0x7)))) 3966 break; 3967 } 3968 3969 if (c == '\0') { 3970 /* 3971 * We reached the end of the string without finding 3972 * any character that was not in the token string. 3973 * We return NULL in this case, and we set the saved 3974 * address to NULL as well.
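 * (In illustrative D terms, tokenizing the literal "/a//b/" with
 * token string "/":
 *
 *	strtok("/a//b/", "/")	returns "a"
 *	strtok(NULL, "/")	returns "b"
 *	strtok(NULL, "/")	returns NULL, via this path
 *
 * -- leading and repeated token characters are skipped, just as
 * in the libc routine.)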
3975 */ 3976 regs[rd] = 0; 3977 mstate->dtms_strtok = 0; 3978 break; 3979 } 3980 3981 /* 3982 * From here on, we're copying into the destination string. 3983 */ 3984 for (i = 0; addr < limit && i < size - 1; addr++) { 3985 if ((c = dtrace_load8(addr)) == '\0') 3986 break; 3987 3988 if (tokmap[c >> 3] & (1 << (c & 0x7))) 3989 break; 3990 3991 ASSERT(i < size); 3992 dest[i++] = c; 3993 } 3994 3995 ASSERT(i < size); 3996 dest[i] = '\0'; 3997 regs[rd] = (uintptr_t)dest; 3998 mstate->dtms_scratch_ptr += size; 3999 mstate->dtms_strtok = addr; 4000 break; 4001 } 4002 4003 case DIF_SUBR_SUBSTR: { 4004 uintptr_t s = tupregs[0].dttk_value; 4005 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4006 char *d = (char *)mstate->dtms_scratch_ptr; 4007 int64_t index = (int64_t)tupregs[1].dttk_value; 4008 int64_t remaining = (int64_t)tupregs[2].dttk_value; 4009 size_t len = dtrace_strlen((char *)s, size); 4010 int64_t i = 0; 4011 4012 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4013 regs[rd] = 0; 4014 break; 4015 } 4016 4017 if (!DTRACE_INSCRATCH(mstate, size)) { 4018 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4019 regs[rd] = 0; 4020 break; 4021 } 4022 4023 if (nargs <= 2) 4024 remaining = (int64_t)size; 4025 4026 if (index < 0) { 4027 index += len; 4028 4029 if (index < 0 && index + remaining > 0) { 4030 remaining += index; 4031 index = 0; 4032 } 4033 } 4034 4035 if (index >= len || index < 0) { 4036 remaining = 0; 4037 } else if (remaining < 0) { 4038 remaining += len - index; 4039 } else if (index + remaining > size) { 4040 remaining = size - index; 4041 } 4042 4043 for (i = 0; i < remaining; i++) { 4044 if ((d[i] = dtrace_load8(s + index + i)) == '\0') 4045 break; 4046 } 4047 4048 d[i] = '\0'; 4049 4050 mstate->dtms_scratch_ptr += size; 4051 regs[rd] = (uintptr_t)d; 4052 break; 4053 } 4054 4055 case DIF_SUBR_TOUPPER: 4056 case DIF_SUBR_TOLOWER: { 4057 uintptr_t s = tupregs[0].dttk_value; 4058 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4059 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4060 size_t len = dtrace_strlen((char *)s, size); 4061 char lower, upper, convert; 4062 int64_t i; 4063 4064 if (subr == DIF_SUBR_TOUPPER) { 4065 lower = 'a'; 4066 upper = 'z'; 4067 convert = 'A'; 4068 } else { 4069 lower = 'A'; 4070 upper = 'Z'; 4071 convert = 'a'; 4072 } 4073 4074 if (!dtrace_canload(s, len + 1, mstate, vstate)) { 4075 regs[rd] = 0; 4076 break; 4077 } 4078 4079 if (!DTRACE_INSCRATCH(mstate, size)) { 4080 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4081 regs[rd] = 0; 4082 break; 4083 } 4084 4085 for (i = 0; i < size - 1; i++) { 4086 if ((c = dtrace_load8(s + i)) == '\0') 4087 break; 4088 4089 if (c >= lower && c <= upper) 4090 c = convert + (c - lower); 4091 4092 dest[i] = c; 4093 } 4094 4095 ASSERT(i < size); 4096 dest[i] = '\0'; 4097 regs[rd] = (uintptr_t)dest; 4098 mstate->dtms_scratch_ptr += size; 4099 break; 4100 } 4101 4102 #if defined(sun) 4103 case DIF_SUBR_GETMAJOR: 4104 #ifdef _LP64 4105 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR64) & MAXMAJ64; 4106 #else 4107 regs[rd] = (tupregs[0].dttk_value >> NBITSMINOR) & MAXMAJ; 4108 #endif 4109 break; 4110 4111 case DIF_SUBR_GETMINOR: 4112 #ifdef _LP64 4113 regs[rd] = tupregs[0].dttk_value & MAXMIN64; 4114 #else 4115 regs[rd] = tupregs[0].dttk_value & MAXMIN; 4116 #endif 4117 break; 4118 4119 case DIF_SUBR_DDI_PATHNAME: { 4120 /* 4121 * This one is a galactic mess. 
We are going to roughly 4122 * emulate ddi_pathname(), but it's made more complicated 4123 * by the fact that we (a) want to include the minor name and 4124 * (b) must proceed iteratively instead of recursively. 4125 */ 4126 uintptr_t dest = mstate->dtms_scratch_ptr; 4127 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4128 char *start = (char *)dest, *end = start + size - 1; 4129 uintptr_t daddr = tupregs[0].dttk_value; 4130 int64_t minor = (int64_t)tupregs[1].dttk_value; 4131 char *s; 4132 int i, len, depth = 0; 4133 4134 /* 4135 * Due to all the pointer jumping we do and context we must 4136 * rely upon, we just mandate that the user must have kernel 4137 * read privileges to use this routine. 4138 */ 4139 if ((mstate->dtms_access & DTRACE_ACCESS_KERNEL) == 0) { 4140 *flags |= CPU_DTRACE_KPRIV; 4141 *illval = daddr; 4142 regs[rd] = 0; 4143 } 4144 4145 if (!DTRACE_INSCRATCH(mstate, size)) { 4146 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4147 regs[rd] = 0; 4148 break; 4149 } 4150 4151 *end = '\0'; 4152 4153 /* 4154 * We want to have a name for the minor. In order to do this, 4155 * we need to walk the minor list from the devinfo. We want 4156 * to be sure that we don't infinitely walk a circular list, 4157 * so we check for circularity by sending a scout pointer 4158 * ahead two elements for every element that we iterate over; 4159 * if the list is circular, these will ultimately point to the 4160 * same element. You may recognize this little trick as the 4161 * answer to a stupid interview question -- one that always 4162 * seems to be asked by those who had to have it laboriously 4163 * explained to them, and who can't even concisely describe 4164 * the conditions under which one would be forced to resort to 4165 * this technique. Needless to say, those conditions are 4166 * found here -- and probably only here. Is this the only use 4167 * of this infamous trick in shipping, production code? If it 4168 * isn't, it probably should be... 4169 */ 4170 if (minor != -1) { 4171 uintptr_t maddr = dtrace_loadptr(daddr + 4172 offsetof(struct dev_info, devi_minor)); 4173 4174 uintptr_t next = offsetof(struct ddi_minor_data, next); 4175 uintptr_t name = offsetof(struct ddi_minor_data, 4176 d_minor) + offsetof(struct ddi_minor, name); 4177 uintptr_t dev = offsetof(struct ddi_minor_data, 4178 d_minor) + offsetof(struct ddi_minor, dev); 4179 uintptr_t scout; 4180 4181 if (maddr != NULL) 4182 scout = dtrace_loadptr(maddr + next); 4183 4184 while (maddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4185 uint64_t m; 4186 #ifdef _LP64 4187 m = dtrace_load64(maddr + dev) & MAXMIN64; 4188 #else 4189 m = dtrace_load32(maddr + dev) & MAXMIN; 4190 #endif 4191 if (m != minor) { 4192 maddr = dtrace_loadptr(maddr + next); 4193 4194 if (scout == NULL) 4195 continue; 4196 4197 scout = dtrace_loadptr(scout + next); 4198 4199 if (scout == NULL) 4200 continue; 4201 4202 scout = dtrace_loadptr(scout + next); 4203 4204 if (scout == NULL) 4205 continue; 4206 4207 if (scout == maddr) { 4208 *flags |= CPU_DTRACE_ILLOP; 4209 break; 4210 } 4211 4212 continue; 4213 } 4214 4215 /* 4216 * We have the minor data. Now we need to 4217 * copy the minor's name into the end of the 4218 * pathname. 
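 * (The pathname is assembled right to left: end was primed with
 * the terminating NUL above, so a minor name such as the
 * illustrative "raw" lands at the very tail as ":raw", with each
 * ancestor's "@unit-address" and "/node" components prepended by
 * the loop that follows.)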
4219 */ 4220 s = (char *)dtrace_loadptr(maddr + name); 4221 len = dtrace_strlen(s, size); 4222 4223 if (*flags & CPU_DTRACE_FAULT) 4224 break; 4225 4226 if (len != 0) { 4227 if ((end -= (len + 1)) < start) 4228 break; 4229 4230 *end = ':'; 4231 } 4232 4233 for (i = 1; i <= len; i++) 4234 end[i] = dtrace_load8((uintptr_t)s++); 4235 break; 4236 } 4237 } 4238 4239 while (daddr != NULL && !(*flags & CPU_DTRACE_FAULT)) { 4240 ddi_node_state_t devi_state; 4241 4242 devi_state = dtrace_load32(daddr + 4243 offsetof(struct dev_info, devi_node_state)); 4244 4245 if (*flags & CPU_DTRACE_FAULT) 4246 break; 4247 4248 if (devi_state >= DS_INITIALIZED) { 4249 s = (char *)dtrace_loadptr(daddr + 4250 offsetof(struct dev_info, devi_addr)); 4251 len = dtrace_strlen(s, size); 4252 4253 if (*flags & CPU_DTRACE_FAULT) 4254 break; 4255 4256 if (len != 0) { 4257 if ((end -= (len + 1)) < start) 4258 break; 4259 4260 *end = '@'; 4261 } 4262 4263 for (i = 1; i <= len; i++) 4264 end[i] = dtrace_load8((uintptr_t)s++); 4265 } 4266 4267 /* 4268 * Now for the node name... 4269 */ 4270 s = (char *)dtrace_loadptr(daddr + 4271 offsetof(struct dev_info, devi_node_name)); 4272 4273 daddr = dtrace_loadptr(daddr + 4274 offsetof(struct dev_info, devi_parent)); 4275 4276 /* 4277 * If our parent is NULL (that is, if we're the root 4278 * node), we're going to use the special path 4279 * "devices". 4280 */ 4281 if (daddr == 0) 4282 s = "devices"; 4283 4284 len = dtrace_strlen(s, size); 4285 if (*flags & CPU_DTRACE_FAULT) 4286 break; 4287 4288 if ((end -= (len + 1)) < start) 4289 break; 4290 4291 for (i = 1; i <= len; i++) 4292 end[i] = dtrace_load8((uintptr_t)s++); 4293 *end = '/'; 4294 4295 if (depth++ > dtrace_devdepth_max) { 4296 *flags |= CPU_DTRACE_ILLOP; 4297 break; 4298 } 4299 } 4300 4301 if (end < start) 4302 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4303 4304 if (daddr == 0) { 4305 regs[rd] = (uintptr_t)end; 4306 mstate->dtms_scratch_ptr += size; 4307 } 4308 4309 break; 4310 } 4311 #endif 4312 4313 case DIF_SUBR_STRJOIN: { 4314 char *d = (char *)mstate->dtms_scratch_ptr; 4315 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4316 uintptr_t s1 = tupregs[0].dttk_value; 4317 uintptr_t s2 = tupregs[1].dttk_value; 4318 int i = 0; 4319 4320 if (!dtrace_strcanload(s1, size, mstate, vstate) || 4321 !dtrace_strcanload(s2, size, mstate, vstate)) { 4322 regs[rd] = 0; 4323 break; 4324 } 4325 4326 if (!DTRACE_INSCRATCH(mstate, size)) { 4327 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4328 regs[rd] = 0; 4329 break; 4330 } 4331 4332 for (;;) { 4333 if (i >= size) { 4334 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4335 regs[rd] = 0; 4336 break; 4337 } 4338 4339 if ((d[i++] = dtrace_load8(s1++)) == '\0') { 4340 i--; 4341 break; 4342 } 4343 } 4344 4345 for (;;) { 4346 if (i >= size) { 4347 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4348 regs[rd] = 0; 4349 break; 4350 } 4351 4352 if ((d[i++] = dtrace_load8(s2++)) == '\0') 4353 break; 4354 } 4355 4356 if (i < size) { 4357 mstate->dtms_scratch_ptr += i; 4358 regs[rd] = (uintptr_t)d; 4359 } 4360 4361 break; 4362 } 4363 4364 case DIF_SUBR_LLTOSTR: { 4365 int64_t i = (int64_t)tupregs[0].dttk_value; 4366 uint64_t val, digit; 4367 uint64_t size = 65; /* enough room for 2^64 in binary */ 4368 char *end = (char *)mstate->dtms_scratch_ptr + size - 1; 4369 int base = 10; 4370 4371 if (nargs > 1) { 4372 if ((base = tupregs[1].dttk_value) <= 1 || 4373 base > ('z' - 'a' + 1) + ('9' - '0' + 1)) { 4374 *flags |= CPU_DTRACE_ILLOP; 4375 break; 4376 } 4377 } 4378 4379 val = (base == 10 && i < 0) ? 
i * -1 : i; 4380 4381 if (!DTRACE_INSCRATCH(mstate, size)) { 4382 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4383 regs[rd] = 0; 4384 break; 4385 } 4386 4387 for (*end-- = '\0'; val; val /= base) { 4388 if ((digit = val % base) <= '9' - '0') { 4389 *end-- = '0' + digit; 4390 } else { 4391 *end-- = 'a' + (digit - ('9' - '0') - 1); 4392 } 4393 } 4394 4395 if (i == 0 && base == 16) 4396 *end-- = '0'; 4397 4398 if (base == 16) 4399 *end-- = 'x'; 4400 4401 if (i == 0 || base == 8 || base == 16) 4402 *end-- = '0'; 4403 4404 if (i < 0 && base == 10) 4405 *end-- = '-'; 4406 4407 regs[rd] = (uintptr_t)end + 1; 4408 mstate->dtms_scratch_ptr += size; 4409 break; 4410 } 4411 4412 case DIF_SUBR_HTONS: 4413 case DIF_SUBR_NTOHS: 4414 #if BYTE_ORDER == BIG_ENDIAN 4415 regs[rd] = (uint16_t)tupregs[0].dttk_value; 4416 #else 4417 regs[rd] = DT_BSWAP_16((uint16_t)tupregs[0].dttk_value); 4418 #endif 4419 break; 4420 4421 4422 case DIF_SUBR_HTONL: 4423 case DIF_SUBR_NTOHL: 4424 #if BYTE_ORDER == BIG_ENDIAN 4425 regs[rd] = (uint32_t)tupregs[0].dttk_value; 4426 #else 4427 regs[rd] = DT_BSWAP_32((uint32_t)tupregs[0].dttk_value); 4428 #endif 4429 break; 4430 4431 4432 case DIF_SUBR_HTONLL: 4433 case DIF_SUBR_NTOHLL: 4434 #if BYTE_ORDER == BIG_ENDIAN 4435 regs[rd] = (uint64_t)tupregs[0].dttk_value; 4436 #else 4437 regs[rd] = DT_BSWAP_64((uint64_t)tupregs[0].dttk_value); 4438 #endif 4439 break; 4440 4441 4442 case DIF_SUBR_DIRNAME: 4443 case DIF_SUBR_BASENAME: { 4444 char *dest = (char *)mstate->dtms_scratch_ptr; 4445 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4446 uintptr_t src = tupregs[0].dttk_value; 4447 int i, j, len = dtrace_strlen((char *)src, size); 4448 int lastbase = -1, firstbase = -1, lastdir = -1; 4449 int start, end; 4450 4451 if (!dtrace_canload(src, len + 1, mstate, vstate)) { 4452 regs[rd] = 0; 4453 break; 4454 } 4455 4456 if (!DTRACE_INSCRATCH(mstate, size)) { 4457 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4458 regs[rd] = 0; 4459 break; 4460 } 4461 4462 /* 4463 * The basename and dirname for a zero-length string are 4464 * defined to be "." 4465 */ 4466 if (len == 0) { 4467 len = 1; 4468 src = (uintptr_t)"."; 4469 } 4470 4471 /* 4472 * Start from the back of the string, moving back toward the 4473 * front until we see a character that isn't a slash. That 4474 * character is the last character in the basename. 4475 */ 4476 for (i = len - 1; i >= 0; i--) { 4477 if (dtrace_load8(src + i) != '/') 4478 break; 4479 } 4480 4481 if (i >= 0) 4482 lastbase = i; 4483 4484 /* 4485 * Starting from the last character in the basename, move 4486 * towards the front until we find a slash. The character 4487 * that we processed immediately before that is the first 4488 * character in the basename. 4489 */ 4490 for (; i >= 0; i--) { 4491 if (dtrace_load8(src + i) == '/') 4492 break; 4493 } 4494 4495 if (i >= 0) 4496 firstbase = i + 1; 4497 4498 /* 4499 * Now keep going until we find a non-slash character. That 4500 * character is the last character in the dirname. 4501 */ 4502 for (; i >= 0; i--) { 4503 if (dtrace_load8(src + i) != '/') 4504 break; 4505 } 4506 4507 if (i >= 0) 4508 lastdir = i; 4509 4510 ASSERT(!(lastbase == -1 && firstbase != -1)); 4511 ASSERT(!(firstbase == -1 && lastdir != -1)); 4512 4513 if (lastbase == -1) { 4514 /* 4515 * We didn't find a non-slash character. We know that 4516 * the length is non-zero, so the whole string must be 4517 * slashes. In either the dirname or the basename 4518 * case, we return '/'.
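 *
 * To illustrate the cases handled here and below (these follow
 * the usual dirname(1)/basename(1) conventions):
 *
 *	input		dirname		basename
 *	"/foo/bar"	"/foo"		"bar"
 *	"/foo/bar///"	"/foo"		"bar"
 *	"foo"		"."		"foo"
 *	"///"		"/"		"/"
 *	""		"."		"."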
4519 */ 4520 ASSERT(firstbase == -1); 4521 firstbase = lastbase = lastdir = 0; 4522 } 4523 4524 if (firstbase == -1) { 4525 /* 4526 * The entire string consists only of a basename 4527 * component. If we're looking for dirname, we need 4528 * to change our string to be just "."; if we're 4529 * looking for a basename, we'll just set the first 4530 * character of the basename to be 0. 4531 */ 4532 if (subr == DIF_SUBR_DIRNAME) { 4533 ASSERT(lastdir == -1); 4534 src = (uintptr_t)"."; 4535 lastdir = 0; 4536 } else { 4537 firstbase = 0; 4538 } 4539 } 4540 4541 if (subr == DIF_SUBR_DIRNAME) { 4542 if (lastdir == -1) { 4543 /* 4544 * We know that we have a slash in the name -- 4545 * or lastdir would be set to 0, above. And 4546 * because lastdir is -1, we know that this 4547 * slash must be the first character. (That 4548 * is, the full string must be of the form 4549 * "/basename".) In this case, the last 4550 * character of the directory name is 0. 4551 */ 4552 lastdir = 0; 4553 } 4554 4555 start = 0; 4556 end = lastdir; 4557 } else { 4558 ASSERT(subr == DIF_SUBR_BASENAME); 4559 ASSERT(firstbase != -1 && lastbase != -1); 4560 start = firstbase; 4561 end = lastbase; 4562 } 4563 4564 for (i = start, j = 0; i <= end && j < size - 1; i++, j++) 4565 dest[j] = dtrace_load8(src + i); 4566 4567 dest[j] = '\0'; 4568 regs[rd] = (uintptr_t)dest; 4569 mstate->dtms_scratch_ptr += size; 4570 break; 4571 } 4572 4573 case DIF_SUBR_CLEANPATH: { 4574 char *dest = (char *)mstate->dtms_scratch_ptr, c; 4575 uint64_t size = state->dts_options[DTRACEOPT_STRSIZE]; 4576 uintptr_t src = tupregs[0].dttk_value; 4577 int i = 0, j = 0; 4578 4579 if (!dtrace_strcanload(src, size, mstate, vstate)) { 4580 regs[rd] = 0; 4581 break; 4582 } 4583 4584 if (!DTRACE_INSCRATCH(mstate, size)) { 4585 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4586 regs[rd] = 0; 4587 break; 4588 } 4589 4590 /* 4591 * Move forward, loading each character. 4592 */ 4593 do { 4594 c = dtrace_load8(src + i++); 4595 next: 4596 if (j + 5 >= size) /* 5 = strlen("/..c\0") */ 4597 break; 4598 4599 if (c != '/') { 4600 dest[j++] = c; 4601 continue; 4602 } 4603 4604 c = dtrace_load8(src + i++); 4605 4606 if (c == '/') { 4607 /* 4608 * We have two slashes -- we can just advance 4609 * to the next character. 4610 */ 4611 goto next; 4612 } 4613 4614 if (c != '.') { 4615 /* 4616 * This is not "." and it's not ".." -- we can 4617 * just store the "/" and this character and 4618 * drive on. 4619 */ 4620 dest[j++] = '/'; 4621 dest[j++] = c; 4622 continue; 4623 } 4624 4625 c = dtrace_load8(src + i++); 4626 4627 if (c == '/') { 4628 /* 4629 * This is a "/./" component. We're not going 4630 * to store anything in the destination buffer; 4631 * we're just going to go to the next component. 4632 */ 4633 goto next; 4634 } 4635 4636 if (c != '.') { 4637 /* 4638 * This is not ".." -- we can just store the 4639 * "/." and this character and continue 4640 * processing. 4641 */ 4642 dest[j++] = '/'; 4643 dest[j++] = '.'; 4644 dest[j++] = c; 4645 continue; 4646 } 4647 4648 c = dtrace_load8(src + i++); 4649 4650 if (c != '/' && c != '\0') { 4651 /* 4652 * This is not ".." -- it's "..[mumble]". 4653 * We'll store the "/.." and this character 4654 * and continue processing. 4655 */ 4656 dest[j++] = '/'; 4657 dest[j++] = '.'; 4658 dest[j++] = '.'; 4659 dest[j++] = c; 4660 continue; 4661 } 4662 4663 /* 4664 * This is "/../" or "/..\0". We need to back up 4665 * our destination pointer until we find a "/". 
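 *
 * (For example, cleaning the illustrative path "/a/b/../c": by
 * the time the "/.." is recognized, dest holds "/a/b"; we back j
 * up over "b" to the preceding '/', so the "c" component that
 * follows yields "/a/c".)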
4666 */ 4667 i--; 4668 while (j != 0 && dest[--j] != '/') 4669 continue; 4670 4671 if (c == '\0') 4672 dest[++j] = '/'; 4673 } while (c != '\0'); 4674 4675 dest[j] = '\0'; 4676 regs[rd] = (uintptr_t)dest; 4677 mstate->dtms_scratch_ptr += size; 4678 break; 4679 } 4680 4681 case DIF_SUBR_INET_NTOA: 4682 case DIF_SUBR_INET_NTOA6: 4683 case DIF_SUBR_INET_NTOP: { 4684 size_t size; 4685 int af, argi, i; 4686 char *base, *end; 4687 4688 if (subr == DIF_SUBR_INET_NTOP) { 4689 af = (int)tupregs[0].dttk_value; 4690 argi = 1; 4691 } else { 4692 af = subr == DIF_SUBR_INET_NTOA ? AF_INET: AF_INET6; 4693 argi = 0; 4694 } 4695 4696 if (af == AF_INET) { 4697 ipaddr_t ip4; 4698 uint8_t *ptr8, val; 4699 4700 /* 4701 * Safely load the IPv4 address. 4702 */ 4703 ip4 = dtrace_load32(tupregs[argi].dttk_value); 4704 4705 /* 4706 * Check an IPv4 string will fit in scratch. 4707 */ 4708 size = INET_ADDRSTRLEN; 4709 if (!DTRACE_INSCRATCH(mstate, size)) { 4710 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4711 regs[rd] = 0; 4712 break; 4713 } 4714 base = (char *)mstate->dtms_scratch_ptr; 4715 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4716 4717 /* 4718 * Stringify as a dotted decimal quad. 4719 */ 4720 *end-- = '\0'; 4721 ptr8 = (uint8_t *)&ip4; 4722 for (i = 3; i >= 0; i--) { 4723 val = ptr8[i]; 4724 4725 if (val == 0) { 4726 *end-- = '0'; 4727 } else { 4728 for (; val; val /= 10) { 4729 *end-- = '0' + (val % 10); 4730 } 4731 } 4732 4733 if (i > 0) 4734 *end-- = '.'; 4735 } 4736 ASSERT(end + 1 >= base); 4737 4738 } else if (af == AF_INET6) { 4739 struct in6_addr ip6; 4740 int firstzero, tryzero, numzero, v6end; 4741 uint16_t val; 4742 const char digits[] = "0123456789abcdef"; 4743 4744 /* 4745 * Stringify using RFC 1884 convention 2 - 16 bit 4746 * hexadecimal values with a zero-run compression. 4747 * Lower case hexadecimal digits are used. 4748 * eg, fe80::214:4fff:fe0b:76c8. 4749 * The IPv4 embedded form is returned for inet_ntop, 4750 * just the IPv4 string is returned for inet_ntoa6. 4751 */ 4752 4753 /* 4754 * Safely load the IPv6 address. 4755 */ 4756 dtrace_bcopy( 4757 (void *)(uintptr_t)tupregs[argi].dttk_value, 4758 (void *)(uintptr_t)&ip6, sizeof (struct in6_addr)); 4759 4760 /* 4761 * Check an IPv6 string will fit in scratch. 4762 */ 4763 size = INET6_ADDRSTRLEN; 4764 if (!DTRACE_INSCRATCH(mstate, size)) { 4765 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 4766 regs[rd] = 0; 4767 break; 4768 } 4769 base = (char *)mstate->dtms_scratch_ptr; 4770 end = (char *)mstate->dtms_scratch_ptr + size - 1; 4771 *end-- = '\0'; 4772 4773 /* 4774 * Find the longest run of 16 bit zero values 4775 * for the single allowed zero compression - "::". 
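 *
 * (For example, fe80:0:0:0:214:4fff:fe0b:76c8 has its longest
 * run of zero-valued 16-bit groups in words 1 through 3, so it
 * renders as fe80::214:4fff:fe0b:76c8 -- the address shown in
 * the example above.)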
4776 */ 4777 firstzero = -1; 4778 tryzero = -1; 4779 numzero = 1; 4780 for (i = 0; i < sizeof (struct in6_addr); i++) { 4781 #if defined(sun) 4782 if (ip6._S6_un._S6_u8[i] == 0 && 4783 #else 4784 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4785 #endif 4786 tryzero == -1 && i % 2 == 0) { 4787 tryzero = i; 4788 continue; 4789 } 4790 4791 if (tryzero != -1 && 4792 #if defined(sun) 4793 (ip6._S6_un._S6_u8[i] != 0 || 4794 #else 4795 (ip6.__u6_addr.__u6_addr8[i] != 0 || 4796 #endif 4797 i == sizeof (struct in6_addr) - 1)) { 4798 4799 if (i - tryzero <= numzero) { 4800 tryzero = -1; 4801 continue; 4802 } 4803 4804 firstzero = tryzero; 4805 numzero = i - i % 2 - tryzero; 4806 tryzero = -1; 4807 4808 #if defined(sun) 4809 if (ip6._S6_un._S6_u8[i] == 0 && 4810 #else 4811 if (ip6.__u6_addr.__u6_addr8[i] == 0 && 4812 #endif 4813 i == sizeof (struct in6_addr) - 1) 4814 numzero += 2; 4815 } 4816 } 4817 ASSERT(firstzero + numzero <= sizeof (struct in6_addr)); 4818 4819 /* 4820 * Check for an IPv4 embedded address. 4821 */ 4822 v6end = sizeof (struct in6_addr) - 2; 4823 if (IN6_IS_ADDR_V4MAPPED(&ip6) || 4824 IN6_IS_ADDR_V4COMPAT(&ip6)) { 4825 for (i = sizeof (struct in6_addr) - 1; 4826 i >= DTRACE_V4MAPPED_OFFSET; i--) { 4827 ASSERT(end >= base); 4828 4829 #if defined(sun) 4830 val = ip6._S6_un._S6_u8[i]; 4831 #else 4832 val = ip6.__u6_addr.__u6_addr8[i]; 4833 #endif 4834 4835 if (val == 0) { 4836 *end-- = '0'; 4837 } else { 4838 for (; val; val /= 10) { 4839 *end-- = '0' + val % 10; 4840 } 4841 } 4842 4843 if (i > DTRACE_V4MAPPED_OFFSET) 4844 *end-- = '.'; 4845 } 4846 4847 if (subr == DIF_SUBR_INET_NTOA6) 4848 goto inetout; 4849 4850 /* 4851 * Set v6end to skip the IPv4 address that 4852 * we have already stringified. 4853 */ 4854 v6end = 10; 4855 } 4856 4857 /* 4858 * Build the IPv6 string by working through the 4859 * address in reverse. 4860 */ 4861 for (i = v6end; i >= 0; i -= 2) { 4862 ASSERT(end >= base); 4863 4864 if (i == firstzero + numzero - 2) { 4865 *end-- = ':'; 4866 *end-- = ':'; 4867 i -= numzero - 2; 4868 continue; 4869 } 4870 4871 if (i < 14 && i != firstzero - 2) 4872 *end-- = ':'; 4873 4874 #if defined(sun) 4875 val = (ip6._S6_un._S6_u8[i] << 8) + 4876 ip6._S6_un._S6_u8[i + 1]; 4877 #else 4878 val = (ip6.__u6_addr.__u6_addr8[i] << 8) + 4879 ip6.__u6_addr.__u6_addr8[i + 1]; 4880 #endif 4881 4882 if (val == 0) { 4883 *end-- = '0'; 4884 } else { 4885 for (; val; val /= 16) { 4886 *end-- = digits[val % 16]; 4887 } 4888 } 4889 } 4890 ASSERT(end + 1 >= base); 4891 4892 } else { 4893 /* 4894 * The user didn't use AF_INET or AF_INET6.
4895 */ 4896 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 4897 regs[rd] = 0; 4898 break; 4899 } 4900 4901 inetout: regs[rd] = (uintptr_t)end + 1; 4902 mstate->dtms_scratch_ptr += size; 4903 break; 4904 } 4905 4906 case DIF_SUBR_MEMREF: { 4907 uintptr_t size = 2 * sizeof(uintptr_t); 4908 uintptr_t *memref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4909 size_t scratch_size = ((uintptr_t) memref - mstate->dtms_scratch_ptr) + size; 4910 4911 /* address and length */ 4912 memref[0] = tupregs[0].dttk_value; 4913 memref[1] = tupregs[1].dttk_value; 4914 4915 regs[rd] = (uintptr_t) memref; 4916 mstate->dtms_scratch_ptr += scratch_size; 4917 break; 4918 } 4919 4920 case DIF_SUBR_TYPEREF: { 4921 uintptr_t size = 4 * sizeof(uintptr_t); 4922 uintptr_t *typeref = (uintptr_t *) P2ROUNDUP(mstate->dtms_scratch_ptr, sizeof(uintptr_t)); 4923 size_t scratch_size = ((uintptr_t) typeref - mstate->dtms_scratch_ptr) + size; 4924 4925 /* address, num_elements, type_str, type_len */ 4926 typeref[0] = tupregs[0].dttk_value; 4927 typeref[1] = tupregs[1].dttk_value; 4928 typeref[2] = tupregs[2].dttk_value; 4929 typeref[3] = tupregs[3].dttk_value; 4930 4931 regs[rd] = (uintptr_t) typeref; 4932 mstate->dtms_scratch_ptr += scratch_size; 4933 break; 4934 } 4935 } 4936 } 4937 4938 /* 4939 * Emulate the execution of DTrace IR instructions specified by the given 4940 * DIF object. This function is deliberately void of assertions as all of 4941 * the necessary checks are handled by a call to dtrace_difo_validate(). 4942 */ 4943 static uint64_t 4944 dtrace_dif_emulate(dtrace_difo_t *difo, dtrace_mstate_t *mstate, 4945 dtrace_vstate_t *vstate, dtrace_state_t *state) 4946 { 4947 const dif_instr_t *text = difo->dtdo_buf; 4948 const uint_t textlen = difo->dtdo_len; 4949 const char *strtab = difo->dtdo_strtab; 4950 const uint64_t *inttab = difo->dtdo_inttab; 4951 4952 uint64_t rval = 0; 4953 dtrace_statvar_t *svar; 4954 dtrace_dstate_t *dstate = &vstate->dtvs_dynvars; 4955 dtrace_difv_t *v; 4956 volatile uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 4957 volatile uintptr_t *illval = &cpu_core[curcpu].cpuc_dtrace_illval; 4958 4959 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 4960 uint64_t regs[DIF_DIR_NREGS]; 4961 uint64_t *tmp; 4962 4963 uint8_t cc_n = 0, cc_z = 0, cc_v = 0, cc_c = 0; 4964 int64_t cc_r; 4965 uint_t pc = 0, id, opc = 0; 4966 uint8_t ttop = 0; 4967 dif_instr_t instr; 4968 uint_t r1, r2, rd; 4969 4970 /* 4971 * We stash the current DIF object into the machine state: we need it 4972 * for subsequent access checking. 
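 * (In particular, dtrace_canload() permits reads from this DIF
 * object's own string table by way of dtms_difo, so the stash
 * must precede the first emulated load.)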
4973 */ 4974 mstate->dtms_difo = difo; 4975 4976 regs[DIF_REG_R0] = 0; /* %r0 is fixed at zero */ 4977 4978 while (pc < textlen && !(*flags & CPU_DTRACE_FAULT)) { 4979 opc = pc; 4980 4981 instr = text[pc++]; 4982 r1 = DIF_INSTR_R1(instr); 4983 r2 = DIF_INSTR_R2(instr); 4984 rd = DIF_INSTR_RD(instr); 4985 4986 switch (DIF_INSTR_OP(instr)) { 4987 case DIF_OP_OR: 4988 regs[rd] = regs[r1] | regs[r2]; 4989 break; 4990 case DIF_OP_XOR: 4991 regs[rd] = regs[r1] ^ regs[r2]; 4992 break; 4993 case DIF_OP_AND: 4994 regs[rd] = regs[r1] & regs[r2]; 4995 break; 4996 case DIF_OP_SLL: 4997 regs[rd] = regs[r1] << regs[r2]; 4998 break; 4999 case DIF_OP_SRL: 5000 regs[rd] = regs[r1] >> regs[r2]; 5001 break; 5002 case DIF_OP_SUB: 5003 regs[rd] = regs[r1] - regs[r2]; 5004 break; 5005 case DIF_OP_ADD: 5006 regs[rd] = regs[r1] + regs[r2]; 5007 break; 5008 case DIF_OP_MUL: 5009 regs[rd] = regs[r1] * regs[r2]; 5010 break; 5011 case DIF_OP_SDIV: 5012 if (regs[r2] == 0) { 5013 regs[rd] = 0; 5014 *flags |= CPU_DTRACE_DIVZERO; 5015 } else { 5016 regs[rd] = (int64_t)regs[r1] / 5017 (int64_t)regs[r2]; 5018 } 5019 break; 5020 5021 case DIF_OP_UDIV: 5022 if (regs[r2] == 0) { 5023 regs[rd] = 0; 5024 *flags |= CPU_DTRACE_DIVZERO; 5025 } else { 5026 regs[rd] = regs[r1] / regs[r2]; 5027 } 5028 break; 5029 5030 case DIF_OP_SREM: 5031 if (regs[r2] == 0) { 5032 regs[rd] = 0; 5033 *flags |= CPU_DTRACE_DIVZERO; 5034 } else { 5035 regs[rd] = (int64_t)regs[r1] % 5036 (int64_t)regs[r2]; 5037 } 5038 break; 5039 5040 case DIF_OP_UREM: 5041 if (regs[r2] == 0) { 5042 regs[rd] = 0; 5043 *flags |= CPU_DTRACE_DIVZERO; 5044 } else { 5045 regs[rd] = regs[r1] % regs[r2]; 5046 } 5047 break; 5048 5049 case DIF_OP_NOT: 5050 regs[rd] = ~regs[r1]; 5051 break; 5052 case DIF_OP_MOV: 5053 regs[rd] = regs[r1]; 5054 break; 5055 case DIF_OP_CMP: 5056 cc_r = regs[r1] - regs[r2]; 5057 cc_n = cc_r < 0; 5058 cc_z = cc_r == 0; 5059 cc_v = 0; 5060 cc_c = regs[r1] < regs[r2]; 5061 break; 5062 case DIF_OP_TST: 5063 cc_n = cc_v = cc_c = 0; 5064 cc_z = regs[r1] == 0; 5065 break; 5066 case DIF_OP_BA: 5067 pc = DIF_INSTR_LABEL(instr); 5068 break; 5069 case DIF_OP_BE: 5070 if (cc_z) 5071 pc = DIF_INSTR_LABEL(instr); 5072 break; 5073 case DIF_OP_BNE: 5074 if (cc_z == 0) 5075 pc = DIF_INSTR_LABEL(instr); 5076 break; 5077 case DIF_OP_BG: 5078 if ((cc_z | (cc_n ^ cc_v)) == 0) 5079 pc = DIF_INSTR_LABEL(instr); 5080 break; 5081 case DIF_OP_BGU: 5082 if ((cc_c | cc_z) == 0) 5083 pc = DIF_INSTR_LABEL(instr); 5084 break; 5085 case DIF_OP_BGE: 5086 if ((cc_n ^ cc_v) == 0) 5087 pc = DIF_INSTR_LABEL(instr); 5088 break; 5089 case DIF_OP_BGEU: 5090 if (cc_c == 0) 5091 pc = DIF_INSTR_LABEL(instr); 5092 break; 5093 case DIF_OP_BL: 5094 if (cc_n ^ cc_v) 5095 pc = DIF_INSTR_LABEL(instr); 5096 break; 5097 case DIF_OP_BLU: 5098 if (cc_c) 5099 pc = DIF_INSTR_LABEL(instr); 5100 break; 5101 case DIF_OP_BLE: 5102 if (cc_z | (cc_n ^ cc_v)) 5103 pc = DIF_INSTR_LABEL(instr); 5104 break; 5105 case DIF_OP_BLEU: 5106 if (cc_c | cc_z) 5107 pc = DIF_INSTR_LABEL(instr); 5108 break; 5109 case DIF_OP_RLDSB: 5110 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5111 *flags |= CPU_DTRACE_KPRIV; 5112 *illval = regs[r1]; 5113 break; 5114 } 5115 /*FALLTHROUGH*/ 5116 case DIF_OP_LDSB: 5117 regs[rd] = (int8_t)dtrace_load8(regs[r1]); 5118 break; 5119 case DIF_OP_RLDSH: 5120 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5121 *flags |= CPU_DTRACE_KPRIV; 5122 *illval = regs[r1]; 5123 break; 5124 } 5125 /*FALLTHROUGH*/ 5126 case DIF_OP_LDSH: 5127 regs[rd] = (int16_t)dtrace_load16(regs[r1]); 5128 break; 
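		/*
		 * The remaining loads follow the same pattern: each RLD*
		 * ("restricted load") opcode verifies, via dtrace_canstore(),
		 * that the address refers to memory DTrace itself owns
		 * before falling through to the unchecked LD* case of the
		 * same width.
		 */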
5129 case DIF_OP_RLDSW: 5130 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5131 *flags |= CPU_DTRACE_KPRIV; 5132 *illval = regs[r1]; 5133 break; 5134 } 5135 /*FALLTHROUGH*/ 5136 case DIF_OP_LDSW: 5137 regs[rd] = (int32_t)dtrace_load32(regs[r1]); 5138 break; 5139 case DIF_OP_RLDUB: 5140 if (!dtrace_canstore(regs[r1], 1, mstate, vstate)) { 5141 *flags |= CPU_DTRACE_KPRIV; 5142 *illval = regs[r1]; 5143 break; 5144 } 5145 /*FALLTHROUGH*/ 5146 case DIF_OP_LDUB: 5147 regs[rd] = dtrace_load8(regs[r1]); 5148 break; 5149 case DIF_OP_RLDUH: 5150 if (!dtrace_canstore(regs[r1], 2, mstate, vstate)) { 5151 *flags |= CPU_DTRACE_KPRIV; 5152 *illval = regs[r1]; 5153 break; 5154 } 5155 /*FALLTHROUGH*/ 5156 case DIF_OP_LDUH: 5157 regs[rd] = dtrace_load16(regs[r1]); 5158 break; 5159 case DIF_OP_RLDUW: 5160 if (!dtrace_canstore(regs[r1], 4, mstate, vstate)) { 5161 *flags |= CPU_DTRACE_KPRIV; 5162 *illval = regs[r1]; 5163 break; 5164 } 5165 /*FALLTHROUGH*/ 5166 case DIF_OP_LDUW: 5167 regs[rd] = dtrace_load32(regs[r1]); 5168 break; 5169 case DIF_OP_RLDX: 5170 if (!dtrace_canstore(regs[r1], 8, mstate, vstate)) { 5171 *flags |= CPU_DTRACE_KPRIV; 5172 *illval = regs[r1]; 5173 break; 5174 } 5175 /*FALLTHROUGH*/ 5176 case DIF_OP_LDX: 5177 regs[rd] = dtrace_load64(regs[r1]); 5178 break; 5179 case DIF_OP_ULDSB: 5180 regs[rd] = (int8_t) 5181 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5182 break; 5183 case DIF_OP_ULDSH: 5184 regs[rd] = (int16_t) 5185 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5186 break; 5187 case DIF_OP_ULDSW: 5188 regs[rd] = (int32_t) 5189 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5190 break; 5191 case DIF_OP_ULDUB: 5192 regs[rd] = 5193 dtrace_fuword8((void *)(uintptr_t)regs[r1]); 5194 break; 5195 case DIF_OP_ULDUH: 5196 regs[rd] = 5197 dtrace_fuword16((void *)(uintptr_t)regs[r1]); 5198 break; 5199 case DIF_OP_ULDUW: 5200 regs[rd] = 5201 dtrace_fuword32((void *)(uintptr_t)regs[r1]); 5202 break; 5203 case DIF_OP_ULDX: 5204 regs[rd] = 5205 dtrace_fuword64((void *)(uintptr_t)regs[r1]); 5206 break; 5207 case DIF_OP_RET: 5208 rval = regs[rd]; 5209 pc = textlen; 5210 break; 5211 case DIF_OP_NOP: 5212 break; 5213 case DIF_OP_SETX: 5214 regs[rd] = inttab[DIF_INSTR_INTEGER(instr)]; 5215 break; 5216 case DIF_OP_SETS: 5217 regs[rd] = (uint64_t)(uintptr_t) 5218 (strtab + DIF_INSTR_STRING(instr)); 5219 break; 5220 case DIF_OP_SCMP: { 5221 size_t sz = state->dts_options[DTRACEOPT_STRSIZE]; 5222 uintptr_t s1 = regs[r1]; 5223 uintptr_t s2 = regs[r2]; 5224 5225 if (s1 != 0 && 5226 !dtrace_strcanload(s1, sz, mstate, vstate)) 5227 break; 5228 if (s2 != 0 && 5229 !dtrace_strcanload(s2, sz, mstate, vstate)) 5230 break; 5231 5232 cc_r = dtrace_strncmp((char *)s1, (char *)s2, sz); 5233 5234 cc_n = cc_r < 0; 5235 cc_z = cc_r == 0; 5236 cc_v = cc_c = 0; 5237 break; 5238 } 5239 case DIF_OP_LDGA: 5240 regs[rd] = dtrace_dif_variable(mstate, state, 5241 r1, regs[r2]); 5242 break; 5243 case DIF_OP_LDGS: 5244 id = DIF_INSTR_VAR(instr); 5245 5246 if (id >= DIF_VAR_OTHER_UBASE) { 5247 uintptr_t a; 5248 5249 id -= DIF_VAR_OTHER_UBASE; 5250 svar = vstate->dtvs_globals[id]; 5251 ASSERT(svar != NULL); 5252 v = &svar->dtsv_var; 5253 5254 if (!(v->dtdv_type.dtdt_flags & DIF_TF_BYREF)) { 5255 regs[rd] = svar->dtsv_data; 5256 break; 5257 } 5258 5259 a = (uintptr_t)svar->dtsv_data; 5260 5261 if (*(uint8_t *)a == UINT8_MAX) { 5262 /* 5263 * If the 0th byte is set to UINT8_MAX 5264 * then this is to be treated as a 5265 * reference to a NULL variable. 
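 *
 * (By-ref variables reserve a leading word of metadata:
 * the first byte is UINT8_MAX when the variable has
 * been assigned NULL, and the payload proper begins at
 * a + sizeof (uint64_t). The store-side counterpart is
 * in DIF_OP_STGS below.)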
5266 */ 5267 regs[rd] = 0; 5268 } else { 5269 regs[rd] = a + sizeof (uint64_t); 5270 } 5271 5272 break; 5273 } 5274 5275 regs[rd] = dtrace_dif_variable(mstate, state, id, 0); 5276 break; 5277 5278 case DIF_OP_STGS: 5279 id = DIF_INSTR_VAR(instr); 5280 5281 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5282 id -= DIF_VAR_OTHER_UBASE; 5283 5284 svar = vstate->dtvs_globals[id]; 5285 ASSERT(svar != NULL); 5286 v = &svar->dtsv_var; 5287 5288 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5289 uintptr_t a = (uintptr_t)svar->dtsv_data; 5290 5291 ASSERT(a != 0); 5292 ASSERT(svar->dtsv_size != 0); 5293 5294 if (regs[rd] == 0) { 5295 *(uint8_t *)a = UINT8_MAX; 5296 break; 5297 } else { 5298 *(uint8_t *)a = 0; 5299 a += sizeof (uint64_t); 5300 } 5301 if (!dtrace_vcanload( 5302 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5303 mstate, vstate)) 5304 break; 5305 5306 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5307 (void *)a, &v->dtdv_type); 5308 break; 5309 } 5310 5311 svar->dtsv_data = regs[rd]; 5312 break; 5313 5314 case DIF_OP_LDTA: 5315 /* 5316 * There are no DTrace built-in thread-local arrays at 5317 * present. This opcode is saved for future work. 5318 */ 5319 *flags |= CPU_DTRACE_ILLOP; 5320 regs[rd] = 0; 5321 break; 5322 5323 case DIF_OP_LDLS: 5324 id = DIF_INSTR_VAR(instr); 5325 5326 if (id < DIF_VAR_OTHER_UBASE) { 5327 /* 5328 * For now, this has no meaning. 5329 */ 5330 regs[rd] = 0; 5331 break; 5332 } 5333 5334 id -= DIF_VAR_OTHER_UBASE; 5335 5336 ASSERT(id < vstate->dtvs_nlocals); 5337 ASSERT(vstate->dtvs_locals != NULL); 5338 5339 svar = vstate->dtvs_locals[id]; 5340 ASSERT(svar != NULL); 5341 v = &svar->dtsv_var; 5342 5343 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5344 uintptr_t a = (uintptr_t)svar->dtsv_data; 5345 size_t sz = v->dtdv_type.dtdt_size; 5346 5347 sz += sizeof (uint64_t); 5348 ASSERT(svar->dtsv_size == NCPU * sz); 5349 a += curcpu * sz; 5350 5351 if (*(uint8_t *)a == UINT8_MAX) { 5352 /* 5353 * If the 0th byte is set to UINT8_MAX 5354 * then this is to be treated as a 5355 * reference to a NULL variable. 
5356 */ 5357 regs[rd] = 0; 5358 } else { 5359 regs[rd] = a + sizeof (uint64_t); 5360 } 5361 5362 break; 5363 } 5364 5365 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5366 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5367 regs[rd] = tmp[curcpu]; 5368 break; 5369 5370 case DIF_OP_STLS: 5371 id = DIF_INSTR_VAR(instr); 5372 5373 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5374 id -= DIF_VAR_OTHER_UBASE; 5375 ASSERT(id < vstate->dtvs_nlocals); 5376 5377 ASSERT(vstate->dtvs_locals != NULL); 5378 svar = vstate->dtvs_locals[id]; 5379 ASSERT(svar != NULL); 5380 v = &svar->dtsv_var; 5381 5382 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5383 uintptr_t a = (uintptr_t)svar->dtsv_data; 5384 size_t sz = v->dtdv_type.dtdt_size; 5385 5386 sz += sizeof (uint64_t); 5387 ASSERT(svar->dtsv_size == NCPU * sz); 5388 a += curcpu * sz; 5389 5390 if (regs[rd] == 0) { 5391 *(uint8_t *)a = UINT8_MAX; 5392 break; 5393 } else { 5394 *(uint8_t *)a = 0; 5395 a += sizeof (uint64_t); 5396 } 5397 5398 if (!dtrace_vcanload( 5399 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5400 mstate, vstate)) 5401 break; 5402 5403 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5404 (void *)a, &v->dtdv_type); 5405 break; 5406 } 5407 5408 ASSERT(svar->dtsv_size == NCPU * sizeof (uint64_t)); 5409 tmp = (uint64_t *)(uintptr_t)svar->dtsv_data; 5410 tmp[curcpu] = regs[rd]; 5411 break; 5412 5413 case DIF_OP_LDTS: { 5414 dtrace_dynvar_t *dvar; 5415 dtrace_key_t *key; 5416 5417 id = DIF_INSTR_VAR(instr); 5418 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5419 id -= DIF_VAR_OTHER_UBASE; 5420 v = &vstate->dtvs_tlocals[id]; 5421 5422 key = &tupregs[DIF_DTR_NREGS]; 5423 key[0].dttk_value = (uint64_t)id; 5424 key[0].dttk_size = 0; 5425 DTRACE_TLS_THRKEY(key[1].dttk_value); 5426 key[1].dttk_size = 0; 5427 5428 dvar = dtrace_dynvar(dstate, 2, key, 5429 sizeof (uint64_t), DTRACE_DYNVAR_NOALLOC, 5430 mstate, vstate); 5431 5432 if (dvar == NULL) { 5433 regs[rd] = 0; 5434 break; 5435 } 5436 5437 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5438 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5439 } else { 5440 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5441 } 5442 5443 break; 5444 } 5445 5446 case DIF_OP_STTS: { 5447 dtrace_dynvar_t *dvar; 5448 dtrace_key_t *key; 5449 5450 id = DIF_INSTR_VAR(instr); 5451 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5452 id -= DIF_VAR_OTHER_UBASE; 5453 5454 key = &tupregs[DIF_DTR_NREGS]; 5455 key[0].dttk_value = (uint64_t)id; 5456 key[0].dttk_size = 0; 5457 DTRACE_TLS_THRKEY(key[1].dttk_value); 5458 key[1].dttk_size = 0; 5459 v = &vstate->dtvs_tlocals[id]; 5460 5461 dvar = dtrace_dynvar(dstate, 2, key, 5462 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5463 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5464 regs[rd] ? DTRACE_DYNVAR_ALLOC : 5465 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5466 5467 /* 5468 * Given that we're storing to thread-local data, 5469 * we need to flush our predicate cache. 
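 * (The cache holds the ID of a predicate known to have
 * evaluated to false for this thread; a store to a
 * thread-local variable could change that verdict, so the
 * cached ID must be invalidated.)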
5470 */ 5471 curthread->t_predcache = 0; 5472 5473 if (dvar == NULL) 5474 break; 5475 5476 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5477 if (!dtrace_vcanload( 5478 (void *)(uintptr_t)regs[rd], 5479 &v->dtdv_type, mstate, vstate)) 5480 break; 5481 5482 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5483 dvar->dtdv_data, &v->dtdv_type); 5484 } else { 5485 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5486 } 5487 5488 break; 5489 } 5490 5491 case DIF_OP_SRA: 5492 regs[rd] = (int64_t)regs[r1] >> regs[r2]; 5493 break; 5494 5495 case DIF_OP_CALL: 5496 dtrace_dif_subr(DIF_INSTR_SUBR(instr), rd, 5497 regs, tupregs, ttop, mstate, state); 5498 break; 5499 5500 case DIF_OP_PUSHTR: 5501 if (ttop == DIF_DTR_NREGS) { 5502 *flags |= CPU_DTRACE_TUPOFLOW; 5503 break; 5504 } 5505 5506 if (r1 == DIF_TYPE_STRING) { 5507 /* 5508 * If this is a string type and the size is 0, 5509 * we'll use the system-wide default string 5510 * size. Note that we are _not_ looking at 5511 * the value of the DTRACEOPT_STRSIZE option; 5512 * had this been set, we would expect to have 5513 * a non-zero size value in the "pushtr". 5514 */ 5515 tupregs[ttop].dttk_size = 5516 dtrace_strlen((char *)(uintptr_t)regs[rd], 5517 regs[r2] ? regs[r2] : 5518 dtrace_strsize_default) + 1; 5519 } else { 5520 tupregs[ttop].dttk_size = regs[r2]; 5521 } 5522 5523 tupregs[ttop++].dttk_value = regs[rd]; 5524 break; 5525 5526 case DIF_OP_PUSHTV: 5527 if (ttop == DIF_DTR_NREGS) { 5528 *flags |= CPU_DTRACE_TUPOFLOW; 5529 break; 5530 } 5531 5532 tupregs[ttop].dttk_value = regs[rd]; 5533 tupregs[ttop++].dttk_size = 0; 5534 break; 5535 5536 case DIF_OP_POPTS: 5537 if (ttop != 0) 5538 ttop--; 5539 break; 5540 5541 case DIF_OP_FLUSHTS: 5542 ttop = 0; 5543 break; 5544 5545 case DIF_OP_LDGAA: 5546 case DIF_OP_LDTAA: { 5547 dtrace_dynvar_t *dvar; 5548 dtrace_key_t *key = tupregs; 5549 uint_t nkeys = ttop; 5550 5551 id = DIF_INSTR_VAR(instr); 5552 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5553 id -= DIF_VAR_OTHER_UBASE; 5554 5555 key[nkeys].dttk_value = (uint64_t)id; 5556 key[nkeys++].dttk_size = 0; 5557 5558 if (DIF_INSTR_OP(instr) == DIF_OP_LDTAA) { 5559 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5560 key[nkeys++].dttk_size = 0; 5561 v = &vstate->dtvs_tlocals[id]; 5562 } else { 5563 v = &vstate->dtvs_globals[id]->dtsv_var; 5564 } 5565 5566 dvar = dtrace_dynvar(dstate, nkeys, key, 5567 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5568 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5569 DTRACE_DYNVAR_NOALLOC, mstate, vstate); 5570 5571 if (dvar == NULL) { 5572 regs[rd] = 0; 5573 break; 5574 } 5575 5576 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5577 regs[rd] = (uint64_t)(uintptr_t)dvar->dtdv_data; 5578 } else { 5579 regs[rd] = *((uint64_t *)dvar->dtdv_data); 5580 } 5581 5582 break; 5583 } 5584 5585 case DIF_OP_STGAA: 5586 case DIF_OP_STTAA: { 5587 dtrace_dynvar_t *dvar; 5588 dtrace_key_t *key = tupregs; 5589 uint_t nkeys = ttop; 5590 5591 id = DIF_INSTR_VAR(instr); 5592 ASSERT(id >= DIF_VAR_OTHER_UBASE); 5593 id -= DIF_VAR_OTHER_UBASE; 5594 5595 key[nkeys].dttk_value = (uint64_t)id; 5596 key[nkeys++].dttk_size = 0; 5597 5598 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) { 5599 DTRACE_TLS_THRKEY(key[nkeys].dttk_value); 5600 key[nkeys++].dttk_size = 0; 5601 v = &vstate->dtvs_tlocals[id]; 5602 } else { 5603 v = &vstate->dtvs_globals[id]->dtsv_var; 5604 } 5605 5606 dvar = dtrace_dynvar(dstate, nkeys, key, 5607 v->dtdv_type.dtdt_size > sizeof (uint64_t) ? 5608 v->dtdv_type.dtdt_size : sizeof (uint64_t), 5609 regs[rd] ? 
DTRACE_DYNVAR_ALLOC : 5610 DTRACE_DYNVAR_DEALLOC, mstate, vstate); 5611 5612 if (dvar == NULL) 5613 break; 5614 5615 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) { 5616 if (!dtrace_vcanload( 5617 (void *)(uintptr_t)regs[rd], &v->dtdv_type, 5618 mstate, vstate)) 5619 break; 5620 5621 dtrace_vcopy((void *)(uintptr_t)regs[rd], 5622 dvar->dtdv_data, &v->dtdv_type); 5623 } else { 5624 *((uint64_t *)dvar->dtdv_data) = regs[rd]; 5625 } 5626 5627 break; 5628 } 5629 5630 case DIF_OP_ALLOCS: { 5631 uintptr_t ptr = P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5632 size_t size = ptr - mstate->dtms_scratch_ptr + regs[r1]; 5633 5634 /* 5635 * Rounding up the user allocation size could have 5636 * overflowed large, bogus allocations (like -1ULL) to 5637 * 0. 5638 */ 5639 if (size < regs[r1] || 5640 !DTRACE_INSCRATCH(mstate, size)) { 5641 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5642 regs[rd] = 0; 5643 break; 5644 } 5645 5646 dtrace_bzero((void *) mstate->dtms_scratch_ptr, size); 5647 mstate->dtms_scratch_ptr += size; 5648 regs[rd] = ptr; 5649 break; 5650 } 5651 5652 case DIF_OP_COPYS: 5653 if (!dtrace_canstore(regs[rd], regs[r2], 5654 mstate, vstate)) { 5655 *flags |= CPU_DTRACE_BADADDR; 5656 *illval = regs[rd]; 5657 break; 5658 } 5659 5660 if (!dtrace_canload(regs[r1], regs[r2], mstate, vstate)) 5661 break; 5662 5663 dtrace_bcopy((void *)(uintptr_t)regs[r1], 5664 (void *)(uintptr_t)regs[rd], (size_t)regs[r2]); 5665 break; 5666 5667 case DIF_OP_STB: 5668 if (!dtrace_canstore(regs[rd], 1, mstate, vstate)) { 5669 *flags |= CPU_DTRACE_BADADDR; 5670 *illval = regs[rd]; 5671 break; 5672 } 5673 *((uint8_t *)(uintptr_t)regs[rd]) = (uint8_t)regs[r1]; 5674 break; 5675 5676 case DIF_OP_STH: 5677 if (!dtrace_canstore(regs[rd], 2, mstate, vstate)) { 5678 *flags |= CPU_DTRACE_BADADDR; 5679 *illval = regs[rd]; 5680 break; 5681 } 5682 if (regs[rd] & 1) { 5683 *flags |= CPU_DTRACE_BADALIGN; 5684 *illval = regs[rd]; 5685 break; 5686 } 5687 *((uint16_t *)(uintptr_t)regs[rd]) = (uint16_t)regs[r1]; 5688 break; 5689 5690 case DIF_OP_STW: 5691 if (!dtrace_canstore(regs[rd], 4, mstate, vstate)) { 5692 *flags |= CPU_DTRACE_BADADDR; 5693 *illval = regs[rd]; 5694 break; 5695 } 5696 if (regs[rd] & 3) { 5697 *flags |= CPU_DTRACE_BADALIGN; 5698 *illval = regs[rd]; 5699 break; 5700 } 5701 *((uint32_t *)(uintptr_t)regs[rd]) = (uint32_t)regs[r1]; 5702 break; 5703 5704 case DIF_OP_STX: 5705 if (!dtrace_canstore(regs[rd], 8, mstate, vstate)) { 5706 *flags |= CPU_DTRACE_BADADDR; 5707 *illval = regs[rd]; 5708 break; 5709 } 5710 if (regs[rd] & 7) { 5711 *flags |= CPU_DTRACE_BADALIGN; 5712 *illval = regs[rd]; 5713 break; 5714 } 5715 *((uint64_t *)(uintptr_t)regs[rd]) = regs[r1]; 5716 break; 5717 } 5718 } 5719 5720 if (!(*flags & CPU_DTRACE_FAULT)) 5721 return (rval); 5722 5723 mstate->dtms_fltoffs = opc * sizeof (dif_instr_t); 5724 mstate->dtms_present |= DTRACE_MSTATE_FLTOFFS; 5725 5726 return (0); 5727 } 5728 5729 static void 5730 dtrace_action_breakpoint(dtrace_ecb_t *ecb) 5731 { 5732 dtrace_probe_t *probe = ecb->dte_probe; 5733 dtrace_provider_t *prov = probe->dtpr_provider; 5734 char c[DTRACE_FULLNAMELEN + 80], *str; 5735 char *msg = "dtrace: breakpoint action at probe "; 5736 char *ecbmsg = " (ecb "; 5737 uintptr_t mask = (0xf << (sizeof (uintptr_t) * NBBY / 4)); 5738 uintptr_t val = (uintptr_t)ecb; 5739 int shift = (sizeof (uintptr_t) * NBBY) - 4, i = 0; 5740 5741 if (dtrace_destructive_disallow) 5742 return; 5743 5744 /* 5745 * It's impossible to be taking action on the NULL probe. 
5746 */ 5747 ASSERT(probe != NULL); 5748 5749 /* 5750 * This is a poor man's (destitute man's?) sprintf(): we want to 5751 * print the provider name, module name, function name and name of 5752 * the probe, along with the hex address of the ECB with the breakpoint 5753 * action -- all of which we must place in the character buffer by 5754 * hand. 5755 */ 5756 while (*msg != '\0') 5757 c[i++] = *msg++; 5758 5759 for (str = prov->dtpv_name; *str != '\0'; str++) 5760 c[i++] = *str; 5761 c[i++] = ':'; 5762 5763 for (str = probe->dtpr_mod; *str != '\0'; str++) 5764 c[i++] = *str; 5765 c[i++] = ':'; 5766 5767 for (str = probe->dtpr_func; *str != '\0'; str++) 5768 c[i++] = *str; 5769 c[i++] = ':'; 5770 5771 for (str = probe->dtpr_name; *str != '\0'; str++) 5772 c[i++] = *str; 5773 5774 while (*ecbmsg != '\0') 5775 c[i++] = *ecbmsg++; 5776 5777 while (shift >= 0) { 5778 mask = (uintptr_t)0xf << shift; 5779 5780 if (val >= ((uintptr_t)1 << shift)) 5781 c[i++] = "0123456789abcdef"[(val & mask) >> shift]; 5782 shift -= 4; 5783 } 5784 5785 c[i++] = ')'; 5786 c[i] = '\0'; 5787 5788 #if defined(sun) 5789 debug_enter(c); 5790 #else 5791 kdb_enter(KDB_WHY_DTRACE, "breakpoint action"); 5792 #endif 5793 } 5794 5795 static void 5796 dtrace_action_panic(dtrace_ecb_t *ecb) 5797 { 5798 dtrace_probe_t *probe = ecb->dte_probe; 5799 5800 /* 5801 * It's impossible to be taking action on the NULL probe. 5802 */ 5803 ASSERT(probe != NULL); 5804 5805 if (dtrace_destructive_disallow) 5806 return; 5807 5808 if (dtrace_panicked != NULL) 5809 return; 5810 5811 if (dtrace_casptr(&dtrace_panicked, NULL, curthread) != NULL) 5812 return; 5813 5814 /* 5815 * We won the right to panic. (We want to be sure that only one 5816 * thread calls panic() from dtrace_probe(), and that panic() is 5817 * called exactly once.) 5818 */ 5819 dtrace_panic("dtrace: panic action at probe %s:%s:%s:%s (ecb %p)", 5820 probe->dtpr_provider->dtpv_name, probe->dtpr_mod, 5821 probe->dtpr_func, probe->dtpr_name, (void *)ecb); 5822 } 5823 5824 static void 5825 dtrace_action_raise(uint64_t sig) 5826 { 5827 if (dtrace_destructive_disallow) 5828 return; 5829 5830 if (sig >= NSIG) { 5831 DTRACE_CPUFLAG_SET(CPU_DTRACE_ILLOP); 5832 return; 5833 } 5834 5835 #if defined(sun) 5836 /* 5837 * raise() has a queue depth of 1 -- we ignore all subsequent 5838 * invocations of the raise() action. 
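 * (Illustrative D usage: a clause such as
 *
 *	syscall::write:entry /pid == $target/ { raise(SIGTERM); }
 *
 * queues at most one signal; a second raise() firing before the
 * first signal is delivered is simply dropped.)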
5839 */ 5840 if (curthread->t_dtrace_sig == 0) 5841 curthread->t_dtrace_sig = (uint8_t)sig; 5842 5843 curthread->t_sig_check = 1; 5844 aston(curthread); 5845 #else 5846 struct proc *p = curproc; 5847 PROC_LOCK(p); 5848 kern_psignal(p, sig); 5849 PROC_UNLOCK(p); 5850 #endif 5851 } 5852 5853 static void 5854 dtrace_action_stop(void) 5855 { 5856 if (dtrace_destructive_disallow) 5857 return; 5858 5859 #if defined(sun) 5860 if (!curthread->t_dtrace_stop) { 5861 curthread->t_dtrace_stop = 1; 5862 curthread->t_sig_check = 1; 5863 aston(curthread); 5864 } 5865 #else 5866 struct proc *p = curproc; 5867 PROC_LOCK(p); 5868 kern_psignal(p, SIGSTOP); 5869 PROC_UNLOCK(p); 5870 #endif 5871 } 5872 5873 static void 5874 dtrace_action_chill(dtrace_mstate_t *mstate, hrtime_t val) 5875 { 5876 hrtime_t now; 5877 volatile uint16_t *flags; 5878 #if defined(sun) 5879 cpu_t *cpu = CPU; 5880 #else 5881 cpu_t *cpu = &solaris_cpu[curcpu]; 5882 #endif 5883 5884 if (dtrace_destructive_disallow) 5885 return; 5886 5887 flags = (volatile uint16_t *)&cpu_core[cpu->cpu_id].cpuc_dtrace_flags; 5888 5889 now = dtrace_gethrtime(); 5890 5891 if (now - cpu->cpu_dtrace_chillmark > dtrace_chill_interval) { 5892 /* 5893 * We need to advance the mark to the current time. 5894 */ 5895 cpu->cpu_dtrace_chillmark = now; 5896 cpu->cpu_dtrace_chilled = 0; 5897 } 5898 5899 /* 5900 * Now check to see if the requested chill time would take us over 5901 * the maximum amount of time allowed in the chill interval. (Or 5902 * worse, if the calculation itself induces overflow.) 5903 */ 5904 if (cpu->cpu_dtrace_chilled + val > dtrace_chill_max || 5905 cpu->cpu_dtrace_chilled + val < cpu->cpu_dtrace_chilled) { 5906 *flags |= CPU_DTRACE_ILLOP; 5907 return; 5908 } 5909 5910 while (dtrace_gethrtime() - now < val) 5911 continue; 5912 5913 /* 5914 * Normally, we assure that the value of the variable "timestamp" does 5915 * not change within an ECB. The presence of chill() represents an 5916 * exception to this rule, however. 5917 */ 5918 mstate->dtms_present &= ~DTRACE_MSTATE_TIMESTAMP; 5919 cpu->cpu_dtrace_chilled += val; 5920 } 5921 5922 static void 5923 dtrace_action_ustack(dtrace_mstate_t *mstate, dtrace_state_t *state, 5924 uint64_t *buf, uint64_t arg) 5925 { 5926 int nframes = DTRACE_USTACK_NFRAMES(arg); 5927 int strsize = DTRACE_USTACK_STRSIZE(arg); 5928 uint64_t *pcs = &buf[1], *fps; 5929 char *str = (char *)&pcs[nframes]; 5930 int size, offs = 0, i, j; 5931 uintptr_t old = mstate->dtms_scratch_ptr, saved; 5932 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 5933 char *sym; 5934 5935 /* 5936 * Should be taking a faster path if string space has not been 5937 * allocated. 5938 */ 5939 ASSERT(strsize != 0); 5940 5941 /* 5942 * We will first allocate some temporary space for the frame pointers. 5943 */ 5944 fps = (uint64_t *)P2ROUNDUP(mstate->dtms_scratch_ptr, 8); 5945 size = (uintptr_t)fps - mstate->dtms_scratch_ptr + 5946 (nframes * sizeof (uint64_t)); 5947 5948 if (!DTRACE_INSCRATCH(mstate, size)) { 5949 /* 5950 * Not enough room for our frame pointers -- need to indicate 5951 * that we ran out of scratch space. 5952 */ 5953 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOSCRATCH); 5954 return; 5955 } 5956 5957 mstate->dtms_scratch_ptr += size; 5958 saved = mstate->dtms_scratch_ptr; 5959 5960 /* 5961 * Now get a stack with both program counters and frame pointers. 
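 * (Each (pcs[i], fps[i]) pair gathered here is handed to the
 * USTACK helper in the loop below so that it can translate the
 * frame into a symbol string.)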
5962 */ 5963 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 5964 dtrace_getufpstack(buf, fps, nframes + 1); 5965 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 5966 5967 /* 5968 * If that faulted, we're cooked. 5969 */ 5970 if (*flags & CPU_DTRACE_FAULT) 5971 goto out; 5972 5973 /* 5974 * Now we want to walk up the stack, calling the USTACK helper. For 5975 * each iteration, we restore the scratch pointer. 5976 */ 5977 for (i = 0; i < nframes; i++) { 5978 mstate->dtms_scratch_ptr = saved; 5979 5980 if (offs >= strsize) 5981 break; 5982 5983 sym = (char *)(uintptr_t)dtrace_helper( 5984 DTRACE_HELPER_ACTION_USTACK, 5985 mstate, state, pcs[i], fps[i]); 5986 5987 /* 5988 * If we faulted while running the helper, we're going to 5989 * clear the fault and null out the corresponding string. 5990 */ 5991 if (*flags & CPU_DTRACE_FAULT) { 5992 *flags &= ~CPU_DTRACE_FAULT; 5993 str[offs++] = '\0'; 5994 continue; 5995 } 5996 5997 if (sym == NULL) { 5998 str[offs++] = '\0'; 5999 continue; 6000 } 6001 6002 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6003 6004 /* 6005 * Now copy in the string that the helper returned to us. 6006 */ 6007 for (j = 0; offs + j < strsize; j++) { 6008 if ((str[offs + j] = sym[j]) == '\0') 6009 break; 6010 } 6011 6012 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6013 6014 offs += j + 1; 6015 } 6016 6017 if (offs >= strsize) { 6018 /* 6019 * If we didn't have room for all of the strings, we don't 6020 * abort processing -- this needn't be a fatal error -- but we 6021 * still want to increment a counter (dts_stkstroverflows) to 6022 * allow this condition to be warned about. (If this is from 6023 * a jstack() action, it is easily tuned via jstackstrsize.) 6024 */ 6025 dtrace_error(&state->dts_stkstroverflows); 6026 } 6027 6028 while (offs < strsize) 6029 str[offs++] = '\0'; 6030 6031 out: 6032 mstate->dtms_scratch_ptr = old; 6033 } 6034 6035 /* 6036 * If you're looking for the epicenter of DTrace, you just found it. This 6037 * is the function called by the provider to fire a probe -- from which all 6038 * subsequent probe-context DTrace activity emanates. 6039 */ 6040 void 6041 dtrace_probe(dtrace_id_t id, uintptr_t arg0, uintptr_t arg1, 6042 uintptr_t arg2, uintptr_t arg3, uintptr_t arg4) 6043 { 6044 processorid_t cpuid; 6045 dtrace_icookie_t cookie; 6046 dtrace_probe_t *probe; 6047 dtrace_mstate_t mstate; 6048 dtrace_ecb_t *ecb; 6049 dtrace_action_t *act; 6050 intptr_t offs; 6051 size_t size; 6052 int vtime, onintr; 6053 volatile uint16_t *flags; 6054 hrtime_t now; 6055 6056 if (panicstr != NULL) 6057 return; 6058 6059 #if defined(sun) 6060 /* 6061 * Kick out immediately if this CPU is still being born (in which case 6062 * curthread will be set to -1) or the current thread can't allow 6063 * probes in its current context. 6064 */ 6065 if (((uintptr_t)curthread & 1) || (curthread->t_flag & T_DONTDTRACE)) 6066 return; 6067 #endif 6068 6069 cookie = dtrace_interrupt_disable(); 6070 probe = dtrace_probes[id - 1]; 6071 cpuid = curcpu; 6072 onintr = CPU_ON_INTR(CPU); 6073 6074 if (!onintr && probe->dtpr_predcache != DTRACE_CACHEIDNONE && 6075 probe->dtpr_predcache == curthread->t_predcache) { 6076 /* 6077 * We have hit in the predicate cache; we know that 6078 * this predicate would evaluate to be false. 6079 */ 6080 dtrace_interrupt_enable(cookie); 6081 return; 6082 } 6083 6084 #if defined(sun) 6085 if (panic_quiesce) { 6086 #else 6087 if (panicstr != NULL) { 6088 #endif 6089 /* 6090 * We don't trace anything if we're panicking. 
6091 */ 6092 dtrace_interrupt_enable(cookie); 6093 return; 6094 } 6095 6096 now = dtrace_gethrtime(); 6097 vtime = dtrace_vtime_references != 0; 6098 6099 if (vtime && curthread->t_dtrace_start) 6100 curthread->t_dtrace_vtime += now - curthread->t_dtrace_start; 6101 6102 mstate.dtms_difo = NULL; 6103 mstate.dtms_probe = probe; 6104 mstate.dtms_strtok = 0; 6105 mstate.dtms_arg[0] = arg0; 6106 mstate.dtms_arg[1] = arg1; 6107 mstate.dtms_arg[2] = arg2; 6108 mstate.dtms_arg[3] = arg3; 6109 mstate.dtms_arg[4] = arg4; 6110 6111 flags = (volatile uint16_t *)&cpu_core[cpuid].cpuc_dtrace_flags; 6112 6113 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 6114 dtrace_predicate_t *pred = ecb->dte_predicate; 6115 dtrace_state_t *state = ecb->dte_state; 6116 dtrace_buffer_t *buf = &state->dts_buffer[cpuid]; 6117 dtrace_buffer_t *aggbuf = &state->dts_aggbuffer[cpuid]; 6118 dtrace_vstate_t *vstate = &state->dts_vstate; 6119 dtrace_provider_t *prov = probe->dtpr_provider; 6120 uint64_t tracememsize = 0; 6121 int committed = 0; 6122 caddr_t tomax; 6123 6124 /* 6125 * A little subtlety with the following (seemingly innocuous) 6126 * declaration of the automatic 'val': by looking at the 6127 * code, you might think that it could be declared in the 6128 * action processing loop, below. (That is, it's only used in 6129 * the action processing loop.) However, it must be declared 6130 * out of that scope because in the case of DIF expression 6131 * arguments to aggregating actions, one iteration of the 6132 * action loop will use the last iteration's value. 6133 */ 6134 uint64_t val = 0; 6135 6136 mstate.dtms_present = DTRACE_MSTATE_ARGS | DTRACE_MSTATE_PROBE; 6137 *flags &= ~CPU_DTRACE_ERROR; 6138 6139 if (prov == dtrace_provider) { 6140 /* 6141 * If dtrace itself is the provider of this probe, 6142 * we're only going to continue processing the ECB if 6143 * arg0 (the dtrace_state_t) is equal to the ECB's 6144 * creating state. (This prevents disjoint consumers 6145 * from seeing one another's metaprobes.) 6146 */ 6147 if (arg0 != (uint64_t)(uintptr_t)state) 6148 continue; 6149 } 6150 6151 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) { 6152 /* 6153 * We're not currently active. If our provider isn't 6154 * the dtrace pseudo provider, we're not interested. 6155 */ 6156 if (prov != dtrace_provider) 6157 continue; 6158 6159 /* 6160 * Now we must further check if we are in the BEGIN 6161 * probe. If we are, we will only continue processing 6162 * if we're still in WARMUP -- if one BEGIN enabling 6163 * has invoked the exit() action, we don't want to 6164 * evaluate subsequent BEGIN enablings. 6165 */ 6166 if (probe->dtpr_id == dtrace_probeid_begin && 6167 state->dts_activity != DTRACE_ACTIVITY_WARMUP) { 6168 ASSERT(state->dts_activity == 6169 DTRACE_ACTIVITY_DRAINING); 6170 continue; 6171 } 6172 } 6173 6174 if (ecb->dte_cond) { 6175 /* 6176 * If the dte_cond bits indicate that this 6177 * consumer is only allowed to see user-mode firings 6178 * of this probe, call the provider's dtps_usermode() 6179 * entry point to check that the probe was fired 6180 * while in a user context. Skip this ECB if that's 6181 * not the case. 6182 */ 6183 if ((ecb->dte_cond & DTRACE_COND_USERMODE) && 6184 prov->dtpv_pops.dtps_usermode(prov->dtpv_arg, 6185 probe->dtpr_id, probe->dtpr_arg) == 0) 6186 continue; 6187 6188 #if defined(sun) 6189 /* 6190 * This is more subtle than it looks. 
We have to be 6191 * absolutely certain that CRED() isn't going to 6192 * change out from under us so it's only legit to 6193 * examine that structure if we're in constrained 6194 * situations. Currently, the only time we'll perform this 6195 * check is when a non-super-user has enabled the 6196 * profile or syscall providers -- providers that 6197 * allow visibility of all processes. For the 6198 * profile case, the check above will ensure that 6199 * we're examining a user context. 6200 */ 6201 if (ecb->dte_cond & DTRACE_COND_OWNER) { 6202 cred_t *cr; 6203 cred_t *s_cr = 6204 ecb->dte_state->dts_cred.dcr_cred; 6205 proc_t *proc; 6206 6207 ASSERT(s_cr != NULL); 6208 6209 if ((cr = CRED()) == NULL || 6210 s_cr->cr_uid != cr->cr_uid || 6211 s_cr->cr_uid != cr->cr_ruid || 6212 s_cr->cr_uid != cr->cr_suid || 6213 s_cr->cr_gid != cr->cr_gid || 6214 s_cr->cr_gid != cr->cr_rgid || 6215 s_cr->cr_gid != cr->cr_sgid || 6216 (proc = ttoproc(curthread)) == NULL || 6217 (proc->p_flag & SNOCD)) 6218 continue; 6219 } 6220 6221 if (ecb->dte_cond & DTRACE_COND_ZONEOWNER) { 6222 cred_t *cr; 6223 cred_t *s_cr = 6224 ecb->dte_state->dts_cred.dcr_cred; 6225 6226 ASSERT(s_cr != NULL); 6227 6228 if ((cr = CRED()) == NULL || 6229 s_cr->cr_zone->zone_id != 6230 cr->cr_zone->zone_id) 6231 continue; 6232 } 6233 #endif 6234 } 6235 6236 if (now - state->dts_alive > dtrace_deadman_timeout) { 6237 /* 6238 * We seem to be dead. Unless we (a) have kernel 6239 * destructive permissions (b) have explicitly enabled 6240 * destructive actions and (c) destructive actions have 6241 * not been disabled, we're going to transition into 6242 * the KILLED state, from which no further processing 6243 * on this state will be performed. 6244 */ 6245 if (!dtrace_priv_kernel_destructive(state) || 6246 !state->dts_cred.dcr_destructive || 6247 dtrace_destructive_disallow) { 6248 void *activity = &state->dts_activity; 6249 dtrace_activity_t current; 6250 6251 do { 6252 current = state->dts_activity; 6253 } while (dtrace_cas32(activity, current, 6254 DTRACE_ACTIVITY_KILLED) != current); 6255 6256 continue; 6257 } 6258 } 6259 6260 if ((offs = dtrace_buffer_reserve(buf, ecb->dte_needed, 6261 ecb->dte_alignment, state, &mstate)) < 0) 6262 continue; 6263 6264 tomax = buf->dtb_tomax; 6265 ASSERT(tomax != NULL); 6266 6267 if (ecb->dte_size != 0) { 6268 dtrace_rechdr_t dtrh; 6269 if (!(mstate.dtms_present & DTRACE_MSTATE_TIMESTAMP)) { 6270 mstate.dtms_timestamp = dtrace_gethrtime(); 6271 mstate.dtms_present |= DTRACE_MSTATE_TIMESTAMP; 6272 } 6273 ASSERT3U(ecb->dte_size, >=, sizeof (dtrace_rechdr_t)); 6274 dtrh.dtrh_epid = ecb->dte_epid; 6275 DTRACE_RECORD_STORE_TIMESTAMP(&dtrh, 6276 mstate.dtms_timestamp); 6277 *((dtrace_rechdr_t *)(tomax + offs)) = dtrh; 6278 } 6279 6280 mstate.dtms_epid = ecb->dte_epid; 6281 mstate.dtms_present |= DTRACE_MSTATE_EPID; 6282 6283 if (state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) 6284 mstate.dtms_access = DTRACE_ACCESS_KERNEL; 6285 else 6286 mstate.dtms_access = 0; 6287 6288 if (pred != NULL) { 6289 dtrace_difo_t *dp = pred->dtp_difo; 6290 int rval; 6291 6292 rval = dtrace_dif_emulate(dp, &mstate, vstate, state); 6293 6294 if (!(*flags & CPU_DTRACE_ERROR) && !rval) { 6295 dtrace_cacheid_t cid = probe->dtpr_predcache; 6296 6297 if (cid != DTRACE_CACHEIDNONE && !onintr) { 6298 /* 6299 * Update the predicate cache...
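 *
 * (As a concrete illustration of the mechanism: a
 * cacheable predicate such as
 *
 *	/pid == 1234/
 *
 * evaluates the same way for every firing on a given
 * thread. Once it has evaluated to false here, storing
 * its cache ID in t_predcache lets the check at the
 * top of dtrace_probe() dismiss subsequent firings on
 * this thread without emulating any DIF at all.)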
6300 */ 6301 ASSERT(cid == pred->dtp_cacheid); 6302 curthread->t_predcache = cid; 6303 } 6304 6305 continue; 6306 } 6307 } 6308 6309 for (act = ecb->dte_action; !(*flags & CPU_DTRACE_ERROR) && 6310 act != NULL; act = act->dta_next) { 6311 size_t valoffs; 6312 dtrace_difo_t *dp; 6313 dtrace_recdesc_t *rec = &act->dta_rec; 6314 6315 size = rec->dtrd_size; 6316 valoffs = offs + rec->dtrd_offset; 6317 6318 if (DTRACEACT_ISAGG(act->dta_kind)) { 6319 uint64_t v = 0xbad; 6320 dtrace_aggregation_t *agg; 6321 6322 agg = (dtrace_aggregation_t *)act; 6323 6324 if ((dp = act->dta_difo) != NULL) 6325 v = dtrace_dif_emulate(dp, 6326 &mstate, vstate, state); 6327 6328 if (*flags & CPU_DTRACE_ERROR) 6329 continue; 6330 6331 /* 6332 * Note that we always pass the expression 6333 * value from the previous iteration of the 6334 * action loop. This value will only be used 6335 * if there is an expression argument to the 6336 * aggregating action, denoted by the 6337 * dtag_hasarg field. 6338 */ 6339 dtrace_aggregate(agg, buf, 6340 offs, aggbuf, v, val); 6341 continue; 6342 } 6343 6344 switch (act->dta_kind) { 6345 case DTRACEACT_STOP: 6346 if (dtrace_priv_proc_destructive(state)) 6347 dtrace_action_stop(); 6348 continue; 6349 6350 case DTRACEACT_BREAKPOINT: 6351 if (dtrace_priv_kernel_destructive(state)) 6352 dtrace_action_breakpoint(ecb); 6353 continue; 6354 6355 case DTRACEACT_PANIC: 6356 if (dtrace_priv_kernel_destructive(state)) 6357 dtrace_action_panic(ecb); 6358 continue; 6359 6360 case DTRACEACT_STACK: 6361 if (!dtrace_priv_kernel(state)) 6362 continue; 6363 6364 dtrace_getpcstack((pc_t *)(tomax + valoffs), 6365 size / sizeof (pc_t), probe->dtpr_aframes, 6366 DTRACE_ANCHORED(probe) ? NULL : 6367 (uint32_t *)arg0); 6368 continue; 6369 6370 case DTRACEACT_JSTACK: 6371 case DTRACEACT_USTACK: 6372 if (!dtrace_priv_proc(state)) 6373 continue; 6374 6375 /* 6376 * See comment in DIF_VAR_PID. 6377 */ 6378 if (DTRACE_ANCHORED(mstate.dtms_probe) && 6379 CPU_ON_INTR(CPU)) { 6380 int depth = DTRACE_USTACK_NFRAMES( 6381 rec->dtrd_arg) + 1; 6382 6383 dtrace_bzero((void *)(tomax + valoffs), 6384 DTRACE_USTACK_STRSIZE(rec->dtrd_arg) 6385 + depth * sizeof (uint64_t)); 6386 6387 continue; 6388 } 6389 6390 if (DTRACE_USTACK_STRSIZE(rec->dtrd_arg) != 0 && 6391 curproc->p_dtrace_helpers != NULL) { 6392 /* 6393 * This is the slow path -- we have 6394 * allocated string space, and we're 6395 * getting the stack of a process that 6396 * has helpers. Call into a separate 6397 * routine to perform this processing. 
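 *
 * (This is the path that a consumer's jstack()
 * action takes -- e.g., something like
 *
 *	dtrace -n 'profile-97 { jstack(40, 8192); }'
 *
 * where the second argument supplies the string
 * space tested for above. The command is
 * illustrative only; the jstackstrsize option sets
 * the same value globally.)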
*/ 6399 dtrace_action_ustack(&mstate, state, 6400 (uint64_t *)(tomax + valoffs), 6401 rec->dtrd_arg); 6402 continue; 6403 } 6404 6405 DTRACE_CPUFLAG_SET(CPU_DTRACE_NOFAULT); 6406 dtrace_getupcstack((uint64_t *) 6407 (tomax + valoffs), 6408 DTRACE_USTACK_NFRAMES(rec->dtrd_arg) + 1); 6409 DTRACE_CPUFLAG_CLEAR(CPU_DTRACE_NOFAULT); 6410 continue; 6411 6412 default: 6413 break; 6414 } 6415 6416 dp = act->dta_difo; 6417 ASSERT(dp != NULL); 6418 6419 val = dtrace_dif_emulate(dp, &mstate, vstate, state); 6420 6421 if (*flags & CPU_DTRACE_ERROR) 6422 continue; 6423 6424 switch (act->dta_kind) { 6425 case DTRACEACT_SPECULATE: { 6426 dtrace_rechdr_t *dtrh; 6427 6428 ASSERT(buf == &state->dts_buffer[cpuid]); 6429 buf = dtrace_speculation_buffer(state, 6430 cpuid, val); 6431 6432 if (buf == NULL) { 6433 *flags |= CPU_DTRACE_DROP; 6434 continue; 6435 } 6436 6437 offs = dtrace_buffer_reserve(buf, 6438 ecb->dte_needed, ecb->dte_alignment, 6439 state, NULL); 6440 6441 if (offs < 0) { 6442 *flags |= CPU_DTRACE_DROP; 6443 continue; 6444 } 6445 6446 tomax = buf->dtb_tomax; 6447 ASSERT(tomax != NULL); 6448 6449 if (ecb->dte_size == 0) 6450 continue; 6451 6452 ASSERT3U(ecb->dte_size, >=, 6453 sizeof (dtrace_rechdr_t)); 6454 dtrh = ((void *)(tomax + offs)); 6455 dtrh->dtrh_epid = ecb->dte_epid; 6456 /* 6457 * When the speculation is committed, all of 6458 * the records in the speculative buffer will 6459 * have their timestamps set to the commit 6460 * time. Until then, it is set to a sentinel 6461 * value, for debuggability. 6462 */ 6463 DTRACE_RECORD_STORE_TIMESTAMP(dtrh, UINT64_MAX); 6464 continue; 6465 } 6466 6467 case DTRACEACT_PRINTM: { 6468 /* The DIF returns a 'memref'. */ 6469 uintptr_t *memref = (uintptr_t *)(uintptr_t) val; 6470 6471 /* Get the size from the memref. */ 6472 size = memref[1]; 6473 6474 /* 6475 * Check if the size exceeds the allocated 6476 * buffer size. 6477 */ 6478 if (size + sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6479 /* Flag a drop! */ 6480 *flags |= CPU_DTRACE_DROP; 6481 continue; 6482 } 6483 6484 /* Store the size in the buffer first. */ 6485 DTRACE_STORE(uintptr_t, tomax, 6486 valoffs, size); 6487 6488 /* 6489 * Offset the buffer address to the start 6490 * of the data. 6491 */ 6492 valoffs += sizeof(uintptr_t); 6493 6494 /* 6495 * Reset to the memory address rather than 6496 * the memref array, then let the BYREF 6497 * code below do the work to store the 6498 * memory data in the buffer. 6499 */ 6500 val = memref[0]; 6501 break; 6502 } 6503 6504 case DTRACEACT_PRINTT: { 6505 /* The DIF returns a 'typeref'. */ 6506 uintptr_t *typeref = (uintptr_t *)(uintptr_t) val; 6507 char c = '\0' + 1; 6508 size_t s; 6509 6510 /* 6511 * Get the type string length and round it 6512 * up so that the data that follows is 6513 * aligned for easy access. 6514 */ 6515 size_t typs = strlen((char *) typeref[2]) + 1; 6516 typs = roundup(typs, sizeof(uintptr_t)); 6517 6518 /* 6519 * Get the size from the typeref using the 6520 * number of elements and the type size. 6521 */ 6522 size = typeref[1] * typeref[3]; 6523 6524 /* 6525 * Check if the size exceeds the allocated 6526 * buffer size. 6527 */ 6528 if (size + typs + 2 * sizeof(uintptr_t) > dp->dtdo_rtype.dtdt_size) { 6529 /* Flag a drop and skip the record, as in PRINTM above. */ 6530 *flags |= CPU_DTRACE_DROP; 6531 continue; 6532 } 6533 6534 /* Store the size in the buffer first. */ 6535 DTRACE_STORE(uintptr_t, tomax, 6536 valoffs, size); 6537 valoffs += sizeof(uintptr_t); 6538 6539 /* Store the type size in the buffer.
*/ 6540 DTRACE_STORE(uintptr_t, tomax, 6541 valoffs, typeref[3]); 6542 valoffs += sizeof(uintptr_t); 6543 6544 val = typeref[2]; 6545 6546 for (s = 0; s < typs; s++) { 6547 if (c != '\0') 6548 c = dtrace_load8(val++); 6549 6550 DTRACE_STORE(uint8_t, tomax, 6551 valoffs++, c); 6552 } 6553 6554 /* 6555 * Reset to the memory address rather than 6556 * the typeref array, then let the BYREF 6557 * code below do the work to store the 6558 * memory data in the buffer. 6559 */ 6560 val = typeref[0]; 6561 break; 6562 } 6563 6564 case DTRACEACT_CHILL: 6565 if (dtrace_priv_kernel_destructive(state)) 6566 dtrace_action_chill(&mstate, val); 6567 continue; 6568 6569 case DTRACEACT_RAISE: 6570 if (dtrace_priv_proc_destructive(state)) 6571 dtrace_action_raise(val); 6572 continue; 6573 6574 case DTRACEACT_COMMIT: 6575 ASSERT(!committed); 6576 6577 /* 6578 * We need to commit our buffer state. 6579 */ 6580 if (ecb->dte_size) 6581 buf->dtb_offset = offs + ecb->dte_size; 6582 buf = &state->dts_buffer[cpuid]; 6583 dtrace_speculation_commit(state, cpuid, val); 6584 committed = 1; 6585 continue; 6586 6587 case DTRACEACT_DISCARD: 6588 dtrace_speculation_discard(state, cpuid, val); 6589 continue; 6590 6591 case DTRACEACT_DIFEXPR: 6592 case DTRACEACT_LIBACT: 6593 case DTRACEACT_PRINTF: 6594 case DTRACEACT_PRINTA: 6595 case DTRACEACT_SYSTEM: 6596 case DTRACEACT_FREOPEN: 6597 case DTRACEACT_TRACEMEM: 6598 break; 6599 6600 case DTRACEACT_TRACEMEM_DYNSIZE: 6601 tracememsize = val; 6602 break; 6603 6604 case DTRACEACT_SYM: 6605 case DTRACEACT_MOD: 6606 if (!dtrace_priv_kernel(state)) 6607 continue; 6608 break; 6609 6610 case DTRACEACT_USYM: 6611 case DTRACEACT_UMOD: 6612 case DTRACEACT_UADDR: { 6613 #if defined(sun) 6614 struct pid *pid = curthread->t_procp->p_pidp; 6615 #endif 6616 6617 if (!dtrace_priv_proc(state)) 6618 continue; 6619 6620 DTRACE_STORE(uint64_t, tomax, 6621 #if defined(sun) 6622 valoffs, (uint64_t)pid->pid_id); 6623 #else 6624 valoffs, (uint64_t) curproc->p_pid); 6625 #endif 6626 DTRACE_STORE(uint64_t, tomax, 6627 valoffs + sizeof (uint64_t), val); 6628 6629 continue; 6630 } 6631 6632 case DTRACEACT_EXIT: { 6633 /* 6634 * For the exit action, we are going to attempt 6635 * to atomically set our activity to be 6636 * draining. If this fails (either because 6637 * another CPU has beat us to the exit action, 6638 * or because our current activity is something 6639 * other than ACTIVE or WARMUP), we will 6640 * continue. This assures that the exit action 6641 * can be successfully recorded at most once 6642 * when we're in the ACTIVE state. If we're 6643 * encountering the exit() action while in 6644 * COOLDOWN, however, we want to honor the new 6645 * status code. (We know that we're the only 6646 * thread in COOLDOWN, so there is no race.) 
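 *
 * In short, the transitions attempted below are:
 *
 *	WARMUP   -> DRAINING	(exit() in a BEGIN enabling)
 *	ACTIVE   -> DRAINING	(the common case)
 *	COOLDOWN -> COOLDOWN	(no state change; the new status
 *				code is still recorded)
 *
 * with a failed compare-and-swap causing the record to be
 * dropped.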
6647 */ 6648 void *activity = &state->dts_activity; 6649 dtrace_activity_t current = state->dts_activity; 6650 6651 if (current == DTRACE_ACTIVITY_COOLDOWN) 6652 break; 6653 6654 if (current != DTRACE_ACTIVITY_WARMUP) 6655 current = DTRACE_ACTIVITY_ACTIVE; 6656 6657 if (dtrace_cas32(activity, current, 6658 DTRACE_ACTIVITY_DRAINING) != current) { 6659 *flags |= CPU_DTRACE_DROP; 6660 continue; 6661 } 6662 6663 break; 6664 } 6665 6666 default: 6667 ASSERT(0); 6668 } 6669 6670 if (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF) { 6671 uintptr_t end = valoffs + size; 6672 6673 if (tracememsize != 0 && 6674 valoffs + tracememsize < end) { 6675 end = valoffs + tracememsize; 6676 tracememsize = 0; 6677 } 6678 6679 if (!dtrace_vcanload((void *)(uintptr_t)val, 6680 &dp->dtdo_rtype, &mstate, vstate)) 6681 continue; 6682 6683 /* 6684 * If this is a string, we're going to only 6685 * load until we find the zero byte -- after 6686 * which we'll store zero bytes. 6687 */ 6688 if (dp->dtdo_rtype.dtdt_kind == 6689 DIF_TYPE_STRING) { 6690 char c = '\0' + 1; 6691 int intuple = act->dta_intuple; 6692 size_t s; 6693 6694 for (s = 0; s < size; s++) { 6695 if (c != '\0') 6696 c = dtrace_load8(val++); 6697 6698 DTRACE_STORE(uint8_t, tomax, 6699 valoffs++, c); 6700 6701 if (c == '\0' && intuple) 6702 break; 6703 } 6704 6705 continue; 6706 } 6707 6708 while (valoffs < end) { 6709 DTRACE_STORE(uint8_t, tomax, valoffs++, 6710 dtrace_load8(val++)); 6711 } 6712 6713 continue; 6714 } 6715 6716 switch (size) { 6717 case 0: 6718 break; 6719 6720 case sizeof (uint8_t): 6721 DTRACE_STORE(uint8_t, tomax, valoffs, val); 6722 break; 6723 case sizeof (uint16_t): 6724 DTRACE_STORE(uint16_t, tomax, valoffs, val); 6725 break; 6726 case sizeof (uint32_t): 6727 DTRACE_STORE(uint32_t, tomax, valoffs, val); 6728 break; 6729 case sizeof (uint64_t): 6730 DTRACE_STORE(uint64_t, tomax, valoffs, val); 6731 break; 6732 default: 6733 /* 6734 * Any other size should have been returned by 6735 * reference, not by value. 6736 */ 6737 ASSERT(0); 6738 break; 6739 } 6740 } 6741 6742 if (*flags & CPU_DTRACE_DROP) 6743 continue; 6744 6745 if (*flags & CPU_DTRACE_FAULT) { 6746 int ndx; 6747 dtrace_action_t *err; 6748 6749 buf->dtb_errors++; 6750 6751 if (probe->dtpr_id == dtrace_probeid_error) { 6752 /* 6753 * There's nothing we can do -- we had an 6754 * error on the error probe. We bump an 6755 * error counter to at least indicate that 6756 * this condition happened. 6757 */ 6758 dtrace_error(&state->dts_dblerrors); 6759 continue; 6760 } 6761 6762 if (vtime) { 6763 /* 6764 * Before recursing on dtrace_probe(), we 6765 * need to explicitly clear out our start 6766 * time to prevent it from being accumulated 6767 * into t_dtrace_vtime. 6768 */ 6769 curthread->t_dtrace_start = 0; 6770 } 6771 6772 /* 6773 * Iterate over the actions to figure out which action 6774 * we were processing when we experienced the error. 6775 * Note that act points _past_ the faulting action; if 6776 * act is ecb->dte_action, the fault was in the 6777 * predicate, if it's ecb->dte_action->dta_next it's 6778 * in action #1, and so on. 6779 */ 6780 for (err = ecb->dte_action, ndx = 0; 6781 err != act; err = err->dta_next, ndx++) 6782 continue; 6783 6784 dtrace_probe_error(state, ecb->dte_epid, ndx, 6785 (mstate.dtms_present & DTRACE_MSTATE_FLTOFFS) ? 
6786 mstate.dtms_fltoffs : -1, DTRACE_FLAGS2FLT(*flags), 6787 cpu_core[cpuid].cpuc_dtrace_illval); 6788 6789 continue; 6790 } 6791 6792 if (!committed) 6793 buf->dtb_offset = offs + ecb->dte_size; 6794 } 6795 6796 if (vtime) 6797 curthread->t_dtrace_start = dtrace_gethrtime(); 6798 6799 dtrace_interrupt_enable(cookie); 6800 } 6801 6802 /* 6803 * DTrace Probe Hashing Functions 6804 * 6805 * The functions in this section (and indeed, the functions in remaining 6806 * sections) are not _called_ from probe context. (Any exceptions to this are 6807 * marked with a "Note:".) Rather, they are called from elsewhere in the 6808 * DTrace framework to look-up probes in, add probes to and remove probes from 6809 * the DTrace probe hashes. (Each probe is hashed by each element of the 6810 * probe tuple -- allowing for fast lookups, regardless of what was 6811 * specified.) 6812 */ 6813 static uint_t 6814 dtrace_hash_str(const char *p) 6815 { 6816 unsigned int g; 6817 uint_t hval = 0; 6818 6819 while (*p) { 6820 hval = (hval << 4) + *p++; 6821 if ((g = (hval & 0xf0000000)) != 0) 6822 hval ^= g >> 24; 6823 hval &= ~g; 6824 } 6825 return (hval); 6826 } 6827 6828 static dtrace_hash_t * 6829 dtrace_hash_create(uintptr_t stroffs, uintptr_t nextoffs, uintptr_t prevoffs) 6830 { 6831 dtrace_hash_t *hash = kmem_zalloc(sizeof (dtrace_hash_t), KM_SLEEP); 6832 6833 hash->dth_stroffs = stroffs; 6834 hash->dth_nextoffs = nextoffs; 6835 hash->dth_prevoffs = prevoffs; 6836 6837 hash->dth_size = 1; 6838 hash->dth_mask = hash->dth_size - 1; 6839 6840 hash->dth_tab = kmem_zalloc(hash->dth_size * 6841 sizeof (dtrace_hashbucket_t *), KM_SLEEP); 6842 6843 return (hash); 6844 } 6845 6846 static void 6847 dtrace_hash_destroy(dtrace_hash_t *hash) 6848 { 6849 #ifdef DEBUG 6850 int i; 6851 6852 for (i = 0; i < hash->dth_size; i++) 6853 ASSERT(hash->dth_tab[i] == NULL); 6854 #endif 6855 6856 kmem_free(hash->dth_tab, 6857 hash->dth_size * sizeof (dtrace_hashbucket_t *)); 6858 kmem_free(hash, sizeof (dtrace_hash_t)); 6859 } 6860 6861 static void 6862 dtrace_hash_resize(dtrace_hash_t *hash) 6863 { 6864 int size = hash->dth_size, i, ndx; 6865 int new_size = hash->dth_size << 1; 6866 int new_mask = new_size - 1; 6867 dtrace_hashbucket_t **new_tab, *bucket, *next; 6868 6869 ASSERT((new_size & new_mask) == 0); 6870 6871 new_tab = kmem_zalloc(new_size * sizeof (void *), KM_SLEEP); 6872 6873 for (i = 0; i < size; i++) { 6874 for (bucket = hash->dth_tab[i]; bucket != NULL; bucket = next) { 6875 dtrace_probe_t *probe = bucket->dthb_chain; 6876 6877 ASSERT(probe != NULL); 6878 ndx = DTRACE_HASHSTR(hash, probe) & new_mask; 6879 6880 next = bucket->dthb_next; 6881 bucket->dthb_next = new_tab[ndx]; 6882 new_tab[ndx] = bucket; 6883 } 6884 } 6885 6886 kmem_free(hash->dth_tab, hash->dth_size * sizeof (void *)); 6887 hash->dth_tab = new_tab; 6888 hash->dth_size = new_size; 6889 hash->dth_mask = new_mask; 6890 } 6891 6892 static void 6893 dtrace_hash_add(dtrace_hash_t *hash, dtrace_probe_t *new) 6894 { 6895 int hashval = DTRACE_HASHSTR(hash, new); 6896 int ndx = hashval & hash->dth_mask; 6897 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6898 dtrace_probe_t **nextp, **prevp; 6899 6900 for (; bucket != NULL; bucket = bucket->dthb_next) { 6901 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, new)) 6902 goto add; 6903 } 6904 6905 if ((hash->dth_nbuckets >> 1) > hash->dth_size) { 6906 dtrace_hash_resize(hash); 6907 dtrace_hash_add(hash, new); 6908 return; 6909 } 6910 6911 bucket = kmem_zalloc(sizeof (dtrace_hashbucket_t), KM_SLEEP); 6912 bucket->dthb_next 
= hash->dth_tab[ndx]; 6913 hash->dth_tab[ndx] = bucket; 6914 hash->dth_nbuckets++; 6915 6916 add: 6917 nextp = DTRACE_HASHNEXT(hash, new); 6918 ASSERT(*nextp == NULL && *(DTRACE_HASHPREV(hash, new)) == NULL); 6919 *nextp = bucket->dthb_chain; 6920 6921 if (bucket->dthb_chain != NULL) { 6922 prevp = DTRACE_HASHPREV(hash, bucket->dthb_chain); 6923 ASSERT(*prevp == NULL); 6924 *prevp = new; 6925 } 6926 6927 bucket->dthb_chain = new; 6928 bucket->dthb_len++; 6929 } 6930 6931 static dtrace_probe_t * 6932 dtrace_hash_lookup(dtrace_hash_t *hash, dtrace_probe_t *template) 6933 { 6934 int hashval = DTRACE_HASHSTR(hash, template); 6935 int ndx = hashval & hash->dth_mask; 6936 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6937 6938 for (; bucket != NULL; bucket = bucket->dthb_next) { 6939 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6940 return (bucket->dthb_chain); 6941 } 6942 6943 return (NULL); 6944 } 6945 6946 static int 6947 dtrace_hash_collisions(dtrace_hash_t *hash, dtrace_probe_t *template) 6948 { 6949 int hashval = DTRACE_HASHSTR(hash, template); 6950 int ndx = hashval & hash->dth_mask; 6951 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6952 6953 for (; bucket != NULL; bucket = bucket->dthb_next) { 6954 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, template)) 6955 return (bucket->dthb_len); 6956 } 6957 6958 return (0); 6959 } 6960 6961 static void 6962 dtrace_hash_remove(dtrace_hash_t *hash, dtrace_probe_t *probe) 6963 { 6964 int ndx = DTRACE_HASHSTR(hash, probe) & hash->dth_mask; 6965 dtrace_hashbucket_t *bucket = hash->dth_tab[ndx]; 6966 6967 dtrace_probe_t **prevp = DTRACE_HASHPREV(hash, probe); 6968 dtrace_probe_t **nextp = DTRACE_HASHNEXT(hash, probe); 6969 6970 /* 6971 * Find the bucket that we're removing this probe from. 6972 */ 6973 for (; bucket != NULL; bucket = bucket->dthb_next) { 6974 if (DTRACE_HASHEQ(hash, bucket->dthb_chain, probe)) 6975 break; 6976 } 6977 6978 ASSERT(bucket != NULL); 6979 6980 if (*prevp == NULL) { 6981 if (*nextp == NULL) { 6982 /* 6983 * The removed probe was the only probe on this 6984 * bucket; we need to remove the bucket. 6985 */ 6986 dtrace_hashbucket_t *b = hash->dth_tab[ndx]; 6987 6988 ASSERT(bucket->dthb_chain == probe); 6989 ASSERT(b != NULL); 6990 6991 if (b == bucket) { 6992 hash->dth_tab[ndx] = bucket->dthb_next; 6993 } else { 6994 while (b->dthb_next != bucket) 6995 b = b->dthb_next; 6996 b->dthb_next = bucket->dthb_next; 6997 } 6998 6999 ASSERT(hash->dth_nbuckets > 0); 7000 hash->dth_nbuckets--; 7001 kmem_free(bucket, sizeof (dtrace_hashbucket_t)); 7002 return; 7003 } 7004 7005 bucket->dthb_chain = *nextp; 7006 } else { 7007 *(DTRACE_HASHNEXT(hash, *prevp)) = *nextp; 7008 } 7009 7010 if (*nextp != NULL) 7011 *(DTRACE_HASHPREV(hash, *nextp)) = *prevp; 7012 } 7013 7014 /* 7015 * DTrace Utility Functions 7016 * 7017 * These are random utility functions that are _not_ called from probe context. 7018 */ 7019 static int 7020 dtrace_badattr(const dtrace_attribute_t *a) 7021 { 7022 return (a->dtat_name > DTRACE_STABILITY_MAX || 7023 a->dtat_data > DTRACE_STABILITY_MAX || 7024 a->dtat_class > DTRACE_CLASS_MAX); 7025 } 7026 7027 /* 7028 * Return a duplicate copy of a string. If the specified string is NULL, 7029 * this function returns a zero-length string. 7030 */ 7031 static char * 7032 dtrace_strdup(const char *str) 7033 { 7034 char *new = kmem_zalloc((str != NULL ? 
strlen(str) : 0) + 1, KM_SLEEP); 7035 7036 if (str != NULL) 7037 (void) strcpy(new, str); 7038 7039 return (new); 7040 } 7041 7042 #define DTRACE_ISALPHA(c) \ 7043 (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) 7044 7045 static int 7046 dtrace_badname(const char *s) 7047 { 7048 char c; 7049 7050 if (s == NULL || (c = *s++) == '\0') 7051 return (0); 7052 7053 if (!DTRACE_ISALPHA(c) && c != '-' && c != '_' && c != '.') 7054 return (1); 7055 7056 while ((c = *s++) != '\0') { 7057 if (!DTRACE_ISALPHA(c) && (c < '0' || c > '9') && 7058 c != '-' && c != '_' && c != '.' && c != '`') 7059 return (1); 7060 } 7061 7062 return (0); 7063 } 7064 7065 static void 7066 dtrace_cred2priv(cred_t *cr, uint32_t *privp, uid_t *uidp, zoneid_t *zoneidp) 7067 { 7068 uint32_t priv; 7069 7070 #if defined(sun) 7071 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 7072 /* 7073 * For DTRACE_PRIV_ALL, the uid and zoneid don't matter. 7074 */ 7075 priv = DTRACE_PRIV_ALL; 7076 } else { 7077 *uidp = crgetuid(cr); 7078 *zoneidp = crgetzoneid(cr); 7079 7080 priv = 0; 7081 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) 7082 priv |= DTRACE_PRIV_KERNEL | DTRACE_PRIV_USER; 7083 else if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) 7084 priv |= DTRACE_PRIV_USER; 7085 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) 7086 priv |= DTRACE_PRIV_PROC; 7087 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 7088 priv |= DTRACE_PRIV_OWNER; 7089 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 7090 priv |= DTRACE_PRIV_ZONEOWNER; 7091 } 7092 #else 7093 priv = DTRACE_PRIV_ALL; 7094 #endif 7095 7096 *privp = priv; 7097 } 7098 7099 #ifdef DTRACE_ERRDEBUG 7100 static void 7101 dtrace_errdebug(const char *str) 7102 { 7103 int hval = dtrace_hash_str(str) % DTRACE_ERRHASHSZ; 7104 int occupied = 0; 7105 7106 mutex_enter(&dtrace_errlock); 7107 dtrace_errlast = str; 7108 dtrace_errthread = curthread; 7109 7110 while (occupied++ < DTRACE_ERRHASHSZ) { 7111 if (dtrace_errhash[hval].dter_msg == str) { 7112 dtrace_errhash[hval].dter_count++; 7113 goto out; 7114 } 7115 7116 if (dtrace_errhash[hval].dter_msg != NULL) { 7117 hval = (hval + 1) % DTRACE_ERRHASHSZ; 7118 continue; 7119 } 7120 7121 dtrace_errhash[hval].dter_msg = str; 7122 dtrace_errhash[hval].dter_count = 1; 7123 goto out; 7124 } 7125 7126 panic("dtrace: undersized error hash"); 7127 out: 7128 mutex_exit(&dtrace_errlock); 7129 } 7130 #endif 7131 7132 /* 7133 * DTrace Matching Functions 7134 * 7135 * These functions are used to match groups of probes, given some elements of 7136 * a probe tuple, or some globbed expressions for elements of a probe tuple. 7137 */ 7138 static int 7139 dtrace_match_priv(const dtrace_probe_t *prp, uint32_t priv, uid_t uid, 7140 zoneid_t zoneid) 7141 { 7142 if (priv != DTRACE_PRIV_ALL) { 7143 uint32_t ppriv = prp->dtpr_provider->dtpv_priv.dtpp_flags; 7144 uint32_t match = priv & ppriv; 7145 7146 /* 7147 * No PRIV_DTRACE_* privileges... 7148 */ 7149 if ((priv & (DTRACE_PRIV_PROC | DTRACE_PRIV_USER | 7150 DTRACE_PRIV_KERNEL)) == 0) 7151 return (0); 7152 7153 /* 7154 * No matching bits, but there were bits to match... 7155 */ 7156 if (match == 0 && ppriv != 0) 7157 return (0); 7158 7159 /* 7160 * Need to have permissions to the process, but don't... 7161 */ 7162 if (((ppriv & ~match) & DTRACE_PRIV_OWNER) != 0 && 7163 uid != prp->dtpr_provider->dtpv_priv.dtpp_uid) { 7164 return (0); 7165 } 7166 7167 /* 7168 * Need to be in the same zone unless we possess the 7169 * privilege to examine all zones. 
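 *
 * (A worked example of these checks: if a provider registered itself
 * with DTRACE_PRIV_PROC | DTRACE_PRIV_OWNER and the consumer lacks
 * the owner privilege, the DTRACE_PRIV_OWNER bit goes unmatched, so
 * the uid check above must pass -- the consumer's uid must equal the
 * provider's dtpp_uid. The zone check below is exactly analogous,
 * keyed on dtpp_zoneid.)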
7170 */ 7171 if (((ppriv & ~match) & DTRACE_PRIV_ZONEOWNER) != 0 && 7172 zoneid != prp->dtpr_provider->dtpv_priv.dtpp_zoneid) { 7173 return (0); 7174 } 7175 } 7176 7177 return (1); 7178 } 7179 7180 /* 7181 * dtrace_match_probe compares a dtrace_probe_t to a pre-compiled key, which 7182 * consists of input pattern strings and an ops-vector to evaluate them. 7183 * This function returns >0 for match, 0 for no match, and <0 for error. 7184 */ 7185 static int 7186 dtrace_match_probe(const dtrace_probe_t *prp, const dtrace_probekey_t *pkp, 7187 uint32_t priv, uid_t uid, zoneid_t zoneid) 7188 { 7189 dtrace_provider_t *pvp = prp->dtpr_provider; 7190 int rv; 7191 7192 if (pvp->dtpv_defunct) 7193 return (0); 7194 7195 if ((rv = pkp->dtpk_pmatch(pvp->dtpv_name, pkp->dtpk_prov, 0)) <= 0) 7196 return (rv); 7197 7198 if ((rv = pkp->dtpk_mmatch(prp->dtpr_mod, pkp->dtpk_mod, 0)) <= 0) 7199 return (rv); 7200 7201 if ((rv = pkp->dtpk_fmatch(prp->dtpr_func, pkp->dtpk_func, 0)) <= 0) 7202 return (rv); 7203 7204 if ((rv = pkp->dtpk_nmatch(prp->dtpr_name, pkp->dtpk_name, 0)) <= 0) 7205 return (rv); 7206 7207 if (dtrace_match_priv(prp, priv, uid, zoneid) == 0) 7208 return (0); 7209 7210 return (rv); 7211 } 7212 7213 /* 7214 * dtrace_match_glob() is a safe kernel implementation of the gmatch(3GEN) 7215 * interface for matching a glob pattern 'p' to an input string 's'. Unlike 7216 * libc's version, the kernel version only applies to 8-bit ASCII strings. 7217 * In addition, all of the recursion cases except for '*' matching have been 7218 * unwound. For '*', we still implement recursive evaluation, but a depth 7219 * counter is maintained and matching is aborted if we recurse too deep. 7220 * The function returns 0 if no match, >0 if match, and <0 if recursion error. 7221 */ 7222 static int 7223 dtrace_match_glob(const char *s, const char *p, int depth) 7224 { 7225 const char *olds; 7226 char s1, c; 7227 int gs; 7228 7229 if (depth > DTRACE_PROBEKEY_MAXDEPTH) 7230 return (-1); 7231 7232 if (s == NULL) 7233 s = ""; /* treat NULL as empty string */ 7234 7235 top: 7236 olds = s; 7237 s1 = *s++; 7238 7239 if (p == NULL) 7240 return (0); 7241 7242 if ((c = *p++) == '\0') 7243 return (s1 == '\0'); 7244 7245 switch (c) { 7246 case '[': { 7247 int ok = 0, notflag = 0; 7248 char lc = '\0'; 7249 7250 if (s1 == '\0') 7251 return (0); 7252 7253 if (*p == '!') { 7254 notflag = 1; 7255 p++; 7256 } 7257 7258 if ((c = *p++) == '\0') 7259 return (0); 7260 7261 do { 7262 if (c == '-' && lc != '\0' && *p != ']') { 7263 if ((c = *p++) == '\0') 7264 return (0); 7265 if (c == '\\' && (c = *p++) == '\0') 7266 return (0); 7267 7268 if (notflag) { 7269 if (s1 < lc || s1 > c) 7270 ok++; 7271 else 7272 return (0); 7273 } else if (lc <= s1 && s1 <= c) 7274 ok++; 7275 7276 } else if (c == '\\' && (c = *p++) == '\0') 7277 return (0); 7278 7279 lc = c; /* save left-hand 'c' for next iteration */ 7280 7281 if (notflag) { 7282 if (s1 != c) 7283 ok++; 7284 else 7285 return (0); 7286 } else if (s1 == c) 7287 ok++; 7288 7289 if ((c = *p++) == '\0') 7290 return (0); 7291 7292 } while (c != ']'); 7293 7294 if (ok) 7295 goto top; 7296 7297 return (0); 7298 } 7299 7300 case '\\': 7301 if ((c = *p++) == '\0') 7302 return (0); 7303 /*FALLTHRU*/ 7304 7305 default: 7306 if (c != s1) 7307 return (0); 7308 /*FALLTHRU*/ 7309 7310 case '?': 7311 if (s1 != '\0') 7312 goto top; 7313 return (0); 7314 7315 case '*': 7316 while (*p == '*') 7317 p++; /* consecutive *'s are identical to a single one */ 7318 7319 if (*p == '\0') 7320 return (1); 7321 7322 for (s = 
olds; *s != '\0'; s++) { 7323 if ((gs = dtrace_match_glob(s, p, depth + 1)) != 0) 7324 return (gs); 7325 } 7326 7327 return (0); 7328 } 7329 } 7330 7331 /*ARGSUSED*/ 7332 static int 7333 dtrace_match_string(const char *s, const char *p, int depth) 7334 { 7335 return (s != NULL && strcmp(s, p) == 0); 7336 } 7337 7338 /*ARGSUSED*/ 7339 static int 7340 dtrace_match_nul(const char *s, const char *p, int depth) 7341 { 7342 return (1); /* always match the empty pattern */ 7343 } 7344 7345 /*ARGSUSED*/ 7346 static int 7347 dtrace_match_nonzero(const char *s, const char *p, int depth) 7348 { 7349 return (s != NULL && s[0] != '\0'); 7350 } 7351 7352 static int 7353 dtrace_match(const dtrace_probekey_t *pkp, uint32_t priv, uid_t uid, 7354 zoneid_t zoneid, int (*matched)(dtrace_probe_t *, void *), void *arg) 7355 { 7356 dtrace_probe_t template, *probe; 7357 dtrace_hash_t *hash = NULL; 7358 int len, best = INT_MAX, nmatched = 0; 7359 dtrace_id_t i; 7360 7361 ASSERT(MUTEX_HELD(&dtrace_lock)); 7362 7363 /* 7364 * If the probe ID is specified in the key, just lookup by ID and 7365 * invoke the match callback once if a matching probe is found. 7366 */ 7367 if (pkp->dtpk_id != DTRACE_IDNONE) { 7368 if ((probe = dtrace_probe_lookup_id(pkp->dtpk_id)) != NULL && 7369 dtrace_match_probe(probe, pkp, priv, uid, zoneid) > 0) { 7370 (void) (*matched)(probe, arg); 7371 nmatched++; 7372 } 7373 return (nmatched); 7374 } 7375 7376 template.dtpr_mod = (char *)pkp->dtpk_mod; 7377 template.dtpr_func = (char *)pkp->dtpk_func; 7378 template.dtpr_name = (char *)pkp->dtpk_name; 7379 7380 /* 7381 * We want to find the most distinct of the module name, function 7382 * name, and name. So for each one that is not a glob pattern or 7383 * empty string, we perform a lookup in the corresponding hash and 7384 * use the hash table with the fewest collisions to do our search. 7385 */ 7386 if (pkp->dtpk_mmatch == &dtrace_match_string && 7387 (len = dtrace_hash_collisions(dtrace_bymod, &template)) < best) { 7388 best = len; 7389 hash = dtrace_bymod; 7390 } 7391 7392 if (pkp->dtpk_fmatch == &dtrace_match_string && 7393 (len = dtrace_hash_collisions(dtrace_byfunc, &template)) < best) { 7394 best = len; 7395 hash = dtrace_byfunc; 7396 } 7397 7398 if (pkp->dtpk_nmatch == &dtrace_match_string && 7399 (len = dtrace_hash_collisions(dtrace_byname, &template)) < best) { 7400 best = len; 7401 hash = dtrace_byname; 7402 } 7403 7404 /* 7405 * If we did not select a hash table, iterate over every probe and 7406 * invoke our callback for each one that matches our input probe key. 7407 */ 7408 if (hash == NULL) { 7409 for (i = 0; i < dtrace_nprobes; i++) { 7410 if ((probe = dtrace_probes[i]) == NULL || 7411 dtrace_match_probe(probe, pkp, priv, uid, 7412 zoneid) <= 0) 7413 continue; 7414 7415 nmatched++; 7416 7417 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7418 break; 7419 } 7420 7421 return (nmatched); 7422 } 7423 7424 /* 7425 * If we selected a hash table, iterate over each probe of the same key 7426 * name and invoke the callback for every probe that matches the other 7427 * attributes of our input probe key. 
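 *
 * For example, for a description like "syscall::read:entry", the
 * module component is empty (dtrace_match_nul) while the function
 * ("read") and name ("entry") components are literal strings; the
 * walk below therefore proceeds through whichever of dtrace_byfunc
 * and dtrace_byname reported fewer collisions above.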
7428 */ 7429 for (probe = dtrace_hash_lookup(hash, &template); probe != NULL; 7430 probe = *(DTRACE_HASHNEXT(hash, probe))) { 7431 7432 if (dtrace_match_probe(probe, pkp, priv, uid, zoneid) <= 0) 7433 continue; 7434 7435 nmatched++; 7436 7437 if ((*matched)(probe, arg) != DTRACE_MATCH_NEXT) 7438 break; 7439 } 7440 7441 return (nmatched); 7442 } 7443 7444 /* 7445 * Return the function pointer dtrace_probecmp() should use to compare the 7446 * specified pattern with a string. For NULL or empty patterns, we select 7447 * dtrace_match_nul(). For glob pattern strings, we use dtrace_match_glob(). 7448 * For non-empty non-glob strings, we use dtrace_match_string(). 7449 */ 7450 static dtrace_probekey_f * 7451 dtrace_probekey_func(const char *p) 7452 { 7453 char c; 7454 7455 if (p == NULL || *p == '\0') 7456 return (&dtrace_match_nul); 7457 7458 while ((c = *p++) != '\0') { 7459 if (c == '[' || c == '?' || c == '*' || c == '\\') 7460 return (&dtrace_match_glob); 7461 } 7462 7463 return (&dtrace_match_string); 7464 } 7465 7466 /* 7467 * Build a probe comparison key for use with dtrace_match_probe() from the 7468 * given probe description. By convention, a null key only matches anchored 7469 * probes: if each field is the empty string, reset dtpk_fmatch to 7470 * dtrace_match_nonzero(). 7471 */ 7472 static void 7473 dtrace_probekey(dtrace_probedesc_t *pdp, dtrace_probekey_t *pkp) 7474 { 7475 pkp->dtpk_prov = pdp->dtpd_provider; 7476 pkp->dtpk_pmatch = dtrace_probekey_func(pdp->dtpd_provider); 7477 7478 pkp->dtpk_mod = pdp->dtpd_mod; 7479 pkp->dtpk_mmatch = dtrace_probekey_func(pdp->dtpd_mod); 7480 7481 pkp->dtpk_func = pdp->dtpd_func; 7482 pkp->dtpk_fmatch = dtrace_probekey_func(pdp->dtpd_func); 7483 7484 pkp->dtpk_name = pdp->dtpd_name; 7485 pkp->dtpk_nmatch = dtrace_probekey_func(pdp->dtpd_name); 7486 7487 pkp->dtpk_id = pdp->dtpd_id; 7488 7489 if (pkp->dtpk_id == DTRACE_IDNONE && 7490 pkp->dtpk_pmatch == &dtrace_match_nul && 7491 pkp->dtpk_mmatch == &dtrace_match_nul && 7492 pkp->dtpk_fmatch == &dtrace_match_nul && 7493 pkp->dtpk_nmatch == &dtrace_match_nul) 7494 pkp->dtpk_fmatch = &dtrace_match_nonzero; 7495 } 7496 7497 /* 7498 * DTrace Provider-to-Framework API Functions 7499 * 7500 * These functions implement much of the Provider-to-Framework API, as 7501 * described in <sys/dtrace.h>. The parts of the API not in this section are 7502 * the functions in the API for probe management (found below), and 7503 * dtrace_probe() itself (found above). 7504 */ 7505 7506 /* 7507 * Register the calling provider with the DTrace framework. This should 7508 * generally be called by DTrace providers in their attach(9E) entry point. 7509 */ 7510 int 7511 dtrace_register(const char *name, const dtrace_pattr_t *pap, uint32_t priv, 7512 cred_t *cr, const dtrace_pops_t *pops, void *arg, dtrace_provider_id_t *idp) 7513 { 7514 dtrace_provider_t *provider; 7515 7516 if (name == NULL || pap == NULL || pops == NULL || idp == NULL) { 7517 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7518 "arguments", name ? 
name : "<NULL>"); 7519 return (EINVAL); 7520 } 7521 7522 if (name[0] == '\0' || dtrace_badname(name)) { 7523 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7524 "provider name", name); 7525 return (EINVAL); 7526 } 7527 7528 if ((pops->dtps_provide == NULL && pops->dtps_provide_module == NULL) || 7529 pops->dtps_enable == NULL || pops->dtps_disable == NULL || 7530 pops->dtps_destroy == NULL || 7531 ((pops->dtps_resume == NULL) != (pops->dtps_suspend == NULL))) { 7532 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7533 "provider ops", name); 7534 return (EINVAL); 7535 } 7536 7537 if (dtrace_badattr(&pap->dtpa_provider) || 7538 dtrace_badattr(&pap->dtpa_mod) || 7539 dtrace_badattr(&pap->dtpa_func) || 7540 dtrace_badattr(&pap->dtpa_name) || 7541 dtrace_badattr(&pap->dtpa_args)) { 7542 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7543 "provider attributes", name); 7544 return (EINVAL); 7545 } 7546 7547 if (priv & ~DTRACE_PRIV_ALL) { 7548 cmn_err(CE_WARN, "failed to register provider '%s': invalid " 7549 "privilege attributes", name); 7550 return (EINVAL); 7551 } 7552 7553 if ((priv & DTRACE_PRIV_KERNEL) && 7554 (priv & (DTRACE_PRIV_USER | DTRACE_PRIV_OWNER)) && 7555 pops->dtps_usermode == NULL) { 7556 cmn_err(CE_WARN, "failed to register provider '%s': need " 7557 "dtps_usermode() op for given privilege attributes", name); 7558 return (EINVAL); 7559 } 7560 7561 provider = kmem_zalloc(sizeof (dtrace_provider_t), KM_SLEEP); 7562 provider->dtpv_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 7563 (void) strcpy(provider->dtpv_name, name); 7564 7565 provider->dtpv_attr = *pap; 7566 provider->dtpv_priv.dtpp_flags = priv; 7567 if (cr != NULL) { 7568 provider->dtpv_priv.dtpp_uid = crgetuid(cr); 7569 provider->dtpv_priv.dtpp_zoneid = crgetzoneid(cr); 7570 } 7571 provider->dtpv_pops = *pops; 7572 7573 if (pops->dtps_provide == NULL) { 7574 ASSERT(pops->dtps_provide_module != NULL); 7575 provider->dtpv_pops.dtps_provide = 7576 (void (*)(void *, dtrace_probedesc_t *))dtrace_nullop; 7577 } 7578 7579 if (pops->dtps_provide_module == NULL) { 7580 ASSERT(pops->dtps_provide != NULL); 7581 provider->dtpv_pops.dtps_provide_module = 7582 (void (*)(void *, modctl_t *))dtrace_nullop; 7583 } 7584 7585 if (pops->dtps_suspend == NULL) { 7586 ASSERT(pops->dtps_resume == NULL); 7587 provider->dtpv_pops.dtps_suspend = 7588 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7589 provider->dtpv_pops.dtps_resume = 7590 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop; 7591 } 7592 7593 provider->dtpv_arg = arg; 7594 *idp = (dtrace_provider_id_t)provider; 7595 7596 if (pops == &dtrace_provider_ops) { 7597 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7598 ASSERT(MUTEX_HELD(&dtrace_lock)); 7599 ASSERT(dtrace_anon.dta_enabling == NULL); 7600 7601 /* 7602 * We make sure that the DTrace provider is at the head of 7603 * the provider chain. 7604 */ 7605 provider->dtpv_next = dtrace_provider; 7606 dtrace_provider = provider; 7607 return (0); 7608 } 7609 7610 mutex_enter(&dtrace_provider_lock); 7611 mutex_enter(&dtrace_lock); 7612 7613 /* 7614 * If there is at least one provider registered, we'll add this 7615 * provider after the first provider. 
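 *
 * That is, the DTrace provider itself always remains at the head of
 * the chain:
 *
 *	before:	dtrace -> A -> B
 *	after:	dtrace -> new -> A -> B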
7616 */ 7617 if (dtrace_provider != NULL) { 7618 provider->dtpv_next = dtrace_provider->dtpv_next; 7619 dtrace_provider->dtpv_next = provider; 7620 } else { 7621 dtrace_provider = provider; 7622 } 7623 7624 if (dtrace_retained != NULL) { 7625 dtrace_enabling_provide(provider); 7626 7627 /* 7628 * Now we need to call dtrace_enabling_matchall() -- which 7629 * will acquire cpu_lock and dtrace_lock. We therefore need 7630 * to drop all of our locks before calling into it... 7631 */ 7632 mutex_exit(&dtrace_lock); 7633 mutex_exit(&dtrace_provider_lock); 7634 dtrace_enabling_matchall(); 7635 7636 return (0); 7637 } 7638 7639 mutex_exit(&dtrace_lock); 7640 mutex_exit(&dtrace_provider_lock); 7641 7642 return (0); 7643 } 7644 7645 /* 7646 * Unregister the specified provider from the DTrace framework. This should 7647 * generally be called by DTrace providers in their detach(9E) entry point. 7648 */ 7649 int 7650 dtrace_unregister(dtrace_provider_id_t id) 7651 { 7652 dtrace_provider_t *old = (dtrace_provider_t *)id; 7653 dtrace_provider_t *prev = NULL; 7654 int i, self = 0, noreap = 0; 7655 dtrace_probe_t *probe, *first = NULL; 7656 7657 if (old->dtpv_pops.dtps_enable == 7658 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop) { 7659 /* 7660 * If DTrace itself is the provider, we're called with locks 7661 * already held. 7662 */ 7663 ASSERT(old == dtrace_provider); 7664 #if defined(sun) 7665 ASSERT(dtrace_devi != NULL); 7666 #endif 7667 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 7668 ASSERT(MUTEX_HELD(&dtrace_lock)); 7669 self = 1; 7670 7671 if (dtrace_provider->dtpv_next != NULL) { 7672 /* 7673 * There's another provider here; return failure. 7674 */ 7675 return (EBUSY); 7676 } 7677 } else { 7678 mutex_enter(&dtrace_provider_lock); 7679 #if defined(sun) 7680 mutex_enter(&mod_lock); 7681 #endif 7682 mutex_enter(&dtrace_lock); 7683 } 7684 7685 /* 7686 * If anyone has /dev/dtrace open, or if there are anonymous enabled 7687 * probes, we refuse to let providers slither away, unless this 7688 * provider has already been explicitly invalidated. 7689 */ 7690 if (!old->dtpv_defunct && 7691 (dtrace_opens || (dtrace_anon.dta_state != NULL && 7692 dtrace_anon.dta_state->dts_necbs > 0))) { 7693 if (!self) { 7694 mutex_exit(&dtrace_lock); 7695 #if defined(sun) 7696 mutex_exit(&mod_lock); 7697 #endif 7698 mutex_exit(&dtrace_provider_lock); 7699 } 7700 return (EBUSY); 7701 } 7702 7703 /* 7704 * Attempt to destroy the probes associated with this provider. 7705 */ 7706 for (i = 0; i < dtrace_nprobes; i++) { 7707 if ((probe = dtrace_probes[i]) == NULL) 7708 continue; 7709 7710 if (probe->dtpr_provider != old) 7711 continue; 7712 7713 if (probe->dtpr_ecb == NULL) 7714 continue; 7715 7716 /* 7717 * If we are trying to unregister a defunct provider, and the 7718 * provider was made defunct within the interval dictated by 7719 * dtrace_unregister_defunct_reap, we'll (asynchronously) 7720 * attempt to reap our enablings. To denote that the provider 7721 * should reattempt to unregister itself at some point in the 7722 * future, we will return a differentiable error code (EAGAIN 7723 * instead of EBUSY) in this case. 
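 *
 * A provider's detach routine can therefore distinguish the two
 * cases; a sketch (not a pattern this file mandates):
 *
 *	rv = dtrace_unregister(id);
 *	if (rv == EAGAIN) {
 *		... fail the detach; retry after the reap has run ...
 *	}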
7724 */ 7725 if (dtrace_gethrtime() - old->dtpv_defunct > 7726 dtrace_unregister_defunct_reap) 7727 noreap = 1; 7728 7729 if (!self) { 7730 mutex_exit(&dtrace_lock); 7731 #if defined(sun) 7732 mutex_exit(&mod_lock); 7733 #endif 7734 mutex_exit(&dtrace_provider_lock); 7735 } 7736 7737 if (noreap) 7738 return (EBUSY); 7739 7740 (void) taskq_dispatch(dtrace_taskq, 7741 (task_func_t *)dtrace_enabling_reap, NULL, TQ_SLEEP); 7742 7743 return (EAGAIN); 7744 } 7745 7746 /* 7747 * All of the probes for this provider are disabled; we can safely 7748 * remove all of them from their hash chains and from the probe array. 7749 */ 7750 for (i = 0; i < dtrace_nprobes; i++) { 7751 if ((probe = dtrace_probes[i]) == NULL) 7752 continue; 7753 7754 if (probe->dtpr_provider != old) 7755 continue; 7756 7757 dtrace_probes[i] = NULL; 7758 7759 dtrace_hash_remove(dtrace_bymod, probe); 7760 dtrace_hash_remove(dtrace_byfunc, probe); 7761 dtrace_hash_remove(dtrace_byname, probe); 7762 7763 if (first == NULL) { 7764 first = probe; 7765 probe->dtpr_nextmod = NULL; 7766 } else { 7767 probe->dtpr_nextmod = first; 7768 first = probe; 7769 } 7770 } 7771 7772 /* 7773 * The provider's probes have been removed from the hash chains and 7774 * from the probe array. Now issue a dtrace_sync() to be sure that 7775 * everyone has cleared out from any probe array processing. 7776 */ 7777 dtrace_sync(); 7778 7779 for (probe = first; probe != NULL; probe = first) { 7780 first = probe->dtpr_nextmod; 7781 7782 old->dtpv_pops.dtps_destroy(old->dtpv_arg, probe->dtpr_id, 7783 probe->dtpr_arg); 7784 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7785 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7786 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7787 #if defined(sun) 7788 vmem_free(dtrace_arena, (void *)(uintptr_t)(probe->dtpr_id), 1); 7789 #else 7790 free_unr(dtrace_arena, probe->dtpr_id); 7791 #endif 7792 kmem_free(probe, sizeof (dtrace_probe_t)); 7793 } 7794 7795 if ((prev = dtrace_provider) == old) { 7796 #if defined(sun) 7797 ASSERT(self || dtrace_devi == NULL); 7798 ASSERT(old->dtpv_next == NULL || dtrace_devi == NULL); 7799 #endif 7800 dtrace_provider = old->dtpv_next; 7801 } else { 7802 while (prev != NULL && prev->dtpv_next != old) 7803 prev = prev->dtpv_next; 7804 7805 if (prev == NULL) { 7806 panic("attempt to unregister non-existent " 7807 "dtrace provider %p\n", (void *)id); 7808 } 7809 7810 prev->dtpv_next = old->dtpv_next; 7811 } 7812 7813 if (!self) { 7814 mutex_exit(&dtrace_lock); 7815 #if defined(sun) 7816 mutex_exit(&mod_lock); 7817 #endif 7818 mutex_exit(&dtrace_provider_lock); 7819 } 7820 7821 kmem_free(old->dtpv_name, strlen(old->dtpv_name) + 1); 7822 kmem_free(old, sizeof (dtrace_provider_t)); 7823 7824 return (0); 7825 } 7826 7827 /* 7828 * Invalidate the specified provider. All subsequent probe lookups for the 7829 * specified provider will fail, but its probes will not be removed. 7830 */ 7831 void 7832 dtrace_invalidate(dtrace_provider_id_t id) 7833 { 7834 dtrace_provider_t *pvp = (dtrace_provider_t *)id; 7835 7836 ASSERT(pvp->dtpv_pops.dtps_enable != 7837 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7838 7839 mutex_enter(&dtrace_provider_lock); 7840 mutex_enter(&dtrace_lock); 7841 7842 pvp->dtpv_defunct = dtrace_gethrtime(); 7843 7844 mutex_exit(&dtrace_lock); 7845 mutex_exit(&dtrace_provider_lock); 7846 } 7847 7848 /* 7849 * Indicate whether or not DTrace has attached. 
7850 */ 7851 int 7852 dtrace_attached(void) 7853 { 7854 /* 7855 * dtrace_provider will be non-NULL iff the DTrace driver has 7856 * attached. (It's non-NULL because DTrace is always itself a 7857 * provider.) 7858 */ 7859 return (dtrace_provider != NULL); 7860 } 7861 7862 /* 7863 * Remove all the unenabled probes for the given provider. This function is 7864 * not unlike dtrace_unregister(), except that it doesn't remove the provider 7865 * -- just as many of its associated probes as it can. 7866 */ 7867 int 7868 dtrace_condense(dtrace_provider_id_t id) 7869 { 7870 dtrace_provider_t *prov = (dtrace_provider_t *)id; 7871 int i; 7872 dtrace_probe_t *probe; 7873 7874 /* 7875 * Make sure this isn't the dtrace provider itself. 7876 */ 7877 ASSERT(prov->dtpv_pops.dtps_enable != 7878 (void (*)(void *, dtrace_id_t, void *))dtrace_nullop); 7879 7880 mutex_enter(&dtrace_provider_lock); 7881 mutex_enter(&dtrace_lock); 7882 7883 /* 7884 * Attempt to destroy the probes associated with this provider. 7885 */ 7886 for (i = 0; i < dtrace_nprobes; i++) { 7887 if ((probe = dtrace_probes[i]) == NULL) 7888 continue; 7889 7890 if (probe->dtpr_provider != prov) 7891 continue; 7892 7893 if (probe->dtpr_ecb != NULL) 7894 continue; 7895 7896 dtrace_probes[i] = NULL; 7897 7898 dtrace_hash_remove(dtrace_bymod, probe); 7899 dtrace_hash_remove(dtrace_byfunc, probe); 7900 dtrace_hash_remove(dtrace_byname, probe); 7901 7902 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, i + 1, 7903 probe->dtpr_arg); 7904 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 7905 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 7906 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 7907 kmem_free(probe, sizeof (dtrace_probe_t)); 7908 #if defined(sun) 7909 vmem_free(dtrace_arena, (void *)((uintptr_t)i + 1), 1); 7910 #else 7911 free_unr(dtrace_arena, i + 1); 7912 #endif 7913 } 7914 7915 mutex_exit(&dtrace_lock); 7916 mutex_exit(&dtrace_provider_lock); 7917 7918 return (0); 7919 } 7920 7921 /* 7922 * DTrace Probe Management Functions 7923 * 7924 * The functions in this section perform the DTrace probe management, 7925 * including functions to create probes, look-up probes, and call into the 7926 * providers to request that probes be provided. Some of these functions are 7927 * in the Provider-to-Framework API; these functions can be identified by the 7928 * fact that they are not declared "static". 7929 */ 7930 7931 /* 7932 * Create a probe with the specified module name, function name, and name. 
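 *
 * A provider typically calls this from its dtps_provide() entry
 * point. As an illustrative sketch only -- "foo" and foo_id are
 * hypothetical -- a provider might create foo:mod:func:entry with:
 *
 *	(void) dtrace_probe_create(foo_id, "mod", "func", "entry",
 *	    0, NULL);
 *
 * where foo_id is the dtrace_provider_id_t handed back by
 * dtrace_register(), 0 is the number of artificial frames, and the
 * final argument is the probe-private argument later passed back to
 * the provider's other entry points.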
7933 */ 7934 dtrace_id_t 7935 dtrace_probe_create(dtrace_provider_id_t prov, const char *mod, 7936 const char *func, const char *name, int aframes, void *arg) 7937 { 7938 dtrace_probe_t *probe, **probes; 7939 dtrace_provider_t *provider = (dtrace_provider_t *)prov; 7940 dtrace_id_t id; 7941 7942 if (provider == dtrace_provider) { 7943 ASSERT(MUTEX_HELD(&dtrace_lock)); 7944 } else { 7945 mutex_enter(&dtrace_lock); 7946 } 7947 7948 #if defined(sun) 7949 id = (dtrace_id_t)(uintptr_t)vmem_alloc(dtrace_arena, 1, 7950 VM_BESTFIT | VM_SLEEP); 7951 #else 7952 id = alloc_unr(dtrace_arena); 7953 #endif 7954 probe = kmem_zalloc(sizeof (dtrace_probe_t), KM_SLEEP); 7955 7956 probe->dtpr_id = id; 7957 probe->dtpr_gen = dtrace_probegen++; 7958 probe->dtpr_mod = dtrace_strdup(mod); 7959 probe->dtpr_func = dtrace_strdup(func); 7960 probe->dtpr_name = dtrace_strdup(name); 7961 probe->dtpr_arg = arg; 7962 probe->dtpr_aframes = aframes; 7963 probe->dtpr_provider = provider; 7964 7965 dtrace_hash_add(dtrace_bymod, probe); 7966 dtrace_hash_add(dtrace_byfunc, probe); 7967 dtrace_hash_add(dtrace_byname, probe); 7968 7969 if (id - 1 >= dtrace_nprobes) { 7970 size_t osize = dtrace_nprobes * sizeof (dtrace_probe_t *); 7971 size_t nsize = osize << 1; 7972 7973 if (nsize == 0) { 7974 ASSERT(osize == 0); 7975 ASSERT(dtrace_probes == NULL); 7976 nsize = sizeof (dtrace_probe_t *); 7977 } 7978 7979 probes = kmem_zalloc(nsize, KM_SLEEP); 7980 7981 if (dtrace_probes == NULL) { 7982 ASSERT(osize == 0); 7983 dtrace_probes = probes; 7984 dtrace_nprobes = 1; 7985 } else { 7986 dtrace_probe_t **oprobes = dtrace_probes; 7987 7988 bcopy(oprobes, probes, osize); 7989 dtrace_membar_producer(); 7990 dtrace_probes = probes; 7991 7992 dtrace_sync(); 7993 7994 /* 7995 * All CPUs are now seeing the new probes array; we can 7996 * safely free the old array. 7997 */ 7998 kmem_free(oprobes, osize); 7999 dtrace_nprobes <<= 1; 8000 } 8001 8002 ASSERT(id - 1 < dtrace_nprobes); 8003 } 8004 8005 ASSERT(dtrace_probes[id - 1] == NULL); 8006 dtrace_probes[id - 1] = probe; 8007 8008 if (provider != dtrace_provider) 8009 mutex_exit(&dtrace_lock); 8010 8011 return (id); 8012 } 8013 8014 static dtrace_probe_t * 8015 dtrace_probe_lookup_id(dtrace_id_t id) 8016 { 8017 ASSERT(MUTEX_HELD(&dtrace_lock)); 8018 8019 if (id == 0 || id > dtrace_nprobes) 8020 return (NULL); 8021 8022 return (dtrace_probes[id - 1]); 8023 } 8024 8025 static int 8026 dtrace_probe_lookup_match(dtrace_probe_t *probe, void *arg) 8027 { 8028 *((dtrace_id_t *)arg) = probe->dtpr_id; 8029 8030 return (DTRACE_MATCH_DONE); 8031 } 8032 8033 /* 8034 * Look up a probe based on provider and one or more of module name, function 8035 * name and probe name. 8036 */ 8037 dtrace_id_t 8038 dtrace_probe_lookup(dtrace_provider_id_t prid, char *mod, 8039 char *func, char *name) 8040 { 8041 dtrace_probekey_t pkey; 8042 dtrace_id_t id; 8043 int match; 8044 8045 pkey.dtpk_prov = ((dtrace_provider_t *)prid)->dtpv_name; 8046 pkey.dtpk_pmatch = &dtrace_match_string; 8047 pkey.dtpk_mod = mod; 8048 pkey.dtpk_mmatch = mod ? &dtrace_match_string : &dtrace_match_nul; 8049 pkey.dtpk_func = func; 8050 pkey.dtpk_fmatch = func ? &dtrace_match_string : &dtrace_match_nul; 8051 pkey.dtpk_name = name; 8052 pkey.dtpk_nmatch = name ? 
&dtrace_match_string : &dtrace_match_nul; 8053 pkey.dtpk_id = DTRACE_IDNONE; 8054 8055 mutex_enter(&dtrace_lock); 8056 match = dtrace_match(&pkey, DTRACE_PRIV_ALL, 0, 0, 8057 dtrace_probe_lookup_match, &id); 8058 mutex_exit(&dtrace_lock); 8059 8060 ASSERT(match == 1 || match == 0); 8061 return (match ? id : 0); 8062 } 8063 8064 /* 8065 * Returns the probe argument associated with the specified probe. 8066 */ 8067 void * 8068 dtrace_probe_arg(dtrace_provider_id_t id, dtrace_id_t pid) 8069 { 8070 dtrace_probe_t *probe; 8071 void *rval = NULL; 8072 8073 mutex_enter(&dtrace_lock); 8074 8075 if ((probe = dtrace_probe_lookup_id(pid)) != NULL && 8076 probe->dtpr_provider == (dtrace_provider_t *)id) 8077 rval = probe->dtpr_arg; 8078 8079 mutex_exit(&dtrace_lock); 8080 8081 return (rval); 8082 } 8083 8084 /* 8085 * Copy a probe into a probe description. 8086 */ 8087 static void 8088 dtrace_probe_description(const dtrace_probe_t *prp, dtrace_probedesc_t *pdp) 8089 { 8090 bzero(pdp, sizeof (dtrace_probedesc_t)); 8091 pdp->dtpd_id = prp->dtpr_id; 8092 8093 (void) strncpy(pdp->dtpd_provider, 8094 prp->dtpr_provider->dtpv_name, DTRACE_PROVNAMELEN - 1); 8095 8096 (void) strncpy(pdp->dtpd_mod, prp->dtpr_mod, DTRACE_MODNAMELEN - 1); 8097 (void) strncpy(pdp->dtpd_func, prp->dtpr_func, DTRACE_FUNCNAMELEN - 1); 8098 (void) strncpy(pdp->dtpd_name, prp->dtpr_name, DTRACE_NAMELEN - 1); 8099 } 8100 8101 #if !defined(sun) 8102 static int 8103 dtrace_probe_provide_cb(linker_file_t lf, void *arg) 8104 { 8105 dtrace_provider_t *prv = (dtrace_provider_t *) arg; 8106 8107 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, lf); 8108 8109 return (0); 8110 } 8111 #endif 8112 8113 8114 /* 8115 * Called to indicate that a probe -- or probes -- should be provided by a 8116 * specified provider. If the specified description is NULL, the provider will 8117 * be told to provide all of its probes. (This is done whenever a new 8118 * consumer comes along, or whenever a retained enabling is to be matched.) If 8119 * the specified description is non-NULL, the provider is given the 8120 * opportunity to dynamically provide the specified probe, allowing providers 8121 * to support the creation of probes on-the-fly. (So-called _autocreated_ 8122 * probes.) If the provider is NULL, the operations will be applied to all 8123 * providers; if the provider is non-NULL the operations will only be applied 8124 * to the specified provider. The dtrace_provider_lock must be held, and the 8125 * dtrace_lock must _not_ be held -- the provider's dtps_provide() operation 8126 * will need to grab the dtrace_lock when it reenters the framework through 8127 * dtrace_probe_lookup(), dtrace_probe_create(), etc. 8128 */ 8129 static void 8130 dtrace_probe_provide(dtrace_probedesc_t *desc, dtrace_provider_t *prv) 8131 { 8132 #if defined(sun) 8133 modctl_t *ctl; 8134 #endif 8135 int all = 0; 8136 8137 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 8138 8139 if (prv == NULL) { 8140 all = 1; 8141 prv = dtrace_provider; 8142 } 8143 8144 do { 8145 /* 8146 * First, call the blanket provide operation. 8147 */ 8148 prv->dtpv_pops.dtps_provide(prv->dtpv_arg, desc); 8149 8150 #if defined(sun) 8151 /* 8152 * Now call the per-module provide operation. We will grab 8153 * mod_lock to prevent the list from being modified. Note 8154 * that this also prevents the mod_busy bits from changing. 8155 * (mod_busy can only be changed with mod_lock held.)
8156 */ 8157 mutex_enter(&mod_lock); 8158 8159 ctl = &modules; 8160 do { 8161 if (ctl->mod_busy || ctl->mod_mp == NULL) 8162 continue; 8163 8164 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 8165 8166 } while ((ctl = ctl->mod_next) != &modules); 8167 8168 mutex_exit(&mod_lock); 8169 #else 8170 (void) linker_file_foreach(dtrace_probe_provide_cb, prv); 8171 #endif 8172 } while (all && (prv = prv->dtpv_next) != NULL); 8173 } 8174 8175 #if defined(sun) 8176 /* 8177 * Iterate over each probe, and call the Framework-to-Provider API function 8178 * denoted by offs. 8179 */ 8180 static void 8181 dtrace_probe_foreach(uintptr_t offs) 8182 { 8183 dtrace_provider_t *prov; 8184 void (*func)(void *, dtrace_id_t, void *); 8185 dtrace_probe_t *probe; 8186 dtrace_icookie_t cookie; 8187 int i; 8188 8189 /* 8190 * We disable interrupts to walk through the probe array. This is 8191 * safe -- the dtrace_sync() in dtrace_unregister() assures that we 8192 * won't see stale data. 8193 */ 8194 cookie = dtrace_interrupt_disable(); 8195 8196 for (i = 0; i < dtrace_nprobes; i++) { 8197 if ((probe = dtrace_probes[i]) == NULL) 8198 continue; 8199 8200 if (probe->dtpr_ecb == NULL) { 8201 /* 8202 * This probe isn't enabled -- don't call the function. 8203 */ 8204 continue; 8205 } 8206 8207 prov = probe->dtpr_provider; 8208 func = *((void(**)(void *, dtrace_id_t, void *)) 8209 ((uintptr_t)&prov->dtpv_pops + offs)); 8210 8211 func(prov->dtpv_arg, i + 1, probe->dtpr_arg); 8212 } 8213 8214 dtrace_interrupt_enable(cookie); 8215 } 8216 #endif 8217 8218 static int 8219 dtrace_probe_enable(dtrace_probedesc_t *desc, dtrace_enabling_t *enab) 8220 { 8221 dtrace_probekey_t pkey; 8222 uint32_t priv; 8223 uid_t uid; 8224 zoneid_t zoneid; 8225 8226 ASSERT(MUTEX_HELD(&dtrace_lock)); 8227 dtrace_ecb_create_cache = NULL; 8228 8229 if (desc == NULL) { 8230 /* 8231 * If we're passed a NULL description, we're being asked to 8232 * create an ECB with a NULL probe. 
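 * A non-NULL description is instead compiled into a probe key and
 * matched; for example (purely illustrative), a description of
 * syscall::read:entry yields a key matching provider "syscall",
 * function "read" and name "entry", and every matching probe is
 * given an ECB via dtrace_ecb_create_enable().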
8233 */ 8234 (void) dtrace_ecb_create_enable(NULL, enab); 8235 return (0); 8236 } 8237 8238 dtrace_probekey(desc, &pkey); 8239 dtrace_cred2priv(enab->dten_vstate->dtvs_state->dts_cred.dcr_cred, 8240 &priv, &uid, &zoneid); 8241 8242 return (dtrace_match(&pkey, priv, uid, zoneid, dtrace_ecb_create_enable, 8243 enab)); 8244 } 8245 8246 /* 8247 * DTrace Helper Provider Functions 8248 */ 8249 static void 8250 dtrace_dofattr2attr(dtrace_attribute_t *attr, const dof_attr_t dofattr) 8251 { 8252 attr->dtat_name = DOF_ATTR_NAME(dofattr); 8253 attr->dtat_data = DOF_ATTR_DATA(dofattr); 8254 attr->dtat_class = DOF_ATTR_CLASS(dofattr); 8255 } 8256 8257 static void 8258 dtrace_dofprov2hprov(dtrace_helper_provdesc_t *hprov, 8259 const dof_provider_t *dofprov, char *strtab) 8260 { 8261 hprov->dthpv_provname = strtab + dofprov->dofpv_name; 8262 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_provider, 8263 dofprov->dofpv_provattr); 8264 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_mod, 8265 dofprov->dofpv_modattr); 8266 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_func, 8267 dofprov->dofpv_funcattr); 8268 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_name, 8269 dofprov->dofpv_nameattr); 8270 dtrace_dofattr2attr(&hprov->dthpv_pattr.dtpa_args, 8271 dofprov->dofpv_argsattr); 8272 } 8273 8274 static void 8275 dtrace_helper_provide_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8276 { 8277 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8278 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8279 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 8280 dof_provider_t *provider; 8281 dof_probe_t *probe; 8282 uint32_t *off, *enoff; 8283 uint8_t *arg; 8284 char *strtab; 8285 uint_t i, nprobes; 8286 dtrace_helper_provdesc_t dhpv; 8287 dtrace_helper_probedesc_t dhpb; 8288 dtrace_meta_t *meta = dtrace_meta_pid; 8289 dtrace_mops_t *mops = &meta->dtm_mops; 8290 void *parg; 8291 8292 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8293 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8294 provider->dofpv_strtab * dof->dofh_secsize); 8295 prb_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8296 provider->dofpv_probes * dof->dofh_secsize); 8297 arg_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8298 provider->dofpv_prargs * dof->dofh_secsize); 8299 off_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8300 provider->dofpv_proffs * dof->dofh_secsize); 8301 8302 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8303 off = (uint32_t *)(uintptr_t)(daddr + off_sec->dofs_offset); 8304 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 8305 enoff = NULL; 8306 8307 /* 8308 * See dtrace_helper_provider_validate(). 8309 */ 8310 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 8311 provider->dofpv_prenoffs != DOF_SECT_NONE) { 8312 enoff_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8313 provider->dofpv_prenoffs * dof->dofh_secsize); 8314 enoff = (uint32_t *)(uintptr_t)(daddr + enoff_sec->dofs_offset); 8315 } 8316 8317 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 8318 8319 /* 8320 * Create the provider. 8321 */ 8322 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8323 8324 if ((parg = mops->dtms_provide_pid(meta->dtm_arg, &dhpv, pid)) == NULL) 8325 return; 8326 8327 meta->dtm_count++; 8328 8329 /* 8330 * Create the probes. 
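 * Each dof_probe_t in the provider's probe section becomes one
 * dtms_create_probe() call into the meta-provider.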
8331 */ 8332 for (i = 0; i < nprobes; i++) { 8333 probe = (dof_probe_t *)(uintptr_t)(daddr + 8334 prb_sec->dofs_offset + i * prb_sec->dofs_entsize); 8335 8336 dhpb.dthpb_mod = dhp->dofhp_mod; 8337 dhpb.dthpb_func = strtab + probe->dofpr_func; 8338 dhpb.dthpb_name = strtab + probe->dofpr_name; 8339 dhpb.dthpb_base = probe->dofpr_addr; 8340 dhpb.dthpb_offs = off + probe->dofpr_offidx; 8341 dhpb.dthpb_noffs = probe->dofpr_noffs; 8342 if (enoff != NULL) { 8343 dhpb.dthpb_enoffs = enoff + probe->dofpr_enoffidx; 8344 dhpb.dthpb_nenoffs = probe->dofpr_nenoffs; 8345 } else { 8346 dhpb.dthpb_enoffs = NULL; 8347 dhpb.dthpb_nenoffs = 0; 8348 } 8349 dhpb.dthpb_args = arg + probe->dofpr_argidx; 8350 dhpb.dthpb_nargc = probe->dofpr_nargc; 8351 dhpb.dthpb_xargc = probe->dofpr_xargc; 8352 dhpb.dthpb_ntypes = strtab + probe->dofpr_nargv; 8353 dhpb.dthpb_xtypes = strtab + probe->dofpr_xargv; 8354 8355 mops->dtms_create_probe(meta->dtm_arg, parg, &dhpb); 8356 } 8357 } 8358 8359 static void 8360 dtrace_helper_provide(dof_helper_t *dhp, pid_t pid) 8361 { 8362 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8363 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8364 int i; 8365 8366 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8367 8368 for (i = 0; i < dof->dofh_secnum; i++) { 8369 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8370 dof->dofh_secoff + i * dof->dofh_secsize); 8371 8372 if (sec->dofs_type != DOF_SECT_PROVIDER) 8373 continue; 8374 8375 dtrace_helper_provide_one(dhp, sec, pid); 8376 } 8377 8378 /* 8379 * We may have just created probes, so we must now rematch against 8380 * any retained enablings. Note that this call will acquire both 8381 * cpu_lock and dtrace_lock; the fact that we are holding 8382 * dtrace_meta_lock now is what defines the ordering with respect to 8383 * these three locks. 8384 */ 8385 dtrace_enabling_matchall(); 8386 } 8387 8388 static void 8389 dtrace_helper_provider_remove_one(dof_helper_t *dhp, dof_sec_t *sec, pid_t pid) 8390 { 8391 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8392 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8393 dof_sec_t *str_sec; 8394 dof_provider_t *provider; 8395 char *strtab; 8396 dtrace_helper_provdesc_t dhpv; 8397 dtrace_meta_t *meta = dtrace_meta_pid; 8398 dtrace_mops_t *mops = &meta->dtm_mops; 8399 8400 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 8401 str_sec = (dof_sec_t *)(uintptr_t)(daddr + dof->dofh_secoff + 8402 provider->dofpv_strtab * dof->dofh_secsize); 8403 8404 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 8405 8406 /* 8407 * Create the provider. 8408 */ 8409 dtrace_dofprov2hprov(&dhpv, provider, strtab); 8410 8411 mops->dtms_remove_pid(meta->dtm_arg, &dhpv, pid); 8412 8413 meta->dtm_count--; 8414 } 8415 8416 static void 8417 dtrace_helper_provider_remove(dof_helper_t *dhp, pid_t pid) 8418 { 8419 uintptr_t daddr = (uintptr_t)dhp->dofhp_dof; 8420 dof_hdr_t *dof = (dof_hdr_t *)daddr; 8421 int i; 8422 8423 ASSERT(MUTEX_HELD(&dtrace_meta_lock)); 8424 8425 for (i = 0; i < dof->dofh_secnum; i++) { 8426 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 8427 dof->dofh_secoff + i * dof->dofh_secsize); 8428 8429 if (sec->dofs_type != DOF_SECT_PROVIDER) 8430 continue; 8431 8432 dtrace_helper_provider_remove_one(dhp, sec, pid); 8433 } 8434 } 8435 8436 /* 8437 * DTrace Meta Provider-to-Framework API Functions 8438 * 8439 * These functions implement the Meta Provider-to-Framework API, as described 8440 * in <sys/dtrace.h>. 
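 *
 * As a minimal sketch (the "mymeta" names are hypothetical, not those
 * of any actual meta-provider), registration looks roughly like:
 *
 *	static dtrace_mops_t mymeta_mops;
 *	dtrace_meta_provider_id_t mymeta_id;
 *
 *	mymeta_mops.dtms_create_probe = mymeta_create_probe;
 *	mymeta_mops.dtms_provide_pid = mymeta_provide_pid;
 *	mymeta_mops.dtms_remove_pid = mymeta_remove_pid;
 *
 *	if (dtrace_meta_register("mymeta", &mymeta_mops,
 *	    NULL, &mymeta_id) != 0)
 *		... registration failed ...
 *
 * All three operations must be non-NULL or registration fails with
 * EINVAL, and at most one user-land meta-provider may be registered
 * at a time.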
8441 */ 8442 int 8443 dtrace_meta_register(const char *name, const dtrace_mops_t *mops, void *arg, 8444 dtrace_meta_provider_id_t *idp) 8445 { 8446 dtrace_meta_t *meta; 8447 dtrace_helpers_t *help, *next; 8448 int i; 8449 8450 *idp = DTRACE_METAPROVNONE; 8451 8452 /* 8453 * We strictly don't need the name, but we hold onto it for 8454 * debuggability. All hail error queues! 8455 */ 8456 if (name == NULL) { 8457 cmn_err(CE_WARN, "failed to register meta-provider: " 8458 "invalid name"); 8459 return (EINVAL); 8460 } 8461 8462 if (mops == NULL || 8463 mops->dtms_create_probe == NULL || 8464 mops->dtms_provide_pid == NULL || 8465 mops->dtms_remove_pid == NULL) { 8466 cmn_err(CE_WARN, "failed to register meta-provider %s: " 8467 "invalid ops", name); 8468 return (EINVAL); 8469 } 8470 8471 meta = kmem_zalloc(sizeof (dtrace_meta_t), KM_SLEEP); 8472 meta->dtm_mops = *mops; 8473 meta->dtm_name = kmem_alloc(strlen(name) + 1, KM_SLEEP); 8474 (void) strcpy(meta->dtm_name, name); 8475 meta->dtm_arg = arg; 8476 8477 mutex_enter(&dtrace_meta_lock); 8478 mutex_enter(&dtrace_lock); 8479 8480 if (dtrace_meta_pid != NULL) { 8481 mutex_exit(&dtrace_lock); 8482 mutex_exit(&dtrace_meta_lock); 8483 cmn_err(CE_WARN, "failed to register meta-provider %s: " 8484 "user-land meta-provider exists", name); 8485 kmem_free(meta->dtm_name, strlen(meta->dtm_name) + 1); 8486 kmem_free(meta, sizeof (dtrace_meta_t)); 8487 return (EINVAL); 8488 } 8489 8490 dtrace_meta_pid = meta; 8491 *idp = (dtrace_meta_provider_id_t)meta; 8492 8493 /* 8494 * If there are providers and probes ready to go, pass them 8495 * off to the new meta-provider now. 8496 */ 8497 8498 help = dtrace_deferred_pid; 8499 dtrace_deferred_pid = NULL; 8500 8501 mutex_exit(&dtrace_lock); 8502 8503 while (help != NULL) { 8504 for (i = 0; i < help->dthps_nprovs; i++) { 8505 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 8506 help->dthps_pid); 8507 } 8508 8509 next = help->dthps_next; 8510 help->dthps_next = NULL; 8511 help->dthps_prev = NULL; 8512 help->dthps_deferred = 0; 8513 help = next; 8514 } 8515 8516 mutex_exit(&dtrace_meta_lock); 8517 8518 return (0); 8519 } 8520 8521 int 8522 dtrace_meta_unregister(dtrace_meta_provider_id_t id) 8523 { 8524 dtrace_meta_t **pp, *old = (dtrace_meta_t *)id; 8525 8526 mutex_enter(&dtrace_meta_lock); 8527 mutex_enter(&dtrace_lock); 8528 8529 if (old == dtrace_meta_pid) { 8530 pp = &dtrace_meta_pid; 8531 } else { 8532 panic("attempt to unregister non-existent " 8533 "dtrace meta-provider %p\n", (void *)old); 8534 } 8535 8536 if (old->dtm_count != 0) { 8537 mutex_exit(&dtrace_lock); 8538 mutex_exit(&dtrace_meta_lock); 8539 return (EBUSY); 8540 } 8541 8542 *pp = NULL; 8543 8544 mutex_exit(&dtrace_lock); 8545 mutex_exit(&dtrace_meta_lock); 8546 8547 kmem_free(old->dtm_name, strlen(old->dtm_name) + 1); 8548 kmem_free(old, sizeof (dtrace_meta_t)); 8549 8550 return (0); 8551 } 8552 8553 8554 /* 8555 * DTrace DIF Object Functions 8556 */ 8557 static int 8558 dtrace_difo_err(uint_t pc, const char *format, ...) 8559 { 8560 if (dtrace_err_verbose) { 8561 va_list alist; 8562 8563 (void) uprintf("dtrace DIF object error: [%u]: ", pc); 8564 va_start(alist, format); 8565 (void) vuprintf(format, alist); 8566 va_end(alist); 8567 } 8568 8569 #ifdef DTRACE_ERRDEBUG 8570 dtrace_errdebug(format); 8571 #endif 8572 return (1); 8573 } 8574 8575 /* 8576 * Validate a DTrace DIF object by checking the IR instructions. The following 8577 * rules are currently enforced by dtrace_difo_validate(): 8578 * 8579 * 1.
Each instruction must have a valid opcode 8580 * 2. Each register, string, variable, or subroutine reference must be valid 8581 * 3. No instruction can modify register %r0 (must be zero) 8582 * 4. All instruction reserved bits must be set to zero 8583 * 5. The last instruction must be a "ret" instruction 8584 * 6. All branch targets must reference a valid instruction _after_ the branch 8585 */ 8586 static int 8587 dtrace_difo_validate(dtrace_difo_t *dp, dtrace_vstate_t *vstate, uint_t nregs, 8588 cred_t *cr) 8589 { 8590 int err = 0, i; 8591 int (*efunc)(uint_t pc, const char *, ...) = dtrace_difo_err; 8592 int kcheckload; 8593 uint_t pc; 8594 8595 kcheckload = cr == NULL || 8596 (vstate->dtvs_state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) == 0; 8597 8598 dp->dtdo_destructive = 0; 8599 8600 for (pc = 0; pc < dp->dtdo_len && err == 0; pc++) { 8601 dif_instr_t instr = dp->dtdo_buf[pc]; 8602 8603 uint_t r1 = DIF_INSTR_R1(instr); 8604 uint_t r2 = DIF_INSTR_R2(instr); 8605 uint_t rd = DIF_INSTR_RD(instr); 8606 uint_t rs = DIF_INSTR_RS(instr); 8607 uint_t label = DIF_INSTR_LABEL(instr); 8608 uint_t v = DIF_INSTR_VAR(instr); 8609 uint_t subr = DIF_INSTR_SUBR(instr); 8610 uint_t type = DIF_INSTR_TYPE(instr); 8611 uint_t op = DIF_INSTR_OP(instr); 8612 8613 switch (op) { 8614 case DIF_OP_OR: 8615 case DIF_OP_XOR: 8616 case DIF_OP_AND: 8617 case DIF_OP_SLL: 8618 case DIF_OP_SRL: 8619 case DIF_OP_SRA: 8620 case DIF_OP_SUB: 8621 case DIF_OP_ADD: 8622 case DIF_OP_MUL: 8623 case DIF_OP_SDIV: 8624 case DIF_OP_UDIV: 8625 case DIF_OP_SREM: 8626 case DIF_OP_UREM: 8627 case DIF_OP_COPYS: 8628 if (r1 >= nregs) 8629 err += efunc(pc, "invalid register %u\n", r1); 8630 if (r2 >= nregs) 8631 err += efunc(pc, "invalid register %u\n", r2); 8632 if (rd >= nregs) 8633 err += efunc(pc, "invalid register %u\n", rd); 8634 if (rd == 0) 8635 err += efunc(pc, "cannot write to %r0\n"); 8636 break; 8637 case DIF_OP_NOT: 8638 case DIF_OP_MOV: 8639 case DIF_OP_ALLOCS: 8640 if (r1 >= nregs) 8641 err += efunc(pc, "invalid register %u\n", r1); 8642 if (r2 != 0) 8643 err += efunc(pc, "non-zero reserved bits\n"); 8644 if (rd >= nregs) 8645 err += efunc(pc, "invalid register %u\n", rd); 8646 if (rd == 0) 8647 err += efunc(pc, "cannot write to %r0\n"); 8648 break; 8649 case DIF_OP_LDSB: 8650 case DIF_OP_LDSH: 8651 case DIF_OP_LDSW: 8652 case DIF_OP_LDUB: 8653 case DIF_OP_LDUH: 8654 case DIF_OP_LDUW: 8655 case DIF_OP_LDX: 8656 if (r1 >= nregs) 8657 err += efunc(pc, "invalid register %u\n", r1); 8658 if (r2 != 0) 8659 err += efunc(pc, "non-zero reserved bits\n"); 8660 if (rd >= nregs) 8661 err += efunc(pc, "invalid register %u\n", rd); 8662 if (rd == 0) 8663 err += efunc(pc, "cannot write to %r0\n"); 8664 if (kcheckload) 8665 dp->dtdo_buf[pc] = DIF_INSTR_LOAD(op + 8666 DIF_OP_RLDSB - DIF_OP_LDSB, r1, rd); 8667 break; 8668 case DIF_OP_RLDSB: 8669 case DIF_OP_RLDSH: 8670 case DIF_OP_RLDSW: 8671 case DIF_OP_RLDUB: 8672 case DIF_OP_RLDUH: 8673 case DIF_OP_RLDUW: 8674 case DIF_OP_RLDX: 8675 if (r1 >= nregs) 8676 err += efunc(pc, "invalid register %u\n", r1); 8677 if (r2 != 0) 8678 err += efunc(pc, "non-zero reserved bits\n"); 8679 if (rd >= nregs) 8680 err += efunc(pc, "invalid register %u\n", rd); 8681 if (rd == 0) 8682 err += efunc(pc, "cannot write to %r0\n"); 8683 break; 8684 case DIF_OP_ULDSB: 8685 case DIF_OP_ULDSH: 8686 case DIF_OP_ULDSW: 8687 case DIF_OP_ULDUB: 8688 case DIF_OP_ULDUH: 8689 case DIF_OP_ULDUW: 8690 case DIF_OP_ULDX: 8691 if (r1 >= nregs) 8692 err += efunc(pc, "invalid register %u\n", r1); 8693 if (r2 != 0) 8694 err += 
efunc(pc, "non-zero reserved bits\n"); 8695 if (rd >= nregs) 8696 err += efunc(pc, "invalid register %u\n", rd); 8697 if (rd == 0) 8698 err += efunc(pc, "cannot write to %r0\n"); 8699 break; 8700 case DIF_OP_STB: 8701 case DIF_OP_STH: 8702 case DIF_OP_STW: 8703 case DIF_OP_STX: 8704 if (r1 >= nregs) 8705 err += efunc(pc, "invalid register %u\n", r1); 8706 if (r2 != 0) 8707 err += efunc(pc, "non-zero reserved bits\n"); 8708 if (rd >= nregs) 8709 err += efunc(pc, "invalid register %u\n", rd); 8710 if (rd == 0) 8711 err += efunc(pc, "cannot write to 0 address\n"); 8712 break; 8713 case DIF_OP_CMP: 8714 case DIF_OP_SCMP: 8715 if (r1 >= nregs) 8716 err += efunc(pc, "invalid register %u\n", r1); 8717 if (r2 >= nregs) 8718 err += efunc(pc, "invalid register %u\n", r2); 8719 if (rd != 0) 8720 err += efunc(pc, "non-zero reserved bits\n"); 8721 break; 8722 case DIF_OP_TST: 8723 if (r1 >= nregs) 8724 err += efunc(pc, "invalid register %u\n", r1); 8725 if (r2 != 0 || rd != 0) 8726 err += efunc(pc, "non-zero reserved bits\n"); 8727 break; 8728 case DIF_OP_BA: 8729 case DIF_OP_BE: 8730 case DIF_OP_BNE: 8731 case DIF_OP_BG: 8732 case DIF_OP_BGU: 8733 case DIF_OP_BGE: 8734 case DIF_OP_BGEU: 8735 case DIF_OP_BL: 8736 case DIF_OP_BLU: 8737 case DIF_OP_BLE: 8738 case DIF_OP_BLEU: 8739 if (label >= dp->dtdo_len) { 8740 err += efunc(pc, "invalid branch target %u\n", 8741 label); 8742 } 8743 if (label <= pc) { 8744 err += efunc(pc, "backward branch to %u\n", 8745 label); 8746 } 8747 break; 8748 case DIF_OP_RET: 8749 if (r1 != 0 || r2 != 0) 8750 err += efunc(pc, "non-zero reserved bits\n"); 8751 if (rd >= nregs) 8752 err += efunc(pc, "invalid register %u\n", rd); 8753 break; 8754 case DIF_OP_NOP: 8755 case DIF_OP_POPTS: 8756 case DIF_OP_FLUSHTS: 8757 if (r1 != 0 || r2 != 0 || rd != 0) 8758 err += efunc(pc, "non-zero reserved bits\n"); 8759 break; 8760 case DIF_OP_SETX: 8761 if (DIF_INSTR_INTEGER(instr) >= dp->dtdo_intlen) { 8762 err += efunc(pc, "invalid integer ref %u\n", 8763 DIF_INSTR_INTEGER(instr)); 8764 } 8765 if (rd >= nregs) 8766 err += efunc(pc, "invalid register %u\n", rd); 8767 if (rd == 0) 8768 err += efunc(pc, "cannot write to %r0\n"); 8769 break; 8770 case DIF_OP_SETS: 8771 if (DIF_INSTR_STRING(instr) >= dp->dtdo_strlen) { 8772 err += efunc(pc, "invalid string ref %u\n", 8773 DIF_INSTR_STRING(instr)); 8774 } 8775 if (rd >= nregs) 8776 err += efunc(pc, "invalid register %u\n", rd); 8777 if (rd == 0) 8778 err += efunc(pc, "cannot write to %r0\n"); 8779 break; 8780 case DIF_OP_LDGA: 8781 case DIF_OP_LDTA: 8782 if (r1 > DIF_VAR_ARRAY_MAX) 8783 err += efunc(pc, "invalid array %u\n", r1); 8784 if (r2 >= nregs) 8785 err += efunc(pc, "invalid register %u\n", r2); 8786 if (rd >= nregs) 8787 err += efunc(pc, "invalid register %u\n", rd); 8788 if (rd == 0) 8789 err += efunc(pc, "cannot write to %r0\n"); 8790 break; 8791 case DIF_OP_LDGS: 8792 case DIF_OP_LDTS: 8793 case DIF_OP_LDLS: 8794 case DIF_OP_LDGAA: 8795 case DIF_OP_LDTAA: 8796 if (v < DIF_VAR_OTHER_MIN || v > DIF_VAR_OTHER_MAX) 8797 err += efunc(pc, "invalid variable %u\n", v); 8798 if (rd >= nregs) 8799 err += efunc(pc, "invalid register %u\n", rd); 8800 if (rd == 0) 8801 err += efunc(pc, "cannot write to %r0\n"); 8802 break; 8803 case DIF_OP_STGS: 8804 case DIF_OP_STTS: 8805 case DIF_OP_STLS: 8806 case DIF_OP_STGAA: 8807 case DIF_OP_STTAA: 8808 if (v < DIF_VAR_OTHER_UBASE || v > DIF_VAR_OTHER_MAX) 8809 err += efunc(pc, "invalid variable %u\n", v); 8810 if (rs >= nregs) 8811 err += efunc(pc, "invalid register %u\n", rd); 8812 break; 8813 case 
DIF_OP_CALL: 8814 if (subr > DIF_SUBR_MAX) 8815 err += efunc(pc, "invalid subr %u\n", subr); 8816 if (rd >= nregs) 8817 err += efunc(pc, "invalid register %u\n", rd); 8818 if (rd == 0) 8819 err += efunc(pc, "cannot write to %r0\n"); 8820 8821 if (subr == DIF_SUBR_COPYOUT || 8822 subr == DIF_SUBR_COPYOUTSTR) { 8823 dp->dtdo_destructive = 1; 8824 } 8825 break; 8826 case DIF_OP_PUSHTR: 8827 if (type != DIF_TYPE_STRING && type != DIF_TYPE_CTF) 8828 err += efunc(pc, "invalid ref type %u\n", type); 8829 if (r2 >= nregs) 8830 err += efunc(pc, "invalid register %u\n", r2); 8831 if (rs >= nregs) 8832 err += efunc(pc, "invalid register %u\n", rs); 8833 break; 8834 case DIF_OP_PUSHTV: 8835 if (type != DIF_TYPE_CTF) 8836 err += efunc(pc, "invalid val type %u\n", type); 8837 if (r2 >= nregs) 8838 err += efunc(pc, "invalid register %u\n", r2); 8839 if (rs >= nregs) 8840 err += efunc(pc, "invalid register %u\n", rs); 8841 break; 8842 default: 8843 err += efunc(pc, "invalid opcode %u\n", 8844 DIF_INSTR_OP(instr)); 8845 } 8846 } 8847 8848 if (dp->dtdo_len != 0 && 8849 DIF_INSTR_OP(dp->dtdo_buf[dp->dtdo_len - 1]) != DIF_OP_RET) { 8850 err += efunc(dp->dtdo_len - 1, 8851 "expected 'ret' as last DIF instruction\n"); 8852 } 8853 8854 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) { 8855 /* 8856 * If we're not returning by reference, the size must be either 8857 * 0 or the size of one of the base types. 8858 */ 8859 switch (dp->dtdo_rtype.dtdt_size) { 8860 case 0: 8861 case sizeof (uint8_t): 8862 case sizeof (uint16_t): 8863 case sizeof (uint32_t): 8864 case sizeof (uint64_t): 8865 break; 8866 8867 default: 8868 err += efunc(dp->dtdo_len - 1, "bad return size"); 8869 } 8870 } 8871 8872 for (i = 0; i < dp->dtdo_varlen && err == 0; i++) { 8873 dtrace_difv_t *v = &dp->dtdo_vartab[i], *existing = NULL; 8874 dtrace_diftype_t *vt, *et; 8875 uint_t id, ndx; 8876 8877 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL && 8878 v->dtdv_scope != DIFV_SCOPE_THREAD && 8879 v->dtdv_scope != DIFV_SCOPE_LOCAL) { 8880 err += efunc(i, "unrecognized variable scope %d\n", 8881 v->dtdv_scope); 8882 break; 8883 } 8884 8885 if (v->dtdv_kind != DIFV_KIND_ARRAY && 8886 v->dtdv_kind != DIFV_KIND_SCALAR) { 8887 err += efunc(i, "unrecognized variable type %d\n", 8888 v->dtdv_kind); 8889 break; 8890 } 8891 8892 if ((id = v->dtdv_id) > DIF_VARIABLE_MAX) { 8893 err += efunc(i, "%d exceeds variable id limit\n", id); 8894 break; 8895 } 8896 8897 if (id < DIF_VAR_OTHER_UBASE) 8898 continue; 8899 8900 /* 8901 * For user-defined variables, we need to check that this 8902 * definition is identical to any previous definition that we 8903 * encountered. 
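 * For example, if one DIFO in an enabling uses a user-defined global
 * as a scalar and a later DIFO uses the same variable with a
 * different kind, type flags or type size, validation fails below
 * with the corresponding "changed variable ..." error.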
8904 */ 8905 ndx = id - DIF_VAR_OTHER_UBASE; 8906 8907 switch (v->dtdv_scope) { 8908 case DIFV_SCOPE_GLOBAL: 8909 if (ndx < vstate->dtvs_nglobals) { 8910 dtrace_statvar_t *svar; 8911 8912 if ((svar = vstate->dtvs_globals[ndx]) != NULL) 8913 existing = &svar->dtsv_var; 8914 } 8915 8916 break; 8917 8918 case DIFV_SCOPE_THREAD: 8919 if (ndx < vstate->dtvs_ntlocals) 8920 existing = &vstate->dtvs_tlocals[ndx]; 8921 break; 8922 8923 case DIFV_SCOPE_LOCAL: 8924 if (ndx < vstate->dtvs_nlocals) { 8925 dtrace_statvar_t *svar; 8926 8927 if ((svar = vstate->dtvs_locals[ndx]) != NULL) 8928 existing = &svar->dtsv_var; 8929 } 8930 8931 break; 8932 } 8933 8934 vt = &v->dtdv_type; 8935 8936 if (vt->dtdt_flags & DIF_TF_BYREF) { 8937 if (vt->dtdt_size == 0) { 8938 err += efunc(i, "zero-sized variable\n"); 8939 break; 8940 } 8941 8942 if (v->dtdv_scope == DIFV_SCOPE_GLOBAL && 8943 vt->dtdt_size > dtrace_global_maxsize) { 8944 err += efunc(i, "oversized by-ref global\n"); 8945 break; 8946 } 8947 } 8948 8949 if (existing == NULL || existing->dtdv_id == 0) 8950 continue; 8951 8952 ASSERT(existing->dtdv_id == v->dtdv_id); 8953 ASSERT(existing->dtdv_scope == v->dtdv_scope); 8954 8955 if (existing->dtdv_kind != v->dtdv_kind) 8956 err += efunc(i, "%d changed variable kind\n", id); 8957 8958 et = &existing->dtdv_type; 8959 8960 if (vt->dtdt_flags != et->dtdt_flags) { 8961 err += efunc(i, "%d changed variable type flags\n", id); 8962 break; 8963 } 8964 8965 if (vt->dtdt_size != 0 && vt->dtdt_size != et->dtdt_size) { 8966 err += efunc(i, "%d changed variable type size\n", id); 8967 break; 8968 } 8969 } 8970 8971 return (err); 8972 } 8973 8974 /* 8975 * Validate a DTrace DIF object that is to be used as a helper. Helpers 8976 * are much more constrained than normal DIFOs. Specifically, they may 8977 * not: 8978 * 8979 * 1. Make calls to subroutines other than copyin(), copyinstr() or 8980 * miscellaneous string routines. 8981 * 2. Access DTrace variables other than the args[] array, and the 8982 * curthread, pid, ppid, tid, execname, zonename, uid and gid variables. 8983 * 3. Have thread-local variables. 8984 * 4. Have dynamic variables. 8985 */ 8986 static int 8987 dtrace_difo_validate_helper(dtrace_difo_t *dp) 8988 { 8989 int (*efunc)(uint_t pc, const char *, ...)
= dtrace_difo_err; 8990 int err = 0; 8991 uint_t pc; 8992 8993 for (pc = 0; pc < dp->dtdo_len; pc++) { 8994 dif_instr_t instr = dp->dtdo_buf[pc]; 8995 8996 uint_t v = DIF_INSTR_VAR(instr); 8997 uint_t subr = DIF_INSTR_SUBR(instr); 8998 uint_t op = DIF_INSTR_OP(instr); 8999 9000 switch (op) { 9001 case DIF_OP_OR: 9002 case DIF_OP_XOR: 9003 case DIF_OP_AND: 9004 case DIF_OP_SLL: 9005 case DIF_OP_SRL: 9006 case DIF_OP_SRA: 9007 case DIF_OP_SUB: 9008 case DIF_OP_ADD: 9009 case DIF_OP_MUL: 9010 case DIF_OP_SDIV: 9011 case DIF_OP_UDIV: 9012 case DIF_OP_SREM: 9013 case DIF_OP_UREM: 9014 case DIF_OP_COPYS: 9015 case DIF_OP_NOT: 9016 case DIF_OP_MOV: 9017 case DIF_OP_RLDSB: 9018 case DIF_OP_RLDSH: 9019 case DIF_OP_RLDSW: 9020 case DIF_OP_RLDUB: 9021 case DIF_OP_RLDUH: 9022 case DIF_OP_RLDUW: 9023 case DIF_OP_RLDX: 9024 case DIF_OP_ULDSB: 9025 case DIF_OP_ULDSH: 9026 case DIF_OP_ULDSW: 9027 case DIF_OP_ULDUB: 9028 case DIF_OP_ULDUH: 9029 case DIF_OP_ULDUW: 9030 case DIF_OP_ULDX: 9031 case DIF_OP_STB: 9032 case DIF_OP_STH: 9033 case DIF_OP_STW: 9034 case DIF_OP_STX: 9035 case DIF_OP_ALLOCS: 9036 case DIF_OP_CMP: 9037 case DIF_OP_SCMP: 9038 case DIF_OP_TST: 9039 case DIF_OP_BA: 9040 case DIF_OP_BE: 9041 case DIF_OP_BNE: 9042 case DIF_OP_BG: 9043 case DIF_OP_BGU: 9044 case DIF_OP_BGE: 9045 case DIF_OP_BGEU: 9046 case DIF_OP_BL: 9047 case DIF_OP_BLU: 9048 case DIF_OP_BLE: 9049 case DIF_OP_BLEU: 9050 case DIF_OP_RET: 9051 case DIF_OP_NOP: 9052 case DIF_OP_POPTS: 9053 case DIF_OP_FLUSHTS: 9054 case DIF_OP_SETX: 9055 case DIF_OP_SETS: 9056 case DIF_OP_LDGA: 9057 case DIF_OP_LDLS: 9058 case DIF_OP_STGS: 9059 case DIF_OP_STLS: 9060 case DIF_OP_PUSHTR: 9061 case DIF_OP_PUSHTV: 9062 break; 9063 9064 case DIF_OP_LDGS: 9065 if (v >= DIF_VAR_OTHER_UBASE) 9066 break; 9067 9068 if (v >= DIF_VAR_ARG0 && v <= DIF_VAR_ARG9) 9069 break; 9070 9071 if (v == DIF_VAR_CURTHREAD || v == DIF_VAR_PID || 9072 v == DIF_VAR_PPID || v == DIF_VAR_TID || 9073 v == DIF_VAR_EXECARGS || 9074 v == DIF_VAR_EXECNAME || v == DIF_VAR_ZONENAME || 9075 v == DIF_VAR_UID || v == DIF_VAR_GID) 9076 break; 9077 9078 err += efunc(pc, "illegal variable %u\n", v); 9079 break; 9080 9081 case DIF_OP_LDTA: 9082 case DIF_OP_LDTS: 9083 case DIF_OP_LDGAA: 9084 case DIF_OP_LDTAA: 9085 err += efunc(pc, "illegal dynamic variable load\n"); 9086 break; 9087 9088 case DIF_OP_STTS: 9089 case DIF_OP_STGAA: 9090 case DIF_OP_STTAA: 9091 err += efunc(pc, "illegal dynamic variable store\n"); 9092 break; 9093 9094 case DIF_OP_CALL: 9095 if (subr == DIF_SUBR_ALLOCA || 9096 subr == DIF_SUBR_BCOPY || 9097 subr == DIF_SUBR_COPYIN || 9098 subr == DIF_SUBR_COPYINTO || 9099 subr == DIF_SUBR_COPYINSTR || 9100 subr == DIF_SUBR_INDEX || 9101 subr == DIF_SUBR_INET_NTOA || 9102 subr == DIF_SUBR_INET_NTOA6 || 9103 subr == DIF_SUBR_INET_NTOP || 9104 subr == DIF_SUBR_LLTOSTR || 9105 subr == DIF_SUBR_RINDEX || 9106 subr == DIF_SUBR_STRCHR || 9107 subr == DIF_SUBR_STRJOIN || 9108 subr == DIF_SUBR_STRRCHR || 9109 subr == DIF_SUBR_STRSTR || 9110 subr == DIF_SUBR_HTONS || 9111 subr == DIF_SUBR_HTONL || 9112 subr == DIF_SUBR_HTONLL || 9113 subr == DIF_SUBR_NTOHS || 9114 subr == DIF_SUBR_NTOHL || 9115 subr == DIF_SUBR_NTOHLL || 9116 subr == DIF_SUBR_MEMREF || 9117 subr == DIF_SUBR_TYPEREF) 9118 break; 9119 9120 err += efunc(pc, "invalid subr %u\n", subr); 9121 break; 9122 9123 default: 9124 err += efunc(pc, "invalid opcode %u\n", 9125 DIF_INSTR_OP(instr)); 9126 } 9127 } 9128 9129 return (err); 9130 } 9131 9132 /* 9133 * Returns 1 if the expression in the DIF object can be cached on a 
per-thread 9134 * basis; 0 if not. 9135 */ 9136 static int 9137 dtrace_difo_cacheable(dtrace_difo_t *dp) 9138 { 9139 int i; 9140 9141 if (dp == NULL) 9142 return (0); 9143 9144 for (i = 0; i < dp->dtdo_varlen; i++) { 9145 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9146 9147 if (v->dtdv_scope != DIFV_SCOPE_GLOBAL) 9148 continue; 9149 9150 switch (v->dtdv_id) { 9151 case DIF_VAR_CURTHREAD: 9152 case DIF_VAR_PID: 9153 case DIF_VAR_TID: 9154 case DIF_VAR_EXECARGS: 9155 case DIF_VAR_EXECNAME: 9156 case DIF_VAR_ZONENAME: 9157 break; 9158 9159 default: 9160 return (0); 9161 } 9162 } 9163 9164 /* 9165 * This DIF object may be cacheable. Now we need to look for any 9166 * array loading instructions, any memory loading instructions, or 9167 * any stores to thread-local variables. 9168 */ 9169 for (i = 0; i < dp->dtdo_len; i++) { 9170 uint_t op = DIF_INSTR_OP(dp->dtdo_buf[i]); 9171 9172 if ((op >= DIF_OP_LDSB && op <= DIF_OP_LDX) || 9173 (op >= DIF_OP_ULDSB && op <= DIF_OP_ULDX) || 9174 (op >= DIF_OP_RLDSB && op <= DIF_OP_RLDX) || 9175 op == DIF_OP_LDGA || op == DIF_OP_STTS) 9176 return (0); 9177 } 9178 9179 return (1); 9180 } 9181 9182 static void 9183 dtrace_difo_hold(dtrace_difo_t *dp) 9184 { 9185 int i; 9186 9187 ASSERT(MUTEX_HELD(&dtrace_lock)); 9188 9189 dp->dtdo_refcnt++; 9190 ASSERT(dp->dtdo_refcnt != 0); 9191 9192 /* 9193 * We need to check this DIF object for references to the variable 9194 * DIF_VAR_VTIMESTAMP. 9195 */ 9196 for (i = 0; i < dp->dtdo_varlen; i++) { 9197 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9198 9199 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9200 continue; 9201 9202 if (dtrace_vtime_references++ == 0) 9203 dtrace_vtime_enable(); 9204 } 9205 } 9206 9207 /* 9208 * This routine calculates the dynamic variable chunksize for a given DIF 9209 * object. The calculation is not fool-proof, and can probably be tricked by 9210 * malicious DIF -- but it works for all compiler-generated DIF. Because this 9211 * calculation is likely imperfect, dtrace_dynvar() is able to gracefully fail 9212 * if a dynamic variable size exceeds the chunksize. 
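 * As an illustrative example, a thread-local associative store such
 * as
 *
 *	self->last[probefunc] = timestamp;
 *
 * reaches the DIF_OP_STTAA case below with one string key (sized at
 * dtrace_strsize_default unless a preceding "setx" established its
 * size) plus the implicit thread and id keys; the chunksize must
 * then cover sizeof (dtrace_dynvar_t), the additional dtrace_key_t
 * entries, the rounded-up key sizes and the size of the stored datum.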
9213 */ 9214 static void 9215 dtrace_difo_chunksize(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9216 { 9217 uint64_t sval = 0; 9218 dtrace_key_t tupregs[DIF_DTR_NREGS + 2]; /* +2 for thread and id */ 9219 const dif_instr_t *text = dp->dtdo_buf; 9220 uint_t pc, srd = 0; 9221 uint_t ttop = 0; 9222 size_t size, ksize; 9223 uint_t id, i; 9224 9225 for (pc = 0; pc < dp->dtdo_len; pc++) { 9226 dif_instr_t instr = text[pc]; 9227 uint_t op = DIF_INSTR_OP(instr); 9228 uint_t rd = DIF_INSTR_RD(instr); 9229 uint_t r1 = DIF_INSTR_R1(instr); 9230 uint_t nkeys = 0; 9231 uchar_t scope = 0; 9232 9233 dtrace_key_t *key = tupregs; 9234 9235 switch (op) { 9236 case DIF_OP_SETX: 9237 sval = dp->dtdo_inttab[DIF_INSTR_INTEGER(instr)]; 9238 srd = rd; 9239 continue; 9240 9241 case DIF_OP_STTS: 9242 key = &tupregs[DIF_DTR_NREGS]; 9243 key[0].dttk_size = 0; 9244 key[1].dttk_size = 0; 9245 nkeys = 2; 9246 scope = DIFV_SCOPE_THREAD; 9247 break; 9248 9249 case DIF_OP_STGAA: 9250 case DIF_OP_STTAA: 9251 nkeys = ttop; 9252 9253 if (DIF_INSTR_OP(instr) == DIF_OP_STTAA) 9254 key[nkeys++].dttk_size = 0; 9255 9256 key[nkeys++].dttk_size = 0; 9257 9258 if (op == DIF_OP_STTAA) { 9259 scope = DIFV_SCOPE_THREAD; 9260 } else { 9261 scope = DIFV_SCOPE_GLOBAL; 9262 } 9263 9264 break; 9265 9266 case DIF_OP_PUSHTR: 9267 if (ttop == DIF_DTR_NREGS) 9268 return; 9269 9270 if ((srd == 0 || sval == 0) && r1 == DIF_TYPE_STRING) { 9271 /* 9272 * If the register for the size of the "pushtr" 9273 * is %r0 (or the value is 0) and the type is 9274 * a string, we'll use the system-wide default 9275 * string size. 9276 */ 9277 tupregs[ttop++].dttk_size = 9278 dtrace_strsize_default; 9279 } else { 9280 if (srd == 0) 9281 return; 9282 9283 tupregs[ttop++].dttk_size = sval; 9284 } 9285 9286 break; 9287 9288 case DIF_OP_PUSHTV: 9289 if (ttop == DIF_DTR_NREGS) 9290 return; 9291 9292 tupregs[ttop++].dttk_size = 0; 9293 break; 9294 9295 case DIF_OP_FLUSHTS: 9296 ttop = 0; 9297 break; 9298 9299 case DIF_OP_POPTS: 9300 if (ttop != 0) 9301 ttop--; 9302 break; 9303 } 9304 9305 sval = 0; 9306 srd = 0; 9307 9308 if (nkeys == 0) 9309 continue; 9310 9311 /* 9312 * We have a dynamic variable allocation; calculate its size. 9313 */ 9314 for (ksize = 0, i = 0; i < nkeys; i++) 9315 ksize += P2ROUNDUP(key[i].dttk_size, sizeof (uint64_t)); 9316 9317 size = sizeof (dtrace_dynvar_t); 9318 size += sizeof (dtrace_key_t) * (nkeys - 1); 9319 size += ksize; 9320 9321 /* 9322 * Now we need to determine the size of the stored data. 9323 */ 9324 id = DIF_INSTR_VAR(instr); 9325 9326 for (i = 0; i < dp->dtdo_varlen; i++) { 9327 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9328 9329 if (v->dtdv_id == id && v->dtdv_scope == scope) { 9330 size += v->dtdv_type.dtdt_size; 9331 break; 9332 } 9333 } 9334 9335 if (i == dp->dtdo_varlen) 9336 return; 9337 9338 /* 9339 * We have the size. If this is larger than the chunk size 9340 * for our dynamic variable state, reset the chunk size. 
9341 */ 9342 size = P2ROUNDUP(size, sizeof (uint64_t)); 9343 9344 if (size > vstate->dtvs_dynvars.dtds_chunksize) 9345 vstate->dtvs_dynvars.dtds_chunksize = size; 9346 } 9347 } 9348 9349 static void 9350 dtrace_difo_init(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9351 { 9352 int i, oldsvars, osz, nsz, otlocals, ntlocals; 9353 uint_t id; 9354 9355 ASSERT(MUTEX_HELD(&dtrace_lock)); 9356 ASSERT(dp->dtdo_buf != NULL && dp->dtdo_len != 0); 9357 9358 for (i = 0; i < dp->dtdo_varlen; i++) { 9359 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9360 dtrace_statvar_t *svar, ***svarp = NULL; 9361 size_t dsize = 0; 9362 uint8_t scope = v->dtdv_scope; 9363 int *np = NULL; 9364 9365 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9366 continue; 9367 9368 id -= DIF_VAR_OTHER_UBASE; 9369 9370 switch (scope) { 9371 case DIFV_SCOPE_THREAD: 9372 while (id >= (otlocals = vstate->dtvs_ntlocals)) { 9373 dtrace_difv_t *tlocals; 9374 9375 if ((ntlocals = (otlocals << 1)) == 0) 9376 ntlocals = 1; 9377 9378 osz = otlocals * sizeof (dtrace_difv_t); 9379 nsz = ntlocals * sizeof (dtrace_difv_t); 9380 9381 tlocals = kmem_zalloc(nsz, KM_SLEEP); 9382 9383 if (osz != 0) { 9384 bcopy(vstate->dtvs_tlocals, 9385 tlocals, osz); 9386 kmem_free(vstate->dtvs_tlocals, osz); 9387 } 9388 9389 vstate->dtvs_tlocals = tlocals; 9390 vstate->dtvs_ntlocals = ntlocals; 9391 } 9392 9393 vstate->dtvs_tlocals[id] = *v; 9394 continue; 9395 9396 case DIFV_SCOPE_LOCAL: 9397 np = &vstate->dtvs_nlocals; 9398 svarp = &vstate->dtvs_locals; 9399 9400 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9401 dsize = NCPU * (v->dtdv_type.dtdt_size + 9402 sizeof (uint64_t)); 9403 else 9404 dsize = NCPU * sizeof (uint64_t); 9405 9406 break; 9407 9408 case DIFV_SCOPE_GLOBAL: 9409 np = &vstate->dtvs_nglobals; 9410 svarp = &vstate->dtvs_globals; 9411 9412 if (v->dtdv_type.dtdt_flags & DIF_TF_BYREF) 9413 dsize = v->dtdv_type.dtdt_size + 9414 sizeof (uint64_t); 9415 9416 break; 9417 9418 default: 9419 ASSERT(0); 9420 } 9421 9422 while (id >= (oldsvars = *np)) { 9423 dtrace_statvar_t **statics; 9424 int newsvars, oldsize, newsize; 9425 9426 if ((newsvars = (oldsvars << 1)) == 0) 9427 newsvars = 1; 9428 9429 oldsize = oldsvars * sizeof (dtrace_statvar_t *); 9430 newsize = newsvars * sizeof (dtrace_statvar_t *); 9431 9432 statics = kmem_zalloc(newsize, KM_SLEEP); 9433 9434 if (oldsize != 0) { 9435 bcopy(*svarp, statics, oldsize); 9436 kmem_free(*svarp, oldsize); 9437 } 9438 9439 *svarp = statics; 9440 *np = newsvars; 9441 } 9442 9443 if ((svar = (*svarp)[id]) == NULL) { 9444 svar = kmem_zalloc(sizeof (dtrace_statvar_t), KM_SLEEP); 9445 svar->dtsv_var = *v; 9446 9447 if ((svar->dtsv_size = dsize) != 0) { 9448 svar->dtsv_data = (uint64_t)(uintptr_t) 9449 kmem_zalloc(dsize, KM_SLEEP); 9450 } 9451 9452 (*svarp)[id] = svar; 9453 } 9454 9455 svar->dtsv_refcnt++; 9456 } 9457 9458 dtrace_difo_chunksize(dp, vstate); 9459 dtrace_difo_hold(dp); 9460 } 9461 9462 static dtrace_difo_t * 9463 dtrace_difo_duplicate(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9464 { 9465 dtrace_difo_t *new; 9466 size_t sz; 9467 9468 ASSERT(dp->dtdo_buf != NULL); 9469 ASSERT(dp->dtdo_refcnt != 0); 9470 9471 new = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 9472 9473 ASSERT(dp->dtdo_buf != NULL); 9474 sz = dp->dtdo_len * sizeof (dif_instr_t); 9475 new->dtdo_buf = kmem_alloc(sz, KM_SLEEP); 9476 bcopy(dp->dtdo_buf, new->dtdo_buf, sz); 9477 new->dtdo_len = dp->dtdo_len; 9478 9479 if (dp->dtdo_strtab != NULL) { 9480 ASSERT(dp->dtdo_strlen != 0); 9481 new->dtdo_strtab = kmem_alloc(dp->dtdo_strlen, KM_SLEEP); 9482 
bcopy(dp->dtdo_strtab, new->dtdo_strtab, dp->dtdo_strlen); 9483 new->dtdo_strlen = dp->dtdo_strlen; 9484 } 9485 9486 if (dp->dtdo_inttab != NULL) { 9487 ASSERT(dp->dtdo_intlen != 0); 9488 sz = dp->dtdo_intlen * sizeof (uint64_t); 9489 new->dtdo_inttab = kmem_alloc(sz, KM_SLEEP); 9490 bcopy(dp->dtdo_inttab, new->dtdo_inttab, sz); 9491 new->dtdo_intlen = dp->dtdo_intlen; 9492 } 9493 9494 if (dp->dtdo_vartab != NULL) { 9495 ASSERT(dp->dtdo_varlen != 0); 9496 sz = dp->dtdo_varlen * sizeof (dtrace_difv_t); 9497 new->dtdo_vartab = kmem_alloc(sz, KM_SLEEP); 9498 bcopy(dp->dtdo_vartab, new->dtdo_vartab, sz); 9499 new->dtdo_varlen = dp->dtdo_varlen; 9500 } 9501 9502 dtrace_difo_init(new, vstate); 9503 return (new); 9504 } 9505 9506 static void 9507 dtrace_difo_destroy(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9508 { 9509 int i; 9510 9511 ASSERT(dp->dtdo_refcnt == 0); 9512 9513 for (i = 0; i < dp->dtdo_varlen; i++) { 9514 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9515 dtrace_statvar_t *svar, **svarp = NULL; 9516 uint_t id; 9517 uint8_t scope = v->dtdv_scope; 9518 int *np = NULL; 9519 9520 switch (scope) { 9521 case DIFV_SCOPE_THREAD: 9522 continue; 9523 9524 case DIFV_SCOPE_LOCAL: 9525 np = &vstate->dtvs_nlocals; 9526 svarp = vstate->dtvs_locals; 9527 break; 9528 9529 case DIFV_SCOPE_GLOBAL: 9530 np = &vstate->dtvs_nglobals; 9531 svarp = vstate->dtvs_globals; 9532 break; 9533 9534 default: 9535 ASSERT(0); 9536 } 9537 9538 if ((id = v->dtdv_id) < DIF_VAR_OTHER_UBASE) 9539 continue; 9540 9541 id -= DIF_VAR_OTHER_UBASE; 9542 ASSERT(id < *np); 9543 9544 svar = svarp[id]; 9545 ASSERT(svar != NULL); 9546 ASSERT(svar->dtsv_refcnt > 0); 9547 9548 if (--svar->dtsv_refcnt > 0) 9549 continue; 9550 9551 if (svar->dtsv_size != 0) { 9552 ASSERT(svar->dtsv_data != 0); 9553 kmem_free((void *)(uintptr_t)svar->dtsv_data, 9554 svar->dtsv_size); 9555 } 9556 9557 kmem_free(svar, sizeof (dtrace_statvar_t)); 9558 svarp[id] = NULL; 9559 } 9560 9561 if (dp->dtdo_buf != NULL) 9562 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 9563 if (dp->dtdo_inttab != NULL) 9564 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 9565 if (dp->dtdo_strtab != NULL) 9566 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 9567 if (dp->dtdo_vartab != NULL) 9568 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 9569 9570 kmem_free(dp, sizeof (dtrace_difo_t)); 9571 } 9572 9573 static void 9574 dtrace_difo_release(dtrace_difo_t *dp, dtrace_vstate_t *vstate) 9575 { 9576 int i; 9577 9578 ASSERT(MUTEX_HELD(&dtrace_lock)); 9579 ASSERT(dp->dtdo_refcnt != 0); 9580 9581 for (i = 0; i < dp->dtdo_varlen; i++) { 9582 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 9583 9584 if (v->dtdv_id != DIF_VAR_VTIMESTAMP) 9585 continue; 9586 9587 ASSERT(dtrace_vtime_references > 0); 9588 if (--dtrace_vtime_references == 0) 9589 dtrace_vtime_disable(); 9590 } 9591 9592 if (--dp->dtdo_refcnt == 0) 9593 dtrace_difo_destroy(dp, vstate); 9594 } 9595 9596 /* 9597 * DTrace Format Functions 9598 */ 9599 static uint16_t 9600 dtrace_format_add(dtrace_state_t *state, char *str) 9601 { 9602 char *fmt, **new; 9603 uint16_t ndx, len = strlen(str) + 1; 9604 9605 fmt = kmem_zalloc(len, KM_SLEEP); 9606 bcopy(str, fmt, len); 9607 9608 for (ndx = 0; ndx < state->dts_nformats; ndx++) { 9609 if (state->dts_formats[ndx] == NULL) { 9610 state->dts_formats[ndx] = fmt; 9611 return (ndx + 1); 9612 } 9613 } 9614 9615 if (state->dts_nformats == USHRT_MAX) { 9616 /* 9617 * This is only likely if a denial-of-service attack is being 9618 * attempted. 
As such, it's okay to fail silently here. 9619 */ 9620 kmem_free(fmt, len); 9621 return (0); 9622 } 9623 9624 /* 9625 * For simplicity, we always resize the formats array to be exactly the 9626 * number of formats. 9627 */ 9628 ndx = state->dts_nformats++; 9629 new = kmem_alloc((ndx + 1) * sizeof (char *), KM_SLEEP); 9630 9631 if (state->dts_formats != NULL) { 9632 ASSERT(ndx != 0); 9633 bcopy(state->dts_formats, new, ndx * sizeof (char *)); 9634 kmem_free(state->dts_formats, ndx * sizeof (char *)); 9635 } 9636 9637 state->dts_formats = new; 9638 state->dts_formats[ndx] = fmt; 9639 9640 return (ndx + 1); 9641 } 9642 9643 static void 9644 dtrace_format_remove(dtrace_state_t *state, uint16_t format) 9645 { 9646 char *fmt; 9647 9648 ASSERT(state->dts_formats != NULL); 9649 ASSERT(format <= state->dts_nformats); 9650 ASSERT(state->dts_formats[format - 1] != NULL); 9651 9652 fmt = state->dts_formats[format - 1]; 9653 kmem_free(fmt, strlen(fmt) + 1); 9654 state->dts_formats[format - 1] = NULL; 9655 } 9656 9657 static void 9658 dtrace_format_destroy(dtrace_state_t *state) 9659 { 9660 int i; 9661 9662 if (state->dts_nformats == 0) { 9663 ASSERT(state->dts_formats == NULL); 9664 return; 9665 } 9666 9667 ASSERT(state->dts_formats != NULL); 9668 9669 for (i = 0; i < state->dts_nformats; i++) { 9670 char *fmt = state->dts_formats[i]; 9671 9672 if (fmt == NULL) 9673 continue; 9674 9675 kmem_free(fmt, strlen(fmt) + 1); 9676 } 9677 9678 kmem_free(state->dts_formats, state->dts_nformats * sizeof (char *)); 9679 state->dts_nformats = 0; 9680 state->dts_formats = NULL; 9681 } 9682 9683 /* 9684 * DTrace Predicate Functions 9685 */ 9686 static dtrace_predicate_t * 9687 dtrace_predicate_create(dtrace_difo_t *dp) 9688 { 9689 dtrace_predicate_t *pred; 9690 9691 ASSERT(MUTEX_HELD(&dtrace_lock)); 9692 ASSERT(dp->dtdo_refcnt != 0); 9693 9694 pred = kmem_zalloc(sizeof (dtrace_predicate_t), KM_SLEEP); 9695 pred->dtp_difo = dp; 9696 pred->dtp_refcnt = 1; 9697 9698 if (!dtrace_difo_cacheable(dp)) 9699 return (pred); 9700 9701 if (dtrace_predcache_id == DTRACE_CACHEIDNONE) { 9702 /* 9703 * This is only theoretically possible -- we have had 2^32 9704 * cacheable predicates on this machine. We cannot allow any 9705 * more predicates to become cacheable: as unlikely as it is, 9706 * there may be a thread caching a (now stale) predicate cache 9707 * ID. 
(N.B.: the temptation is being successfully resisted to 9708 * have this cmn_err() "Holy shit -- we executed this code!") 9709 */ 9710 return (pred); 9711 } 9712 9713 pred->dtp_cacheid = dtrace_predcache_id++; 9714 9715 return (pred); 9716 } 9717 9718 static void 9719 dtrace_predicate_hold(dtrace_predicate_t *pred) 9720 { 9721 ASSERT(MUTEX_HELD(&dtrace_lock)); 9722 ASSERT(pred->dtp_difo != NULL && pred->dtp_difo->dtdo_refcnt != 0); 9723 ASSERT(pred->dtp_refcnt > 0); 9724 9725 pred->dtp_refcnt++; 9726 } 9727 9728 static void 9729 dtrace_predicate_release(dtrace_predicate_t *pred, dtrace_vstate_t *vstate) 9730 { 9731 dtrace_difo_t *dp = pred->dtp_difo; 9732 9733 ASSERT(MUTEX_HELD(&dtrace_lock)); 9734 ASSERT(dp != NULL && dp->dtdo_refcnt != 0); 9735 ASSERT(pred->dtp_refcnt > 0); 9736 9737 if (--pred->dtp_refcnt == 0) { 9738 dtrace_difo_release(pred->dtp_difo, vstate); 9739 kmem_free(pred, sizeof (dtrace_predicate_t)); 9740 } 9741 } 9742 9743 /* 9744 * DTrace Action Description Functions 9745 */ 9746 static dtrace_actdesc_t * 9747 dtrace_actdesc_create(dtrace_actkind_t kind, uint32_t ntuple, 9748 uint64_t uarg, uint64_t arg) 9749 { 9750 dtrace_actdesc_t *act; 9751 9752 #if defined(sun) 9753 ASSERT(!DTRACEACT_ISPRINTFLIKE(kind) || (arg != NULL && 9754 arg >= KERNELBASE) || (arg == NULL && kind == DTRACEACT_PRINTA)); 9755 #endif 9756 9757 act = kmem_zalloc(sizeof (dtrace_actdesc_t), KM_SLEEP); 9758 act->dtad_kind = kind; 9759 act->dtad_ntuple = ntuple; 9760 act->dtad_uarg = uarg; 9761 act->dtad_arg = arg; 9762 act->dtad_refcnt = 1; 9763 9764 return (act); 9765 } 9766 9767 static void 9768 dtrace_actdesc_hold(dtrace_actdesc_t *act) 9769 { 9770 ASSERT(act->dtad_refcnt >= 1); 9771 act->dtad_refcnt++; 9772 } 9773 9774 static void 9775 dtrace_actdesc_release(dtrace_actdesc_t *act, dtrace_vstate_t *vstate) 9776 { 9777 dtrace_actkind_t kind = act->dtad_kind; 9778 dtrace_difo_t *dp; 9779 9780 ASSERT(act->dtad_refcnt >= 1); 9781 9782 if (--act->dtad_refcnt != 0) 9783 return; 9784 9785 if ((dp = act->dtad_difo) != NULL) 9786 dtrace_difo_release(dp, vstate); 9787 9788 if (DTRACEACT_ISPRINTFLIKE(kind)) { 9789 char *str = (char *)(uintptr_t)act->dtad_arg; 9790 9791 #if defined(sun) 9792 ASSERT((str != NULL && (uintptr_t)str >= KERNELBASE) || 9793 (str == NULL && act->dtad_kind == DTRACEACT_PRINTA)); 9794 #endif 9795 9796 if (str != NULL) 9797 kmem_free(str, strlen(str) + 1); 9798 } 9799 9800 kmem_free(act, sizeof (dtrace_actdesc_t)); 9801 } 9802 9803 /* 9804 * DTrace ECB Functions 9805 */ 9806 static dtrace_ecb_t * 9807 dtrace_ecb_add(dtrace_state_t *state, dtrace_probe_t *probe) 9808 { 9809 dtrace_ecb_t *ecb; 9810 dtrace_epid_t epid; 9811 9812 ASSERT(MUTEX_HELD(&dtrace_lock)); 9813 9814 ecb = kmem_zalloc(sizeof (dtrace_ecb_t), KM_SLEEP); 9815 ecb->dte_predicate = NULL; 9816 ecb->dte_probe = probe; 9817 9818 /* 9819 * The default size is the size of the default action: recording 9820 * the header. 
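 * (That is, a freshly-added ECB needs only sizeof (dtrace_rechdr_t)
 * bytes, aligned for a dtrace_epid_t; dtrace_ecb_resize() grows it
 * as actions are added.)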
9821 */ 9822 ecb->dte_size = ecb->dte_needed = sizeof (dtrace_rechdr_t); 9823 ecb->dte_alignment = sizeof (dtrace_epid_t); 9824 9825 epid = state->dts_epid++; 9826 9827 if (epid - 1 >= state->dts_necbs) { 9828 dtrace_ecb_t **oecbs = state->dts_ecbs, **ecbs; 9829 int necbs = state->dts_necbs << 1; 9830 9831 ASSERT(epid == state->dts_necbs + 1); 9832 9833 if (necbs == 0) { 9834 ASSERT(oecbs == NULL); 9835 necbs = 1; 9836 } 9837 9838 ecbs = kmem_zalloc(necbs * sizeof (*ecbs), KM_SLEEP); 9839 9840 if (oecbs != NULL) 9841 bcopy(oecbs, ecbs, state->dts_necbs * sizeof (*ecbs)); 9842 9843 dtrace_membar_producer(); 9844 state->dts_ecbs = ecbs; 9845 9846 if (oecbs != NULL) { 9847 /* 9848 * If this state is active, we must dtrace_sync() 9849 * before we can free the old dts_ecbs array: we're 9850 * coming in hot, and there may be active ring 9851 * buffer processing (which indexes into the dts_ecbs 9852 * array) on another CPU. 9853 */ 9854 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 9855 dtrace_sync(); 9856 9857 kmem_free(oecbs, state->dts_necbs * sizeof (*ecbs)); 9858 } 9859 9860 dtrace_membar_producer(); 9861 state->dts_necbs = necbs; 9862 } 9863 9864 ecb->dte_state = state; 9865 9866 ASSERT(state->dts_ecbs[epid - 1] == NULL); 9867 dtrace_membar_producer(); 9868 state->dts_ecbs[(ecb->dte_epid = epid) - 1] = ecb; 9869 9870 return (ecb); 9871 } 9872 9873 static void 9874 dtrace_ecb_enable(dtrace_ecb_t *ecb) 9875 { 9876 dtrace_probe_t *probe = ecb->dte_probe; 9877 9878 ASSERT(MUTEX_HELD(&cpu_lock)); 9879 ASSERT(MUTEX_HELD(&dtrace_lock)); 9880 ASSERT(ecb->dte_next == NULL); 9881 9882 if (probe == NULL) { 9883 /* 9884 * This is the NULL probe -- there's nothing to do. 9885 */ 9886 return; 9887 } 9888 9889 if (probe->dtpr_ecb == NULL) { 9890 dtrace_provider_t *prov = probe->dtpr_provider; 9891 9892 /* 9893 * We're the first ECB on this probe. 9894 */ 9895 probe->dtpr_ecb = probe->dtpr_ecb_last = ecb; 9896 9897 if (ecb->dte_predicate != NULL) 9898 probe->dtpr_predcache = ecb->dte_predicate->dtp_cacheid; 9899 9900 prov->dtpv_pops.dtps_enable(prov->dtpv_arg, 9901 probe->dtpr_id, probe->dtpr_arg); 9902 } else { 9903 /* 9904 * This probe is already active. Swing the last pointer to 9905 * point to the new ECB, and issue a dtrace_sync() to assure 9906 * that all CPUs have seen the change. 9907 */ 9908 ASSERT(probe->dtpr_ecb_last != NULL); 9909 probe->dtpr_ecb_last->dte_next = ecb; 9910 probe->dtpr_ecb_last = ecb; 9911 probe->dtpr_predcache = 0; 9912 9913 dtrace_sync(); 9914 } 9915 } 9916 9917 static void 9918 dtrace_ecb_resize(dtrace_ecb_t *ecb) 9919 { 9920 dtrace_action_t *act; 9921 uint32_t curneeded = UINT32_MAX; 9922 uint32_t aggbase = UINT32_MAX; 9923 9924 /* 9925 * If we record anything, we always record the dtrace_rechdr_t. (And 9926 * we always record it first.) 
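 * For example (purely illustrative), an ECB recording a 4-byte datum
 * followed by an 8-byte datum is laid out as the header, the 4-byte
 * record, then padding so that the 8-byte record falls on an 8-byte
 * boundary; the ECB's alignment becomes the largest record alignment,
 * and the final size is rounded up to dtrace_epid_t alignment.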
9927 */ 9928 ecb->dte_size = sizeof (dtrace_rechdr_t); 9929 ecb->dte_alignment = sizeof (dtrace_epid_t); 9930 9931 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 9932 dtrace_recdesc_t *rec = &act->dta_rec; 9933 ASSERT(rec->dtrd_size > 0 || rec->dtrd_alignment == 1); 9934 9935 ecb->dte_alignment = MAX(ecb->dte_alignment, 9936 rec->dtrd_alignment); 9937 9938 if (DTRACEACT_ISAGG(act->dta_kind)) { 9939 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 9940 9941 ASSERT(rec->dtrd_size != 0); 9942 ASSERT(agg->dtag_first != NULL); 9943 ASSERT(act->dta_prev->dta_intuple); 9944 ASSERT(aggbase != UINT32_MAX); 9945 ASSERT(curneeded != UINT32_MAX); 9946 9947 agg->dtag_base = aggbase; 9948 9949 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 9950 rec->dtrd_offset = curneeded; 9951 curneeded += rec->dtrd_size; 9952 ecb->dte_needed = MAX(ecb->dte_needed, curneeded); 9953 9954 aggbase = UINT32_MAX; 9955 curneeded = UINT32_MAX; 9956 } else if (act->dta_intuple) { 9957 if (curneeded == UINT32_MAX) { 9958 /* 9959 * This is the first record in a tuple. Align 9960 * curneeded to be at offset 4 in an 8-byte 9961 * aligned block. 9962 */ 9963 ASSERT(act->dta_prev == NULL || 9964 !act->dta_prev->dta_intuple); 9965 ASSERT3U(aggbase, ==, UINT32_MAX); 9966 curneeded = P2PHASEUP(ecb->dte_size, 9967 sizeof (uint64_t), sizeof (dtrace_aggid_t)); 9968 9969 aggbase = curneeded - sizeof (dtrace_aggid_t); 9970 ASSERT(IS_P2ALIGNED(aggbase, 9971 sizeof (uint64_t))); 9972 } 9973 curneeded = P2ROUNDUP(curneeded, rec->dtrd_alignment); 9974 rec->dtrd_offset = curneeded; 9975 curneeded += rec->dtrd_size; 9976 } else { 9977 /* tuples must be followed by an aggregation */ 9978 ASSERT(act->dta_prev == NULL || 9979 !act->dta_prev->dta_intuple); 9980 9981 ecb->dte_size = P2ROUNDUP(ecb->dte_size, 9982 rec->dtrd_alignment); 9983 rec->dtrd_offset = ecb->dte_size; 9984 ecb->dte_size += rec->dtrd_size; 9985 ecb->dte_needed = MAX(ecb->dte_needed, ecb->dte_size); 9986 } 9987 } 9988 9989 if ((act = ecb->dte_action) != NULL && 9990 !(act->dta_kind == DTRACEACT_SPECULATE && act->dta_next == NULL) && 9991 ecb->dte_size == sizeof (dtrace_rechdr_t)) { 9992 /* 9993 * If the size is still sizeof (dtrace_rechdr_t), then all 9994 * actions store no data; set the size to 0. 
9995 */ 9996 ecb->dte_size = 0; 9997 } 9998 9999 ecb->dte_size = P2ROUNDUP(ecb->dte_size, sizeof (dtrace_epid_t)); 10000 ecb->dte_needed = P2ROUNDUP(ecb->dte_needed, (sizeof (dtrace_epid_t))); 10001 ecb->dte_state->dts_needed = MAX(ecb->dte_state->dts_needed, 10002 ecb->dte_needed); 10003 } 10004 10005 static dtrace_action_t * 10006 dtrace_ecb_aggregation_create(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 10007 { 10008 dtrace_aggregation_t *agg; 10009 size_t size = sizeof (uint64_t); 10010 int ntuple = desc->dtad_ntuple; 10011 dtrace_action_t *act; 10012 dtrace_recdesc_t *frec; 10013 dtrace_aggid_t aggid; 10014 dtrace_state_t *state = ecb->dte_state; 10015 10016 agg = kmem_zalloc(sizeof (dtrace_aggregation_t), KM_SLEEP); 10017 agg->dtag_ecb = ecb; 10018 10019 ASSERT(DTRACEACT_ISAGG(desc->dtad_kind)); 10020 10021 switch (desc->dtad_kind) { 10022 case DTRACEAGG_MIN: 10023 agg->dtag_initial = INT64_MAX; 10024 agg->dtag_aggregate = dtrace_aggregate_min; 10025 break; 10026 10027 case DTRACEAGG_MAX: 10028 agg->dtag_initial = INT64_MIN; 10029 agg->dtag_aggregate = dtrace_aggregate_max; 10030 break; 10031 10032 case DTRACEAGG_COUNT: 10033 agg->dtag_aggregate = dtrace_aggregate_count; 10034 break; 10035 10036 case DTRACEAGG_QUANTIZE: 10037 agg->dtag_aggregate = dtrace_aggregate_quantize; 10038 size = (((sizeof (uint64_t) * NBBY) - 1) * 2 + 1) * 10039 sizeof (uint64_t); 10040 break; 10041 10042 case DTRACEAGG_LQUANTIZE: { 10043 uint16_t step = DTRACE_LQUANTIZE_STEP(desc->dtad_arg); 10044 uint16_t levels = DTRACE_LQUANTIZE_LEVELS(desc->dtad_arg); 10045 10046 agg->dtag_initial = desc->dtad_arg; 10047 agg->dtag_aggregate = dtrace_aggregate_lquantize; 10048 10049 if (step == 0 || levels == 0) 10050 goto err; 10051 10052 size = levels * sizeof (uint64_t) + 3 * sizeof (uint64_t); 10053 break; 10054 } 10055 10056 case DTRACEAGG_LLQUANTIZE: { 10057 uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(desc->dtad_arg); 10058 uint16_t low = DTRACE_LLQUANTIZE_LOW(desc->dtad_arg); 10059 uint16_t high = DTRACE_LLQUANTIZE_HIGH(desc->dtad_arg); 10060 uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(desc->dtad_arg); 10061 int64_t v; 10062 10063 agg->dtag_initial = desc->dtad_arg; 10064 agg->dtag_aggregate = dtrace_aggregate_llquantize; 10065 10066 if (factor < 2 || low >= high || nsteps < factor) 10067 goto err; 10068 10069 /* 10070 * Now check that the number of steps evenly divides a power 10071 * of the factor. (This assures both integer bucket size and 10072 * linearity within each magnitude.) 10073 */ 10074 for (v = factor; v < nsteps; v *= factor) 10075 continue; 10076 10077 if ((v % nsteps) || (nsteps % factor)) 10078 goto err; 10079 10080 size = (dtrace_aggregate_llquantize_bucket(factor, 10081 low, high, nsteps, INT64_MAX) + 2) * sizeof (uint64_t); 10082 break; 10083 } 10084 10085 case DTRACEAGG_AVG: 10086 agg->dtag_aggregate = dtrace_aggregate_avg; 10087 size = sizeof (uint64_t) * 2; 10088 break; 10089 10090 case DTRACEAGG_STDDEV: 10091 agg->dtag_aggregate = dtrace_aggregate_stddev; 10092 size = sizeof (uint64_t) * 4; 10093 break; 10094 10095 case DTRACEAGG_SUM: 10096 agg->dtag_aggregate = dtrace_aggregate_sum; 10097 break; 10098 10099 default: 10100 goto err; 10101 } 10102 10103 agg->dtag_action.dta_rec.dtrd_size = size; 10104 10105 if (ntuple == 0) 10106 goto err; 10107 10108 /* 10109 * We must make sure that we have enough actions for the n-tuple. 
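 * For example, an aggregation such as
 *
 *	@a[pid, execname] = count();
 *
 * arrives here with dtad_ntuple == 2: walking backward from the last
 * action must find both key-recording actions before reaching an
 * earlier aggregating action, or the description is rejected.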
10110 */ 10111 for (act = ecb->dte_action_last; act != NULL; act = act->dta_prev) { 10112 if (DTRACEACT_ISAGG(act->dta_kind)) 10113 break; 10114 10115 if (--ntuple == 0) { 10116 /* 10117 * This is the action with which our n-tuple begins. 10118 */ 10119 agg->dtag_first = act; 10120 goto success; 10121 } 10122 } 10123 10124 /* 10125 * This n-tuple is short by ntuple elements. Return failure. 10126 */ 10127 ASSERT(ntuple != 0); 10128 err: 10129 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10130 return (NULL); 10131 10132 success: 10133 /* 10134 * If the last action in the tuple has a size of zero, it's actually 10135 * an expression argument for the aggregating action. 10136 */ 10137 ASSERT(ecb->dte_action_last != NULL); 10138 act = ecb->dte_action_last; 10139 10140 if (act->dta_kind == DTRACEACT_DIFEXPR) { 10141 ASSERT(act->dta_difo != NULL); 10142 10143 if (act->dta_difo->dtdo_rtype.dtdt_size == 0) 10144 agg->dtag_hasarg = 1; 10145 } 10146 10147 /* 10148 * We need to allocate an id for this aggregation. 10149 */ 10150 #if defined(sun) 10151 aggid = (dtrace_aggid_t)(uintptr_t)vmem_alloc(state->dts_aggid_arena, 1, 10152 VM_BESTFIT | VM_SLEEP); 10153 #else 10154 aggid = alloc_unr(state->dts_aggid_arena); 10155 #endif 10156 10157 if (aggid - 1 >= state->dts_naggregations) { 10158 dtrace_aggregation_t **oaggs = state->dts_aggregations; 10159 dtrace_aggregation_t **aggs; 10160 int naggs = state->dts_naggregations << 1; 10161 int onaggs = state->dts_naggregations; 10162 10163 ASSERT(aggid == state->dts_naggregations + 1); 10164 10165 if (naggs == 0) { 10166 ASSERT(oaggs == NULL); 10167 naggs = 1; 10168 } 10169 10170 aggs = kmem_zalloc(naggs * sizeof (*aggs), KM_SLEEP); 10171 10172 if (oaggs != NULL) { 10173 bcopy(oaggs, aggs, onaggs * sizeof (*aggs)); 10174 kmem_free(oaggs, onaggs * sizeof (*aggs)); 10175 } 10176 10177 state->dts_aggregations = aggs; 10178 state->dts_naggregations = naggs; 10179 } 10180 10181 ASSERT(state->dts_aggregations[aggid - 1] == NULL); 10182 state->dts_aggregations[(agg->dtag_id = aggid) - 1] = agg; 10183 10184 frec = &agg->dtag_first->dta_rec; 10185 if (frec->dtrd_alignment < sizeof (dtrace_aggid_t)) 10186 frec->dtrd_alignment = sizeof (dtrace_aggid_t); 10187 10188 for (act = agg->dtag_first; act != NULL; act = act->dta_next) { 10189 ASSERT(!act->dta_intuple); 10190 act->dta_intuple = 1; 10191 } 10192 10193 return (&agg->dtag_action); 10194 } 10195 10196 static void 10197 dtrace_ecb_aggregation_destroy(dtrace_ecb_t *ecb, dtrace_action_t *act) 10198 { 10199 dtrace_aggregation_t *agg = (dtrace_aggregation_t *)act; 10200 dtrace_state_t *state = ecb->dte_state; 10201 dtrace_aggid_t aggid = agg->dtag_id; 10202 10203 ASSERT(DTRACEACT_ISAGG(act->dta_kind)); 10204 #if defined(sun) 10205 vmem_free(state->dts_aggid_arena, (void *)(uintptr_t)aggid, 1); 10206 #else 10207 free_unr(state->dts_aggid_arena, aggid); 10208 #endif 10209 10210 ASSERT(state->dts_aggregations[aggid - 1] == agg); 10211 state->dts_aggregations[aggid - 1] = NULL; 10212 10213 kmem_free(agg, sizeof (dtrace_aggregation_t)); 10214 } 10215 10216 static int 10217 dtrace_ecb_action_add(dtrace_ecb_t *ecb, dtrace_actdesc_t *desc) 10218 { 10219 dtrace_action_t *action, *last; 10220 dtrace_difo_t *dp = desc->dtad_difo; 10221 uint32_t size = 0, align = sizeof (uint8_t), mask; 10222 uint16_t format = 0; 10223 dtrace_recdesc_t *rec; 10224 dtrace_state_t *state = ecb->dte_state; 10225 dtrace_optval_t *opt = state->dts_options, nframes = 0, strsize; 10226 uint64_t arg = desc->dtad_arg; 10227 10228 
ASSERT(MUTEX_HELD(&dtrace_lock)); 10229 ASSERT(ecb->dte_action == NULL || ecb->dte_action->dta_refcnt == 1); 10230 10231 if (DTRACEACT_ISAGG(desc->dtad_kind)) { 10232 /* 10233 * If this is an aggregating action, there must be neither 10234 * a speculate nor a commit on the action chain. 10235 */ 10236 dtrace_action_t *act; 10237 10238 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 10239 if (act->dta_kind == DTRACEACT_COMMIT) 10240 return (EINVAL); 10241 10242 if (act->dta_kind == DTRACEACT_SPECULATE) 10243 return (EINVAL); 10244 } 10245 10246 action = dtrace_ecb_aggregation_create(ecb, desc); 10247 10248 if (action == NULL) 10249 return (EINVAL); 10250 } else { 10251 if (DTRACEACT_ISDESTRUCTIVE(desc->dtad_kind) || 10252 (desc->dtad_kind == DTRACEACT_DIFEXPR && 10253 dp != NULL && dp->dtdo_destructive)) { 10254 state->dts_destructive = 1; 10255 } 10256 10257 switch (desc->dtad_kind) { 10258 case DTRACEACT_PRINTF: 10259 case DTRACEACT_PRINTA: 10260 case DTRACEACT_SYSTEM: 10261 case DTRACEACT_FREOPEN: 10262 case DTRACEACT_DIFEXPR: 10263 /* 10264 * We know that our arg is a string -- turn it into a 10265 * format. 10266 */ 10267 if (arg == 0) { 10268 ASSERT(desc->dtad_kind == DTRACEACT_PRINTA || 10269 desc->dtad_kind == DTRACEACT_DIFEXPR); 10270 format = 0; 10271 } else { 10272 ASSERT(arg != 0); 10273 #if defined(sun) 10274 ASSERT(arg > KERNELBASE); 10275 #endif 10276 format = dtrace_format_add(state, 10277 (char *)(uintptr_t)arg); 10278 } 10279 10280 /*FALLTHROUGH*/ 10281 case DTRACEACT_LIBACT: 10282 case DTRACEACT_TRACEMEM: 10283 case DTRACEACT_TRACEMEM_DYNSIZE: 10284 if (dp == NULL) 10285 return (EINVAL); 10286 10287 if ((size = dp->dtdo_rtype.dtdt_size) != 0) 10288 break; 10289 10290 if (dp->dtdo_rtype.dtdt_kind == DIF_TYPE_STRING) { 10291 if (!(dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10292 return (EINVAL); 10293 10294 size = opt[DTRACEOPT_STRSIZE]; 10295 } 10296 10297 break; 10298 10299 case DTRACEACT_STACK: 10300 if ((nframes = arg) == 0) { 10301 nframes = opt[DTRACEOPT_STACKFRAMES]; 10302 ASSERT(nframes > 0); 10303 arg = nframes; 10304 } 10305 10306 size = nframes * sizeof (pc_t); 10307 break; 10308 10309 case DTRACEACT_JSTACK: 10310 if ((strsize = DTRACE_USTACK_STRSIZE(arg)) == 0) 10311 strsize = opt[DTRACEOPT_JSTACKSTRSIZE]; 10312 10313 if ((nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) 10314 nframes = opt[DTRACEOPT_JSTACKFRAMES]; 10315 10316 arg = DTRACE_USTACK_ARG(nframes, strsize); 10317 10318 /*FALLTHROUGH*/ 10319 case DTRACEACT_USTACK: 10320 if (desc->dtad_kind != DTRACEACT_JSTACK && 10321 (nframes = DTRACE_USTACK_NFRAMES(arg)) == 0) { 10322 strsize = DTRACE_USTACK_STRSIZE(arg); 10323 nframes = opt[DTRACEOPT_USTACKFRAMES]; 10324 ASSERT(nframes > 0); 10325 arg = DTRACE_USTACK_ARG(nframes, strsize); 10326 } 10327 10328 /* 10329 * Save a slot for the pid. 
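 * The record is thus laid out as one 64-bit slot for the pid, followed * by nframes 64-bit program counter values, followed by * DTRACE_USTACK_STRSIZE(arg) bytes of string space, rounded up to * pointer-size alignment.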
10330 */ 10331 size = (nframes + 1) * sizeof (uint64_t); 10332 size += DTRACE_USTACK_STRSIZE(arg); 10333 size = P2ROUNDUP(size, (uint32_t)(sizeof (uintptr_t))); 10334 10335 break; 10336 10337 case DTRACEACT_SYM: 10338 case DTRACEACT_MOD: 10339 if (dp == NULL || ((size = dp->dtdo_rtype.dtdt_size) != 10340 sizeof (uint64_t)) || 10341 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10342 return (EINVAL); 10343 break; 10344 10345 case DTRACEACT_USYM: 10346 case DTRACEACT_UMOD: 10347 case DTRACEACT_UADDR: 10348 if (dp == NULL || 10349 (dp->dtdo_rtype.dtdt_size != sizeof (uint64_t)) || 10350 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10351 return (EINVAL); 10352 10353 /* 10354 * We have a slot for the pid, plus a slot for the 10355 * argument. To keep things simple (aligned with 10356 * bitness-neutral sizing), we store each as a 64-bit 10357 * quantity. 10358 */ 10359 size = 2 * sizeof (uint64_t); 10360 break; 10361 10362 case DTRACEACT_STOP: 10363 case DTRACEACT_BREAKPOINT: 10364 case DTRACEACT_PANIC: 10365 break; 10366 10367 case DTRACEACT_CHILL: 10368 case DTRACEACT_DISCARD: 10369 case DTRACEACT_RAISE: 10370 if (dp == NULL) 10371 return (EINVAL); 10372 break; 10373 10374 case DTRACEACT_EXIT: 10375 if (dp == NULL || 10376 (size = dp->dtdo_rtype.dtdt_size) != sizeof (int) || 10377 (dp->dtdo_rtype.dtdt_flags & DIF_TF_BYREF)) 10378 return (EINVAL); 10379 break; 10380 10381 case DTRACEACT_SPECULATE: 10382 if (ecb->dte_size > sizeof (dtrace_rechdr_t)) 10383 return (EINVAL); 10384 10385 if (dp == NULL) 10386 return (EINVAL); 10387 10388 state->dts_speculates = 1; 10389 break; 10390 10391 case DTRACEACT_PRINTM: 10392 size = dp->dtdo_rtype.dtdt_size; 10393 break; 10394 10395 case DTRACEACT_PRINTT: 10396 size = dp->dtdo_rtype.dtdt_size; 10397 break; 10398 10399 case DTRACEACT_COMMIT: { 10400 dtrace_action_t *act = ecb->dte_action; 10401 10402 for (; act != NULL; act = act->dta_next) { 10403 if (act->dta_kind == DTRACEACT_COMMIT) 10404 return (EINVAL); 10405 } 10406 10407 if (dp == NULL) 10408 return (EINVAL); 10409 break; 10410 } 10411 10412 default: 10413 return (EINVAL); 10414 } 10415 10416 if (size != 0 || desc->dtad_kind == DTRACEACT_SPECULATE) { 10417 /* 10418 * If this is a data-storing action or a speculate, 10419 * we must be sure that there isn't a commit on the 10420 * action chain. 
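 * (Data-recording actions and speculate() may therefore never follow * a commit() within a single clause.)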
10421 */ 10422 dtrace_action_t *act = ecb->dte_action; 10423 10424 for (; act != NULL; act = act->dta_next) { 10425 if (act->dta_kind == DTRACEACT_COMMIT) 10426 return (EINVAL); 10427 } 10428 } 10429 10430 action = kmem_zalloc(sizeof (dtrace_action_t), KM_SLEEP); 10431 action->dta_rec.dtrd_size = size; 10432 } 10433 10434 action->dta_refcnt = 1; 10435 rec = &action->dta_rec; 10436 size = rec->dtrd_size; 10437 10438 for (mask = sizeof (uint64_t) - 1; size != 0 && mask > 0; mask >>= 1) { 10439 if (!(size & mask)) { 10440 align = mask + 1; 10441 break; 10442 } 10443 } 10444 10445 action->dta_kind = desc->dtad_kind; 10446 10447 if ((action->dta_difo = dp) != NULL) 10448 dtrace_difo_hold(dp); 10449 10450 rec->dtrd_action = action->dta_kind; 10451 rec->dtrd_arg = arg; 10452 rec->dtrd_uarg = desc->dtad_uarg; 10453 rec->dtrd_alignment = (uint16_t)align; 10454 rec->dtrd_format = format; 10455 10456 if ((last = ecb->dte_action_last) != NULL) { 10457 ASSERT(ecb->dte_action != NULL); 10458 action->dta_prev = last; 10459 last->dta_next = action; 10460 } else { 10461 ASSERT(ecb->dte_action == NULL); 10462 ecb->dte_action = action; 10463 } 10464 10465 ecb->dte_action_last = action; 10466 10467 return (0); 10468 } 10469 10470 static void 10471 dtrace_ecb_action_remove(dtrace_ecb_t *ecb) 10472 { 10473 dtrace_action_t *act = ecb->dte_action, *next; 10474 dtrace_vstate_t *vstate = &ecb->dte_state->dts_vstate; 10475 dtrace_difo_t *dp; 10476 uint16_t format; 10477 10478 if (act != NULL && act->dta_refcnt > 1) { 10479 ASSERT(act->dta_next == NULL || act->dta_next->dta_refcnt == 1); 10480 act->dta_refcnt--; 10481 } else { 10482 for (; act != NULL; act = next) { 10483 next = act->dta_next; 10484 ASSERT(next != NULL || act == ecb->dte_action_last); 10485 ASSERT(act->dta_refcnt == 1); 10486 10487 if ((format = act->dta_rec.dtrd_format) != 0) 10488 dtrace_format_remove(ecb->dte_state, format); 10489 10490 if ((dp = act->dta_difo) != NULL) 10491 dtrace_difo_release(dp, vstate); 10492 10493 if (DTRACEACT_ISAGG(act->dta_kind)) { 10494 dtrace_ecb_aggregation_destroy(ecb, act); 10495 } else { 10496 kmem_free(act, sizeof (dtrace_action_t)); 10497 } 10498 } 10499 } 10500 10501 ecb->dte_action = NULL; 10502 ecb->dte_action_last = NULL; 10503 ecb->dte_size = 0; 10504 } 10505 10506 static void 10507 dtrace_ecb_disable(dtrace_ecb_t *ecb) 10508 { 10509 /* 10510 * We disable the ECB by removing it from its probe. 10511 */ 10512 dtrace_ecb_t *pecb, *prev = NULL; 10513 dtrace_probe_t *probe = ecb->dte_probe; 10514 10515 ASSERT(MUTEX_HELD(&dtrace_lock)); 10516 10517 if (probe == NULL) { 10518 /* 10519 * This is the NULL probe; there is nothing to disable. 10520 */ 10521 return; 10522 } 10523 10524 for (pecb = probe->dtpr_ecb; pecb != NULL; pecb = pecb->dte_next) { 10525 if (pecb == ecb) 10526 break; 10527 prev = pecb; 10528 } 10529 10530 ASSERT(pecb != NULL); 10531 10532 if (prev == NULL) { 10533 probe->dtpr_ecb = ecb->dte_next; 10534 } else { 10535 prev->dte_next = ecb->dte_next; 10536 } 10537 10538 if (ecb == probe->dtpr_ecb_last) { 10539 ASSERT(ecb->dte_next == NULL); 10540 probe->dtpr_ecb_last = prev; 10541 } 10542 10543 /* 10544 * The ECB has been disconnected from the probe; now sync to assure 10545 * that all CPUs have seen the change before returning. 10546 */ 10547 dtrace_sync(); 10548 10549 if (probe->dtpr_ecb == NULL) { 10550 /* 10551 * That was the last ECB on the probe; clear the predicate 10552 * cache ID for the probe, disable it and sync one more time 10553 * to assure that we'll never hit it again. 
10554 */ 10555 dtrace_provider_t *prov = probe->dtpr_provider; 10556 10557 ASSERT(ecb->dte_next == NULL); 10558 ASSERT(probe->dtpr_ecb_last == NULL); 10559 probe->dtpr_predcache = DTRACE_CACHEIDNONE; 10560 prov->dtpv_pops.dtps_disable(prov->dtpv_arg, 10561 probe->dtpr_id, probe->dtpr_arg); 10562 dtrace_sync(); 10563 } else { 10564 /* 10565 * There is at least one ECB remaining on the probe. If there 10566 * is _exactly_ one, set the probe's predicate cache ID to be 10567 * the predicate cache ID of the remaining ECB. 10568 */ 10569 ASSERT(probe->dtpr_ecb_last != NULL); 10570 ASSERT(probe->dtpr_predcache == DTRACE_CACHEIDNONE); 10571 10572 if (probe->dtpr_ecb == probe->dtpr_ecb_last) { 10573 dtrace_predicate_t *p = probe->dtpr_ecb->dte_predicate; 10574 10575 ASSERT(probe->dtpr_ecb->dte_next == NULL); 10576 10577 if (p != NULL) 10578 probe->dtpr_predcache = p->dtp_cacheid; 10579 } 10580 10581 ecb->dte_next = NULL; 10582 } 10583 } 10584 10585 static void 10586 dtrace_ecb_destroy(dtrace_ecb_t *ecb) 10587 { 10588 dtrace_state_t *state = ecb->dte_state; 10589 dtrace_vstate_t *vstate = &state->dts_vstate; 10590 dtrace_predicate_t *pred; 10591 dtrace_epid_t epid = ecb->dte_epid; 10592 10593 ASSERT(MUTEX_HELD(&dtrace_lock)); 10594 ASSERT(ecb->dte_next == NULL); 10595 ASSERT(ecb->dte_probe == NULL || ecb->dte_probe->dtpr_ecb != ecb); 10596 10597 if ((pred = ecb->dte_predicate) != NULL) 10598 dtrace_predicate_release(pred, vstate); 10599 10600 dtrace_ecb_action_remove(ecb); 10601 10602 ASSERT(state->dts_ecbs[epid - 1] == ecb); 10603 state->dts_ecbs[epid - 1] = NULL; 10604 10605 kmem_free(ecb, sizeof (dtrace_ecb_t)); 10606 } 10607 10608 static dtrace_ecb_t * 10609 dtrace_ecb_create(dtrace_state_t *state, dtrace_probe_t *probe, 10610 dtrace_enabling_t *enab) 10611 { 10612 dtrace_ecb_t *ecb; 10613 dtrace_predicate_t *pred; 10614 dtrace_actdesc_t *act; 10615 dtrace_provider_t *prov; 10616 dtrace_ecbdesc_t *desc = enab->dten_current; 10617 10618 ASSERT(MUTEX_HELD(&dtrace_lock)); 10619 ASSERT(state != NULL); 10620 10621 ecb = dtrace_ecb_add(state, probe); 10622 ecb->dte_uarg = desc->dted_uarg; 10623 10624 if ((pred = desc->dted_pred.dtpdd_predicate) != NULL) { 10625 dtrace_predicate_hold(pred); 10626 ecb->dte_predicate = pred; 10627 } 10628 10629 if (probe != NULL) { 10630 /* 10631 * If the provider shows more leg than the consumer is old 10632 * enough to see, we need to enable the appropriate implicit 10633 * predicate bits to prevent the ecb from activating at 10634 * revealing times. 10635 * 10636 * Providers specifying DTRACE_PRIV_USER at register time 10637 * are stating that they need the /proc-style privilege 10638 * model to be enforced, and this is what DTRACE_COND_OWNER 10639 * and DTRACE_COND_ZONEOWNER will then do at probe time. 10640 */ 10641 prov = probe->dtpr_provider; 10642 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLPROC) && 10643 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10644 ecb->dte_cond |= DTRACE_COND_OWNER; 10645 10646 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_ALLZONE) && 10647 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_USER)) 10648 ecb->dte_cond |= DTRACE_COND_ZONEOWNER; 10649 10650 /* 10651 * If the provider shows us kernel innards and the user 10652 * is lacking sufficient privilege, enable the 10653 * DTRACE_COND_USERMODE implicit predicate. 
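 * (At probe time, DTRACE_COND_USERMODE causes the ECB to be skipped * unless the probe fired while the CPU was executing in user mode.)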
10654 */ 10655 if (!(state->dts_cred.dcr_visible & DTRACE_CRV_KERNEL) && 10656 (prov->dtpv_priv.dtpp_flags & DTRACE_PRIV_KERNEL)) 10657 ecb->dte_cond |= DTRACE_COND_USERMODE; 10658 } 10659 10660 if (dtrace_ecb_create_cache != NULL) { 10661 /* 10662 * If we have a cached ecb, we'll use its action list instead 10663 * of creating our own (saving both time and space). 10664 */ 10665 dtrace_ecb_t *cached = dtrace_ecb_create_cache; 10666 dtrace_action_t *act = cached->dte_action; 10667 10668 if (act != NULL) { 10669 ASSERT(act->dta_refcnt > 0); 10670 act->dta_refcnt++; 10671 ecb->dte_action = act; 10672 ecb->dte_action_last = cached->dte_action_last; 10673 ecb->dte_needed = cached->dte_needed; 10674 ecb->dte_size = cached->dte_size; 10675 ecb->dte_alignment = cached->dte_alignment; 10676 } 10677 10678 return (ecb); 10679 } 10680 10681 for (act = desc->dted_action; act != NULL; act = act->dtad_next) { 10682 if ((enab->dten_error = dtrace_ecb_action_add(ecb, act)) != 0) { 10683 dtrace_ecb_destroy(ecb); 10684 return (NULL); 10685 } 10686 } 10687 10688 dtrace_ecb_resize(ecb); 10689 10690 return (dtrace_ecb_create_cache = ecb); 10691 } 10692 10693 static int 10694 dtrace_ecb_create_enable(dtrace_probe_t *probe, void *arg) 10695 { 10696 dtrace_ecb_t *ecb; 10697 dtrace_enabling_t *enab = arg; 10698 dtrace_state_t *state = enab->dten_vstate->dtvs_state; 10699 10700 ASSERT(state != NULL); 10701 10702 if (probe != NULL && probe->dtpr_gen < enab->dten_probegen) { 10703 /* 10704 * This probe was created in a generation for which this 10705 * enabling has previously created ECBs; we don't want to 10706 * enable it again, so just kick out. 10707 */ 10708 return (DTRACE_MATCH_NEXT); 10709 } 10710 10711 if ((ecb = dtrace_ecb_create(state, probe, enab)) == NULL) 10712 return (DTRACE_MATCH_DONE); 10713 10714 dtrace_ecb_enable(ecb); 10715 return (DTRACE_MATCH_NEXT); 10716 } 10717 10718 static dtrace_ecb_t * 10719 dtrace_epid2ecb(dtrace_state_t *state, dtrace_epid_t id) 10720 { 10721 dtrace_ecb_t *ecb; 10722 10723 ASSERT(MUTEX_HELD(&dtrace_lock)); 10724 10725 if (id == 0 || id > state->dts_necbs) 10726 return (NULL); 10727 10728 ASSERT(state->dts_necbs > 0 && state->dts_ecbs != NULL); 10729 ASSERT((ecb = state->dts_ecbs[id - 1]) == NULL || ecb->dte_epid == id); 10730 10731 return (state->dts_ecbs[id - 1]); 10732 } 10733 10734 static dtrace_aggregation_t * 10735 dtrace_aggid2agg(dtrace_state_t *state, dtrace_aggid_t id) 10736 { 10737 dtrace_aggregation_t *agg; 10738 10739 ASSERT(MUTEX_HELD(&dtrace_lock)); 10740 10741 if (id == 0 || id > state->dts_naggregations) 10742 return (NULL); 10743 10744 ASSERT(state->dts_naggregations > 0 && state->dts_aggregations != NULL); 10745 ASSERT((agg = state->dts_aggregations[id - 1]) == NULL || 10746 agg->dtag_id == id); 10747 10748 return (state->dts_aggregations[id - 1]); 10749 } 10750 10751 /* 10752 * DTrace Buffer Functions 10753 * 10754 * The following functions manipulate DTrace buffers. Most of these functions 10755 * are called in the context of establishing or processing consumer state; 10756 * exceptions are explicitly noted. 10757 */ 10758 10759 /* 10760 * Note: called from cross call context. This function switches the two 10761 * buffers on a given CPU. The atomicity of this operation is assured by 10762 * disabling interrupts while the actual switch takes place; the disabling of 10763 * interrupts serializes the execution with any execution of dtrace_probe() on 10764 * the same CPU. 
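 * (dtrace_probe() itself disables interrupts for the duration of its * processing, so it can never observe a partially-switched buffer pair.)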
10765 */ 10766 static void 10767 dtrace_buffer_switch(dtrace_buffer_t *buf) 10768 { 10769 caddr_t tomax = buf->dtb_tomax; 10770 caddr_t xamot = buf->dtb_xamot; 10771 dtrace_icookie_t cookie; 10772 hrtime_t now; 10773 10774 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 10775 ASSERT(!(buf->dtb_flags & DTRACEBUF_RING)); 10776 10777 cookie = dtrace_interrupt_disable(); 10778 now = dtrace_gethrtime(); 10779 buf->dtb_tomax = xamot; 10780 buf->dtb_xamot = tomax; 10781 buf->dtb_xamot_drops = buf->dtb_drops; 10782 buf->dtb_xamot_offset = buf->dtb_offset; 10783 buf->dtb_xamot_errors = buf->dtb_errors; 10784 buf->dtb_xamot_flags = buf->dtb_flags; 10785 buf->dtb_offset = 0; 10786 buf->dtb_drops = 0; 10787 buf->dtb_errors = 0; 10788 buf->dtb_flags &= ~(DTRACEBUF_ERROR | DTRACEBUF_DROPPED); 10789 buf->dtb_interval = now - buf->dtb_switched; 10790 buf->dtb_switched = now; 10791 dtrace_interrupt_enable(cookie); 10792 } 10793 10794 /* 10795 * Note: called from cross call context. This function activates a buffer 10796 * on a CPU. As with dtrace_buffer_switch(), the atomicity of the operation 10797 * is guaranteed by the disabling of interrupts. 10798 */ 10799 static void 10800 dtrace_buffer_activate(dtrace_state_t *state) 10801 { 10802 dtrace_buffer_t *buf; 10803 dtrace_icookie_t cookie = dtrace_interrupt_disable(); 10804 10805 buf = &state->dts_buffer[curcpu]; 10806 10807 if (buf->dtb_tomax != NULL) { 10808 /* 10809 * We might like to assert that the buffer is marked inactive, 10810 * but this isn't necessarily true: the buffer for the CPU 10811 * that processes the BEGIN probe has its buffer activated 10812 * manually. In this case, we take the (harmless) action 10813 * of re-clearing the INACTIVE bit. 10814 */ 10815 buf->dtb_flags &= ~DTRACEBUF_INACTIVE; 10816 } 10817 10818 dtrace_interrupt_enable(cookie); 10819 } 10820 10821 static int 10822 dtrace_buffer_alloc(dtrace_buffer_t *bufs, size_t size, int flags, 10823 processorid_t cpu) 10824 { 10825 #if defined(sun) 10826 cpu_t *cp; 10827 #endif 10828 dtrace_buffer_t *buf; 10829 10830 #if defined(sun) 10831 ASSERT(MUTEX_HELD(&cpu_lock)); 10832 ASSERT(MUTEX_HELD(&dtrace_lock)); 10833 10834 if (size > dtrace_nonroot_maxsize && 10835 !PRIV_POLICY_CHOICE(CRED(), PRIV_ALL, B_FALSE)) 10836 return (EFBIG); 10837 10838 cp = cpu_list; 10839 10840 do { 10841 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10842 continue; 10843 10844 buf = &bufs[cp->cpu_id]; 10845 10846 /* 10847 * If there is already a buffer allocated for this CPU, it 10848 * is only possible that this is a DR event.
In this case, the buffer size must match our specified size. 10849 */ 10850 if (buf->dtb_tomax != NULL) { 10851 ASSERT(buf->dtb_size == size); 10852 continue; 10853 } 10854 10855 ASSERT(buf->dtb_xamot == NULL); 10856 10857 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10858 goto err; 10859 10860 buf->dtb_size = size; 10861 buf->dtb_flags = flags; 10862 buf->dtb_offset = 0; 10863 buf->dtb_drops = 0; 10864 10865 if (flags & DTRACEBUF_NOSWITCH) 10866 continue; 10867 10868 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10869 goto err; 10870 } while ((cp = cp->cpu_next) != cpu_list); 10871 10872 return (0); 10873 10874 err: 10875 cp = cpu_list; 10876 10877 do { 10878 if (cpu != DTRACE_CPUALL && cpu != cp->cpu_id) 10879 continue; 10880 10881 buf = &bufs[cp->cpu_id]; 10882 10883 if (buf->dtb_xamot != NULL) { 10884 ASSERT(buf->dtb_tomax != NULL); 10885 ASSERT(buf->dtb_size == size); 10886 kmem_free(buf->dtb_xamot, size); 10887 } 10888 10889 if (buf->dtb_tomax != NULL) { 10890 ASSERT(buf->dtb_size == size); 10891 kmem_free(buf->dtb_tomax, size); 10892 } 10893 10894 buf->dtb_tomax = NULL; 10895 buf->dtb_xamot = NULL; 10896 buf->dtb_size = 0; 10897 } while ((cp = cp->cpu_next) != cpu_list); 10898 10899 return (ENOMEM); 10900 #else 10901 int i; 10902 10903 #if defined(__amd64__) || defined(__mips__) || defined(__powerpc__) 10904 /* 10905 * FreeBSD isn't good at limiting the amount of memory we 10906 * ask to malloc, so let's place a limit here before trying 10907 * to do something that might well end in tears at bedtime. 10908 */ 10909 if (size > physmem * PAGE_SIZE / (128 * (mp_maxid + 1))) 10910 return (ENOMEM); 10911 #endif 10912 10913 ASSERT(MUTEX_HELD(&dtrace_lock)); 10914 CPU_FOREACH(i) { 10915 if (cpu != DTRACE_CPUALL && cpu != i) 10916 continue; 10917 10918 buf = &bufs[i]; 10919 10920 /* 10921 * If there is already a buffer allocated for this CPU, it 10922 * is only possible that this is a DR event. In this case, 10923 * the buffer size must match our specified size. 10924 */ 10925 if (buf->dtb_tomax != NULL) { 10926 ASSERT(buf->dtb_size == size); 10927 continue; 10928 } 10929 10930 ASSERT(buf->dtb_xamot == NULL); 10931 10932 if ((buf->dtb_tomax = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10933 goto err; 10934 10935 buf->dtb_size = size; 10936 buf->dtb_flags = flags; 10937 buf->dtb_offset = 0; 10938 buf->dtb_drops = 0; 10939 10940 if (flags & DTRACEBUF_NOSWITCH) 10941 continue; 10942 10943 if ((buf->dtb_xamot = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 10944 goto err; 10945 } 10946 10947 return (0); 10948 10949 err: 10950 /* 10951 * Error allocating memory, so free the buffers that were 10952 * allocated before the failed allocation. 10953 */ 10954 CPU_FOREACH(i) { 10955 if (cpu != DTRACE_CPUALL && cpu != i) 10956 continue; 10957 10958 buf = &bufs[i]; 10959 10960 if (buf->dtb_xamot != NULL) { 10961 ASSERT(buf->dtb_tomax != NULL); 10962 ASSERT(buf->dtb_size == size); 10963 kmem_free(buf->dtb_xamot, size); 10964 } 10965 10966 if (buf->dtb_tomax != NULL) { 10967 ASSERT(buf->dtb_size == size); 10968 kmem_free(buf->dtb_tomax, size); 10969 } 10970 10971 buf->dtb_tomax = NULL; 10972 buf->dtb_xamot = NULL; 10973 buf->dtb_size = 0; 10974 10975 } 10976 10977 return (ENOMEM); 10978 #endif 10979 } 10980 10981 /* 10982 * Note: called from probe context. This function just increments the drop 10983 * count on a buffer. It has been made a function to allow for the 10984 * possibility of understanding the source of mysterious drop counts.
(A 10985 * problem for which one may be particularly disappointed that DTrace cannot 10986 * be used to understand DTrace.) 10987 */ 10988 static void 10989 dtrace_buffer_drop(dtrace_buffer_t *buf) 10990 { 10991 buf->dtb_drops++; 10992 } 10993 10994 /* 10995 * Note: called from probe context. This function is called to reserve space 10996 * in a buffer. If mstate is non-NULL, sets the scratch base and size in the 10997 * mstate. Returns the new offset in the buffer, or a negative value if an 10998 * error has occurred. 10999 */ 11000 static intptr_t 11001 dtrace_buffer_reserve(dtrace_buffer_t *buf, size_t needed, size_t align, 11002 dtrace_state_t *state, dtrace_mstate_t *mstate) 11003 { 11004 intptr_t offs = buf->dtb_offset, soffs; 11005 intptr_t woffs; 11006 caddr_t tomax; 11007 size_t total; 11008 11009 if (buf->dtb_flags & DTRACEBUF_INACTIVE) 11010 return (-1); 11011 11012 if ((tomax = buf->dtb_tomax) == NULL) { 11013 dtrace_buffer_drop(buf); 11014 return (-1); 11015 } 11016 11017 if (!(buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL))) { 11018 while (offs & (align - 1)) { 11019 /* 11020 * Assert that our alignment is off by a number which 11021 * is itself sizeof (uint32_t) aligned. 11022 */ 11023 ASSERT(!((align - (offs & (align - 1))) & 11024 (sizeof (uint32_t) - 1))); 11025 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 11026 offs += sizeof (uint32_t); 11027 } 11028 11029 if ((soffs = offs + needed) > buf->dtb_size) { 11030 dtrace_buffer_drop(buf); 11031 return (-1); 11032 } 11033 11034 if (mstate == NULL) 11035 return (offs); 11036 11037 mstate->dtms_scratch_base = (uintptr_t)tomax + soffs; 11038 mstate->dtms_scratch_size = buf->dtb_size - soffs; 11039 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 11040 11041 return (offs); 11042 } 11043 11044 if (buf->dtb_flags & DTRACEBUF_FILL) { 11045 if (state->dts_activity != DTRACE_ACTIVITY_COOLDOWN && 11046 (buf->dtb_flags & DTRACEBUF_FULL)) 11047 return (-1); 11048 goto out; 11049 } 11050 11051 total = needed + (offs & (align - 1)); 11052 11053 /* 11054 * For a ring buffer, life is quite a bit more complicated. Before 11055 * we can store any padding, we need to adjust our wrapping offset. 11056 * (If we've never before wrapped or we're not about to, no adjustment 11057 * is required.) 11058 */ 11059 if ((buf->dtb_flags & DTRACEBUF_WRAPPED) || 11060 offs + total > buf->dtb_size) { 11061 woffs = buf->dtb_xamot_offset; 11062 11063 if (offs + total > buf->dtb_size) { 11064 /* 11065 * We can't fit in the end of the buffer. First, a 11066 * sanity check that we can fit in the buffer at all. 11067 */ 11068 if (total > buf->dtb_size) { 11069 dtrace_buffer_drop(buf); 11070 return (-1); 11071 } 11072 11073 /* 11074 * We're going to be storing at the top of the buffer, 11075 * so now we need to deal with the wrapped offset. We 11076 * only reset our wrapped offset to 0 if it is 11077 * currently greater than the current offset. If it 11078 * is less than the current offset, it is because a 11079 * previous allocation induced a wrap -- but the 11080 * allocation didn't subsequently take the space due 11081 * to an error or false predicate evaluation. In this 11082 * case, we'll just leave the wrapped offset alone: if 11083 * the wrapped offset hasn't been advanced far enough 11084 * for this allocation, it will be adjusted in the 11085 * lower loop. 
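 * As an illustrative example: with a 64-byte buffer, a current offset * of 56 and a 16-byte reservation, the record cannot fit at the end of * the buffer; the tail is zeroed, the offset is reset to 0, * DTRACEBUF_WRAPPED is set, and the loop below then advances the * wrapped offset over whole records until the 16 reserved bytes no * longer overlap them.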
11086 */ 11087 if (buf->dtb_flags & DTRACEBUF_WRAPPED) { 11088 if (woffs >= offs) 11089 woffs = 0; 11090 } else { 11091 woffs = 0; 11092 } 11093 11094 /* 11095 * Now we know that we're going to be storing to the 11096 * top of the buffer and that there is room for us 11097 * there. We need to clear the buffer from the current 11098 * offset to the end (there may be old gunk there). 11099 */ 11100 while (offs < buf->dtb_size) 11101 tomax[offs++] = 0; 11102 11103 /* 11104 * We need to set our offset to zero. And because we 11105 * are wrapping, we need to set the bit indicating as 11106 * much. We can also adjust our needed space back 11107 * down to the space required by the ECB -- we know 11108 * that the top of the buffer is aligned. 11109 */ 11110 offs = 0; 11111 total = needed; 11112 buf->dtb_flags |= DTRACEBUF_WRAPPED; 11113 } else { 11114 /* 11115 * There is room for us in the buffer, so we simply 11116 * need to check the wrapped offset. 11117 */ 11118 if (woffs < offs) { 11119 /* 11120 * The wrapped offset is less than the offset. 11121 * This can happen if we allocated buffer space 11122 * that induced a wrap, but then we didn't 11123 * subsequently take the space due to an error 11124 * or false predicate evaluation. This is 11125 * okay; we know that _this_ allocation isn't 11126 * going to induce a wrap. We still can't 11127 * reset the wrapped offset to be zero, 11128 * however: the space may have been trashed in 11129 * the previous failed probe attempt. But at 11130 * least the wrapped offset doesn't need to 11131 * be adjusted at all... 11132 */ 11133 goto out; 11134 } 11135 } 11136 11137 while (offs + total > woffs) { 11138 dtrace_epid_t epid = *(uint32_t *)(tomax + woffs); 11139 size_t size; 11140 11141 if (epid == DTRACE_EPIDNONE) { 11142 size = sizeof (uint32_t); 11143 } else { 11144 ASSERT3U(epid, <=, state->dts_necbs); 11145 ASSERT(state->dts_ecbs[epid - 1] != NULL); 11146 11147 size = state->dts_ecbs[epid - 1]->dte_size; 11148 } 11149 11150 ASSERT(woffs + size <= buf->dtb_size); 11151 ASSERT(size != 0); 11152 11153 if (woffs + size == buf->dtb_size) { 11154 /* 11155 * We've reached the end of the buffer; we want 11156 * to set the wrapped offset to 0 and break 11157 * out. However, if the offs is 0, then we're 11158 * in a strange edge-condition: the amount of 11159 * space that we want to reserve plus the size 11160 * of the record that we're overwriting is 11161 * greater than the size of the buffer. This 11162 * is problematic because if we reserve the 11163 * space but subsequently don't consume it (due 11164 * to a failed predicate or error) the wrapped 11165 * offset will be 0 -- yet the EPID at offset 0 11166 * will not be committed. This situation is 11167 * relatively easy to deal with: if we're in 11168 * this case, the buffer is indistinguishable 11169 * from one that hasn't wrapped; we need only 11170 * finish the job by clearing the wrapped bit, 11171 * explicitly setting the offset to be 0, and 11172 * zero'ing out the old data in the buffer. 11173 */ 11174 if (offs == 0) { 11175 buf->dtb_flags &= ~DTRACEBUF_WRAPPED; 11176 buf->dtb_offset = 0; 11177 woffs = total; 11178 11179 while (woffs < buf->dtb_size) 11180 tomax[woffs++] = 0; 11181 } 11182 11183 woffs = 0; 11184 break; 11185 } 11186 11187 woffs += size; 11188 } 11189 11190 /* 11191 * We have a wrapped offset. It may be that the wrapped offset 11192 * has become zero -- that's okay. 
11193 */ 11194 buf->dtb_xamot_offset = woffs; 11195 } 11196 11197 out: 11198 /* 11199 * Now we can plow the buffer with any necessary padding. 11200 */ 11201 while (offs & (align - 1)) { 11202 /* 11203 * Assert that our alignment is off by a number which 11204 * is itself sizeof (uint32_t) aligned. 11205 */ 11206 ASSERT(!((align - (offs & (align - 1))) & 11207 (sizeof (uint32_t) - 1))); 11208 DTRACE_STORE(uint32_t, tomax, offs, DTRACE_EPIDNONE); 11209 offs += sizeof (uint32_t); 11210 } 11211 11212 if (buf->dtb_flags & DTRACEBUF_FILL) { 11213 if (offs + needed > buf->dtb_size - state->dts_reserve) { 11214 buf->dtb_flags |= DTRACEBUF_FULL; 11215 return (-1); 11216 } 11217 } 11218 11219 if (mstate == NULL) 11220 return (offs); 11221 11222 /* 11223 * For ring buffers and fill buffers, the scratch space is always 11224 * the inactive buffer. 11225 */ 11226 mstate->dtms_scratch_base = (uintptr_t)buf->dtb_xamot; 11227 mstate->dtms_scratch_size = buf->dtb_size; 11228 mstate->dtms_scratch_ptr = mstate->dtms_scratch_base; 11229 11230 return (offs); 11231 } 11232 11233 static void 11234 dtrace_buffer_polish(dtrace_buffer_t *buf) 11235 { 11236 ASSERT(buf->dtb_flags & DTRACEBUF_RING); 11237 ASSERT(MUTEX_HELD(&dtrace_lock)); 11238 11239 if (!(buf->dtb_flags & DTRACEBUF_WRAPPED)) 11240 return; 11241 11242 /* 11243 * We need to polish the ring buffer. There are three cases: 11244 * 11245 * - The first (and presumably most common) is that there is no gap 11246 * between the buffer offset and the wrapped offset. In this case, 11247 * there is nothing in the buffer that isn't valid data; we can 11248 * mark the buffer as polished and return. 11249 * 11250 * - The second (less common than the first but still more common 11251 * than the third) is that there is a gap between the buffer offset 11252 * and the wrapped offset, and the wrapped offset is larger than the 11253 * buffer offset. This can happen because of an alignment issue, or 11254 * can happen because of a call to dtrace_buffer_reserve() that 11255 * didn't subsequently consume the buffer space. In this case, 11256 * we need to zero the data from the buffer offset to the wrapped 11257 * offset. 11258 * 11259 * - The third (and least common) is that there is a gap between the 11260 * buffer offset and the wrapped offset, but the wrapped offset is 11261 * _less_ than the buffer offset. This can only happen because a 11262 * call to dtrace_buffer_reserve() induced a wrap, but the space 11263 * was not subsequently consumed. In this case, we need to zero the 11264 * space from the offset to the end of the buffer _and_ from the 11265 * top of the buffer to the wrapped offset. 11266 */ 11267 if (buf->dtb_offset < buf->dtb_xamot_offset) { 11268 bzero(buf->dtb_tomax + buf->dtb_offset, 11269 buf->dtb_xamot_offset - buf->dtb_offset); 11270 } 11271 11272 if (buf->dtb_offset > buf->dtb_xamot_offset) { 11273 bzero(buf->dtb_tomax + buf->dtb_offset, 11274 buf->dtb_size - buf->dtb_offset); 11275 bzero(buf->dtb_tomax, buf->dtb_xamot_offset); 11276 } 11277 } 11278 11279 /* 11280 * This routine determines if data generated at the specified time has likely 11281 * been entirely consumed at user-level. This routine is called to determine 11282 * if an ECB on a defunct probe (but for an active enabling) can be safely 11283 * disabled and destroyed. 
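 * Consumption is inferred from buffer switching: data generated at the * specified time is deemed consumed only if each per-CPU buffer has * been switched twice since that time (ring buffers, which are never * switched, always fail this test).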
11284 */ 11285 static int 11286 dtrace_buffer_consumed(dtrace_buffer_t *bufs, hrtime_t when) 11287 { 11288 int i; 11289 11290 for (i = 0; i < NCPU; i++) { 11291 dtrace_buffer_t *buf = &bufs[i]; 11292 11293 if (buf->dtb_size == 0) 11294 continue; 11295 11296 if (buf->dtb_flags & DTRACEBUF_RING) 11297 return (0); 11298 11299 if (!buf->dtb_switched && buf->dtb_offset != 0) 11300 return (0); 11301 11302 if (buf->dtb_switched - buf->dtb_interval < when) 11303 return (0); 11304 } 11305 11306 return (1); 11307 } 11308 11309 static void 11310 dtrace_buffer_free(dtrace_buffer_t *bufs) 11311 { 11312 int i; 11313 11314 for (i = 0; i < NCPU; i++) { 11315 dtrace_buffer_t *buf = &bufs[i]; 11316 11317 if (buf->dtb_tomax == NULL) { 11318 ASSERT(buf->dtb_xamot == NULL); 11319 ASSERT(buf->dtb_size == 0); 11320 continue; 11321 } 11322 11323 if (buf->dtb_xamot != NULL) { 11324 ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH)); 11325 kmem_free(buf->dtb_xamot, buf->dtb_size); 11326 } 11327 11328 kmem_free(buf->dtb_tomax, buf->dtb_size); 11329 buf->dtb_size = 0; 11330 buf->dtb_tomax = NULL; 11331 buf->dtb_xamot = NULL; 11332 } 11333 } 11334 11335 /* 11336 * DTrace Enabling Functions 11337 */ 11338 static dtrace_enabling_t * 11339 dtrace_enabling_create(dtrace_vstate_t *vstate) 11340 { 11341 dtrace_enabling_t *enab; 11342 11343 enab = kmem_zalloc(sizeof (dtrace_enabling_t), KM_SLEEP); 11344 enab->dten_vstate = vstate; 11345 11346 return (enab); 11347 } 11348 11349 static void 11350 dtrace_enabling_add(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb) 11351 { 11352 dtrace_ecbdesc_t **ndesc; 11353 size_t osize, nsize; 11354 11355 /* 11356 * We can't add to enablings after we've enabled them, or after we've 11357 * retained them. 11358 */ 11359 ASSERT(enab->dten_probegen == 0); 11360 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11361 11362 if (enab->dten_ndesc < enab->dten_maxdesc) { 11363 enab->dten_desc[enab->dten_ndesc++] = ecb; 11364 return; 11365 } 11366 11367 osize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11368 11369 if (enab->dten_maxdesc == 0) { 11370 enab->dten_maxdesc = 1; 11371 } else { 11372 enab->dten_maxdesc <<= 1; 11373 } 11374 11375 ASSERT(enab->dten_ndesc < enab->dten_maxdesc); 11376 11377 nsize = enab->dten_maxdesc * sizeof (dtrace_enabling_t *); 11378 ndesc = kmem_zalloc(nsize, KM_SLEEP); 11379 bcopy(enab->dten_desc, ndesc, osize); 11380 if (enab->dten_desc != NULL) 11381 kmem_free(enab->dten_desc, osize); 11382 11383 enab->dten_desc = ndesc; 11384 enab->dten_desc[enab->dten_ndesc++] = ecb; 11385 } 11386 11387 static void 11388 dtrace_enabling_addlike(dtrace_enabling_t *enab, dtrace_ecbdesc_t *ecb, 11389 dtrace_probedesc_t *pd) 11390 { 11391 dtrace_ecbdesc_t *new; 11392 dtrace_predicate_t *pred; 11393 dtrace_actdesc_t *act; 11394 11395 /* 11396 * We're going to create a new ECB description that matches the 11397 * specified ECB in every way, but has the specified probe description. 
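 * Note that the predicate and the action list are shared with the * original ECB description (via holds on the underlying objects) rather * than copied.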
11398 */ 11399 new = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 11400 11401 if ((pred = ecb->dted_pred.dtpdd_predicate) != NULL) 11402 dtrace_predicate_hold(pred); 11403 11404 for (act = ecb->dted_action; act != NULL; act = act->dtad_next) 11405 dtrace_actdesc_hold(act); 11406 11407 new->dted_action = ecb->dted_action; 11408 new->dted_pred = ecb->dted_pred; 11409 new->dted_probe = *pd; 11410 new->dted_uarg = ecb->dted_uarg; 11411 11412 dtrace_enabling_add(enab, new); 11413 } 11414 11415 static void 11416 dtrace_enabling_dump(dtrace_enabling_t *enab) 11417 { 11418 int i; 11419 11420 for (i = 0; i < enab->dten_ndesc; i++) { 11421 dtrace_probedesc_t *desc = &enab->dten_desc[i]->dted_probe; 11422 11423 cmn_err(CE_NOTE, "enabling probe %d (%s:%s:%s:%s)", i, 11424 desc->dtpd_provider, desc->dtpd_mod, 11425 desc->dtpd_func, desc->dtpd_name); 11426 } 11427 } 11428 11429 static void 11430 dtrace_enabling_destroy(dtrace_enabling_t *enab) 11431 { 11432 int i; 11433 dtrace_ecbdesc_t *ep; 11434 dtrace_vstate_t *vstate = enab->dten_vstate; 11435 11436 ASSERT(MUTEX_HELD(&dtrace_lock)); 11437 11438 for (i = 0; i < enab->dten_ndesc; i++) { 11439 dtrace_actdesc_t *act, *next; 11440 dtrace_predicate_t *pred; 11441 11442 ep = enab->dten_desc[i]; 11443 11444 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) 11445 dtrace_predicate_release(pred, vstate); 11446 11447 for (act = ep->dted_action; act != NULL; act = next) { 11448 next = act->dtad_next; 11449 dtrace_actdesc_release(act, vstate); 11450 } 11451 11452 kmem_free(ep, sizeof (dtrace_ecbdesc_t)); 11453 } 11454 11455 if (enab->dten_desc != NULL) 11456 kmem_free(enab->dten_desc, 11457 enab->dten_maxdesc * sizeof (dtrace_enabling_t *)); 11458 11459 /* 11460 * If this was a retained enabling, decrement the dts_nretained count 11461 * and take it off of the dtrace_retained list. 11462 */ 11463 if (enab->dten_prev != NULL || enab->dten_next != NULL || 11464 dtrace_retained == enab) { 11465 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11466 ASSERT(enab->dten_vstate->dtvs_state->dts_nretained > 0); 11467 enab->dten_vstate->dtvs_state->dts_nretained--; 11468 } 11469 11470 if (enab->dten_prev == NULL) { 11471 if (dtrace_retained == enab) { 11472 dtrace_retained = enab->dten_next; 11473 11474 if (dtrace_retained != NULL) 11475 dtrace_retained->dten_prev = NULL; 11476 } 11477 } else { 11478 ASSERT(enab != dtrace_retained); 11479 ASSERT(dtrace_retained != NULL); 11480 enab->dten_prev->dten_next = enab->dten_next; 11481 } 11482 11483 if (enab->dten_next != NULL) { 11484 ASSERT(dtrace_retained != NULL); 11485 enab->dten_next->dten_prev = enab->dten_prev; 11486 } 11487 11488 kmem_free(enab, sizeof (dtrace_enabling_t)); 11489 } 11490 11491 static int 11492 dtrace_enabling_retain(dtrace_enabling_t *enab) 11493 { 11494 dtrace_state_t *state; 11495 11496 ASSERT(MUTEX_HELD(&dtrace_lock)); 11497 ASSERT(enab->dten_next == NULL && enab->dten_prev == NULL); 11498 ASSERT(enab->dten_vstate != NULL); 11499 11500 state = enab->dten_vstate->dtvs_state; 11501 ASSERT(state != NULL); 11502 11503 /* 11504 * We only allow each state to retain dtrace_retain_max enablings. 
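 * (This bounds the kernel memory that retained enablings can consume; * exceeding the cap fails with ENOSPC rather than silently dropping the * enabling.)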
11505 */ 11506 if (state->dts_nretained >= dtrace_retain_max) 11507 return (ENOSPC); 11508 11509 state->dts_nretained++; 11510 11511 if (dtrace_retained == NULL) { 11512 dtrace_retained = enab; 11513 return (0); 11514 } 11515 11516 enab->dten_next = dtrace_retained; 11517 dtrace_retained->dten_prev = enab; 11518 dtrace_retained = enab; 11519 11520 return (0); 11521 } 11522 11523 static int 11524 dtrace_enabling_replicate(dtrace_state_t *state, dtrace_probedesc_t *match, 11525 dtrace_probedesc_t *create) 11526 { 11527 dtrace_enabling_t *new, *enab; 11528 int found = 0, err = ENOENT; 11529 11530 ASSERT(MUTEX_HELD(&dtrace_lock)); 11531 ASSERT(strlen(match->dtpd_provider) < DTRACE_PROVNAMELEN); 11532 ASSERT(strlen(match->dtpd_mod) < DTRACE_MODNAMELEN); 11533 ASSERT(strlen(match->dtpd_func) < DTRACE_FUNCNAMELEN); 11534 ASSERT(strlen(match->dtpd_name) < DTRACE_NAMELEN); 11535 11536 new = dtrace_enabling_create(&state->dts_vstate); 11537 11538 /* 11539 * Iterate over all retained enablings, looking for enablings that 11540 * match the specified state. 11541 */ 11542 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11543 int i; 11544 11545 /* 11546 * dtvs_state can only be NULL for helper enablings -- and 11547 * helper enablings can't be retained. 11548 */ 11549 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11550 11551 if (enab->dten_vstate->dtvs_state != state) 11552 continue; 11553 11554 /* 11555 * Now iterate over each probe description; we're looking for 11556 * an exact match to the specified probe description. 11557 */ 11558 for (i = 0; i < enab->dten_ndesc; i++) { 11559 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11560 dtrace_probedesc_t *pd = &ep->dted_probe; 11561 11562 if (strcmp(pd->dtpd_provider, match->dtpd_provider)) 11563 continue; 11564 11565 if (strcmp(pd->dtpd_mod, match->dtpd_mod)) 11566 continue; 11567 11568 if (strcmp(pd->dtpd_func, match->dtpd_func)) 11569 continue; 11570 11571 if (strcmp(pd->dtpd_name, match->dtpd_name)) 11572 continue; 11573 11574 /* 11575 * We have a winning probe! Add it to our growing 11576 * enabling. 11577 */ 11578 found = 1; 11579 dtrace_enabling_addlike(new, ep, create); 11580 } 11581 } 11582 11583 if (!found || (err = dtrace_enabling_retain(new)) != 0) { 11584 dtrace_enabling_destroy(new); 11585 return (err); 11586 } 11587 11588 return (0); 11589 } 11590 11591 static void 11592 dtrace_enabling_retract(dtrace_state_t *state) 11593 { 11594 dtrace_enabling_t *enab, *next; 11595 11596 ASSERT(MUTEX_HELD(&dtrace_lock)); 11597 11598 /* 11599 * Iterate over all retained enablings, destroy the enablings retained 11600 * for the specified state. 11601 */ 11602 for (enab = dtrace_retained; enab != NULL; enab = next) { 11603 next = enab->dten_next; 11604 11605 /* 11606 * dtvs_state can only be NULL for helper enablings -- and 11607 * helper enablings can't be retained. 
11608 */ 11609 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11610 11611 if (enab->dten_vstate->dtvs_state == state) { 11612 ASSERT(state->dts_nretained > 0); 11613 dtrace_enabling_destroy(enab); 11614 } 11615 } 11616 11617 ASSERT(state->dts_nretained == 0); 11618 } 11619 11620 static int 11621 dtrace_enabling_match(dtrace_enabling_t *enab, int *nmatched) 11622 { 11623 int i = 0; 11624 int matched = 0; 11625 11626 ASSERT(MUTEX_HELD(&cpu_lock)); 11627 ASSERT(MUTEX_HELD(&dtrace_lock)); 11628 11629 for (i = 0; i < enab->dten_ndesc; i++) { 11630 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 11631 11632 enab->dten_current = ep; 11633 enab->dten_error = 0; 11634 11635 matched += dtrace_probe_enable(&ep->dted_probe, enab); 11636 11637 if (enab->dten_error != 0) { 11638 /* 11639 * If we get an error half-way through enabling the 11640 * probes, we kick out -- perhaps with some number of 11641 * them enabled. Leaving enabled probes enabled may 11642 * be slightly confusing for user-level, but we expect 11643 * that no one will attempt to actually drive on in 11644 * the face of such errors. If this is an anonymous 11645 * enabling (indicated with a NULL nmatched pointer), 11646 * we cmn_err() a message. We aren't expecting to 11647 * get such an error -- such as it can exist at all, 11648 * it would be a result of corrupted DOF in the driver 11649 * properties. 11650 */ 11651 if (nmatched == NULL) { 11652 cmn_err(CE_WARN, "dtrace_enabling_match() " 11653 "error on %p: %d", (void *)ep, 11654 enab->dten_error); 11655 } 11656 11657 return (enab->dten_error); 11658 } 11659 } 11660 11661 enab->dten_probegen = dtrace_probegen; 11662 if (nmatched != NULL) 11663 *nmatched = matched; 11664 11665 return (0); 11666 } 11667 11668 static void 11669 dtrace_enabling_matchall(void) 11670 { 11671 dtrace_enabling_t *enab; 11672 11673 mutex_enter(&cpu_lock); 11674 mutex_enter(&dtrace_lock); 11675 11676 /* 11677 * Iterate over all retained enablings to see if any probes match 11678 * against them. We only perform this operation on enablings for which 11679 * we have sufficient permissions by virtue of being in the global zone 11680 * or in the same zone as the DTrace client. Because we can be called 11681 * after dtrace_detach() has been called, we cannot assert that there 11682 * are retained enablings. We can safely load from dtrace_retained, 11683 * however: the taskq_destroy() at the end of dtrace_detach() will 11684 * block pending our completion. 11685 */ 11686 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11687 #if defined(sun) 11688 cred_t *cr = enab->dten_vstate->dtvs_state->dts_cred.dcr_cred; 11689 11690 if (INGLOBALZONE(curproc) || getzoneid() == crgetzoneid(cr)) 11691 #endif 11692 (void) dtrace_enabling_match(enab, NULL); 11693 } 11694 11695 mutex_exit(&dtrace_lock); 11696 mutex_exit(&cpu_lock); 11697 } 11698 11699 /* 11700 * If an enabling is to be enabled without having matched probes (that is, if 11701 * dtrace_state_go() is to be called on the underlying dtrace_state_t), the 11702 * enabling must be _primed_ by creating an ECB for every ECB description. 11703 * This must be done to assure that we know the number of speculations, the 11704 * number of aggregations, the minimum buffer size needed, etc. before we 11705 * transition out of DTRACE_ACTIVITY_INACTIVE. To do this without actually 11706 * enabling any probes, we create ECBs for every ECB description, but with a 11707 * NULL probe -- which is exactly what this function does.
11708 */ 11709 static void 11710 dtrace_enabling_prime(dtrace_state_t *state) 11711 { 11712 dtrace_enabling_t *enab; 11713 int i; 11714 11715 for (enab = dtrace_retained; enab != NULL; enab = enab->dten_next) { 11716 ASSERT(enab->dten_vstate->dtvs_state != NULL); 11717 11718 if (enab->dten_vstate->dtvs_state != state) 11719 continue; 11720 11721 /* 11722 * We don't want to prime an enabling more than once, lest 11723 * we allow a malicious user to induce resource exhaustion. 11724 * (The ECBs that result from priming an enabling aren't 11725 * leaked -- but they also aren't deallocated until the 11726 * consumer state is destroyed.) 11727 */ 11728 if (enab->dten_primed) 11729 continue; 11730 11731 for (i = 0; i < enab->dten_ndesc; i++) { 11732 enab->dten_current = enab->dten_desc[i]; 11733 (void) dtrace_probe_enable(NULL, enab); 11734 } 11735 11736 enab->dten_primed = 1; 11737 } 11738 } 11739 11740 /* 11741 * Called to indicate that probes should be provided due to retained 11742 * enablings. This is implemented in terms of dtrace_probe_provide(), but it 11743 * must take an initial lap through the enabling calling the dtps_provide() 11744 * entry point explicitly to allow for autocreated probes. 11745 */ 11746 static void 11747 dtrace_enabling_provide(dtrace_provider_t *prv) 11748 { 11749 int i, all = 0; 11750 dtrace_probedesc_t desc; 11751 11752 ASSERT(MUTEX_HELD(&dtrace_lock)); 11753 ASSERT(MUTEX_HELD(&dtrace_provider_lock)); 11754 11755 if (prv == NULL) { 11756 all = 1; 11757 prv = dtrace_provider; 11758 } 11759 11760 do { 11761 dtrace_enabling_t *enab = dtrace_retained; 11762 void *parg = prv->dtpv_arg; 11763 11764 for (; enab != NULL; enab = enab->dten_next) { 11765 for (i = 0; i < enab->dten_ndesc; i++) { 11766 desc = enab->dten_desc[i]->dted_probe; 11767 mutex_exit(&dtrace_lock); 11768 prv->dtpv_pops.dtps_provide(parg, &desc); 11769 mutex_enter(&dtrace_lock); 11770 } 11771 } 11772 } while (all && (prv = prv->dtpv_next) != NULL); 11773 11774 mutex_exit(&dtrace_lock); 11775 dtrace_probe_provide(NULL, all ? NULL : prv); 11776 mutex_enter(&dtrace_lock); 11777 } 11778 11779 /* 11780 * Called to reap ECBs that are attached to probes from defunct providers. 11781 */ 11782 static void 11783 dtrace_enabling_reap(void) 11784 { 11785 dtrace_provider_t *prov; 11786 dtrace_probe_t *probe; 11787 dtrace_ecb_t *ecb; 11788 hrtime_t when; 11789 int i; 11790 11791 mutex_enter(&cpu_lock); 11792 mutex_enter(&dtrace_lock); 11793 11794 for (i = 0; i < dtrace_nprobes; i++) { 11795 if ((probe = dtrace_probes[i]) == NULL) 11796 continue; 11797 11798 if (probe->dtpr_ecb == NULL) 11799 continue; 11800 11801 prov = probe->dtpr_provider; 11802 11803 if ((when = prov->dtpv_defunct) == 0) 11804 continue; 11805 11806 /* 11807 * We have ECBs on a defunct provider: we want to reap these 11808 * ECBs to allow the provider to unregister. The destruction 11809 * of these ECBs must be done carefully: if we destroy the ECB 11810 * and the consumer later wishes to consume an EPID that 11811 * corresponds to the destroyed ECB (and if the EPID metadata 11812 * has not been previously consumed), the consumer will abort 11813 * processing on the unknown EPID. To reduce (but not, sadly, 11814 * eliminate) the possibility of this, we will only destroy an 11815 * ECB for a defunct provider if, for the state that 11816 * corresponds to the ECB: 11817 * 11818 * (a) There is no speculative tracing (which can effectively 11819 * cache an EPID for an arbitrary amount of time). 
11820 * 11821 * (b) The principal buffers have been switched twice since the 11822 * provider became defunct. 11823 * 11824 * (c) The aggregation buffers are of zero size or have been 11825 * switched twice since the provider became defunct. 11826 * 11827 * We use dts_speculates to determine (a) and call a function 11828 * (dtrace_buffer_consumed()) to determine (b) and (c). Note 11829 * that as soon as we've been unable to destroy one of the ECBs 11830 * associated with the probe, we quit trying -- reaping is only 11831 * fruitful in as much as we can destroy all ECBs associated 11832 * with the defunct provider's probes. 11833 */ 11834 while ((ecb = probe->dtpr_ecb) != NULL) { 11835 dtrace_state_t *state = ecb->dte_state; 11836 dtrace_buffer_t *buf = state->dts_buffer; 11837 dtrace_buffer_t *aggbuf = state->dts_aggbuffer; 11838 11839 if (state->dts_speculates) 11840 break; 11841 11842 if (!dtrace_buffer_consumed(buf, when)) 11843 break; 11844 11845 if (!dtrace_buffer_consumed(aggbuf, when)) 11846 break; 11847 11848 dtrace_ecb_disable(ecb); 11849 ASSERT(probe->dtpr_ecb != ecb); 11850 dtrace_ecb_destroy(ecb); 11851 } 11852 } 11853 11854 mutex_exit(&dtrace_lock); 11855 mutex_exit(&cpu_lock); 11856 } 11857 11858 /* 11859 * DTrace DOF Functions 11860 */ 11861 /*ARGSUSED*/ 11862 static void 11863 dtrace_dof_error(dof_hdr_t *dof, const char *str) 11864 { 11865 if (dtrace_err_verbose) 11866 cmn_err(CE_WARN, "failed to process DOF: %s", str); 11867 11868 #ifdef DTRACE_ERRDEBUG 11869 dtrace_errdebug(str); 11870 #endif 11871 } 11872 11873 /* 11874 * Create DOF out of a currently enabled state. Right now, we only create 11875 * DOF containing the run-time options -- but this could be expanded to create 11876 * complete DOF representing the enabled state. 11877 */ 11878 static dof_hdr_t * 11879 dtrace_dof_create(dtrace_state_t *state) 11880 { 11881 dof_hdr_t *dof; 11882 dof_sec_t *sec; 11883 dof_optdesc_t *opt; 11884 int i, len = sizeof (dof_hdr_t) + 11885 roundup(sizeof (dof_sec_t), sizeof (uint64_t)) + 11886 sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11887 11888 ASSERT(MUTEX_HELD(&dtrace_lock)); 11889 11890 dof = kmem_zalloc(len, KM_SLEEP); 11891 dof->dofh_ident[DOF_ID_MAG0] = DOF_MAG_MAG0; 11892 dof->dofh_ident[DOF_ID_MAG1] = DOF_MAG_MAG1; 11893 dof->dofh_ident[DOF_ID_MAG2] = DOF_MAG_MAG2; 11894 dof->dofh_ident[DOF_ID_MAG3] = DOF_MAG_MAG3; 11895 11896 dof->dofh_ident[DOF_ID_MODEL] = DOF_MODEL_NATIVE; 11897 dof->dofh_ident[DOF_ID_ENCODING] = DOF_ENCODE_NATIVE; 11898 dof->dofh_ident[DOF_ID_VERSION] = DOF_VERSION; 11899 dof->dofh_ident[DOF_ID_DIFVERS] = DIF_VERSION; 11900 dof->dofh_ident[DOF_ID_DIFIREG] = DIF_DIR_NREGS; 11901 dof->dofh_ident[DOF_ID_DIFTREG] = DIF_DTR_NREGS; 11902 11903 dof->dofh_flags = 0; 11904 dof->dofh_hdrsize = sizeof (dof_hdr_t); 11905 dof->dofh_secsize = sizeof (dof_sec_t); 11906 dof->dofh_secnum = 1; /* only DOF_SECT_OPTDESC */ 11907 dof->dofh_secoff = sizeof (dof_hdr_t); 11908 dof->dofh_loadsz = len; 11909 dof->dofh_filesz = len; 11910 dof->dofh_pad = 0; 11911 11912 /* 11913 * Fill in the option section header... 
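 * The generated DOF is therefore laid out as the dof_hdr_t, followed by * a single DOF_SECT_OPTDESC section header (padded to 64-bit alignment), * followed by an array of DTRACEOPT_MAX option descriptions.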
11914 */ 11915 sec = (dof_sec_t *)((uintptr_t)dof + sizeof (dof_hdr_t)); 11916 sec->dofs_type = DOF_SECT_OPTDESC; 11917 sec->dofs_align = sizeof (uint64_t); 11918 sec->dofs_flags = DOF_SECF_LOAD; 11919 sec->dofs_entsize = sizeof (dof_optdesc_t); 11920 11921 opt = (dof_optdesc_t *)((uintptr_t)sec + 11922 roundup(sizeof (dof_sec_t), sizeof (uint64_t))); 11923 11924 sec->dofs_offset = (uintptr_t)opt - (uintptr_t)dof; 11925 sec->dofs_size = sizeof (dof_optdesc_t) * DTRACEOPT_MAX; 11926 11927 for (i = 0; i < DTRACEOPT_MAX; i++) { 11928 opt[i].dofo_option = i; 11929 opt[i].dofo_strtab = DOF_SECIDX_NONE; 11930 opt[i].dofo_value = state->dts_options[i]; 11931 } 11932 11933 return (dof); 11934 } 11935 11936 static dof_hdr_t * 11937 dtrace_dof_copyin(uintptr_t uarg, int *errp) 11938 { 11939 dof_hdr_t hdr, *dof; 11940 11941 ASSERT(!MUTEX_HELD(&dtrace_lock)); 11942 11943 /* 11944 * First, we're going to copyin() the sizeof (dof_hdr_t). 11945 */ 11946 if (copyin((void *)uarg, &hdr, sizeof (hdr)) != 0) { 11947 dtrace_dof_error(NULL, "failed to copyin DOF header"); 11948 *errp = EFAULT; 11949 return (NULL); 11950 } 11951 11952 /* 11953 * Now we'll allocate the entire DOF and copy it in -- provided 11954 * that the length isn't outrageous. 11955 */ 11956 if (hdr.dofh_loadsz >= dtrace_dof_maxsize) { 11957 dtrace_dof_error(&hdr, "load size exceeds maximum"); 11958 *errp = E2BIG; 11959 return (NULL); 11960 } 11961 11962 if (hdr.dofh_loadsz < sizeof (hdr)) { 11963 dtrace_dof_error(&hdr, "invalid load size"); 11964 *errp = EINVAL; 11965 return (NULL); 11966 } 11967 11968 dof = kmem_alloc(hdr.dofh_loadsz, KM_SLEEP); 11969 11970 if (copyin((void *)uarg, dof, hdr.dofh_loadsz) != 0) { 11971 kmem_free(dof, hdr.dofh_loadsz); 11972 *errp = EFAULT; 11973 return (NULL); 11974 } 11975 11976 return (dof); 11977 } 11978 11979 #if !defined(sun) 11980 static __inline uchar_t 11981 dtrace_dof_char(char c) { 11982 switch (c) { 11983 case '0': 11984 case '1': 11985 case '2': 11986 case '3': 11987 case '4': 11988 case '5': 11989 case '6': 11990 case '7': 11991 case '8': 11992 case '9': 11993 return (c - '0'); 11994 case 'A': 11995 case 'B': 11996 case 'C': 11997 case 'D': 11998 case 'E': 11999 case 'F': 12000 return (c - 'A' + 10); 12001 case 'a': 12002 case 'b': 12003 case 'c': 12004 case 'd': 12005 case 'e': 12006 case 'f': 12007 return (c - 'a' + 10); 12008 } 12009 /* Should not reach here. */ 12010 return (0); 12011 } 12012 #endif 12013 12014 static dof_hdr_t * 12015 dtrace_dof_property(const char *name) 12016 { 12017 uchar_t *buf; 12018 uint64_t loadsz; 12019 unsigned int len, i; 12020 dof_hdr_t *dof; 12021 12022 #if defined(sun) 12023 /* 12024 * Unfortunately, arrays of values in .conf files are always (and 12025 * only) interpreted to be integer arrays. We must read our DOF 12026 * as an integer array, and then squeeze it into a byte array.
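 * (Each DOF byte thus arrives as its own 32-bit integer in the * property; the loop below narrows the integer array into a contiguous * byte array in place.)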
12027 */ 12028 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dtrace_devi, 0, 12029 (char *)name, (int **)&buf, &len) != DDI_PROP_SUCCESS) 12030 return (NULL); 12031 12032 for (i = 0; i < len; i++) 12033 buf[i] = (uchar_t)(((int *)buf)[i]); 12034 12035 if (len < sizeof (dof_hdr_t)) { 12036 ddi_prop_free(buf); 12037 dtrace_dof_error(NULL, "truncated header"); 12038 return (NULL); 12039 } 12040 12041 if (len < (loadsz = ((dof_hdr_t *)buf)->dofh_loadsz)) { 12042 ddi_prop_free(buf); 12043 dtrace_dof_error(NULL, "truncated DOF"); 12044 return (NULL); 12045 } 12046 12047 if (loadsz >= dtrace_dof_maxsize) { 12048 ddi_prop_free(buf); 12049 dtrace_dof_error(NULL, "oversized DOF"); 12050 return (NULL); 12051 } 12052 12053 dof = kmem_alloc(loadsz, KM_SLEEP); 12054 bcopy(buf, dof, loadsz); 12055 ddi_prop_free(buf); 12056 #else 12057 char *p; 12058 char *p_env; 12059 12060 if ((p_env = getenv(name)) == NULL) 12061 return (NULL); 12062 12063 len = strlen(p_env) / 2; 12064 12065 buf = kmem_alloc(len, KM_SLEEP); 12066 12067 dof = (dof_hdr_t *) buf; 12068 12069 p = p_env; 12070 12071 for (i = 0; i < len; i++) { 12072 buf[i] = (dtrace_dof_char(p[0]) << 4) | 12073 dtrace_dof_char(p[1]); 12074 p += 2; 12075 } 12076 12077 freeenv(p_env); 12078 12079 if (len < sizeof (dof_hdr_t)) { 12080 kmem_free(buf, 0); 12081 dtrace_dof_error(NULL, "truncated header"); 12082 return (NULL); 12083 } 12084 12085 if (len < (loadsz = dof->dofh_loadsz)) { 12086 kmem_free(buf, 0); 12087 dtrace_dof_error(NULL, "truncated DOF"); 12088 return (NULL); 12089 } 12090 12091 if (loadsz >= dtrace_dof_maxsize) { 12092 kmem_free(buf, 0); 12093 dtrace_dof_error(NULL, "oversized DOF"); 12094 return (NULL); 12095 } 12096 #endif 12097 12098 return (dof); 12099 } 12100 12101 static void 12102 dtrace_dof_destroy(dof_hdr_t *dof) 12103 { 12104 kmem_free(dof, dof->dofh_loadsz); 12105 } 12106 12107 /* 12108 * Return the dof_sec_t pointer corresponding to a given section index. If the 12109 * index is not valid, dtrace_dof_error() is called and NULL is returned. If 12110 * a type other than DOF_SECT_NONE is specified, the header is checked against 12111 * this type and NULL is returned if the types do not match. 
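 * (Callers such as dtrace_dof_probedesc() use this to resolve * cross-section references, e.g. the string table named by a probe * description's dofp_strtab.)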
12112 */ 12113 static dof_sec_t * 12114 dtrace_dof_sect(dof_hdr_t *dof, uint32_t type, dof_secidx_t i) 12115 { 12116 dof_sec_t *sec = (dof_sec_t *)(uintptr_t) 12117 ((uintptr_t)dof + dof->dofh_secoff + i * dof->dofh_secsize); 12118 12119 if (i >= dof->dofh_secnum) { 12120 dtrace_dof_error(dof, "referenced section index is invalid"); 12121 return (NULL); 12122 } 12123 12124 if (!(sec->dofs_flags & DOF_SECF_LOAD)) { 12125 dtrace_dof_error(dof, "referenced section is not loadable"); 12126 return (NULL); 12127 } 12128 12129 if (type != DOF_SECT_NONE && type != sec->dofs_type) { 12130 dtrace_dof_error(dof, "referenced section is the wrong type"); 12131 return (NULL); 12132 } 12133 12134 return (sec); 12135 } 12136 12137 static dtrace_probedesc_t * 12138 dtrace_dof_probedesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_probedesc_t *desc) 12139 { 12140 dof_probedesc_t *probe; 12141 dof_sec_t *strtab; 12142 uintptr_t daddr = (uintptr_t)dof; 12143 uintptr_t str; 12144 size_t size; 12145 12146 if (sec->dofs_type != DOF_SECT_PROBEDESC) { 12147 dtrace_dof_error(dof, "invalid probe section"); 12148 return (NULL); 12149 } 12150 12151 if (sec->dofs_align != sizeof (dof_secidx_t)) { 12152 dtrace_dof_error(dof, "bad alignment in probe description"); 12153 return (NULL); 12154 } 12155 12156 if (sec->dofs_offset + sizeof (dof_probedesc_t) > dof->dofh_loadsz) { 12157 dtrace_dof_error(dof, "truncated probe description"); 12158 return (NULL); 12159 } 12160 12161 probe = (dof_probedesc_t *)(uintptr_t)(daddr + sec->dofs_offset); 12162 strtab = dtrace_dof_sect(dof, DOF_SECT_STRTAB, probe->dofp_strtab); 12163 12164 if (strtab == NULL) 12165 return (NULL); 12166 12167 str = daddr + strtab->dofs_offset; 12168 size = strtab->dofs_size; 12169 12170 if (probe->dofp_provider >= strtab->dofs_size) { 12171 dtrace_dof_error(dof, "corrupt probe provider"); 12172 return (NULL); 12173 } 12174 12175 (void) strncpy(desc->dtpd_provider, 12176 (char *)(str + probe->dofp_provider), 12177 MIN(DTRACE_PROVNAMELEN - 1, size - probe->dofp_provider)); 12178 12179 if (probe->dofp_mod >= strtab->dofs_size) { 12180 dtrace_dof_error(dof, "corrupt probe module"); 12181 return (NULL); 12182 } 12183 12184 (void) strncpy(desc->dtpd_mod, (char *)(str + probe->dofp_mod), 12185 MIN(DTRACE_MODNAMELEN - 1, size - probe->dofp_mod)); 12186 12187 if (probe->dofp_func >= strtab->dofs_size) { 12188 dtrace_dof_error(dof, "corrupt probe function"); 12189 return (NULL); 12190 } 12191 12192 (void) strncpy(desc->dtpd_func, (char *)(str + probe->dofp_func), 12193 MIN(DTRACE_FUNCNAMELEN - 1, size - probe->dofp_func)); 12194 12195 if (probe->dofp_name >= strtab->dofs_size) { 12196 dtrace_dof_error(dof, "corrupt probe name"); 12197 return (NULL); 12198 } 12199 12200 (void) strncpy(desc->dtpd_name, (char *)(str + probe->dofp_name), 12201 MIN(DTRACE_NAMELEN - 1, size - probe->dofp_name)); 12202 12203 return (desc); 12204 } 12205 12206 static dtrace_difo_t * 12207 dtrace_dof_difo(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12208 cred_t *cr) 12209 { 12210 dtrace_difo_t *dp; 12211 size_t ttl = 0; 12212 dof_difohdr_t *dofd; 12213 uintptr_t daddr = (uintptr_t)dof; 12214 size_t max = dtrace_difo_maxsize; 12215 int i, l, n; 12216 12217 static const struct { 12218 int section; 12219 int bufoffs; 12220 int lenoffs; 12221 int entsize; 12222 int align; 12223 const char *msg; 12224 } difo[] = { 12225 { DOF_SECT_DIF, offsetof(dtrace_difo_t, dtdo_buf), 12226 offsetof(dtrace_difo_t, dtdo_len), sizeof (dif_instr_t), 12227 sizeof (dif_instr_t), "multiple DIF sections" }, 
12228 12229 { DOF_SECT_INTTAB, offsetof(dtrace_difo_t, dtdo_inttab), 12230 offsetof(dtrace_difo_t, dtdo_intlen), sizeof (uint64_t), 12231 sizeof (uint64_t), "multiple integer tables" }, 12232 12233 { DOF_SECT_STRTAB, offsetof(dtrace_difo_t, dtdo_strtab), 12234 offsetof(dtrace_difo_t, dtdo_strlen), 0, 12235 sizeof (char), "multiple string tables" }, 12236 12237 { DOF_SECT_VARTAB, offsetof(dtrace_difo_t, dtdo_vartab), 12238 offsetof(dtrace_difo_t, dtdo_varlen), sizeof (dtrace_difv_t), 12239 sizeof (uint_t), "multiple variable tables" }, 12240 12241 { DOF_SECT_NONE, 0, 0, 0, 0, NULL } 12242 }; 12243 12244 if (sec->dofs_type != DOF_SECT_DIFOHDR) { 12245 dtrace_dof_error(dof, "invalid DIFO header section"); 12246 return (NULL); 12247 } 12248 12249 if (sec->dofs_align != sizeof (dof_secidx_t)) { 12250 dtrace_dof_error(dof, "bad alignment in DIFO header"); 12251 return (NULL); 12252 } 12253 12254 if (sec->dofs_size < sizeof (dof_difohdr_t) || 12255 sec->dofs_size % sizeof (dof_secidx_t)) { 12256 dtrace_dof_error(dof, "bad size in DIFO header"); 12257 return (NULL); 12258 } 12259 12260 dofd = (dof_difohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12261 n = (sec->dofs_size - sizeof (*dofd)) / sizeof (dof_secidx_t) + 1; 12262 12263 dp = kmem_zalloc(sizeof (dtrace_difo_t), KM_SLEEP); 12264 dp->dtdo_rtype = dofd->dofd_rtype; 12265 12266 for (l = 0; l < n; l++) { 12267 dof_sec_t *subsec; 12268 void **bufp; 12269 uint32_t *lenp; 12270 12271 if ((subsec = dtrace_dof_sect(dof, DOF_SECT_NONE, 12272 dofd->dofd_links[l])) == NULL) 12273 goto err; /* invalid section link */ 12274 12275 if (ttl + subsec->dofs_size > max) { 12276 dtrace_dof_error(dof, "exceeds maximum size"); 12277 goto err; 12278 } 12279 12280 ttl += subsec->dofs_size; 12281 12282 for (i = 0; difo[i].section != DOF_SECT_NONE; i++) { 12283 if (subsec->dofs_type != difo[i].section) 12284 continue; 12285 12286 if (!(subsec->dofs_flags & DOF_SECF_LOAD)) { 12287 dtrace_dof_error(dof, "section not loaded"); 12288 goto err; 12289 } 12290 12291 if (subsec->dofs_align != difo[i].align) { 12292 dtrace_dof_error(dof, "bad alignment"); 12293 goto err; 12294 } 12295 12296 bufp = (void **)((uintptr_t)dp + difo[i].bufoffs); 12297 lenp = (uint32_t *)((uintptr_t)dp + difo[i].lenoffs); 12298 12299 if (*bufp != NULL) { 12300 dtrace_dof_error(dof, difo[i].msg); 12301 goto err; 12302 } 12303 12304 if (difo[i].entsize != subsec->dofs_entsize) { 12305 dtrace_dof_error(dof, "entry size mismatch"); 12306 goto err; 12307 } 12308 12309 if (subsec->dofs_entsize != 0 && 12310 (subsec->dofs_size % subsec->dofs_entsize) != 0) { 12311 dtrace_dof_error(dof, "corrupt entry size"); 12312 goto err; 12313 } 12314 12315 *lenp = subsec->dofs_size; 12316 *bufp = kmem_alloc(subsec->dofs_size, KM_SLEEP); 12317 bcopy((char *)(uintptr_t)(daddr + subsec->dofs_offset), 12318 *bufp, subsec->dofs_size); 12319 12320 if (subsec->dofs_entsize != 0) 12321 *lenp /= subsec->dofs_entsize; 12322 12323 break; 12324 } 12325 12326 /* 12327 * If we encounter a loadable DIFO sub-section that is not 12328 * known to us, assume this is a broken program and fail. 12329 */ 12330 if (difo[i].section == DOF_SECT_NONE && 12331 (subsec->dofs_flags & DOF_SECF_LOAD)) { 12332 dtrace_dof_error(dof, "unrecognized DIFO subsection"); 12333 goto err; 12334 } 12335 } 12336 12337 if (dp->dtdo_buf == NULL) { 12338 /* 12339 * We can't have a DIF object without DIF text. 
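 * That is, of the subsections enumerated in the difo[] table above, only the DOF_SECT_DIF text is checked for presence at this point: a DIFO that links every kind of table but no DIF text is rejected.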
12340 */ 12341 dtrace_dof_error(dof, "missing DIF text"); 12342 goto err; 12343 } 12344 12345 /* 12346 * Before we validate the DIF object, run through the variable table 12347 * looking for the strings -- if any of their sizes are zero, we'll set 12348 * their size to be the system-wide default string size. Note that 12349 * this should _not_ happen if the "strsize" option has been set -- 12350 * in this case, the compiler should have set the size to reflect the 12351 * setting of the option. 12352 */ 12353 for (i = 0; i < dp->dtdo_varlen; i++) { 12354 dtrace_difv_t *v = &dp->dtdo_vartab[i]; 12355 dtrace_diftype_t *t = &v->dtdv_type; 12356 12357 if (v->dtdv_id < DIF_VAR_OTHER_UBASE) 12358 continue; 12359 12360 if (t->dtdt_kind == DIF_TYPE_STRING && t->dtdt_size == 0) 12361 t->dtdt_size = dtrace_strsize_default; 12362 } 12363 12364 if (dtrace_difo_validate(dp, vstate, DIF_DIR_NREGS, cr) != 0) 12365 goto err; 12366 12367 dtrace_difo_init(dp, vstate); 12368 return (dp); 12369 12370 err: 12371 kmem_free(dp->dtdo_buf, dp->dtdo_len * sizeof (dif_instr_t)); 12372 kmem_free(dp->dtdo_inttab, dp->dtdo_intlen * sizeof (uint64_t)); 12373 kmem_free(dp->dtdo_strtab, dp->dtdo_strlen); 12374 kmem_free(dp->dtdo_vartab, dp->dtdo_varlen * sizeof (dtrace_difv_t)); 12375 12376 kmem_free(dp, sizeof (dtrace_difo_t)); 12377 return (NULL); 12378 } 12379 12380 static dtrace_predicate_t * 12381 dtrace_dof_predicate(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12382 cred_t *cr) 12383 { 12384 dtrace_difo_t *dp; 12385 12386 if ((dp = dtrace_dof_difo(dof, sec, vstate, cr)) == NULL) 12387 return (NULL); 12388 12389 return (dtrace_predicate_create(dp)); 12390 } 12391 12392 static dtrace_actdesc_t * 12393 dtrace_dof_actdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12394 cred_t *cr) 12395 { 12396 dtrace_actdesc_t *act, *first = NULL, *last = NULL, *next; 12397 dof_actdesc_t *desc; 12398 dof_sec_t *difosec; 12399 size_t offs; 12400 uintptr_t daddr = (uintptr_t)dof; 12401 uint64_t arg; 12402 dtrace_actkind_t kind; 12403 12404 if (sec->dofs_type != DOF_SECT_ACTDESC) { 12405 dtrace_dof_error(dof, "invalid action section"); 12406 return (NULL); 12407 } 12408 12409 if (sec->dofs_offset + sizeof (dof_actdesc_t) > dof->dofh_loadsz) { 12410 dtrace_dof_error(dof, "truncated action description"); 12411 return (NULL); 12412 } 12413 12414 if (sec->dofs_align != sizeof (uint64_t)) { 12415 dtrace_dof_error(dof, "bad alignment in action description"); 12416 return (NULL); 12417 } 12418 12419 if (sec->dofs_size < sec->dofs_entsize) { 12420 dtrace_dof_error(dof, "section entry size exceeds total size"); 12421 return (NULL); 12422 } 12423 12424 if (sec->dofs_entsize != sizeof (dof_actdesc_t)) { 12425 dtrace_dof_error(dof, "bad entry size in action description"); 12426 return (NULL); 12427 } 12428 12429 if (sec->dofs_size / sec->dofs_entsize > dtrace_actions_max) { 12430 dtrace_dof_error(dof, "actions exceed dtrace_actions_max"); 12431 return (NULL); 12432 } 12433 12434 for (offs = 0; offs < sec->dofs_size; offs += sec->dofs_entsize) { 12435 desc = (dof_actdesc_t *)(daddr + 12436 (uintptr_t)sec->dofs_offset + offs); 12437 kind = (dtrace_actkind_t)desc->dofa_kind; 12438 12439 if ((DTRACEACT_ISPRINTFLIKE(kind) && 12440 (kind != DTRACEACT_PRINTA || 12441 desc->dofa_strtab != DOF_SECIDX_NONE)) || 12442 (kind == DTRACEACT_DIFEXPR && 12443 desc->dofa_strtab != DOF_SECIDX_NONE)) { 12444 dof_sec_t *strtab; 12445 char *str, *fmt; 12446 uint64_t i; 12447 12448 /* 12449 * The argument to these actions is an index into the
12450 * DOF string table. For printf()-like actions, this 12451 * is the format string. For print(), this is the 12452 * CTF type of the expression result. 12453 */ 12454 if ((strtab = dtrace_dof_sect(dof, 12455 DOF_SECT_STRTAB, desc->dofa_strtab)) == NULL) 12456 goto err; 12457 12458 str = (char *)((uintptr_t)dof + 12459 (uintptr_t)strtab->dofs_offset); 12460 12461 for (i = desc->dofa_arg; i < strtab->dofs_size; i++) { 12462 if (str[i] == '\0') 12463 break; 12464 } 12465 12466 if (i >= strtab->dofs_size) { 12467 dtrace_dof_error(dof, "bogus format string"); 12468 goto err; 12469 } 12470 12471 if (i == desc->dofa_arg) { 12472 dtrace_dof_error(dof, "empty format string"); 12473 goto err; 12474 } 12475 12476 i -= desc->dofa_arg; 12477 fmt = kmem_alloc(i + 1, KM_SLEEP); 12478 bcopy(&str[desc->dofa_arg], fmt, i + 1); 12479 arg = (uint64_t)(uintptr_t)fmt; 12480 } else { 12481 if (kind == DTRACEACT_PRINTA) { 12482 ASSERT(desc->dofa_strtab == DOF_SECIDX_NONE); 12483 arg = 0; 12484 } else { 12485 arg = desc->dofa_arg; 12486 } 12487 } 12488 12489 act = dtrace_actdesc_create(kind, desc->dofa_ntuple, 12490 desc->dofa_uarg, arg); 12491 12492 if (last != NULL) { 12493 last->dtad_next = act; 12494 } else { 12495 first = act; 12496 } 12497 12498 last = act; 12499 12500 if (desc->dofa_difo == DOF_SECIDX_NONE) 12501 continue; 12502 12503 if ((difosec = dtrace_dof_sect(dof, 12504 DOF_SECT_DIFOHDR, desc->dofa_difo)) == NULL) 12505 goto err; 12506 12507 act->dtad_difo = dtrace_dof_difo(dof, difosec, vstate, cr); 12508 12509 if (act->dtad_difo == NULL) 12510 goto err; 12511 } 12512 12513 ASSERT(first != NULL); 12514 return (first); 12515 12516 err: 12517 for (act = first; act != NULL; act = next) { 12518 next = act->dtad_next; 12519 dtrace_actdesc_release(act, vstate); 12520 } 12521 12522 return (NULL); 12523 } 12524 12525 static dtrace_ecbdesc_t * 12526 dtrace_dof_ecbdesc(dof_hdr_t *dof, dof_sec_t *sec, dtrace_vstate_t *vstate, 12527 cred_t *cr) 12528 { 12529 dtrace_ecbdesc_t *ep; 12530 dof_ecbdesc_t *ecb; 12531 dtrace_probedesc_t *desc; 12532 dtrace_predicate_t *pred = NULL; 12533 12534 if (sec->dofs_size < sizeof (dof_ecbdesc_t)) { 12535 dtrace_dof_error(dof, "truncated ECB description"); 12536 return (NULL); 12537 } 12538 12539 if (sec->dofs_align != sizeof (uint64_t)) { 12540 dtrace_dof_error(dof, "bad alignment in ECB description"); 12541 return (NULL); 12542 } 12543 12544 ecb = (dof_ecbdesc_t *)((uintptr_t)dof + (uintptr_t)sec->dofs_offset); 12545 sec = dtrace_dof_sect(dof, DOF_SECT_PROBEDESC, ecb->dofe_probes); 12546 12547 if (sec == NULL) 12548 return (NULL); 12549 12550 ep = kmem_zalloc(sizeof (dtrace_ecbdesc_t), KM_SLEEP); 12551 ep->dted_uarg = ecb->dofe_uarg; 12552 desc = &ep->dted_probe; 12553 12554 if (dtrace_dof_probedesc(dof, sec, desc) == NULL) 12555 goto err; 12556 12557 if (ecb->dofe_pred != DOF_SECIDX_NONE) { 12558 if ((sec = dtrace_dof_sect(dof, 12559 DOF_SECT_DIFOHDR, ecb->dofe_pred)) == NULL) 12560 goto err; 12561 12562 if ((pred = dtrace_dof_predicate(dof, sec, vstate, cr)) == NULL) 12563 goto err; 12564 12565 ep->dted_pred.dtpdd_predicate = pred; 12566 } 12567 12568 if (ecb->dofe_actions != DOF_SECIDX_NONE) { 12569 if ((sec = dtrace_dof_sect(dof, 12570 DOF_SECT_ACTDESC, ecb->dofe_actions)) == NULL) 12571 goto err; 12572 12573 ep->dted_action = dtrace_dof_actdesc(dof, sec, vstate, cr); 12574 12575 if (ep->dted_action == NULL) 12576 goto err; 12577 } 12578 12579 return (ep); 12580 12581 err: 12582 if (pred != NULL) 12583 dtrace_predicate_release(pred, vstate); 12584 kmem_free(ep, sizeof 
(dtrace_ecbdesc_t)); 12585 return (NULL); 12586 } 12587 12588 /* 12589 * Apply the relocations from the specified 'sec' (a DOF_SECT_URELHDR) to the 12590 * specified DOF. At present, this amounts to simply adding 'ubase' to the 12591 * site of any user SETX relocations to account for load object base address. 12592 * In the future, if we need other relocations, this function can be extended. 12593 */ 12594 static int 12595 dtrace_dof_relocate(dof_hdr_t *dof, dof_sec_t *sec, uint64_t ubase) 12596 { 12597 uintptr_t daddr = (uintptr_t)dof; 12598 dof_relohdr_t *dofr = 12599 (dof_relohdr_t *)(uintptr_t)(daddr + sec->dofs_offset); 12600 dof_sec_t *ss, *rs, *ts; 12601 dof_relodesc_t *r; 12602 uint_t i, n; 12603 12604 if (sec->dofs_size < sizeof (dof_relohdr_t) || 12605 sec->dofs_align != sizeof (dof_secidx_t)) { 12606 dtrace_dof_error(dof, "invalid relocation header"); 12607 return (-1); 12608 } 12609 12610 ss = dtrace_dof_sect(dof, DOF_SECT_STRTAB, dofr->dofr_strtab); 12611 rs = dtrace_dof_sect(dof, DOF_SECT_RELTAB, dofr->dofr_relsec); 12612 ts = dtrace_dof_sect(dof, DOF_SECT_NONE, dofr->dofr_tgtsec); 12613 12614 if (ss == NULL || rs == NULL || ts == NULL) 12615 return (-1); /* dtrace_dof_error() has been called already */ 12616 12617 if (rs->dofs_entsize < sizeof (dof_relodesc_t) || 12618 rs->dofs_align != sizeof (uint64_t)) { 12619 dtrace_dof_error(dof, "invalid relocation section"); 12620 return (-1); 12621 } 12622 12623 r = (dof_relodesc_t *)(uintptr_t)(daddr + rs->dofs_offset); 12624 n = rs->dofs_size / rs->dofs_entsize; 12625 12626 for (i = 0; i < n; i++) { 12627 uintptr_t taddr = daddr + ts->dofs_offset + r->dofr_offset; 12628 12629 switch (r->dofr_type) { 12630 case DOF_RELO_NONE: 12631 break; 12632 case DOF_RELO_SETX: 12633 if (r->dofr_offset >= ts->dofs_size || r->dofr_offset + 12634 sizeof (uint64_t) > ts->dofs_size) { 12635 dtrace_dof_error(dof, "bad relocation offset"); 12636 return (-1); 12637 } 12638 12639 if (!IS_P2ALIGNED(taddr, sizeof (uint64_t))) { 12640 dtrace_dof_error(dof, "misaligned setx relo"); 12641 return (-1); 12642 } 12643 12644 *(uint64_t *)taddr += ubase; 12645 break; 12646 default: 12647 dtrace_dof_error(dof, "invalid relocation type"); 12648 return (-1); 12649 } 12650 12651 r = (dof_relodesc_t *)((uintptr_t)r + rs->dofs_entsize); 12652 } 12653 12654 return (0); 12655 } 12656 12657 /* 12658 * The dof_hdr_t passed to dtrace_dof_slurp() should be a partially validated 12659 * header: it should be at the front of a memory region that is at least 12660 * sizeof (dof_hdr_t) in size -- and then at least dof_hdr.dofh_loadsz in 12661 * size. It need not be validated in any other way. 12662 */ 12663 static int 12664 dtrace_dof_slurp(dof_hdr_t *dof, dtrace_vstate_t *vstate, cred_t *cr, 12665 dtrace_enabling_t **enabp, uint64_t ubase, int noprobes) 12666 { 12667 uint64_t len = dof->dofh_loadsz, seclen; 12668 uintptr_t daddr = (uintptr_t)dof; 12669 dtrace_ecbdesc_t *ep; 12670 dtrace_enabling_t *enab; 12671 uint_t i; 12672 12673 ASSERT(MUTEX_HELD(&dtrace_lock)); 12674 ASSERT(dof->dofh_loadsz >= sizeof (dof_hdr_t)); 12675 12676 /* 12677 * Check the DOF header identification bytes. In addition to checking 12678 * valid settings, we also verify that unused bits/bytes are zeroed so 12679 * we can use them later without fear of regressing existing binaries. 
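 * As a rough sketch of the layout being validated below (the authoritative definitions live in <sys/dtrace.h>): the first four ident bytes carry the magic string "\177DOF", followed by bytes identifying the data model (ILP32 or LP64), the byte encoding, the DOF and DIF versions, and the integer and tuple register counts; everything from DOF_ID_PAD onward must be zero.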
12680 */ 12681 if (bcmp(&dof->dofh_ident[DOF_ID_MAG0], 12682 DOF_MAG_STRING, DOF_MAG_STRLEN) != 0) { 12683 dtrace_dof_error(dof, "DOF magic string mismatch"); 12684 return (-1); 12685 } 12686 12687 if (dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_ILP32 && 12688 dof->dofh_ident[DOF_ID_MODEL] != DOF_MODEL_LP64) { 12689 dtrace_dof_error(dof, "DOF has invalid data model"); 12690 return (-1); 12691 } 12692 12693 if (dof->dofh_ident[DOF_ID_ENCODING] != DOF_ENCODE_NATIVE) { 12694 dtrace_dof_error(dof, "DOF encoding mismatch"); 12695 return (-1); 12696 } 12697 12698 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 12699 dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_2) { 12700 dtrace_dof_error(dof, "DOF version mismatch"); 12701 return (-1); 12702 } 12703 12704 if (dof->dofh_ident[DOF_ID_DIFVERS] != DIF_VERSION_2) { 12705 dtrace_dof_error(dof, "DOF uses unsupported instruction set"); 12706 return (-1); 12707 } 12708 12709 if (dof->dofh_ident[DOF_ID_DIFIREG] > DIF_DIR_NREGS) { 12710 dtrace_dof_error(dof, "DOF uses too many integer registers"); 12711 return (-1); 12712 } 12713 12714 if (dof->dofh_ident[DOF_ID_DIFTREG] > DIF_DTR_NREGS) { 12715 dtrace_dof_error(dof, "DOF uses too many tuple registers"); 12716 return (-1); 12717 } 12718 12719 for (i = DOF_ID_PAD; i < DOF_ID_SIZE; i++) { 12720 if (dof->dofh_ident[i] != 0) { 12721 dtrace_dof_error(dof, "DOF has invalid ident byte set"); 12722 return (-1); 12723 } 12724 } 12725 12726 if (dof->dofh_flags & ~DOF_FL_VALID) { 12727 dtrace_dof_error(dof, "DOF has invalid flag bits set"); 12728 return (-1); 12729 } 12730 12731 if (dof->dofh_secsize == 0) { 12732 dtrace_dof_error(dof, "zero section header size"); 12733 return (-1); 12734 } 12735 12736 /* 12737 * Check that the section headers don't exceed the amount of DOF 12738 * data. Note that we cast the section size and number of sections 12739 * to uint64_t's to prevent possible overflow in the multiplication. 12740 */ 12741 seclen = (uint64_t)dof->dofh_secnum * (uint64_t)dof->dofh_secsize; 12742 12743 if (dof->dofh_secoff > len || seclen > len || 12744 dof->dofh_secoff + seclen > len) { 12745 dtrace_dof_error(dof, "truncated section headers"); 12746 return (-1); 12747 } 12748 12749 if (!IS_P2ALIGNED(dof->dofh_secoff, sizeof (uint64_t))) { 12750 dtrace_dof_error(dof, "misaligned section headers"); 12751 return (-1); 12752 } 12753 12754 if (!IS_P2ALIGNED(dof->dofh_secsize, sizeof (uint64_t))) { 12755 dtrace_dof_error(dof, "misaligned section size"); 12756 return (-1); 12757 } 12758 12759 /* 12760 * Take an initial pass through the section headers to be sure that 12761 * the headers don't have stray offsets. If the 'noprobes' flag is 12762 * set, do not permit sections relating to providers, probes, or args. 
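 * (The alignment check below relies on the usual bit trick: since a & (a - 1) clears the lowest set bit of a, the result is zero only when dofs_align is 0 or a power of two.)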
12763 */ 12764 for (i = 0; i < dof->dofh_secnum; i++) { 12765 dof_sec_t *sec = (dof_sec_t *)(daddr + 12766 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12767 12768 if (noprobes) { 12769 switch (sec->dofs_type) { 12770 case DOF_SECT_PROVIDER: 12771 case DOF_SECT_PROBES: 12772 case DOF_SECT_PRARGS: 12773 case DOF_SECT_PROFFS: 12774 dtrace_dof_error(dof, "illegal sections " 12775 "for enabling"); 12776 return (-1); 12777 } 12778 } 12779 12780 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12781 continue; /* just ignore non-loadable sections */ 12782 12783 if (sec->dofs_align & (sec->dofs_align - 1)) { 12784 dtrace_dof_error(dof, "bad section alignment"); 12785 return (-1); 12786 } 12787 12788 if (sec->dofs_offset & (sec->dofs_align - 1)) { 12789 dtrace_dof_error(dof, "misaligned section"); 12790 return (-1); 12791 } 12792 12793 if (sec->dofs_offset > len || sec->dofs_size > len || 12794 sec->dofs_offset + sec->dofs_size > len) { 12795 dtrace_dof_error(dof, "corrupt section header"); 12796 return (-1); 12797 } 12798 12799 if (sec->dofs_type == DOF_SECT_STRTAB && *((char *)daddr + 12800 sec->dofs_offset + sec->dofs_size - 1) != '\0') { 12801 dtrace_dof_error(dof, "non-terminating string table"); 12802 return (-1); 12803 } 12804 } 12805 12806 /* 12807 * Take a second pass through the sections and locate and perform any 12808 * relocations that are present. We do this after the first pass to 12809 * be sure that all sections have had their headers validated. 12810 */ 12811 for (i = 0; i < dof->dofh_secnum; i++) { 12812 dof_sec_t *sec = (dof_sec_t *)(daddr + 12813 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12814 12815 if (!(sec->dofs_flags & DOF_SECF_LOAD)) 12816 continue; /* skip sections that are not loadable */ 12817 12818 switch (sec->dofs_type) { 12819 case DOF_SECT_URELHDR: 12820 if (dtrace_dof_relocate(dof, sec, ubase) != 0) 12821 return (-1); 12822 break; 12823 } 12824 } 12825 12826 if ((enab = *enabp) == NULL) 12827 enab = *enabp = dtrace_enabling_create(vstate); 12828 12829 for (i = 0; i < dof->dofh_secnum; i++) { 12830 dof_sec_t *sec = (dof_sec_t *)(daddr + 12831 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12832 12833 if (sec->dofs_type != DOF_SECT_ECBDESC) 12834 continue; 12835 12836 if ((ep = dtrace_dof_ecbdesc(dof, sec, vstate, cr)) == NULL) { 12837 dtrace_enabling_destroy(enab); 12838 *enabp = NULL; 12839 return (-1); 12840 } 12841 12842 dtrace_enabling_add(enab, ep); 12843 } 12844 12845 return (0); 12846 } 12847 12848 /* 12849 * Process DOF for any options. This routine assumes that the DOF has been 12850 * at least processed by dtrace_dof_slurp(). 
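 * As an illustration (the values here are made up): an option entry whose dofo_option is DTRACEOPT_BUFSIZE and whose dofo_value is 4 * 1024 * 1024 would be handed to dtrace_state_option() below, requesting a 4MB principal buffer. Entries must not name a string table (dofo_strtab must be DOF_SECIDX_NONE) and must not carry the value DTRACEOPT_UNSET.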
12851 */ 12852 static int 12853 dtrace_dof_options(dof_hdr_t *dof, dtrace_state_t *state) 12854 { 12855 int i, rval; 12856 uint32_t entsize; 12857 size_t offs; 12858 dof_optdesc_t *desc; 12859 12860 for (i = 0; i < dof->dofh_secnum; i++) { 12861 dof_sec_t *sec = (dof_sec_t *)((uintptr_t)dof + 12862 (uintptr_t)dof->dofh_secoff + i * dof->dofh_secsize); 12863 12864 if (sec->dofs_type != DOF_SECT_OPTDESC) 12865 continue; 12866 12867 if (sec->dofs_align != sizeof (uint64_t)) { 12868 dtrace_dof_error(dof, "bad alignment in " 12869 "option description"); 12870 return (EINVAL); 12871 } 12872 12873 if ((entsize = sec->dofs_entsize) == 0) { 12874 dtrace_dof_error(dof, "zeroed option entry size"); 12875 return (EINVAL); 12876 } 12877 12878 if (entsize < sizeof (dof_optdesc_t)) { 12879 dtrace_dof_error(dof, "bad option entry size"); 12880 return (EINVAL); 12881 } 12882 12883 for (offs = 0; offs < sec->dofs_size; offs += entsize) { 12884 desc = (dof_optdesc_t *)((uintptr_t)dof + 12885 (uintptr_t)sec->dofs_offset + offs); 12886 12887 if (desc->dofo_strtab != DOF_SECIDX_NONE) { 12888 dtrace_dof_error(dof, "non-zero option string"); 12889 return (EINVAL); 12890 } 12891 12892 if (desc->dofo_value == DTRACEOPT_UNSET) { 12893 dtrace_dof_error(dof, "unset option"); 12894 return (EINVAL); 12895 } 12896 12897 if ((rval = dtrace_state_option(state, 12898 desc->dofo_option, desc->dofo_value)) != 0) { 12899 dtrace_dof_error(dof, "rejected option"); 12900 return (rval); 12901 } 12902 } 12903 } 12904 12905 return (0); 12906 } 12907 12908 /* 12909 * DTrace Consumer State Functions 12910 */ 12911 static int 12912 dtrace_dstate_init(dtrace_dstate_t *dstate, size_t size) 12913 { 12914 size_t hashsize, maxper, min, chunksize = dstate->dtds_chunksize; 12915 void *base; 12916 uintptr_t limit; 12917 dtrace_dynvar_t *dvar, *next, *start; 12918 int i; 12919 12920 ASSERT(MUTEX_HELD(&dtrace_lock)); 12921 ASSERT(dstate->dtds_base == NULL && dstate->dtds_percpu == NULL); 12922 12923 bzero(dstate, sizeof (dtrace_dstate_t)); 12924 12925 if ((dstate->dtds_chunksize = chunksize) == 0) 12926 dstate->dtds_chunksize = DTRACE_DYNVAR_CHUNKSIZE; 12927 12928 if (size < (min = dstate->dtds_chunksize + sizeof (dtrace_dynhash_t))) 12929 size = min; 12930 12931 if ((base = kmem_zalloc(size, KM_NOSLEEP)) == NULL) 12932 return (ENOMEM); 12933 12934 dstate->dtds_size = size; 12935 dstate->dtds_base = base; 12936 dstate->dtds_percpu = kmem_cache_alloc(dtrace_state_cache, KM_SLEEP); 12937 bzero(dstate->dtds_percpu, NCPU * sizeof (dtrace_dstate_percpu_t)); 12938 12939 hashsize = size / (dstate->dtds_chunksize + sizeof (dtrace_dynhash_t)); 12940 12941 if (hashsize != 1 && (hashsize & 1)) 12942 hashsize--; 12943 12944 dstate->dtds_hashsize = hashsize; 12945 dstate->dtds_hash = dstate->dtds_base; 12946 12947 /* 12948 * Set all of our hash buckets to point to the single sink, and (if 12949 * it hasn't already been set), set the sink's hash value to be the 12950 * sink sentinel value. The sink is needed for dynamic variable 12951 * lookups to know that they have iterated over an entire, valid hash 12952 * chain. 12953 */ 12954 for (i = 0; i < hashsize; i++) 12955 dstate->dtds_hash[i].dtdh_chain = &dtrace_dynhash_sink; 12956 12957 if (dtrace_dynhash_sink.dtdv_hashval != DTRACE_DYNHASH_SINK) 12958 dtrace_dynhash_sink.dtdv_hashval = DTRACE_DYNHASH_SINK; 12959 12960 /* 12961 * Determine number of active CPUs. Divide free list evenly among 12962 * active CPUs. 
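 * As a worked example (illustrative numbers only): if the space left after the hash table holds 64 chunks and there are 4 CPUs, maxper comes to 16 chunks' worth per CPU; had the division not come out even, the rounding down to a chunksize multiple would leave the remainder to the last CPU's free list.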
12963 */ 12964 start = (dtrace_dynvar_t *) 12965 ((uintptr_t)base + hashsize * sizeof (dtrace_dynhash_t)); 12966 limit = (uintptr_t)base + size; 12967 12968 maxper = (limit - (uintptr_t)start) / NCPU; 12969 maxper = (maxper / dstate->dtds_chunksize) * dstate->dtds_chunksize; 12970 12971 #if !defined(sun) 12972 CPU_FOREACH(i) { 12973 #else 12974 for (i = 0; i < NCPU; i++) { 12975 #endif 12976 dstate->dtds_percpu[i].dtdsc_free = dvar = start; 12977 12978 /* 12979 * If we don't even have enough chunks to make it once through 12980 * NCPUs, we're just going to allocate everything to the first 12981 * CPU. And if we're on the last CPU, we're going to allocate 12982 * whatever is left over. In either case, we set the limit to 12983 * be the limit of the dynamic variable space. 12984 */ 12985 if (maxper == 0 || i == NCPU - 1) { 12986 limit = (uintptr_t)base + size; 12987 start = NULL; 12988 } else { 12989 limit = (uintptr_t)start + maxper; 12990 start = (dtrace_dynvar_t *)limit; 12991 } 12992 12993 ASSERT(limit <= (uintptr_t)base + size); 12994 12995 for (;;) { 12996 next = (dtrace_dynvar_t *)((uintptr_t)dvar + 12997 dstate->dtds_chunksize); 12998 12999 if ((uintptr_t)next + dstate->dtds_chunksize >= limit) 13000 break; 13001 13002 dvar->dtdv_next = next; 13003 dvar = next; 13004 } 13005 13006 if (maxper == 0) 13007 break; 13008 } 13009 13010 return (0); 13011 } 13012 13013 static void 13014 dtrace_dstate_fini(dtrace_dstate_t *dstate) 13015 { 13016 ASSERT(MUTEX_HELD(&cpu_lock)); 13017 13018 if (dstate->dtds_base == NULL) 13019 return; 13020 13021 kmem_free(dstate->dtds_base, dstate->dtds_size); 13022 kmem_cache_free(dtrace_state_cache, dstate->dtds_percpu); 13023 } 13024 13025 static void 13026 dtrace_vstate_fini(dtrace_vstate_t *vstate) 13027 { 13028 /* 13029 * Logical XOR, where are you? 13030 */ 13031 ASSERT((vstate->dtvs_nglobals == 0) ^ (vstate->dtvs_globals != NULL)); 13032 13033 if (vstate->dtvs_nglobals > 0) { 13034 kmem_free(vstate->dtvs_globals, vstate->dtvs_nglobals * 13035 sizeof (dtrace_statvar_t *)); 13036 } 13037 13038 if (vstate->dtvs_ntlocals > 0) { 13039 kmem_free(vstate->dtvs_tlocals, vstate->dtvs_ntlocals * 13040 sizeof (dtrace_difv_t)); 13041 } 13042 13043 ASSERT((vstate->dtvs_nlocals == 0) ^ (vstate->dtvs_locals != NULL)); 13044 13045 if (vstate->dtvs_nlocals > 0) { 13046 kmem_free(vstate->dtvs_locals, vstate->dtvs_nlocals * 13047 sizeof (dtrace_statvar_t *)); 13048 } 13049 } 13050 13051 #if defined(sun) 13052 static void 13053 dtrace_state_clean(dtrace_state_t *state) 13054 { 13055 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 13056 return; 13057 13058 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 13059 dtrace_speculation_clean(state); 13060 } 13061 13062 static void 13063 dtrace_state_deadman(dtrace_state_t *state) 13064 { 13065 hrtime_t now; 13066 13067 dtrace_sync(); 13068 13069 now = dtrace_gethrtime(); 13070 13071 if (state != dtrace_anon.dta_state && 13072 now - state->dts_laststatus >= dtrace_deadman_user) 13073 return; 13074 13075 /* 13076 * We must be sure that dts_alive never appears to be less than the 13077 * value upon entry to dtrace_state_deadman(), and because we lack a 13078 * dtrace_cas64(), we cannot store to it atomically. We thus instead 13079 * store INT64_MAX to it, followed by a memory barrier, followed by 13080 * the new value. This assures that dts_alive never appears to be 13081 * less than its true value, regardless of the order in which the 13082 * stores to the underlying storage are issued. 
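 * In other words, a racing reader may observe the old value, INT64_MAX, or the new value -- but never anything smaller than the old value, with INT64_MAX acting as an "in transition" sentinel between the two real timestamps.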
13083 */ 13084 state->dts_alive = INT64_MAX; 13085 dtrace_membar_producer(); 13086 state->dts_alive = now; 13087 } 13088 #else 13089 static void 13090 dtrace_state_clean(void *arg) 13091 { 13092 dtrace_state_t *state = arg; 13093 dtrace_optval_t *opt = state->dts_options; 13094 13095 if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) 13096 return; 13097 13098 dtrace_dynvar_clean(&state->dts_vstate.dtvs_dynvars); 13099 dtrace_speculation_clean(state); 13100 13101 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13102 dtrace_state_clean, state); 13103 } 13104 13105 static void 13106 dtrace_state_deadman(void *arg) 13107 { 13108 dtrace_state_t *state = arg; 13109 hrtime_t now; 13110 13111 dtrace_sync(); 13112 13113 dtrace_debug_output(); 13114 13115 now = dtrace_gethrtime(); 13116 13117 if (state != dtrace_anon.dta_state && 13118 now - state->dts_laststatus >= dtrace_deadman_user) 13119 return; 13120 13121 /* 13122 * We must be sure that dts_alive never appears to be less than the 13123 * value upon entry to dtrace_state_deadman(), and because we lack a 13124 * dtrace_cas64(), we cannot store to it atomically. We thus instead 13125 * store INT64_MAX to it, followed by a memory barrier, followed by 13126 * the new value. This assures that dts_alive never appears to be 13127 * less than its true value, regardless of the order in which the 13128 * stores to the underlying storage are issued. 13129 */ 13130 state->dts_alive = INT64_MAX; 13131 dtrace_membar_producer(); 13132 state->dts_alive = now; 13133 13134 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13135 dtrace_state_deadman, state); 13136 } 13137 #endif 13138 13139 static dtrace_state_t * 13140 #if defined(sun) 13141 dtrace_state_create(dev_t *devp, cred_t *cr) 13142 #else 13143 dtrace_state_create(struct cdev *dev) 13144 #endif 13145 { 13146 #if defined(sun) 13147 minor_t minor; 13148 major_t major; 13149 #else 13150 cred_t *cr = NULL; 13151 int m = 0; 13152 #endif 13153 char c[30]; 13154 dtrace_state_t *state; 13155 dtrace_optval_t *opt; 13156 int bufsize = NCPU * sizeof (dtrace_buffer_t), i; 13157 13158 ASSERT(MUTEX_HELD(&dtrace_lock)); 13159 ASSERT(MUTEX_HELD(&cpu_lock)); 13160 13161 #if defined(sun) 13162 minor = (minor_t)(uintptr_t)vmem_alloc(dtrace_minor, 1, 13163 VM_BESTFIT | VM_SLEEP); 13164 13165 if (ddi_soft_state_zalloc(dtrace_softstate, minor) != DDI_SUCCESS) { 13166 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 13167 return (NULL); 13168 } 13169 13170 state = ddi_get_soft_state(dtrace_softstate, minor); 13171 #else 13172 if (dev != NULL) { 13173 cr = dev->si_cred; 13174 m = dev2unit(dev); 13175 } 13176 13177 /* Allocate memory for the state. */ 13178 state = kmem_zalloc(sizeof(dtrace_state_t), KM_SLEEP); 13179 #endif 13180 13181 state->dts_epid = DTRACE_EPIDNONE + 1; 13182 13183 (void) snprintf(c, sizeof (c), "dtrace_aggid_%d", m); 13184 #if defined(sun) 13185 state->dts_aggid_arena = vmem_create(c, (void *)1, UINT32_MAX, 1, 13186 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 13187 13188 if (devp != NULL) { 13189 major = getemajor(*devp); 13190 } else { 13191 major = ddi_driver_major(dtrace_devi); 13192 } 13193 13194 state->dts_dev = makedevice(major, minor); 13195 13196 if (devp != NULL) 13197 *devp = state->dts_dev; 13198 #else 13199 state->dts_aggid_arena = new_unrhdr(1, INT_MAX, &dtrace_unr_mtx); 13200 state->dts_dev = dev; 13201 #endif 13202 13203 /* 13204 * We allocate NCPU buffers. 
On the one hand, this can be quite 13205 * a bit of memory per instance (nearly 36K on a Starcat). On the 13206 * other hand, it saves an additional memory reference in the probe 13207 * path. 13208 */ 13209 state->dts_buffer = kmem_zalloc(bufsize, KM_SLEEP); 13210 state->dts_aggbuffer = kmem_zalloc(bufsize, KM_SLEEP); 13211 13212 #if defined(sun) 13213 state->dts_cleaner = CYCLIC_NONE; 13214 state->dts_deadman = CYCLIC_NONE; 13215 #else 13216 callout_init(&state->dts_cleaner, CALLOUT_MPSAFE); 13217 callout_init(&state->dts_deadman, CALLOUT_MPSAFE); 13218 #endif 13219 state->dts_vstate.dtvs_state = state; 13220 13221 for (i = 0; i < DTRACEOPT_MAX; i++) 13222 state->dts_options[i] = DTRACEOPT_UNSET; 13223 13224 /* 13225 * Set the default options. 13226 */ 13227 opt = state->dts_options; 13228 opt[DTRACEOPT_BUFPOLICY] = DTRACEOPT_BUFPOLICY_SWITCH; 13229 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_AUTO; 13230 opt[DTRACEOPT_NSPEC] = dtrace_nspec_default; 13231 opt[DTRACEOPT_SPECSIZE] = dtrace_specsize_default; 13232 opt[DTRACEOPT_CPU] = (dtrace_optval_t)DTRACE_CPUALL; 13233 opt[DTRACEOPT_STRSIZE] = dtrace_strsize_default; 13234 opt[DTRACEOPT_STACKFRAMES] = dtrace_stackframes_default; 13235 opt[DTRACEOPT_USTACKFRAMES] = dtrace_ustackframes_default; 13236 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_default; 13237 opt[DTRACEOPT_AGGRATE] = dtrace_aggrate_default; 13238 opt[DTRACEOPT_SWITCHRATE] = dtrace_switchrate_default; 13239 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_default; 13240 opt[DTRACEOPT_JSTACKFRAMES] = dtrace_jstackframes_default; 13241 opt[DTRACEOPT_JSTACKSTRSIZE] = dtrace_jstackstrsize_default; 13242 13243 state->dts_activity = DTRACE_ACTIVITY_INACTIVE; 13244 13245 /* 13246 * Depending on the user credentials, we set flag bits which alter probe 13247 * visibility or the amount of destructiveness allowed. In the case of 13248 * actual anonymous tracing, or the possession of all privileges, all of 13249 * the normal checks are bypassed. 13250 */ 13251 if (cr == NULL || PRIV_POLICY_ONLY(cr, PRIV_ALL, B_FALSE)) { 13252 state->dts_cred.dcr_visible = DTRACE_CRV_ALL; 13253 state->dts_cred.dcr_action = DTRACE_CRA_ALL; 13254 } else { 13255 /* 13256 * Set up the credentials for this instantiation. We take a 13257 * hold on the credential to prevent it from disappearing on 13258 * us; this in turn prevents the zone_t referenced by this 13259 * credential from disappearing. This means that we can 13260 * examine the credential and the zone from probe context. 13261 */ 13262 crhold(cr); 13263 state->dts_cred.dcr_cred = cr; 13264 13265 /* 13266 * CRA_PROC means "we have *some* privilege for dtrace" and 13267 * unlocks the use of variables like pid, zonename, etc. 13268 */ 13269 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE) || 13270 PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13271 state->dts_cred.dcr_action |= DTRACE_CRA_PROC; 13272 } 13273 13274 /* 13275 * dtrace_user allows use of syscall and profile providers. 13276 * If the user also has proc_owner and/or proc_zone, we 13277 * extend the scope to include additional visibility and 13278 * destructive power. 
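 * Roughly -- and this is a summary of the code below, not a substitute for it -- proc_owner widens both visibility and destructive action to all users' processes (DTRACE_CRV_ALLPROC, DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER), while proc_zone does the same across zones (DTRACE_CRV_ALLZONE, DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE).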
13279 */ 13280 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_USER, B_FALSE)) { 13281 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) { 13282 state->dts_cred.dcr_visible |= 13283 DTRACE_CRV_ALLPROC; 13284 13285 state->dts_cred.dcr_action |= 13286 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13287 } 13288 13289 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) { 13290 state->dts_cred.dcr_visible |= 13291 DTRACE_CRV_ALLZONE; 13292 13293 state->dts_cred.dcr_action |= 13294 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13295 } 13296 13297 /* 13298 * If we have all privs in whatever zone this is, 13299 * we can do destructive things to processes which 13300 * have altered credentials. 13301 */ 13302 #if defined(sun) 13303 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13304 cr->cr_zone->zone_privset)) { 13305 state->dts_cred.dcr_action |= 13306 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13307 } 13308 #endif 13309 } 13310 13311 /* 13312 * Holding the dtrace_kernel privilege also implies that 13313 * the user has the dtrace_user privilege from a visibility 13314 * perspective. But without further privileges, some 13315 * destructive actions are not available. 13316 */ 13317 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_KERNEL, B_FALSE)) { 13318 /* 13319 * Make all probes in all zones visible. However, 13320 * this doesn't mean that all actions become available 13321 * to all zones. 13322 */ 13323 state->dts_cred.dcr_visible |= DTRACE_CRV_KERNEL | 13324 DTRACE_CRV_ALLPROC | DTRACE_CRV_ALLZONE; 13325 13326 state->dts_cred.dcr_action |= DTRACE_CRA_KERNEL | 13327 DTRACE_CRA_PROC; 13328 /* 13329 * Holding proc_owner means that destructive actions 13330 * for *this* zone are allowed. 13331 */ 13332 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13333 state->dts_cred.dcr_action |= 13334 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13335 13336 /* 13337 * Holding proc_zone means that destructive actions 13338 * for this user/group ID in all zones are allowed. 13339 */ 13340 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13341 state->dts_cred.dcr_action |= 13342 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13343 13344 #if defined(sun) 13345 /* 13346 * If we have all privs in whatever zone this is, 13347 * we can do destructive things to processes which 13348 * have altered credentials. 13349 */ 13350 if (priv_isequalset(priv_getset(cr, PRIV_EFFECTIVE), 13351 cr->cr_zone->zone_privset)) { 13352 state->dts_cred.dcr_action |= 13353 DTRACE_CRA_PROC_DESTRUCTIVE_CREDCHG; 13354 } 13355 #endif 13356 } 13357 13358 /* 13359 * Holding the dtrace_proc privilege gives control over fasttrap 13360 * and pid providers. We need to grant wider destructive 13361 * privileges in the event that the user has proc_owner and/or 13362 * proc_zone.
13363 */ 13364 if (PRIV_POLICY_ONLY(cr, PRIV_DTRACE_PROC, B_FALSE)) { 13365 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_OWNER, B_FALSE)) 13366 state->dts_cred.dcr_action |= 13367 DTRACE_CRA_PROC_DESTRUCTIVE_ALLUSER; 13368 13369 if (PRIV_POLICY_ONLY(cr, PRIV_PROC_ZONE, B_FALSE)) 13370 state->dts_cred.dcr_action |= 13371 DTRACE_CRA_PROC_DESTRUCTIVE_ALLZONE; 13372 } 13373 } 13374 13375 return (state); 13376 } 13377 13378 static int 13379 dtrace_state_buffer(dtrace_state_t *state, dtrace_buffer_t *buf, int which) 13380 { 13381 dtrace_optval_t *opt = state->dts_options, size; 13382 processorid_t cpu = 0; 13383 int flags = 0, rval; 13384 13385 ASSERT(MUTEX_HELD(&dtrace_lock)); 13386 ASSERT(MUTEX_HELD(&cpu_lock)); 13387 ASSERT(which < DTRACEOPT_MAX); 13388 ASSERT(state->dts_activity == DTRACE_ACTIVITY_INACTIVE || 13389 (state == dtrace_anon.dta_state && 13390 state->dts_activity == DTRACE_ACTIVITY_ACTIVE)); 13391 13392 if (opt[which] == DTRACEOPT_UNSET || opt[which] == 0) 13393 return (0); 13394 13395 if (opt[DTRACEOPT_CPU] != DTRACEOPT_UNSET) 13396 cpu = opt[DTRACEOPT_CPU]; 13397 13398 if (which == DTRACEOPT_SPECSIZE) 13399 flags |= DTRACEBUF_NOSWITCH; 13400 13401 if (which == DTRACEOPT_BUFSIZE) { 13402 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_RING) 13403 flags |= DTRACEBUF_RING; 13404 13405 if (opt[DTRACEOPT_BUFPOLICY] == DTRACEOPT_BUFPOLICY_FILL) 13406 flags |= DTRACEBUF_FILL; 13407 13408 if (state != dtrace_anon.dta_state || 13409 state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 13410 flags |= DTRACEBUF_INACTIVE; 13411 } 13412 13413 for (size = opt[which]; size >= sizeof (uint64_t); size >>= 1) { 13414 /* 13415 * The size must be 8-byte aligned. If the size is not 8-byte 13416 * aligned, drop it down by the difference. 13417 */ 13418 if (size & (sizeof (uint64_t) - 1)) 13419 size -= size & (sizeof (uint64_t) - 1); 13420 13421 if (size < state->dts_reserve) { 13422 /* 13423 * Buffers must always be large enough to accommodate 13424 * their prereserved space. We return E2BIG instead 13425 * of ENOMEM in this case to allow for user-level 13426 * software to differentiate the cases. 13427 */ 13428 return (E2BIG); 13429 } 13430 13431 rval = dtrace_buffer_alloc(buf, size, flags, cpu); 13432 13433 if (rval != ENOMEM) { 13434 opt[which] = size; 13435 return (rval); 13436 } 13437 13438 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13439 return (rval); 13440 } 13441 13442 return (ENOMEM); 13443 } 13444 13445 static int 13446 dtrace_state_buffers(dtrace_state_t *state) 13447 { 13448 dtrace_speculation_t *spec = state->dts_speculations; 13449 int rval, i; 13450 13451 if ((rval = dtrace_state_buffer(state, state->dts_buffer, 13452 DTRACEOPT_BUFSIZE)) != 0) 13453 return (rval); 13454 13455 if ((rval = dtrace_state_buffer(state, state->dts_aggbuffer, 13456 DTRACEOPT_AGGSIZE)) != 0) 13457 return (rval); 13458 13459 for (i = 0; i < state->dts_nspeculations; i++) { 13460 if ((rval = dtrace_state_buffer(state, 13461 spec[i].dtsp_buffer, DTRACEOPT_SPECSIZE)) != 0) 13462 return (rval); 13463 } 13464 13465 return (0); 13466 } 13467 13468 static void 13469 dtrace_state_prereserve(dtrace_state_t *state) 13470 { 13471 dtrace_ecb_t *ecb; 13472 dtrace_probe_t *probe; 13473 13474 state->dts_reserve = 0; 13475 13476 if (state->dts_options[DTRACEOPT_BUFPOLICY] != DTRACEOPT_BUFPOLICY_FILL) 13477 return; 13478 13479 /* 13480 * If our buffer policy is a "fill" buffer policy, we need to set the 13481 * prereserved space to be the space required by the END probes.
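 * (Under the "fill" policy a buffer simply stops accepting records once it is full; reserving the END probes' space up front is what guarantees that their records can still be written at dtrace_stop() time.)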
13482 */ 13483 probe = dtrace_probes[dtrace_probeid_end - 1]; 13484 ASSERT(probe != NULL); 13485 13486 for (ecb = probe->dtpr_ecb; ecb != NULL; ecb = ecb->dte_next) { 13487 if (ecb->dte_state != state) 13488 continue; 13489 13490 state->dts_reserve += ecb->dte_needed + ecb->dte_alignment; 13491 } 13492 } 13493 13494 static int 13495 dtrace_state_go(dtrace_state_t *state, processorid_t *cpu) 13496 { 13497 dtrace_optval_t *opt = state->dts_options, sz, nspec; 13498 dtrace_speculation_t *spec; 13499 dtrace_buffer_t *buf; 13500 #if defined(sun) 13501 cyc_handler_t hdlr; 13502 cyc_time_t when; 13503 #endif 13504 int rval = 0, i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13505 dtrace_icookie_t cookie; 13506 13507 mutex_enter(&cpu_lock); 13508 mutex_enter(&dtrace_lock); 13509 13510 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 13511 rval = EBUSY; 13512 goto out; 13513 } 13514 13515 /* 13516 * Before we can perform any checks, we must prime all of the 13517 * retained enablings that correspond to this state. 13518 */ 13519 dtrace_enabling_prime(state); 13520 13521 if (state->dts_destructive && !state->dts_cred.dcr_destructive) { 13522 rval = EACCES; 13523 goto out; 13524 } 13525 13526 dtrace_state_prereserve(state); 13527 13528 /* 13529 * Now all we want to do is try to allocate our speculations. 13530 * We do not automatically resize the number of speculations; if 13531 * this fails, we will fail the operation. 13532 */ 13533 nspec = opt[DTRACEOPT_NSPEC]; 13534 ASSERT(nspec != DTRACEOPT_UNSET); 13535 13536 if (nspec > INT_MAX) { 13537 rval = ENOMEM; 13538 goto out; 13539 } 13540 13541 spec = kmem_zalloc(nspec * sizeof (dtrace_speculation_t), KM_NOSLEEP); 13542 13543 if (spec == NULL) { 13544 rval = ENOMEM; 13545 goto out; 13546 } 13547 13548 state->dts_speculations = spec; 13549 state->dts_nspeculations = (int)nspec; 13550 13551 for (i = 0; i < nspec; i++) { 13552 if ((buf = kmem_zalloc(bufsize, KM_NOSLEEP)) == NULL) { 13553 rval = ENOMEM; 13554 goto err; 13555 } 13556 13557 spec[i].dtsp_buffer = buf; 13558 } 13559 13560 if (opt[DTRACEOPT_GRABANON] != DTRACEOPT_UNSET) { 13561 if (dtrace_anon.dta_state == NULL) { 13562 rval = ENOENT; 13563 goto out; 13564 } 13565 13566 if (state->dts_necbs != 0) { 13567 rval = EALREADY; 13568 goto out; 13569 } 13570 13571 state->dts_anon = dtrace_anon_grab(); 13572 ASSERT(state->dts_anon != NULL); 13573 state = state->dts_anon; 13574 13575 /* 13576 * We want "grabanon" to be set in the grabbed state, so we'll 13577 * copy that option value from the grabbing state into the 13578 * grabbed state. 13579 */ 13580 state->dts_options[DTRACEOPT_GRABANON] = 13581 opt[DTRACEOPT_GRABANON]; 13582 13583 *cpu = dtrace_anon.dta_beganon; 13584 13585 /* 13586 * If the anonymous state is active (as it almost certainly 13587 * is if the anonymous enabling ultimately matched anything), 13588 * we don't allow any further option processing -- but we 13589 * don't return failure. 13590 */ 13591 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13592 goto out; 13593 } 13594 13595 if (opt[DTRACEOPT_AGGSIZE] != DTRACEOPT_UNSET && 13596 opt[DTRACEOPT_AGGSIZE] != 0) { 13597 if (state->dts_aggregations == NULL) { 13598 /* 13599 * We're not going to create an aggregation buffer 13600 * because we don't have any ECBs that contain 13601 * aggregations -- set this option to 0. 13602 */ 13603 opt[DTRACEOPT_AGGSIZE] = 0; 13604 } else { 13605 /* 13606 * If we have an aggregation buffer, we must also have 13607 * a buffer to use as scratch.
13608 */ 13609 if (opt[DTRACEOPT_BUFSIZE] == DTRACEOPT_UNSET || 13610 opt[DTRACEOPT_BUFSIZE] < state->dts_needed) { 13611 opt[DTRACEOPT_BUFSIZE] = state->dts_needed; 13612 } 13613 } 13614 } 13615 13616 if (opt[DTRACEOPT_SPECSIZE] != DTRACEOPT_UNSET && 13617 opt[DTRACEOPT_SPECSIZE] != 0) { 13618 if (!state->dts_speculates) { 13619 /* 13620 * We're not going to create speculation buffers 13621 * because we don't have any ECBs that actually 13622 * speculate -- set the speculation size to 0. 13623 */ 13624 opt[DTRACEOPT_SPECSIZE] = 0; 13625 } 13626 } 13627 13628 /* 13629 * The bare minimum size for any buffer that we're actually going to 13630 * do anything to is sizeof (uint64_t). 13631 */ 13632 sz = sizeof (uint64_t); 13633 13634 if ((state->dts_needed != 0 && opt[DTRACEOPT_BUFSIZE] < sz) || 13635 (state->dts_speculates && opt[DTRACEOPT_SPECSIZE] < sz) || 13636 (state->dts_aggregations != NULL && opt[DTRACEOPT_AGGSIZE] < sz)) { 13637 /* 13638 * A buffer size has been explicitly set to 0 (or to a size 13639 * that will be adjusted to 0) and we need the space -- we 13640 * need to return failure. We return ENOSPC to differentiate 13641 * it from failing to allocate a buffer due to failure to meet 13642 * the reserve (for which we return E2BIG). 13643 */ 13644 rval = ENOSPC; 13645 goto out; 13646 } 13647 13648 if ((rval = dtrace_state_buffers(state)) != 0) 13649 goto err; 13650 13651 if ((sz = opt[DTRACEOPT_DYNVARSIZE]) == DTRACEOPT_UNSET) 13652 sz = dtrace_dstate_defsize; 13653 13654 do { 13655 rval = dtrace_dstate_init(&state->dts_vstate.dtvs_dynvars, sz); 13656 13657 if (rval == 0) 13658 break; 13659 13660 if (opt[DTRACEOPT_BUFRESIZE] == DTRACEOPT_BUFRESIZE_MANUAL) 13661 goto err; 13662 } while (sz >>= 1); 13663 13664 opt[DTRACEOPT_DYNVARSIZE] = sz; 13665 13666 if (rval != 0) 13667 goto err; 13668 13669 if (opt[DTRACEOPT_STATUSRATE] > dtrace_statusrate_max) 13670 opt[DTRACEOPT_STATUSRATE] = dtrace_statusrate_max; 13671 13672 if (opt[DTRACEOPT_CLEANRATE] == 0) 13673 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13674 13675 if (opt[DTRACEOPT_CLEANRATE] < dtrace_cleanrate_min) 13676 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_min; 13677 13678 if (opt[DTRACEOPT_CLEANRATE] > dtrace_cleanrate_max) 13679 opt[DTRACEOPT_CLEANRATE] = dtrace_cleanrate_max; 13680 13681 state->dts_alive = state->dts_laststatus = dtrace_gethrtime(); 13682 #if defined(sun) 13683 hdlr.cyh_func = (cyc_func_t)dtrace_state_clean; 13684 hdlr.cyh_arg = state; 13685 hdlr.cyh_level = CY_LOW_LEVEL; 13686 13687 when.cyt_when = 0; 13688 when.cyt_interval = opt[DTRACEOPT_CLEANRATE]; 13689 13690 state->dts_cleaner = cyclic_add(&hdlr, &when); 13691 13692 hdlr.cyh_func = (cyc_func_t)dtrace_state_deadman; 13693 hdlr.cyh_arg = state; 13694 hdlr.cyh_level = CY_LOW_LEVEL; 13695 13696 when.cyt_when = 0; 13697 when.cyt_interval = dtrace_deadman_interval; 13698 13699 state->dts_deadman = cyclic_add(&hdlr, &when); 13700 #else 13701 callout_reset(&state->dts_cleaner, hz * opt[DTRACEOPT_CLEANRATE] / NANOSEC, 13702 dtrace_state_clean, state); 13703 callout_reset(&state->dts_deadman, hz * dtrace_deadman_interval / NANOSEC, 13704 dtrace_state_deadman, state); 13705 #endif 13706 13707 state->dts_activity = DTRACE_ACTIVITY_WARMUP; 13708 13709 /* 13710 * Now it's time to actually fire the BEGIN probe. We need to disable 13711 * interrupts here both to record the CPU on which we fired the BEGIN 13712 * probe (the data from this CPU will be processed first at user 13713 * level) and to manually activate the buffer for this CPU. 
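 * (Keeping interrupts disabled across the dtrace_probe() call also prevents us from migrating to another CPU between reading curcpu and firing the probe.)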
13714 */ 13715 cookie = dtrace_interrupt_disable(); 13716 *cpu = curcpu; 13717 ASSERT(state->dts_buffer[*cpu].dtb_flags & DTRACEBUF_INACTIVE); 13718 state->dts_buffer[*cpu].dtb_flags &= ~DTRACEBUF_INACTIVE; 13719 13720 dtrace_probe(dtrace_probeid_begin, 13721 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13722 dtrace_interrupt_enable(cookie); 13723 /* 13724 * We may have had an exit action from a BEGIN probe; only change our 13725 * state to ACTIVE if we're still in WARMUP. 13726 */ 13727 ASSERT(state->dts_activity == DTRACE_ACTIVITY_WARMUP || 13728 state->dts_activity == DTRACE_ACTIVITY_DRAINING); 13729 13730 if (state->dts_activity == DTRACE_ACTIVITY_WARMUP) 13731 state->dts_activity = DTRACE_ACTIVITY_ACTIVE; 13732 13733 /* 13734 * Regardless of whether we're now in ACTIVE or DRAINING, we 13735 * want each CPU to transition its principal buffer out of the 13736 * INACTIVE state. Doing this assures that no CPU will suddenly begin 13737 * processing an ECB halfway down a probe's ECB chain; all CPUs will 13738 * atomically transition from processing none of a state's ECBs to 13739 * processing all of them. 13740 */ 13741 dtrace_xcall(DTRACE_CPUALL, 13742 (dtrace_xcall_t)dtrace_buffer_activate, state); 13743 goto out; 13744 13745 err: 13746 dtrace_buffer_free(state->dts_buffer); 13747 dtrace_buffer_free(state->dts_aggbuffer); 13748 13749 if ((nspec = state->dts_nspeculations) == 0) { 13750 ASSERT(state->dts_speculations == NULL); 13751 goto out; 13752 } 13753 13754 spec = state->dts_speculations; 13755 ASSERT(spec != NULL); 13756 13757 for (i = 0; i < state->dts_nspeculations; i++) { 13758 if ((buf = spec[i].dtsp_buffer) == NULL) 13759 break; 13760 13761 dtrace_buffer_free(buf); 13762 kmem_free(buf, bufsize); 13763 } 13764 13765 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 13766 state->dts_nspeculations = 0; 13767 state->dts_speculations = NULL; 13768 13769 out: 13770 mutex_exit(&dtrace_lock); 13771 mutex_exit(&cpu_lock); 13772 13773 return (rval); 13774 } 13775 13776 static int 13777 dtrace_state_stop(dtrace_state_t *state, processorid_t *cpu) 13778 { 13779 dtrace_icookie_t cookie; 13780 13781 ASSERT(MUTEX_HELD(&dtrace_lock)); 13782 13783 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE && 13784 state->dts_activity != DTRACE_ACTIVITY_DRAINING) 13785 return (EINVAL); 13786 13787 /* 13788 * We'll set the activity to DTRACE_ACTIVITY_DRAINING, and issue a sync 13789 * to be sure that every CPU has seen it. See below for the details 13790 * on why this is done. 13791 */ 13792 state->dts_activity = DTRACE_ACTIVITY_DRAINING; 13793 dtrace_sync(); 13794 13795 /* 13796 * By this point, it is impossible for any CPU to be still processing 13797 * with DTRACE_ACTIVITY_ACTIVE. We can thus set our activity to 13798 * DTRACE_ACTIVITY_COOLDOWN and know that we're not racing with any 13799 * other CPU in dtrace_buffer_reserve(). This allows dtrace_probe() 13800 * and callees to know that the activity is DTRACE_ACTIVITY_COOLDOWN 13801 * iff we're in the END probe. 13802 */ 13803 state->dts_activity = DTRACE_ACTIVITY_COOLDOWN; 13804 dtrace_sync(); 13805 ASSERT(state->dts_activity == DTRACE_ACTIVITY_COOLDOWN); 13806 13807 /* 13808 * Finally, we can release the reserve and call the END probe. We 13809 * disable interrupts across calling the END probe to allow us to 13810 * return the CPU on which we actually called the END probe. This 13811 * allows user-land to be sure that this CPU's principal buffer is 13812 * processed last.
13813 */ 13814 state->dts_reserve = 0; 13815 13816 cookie = dtrace_interrupt_disable(); 13817 *cpu = curcpu; 13818 dtrace_probe(dtrace_probeid_end, 13819 (uint64_t)(uintptr_t)state, 0, 0, 0, 0); 13820 dtrace_interrupt_enable(cookie); 13821 13822 state->dts_activity = DTRACE_ACTIVITY_STOPPED; 13823 dtrace_sync(); 13824 13825 return (0); 13826 } 13827 13828 static int 13829 dtrace_state_option(dtrace_state_t *state, dtrace_optid_t option, 13830 dtrace_optval_t val) 13831 { 13832 ASSERT(MUTEX_HELD(&dtrace_lock)); 13833 13834 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) 13835 return (EBUSY); 13836 13837 if (option >= DTRACEOPT_MAX) 13838 return (EINVAL); 13839 13840 if (option != DTRACEOPT_CPU && val < 0) 13841 return (EINVAL); 13842 13843 switch (option) { 13844 case DTRACEOPT_DESTRUCTIVE: 13845 if (dtrace_destructive_disallow) 13846 return (EACCES); 13847 13848 state->dts_cred.dcr_destructive = 1; 13849 break; 13850 13851 case DTRACEOPT_BUFSIZE: 13852 case DTRACEOPT_DYNVARSIZE: 13853 case DTRACEOPT_AGGSIZE: 13854 case DTRACEOPT_SPECSIZE: 13855 case DTRACEOPT_STRSIZE: 13856 if (val < 0) 13857 return (EINVAL); 13858 13859 if (val >= LONG_MAX) { 13860 /* 13861 * If this is an otherwise negative value, set it to 13862 * the highest multiple of 128m less than LONG_MAX. 13863 * Technically, we're adjusting the size without 13864 * regard to the buffer resizing policy, but in fact, 13865 * this has no effect -- if we set the buffer size to 13866 * ~LONG_MAX and the buffer policy is ultimately set to 13867 * be "manual", the buffer allocation is guaranteed to 13868 * fail, if only because the allocation requires two 13869 * buffers. (We set the size to the highest 13870 * multiple of 128m because it ensures that the size 13871 * will remain a multiple of a megabyte when 13872 * repeatedly halved -- all the way down to 15m.) 13873 */ 13874 val = LONG_MAX - (1 << 27) + 1; 13875 } 13876 } 13877 13878 state->dts_options[option] = val; 13879 13880 return (0); 13881 } 13882 13883 static void 13884 dtrace_state_destroy(dtrace_state_t *state) 13885 { 13886 dtrace_ecb_t *ecb; 13887 dtrace_vstate_t *vstate = &state->dts_vstate; 13888 #if defined(sun) 13889 minor_t minor = getminor(state->dts_dev); 13890 #endif 13891 int i, bufsize = NCPU * sizeof (dtrace_buffer_t); 13892 dtrace_speculation_t *spec = state->dts_speculations; 13893 int nspec = state->dts_nspeculations; 13894 uint32_t match; 13895 13896 ASSERT(MUTEX_HELD(&dtrace_lock)); 13897 ASSERT(MUTEX_HELD(&cpu_lock)); 13898 13899 /* 13900 * First, retract any retained enablings for this state. 13901 */ 13902 dtrace_enabling_retract(state); 13903 ASSERT(state->dts_nretained == 0); 13904 13905 if (state->dts_activity == DTRACE_ACTIVITY_ACTIVE || 13906 state->dts_activity == DTRACE_ACTIVITY_DRAINING) { 13907 /* 13908 * We have managed to come into dtrace_state_destroy() on a 13909 * hot enabling -- almost certainly because of a disorderly 13910 * shutdown of a consumer. (That is, a consumer that is 13911 * exiting without having called dtrace_stop().) In this case, 13912 * we're going to set our activity to be KILLED, and then 13913 * issue a sync to be sure that everyone is out of probe 13914 * context before we start blowing away ECBs. 13915 */ 13916 state->dts_activity = DTRACE_ACTIVITY_KILLED; 13917 dtrace_sync(); 13918 } 13919 13920 /* 13921 * Release the credential hold we took in dtrace_state_create().
13922 */ 13923 if (state->dts_cred.dcr_cred != NULL) 13924 crfree(state->dts_cred.dcr_cred); 13925 13926 /* 13927 * Now we can safely disable and destroy any enabled probes. Because 13928 * any DTRACE_PRIV_KERNEL probes may actually be slowing our progress 13929 * (especially if they're all enabled), we take two passes through the 13930 * ECBs: in the first, we disable just DTRACE_PRIV_KERNEL probes, and 13931 * in the second we disable whatever is left over. 13932 */ 13933 for (match = DTRACE_PRIV_KERNEL; ; match = 0) { 13934 for (i = 0; i < state->dts_necbs; i++) { 13935 if ((ecb = state->dts_ecbs[i]) == NULL) 13936 continue; 13937 13938 if (match && ecb->dte_probe != NULL) { 13939 dtrace_probe_t *probe = ecb->dte_probe; 13940 dtrace_provider_t *prov = probe->dtpr_provider; 13941 13942 if (!(prov->dtpv_priv.dtpp_flags & match)) 13943 continue; 13944 } 13945 13946 dtrace_ecb_disable(ecb); 13947 dtrace_ecb_destroy(ecb); 13948 } 13949 13950 if (!match) 13951 break; 13952 } 13953 13954 /* 13955 * Before we free the buffers, perform one more sync to assure that 13956 * every CPU is out of probe context. 13957 */ 13958 dtrace_sync(); 13959 13960 dtrace_buffer_free(state->dts_buffer); 13961 dtrace_buffer_free(state->dts_aggbuffer); 13962 13963 for (i = 0; i < nspec; i++) 13964 dtrace_buffer_free(spec[i].dtsp_buffer); 13965 13966 #if defined(sun) 13967 if (state->dts_cleaner != CYCLIC_NONE) 13968 cyclic_remove(state->dts_cleaner); 13969 13970 if (state->dts_deadman != CYCLIC_NONE) 13971 cyclic_remove(state->dts_deadman); 13972 #else 13973 callout_stop(&state->dts_cleaner); 13974 callout_drain(&state->dts_cleaner); 13975 callout_stop(&state->dts_deadman); 13976 callout_drain(&state->dts_deadman); 13977 #endif 13978 13979 dtrace_dstate_fini(&vstate->dtvs_dynvars); 13980 dtrace_vstate_fini(vstate); 13981 if (state->dts_ecbs != NULL) 13982 kmem_free(state->dts_ecbs, state->dts_necbs * sizeof (dtrace_ecb_t *)); 13983 13984 if (state->dts_aggregations != NULL) { 13985 #ifdef DEBUG 13986 for (i = 0; i < state->dts_naggregations; i++) 13987 ASSERT(state->dts_aggregations[i] == NULL); 13988 #endif 13989 ASSERT(state->dts_naggregations > 0); 13990 kmem_free(state->dts_aggregations, 13991 state->dts_naggregations * sizeof (dtrace_aggregation_t *)); 13992 } 13993 13994 kmem_free(state->dts_buffer, bufsize); 13995 kmem_free(state->dts_aggbuffer, bufsize); 13996 13997 for (i = 0; i < nspec; i++) 13998 kmem_free(spec[i].dtsp_buffer, bufsize); 13999 14000 if (spec != NULL) 14001 kmem_free(spec, nspec * sizeof (dtrace_speculation_t)); 14002 14003 dtrace_format_destroy(state); 14004 14005 if (state->dts_aggid_arena != NULL) { 14006 #if defined(sun) 14007 vmem_destroy(state->dts_aggid_arena); 14008 #else 14009 delete_unrhdr(state->dts_aggid_arena); 14010 #endif 14011 state->dts_aggid_arena = NULL; 14012 } 14013 #if defined(sun) 14014 ddi_soft_state_free(dtrace_softstate, minor); 14015 vmem_free(dtrace_minor, (void *)(uintptr_t)minor, 1); 14016 #endif 14017 } 14018 14019 /* 14020 * DTrace Anonymous Enabling Functions 14021 */ 14022 static dtrace_state_t * 14023 dtrace_anon_grab(void) 14024 { 14025 dtrace_state_t *state; 14026 14027 ASSERT(MUTEX_HELD(&dtrace_lock)); 14028 14029 if ((state = dtrace_anon.dta_state) == NULL) { 14030 ASSERT(dtrace_anon.dta_enabling == NULL); 14031 return (NULL); 14032 } 14033 14034 ASSERT(dtrace_anon.dta_enabling != NULL); 14035 ASSERT(dtrace_retained != NULL); 14036 14037 dtrace_enabling_destroy(dtrace_anon.dta_enabling); 14038 dtrace_anon.dta_enabling = NULL; 14039 
dtrace_anon.dta_state = NULL; 14040 14041 return (state); 14042 } 14043 14044 static void 14045 dtrace_anon_property(void) 14046 { 14047 int i, rv; 14048 dtrace_state_t *state; 14049 dof_hdr_t *dof; 14050 char c[32]; /* enough for "dof-data-" + digits */ 14051 14052 ASSERT(MUTEX_HELD(&dtrace_lock)); 14053 ASSERT(MUTEX_HELD(&cpu_lock)); 14054 14055 for (i = 0; ; i++) { 14056 (void) snprintf(c, sizeof (c), "dof-data-%d", i); 14057 14058 dtrace_err_verbose = 1; 14059 14060 if ((dof = dtrace_dof_property(c)) == NULL) { 14061 dtrace_err_verbose = 0; 14062 break; 14063 } 14064 14065 #if defined(sun) 14066 /* 14067 * We want to create anonymous state, so we need to transition 14068 * the kernel debugger to indicate that DTrace is active. If 14069 * this fails (e.g. because the debugger has modified text in 14070 * some way), we won't continue with the processing. 14071 */ 14072 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 14073 cmn_err(CE_NOTE, "kernel debugger active; anonymous " 14074 "enabling ignored."); 14075 dtrace_dof_destroy(dof); 14076 break; 14077 } 14078 #endif 14079 14080 /* 14081 * If we haven't allocated an anonymous state, we'll do so now. 14082 */ 14083 if ((state = dtrace_anon.dta_state) == NULL) { 14084 #if defined(sun) 14085 state = dtrace_state_create(NULL, NULL); 14086 #else 14087 state = dtrace_state_create(NULL); 14088 #endif 14089 dtrace_anon.dta_state = state; 14090 14091 if (state == NULL) { 14092 /* 14093 * This basically shouldn't happen: the only 14094 * failure mode from dtrace_state_create() is a 14095 * failure of ddi_soft_state_zalloc() that 14096 * itself should never happen. Still, the 14097 * interface allows for a failure mode, and 14098 * we want to fail as gracefully as possible: 14099 * we'll emit an error message and cease 14100 * processing anonymous state in this case. 14101 */ 14102 cmn_err(CE_WARN, "failed to create " 14103 "anonymous state"); 14104 dtrace_dof_destroy(dof); 14105 break; 14106 } 14107 } 14108 14109 rv = dtrace_dof_slurp(dof, &state->dts_vstate, CRED(), 14110 &dtrace_anon.dta_enabling, 0, B_TRUE); 14111 14112 if (rv == 0) 14113 rv = dtrace_dof_options(dof, state); 14114 14115 dtrace_err_verbose = 0; 14116 dtrace_dof_destroy(dof); 14117 14118 if (rv != 0) { 14119 /* 14120 * This is malformed DOF; chuck any anonymous state 14121 * that we created. 14122 */ 14123 ASSERT(dtrace_anon.dta_enabling == NULL); 14124 dtrace_state_destroy(state); 14125 dtrace_anon.dta_state = NULL; 14126 break; 14127 } 14128 14129 ASSERT(dtrace_anon.dta_enabling != NULL); 14130 } 14131 14132 if (dtrace_anon.dta_enabling != NULL) { 14133 int rval; 14134 14135 /* 14136 * dtrace_enabling_retain() can only fail because we are 14137 * trying to retain more enablings than are allowed -- but 14138 * we only have one anonymous enabling, and we are guaranteed 14139 * to be allowed at least one retained enabling; we assert 14140 * that dtrace_enabling_retain() returns success. 
14141 */ 14142 rval = dtrace_enabling_retain(dtrace_anon.dta_enabling); 14143 ASSERT(rval == 0); 14144 14145 dtrace_enabling_dump(dtrace_anon.dta_enabling); 14146 } 14147 } 14148 14149 /* 14150 * DTrace Helper Functions 14151 */ 14152 static void 14153 dtrace_helper_trace(dtrace_helper_action_t *helper, 14154 dtrace_mstate_t *mstate, dtrace_vstate_t *vstate, int where) 14155 { 14156 uint32_t size, next, nnext, i; 14157 dtrace_helptrace_t *ent; 14158 uint16_t flags = cpu_core[curcpu].cpuc_dtrace_flags; 14159 14160 if (!dtrace_helptrace_enabled) 14161 return; 14162 14163 ASSERT(vstate->dtvs_nlocals <= dtrace_helptrace_nlocals); 14164 14165 /* 14166 * What would a tracing framework be without its own tracing 14167 * framework? (Well, a hell of a lot simpler, for starters...) 14168 */ 14169 size = sizeof (dtrace_helptrace_t) + dtrace_helptrace_nlocals * 14170 sizeof (uint64_t) - sizeof (uint64_t); 14171 14172 /* 14173 * Iterate until we can allocate a slot in the trace buffer. 14174 */ 14175 do { 14176 next = dtrace_helptrace_next; 14177 14178 if (next + size < dtrace_helptrace_bufsize) { 14179 nnext = next + size; 14180 } else { 14181 nnext = size; 14182 } 14183 } while (dtrace_cas32(&dtrace_helptrace_next, next, nnext) != next); 14184 14185 /* 14186 * We have our slot; fill it in. 14187 */ 14188 if (nnext == size) 14189 next = 0; 14190 14191 ent = (dtrace_helptrace_t *)&dtrace_helptrace_buffer[next]; 14192 ent->dtht_helper = helper; 14193 ent->dtht_where = where; 14194 ent->dtht_nlocals = vstate->dtvs_nlocals; 14195 14196 ent->dtht_fltoffs = (mstate->dtms_present & DTRACE_MSTATE_FLTOFFS) ? 14197 mstate->dtms_fltoffs : -1; 14198 ent->dtht_fault = DTRACE_FLAGS2FLT(flags); 14199 ent->dtht_illval = cpu_core[curcpu].cpuc_dtrace_illval; 14200 14201 for (i = 0; i < vstate->dtvs_nlocals; i++) { 14202 dtrace_statvar_t *svar; 14203 14204 if ((svar = vstate->dtvs_locals[i]) == NULL) 14205 continue; 14206 14207 ASSERT(svar->dtsv_size >= NCPU * sizeof (uint64_t)); 14208 ent->dtht_locals[i] = 14209 ((uint64_t *)(uintptr_t)svar->dtsv_data)[curcpu]; 14210 } 14211 } 14212 14213 static uint64_t 14214 dtrace_helper(int which, dtrace_mstate_t *mstate, 14215 dtrace_state_t *state, uint64_t arg0, uint64_t arg1) 14216 { 14217 uint16_t *flags = &cpu_core[curcpu].cpuc_dtrace_flags; 14218 uint64_t sarg0 = mstate->dtms_arg[0]; 14219 uint64_t sarg1 = mstate->dtms_arg[1]; 14220 uint64_t rval = 0; 14221 dtrace_helpers_t *helpers = curproc->p_dtrace_helpers; 14222 dtrace_helper_action_t *helper; 14223 dtrace_vstate_t *vstate; 14224 dtrace_difo_t *pred; 14225 int i, trace = dtrace_helptrace_enabled; 14226 14227 ASSERT(which >= 0 && which < DTRACE_NHELPER_ACTIONS); 14228 14229 if (helpers == NULL) 14230 return (0); 14231 14232 if ((helper = helpers->dthps_actions[which]) == NULL) 14233 return (0); 14234 14235 vstate = &helpers->dthps_vstate; 14236 mstate->dtms_arg[0] = arg0; 14237 mstate->dtms_arg[1] = arg1; 14238 14239 /* 14240 * Now iterate over each helper. If its predicate evaluates to 'true', 14241 * we'll call the corresponding actions. Note that the below calls 14242 * to dtrace_dif_emulate() may set faults in machine state. This is 14243 * okay: our caller (the outer dtrace_dif_emulate()) will simply plow 14244 * the stored DIF offset with its own (which is the desired behavior). 14245 * Also, note the calls to dtrace_dif_emulate() may allocate scratch 14246 * from machine state; this is okay, too. 
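 * The 'where' argument passed to dtrace_helper_trace() below encodes our
 * position in this walk: 0 denotes the predicate, i + 1 denotes the i'th
 * action, and the DTRACE_HELPTRACE_{NEXT,DONE,ERR} values mark the
 * per-helper, final and faulted transitions, respectively.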
14247 */ 14248 for (; helper != NULL; helper = helper->dtha_next) { 14249 if ((pred = helper->dtha_predicate) != NULL) { 14250 if (trace) 14251 dtrace_helper_trace(helper, mstate, vstate, 0); 14252 14253 if (!dtrace_dif_emulate(pred, mstate, vstate, state)) 14254 goto next; 14255 14256 if (*flags & CPU_DTRACE_FAULT) 14257 goto err; 14258 } 14259 14260 for (i = 0; i < helper->dtha_nactions; i++) { 14261 if (trace) 14262 dtrace_helper_trace(helper, 14263 mstate, vstate, i + 1); 14264 14265 rval = dtrace_dif_emulate(helper->dtha_actions[i], 14266 mstate, vstate, state); 14267 14268 if (*flags & CPU_DTRACE_FAULT) 14269 goto err; 14270 } 14271 14272 next: 14273 if (trace) 14274 dtrace_helper_trace(helper, mstate, vstate, 14275 DTRACE_HELPTRACE_NEXT); 14276 } 14277 14278 if (trace) 14279 dtrace_helper_trace(helper, mstate, vstate, 14280 DTRACE_HELPTRACE_DONE); 14281 14282 /* 14283 * Restore the arg0 that we saved upon entry. 14284 */ 14285 mstate->dtms_arg[0] = sarg0; 14286 mstate->dtms_arg[1] = sarg1; 14287 14288 return (rval); 14289 14290 err: 14291 if (trace) 14292 dtrace_helper_trace(helper, mstate, vstate, 14293 DTRACE_HELPTRACE_ERR); 14294 14295 /* 14296 * Restore the arg0 that we saved upon entry. 14297 */ 14298 mstate->dtms_arg[0] = sarg0; 14299 mstate->dtms_arg[1] = sarg1; 14300 14301 return (0); 14302 } 14303 14304 static void 14305 dtrace_helper_action_destroy(dtrace_helper_action_t *helper, 14306 dtrace_vstate_t *vstate) 14307 { 14308 int i; 14309 14310 if (helper->dtha_predicate != NULL) 14311 dtrace_difo_release(helper->dtha_predicate, vstate); 14312 14313 for (i = 0; i < helper->dtha_nactions; i++) { 14314 ASSERT(helper->dtha_actions[i] != NULL); 14315 dtrace_difo_release(helper->dtha_actions[i], vstate); 14316 } 14317 14318 kmem_free(helper->dtha_actions, 14319 helper->dtha_nactions * sizeof (dtrace_difo_t *)); 14320 kmem_free(helper, sizeof (dtrace_helper_action_t)); 14321 } 14322 14323 static int 14324 dtrace_helper_destroygen(int gen) 14325 { 14326 proc_t *p = curproc; 14327 dtrace_helpers_t *help = p->p_dtrace_helpers; 14328 dtrace_vstate_t *vstate; 14329 int i; 14330 14331 ASSERT(MUTEX_HELD(&dtrace_lock)); 14332 14333 if (help == NULL || gen > help->dthps_generation) 14334 return (EINVAL); 14335 14336 vstate = &help->dthps_vstate; 14337 14338 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 14339 dtrace_helper_action_t *last = NULL, *h, *next; 14340 14341 for (h = help->dthps_actions[i]; h != NULL; h = next) { 14342 next = h->dtha_next; 14343 14344 if (h->dtha_generation == gen) { 14345 if (last != NULL) { 14346 last->dtha_next = next; 14347 } else { 14348 help->dthps_actions[i] = next; 14349 } 14350 14351 dtrace_helper_action_destroy(h, vstate); 14352 } else { 14353 last = h; 14354 } 14355 } 14356 } 14357 14358 /* 14359 * Iterate until we've cleared out all helper providers with the 14360 * given generation number. 14361 */ 14362 for (;;) { 14363 dtrace_helper_provider_t *prov; 14364 14365 /* 14366 * Look for a helper provider with the right generation. We 14367 * have to start back at the beginning of the list each time 14368 * because we drop dtrace_lock. It's unlikely that we'll make 14369 * more than two passes. 14370 */ 14371 for (i = 0; i < help->dthps_nprovs; i++) { 14372 prov = help->dthps_provs[i]; 14373 14374 if (prov->dthp_generation == gen) 14375 break; 14376 } 14377 14378 /* 14379 * If there were no matches, we're done. 14380 */ 14381 if (i == help->dthps_nprovs) 14382 break; 14383 14384 /* 14385 * Move the last helper provider into this slot.
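 * Because the provider table is unordered, the removal can be done in
 * constant time by swapping the final entry into the vacated slot rather
 * than shifting the remainder of the array down.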
14386 */ 14387 help->dthps_nprovs--; 14388 help->dthps_provs[i] = help->dthps_provs[help->dthps_nprovs]; 14389 help->dthps_provs[help->dthps_nprovs] = NULL; 14390 14391 mutex_exit(&dtrace_lock); 14392 14393 /* 14394 * If we have a meta provider, remove this helper provider. 14395 */ 14396 mutex_enter(&dtrace_meta_lock); 14397 if (dtrace_meta_pid != NULL) { 14398 ASSERT(dtrace_deferred_pid == NULL); 14399 dtrace_helper_provider_remove(&prov->dthp_prov, 14400 p->p_pid); 14401 } 14402 mutex_exit(&dtrace_meta_lock); 14403 14404 dtrace_helper_provider_destroy(prov); 14405 14406 mutex_enter(&dtrace_lock); 14407 } 14408 14409 return (0); 14410 } 14411 14412 static int 14413 dtrace_helper_validate(dtrace_helper_action_t *helper) 14414 { 14415 int err = 0, i; 14416 dtrace_difo_t *dp; 14417 14418 if ((dp = helper->dtha_predicate) != NULL) 14419 err += dtrace_difo_validate_helper(dp); 14420 14421 for (i = 0; i < helper->dtha_nactions; i++) 14422 err += dtrace_difo_validate_helper(helper->dtha_actions[i]); 14423 14424 return (err == 0); 14425 } 14426 14427 static int 14428 dtrace_helper_action_add(int which, dtrace_ecbdesc_t *ep) 14429 { 14430 dtrace_helpers_t *help; 14431 dtrace_helper_action_t *helper, *last; 14432 dtrace_actdesc_t *act; 14433 dtrace_vstate_t *vstate; 14434 dtrace_predicate_t *pred; 14435 int count = 0, nactions = 0, i; 14436 14437 if (which < 0 || which >= DTRACE_NHELPER_ACTIONS) 14438 return (EINVAL); 14439 14440 help = curproc->p_dtrace_helpers; 14441 last = help->dthps_actions[which]; 14442 vstate = &help->dthps_vstate; 14443 14444 for (count = 0; last != NULL; last = last->dtha_next) { 14445 count++; 14446 if (last->dtha_next == NULL) 14447 break; 14448 } 14449 14450 /* 14451 * If we already have dtrace_helper_actions_max helper actions for this 14452 * helper action type, we'll refuse to add a new one. 
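 * (dtrace_helper_actions_max is a tunable; raising it permits longer
 * helper action chains at the cost of additional emulation work each
 * time a helper fires.)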
14453 */ 14454 if (count >= dtrace_helper_actions_max) 14455 return (ENOSPC); 14456 14457 helper = kmem_zalloc(sizeof (dtrace_helper_action_t), KM_SLEEP); 14458 helper->dtha_generation = help->dthps_generation; 14459 14460 if ((pred = ep->dted_pred.dtpdd_predicate) != NULL) { 14461 ASSERT(pred->dtp_difo != NULL); 14462 dtrace_difo_hold(pred->dtp_difo); 14463 helper->dtha_predicate = pred->dtp_difo; 14464 } 14465 14466 for (act = ep->dted_action; act != NULL; act = act->dtad_next) { 14467 if (act->dtad_kind != DTRACEACT_DIFEXPR) 14468 goto err; 14469 14470 if (act->dtad_difo == NULL) 14471 goto err; 14472 14473 nactions++; 14474 } 14475 14476 helper->dtha_actions = kmem_zalloc(sizeof (dtrace_difo_t *) * 14477 (helper->dtha_nactions = nactions), KM_SLEEP); 14478 14479 for (act = ep->dted_action, i = 0; act != NULL; act = act->dtad_next) { 14480 dtrace_difo_hold(act->dtad_difo); 14481 helper->dtha_actions[i++] = act->dtad_difo; 14482 } 14483 14484 if (!dtrace_helper_validate(helper)) 14485 goto err; 14486 14487 if (last == NULL) { 14488 help->dthps_actions[which] = helper; 14489 } else { 14490 last->dtha_next = helper; 14491 } 14492 14493 if (vstate->dtvs_nlocals > dtrace_helptrace_nlocals) { 14494 dtrace_helptrace_nlocals = vstate->dtvs_nlocals; 14495 dtrace_helptrace_next = 0; 14496 } 14497 14498 return (0); 14499 err: 14500 dtrace_helper_action_destroy(helper, vstate); 14501 return (EINVAL); 14502 } 14503 14504 static void 14505 dtrace_helper_provider_register(proc_t *p, dtrace_helpers_t *help, 14506 dof_helper_t *dofhp) 14507 { 14508 ASSERT(MUTEX_NOT_HELD(&dtrace_lock)); 14509 14510 mutex_enter(&dtrace_meta_lock); 14511 mutex_enter(&dtrace_lock); 14512 14513 if (!dtrace_attached() || dtrace_meta_pid == NULL) { 14514 /* 14515 * If the dtrace module is loaded but not attached, or if 14516 * there isn't a meta provider registered to deal with 14517 * these provider descriptions, we need to postpone creating 14518 * the actual providers until later. 14519 */ 14520 14521 if (help->dthps_next == NULL && help->dthps_prev == NULL && 14522 dtrace_deferred_pid != help) { 14523 help->dthps_deferred = 1; 14524 help->dthps_pid = p->p_pid; 14525 help->dthps_next = dtrace_deferred_pid; 14526 help->dthps_prev = NULL; 14527 if (dtrace_deferred_pid != NULL) 14528 dtrace_deferred_pid->dthps_prev = help; 14529 dtrace_deferred_pid = help; 14530 } 14531 14532 mutex_exit(&dtrace_lock); 14533 14534 } else if (dofhp != NULL) { 14535 /* 14536 * If the dtrace module is loaded and we have a particular 14537 * helper provider description, pass that off to the 14538 * meta provider. 14539 */ 14540 14541 mutex_exit(&dtrace_lock); 14542 14543 dtrace_helper_provide(dofhp, p->p_pid); 14544 14545 } else { 14546 /* 14547 * Otherwise, just pass all the helper provider descriptions 14548 * off to the meta provider. 14549 */ 14550 14551 int i; 14552 mutex_exit(&dtrace_lock); 14553 14554 for (i = 0; i < help->dthps_nprovs; i++) { 14555 dtrace_helper_provide(&help->dthps_provs[i]->dthp_prov, 14556 p->p_pid); 14557 } 14558 } 14559 14560 mutex_exit(&dtrace_meta_lock); 14561 } 14562 14563 static int 14564 dtrace_helper_provider_add(dof_helper_t *dofhp, int gen) 14565 { 14566 dtrace_helpers_t *help; 14567 dtrace_helper_provider_t *hprov, **tmp_provs; 14568 uint_t tmp_maxprovs, i; 14569 14570 ASSERT(MUTEX_HELD(&dtrace_lock)); 14571 14572 help = curproc->p_dtrace_helpers; 14573 ASSERT(help != NULL); 14574 14575 /* 14576 * If we already have dtrace_helper_providers_max helper providers, 14577 * we'll refuse to add a new one.
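 * (dtrace_helper_providers_max is likewise a tunable; the provider
 * table below grows by doubling from an initial size of two and is
 * clamped at this maximum.)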
14578 */ 14579 if (help->dthps_nprovs >= dtrace_helper_providers_max) 14580 return (ENOSPC); 14581 14582 /* 14583 * Check to make sure this isn't a duplicate. 14584 */ 14585 for (i = 0; i < help->dthps_nprovs; i++) { 14586 if (dofhp->dofhp_addr == 14587 help->dthps_provs[i]->dthp_prov.dofhp_addr) 14588 return (EALREADY); 14589 } 14590 14591 hprov = kmem_zalloc(sizeof (dtrace_helper_provider_t), KM_SLEEP); 14592 hprov->dthp_prov = *dofhp; 14593 hprov->dthp_ref = 1; 14594 hprov->dthp_generation = gen; 14595 14596 /* 14597 * Allocate a bigger table for helper providers if it's already full. 14598 */ 14599 if (help->dthps_maxprovs == help->dthps_nprovs) { 14600 tmp_maxprovs = help->dthps_maxprovs; 14601 tmp_provs = help->dthps_provs; 14602 14603 if (help->dthps_maxprovs == 0) 14604 help->dthps_maxprovs = 2; 14605 else 14606 help->dthps_maxprovs *= 2; 14607 if (help->dthps_maxprovs > dtrace_helper_providers_max) 14608 help->dthps_maxprovs = dtrace_helper_providers_max; 14609 14610 ASSERT(tmp_maxprovs < help->dthps_maxprovs); 14611 14612 help->dthps_provs = kmem_zalloc(help->dthps_maxprovs * 14613 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 14614 14615 if (tmp_provs != NULL) { 14616 bcopy(tmp_provs, help->dthps_provs, tmp_maxprovs * 14617 sizeof (dtrace_helper_provider_t *)); 14618 kmem_free(tmp_provs, tmp_maxprovs * 14619 sizeof (dtrace_helper_provider_t *)); 14620 } 14621 } 14622 14623 help->dthps_provs[help->dthps_nprovs] = hprov; 14624 help->dthps_nprovs++; 14625 14626 return (0); 14627 } 14628 14629 static void 14630 dtrace_helper_provider_destroy(dtrace_helper_provider_t *hprov) 14631 { 14632 mutex_enter(&dtrace_lock); 14633 14634 if (--hprov->dthp_ref == 0) { 14635 dof_hdr_t *dof; 14636 mutex_exit(&dtrace_lock); 14637 dof = (dof_hdr_t *)(uintptr_t)hprov->dthp_prov.dofhp_dof; 14638 dtrace_dof_destroy(dof); 14639 kmem_free(hprov, sizeof (dtrace_helper_provider_t)); 14640 } else { 14641 mutex_exit(&dtrace_lock); 14642 } 14643 } 14644 14645 static int 14646 dtrace_helper_provider_validate(dof_hdr_t *dof, dof_sec_t *sec) 14647 { 14648 uintptr_t daddr = (uintptr_t)dof; 14649 dof_sec_t *str_sec, *prb_sec, *arg_sec, *off_sec, *enoff_sec; 14650 dof_provider_t *provider; 14651 dof_probe_t *probe; 14652 uint8_t *arg; 14653 char *strtab, *typestr; 14654 dof_stridx_t typeidx; 14655 size_t typesz; 14656 uint_t nprobes, j, k; 14657 14658 ASSERT(sec->dofs_type == DOF_SECT_PROVIDER); 14659 14660 if (sec->dofs_offset & (sizeof (uint_t) - 1)) { 14661 dtrace_dof_error(dof, "misaligned section offset"); 14662 return (-1); 14663 } 14664 14665 /* 14666 * The section needs to be large enough to contain the DOF provider 14667 * structure appropriate for the given version. 14668 */ 14669 if (sec->dofs_size < 14670 ((dof->dofh_ident[DOF_ID_VERSION] == DOF_VERSION_1) ? 
14671 offsetof(dof_provider_t, dofpv_prenoffs) : 14672 sizeof (dof_provider_t))) { 14673 dtrace_dof_error(dof, "provider section too small"); 14674 return (-1); 14675 } 14676 14677 provider = (dof_provider_t *)(uintptr_t)(daddr + sec->dofs_offset); 14678 str_sec = dtrace_dof_sect(dof, DOF_SECT_STRTAB, provider->dofpv_strtab); 14679 prb_sec = dtrace_dof_sect(dof, DOF_SECT_PROBES, provider->dofpv_probes); 14680 arg_sec = dtrace_dof_sect(dof, DOF_SECT_PRARGS, provider->dofpv_prargs); 14681 off_sec = dtrace_dof_sect(dof, DOF_SECT_PROFFS, provider->dofpv_proffs); 14682 14683 if (str_sec == NULL || prb_sec == NULL || 14684 arg_sec == NULL || off_sec == NULL) 14685 return (-1); 14686 14687 enoff_sec = NULL; 14688 14689 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1 && 14690 provider->dofpv_prenoffs != DOF_SECT_NONE && 14691 (enoff_sec = dtrace_dof_sect(dof, DOF_SECT_PRENOFFS, 14692 provider->dofpv_prenoffs)) == NULL) 14693 return (-1); 14694 14695 strtab = (char *)(uintptr_t)(daddr + str_sec->dofs_offset); 14696 14697 if (provider->dofpv_name >= str_sec->dofs_size || 14698 strlen(strtab + provider->dofpv_name) >= DTRACE_PROVNAMELEN) { 14699 dtrace_dof_error(dof, "invalid provider name"); 14700 return (-1); 14701 } 14702 14703 if (prb_sec->dofs_entsize == 0 || 14704 prb_sec->dofs_entsize > prb_sec->dofs_size) { 14705 dtrace_dof_error(dof, "invalid entry size"); 14706 return (-1); 14707 } 14708 14709 if (prb_sec->dofs_entsize & (sizeof (uintptr_t) - 1)) { 14710 dtrace_dof_error(dof, "misaligned entry size"); 14711 return (-1); 14712 } 14713 14714 if (off_sec->dofs_entsize != sizeof (uint32_t)) { 14715 dtrace_dof_error(dof, "invalid entry size"); 14716 return (-1); 14717 } 14718 14719 if (off_sec->dofs_offset & (sizeof (uint32_t) - 1)) { 14720 dtrace_dof_error(dof, "misaligned section offset"); 14721 return (-1); 14722 } 14723 14724 if (arg_sec->dofs_entsize != sizeof (uint8_t)) { 14725 dtrace_dof_error(dof, "invalid entry size"); 14726 return (-1); 14727 } 14728 14729 arg = (uint8_t *)(uintptr_t)(daddr + arg_sec->dofs_offset); 14730 14731 nprobes = prb_sec->dofs_size / prb_sec->dofs_entsize; 14732 14733 /* 14734 * Take a pass through the probes to check for errors. 14735 */ 14736 for (j = 0; j < nprobes; j++) { 14737 probe = (dof_probe_t *)(uintptr_t)(daddr + 14738 prb_sec->dofs_offset + j * prb_sec->dofs_entsize); 14739 14740 if (probe->dofpr_func >= str_sec->dofs_size) { 14741 dtrace_dof_error(dof, "invalid function name"); 14742 return (-1); 14743 } 14744 14745 if (strlen(strtab + probe->dofpr_func) >= DTRACE_FUNCNAMELEN) { 14746 dtrace_dof_error(dof, "function name too long"); 14747 return (-1); 14748 } 14749 14750 if (probe->dofpr_name >= str_sec->dofs_size || 14751 strlen(strtab + probe->dofpr_name) >= DTRACE_NAMELEN) { 14752 dtrace_dof_error(dof, "invalid probe name"); 14753 return (-1); 14754 } 14755 14756 /* 14757 * The offset count must not wrap the index, and the offsets 14758 * must also not overflow the section's data. 14759 */ 14760 if (probe->dofpr_offidx + probe->dofpr_noffs < 14761 probe->dofpr_offidx || 14762 (probe->dofpr_offidx + probe->dofpr_noffs) * 14763 off_sec->dofs_entsize > off_sec->dofs_size) { 14764 dtrace_dof_error(dof, "invalid probe offset"); 14765 return (-1); 14766 } 14767 14768 if (dof->dofh_ident[DOF_ID_VERSION] != DOF_VERSION_1) { 14769 /* 14770 * If there's no is-enabled offset section, make sure 14771 * there aren't any is-enabled offsets. Otherwise 14772 * perform the same checks as for probe offsets 14773 * (immediately above). 
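 * As with the probe offsets, the "index + count < index" comparison
 * catches unsigned arithmetic that wraps before it can be used to
 * address data beyond the end of the section.  For example, with a
 * 32-bit index, 0xfffffff0 + 0x20 wraps to 0x10, which is less than
 * 0xfffffff0 -- so the comparison rejects the probe.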
14774 */ 14775 if (enoff_sec == NULL) { 14776 if (probe->dofpr_enoffidx != 0 || 14777 probe->dofpr_nenoffs != 0) { 14778 dtrace_dof_error(dof, "is-enabled " 14779 "offsets with null section"); 14780 return (-1); 14781 } 14782 } else if (probe->dofpr_enoffidx + 14783 probe->dofpr_nenoffs < probe->dofpr_enoffidx || 14784 (probe->dofpr_enoffidx + probe->dofpr_nenoffs) * 14785 enoff_sec->dofs_entsize > enoff_sec->dofs_size) { 14786 dtrace_dof_error(dof, "invalid is-enabled " 14787 "offset"); 14788 return (-1); 14789 } 14790 14791 if (probe->dofpr_noffs + probe->dofpr_nenoffs == 0) { 14792 dtrace_dof_error(dof, "zero probe and " 14793 "is-enabled offsets"); 14794 return (-1); 14795 } 14796 } else if (probe->dofpr_noffs == 0) { 14797 dtrace_dof_error(dof, "zero probe offsets"); 14798 return (-1); 14799 } 14800 14801 if (probe->dofpr_argidx + probe->dofpr_xargc < 14802 probe->dofpr_argidx || 14803 (probe->dofpr_argidx + probe->dofpr_xargc) * 14804 arg_sec->dofs_entsize > arg_sec->dofs_size) { 14805 dtrace_dof_error(dof, "invalid args"); 14806 return (-1); 14807 } 14808 14809 typeidx = probe->dofpr_nargv; 14810 typestr = strtab + probe->dofpr_nargv; 14811 for (k = 0; k < probe->dofpr_nargc; k++) { 14812 if (typeidx >= str_sec->dofs_size) { 14813 dtrace_dof_error(dof, "bad " 14814 "native argument type"); 14815 return (-1); 14816 } 14817 14818 typesz = strlen(typestr) + 1; 14819 if (typesz > DTRACE_ARGTYPELEN) { 14820 dtrace_dof_error(dof, "native " 14821 "argument type too long"); 14822 return (-1); 14823 } 14824 typeidx += typesz; 14825 typestr += typesz; 14826 } 14827 14828 typeidx = probe->dofpr_xargv; 14829 typestr = strtab + probe->dofpr_xargv; 14830 for (k = 0; k < probe->dofpr_xargc; k++) { 14831 if (arg[probe->dofpr_argidx + k] > probe->dofpr_nargc) { 14832 dtrace_dof_error(dof, "bad " 14833 "native argument index"); 14834 return (-1); 14835 } 14836 14837 if (typeidx >= str_sec->dofs_size) { 14838 dtrace_dof_error(dof, "bad " 14839 "translated argument type"); 14840 return (-1); 14841 } 14842 14843 typesz = strlen(typestr) + 1; 14844 if (typesz > DTRACE_ARGTYPELEN) { 14845 dtrace_dof_error(dof, "translated argument " 14846 "type too long"); 14847 return (-1); 14848 } 14849 14850 typeidx += typesz; 14851 typestr += typesz; 14852 } 14853 } 14854 14855 return (0); 14856 } 14857 14858 static int 14859 dtrace_helper_slurp(dof_hdr_t *dof, dof_helper_t *dhp) 14860 { 14861 dtrace_helpers_t *help; 14862 dtrace_vstate_t *vstate; 14863 dtrace_enabling_t *enab = NULL; 14864 int i, gen, rv, nhelpers = 0, nprovs = 0, destroy = 1; 14865 uintptr_t daddr = (uintptr_t)dof; 14866 14867 ASSERT(MUTEX_HELD(&dtrace_lock)); 14868 14869 if ((help = curproc->p_dtrace_helpers) == NULL) 14870 help = dtrace_helpers_create(curproc); 14871 14872 vstate = &help->dthps_vstate; 14873 14874 if ((rv = dtrace_dof_slurp(dof, vstate, NULL, &enab, 14875 dhp != NULL ? dhp->dofhp_addr : 0, B_FALSE)) != 0) { 14876 dtrace_dof_destroy(dof); 14877 return (rv); 14878 } 14879 14880 /* 14881 * Look for helper providers and validate their descriptions. 
14882 */ 14883 if (dhp != NULL) { 14884 for (i = 0; i < dof->dofh_secnum; i++) { 14885 dof_sec_t *sec = (dof_sec_t *)(uintptr_t)(daddr + 14886 dof->dofh_secoff + i * dof->dofh_secsize); 14887 14888 if (sec->dofs_type != DOF_SECT_PROVIDER) 14889 continue; 14890 14891 if (dtrace_helper_provider_validate(dof, sec) != 0) { 14892 dtrace_enabling_destroy(enab); 14893 dtrace_dof_destroy(dof); 14894 return (-1); 14895 } 14896 14897 nprovs++; 14898 } 14899 } 14900 14901 /* 14902 * Now we need to walk through the ECB descriptions in the enabling. 14903 */ 14904 for (i = 0; i < enab->dten_ndesc; i++) { 14905 dtrace_ecbdesc_t *ep = enab->dten_desc[i]; 14906 dtrace_probedesc_t *desc = &ep->dted_probe; 14907 14908 if (strcmp(desc->dtpd_provider, "dtrace") != 0) 14909 continue; 14910 14911 if (strcmp(desc->dtpd_mod, "helper") != 0) 14912 continue; 14913 14914 if (strcmp(desc->dtpd_func, "ustack") != 0) 14915 continue; 14916 14917 if ((rv = dtrace_helper_action_add(DTRACE_HELPER_ACTION_USTACK, 14918 ep)) != 0) { 14919 /* 14920 * Adding this helper action failed -- we are now going 14921 * to rip out the entire generation and return failure. 14922 */ 14923 (void) dtrace_helper_destroygen(help->dthps_generation); 14924 dtrace_enabling_destroy(enab); 14925 dtrace_dof_destroy(dof); 14926 return (-1); 14927 } 14928 14929 nhelpers++; 14930 } 14931 14932 if (nhelpers < enab->dten_ndesc) 14933 dtrace_dof_error(dof, "unmatched helpers"); 14934 14935 gen = help->dthps_generation++; 14936 dtrace_enabling_destroy(enab); 14937 14938 if (dhp != NULL && nprovs > 0) { 14939 dhp->dofhp_dof = (uint64_t)(uintptr_t)dof; 14940 if (dtrace_helper_provider_add(dhp, gen) == 0) { 14941 mutex_exit(&dtrace_lock); 14942 dtrace_helper_provider_register(curproc, help, dhp); 14943 mutex_enter(&dtrace_lock); 14944 14945 destroy = 0; 14946 } 14947 } 14948 14949 if (destroy) 14950 dtrace_dof_destroy(dof); 14951 14952 return (gen); 14953 } 14954 14955 static dtrace_helpers_t * 14956 dtrace_helpers_create(proc_t *p) 14957 { 14958 dtrace_helpers_t *help; 14959 14960 ASSERT(MUTEX_HELD(&dtrace_lock)); 14961 ASSERT(p->p_dtrace_helpers == NULL); 14962 14963 help = kmem_zalloc(sizeof (dtrace_helpers_t), KM_SLEEP); 14964 help->dthps_actions = kmem_zalloc(sizeof (dtrace_helper_action_t *) * 14965 DTRACE_NHELPER_ACTIONS, KM_SLEEP); 14966 14967 p->p_dtrace_helpers = help; 14968 dtrace_helpers++; 14969 14970 return (help); 14971 } 14972 14973 #if defined(sun) 14974 static void 14975 dtrace_helpers_destroy(void) 14976 { 14977 proc_t *p = curproc; 14978 #else 14979 void 14980 dtrace_helpers_destroy(proc_t *p) 14981 { 14982 #endif 14983 dtrace_helpers_t *help; 14984 dtrace_vstate_t *vstate; 14985 int i; 14986 mutex_enter(&dtrace_lock); 14987 14988 ASSERT(p->p_dtrace_helpers != NULL); 14989 ASSERT(dtrace_helpers > 0); 14990 14991 help = p->p_dtrace_helpers; 14992 vstate = &help->dthps_vstate; 14993 14994 /* 14995 * We're now going to lose the help from this process. 14996 */ 14997 p->p_dtrace_helpers = NULL; 14998 dtrace_sync(); 14999 15000 /* 15001 * Destroy the helper actions. 15002 */ 15003 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 15004 dtrace_helper_action_t *h, *next; 15005 15006 for (h = help->dthps_actions[i]; h != NULL; h = next) { 15007 next = h->dtha_next; 15008 dtrace_helper_action_destroy(h, vstate); 15010 } 15011 } 15012 15013 mutex_exit(&dtrace_lock); 15014 15015 /* 15016 * Destroy the helper providers.
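 * If a meta provider is registered, each provider must first be removed
 * from it; otherwise the helpers may still be sitting on the deferred
 * list, from which they are unlinked below.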
15017 */ 15018 if (help->dthps_maxprovs > 0) { 15019 mutex_enter(&dtrace_meta_lock); 15020 if (dtrace_meta_pid != NULL) { 15021 ASSERT(dtrace_deferred_pid == NULL); 15022 15023 for (i = 0; i < help->dthps_nprovs; i++) { 15024 dtrace_helper_provider_remove( 15025 &help->dthps_provs[i]->dthp_prov, p->p_pid); 15026 } 15027 } else { 15028 mutex_enter(&dtrace_lock); 15029 ASSERT(help->dthps_deferred == 0 || 15030 help->dthps_next != NULL || 15031 help->dthps_prev != NULL || 15032 help == dtrace_deferred_pid); 15033 15034 /* 15035 * Remove the helper from the deferred list. 15036 */ 15037 if (help->dthps_next != NULL) 15038 help->dthps_next->dthps_prev = help->dthps_prev; 15039 if (help->dthps_prev != NULL) 15040 help->dthps_prev->dthps_next = help->dthps_next; 15041 if (dtrace_deferred_pid == help) { 15042 dtrace_deferred_pid = help->dthps_next; 15043 ASSERT(help->dthps_prev == NULL); 15044 } 15045 15046 mutex_exit(&dtrace_lock); 15047 } 15048 15049 mutex_exit(&dtrace_meta_lock); 15050 15051 for (i = 0; i < help->dthps_nprovs; i++) { 15052 dtrace_helper_provider_destroy(help->dthps_provs[i]); 15053 } 15054 15055 kmem_free(help->dthps_provs, help->dthps_maxprovs * 15056 sizeof (dtrace_helper_provider_t *)); 15057 } 15058 15059 mutex_enter(&dtrace_lock); 15060 15061 dtrace_vstate_fini(&help->dthps_vstate); 15062 kmem_free(help->dthps_actions, 15063 sizeof (dtrace_helper_action_t *) * DTRACE_NHELPER_ACTIONS); 15064 kmem_free(help, sizeof (dtrace_helpers_t)); 15065 15066 --dtrace_helpers; 15067 mutex_exit(&dtrace_lock); 15068 } 15069 15070 #if defined(sun) 15071 static 15072 #endif 15073 void 15074 dtrace_helpers_duplicate(proc_t *from, proc_t *to) 15075 { 15076 dtrace_helpers_t *help, *newhelp; 15077 dtrace_helper_action_t *helper, *new, *last; 15078 dtrace_difo_t *dp; 15079 dtrace_vstate_t *vstate; 15080 int i, j, sz, hasprovs = 0; 15081 15082 mutex_enter(&dtrace_lock); 15083 ASSERT(from->p_dtrace_helpers != NULL); 15084 ASSERT(dtrace_helpers > 0); 15085 15086 help = from->p_dtrace_helpers; 15087 newhelp = dtrace_helpers_create(to); 15088 ASSERT(to->p_dtrace_helpers != NULL); 15089 15090 newhelp->dthps_generation = help->dthps_generation; 15091 vstate = &newhelp->dthps_vstate; 15092 15093 /* 15094 * Duplicate the helper actions. 15095 */ 15096 for (i = 0; i < DTRACE_NHELPER_ACTIONS; i++) { 15097 if ((helper = help->dthps_actions[i]) == NULL) 15098 continue; 15099 15100 for (last = NULL; helper != NULL; helper = helper->dtha_next) { 15101 new = kmem_zalloc(sizeof (dtrace_helper_action_t), 15102 KM_SLEEP); 15103 new->dtha_generation = helper->dtha_generation; 15104 15105 if ((dp = helper->dtha_predicate) != NULL) { 15106 dp = dtrace_difo_duplicate(dp, vstate); 15107 new->dtha_predicate = dp; 15108 } 15109 15110 new->dtha_nactions = helper->dtha_nactions; 15111 sz = sizeof (dtrace_difo_t *) * new->dtha_nactions; 15112 new->dtha_actions = kmem_alloc(sz, KM_SLEEP); 15113 15114 for (j = 0; j < new->dtha_nactions; j++) { 15115 dtrace_difo_t *dp = helper->dtha_actions[j]; 15116 15117 ASSERT(dp != NULL); 15118 dp = dtrace_difo_duplicate(dp, vstate); 15119 new->dtha_actions[j] = dp; 15120 } 15121 15122 if (last != NULL) { 15123 last->dtha_next = new; 15124 } else { 15125 newhelp->dthps_actions[i] = new; 15126 } 15127 15128 last = new; 15129 } 15130 } 15131 15132 /* 15133 * Duplicate the helper providers and register them with the 15134 * DTrace framework. 
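 * The provider descriptions themselves are not deep-copied: parent and
 * child share the same dtrace_helper_provider_t structures, which is
 * why only the reference count is bumped below.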
15135 */ 15136 if (help->dthps_nprovs > 0) { 15137 newhelp->dthps_nprovs = help->dthps_nprovs; 15138 newhelp->dthps_maxprovs = help->dthps_nprovs; 15139 newhelp->dthps_provs = kmem_alloc(newhelp->dthps_nprovs * 15140 sizeof (dtrace_helper_provider_t *), KM_SLEEP); 15141 for (i = 0; i < newhelp->dthps_nprovs; i++) { 15142 newhelp->dthps_provs[i] = help->dthps_provs[i]; 15143 newhelp->dthps_provs[i]->dthp_ref++; 15144 } 15145 15146 hasprovs = 1; 15147 } 15148 15149 mutex_exit(&dtrace_lock); 15150 15151 if (hasprovs) 15152 dtrace_helper_provider_register(to, newhelp, NULL); 15153 } 15154 15155 #if defined(sun) 15156 /* 15157 * DTrace Hook Functions 15158 */ 15159 static void 15160 dtrace_module_loaded(modctl_t *ctl) 15161 { 15162 dtrace_provider_t *prv; 15163 15164 mutex_enter(&dtrace_provider_lock); 15165 #if defined(sun) 15166 mutex_enter(&mod_lock); 15167 #endif 15168 15169 ASSERT(ctl->mod_busy); 15170 15171 /* 15172 * We're going to call each provider's per-module provide operation 15173 * specifying only this module. 15174 */ 15175 for (prv = dtrace_provider; prv != NULL; prv = prv->dtpv_next) 15176 prv->dtpv_pops.dtps_provide_module(prv->dtpv_arg, ctl); 15177 15178 #if defined(sun) 15179 mutex_exit(&mod_lock); 15180 #endif 15181 mutex_exit(&dtrace_provider_lock); 15182 15183 /* 15184 * If we have any retained enablings, we need to match against them. 15185 * Enabling probes requires that cpu_lock be held, and we cannot hold 15186 * cpu_lock here -- it is legal for cpu_lock to be held when loading a 15187 * module. (In particular, this happens when loading scheduling 15188 * classes.) So if we have any retained enablings, we need to dispatch 15189 * our task queue to do the match for us. 15190 */ 15191 mutex_enter(&dtrace_lock); 15192 15193 if (dtrace_retained == NULL) { 15194 mutex_exit(&dtrace_lock); 15195 return; 15196 } 15197 15198 (void) taskq_dispatch(dtrace_taskq, 15199 (task_func_t *)dtrace_enabling_matchall, NULL, TQ_SLEEP); 15200 15201 mutex_exit(&dtrace_lock); 15202 15203 /* 15204 * And now, for a little heuristic sleaze: in general, we want to 15205 * match modules as soon as they load. However, we cannot guarantee 15206 * this, because it would lead us to the lock ordering violation 15207 * outlined above. The common case, of course, is that cpu_lock is 15208 * _not_ held -- so we delay here for a clock tick, hoping that that's 15209 * long enough for the task queue to do its work. If it's not, it's 15210 * not a serious problem -- it just means that the module that we 15211 * just loaded may not be immediately instrumentable. 15212 */ 15213 delay(1); 15214 } 15215 15216 static void 15217 dtrace_module_unloaded(modctl_t *ctl) 15218 { 15219 dtrace_probe_t template, *probe, *first, *next; 15220 dtrace_provider_t *prov; 15221 15222 template.dtpr_mod = ctl->mod_modname; 15223 15224 mutex_enter(&dtrace_provider_lock); 15225 #if defined(sun) 15226 mutex_enter(&mod_lock); 15227 #endif 15228 mutex_enter(&dtrace_lock); 15229 15230 if (dtrace_bymod == NULL) { 15231 /* 15232 * The DTrace module is loaded (obviously) but not attached; 15233 * we don't have any work to do.
15234 */ 15235 mutex_exit(&dtrace_provider_lock); 15236 #if defined(sun) 15237 mutex_exit(&mod_lock); 15238 #endif 15239 mutex_exit(&dtrace_lock); 15240 return; 15241 } 15242 15243 for (probe = first = dtrace_hash_lookup(dtrace_bymod, &template); 15244 probe != NULL; probe = probe->dtpr_nextmod) { 15245 if (probe->dtpr_ecb != NULL) { 15246 mutex_exit(&dtrace_provider_lock); 15247 #if defined(sun) 15248 mutex_exit(&mod_lock); 15249 #endif 15250 mutex_exit(&dtrace_lock); 15251 15252 /* 15253 * This shouldn't _actually_ be possible -- we're 15254 * unloading a module that has an enabled probe in it. 15255 * (It's normally up to the provider to make sure that 15256 * this can't happen.) However, because dtps_enable() 15257 * doesn't have a failure mode, there can be an 15258 * enable/unload race. Upshot: we don't want to 15259 * assert, but we're not going to disable the 15260 * probe, either. 15261 */ 15262 if (dtrace_err_verbose) { 15263 cmn_err(CE_WARN, "unloaded module '%s' had " 15264 "enabled probes", ctl->mod_modname); 15265 } 15266 15267 return; 15268 } 15269 } 15270 15271 probe = first; 15272 15273 for (first = NULL; probe != NULL; probe = next) { 15274 ASSERT(dtrace_probes[probe->dtpr_id - 1] == probe); 15275 15276 dtrace_probes[probe->dtpr_id - 1] = NULL; 15277 15278 next = probe->dtpr_nextmod; 15279 dtrace_hash_remove(dtrace_bymod, probe); 15280 dtrace_hash_remove(dtrace_byfunc, probe); 15281 dtrace_hash_remove(dtrace_byname, probe); 15282 15283 if (first == NULL) { 15284 first = probe; 15285 probe->dtpr_nextmod = NULL; 15286 } else { 15287 probe->dtpr_nextmod = first; 15288 first = probe; 15289 } 15290 } 15291 15292 /* 15293 * We've removed all of the module's probes from the hash chains and 15294 * from the probe array. Now issue a dtrace_sync() to be sure that 15295 * everyone has cleared out from any probe array processing. 15296 */ 15297 dtrace_sync(); 15298 15299 for (probe = first; probe != NULL; probe = first) { 15300 first = probe->dtpr_nextmod; 15301 prov = probe->dtpr_provider; 15302 prov->dtpv_pops.dtps_destroy(prov->dtpv_arg, probe->dtpr_id, 15303 probe->dtpr_arg); 15304 kmem_free(probe->dtpr_mod, strlen(probe->dtpr_mod) + 1); 15305 kmem_free(probe->dtpr_func, strlen(probe->dtpr_func) + 1); 15306 kmem_free(probe->dtpr_name, strlen(probe->dtpr_name) + 1); 15307 vmem_free(dtrace_arena, (void *)(uintptr_t)probe->dtpr_id, 1); 15308 kmem_free(probe, sizeof (dtrace_probe_t)); 15309 } 15310 15311 mutex_exit(&dtrace_lock); 15312 #if defined(sun) 15313 mutex_exit(&mod_lock); 15314 #endif 15315 mutex_exit(&dtrace_provider_lock); 15316 } 15317 15318 static void 15319 dtrace_suspend(void) 15320 { 15321 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_suspend)); 15322 } 15323 15324 static void 15325 dtrace_resume(void) 15326 { 15327 dtrace_probe_foreach(offsetof(dtrace_pops_t, dtps_resume)); 15328 } 15329 #endif 15330 15331 static int 15332 dtrace_cpu_setup(cpu_setup_t what, processorid_t cpu) 15333 { 15334 ASSERT(MUTEX_HELD(&cpu_lock)); 15335 mutex_enter(&dtrace_lock); 15336 15337 switch (what) { 15338 case CPU_CONFIG: { 15339 dtrace_state_t *state; 15340 dtrace_optval_t *opt, rs, c; 15341 15342 /* 15343 * For now, we only allocate a new buffer for anonymous state. 
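 * (And then only if that state is active and is tracing either all CPUs
 * or specifically the CPU being configured; the checks below simply
 * fall through otherwise.)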
15344 */ 15345 if ((state = dtrace_anon.dta_state) == NULL) 15346 break; 15347 15348 if (state->dts_activity != DTRACE_ACTIVITY_ACTIVE) 15349 break; 15350 15351 opt = state->dts_options; 15352 c = opt[DTRACEOPT_CPU]; 15353 15354 if (c != DTRACE_CPUALL && c != DTRACEOPT_UNSET && c != cpu) 15355 break; 15356 15357 /* 15358 * Regardless of what the actual policy is, we're going to 15359 * temporarily set our resize policy to be manual. We're 15360 * also going to temporarily set our CPU option to denote 15361 * the newly configured CPU. 15362 */ 15363 rs = opt[DTRACEOPT_BUFRESIZE]; 15364 opt[DTRACEOPT_BUFRESIZE] = DTRACEOPT_BUFRESIZE_MANUAL; 15365 opt[DTRACEOPT_CPU] = (dtrace_optval_t)cpu; 15366 15367 (void) dtrace_state_buffers(state); 15368 15369 opt[DTRACEOPT_BUFRESIZE] = rs; 15370 opt[DTRACEOPT_CPU] = c; 15371 15372 break; 15373 } 15374 15375 case CPU_UNCONFIG: 15376 /* 15377 * We don't free the buffer in the CPU_UNCONFIG case. (The 15378 * buffer will be freed when the consumer exits.) 15379 */ 15380 break; 15381 15382 default: 15383 break; 15384 } 15385 15386 mutex_exit(&dtrace_lock); 15387 return (0); 15388 } 15389 15390 #if defined(sun) 15391 static void 15392 dtrace_cpu_setup_initial(processorid_t cpu) 15393 { 15394 (void) dtrace_cpu_setup(CPU_CONFIG, cpu); 15395 } 15396 #endif 15397 15398 static void 15399 dtrace_toxrange_add(uintptr_t base, uintptr_t limit) 15400 { 15401 if (dtrace_toxranges >= dtrace_toxranges_max) { 15402 int osize, nsize; 15403 dtrace_toxrange_t *range; 15404 15405 osize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15406 15407 if (osize == 0) { 15408 ASSERT(dtrace_toxrange == NULL); 15409 ASSERT(dtrace_toxranges_max == 0); 15410 dtrace_toxranges_max = 1; 15411 } else { 15412 dtrace_toxranges_max <<= 1; 15413 } 15414 15415 nsize = dtrace_toxranges_max * sizeof (dtrace_toxrange_t); 15416 range = kmem_zalloc(nsize, KM_SLEEP); 15417 15418 if (dtrace_toxrange != NULL) { 15419 ASSERT(osize != 0); 15420 bcopy(dtrace_toxrange, range, osize); 15421 kmem_free(dtrace_toxrange, osize); 15422 } 15423 15424 dtrace_toxrange = range; 15425 } 15426 15427 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_base == 0); 15428 ASSERT(dtrace_toxrange[dtrace_toxranges].dtt_limit == 0); 15429 15430 dtrace_toxrange[dtrace_toxranges].dtt_base = base; 15431 dtrace_toxrange[dtrace_toxranges].dtt_limit = limit; 15432 dtrace_toxranges++; 15433 } 15434 15435 /* 15436 * DTrace Driver Cookbook Functions 15437 */ 15438 #if defined(sun) 15439 /*ARGSUSED*/ 15440 static int 15441 dtrace_attach(dev_info_t *devi, ddi_attach_cmd_t cmd) 15442 { 15443 dtrace_provider_id_t id; 15444 dtrace_state_t *state = NULL; 15445 dtrace_enabling_t *enab; 15446 15447 mutex_enter(&cpu_lock); 15448 mutex_enter(&dtrace_provider_lock); 15449 mutex_enter(&dtrace_lock); 15450 15451 if (ddi_soft_state_init(&dtrace_softstate, 15452 sizeof (dtrace_state_t), 0) != 0) { 15453 cmn_err(CE_NOTE, "/dev/dtrace failed to initialize soft state"); 15454 mutex_exit(&cpu_lock); 15455 mutex_exit(&dtrace_provider_lock); 15456 mutex_exit(&dtrace_lock); 15457 return (DDI_FAILURE); 15458 } 15459 15460 if (ddi_create_minor_node(devi, DTRACEMNR_DTRACE, S_IFCHR, 15461 DTRACEMNRN_DTRACE, DDI_PSEUDO, NULL) == DDI_FAILURE || 15462 ddi_create_minor_node(devi, DTRACEMNR_HELPER, S_IFCHR, 15463 DTRACEMNRN_HELPER, DDI_PSEUDO, NULL) == DDI_FAILURE) { 15464 cmn_err(CE_NOTE, "/dev/dtrace couldn't create minor nodes"); 15465 ddi_remove_minor_node(devi, NULL); 15466 ddi_soft_state_fini(&dtrace_softstate); 15467 mutex_exit(&cpu_lock); 15468 
mutex_exit(&dtrace_provider_lock); 15469 mutex_exit(&dtrace_lock); 15470 return (DDI_FAILURE); 15471 } 15472 15473 ddi_report_dev(devi); 15474 dtrace_devi = devi; 15475 15476 dtrace_modload = dtrace_module_loaded; 15477 dtrace_modunload = dtrace_module_unloaded; 15478 dtrace_cpu_init = dtrace_cpu_setup_initial; 15479 dtrace_helpers_cleanup = dtrace_helpers_destroy; 15480 dtrace_helpers_fork = dtrace_helpers_duplicate; 15481 dtrace_cpustart_init = dtrace_suspend; 15482 dtrace_cpustart_fini = dtrace_resume; 15483 dtrace_debugger_init = dtrace_suspend; 15484 dtrace_debugger_fini = dtrace_resume; 15485 15486 register_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL); 15487 15488 ASSERT(MUTEX_HELD(&cpu_lock)); 15489 15490 dtrace_arena = vmem_create("dtrace", (void *)1, UINT32_MAX, 1, 15491 NULL, NULL, NULL, 0, VM_SLEEP | VMC_IDENTIFIER); 15492 dtrace_minor = vmem_create("dtrace_minor", (void *)DTRACEMNRN_CLONE, 15493 UINT32_MAX - DTRACEMNRN_CLONE, 1, NULL, NULL, NULL, 0, 15494 VM_SLEEP | VMC_IDENTIFIER); 15495 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 15496 1, INT_MAX, 0); 15497 15498 dtrace_state_cache = kmem_cache_create("dtrace_state_cache", 15499 sizeof (dtrace_dstate_percpu_t) * NCPU, DTRACE_STATE_ALIGN, 15500 NULL, NULL, NULL, NULL, NULL, 0); 15501 15502 ASSERT(MUTEX_HELD(&cpu_lock)); 15503 dtrace_bymod = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_mod), 15504 offsetof(dtrace_probe_t, dtpr_nextmod), 15505 offsetof(dtrace_probe_t, dtpr_prevmod)); 15506 15507 dtrace_byfunc = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_func), 15508 offsetof(dtrace_probe_t, dtpr_nextfunc), 15509 offsetof(dtrace_probe_t, dtpr_prevfunc)); 15510 15511 dtrace_byname = dtrace_hash_create(offsetof(dtrace_probe_t, dtpr_name), 15512 offsetof(dtrace_probe_t, dtpr_nextname), 15513 offsetof(dtrace_probe_t, dtpr_prevname)); 15514 15515 if (dtrace_retain_max < 1) { 15516 cmn_err(CE_WARN, "illegal value (%lu) for dtrace_retain_max; " 15517 "setting to 1", dtrace_retain_max); 15518 dtrace_retain_max = 1; 15519 } 15520 15521 /* 15522 * Now discover our toxic ranges. 15523 */ 15524 dtrace_toxic_ranges(dtrace_toxrange_add); 15525 15526 /* 15527 * Before we register ourselves as a provider to our own framework, 15528 * we would like to assert that dtrace_provider is NULL -- but that's 15529 * not true if we were loaded as a dependency of a DTrace provider. 15530 * Once we've registered, we can assert that dtrace_provider is our 15531 * pseudo provider. 15532 */ 15533 (void) dtrace_register("dtrace", &dtrace_provider_attr, 15534 DTRACE_PRIV_NONE, 0, &dtrace_provider_ops, NULL, &id); 15535 15536 ASSERT(dtrace_provider != NULL); 15537 ASSERT((dtrace_provider_id_t)dtrace_provider == id); 15538 15539 dtrace_probeid_begin = dtrace_probe_create((dtrace_provider_id_t) 15540 dtrace_provider, NULL, NULL, "BEGIN", 0, NULL); 15541 dtrace_probeid_end = dtrace_probe_create((dtrace_provider_id_t) 15542 dtrace_provider, NULL, NULL, "END", 0, NULL); 15543 dtrace_probeid_error = dtrace_probe_create((dtrace_provider_id_t) 15544 dtrace_provider, NULL, NULL, "ERROR", 1, NULL); 15545 15546 dtrace_anon_property(); 15547 mutex_exit(&cpu_lock); 15548 15549 /* 15550 * If DTrace helper tracing is enabled, we need to allocate the 15551 * trace buffer and initialize the values. 
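 * (dtrace_helptrace_enabled and dtrace_helptrace_bufsize are tunables;
 * helper tracing is typically enabled by default only on DEBUG
 * kernels.)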
15552 */ 15553 if (dtrace_helptrace_enabled) { 15554 ASSERT(dtrace_helptrace_buffer == NULL); 15555 dtrace_helptrace_buffer = 15556 kmem_zalloc(dtrace_helptrace_bufsize, KM_SLEEP); 15557 dtrace_helptrace_next = 0; 15558 } 15559 15560 /* 15561 * If there are already providers, we must ask them to provide their 15562 * probes, and then match any anonymous enabling against them. Note 15563 * that there should be no other retained enablings at this time: 15564 * the only retained enablings at this time should be the anonymous 15565 * enabling. 15566 */ 15567 if (dtrace_anon.dta_enabling != NULL) { 15568 ASSERT(dtrace_retained == dtrace_anon.dta_enabling); 15569 15570 dtrace_enabling_provide(NULL); 15571 state = dtrace_anon.dta_state; 15572 15573 /* 15574 * We couldn't hold cpu_lock across the above call to 15575 * dtrace_enabling_provide(), but we must hold it to actually 15576 * enable the probes. We have to drop all of our locks, pick 15577 * up cpu_lock, and regain our locks before matching the 15578 * retained anonymous enabling. 15579 */ 15580 mutex_exit(&dtrace_lock); 15581 mutex_exit(&dtrace_provider_lock); 15582 15583 mutex_enter(&cpu_lock); 15584 mutex_enter(&dtrace_provider_lock); 15585 mutex_enter(&dtrace_lock); 15586 15587 if ((enab = dtrace_anon.dta_enabling) != NULL) 15588 (void) dtrace_enabling_match(enab, NULL); 15589 15590 mutex_exit(&cpu_lock); 15591 } 15592 15593 mutex_exit(&dtrace_lock); 15594 mutex_exit(&dtrace_provider_lock); 15595 15596 if (state != NULL) { 15597 /* 15598 * If we created any anonymous state, set it going now. 15599 */ 15600 (void) dtrace_state_go(state, &dtrace_anon.dta_beganon); 15601 } 15602 15603 return (DDI_SUCCESS); 15604 } 15605 #endif 15606 15607 #if !defined(sun) 15608 #if __FreeBSD_version >= 800039 15609 static void dtrace_dtr(void *); 15610 #endif 15611 #endif 15612 15613 /*ARGSUSED*/ 15614 static int 15615 #if defined(sun) 15616 dtrace_open(dev_t *devp, int flag, int otyp, cred_t *cred_p) 15617 #else 15618 dtrace_open(struct cdev *dev, int oflags, int devtype, struct thread *td) 15619 #endif 15620 { 15621 dtrace_state_t *state; 15622 uint32_t priv; 15623 uid_t uid; 15624 zoneid_t zoneid; 15625 15626 #if defined(sun) 15627 if (getminor(*devp) == DTRACEMNRN_HELPER) 15628 return (0); 15629 15630 /* 15631 * If this wasn't an open with the "helper" minor, then it must be 15632 * the "dtrace" minor. 15633 */ 15634 ASSERT(getminor(*devp) == DTRACEMNRN_DTRACE); 15635 #else 15636 cred_t *cred_p = NULL; 15637 15638 #if __FreeBSD_version < 800039 15639 /* 15640 * The first minor device is the one that is cloned so there is 15641 * nothing more to do here. 15642 */ 15643 if (dev2unit(dev) == 0) 15644 return 0; 15645 15646 /* 15647 * Devices are cloned, so if the DTrace state has already 15648 * been allocated, that means this device belongs to a 15649 * different client. Each client should open '/dev/dtrace' 15650 * to get a cloned device. 15651 */ 15652 if (dev->si_drv1 != NULL) 15653 return (EBUSY); 15654 #endif 15655 15656 cred_p = dev->si_cred; 15657 #endif 15658 15659 /* 15660 * If no DTRACE_PRIV_* bits are set in the credential, then the 15661 * caller lacks sufficient permission to do anything with DTrace. 15662 */ 15663 dtrace_cred2priv(cred_p, &priv, &uid, &zoneid); 15664 if (priv == DTRACE_PRIV_NONE) { 15665 #if !defined(sun) 15666 #if __FreeBSD_version < 800039 15667 /* Destroy the cloned device. 
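 * (Prior to __FreeBSD_version 800039, each open of /dev/dtrace yielded
 * a cloned device; because this open failed the privilege check, the
 * clone must be destroyed here.)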
*/ 15668 destroy_dev(dev); 15669 #endif 15670 #endif 15671 15672 return (EACCES); 15673 } 15674 15675 /* 15676 * Ask all providers to provide all their probes. 15677 */ 15678 mutex_enter(&dtrace_provider_lock); 15679 dtrace_probe_provide(NULL, NULL); 15680 mutex_exit(&dtrace_provider_lock); 15681 15682 mutex_enter(&cpu_lock); 15683 mutex_enter(&dtrace_lock); 15684 dtrace_opens++; 15685 dtrace_membar_producer(); 15686 15687 #if defined(sun) 15688 /* 15689 * If the kernel debugger is active (that is, if the kernel debugger 15690 * modified text in some way), we won't allow the open. 15691 */ 15692 if (kdi_dtrace_set(KDI_DTSET_DTRACE_ACTIVATE) != 0) { 15693 dtrace_opens--; 15694 mutex_exit(&cpu_lock); 15695 mutex_exit(&dtrace_lock); 15696 return (EBUSY); 15697 } 15698 15699 state = dtrace_state_create(devp, cred_p); 15700 #else 15701 state = dtrace_state_create(dev); 15702 #if __FreeBSD_version < 800039 15703 dev->si_drv1 = state; 15704 #else 15705 devfs_set_cdevpriv(state, dtrace_dtr); 15706 #endif 15707 /* This code actually belongs in dtrace_attach() */ 15708 if (dtrace_opens == 1) 15709 dtrace_taskq = taskq_create("dtrace_taskq", 1, maxclsyspri, 15710 1, INT_MAX, 0); 15711 #endif 15712 15713 mutex_exit(&cpu_lock); 15714 15715 if (state == NULL) { 15716 #if defined(sun) 15717 if (--dtrace_opens == 0) 15718 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15719 #else 15720 --dtrace_opens; 15721 #endif 15722 mutex_exit(&dtrace_lock); 15723 #if !defined(sun) 15724 #if __FreeBSD_version < 800039 15725 /* Destroy the cloned device. */ 15726 destroy_dev(dev); 15727 #endif 15728 #endif 15729 return (EAGAIN); 15730 } 15731 15732 mutex_exit(&dtrace_lock); 15733 15734 return (0); 15735 } 15736 15737 /*ARGSUSED*/ 15738 #if defined(sun) 15739 static int 15740 dtrace_close(dev_t dev, int flag, int otyp, cred_t *cred_p) 15741 #elif __FreeBSD_version < 800039 15742 static int 15743 dtrace_close(struct cdev *dev, int flags, int fmt __unused, struct thread *td) 15744 #else 15745 static void 15746 dtrace_dtr(void *data) 15747 #endif 15748 { 15749 #if defined(sun) 15750 minor_t minor = getminor(dev); 15751 dtrace_state_t *state; 15752 15753 if (minor == DTRACEMNRN_HELPER) 15754 return (0); 15755 15756 state = ddi_get_soft_state(dtrace_softstate, minor); 15757 #else 15758 #if __FreeBSD_version < 800039 15759 dtrace_state_t *state = dev->si_drv1; 15760 15761 /* Check if this is not a cloned device. */ 15762 if (dev2unit(dev) == 0) 15763 return (0); 15764 #else 15765 dtrace_state_t *state = data; 15766 #endif 15767 15768 #endif 15769 15770 mutex_enter(&cpu_lock); 15771 mutex_enter(&dtrace_lock); 15772 15773 if (state != NULL) { 15774 if (state->dts_anon) { 15775 /* 15776 * There is anonymous state. Destroy that first. 
15777 */ 15778 ASSERT(dtrace_anon.dta_state == NULL); 15779 dtrace_state_destroy(state->dts_anon); 15780 } 15781 15782 dtrace_state_destroy(state); 15783 15784 #if !defined(sun) 15785 kmem_free(state, 0); 15786 #if __FreeBSD_version < 800039 15787 dev->si_drv1 = NULL; 15788 #endif 15789 #endif 15790 } 15791 15792 ASSERT(dtrace_opens > 0); 15793 #if defined(sun) 15794 if (--dtrace_opens == 0) 15795 (void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE); 15796 #else 15797 --dtrace_opens; 15798 /* This code actually belongs in dtrace_detach() */ 15799 if ((dtrace_opens == 0) && (dtrace_taskq != NULL)) { 15800 taskq_destroy(dtrace_taskq); 15801 dtrace_taskq = NULL; 15802 } 15803 #endif 15804 15805 mutex_exit(&dtrace_lock); 15806 mutex_exit(&cpu_lock); 15807 15808 #if __FreeBSD_version < 800039 15809 /* Schedule this cloned device to be destroyed. */ 15810 destroy_dev_sched(dev); 15811 #endif 15812 15813 #if defined(sun) || __FreeBSD_version < 800039 15814 return (0); 15815 #endif 15816 } 15817 15818 #if defined(sun) 15819 /*ARGSUSED*/ 15820 static int 15821 dtrace_ioctl_helper(int cmd, intptr_t arg, int *rv) 15822 { 15823 int rval; 15824 dof_helper_t help, *dhp = NULL; 15825 15826 switch (cmd) { 15827 case DTRACEHIOC_ADDDOF: 15828 if (copyin((void *)arg, &help, sizeof (help)) != 0) { 15829 dtrace_dof_error(NULL, "failed to copyin DOF helper"); 15830 return (EFAULT); 15831 } 15832 15833 dhp = &help; 15834 arg = (intptr_t)help.dofhp_dof; 15835 /*FALLTHROUGH*/ 15836 15837 case DTRACEHIOC_ADD: { 15838 dof_hdr_t *dof = dtrace_dof_copyin(arg, &rval); 15839 15840 if (dof == NULL) 15841 return (rval); 15842 15843 mutex_enter(&dtrace_lock); 15844 15845 /* 15846 * dtrace_helper_slurp() takes responsibility for the dof -- 15847 * it may free it now or it may save it and free it later. 
15848 */ 15849 if ((rval = dtrace_helper_slurp(dof, dhp)) != -1) { 15850 *rv = rval; 15851 rval = 0; 15852 } else { 15853 rval = EINVAL; 15854 } 15855 15856 mutex_exit(&dtrace_lock); 15857 return (rval); 15858 } 15859 15860 case DTRACEHIOC_REMOVE: { 15861 mutex_enter(&dtrace_lock); 15862 rval = dtrace_helper_destroygen(arg); 15863 mutex_exit(&dtrace_lock); 15864 15865 return (rval); 15866 } 15867 15868 default: 15869 break; 15870 } 15871 15872 return (ENOTTY); 15873 } 15874 15875 /*ARGSUSED*/ 15876 static int 15877 dtrace_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv) 15878 { 15879 minor_t minor = getminor(dev); 15880 dtrace_state_t *state; 15881 int rval; 15882 15883 if (minor == DTRACEMNRN_HELPER) 15884 return (dtrace_ioctl_helper(cmd, arg, rv)); 15885 15886 state = ddi_get_soft_state(dtrace_softstate, minor); 15887 15888 if (state->dts_anon) { 15889 ASSERT(dtrace_anon.dta_state == NULL); 15890 state = state->dts_anon; 15891 } 15892 15893 switch (cmd) { 15894 case DTRACEIOC_PROVIDER: { 15895 dtrace_providerdesc_t pvd; 15896 dtrace_provider_t *pvp; 15897 15898 if (copyin((void *)arg, &pvd, sizeof (pvd)) != 0) 15899 return (EFAULT); 15900 15901 pvd.dtvd_name[DTRACE_PROVNAMELEN - 1] = '\0'; 15902 mutex_enter(&dtrace_provider_lock); 15903 15904 for (pvp = dtrace_provider; pvp != NULL; pvp = pvp->dtpv_next) { 15905 if (strcmp(pvp->dtpv_name, pvd.dtvd_name) == 0) 15906 break; 15907 } 15908 15909 mutex_exit(&dtrace_provider_lock); 15910 15911 if (pvp == NULL) 15912 return (ESRCH); 15913 15914 bcopy(&pvp->dtpv_priv, &pvd.dtvd_priv, sizeof (dtrace_ppriv_t)); 15915 bcopy(&pvp->dtpv_attr, &pvd.dtvd_attr, sizeof (dtrace_pattr_t)); 15916 15917 if (copyout(&pvd, (void *)arg, sizeof (pvd)) != 0) 15918 return (EFAULT); 15919 15920 return (0); 15921 } 15922 15923 case DTRACEIOC_EPROBE: { 15924 dtrace_eprobedesc_t epdesc; 15925 dtrace_ecb_t *ecb; 15926 dtrace_action_t *act; 15927 void *buf; 15928 size_t size; 15929 uintptr_t dest; 15930 int nrecs; 15931 15932 if (copyin((void *)arg, &epdesc, sizeof (epdesc)) != 0) 15933 return (EFAULT); 15934 15935 mutex_enter(&dtrace_lock); 15936 15937 if ((ecb = dtrace_epid2ecb(state, epdesc.dtepd_epid)) == NULL) { 15938 mutex_exit(&dtrace_lock); 15939 return (EINVAL); 15940 } 15941 15942 if (ecb->dte_probe == NULL) { 15943 mutex_exit(&dtrace_lock); 15944 return (EINVAL); 15945 } 15946 15947 epdesc.dtepd_probeid = ecb->dte_probe->dtpr_id; 15948 epdesc.dtepd_uarg = ecb->dte_uarg; 15949 epdesc.dtepd_size = ecb->dte_size; 15950 15951 nrecs = epdesc.dtepd_nrecs; 15952 epdesc.dtepd_nrecs = 0; 15953 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15954 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15955 continue; 15956 15957 epdesc.dtepd_nrecs++; 15958 } 15959 15960 /* 15961 * Now that we have the size, we need to allocate a temporary 15962 * buffer in which to store the complete description. We need 15963 * the temporary buffer to be able to drop dtrace_lock() 15964 * across the copyout(), below. 
15965 */ 15966 size = sizeof (dtrace_eprobedesc_t) + 15967 (epdesc.dtepd_nrecs * sizeof (dtrace_recdesc_t)); 15968 15969 buf = kmem_alloc(size, KM_SLEEP); 15970 dest = (uintptr_t)buf; 15971 15972 bcopy(&epdesc, (void *)dest, sizeof (epdesc)); 15973 dest += offsetof(dtrace_eprobedesc_t, dtepd_rec[0]); 15974 15975 for (act = ecb->dte_action; act != NULL; act = act->dta_next) { 15976 if (DTRACEACT_ISAGG(act->dta_kind) || act->dta_intuple) 15977 continue; 15978 15979 if (nrecs-- == 0) 15980 break; 15981 15982 bcopy(&act->dta_rec, (void *)dest, 15983 sizeof (dtrace_recdesc_t)); 15984 dest += sizeof (dtrace_recdesc_t); 15985 } 15986 15987 mutex_exit(&dtrace_lock); 15988 15989 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 15990 kmem_free(buf, size); 15991 return (EFAULT); 15992 } 15993 15994 kmem_free(buf, size); 15995 return (0); 15996 } 15997 15998 case DTRACEIOC_AGGDESC: { 15999 dtrace_aggdesc_t aggdesc; 16000 dtrace_action_t *act; 16001 dtrace_aggregation_t *agg; 16002 int nrecs; 16003 uint32_t offs; 16004 dtrace_recdesc_t *lrec; 16005 void *buf; 16006 size_t size; 16007 uintptr_t dest; 16008 16009 if (copyin((void *)arg, &aggdesc, sizeof (aggdesc)) != 0) 16010 return (EFAULT); 16011 16012 mutex_enter(&dtrace_lock); 16013 16014 if ((agg = dtrace_aggid2agg(state, aggdesc.dtagd_id)) == NULL) { 16015 mutex_exit(&dtrace_lock); 16016 return (EINVAL); 16017 } 16018 16019 aggdesc.dtagd_epid = agg->dtag_ecb->dte_epid; 16020 16021 nrecs = aggdesc.dtagd_nrecs; 16022 aggdesc.dtagd_nrecs = 0; 16023 16024 offs = agg->dtag_base; 16025 lrec = &agg->dtag_action.dta_rec; 16026 aggdesc.dtagd_size = lrec->dtrd_offset + lrec->dtrd_size - offs; 16027 16028 for (act = agg->dtag_first; ; act = act->dta_next) { 16029 ASSERT(act->dta_intuple || 16030 DTRACEACT_ISAGG(act->dta_kind)); 16031 16032 /* 16033 * If this action has a record size of zero, it 16034 * denotes an argument to the aggregating action. 16035 * Because the presence of this record doesn't (or 16036 * shouldn't) affect the way the data is interpreted, 16037 * we don't copy it out to save user-level the 16038 * confusion of dealing with a zero-length record. 16039 */ 16040 if (act->dta_rec.dtrd_size == 0) { 16041 ASSERT(agg->dtag_hasarg); 16042 continue; 16043 } 16044 16045 aggdesc.dtagd_nrecs++; 16046 16047 if (act == &agg->dtag_action) 16048 break; 16049 } 16050 16051 /* 16052 * Now that we have the size, we need to allocate a temporary 16053 * buffer in which to store the complete description. We need 16054 * the temporary buffer to be able to drop dtrace_lock() 16055 * across the copyout(), below. 16056 */ 16057 size = sizeof (dtrace_aggdesc_t) + 16058 (aggdesc.dtagd_nrecs * sizeof (dtrace_recdesc_t)); 16059 16060 buf = kmem_alloc(size, KM_SLEEP); 16061 dest = (uintptr_t)buf; 16062 16063 bcopy(&aggdesc, (void *)dest, sizeof (aggdesc)); 16064 dest += offsetof(dtrace_aggdesc_t, dtagd_rec[0]); 16065 16066 for (act = agg->dtag_first; ; act = act->dta_next) { 16067 dtrace_recdesc_t rec = act->dta_rec; 16068 16069 /* 16070 * See the comment in the above loop for why we pass 16071 * over zero-length records. 
16072 */ 16073 if (rec.dtrd_size == 0) { 16074 ASSERT(agg->dtag_hasarg); 16075 continue; 16076 } 16077 16078 if (nrecs-- == 0) 16079 break; 16080 16081 rec.dtrd_offset -= offs; 16082 bcopy(&rec, (void *)dest, sizeof (rec)); 16083 dest += sizeof (dtrace_recdesc_t); 16084 16085 if (act == &agg->dtag_action) 16086 break; 16087 } 16088 16089 mutex_exit(&dtrace_lock); 16090 16091 if (copyout(buf, (void *)arg, dest - (uintptr_t)buf) != 0) { 16092 kmem_free(buf, size); 16093 return (EFAULT); 16094 } 16095 16096 kmem_free(buf, size); 16097 return (0); 16098 } 16099 16100 case DTRACEIOC_ENABLE: { 16101 dof_hdr_t *dof; 16102 dtrace_enabling_t *enab = NULL; 16103 dtrace_vstate_t *vstate; 16104 int err = 0; 16105 16106 *rv = 0; 16107 16108 /* 16109 * If a NULL argument has been passed, we take this as our 16110 * cue to reevaluate our enablings. 16111 */ 16112 if (arg == NULL) { 16113 dtrace_enabling_matchall(); 16114 16115 return (0); 16116 } 16117 16118 if ((dof = dtrace_dof_copyin(arg, &rval)) == NULL) 16119 return (rval); 16120 16121 mutex_enter(&cpu_lock); 16122 mutex_enter(&dtrace_lock); 16123 vstate = &state->dts_vstate; 16124 16125 if (state->dts_activity != DTRACE_ACTIVITY_INACTIVE) { 16126 mutex_exit(&dtrace_lock); 16127 mutex_exit(&cpu_lock); 16128 dtrace_dof_destroy(dof); 16129 return (EBUSY); 16130 } 16131 16132 if (dtrace_dof_slurp(dof, vstate, cr, &enab, 0, B_TRUE) != 0) { 16133 mutex_exit(&dtrace_lock); 16134 mutex_exit(&cpu_lock); 16135 dtrace_dof_destroy(dof); 16136 return (EINVAL); 16137 } 16138 16139 if ((rval = dtrace_dof_options(dof, state)) != 0) { 16140 dtrace_enabling_destroy(enab); 16141 mutex_exit(&dtrace_lock); 16142 mutex_exit(&cpu_lock); 16143 dtrace_dof_destroy(dof); 16144 return (rval); 16145 } 16146 16147 if ((err = dtrace_enabling_match(enab, rv)) == 0) { 16148 err = dtrace_enabling_retain(enab); 16149 } else { 16150 dtrace_enabling_destroy(enab); 16151 } 16152 16153 mutex_exit(&cpu_lock); 16154 mutex_exit(&dtrace_lock); 16155 dtrace_dof_destroy(dof); 16156 16157 return (err); 16158 } 16159 16160 case DTRACEIOC_REPLICATE: { 16161 dtrace_repldesc_t desc; 16162 dtrace_probedesc_t *match = &desc.dtrpd_match; 16163 dtrace_probedesc_t *create = &desc.dtrpd_create; 16164 int err; 16165 16166 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16167 return (EFAULT); 16168 16169 match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16170 match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16171 match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16172 match->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16173 16174 create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16175 create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16176 create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16177 create->dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16178 16179 mutex_enter(&dtrace_lock); 16180 err = dtrace_enabling_replicate(state, match, create); 16181 mutex_exit(&dtrace_lock); 16182 16183 return (err); 16184 } 16185 16186 case DTRACEIOC_PROBEMATCH: 16187 case DTRACEIOC_PROBES: { 16188 dtrace_probe_t *probe = NULL; 16189 dtrace_probedesc_t desc; 16190 dtrace_probekey_t pkey; 16191 dtrace_id_t i; 16192 int m = 0; 16193 uint32_t priv; 16194 uid_t uid; 16195 zoneid_t zoneid; 16196 16197 if (copyin((void *)arg, &desc, sizeof (desc)) != 0) 16198 return (EFAULT); 16199 16200 desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0'; 16201 desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0'; 16202 desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0'; 16203 desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0'; 16204 16205 /* 16206 * Before we attempt to 

	case DTRACEIOC_REPLICATE: {
		dtrace_repldesc_t desc;
		dtrace_probedesc_t *match = &desc.dtrpd_match;
		dtrace_probedesc_t *create = &desc.dtrpd_create;
		int err;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		match->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		match->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		match->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		match->dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		create->dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		create->dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		create->dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		create->dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		mutex_enter(&dtrace_lock);
		err = dtrace_enabling_replicate(state, match, create);
		mutex_exit(&dtrace_lock);

		return (err);
	}

	case DTRACEIOC_PROBEMATCH:
	case DTRACEIOC_PROBES: {
		dtrace_probe_t *probe = NULL;
		dtrace_probedesc_t desc;
		dtrace_probekey_t pkey;
		dtrace_id_t i;
		int m = 0;
		uint32_t priv;
		uid_t uid;
		zoneid_t zoneid;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		desc.dtpd_provider[DTRACE_PROVNAMELEN - 1] = '\0';
		desc.dtpd_mod[DTRACE_MODNAMELEN - 1] = '\0';
		desc.dtpd_func[DTRACE_FUNCNAMELEN - 1] = '\0';
		desc.dtpd_name[DTRACE_NAMELEN - 1] = '\0';

		/*
		 * Before we attempt to match this probe, we want to give
		 * all providers the opportunity to provide it.
		 */
		if (desc.dtpd_id == DTRACE_IDNONE) {
			mutex_enter(&dtrace_provider_lock);
			dtrace_probe_provide(&desc, NULL);
			mutex_exit(&dtrace_provider_lock);
			desc.dtpd_id++;
		}

		if (cmd == DTRACEIOC_PROBEMATCH) {
			dtrace_probekey(&desc, &pkey);
			pkey.dtpk_id = DTRACE_IDNONE;
		}

		dtrace_cred2priv(cr, &priv, &uid, &zoneid);

		mutex_enter(&dtrace_lock);

		if (cmd == DTRACEIOC_PROBEMATCH) {
			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
				if ((probe = dtrace_probes[i - 1]) != NULL &&
				    (m = dtrace_match_probe(probe, &pkey,
				    priv, uid, zoneid)) != 0)
					break;
			}

			if (m < 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}

		} else {
			for (i = desc.dtpd_id; i <= dtrace_nprobes; i++) {
				if ((probe = dtrace_probes[i - 1]) != NULL &&
				    dtrace_match_priv(probe, priv, uid, zoneid))
					break;
			}
		}

		if (probe == NULL) {
			mutex_exit(&dtrace_lock);
			return (ESRCH);
		}

		dtrace_probe_description(probe, &desc);
		mutex_exit(&dtrace_lock);

		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}
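
	/*
	 * Editor's note: both commands above search forward from the probe
	 * ID given in dtpd_id and copy out the description of the first
	 * (matching) probe at or beyond it, so a consumer can enumerate the
	 * probe table by bumping the returned ID.  A hypothetical sketch:
	 *
	 *	dtrace_probedesc_t pd;
	 *
	 *	bzero(&pd, sizeof (pd));
	 *	while (ioctl(fd, DTRACEIOC_PROBES, &pd) != -1) {
	 *		// ... consume pd ...
	 *		pd.dtpd_id++;
	 *	}
	 *	// loop ends with errno == ESRCH once the table is exhausted
	 */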

	case DTRACEIOC_PROBEARG: {
		dtrace_argdesc_t desc;
		dtrace_probe_t *probe;
		dtrace_provider_t *prov;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtargd_id == DTRACE_IDNONE)
			return (EINVAL);

		if (desc.dtargd_ndx == DTRACE_ARGNONE)
			return (EINVAL);

		mutex_enter(&dtrace_provider_lock);
		mutex_enter(&mod_lock);
		mutex_enter(&dtrace_lock);

		if (desc.dtargd_id > dtrace_nprobes) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		if ((probe = dtrace_probes[desc.dtargd_id - 1]) == NULL) {
			mutex_exit(&dtrace_lock);
			mutex_exit(&mod_lock);
			mutex_exit(&dtrace_provider_lock);
			return (EINVAL);
		}

		mutex_exit(&dtrace_lock);

		prov = probe->dtpr_provider;

		if (prov->dtpv_pops.dtps_getargdesc == NULL) {
			/*
			 * There isn't any typed information for this probe.
			 * Set the argument number to DTRACE_ARGNONE.
			 */
			desc.dtargd_ndx = DTRACE_ARGNONE;
		} else {
			desc.dtargd_native[0] = '\0';
			desc.dtargd_xlate[0] = '\0';
			desc.dtargd_mapping = desc.dtargd_ndx;

			prov->dtpv_pops.dtps_getargdesc(prov->dtpv_arg,
			    probe->dtpr_id, probe->dtpr_arg, &desc);
		}

		mutex_exit(&mod_lock);
		mutex_exit(&dtrace_provider_lock);

		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_GO: {
		processorid_t cpuid;

		rval = dtrace_state_go(state, &cpuid);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STOP: {
		processorid_t cpuid;

		mutex_enter(&dtrace_lock);
		rval = dtrace_state_stop(state, &cpuid);
		mutex_exit(&dtrace_lock);

		if (rval != 0)
			return (rval);

		if (copyout(&cpuid, (void *)arg, sizeof (cpuid)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_DOFGET: {
		dof_hdr_t hdr, *dof;
		uint64_t len;

		if (copyin((void *)arg, &hdr, sizeof (hdr)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);
		dof = dtrace_dof_create(state);
		mutex_exit(&dtrace_lock);

		len = MIN(hdr.dofh_loadsz, dof->dofh_loadsz);
		rval = copyout(dof, (void *)arg, len);
		dtrace_dof_destroy(dof);

		return (rval == 0 ? 0 : EFAULT);
	}
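
	/*
	 * Editor's note: because the copyout above is clamped to the
	 * caller-supplied dofh_loadsz, a consumer can size its buffer in
	 * two calls -- fetch just the header to learn the real load size,
	 * then fetch the whole DOF.  Hypothetical sketch (illustrative, not
	 * libdtrace source):
	 *
	 *	dof_hdr_t hdr, *dof;
	 *
	 *	bzero(&hdr, sizeof (hdr));
	 *	hdr.dofh_loadsz = sizeof (hdr);
	 *	if (ioctl(fd, DTRACEIOC_DOFGET, &hdr) == -1)
	 *		return (NULL);
	 *	if ((dof = malloc(hdr.dofh_loadsz)) == NULL)
	 *		return (NULL);
	 *	dof->dofh_loadsz = hdr.dofh_loadsz;
	 *	if (ioctl(fd, DTRACEIOC_DOFGET, dof) == -1)
	 *		return (NULL);
	 */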

	case DTRACEIOC_AGGSNAP:
	case DTRACEIOC_BUFSNAP: {
		dtrace_bufdesc_t desc;
		caddr_t cached;
		dtrace_buffer_t *buf;

		if (copyin((void *)arg, &desc, sizeof (desc)) != 0)
			return (EFAULT);

		if (desc.dtbd_cpu < 0 || desc.dtbd_cpu >= NCPU)
			return (EINVAL);

		mutex_enter(&dtrace_lock);

		if (cmd == DTRACEIOC_BUFSNAP) {
			buf = &state->dts_buffer[desc.dtbd_cpu];
		} else {
			buf = &state->dts_aggbuffer[desc.dtbd_cpu];
		}

		if (buf->dtb_flags & (DTRACEBUF_RING | DTRACEBUF_FILL)) {
			size_t sz = buf->dtb_offset;

			if (state->dts_activity != DTRACE_ACTIVITY_STOPPED) {
				mutex_exit(&dtrace_lock);
				return (EBUSY);
			}

			/*
			 * If this buffer has already been consumed, we're
			 * going to indicate that there's nothing left here
			 * to consume.
			 */
			if (buf->dtb_flags & DTRACEBUF_CONSUMED) {
				mutex_exit(&dtrace_lock);

				desc.dtbd_size = 0;
				desc.dtbd_drops = 0;
				desc.dtbd_errors = 0;
				desc.dtbd_oldest = 0;
				sz = sizeof (desc);

				if (copyout(&desc, (void *)arg, sz) != 0)
					return (EFAULT);

				return (0);
			}

			/*
			 * If this is a ring buffer that has wrapped, we want
			 * to copy the whole thing out.
			 */
			if (buf->dtb_flags & DTRACEBUF_WRAPPED) {
				dtrace_buffer_polish(buf);
				sz = buf->dtb_size;
			}

			if (copyout(buf->dtb_tomax, desc.dtbd_data, sz) != 0) {
				mutex_exit(&dtrace_lock);
				return (EFAULT);
			}

			desc.dtbd_size = sz;
			desc.dtbd_drops = buf->dtb_drops;
			desc.dtbd_errors = buf->dtb_errors;
			desc.dtbd_oldest = buf->dtb_xamot_offset;
			desc.dtbd_timestamp = dtrace_gethrtime();

			mutex_exit(&dtrace_lock);

			if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
				return (EFAULT);

			buf->dtb_flags |= DTRACEBUF_CONSUMED;

			return (0);
		}

		if (buf->dtb_tomax == NULL) {
			ASSERT(buf->dtb_xamot == NULL);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		cached = buf->dtb_tomax;
		ASSERT(!(buf->dtb_flags & DTRACEBUF_NOSWITCH));

		dtrace_xcall(desc.dtbd_cpu,
		    (dtrace_xcall_t)dtrace_buffer_switch, buf);

		state->dts_errors += buf->dtb_xamot_errors;

		/*
		 * If the buffers did not actually switch, then the cross call
		 * did not take place -- presumably because the given CPU is
		 * not in the ready set.  If this is the case, we'll return
		 * ENOENT.
		 */
		if (buf->dtb_tomax == cached) {
			ASSERT(buf->dtb_xamot != cached);
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		ASSERT(cached == buf->dtb_xamot);

		/*
		 * We have our snapshot; now copy it out.
		 */
		if (copyout(buf->dtb_xamot, desc.dtbd_data,
		    buf->dtb_xamot_offset) != 0) {
			mutex_exit(&dtrace_lock);
			return (EFAULT);
		}

		desc.dtbd_size = buf->dtb_xamot_offset;
		desc.dtbd_drops = buf->dtb_xamot_drops;
		desc.dtbd_errors = buf->dtb_xamot_errors;
		desc.dtbd_oldest = 0;
		desc.dtbd_timestamp = buf->dtb_switched;

		mutex_exit(&dtrace_lock);

		/*
		 * Finally, copy out the buffer description.
		 */
		if (copyout(&desc, (void *)arg, sizeof (desc)) != 0)
			return (EFAULT);

		return (0);
	}
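
	/*
	 * Editor's note: the case above distinguishes ring/fill buffers
	 * (copied in place once tracing has stopped) from switching buffers,
	 * where the cross call atomically exchanges the active (dtb_tomax)
	 * and inactive (dtb_xamot) halves so the quiesced half can be copied
	 * out while tracing continues.  A hypothetical per-CPU consumer loop
	 * (illustrative only):
	 *
	 *	dtrace_bufdesc_t db;
	 *
	 *	db.dtbd_cpu = cpu;
	 *	db.dtbd_data = data;	// at least "bufsize" bytes
	 *	if (ioctl(fd, DTRACEIOC_BUFSNAP, &db) == -1) {
	 *		if (errno == ENOENT)
	 *			continue;	// CPU not ready; skip it
	 *		return (-1);
	 *	}
	 *	// db.dtbd_size bytes of trace data are now in "data"
	 */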

	case DTRACEIOC_CONF: {
		dtrace_conf_t conf;

		bzero(&conf, sizeof (conf));
		conf.dtc_difversion = DIF_VERSION;
		conf.dtc_difintregs = DIF_DIR_NREGS;
		conf.dtc_diftupregs = DIF_DTR_NREGS;
		conf.dtc_ctfmodel = CTF_MODEL_NATIVE;

		if (copyout(&conf, (void *)arg, sizeof (conf)) != 0)
			return (EFAULT);

		return (0);
	}

	case DTRACEIOC_STATUS: {
		dtrace_status_t stat;
		dtrace_dstate_t *dstate;
		int i, j;
		uint64_t nerrs;

		/*
		 * See the comment in dtrace_state_deadman() for the reason
		 * for setting dts_laststatus to INT64_MAX before setting
		 * it to the correct value.
		 */
		state->dts_laststatus = INT64_MAX;
		dtrace_membar_producer();
		state->dts_laststatus = dtrace_gethrtime();

		bzero(&stat, sizeof (stat));

		mutex_enter(&dtrace_lock);

		if (state->dts_activity == DTRACE_ACTIVITY_INACTIVE) {
			mutex_exit(&dtrace_lock);
			return (ENOENT);
		}

		if (state->dts_activity == DTRACE_ACTIVITY_DRAINING)
			stat.dtst_exiting = 1;

		nerrs = state->dts_errors;
		dstate = &state->dts_vstate.dtvs_dynvars;

		for (i = 0; i < NCPU; i++) {
			dtrace_dstate_percpu_t *dcpu = &dstate->dtds_percpu[i];

			stat.dtst_dyndrops += dcpu->dtdsc_drops;
			stat.dtst_dyndrops_dirty += dcpu->dtdsc_dirty_drops;
			stat.dtst_dyndrops_rinsing += dcpu->dtdsc_rinsing_drops;

			if (state->dts_buffer[i].dtb_flags & DTRACEBUF_FULL)
				stat.dtst_filled++;

			nerrs += state->dts_buffer[i].dtb_errors;

			for (j = 0; j < state->dts_nspeculations; j++) {
				dtrace_speculation_t *spec;
				dtrace_buffer_t *buf;

				spec = &state->dts_speculations[j];
				buf = &spec->dtsp_buffer[i];
				stat.dtst_specdrops += buf->dtb_xamot_drops;
			}
		}

		stat.dtst_specdrops_busy = state->dts_speculations_busy;
		stat.dtst_specdrops_unavail = state->dts_speculations_unavail;
		stat.dtst_stkstroverflows = state->dts_stkstroverflows;
		stat.dtst_dblerrors = state->dts_dblerrors;
		stat.dtst_killed =
		    (state->dts_activity == DTRACE_ACTIVITY_KILLED);
		stat.dtst_errors = nerrs;

		mutex_exit(&dtrace_lock);

		if (copyout(&stat, (void *)arg, sizeof (stat)) != 0)
			return (EFAULT);

		return (0);
	}
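
	/*
	 * Editor's note: consumers are expected to request status at a
	 * regular interval; the unlocked dts_laststatus update at the top
	 * of the case above is what dtrace_state_deadman() inspects to
	 * decide whether a consumer has stopped making progress, which is
	 * why the value is staged through INT64_MAX with a producer barrier
	 * rather than written directly.
	 */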

	case DTRACEIOC_FORMAT: {
		dtrace_fmtdesc_t fmt;
		char *str;
		int len;

		if (copyin((void *)arg, &fmt, sizeof (fmt)) != 0)
			return (EFAULT);

		mutex_enter(&dtrace_lock);

		if (fmt.dtfd_format == 0 ||
		    fmt.dtfd_format > state->dts_nformats) {
			mutex_exit(&dtrace_lock);
			return (EINVAL);
		}

		/*
		 * Format strings are allocated contiguously and they are
		 * never freed; if a format index is less than the number
		 * of formats, we can assert that the format map is non-NULL
		 * and that the format for the specified index is non-NULL.
		 */
		ASSERT(state->dts_formats != NULL);
		str = state->dts_formats[fmt.dtfd_format - 1];
		ASSERT(str != NULL);

		len = strlen(str) + 1;

		if (len > fmt.dtfd_length) {
			fmt.dtfd_length = len;

			if (copyout(&fmt, (void *)arg, sizeof (fmt)) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		} else {
			if (copyout(str, fmt.dtfd_string, len) != 0) {
				mutex_exit(&dtrace_lock);
				return (EINVAL);
			}
		}

		mutex_exit(&dtrace_lock);
		return (0);
	}
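
	/*
	 * Editor's note: format retrieval follows the same two-step sizing
	 * protocol as the other variable-length requests.  Hypothetical
	 * consumer-side sketch (illustrative, not libdtrace source):
	 *
	 *	dtrace_fmtdesc_t fmt;
	 *
	 *	bzero(&fmt, sizeof (fmt));
	 *	fmt.dtfd_format = format;
	 *	if (ioctl(fd, DTRACEIOC_FORMAT, &fmt) == -1)
	 *		return (-1);	// step 1: dtfd_length is now set
	 *	if ((fmt.dtfd_string = malloc(fmt.dtfd_length)) == NULL)
	 *		return (-1);
	 *	if (ioctl(fd, DTRACEIOC_FORMAT, &fmt) == -1)
	 *		return (-1);	// step 2: string is copied out
	 */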

	default:
		break;
	}

	return (ENOTTY);
}

/*ARGSUSED*/
static int
dtrace_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	dtrace_state_t *state;

	switch (cmd) {
	case DDI_DETACH:
		break;

	case DDI_SUSPEND:
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}

	mutex_enter(&cpu_lock);
	mutex_enter(&dtrace_provider_lock);
	mutex_enter(&dtrace_lock);

	ASSERT(dtrace_opens == 0);

	if (dtrace_helpers > 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	if (dtrace_unregister((dtrace_provider_id_t)dtrace_provider) != 0) {
		mutex_exit(&dtrace_provider_lock);
		mutex_exit(&dtrace_lock);
		mutex_exit(&cpu_lock);
		return (DDI_FAILURE);
	}

	dtrace_provider = NULL;

	if ((state = dtrace_anon_grab()) != NULL) {
		/*
		 * If there were ECBs on this state, the provider should
		 * not have been allowed to detach; assert that there are
		 * none.
		 */
		ASSERT(state->dts_necbs == 0);
		dtrace_state_destroy(state);

		/*
		 * If we're being detached with anonymous state, we need to
		 * indicate to the kernel debugger that DTrace is now
		 * inactive.
		 */
		(void) kdi_dtrace_set(KDI_DTSET_DTRACE_DEACTIVATE);
	}

	bzero(&dtrace_anon, sizeof (dtrace_anon_t));
	unregister_cpu_setup_func((cpu_setup_func_t *)dtrace_cpu_setup, NULL);
	dtrace_cpu_init = NULL;
	dtrace_helpers_cleanup = NULL;
	dtrace_helpers_fork = NULL;
	dtrace_cpustart_init = NULL;
	dtrace_cpustart_fini = NULL;
	dtrace_debugger_init = NULL;
	dtrace_debugger_fini = NULL;
	dtrace_modload = NULL;
	dtrace_modunload = NULL;

	mutex_exit(&cpu_lock);

	if (dtrace_helptrace_enabled) {
		kmem_free(dtrace_helptrace_buffer, dtrace_helptrace_bufsize);
		dtrace_helptrace_buffer = NULL;
	}

	kmem_free(dtrace_probes, dtrace_nprobes * sizeof (dtrace_probe_t *));
	dtrace_probes = NULL;
	dtrace_nprobes = 0;

	dtrace_hash_destroy(dtrace_bymod);
	dtrace_hash_destroy(dtrace_byfunc);
	dtrace_hash_destroy(dtrace_byname);
	dtrace_bymod = NULL;
	dtrace_byfunc = NULL;
	dtrace_byname = NULL;

	kmem_cache_destroy(dtrace_state_cache);
	vmem_destroy(dtrace_minor);
	vmem_destroy(dtrace_arena);

	if (dtrace_toxrange != NULL) {
		kmem_free(dtrace_toxrange,
		    dtrace_toxranges_max * sizeof (dtrace_toxrange_t));
		dtrace_toxrange = NULL;
		dtrace_toxranges = 0;
		dtrace_toxranges_max = 0;
	}

	ddi_remove_minor_node(dtrace_devi, NULL);
	dtrace_devi = NULL;

	ddi_soft_state_fini(&dtrace_softstate);

	ASSERT(dtrace_vtime_references == 0);
	ASSERT(dtrace_opens == 0);
	ASSERT(dtrace_retained == NULL);

	mutex_exit(&dtrace_lock);
	mutex_exit(&dtrace_provider_lock);

	/*
	 * We don't destroy the task queue until after we have dropped our
	 * locks (taskq_destroy() may block on running tasks).  To prevent
	 * attempting to do work after we have effectively detached but before
	 * the task queue has been destroyed, all tasks dispatched via the
	 * task queue must check that DTrace is still attached before
	 * performing any operation.
	 */
	taskq_destroy(dtrace_taskq);
	dtrace_taskq = NULL;

	return (DDI_SUCCESS);
}
#endif

#if defined(sun)
/*ARGSUSED*/
static int
dtrace_info(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
{
	int error;

	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*result = (void *)dtrace_devi;
		error = DDI_SUCCESS;
		break;
	case DDI_INFO_DEVT2INSTANCE:
		*result = (void *)0;
		error = DDI_SUCCESS;
		break;
	default:
		error = DDI_FAILURE;
	}
	return (error);
}
#endif

#if defined(sun)
static struct cb_ops dtrace_cb_ops = {
	dtrace_open,		/* open */
	dtrace_close,		/* close */
	nulldev,		/* strategy */
	nulldev,		/* print */
	nodev,			/* dump */
	nodev,			/* read */
	nodev,			/* write */
	dtrace_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* poll */
	ddi_prop_op,		/* cb_prop_op */
	0,			/* streamtab */
	D_NEW | D_MP		/* Driver compatibility flag */
};

static struct dev_ops dtrace_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	dtrace_info,		/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	dtrace_attach,		/* attach */
	dtrace_detach,		/* detach */
	nodev,			/* reset */
	&dtrace_cb_ops,		/* driver operations */
	NULL,			/* bus operations */
	nodev			/* dev power */
};

static struct modldrv modldrv = {
	&mod_driverops,		/* module type (this is a pseudo driver) */
	"Dynamic Tracing",	/* name of module */
	&dtrace_ops,		/* driver ops */
};

static struct modlinkage modlinkage = {
	MODREV_1,
	(void *)&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}
#else

static d_ioctl_t	dtrace_ioctl;
static d_ioctl_t	dtrace_ioctl_helper;
static void		dtrace_load(void *);
static int		dtrace_unload(void);
#if __FreeBSD_version < 800039
static void		dtrace_clone(void *, struct ucred *, char *, int,
			    struct cdev **);
static struct clonedevs	*dtrace_clones;	/* Ptr to the array of cloned devices. */
static eventhandler_tag	eh_tag;		/* Event handler tag. */
#else
static struct cdev	*dtrace_dev;
static struct cdev	*helper_dev;
#endif

void dtrace_invop_init(void);
void dtrace_invop_uninit(void);

static struct cdevsw dtrace_cdevsw = {
	.d_version	= D_VERSION,
#if __FreeBSD_version < 800039
	.d_flags	= D_TRACKCLOSE | D_NEEDMINOR,
	.d_close	= dtrace_close,
#endif
	.d_ioctl	= dtrace_ioctl,
	.d_open		= dtrace_open,
	.d_name		= "dtrace",
};

static struct cdevsw helper_cdevsw = {
	.d_version	= D_VERSION,
	.d_ioctl	= dtrace_ioctl_helper,
	.d_name		= "helper",
};

#include <dtrace_anon.c>
#if __FreeBSD_version < 800039
#include <dtrace_clone.c>
#endif
#include <dtrace_ioctl.c>
#include <dtrace_load.c>
#include <dtrace_modevent.c>
#include <dtrace_sysctl.c>
#include <dtrace_unload.c>
#include <dtrace_vtime.c>
#include <dtrace_hacks.c>
#include <dtrace_isa.c>

SYSINIT(dtrace_load, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_load, NULL);
SYSUNINIT(dtrace_unload, SI_SUB_DTRACE, SI_ORDER_FIRST, dtrace_unload, NULL);
SYSINIT(dtrace_anon_init, SI_SUB_DTRACE_ANON, SI_ORDER_FIRST, dtrace_anon_init, NULL);

DEV_MODULE(dtrace, dtrace_modevent, NULL);
MODULE_VERSION(dtrace, 1);
MODULE_DEPEND(dtrace, cyclic, 1, 1, 1);
MODULE_DEPEND(dtrace, opensolaris, 1, 1, 1);
#endif