1 /* 2 * CDDL HEADER START 3 * 4 * The contents of this file are subject to the terms of the 5 * Common Development and Distribution License (the "License"). 6 * You may not use this file except in compliance with the License. 7 * 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9 * or http://www.opensolaris.org/os/licensing. 10 * See the License for the specific language governing permissions 11 * and limitations under the License. 12 * 13 * When distributing Covered Code, include this CDDL HEADER in each 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15 * If applicable, add the following below this CDDL HEADER, with the 16 * fields enclosed by brackets "[]" replaced with your own identifying 17 * information: Portions Copyright [yyyy] [name of copyright owner] 18 * 19 * CDDL HEADER END 20 */ 21 /* 22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved. 23 * Use is subject to license terms. 24 */ 25 26 /* 27 * Copyright (c) 2013, Joyent, Inc. All rights reserved. 28 * Copyright (c) 2012 by Delphix. All rights reserved. 29 */ 30 31 #include <stdlib.h> 32 #include <strings.h> 33 #include <errno.h> 34 #include <unistd.h> 35 #include <limits.h> 36 #include <assert.h> 37 #include <ctype.h> 38 #ifdef illumos 39 #include <alloca.h> 40 #endif 41 #include <dt_impl.h> 42 #include <dt_pq.h> 43 #include <dt_printf.h> 44 #ifndef illumos 45 #include <libproc_compat.h> 46 #endif 47 48 #define DT_MASK_LO 0x00000000FFFFFFFFULL 49 50 /* 51 * We declare this here because (1) we need it and (2) we want to avoid a 52 * dependency on libm in libdtrace. 53 */ 54 static long double 55 dt_fabsl(long double x) 56 { 57 if (x < 0) 58 return (-x); 59 60 return (x); 61 } 62 63 static int 64 dt_ndigits(long long val) 65 { 66 int rval = 1; 67 long long cmp = 10; 68 69 if (val < 0) { 70 val = val == INT64_MIN ? INT64_MAX : -val; 71 rval++; 72 } 73 74 while (val > cmp && cmp > 0) { 75 rval++; 76 cmp *= 10; 77 } 78 79 return (rval < 4 ? 4 : rval); 80 } 81 82 /* 83 * 128-bit arithmetic functions needed to support the stddev() aggregating 84 * action. 85 */ 86 static int 87 dt_gt_128(uint64_t *a, uint64_t *b) 88 { 89 return (a[1] > b[1] || (a[1] == b[1] && a[0] > b[0])); 90 } 91 92 static int 93 dt_ge_128(uint64_t *a, uint64_t *b) 94 { 95 return (a[1] > b[1] || (a[1] == b[1] && a[0] >= b[0])); 96 } 97 98 static int 99 dt_le_128(uint64_t *a, uint64_t *b) 100 { 101 return (a[1] < b[1] || (a[1] == b[1] && a[0] <= b[0])); 102 } 103 104 /* 105 * Shift the 128-bit value in a by b. If b is positive, shift left. 106 * If b is negative, shift right. 
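 *
 * For example, dt_shift_128(a, 32) multiplies the 128-bit value by 2^32,
 * while dt_shift_128(a, -1) halves it, carrying the low bit of a[1] into
 * the high bit of a[0].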
 */
static void
dt_shift_128(uint64_t *a, int b)
{
    uint64_t mask;

    if (b == 0)
        return;

    if (b < 0) {
        b = -b;
        if (b >= 64) {
            a[0] = a[1] >> (b - 64);
            a[1] = 0;
        } else {
            a[0] >>= b;
            mask = 1LL << (64 - b);
            mask -= 1;
            a[0] |= ((a[1] & mask) << (64 - b));
            a[1] >>= b;
        }
    } else {
        if (b >= 64) {
            a[1] = a[0] << (b - 64);
            a[0] = 0;
        } else {
            a[1] <<= b;
            mask = a[0] >> (64 - b);
            a[1] |= mask;
            a[0] <<= b;
        }
    }
}

static int
dt_nbits_128(uint64_t *a)
{
    int nbits = 0;
    uint64_t tmp[2];
    uint64_t zero[2] = { 0, 0 };

    tmp[0] = a[0];
    tmp[1] = a[1];

    dt_shift_128(tmp, -1);
    while (dt_gt_128(tmp, zero)) {
        dt_shift_128(tmp, -1);
        nbits++;
    }

    return (nbits);
}

static void
dt_subtract_128(uint64_t *minuend, uint64_t *subtrahend, uint64_t *difference)
{
    uint64_t result[2];

    result[0] = minuend[0] - subtrahend[0];
    result[1] = minuend[1] - subtrahend[1] -
        (minuend[0] < subtrahend[0] ? 1 : 0);

    difference[0] = result[0];
    difference[1] = result[1];
}

static void
dt_add_128(uint64_t *addend1, uint64_t *addend2, uint64_t *sum)
{
    uint64_t result[2];

    result[0] = addend1[0] + addend2[0];
    result[1] = addend1[1] + addend2[1] +
        (result[0] < addend1[0] || result[0] < addend2[0] ? 1 : 0);

    sum[0] = result[0];
    sum[1] = result[1];
}

/*
 * The basic idea is to break the 2 64-bit values into 4 32-bit values,
 * use native multiplication on those, and then re-combine into the
 * resulting 128-bit value.
 *
 * (hi1 << 32 + lo1) * (hi2 << 32 + lo2) =
 *     hi1 * hi2 << 64 +
 *     hi1 * lo2 << 32 +
 *     hi2 * lo1 << 32 +
 *     lo1 * lo2
 */
static void
dt_multiply_128(uint64_t factor1, uint64_t factor2, uint64_t *product)
{
    uint64_t hi1, hi2, lo1, lo2;
    uint64_t tmp[2];

    hi1 = factor1 >> 32;
    hi2 = factor2 >> 32;

    lo1 = factor1 & DT_MASK_LO;
    lo2 = factor2 & DT_MASK_LO;

    product[0] = lo1 * lo2;
    product[1] = hi1 * hi2;

    tmp[0] = hi1 * lo2;
    tmp[1] = 0;
    dt_shift_128(tmp, 32);
    dt_add_128(product, tmp, product);

    tmp[0] = hi2 * lo1;
    tmp[1] = 0;
    dt_shift_128(tmp, 32);
    dt_add_128(product, tmp, product);
}

/*
 * This is long-hand division.
 *
 * We initialize subtrahend by shifting divisor left as far as possible. We
 * loop, comparing subtrahend to dividend: if subtrahend is smaller, we
 * subtract and set the appropriate bit in the result. We then shift
 * subtrahend right by one bit for the next comparison.
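 *
 * As a small worked example, dividing 100 by 7 this way compares 7 * 2^k
 * against the running remainder from the largest k downward, subtracting
 * and setting bit k of the quotient whenever it fits; that yields a
 * quotient of 14 (binary 1110) with a remainder of 2.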
230 */ 231 static void 232 dt_divide_128(uint64_t *dividend, uint64_t divisor, uint64_t *quotient) 233 { 234 uint64_t result[2] = { 0, 0 }; 235 uint64_t remainder[2]; 236 uint64_t subtrahend[2]; 237 uint64_t divisor_128[2]; 238 uint64_t mask[2] = { 1, 0 }; 239 int log = 0; 240 241 assert(divisor != 0); 242 243 divisor_128[0] = divisor; 244 divisor_128[1] = 0; 245 246 remainder[0] = dividend[0]; 247 remainder[1] = dividend[1]; 248 249 subtrahend[0] = divisor; 250 subtrahend[1] = 0; 251 252 while (divisor > 0) { 253 log++; 254 divisor >>= 1; 255 } 256 257 dt_shift_128(subtrahend, 128 - log); 258 dt_shift_128(mask, 128 - log); 259 260 while (dt_ge_128(remainder, divisor_128)) { 261 if (dt_ge_128(remainder, subtrahend)) { 262 dt_subtract_128(remainder, subtrahend, remainder); 263 result[0] |= mask[0]; 264 result[1] |= mask[1]; 265 } 266 267 dt_shift_128(subtrahend, -1); 268 dt_shift_128(mask, -1); 269 } 270 271 quotient[0] = result[0]; 272 quotient[1] = result[1]; 273 } 274 275 /* 276 * This is the long-hand method of calculating a square root. 277 * The algorithm is as follows: 278 * 279 * 1. Group the digits by 2 from the right. 280 * 2. Over the leftmost group, find the largest single-digit number 281 * whose square is less than that group. 282 * 3. Subtract the result of the previous step (2 or 4, depending) and 283 * bring down the next two-digit group. 284 * 4. For the result R we have so far, find the largest single-digit number 285 * x such that 2 * R * 10 * x + x^2 is less than the result from step 3. 286 * (Note that this is doubling R and performing a decimal left-shift by 1 287 * and searching for the appropriate decimal to fill the one's place.) 288 * The value x is the next digit in the square root. 289 * Repeat steps 3 and 4 until the desired precision is reached. (We're 290 * dealing with integers, so the above is sufficient.) 291 * 292 * In decimal, the square root of 582,734 would be calculated as so: 293 * 294 * __7__6__3 295 * | 58 27 34 296 * -49 (7^2 == 49 => 7 is the first digit in the square root) 297 * -- 298 * 9 27 (Subtract and bring down the next group.) 299 * 146 8 76 (2 * 7 * 10 * 6 + 6^2 == 876 => 6 is the next digit in 300 * ----- the square root) 301 * 51 34 (Subtract and bring down the next group.) 302 * 1523 45 69 (2 * 76 * 10 * 3 + 3^2 == 4569 => 3 is the next digit in 303 * ----- the square root) 304 * 5 65 (remainder) 305 * 306 * The above algorithm applies similarly in binary, but note that the 307 * only possible non-zero value for x in step 4 is 1, so step 4 becomes a 308 * simple decision: is 2 * R * 2 * 1 + 1^2 (aka R << 2 + 1) less than the 309 * preceding difference? 310 * 311 * In binary, the square root of 11011011 would be calculated as so: 312 * 313 * __1__1__1__0 314 * | 11 01 10 11 315 * 01 (0 << 2 + 1 == 1 < 11 => this bit is 1) 316 * -- 317 * 10 01 10 11 318 * 101 1 01 (1 << 2 + 1 == 101 < 1001 => next bit is 1) 319 * ----- 320 * 1 00 10 11 321 * 1101 11 01 (11 << 2 + 1 == 1101 < 10010 => next bit is 1) 322 * ------- 323 * 1 01 11 324 * 11101 1 11 01 (111 << 2 + 1 == 11101 > 10111 => last bit is 0) 325 * 326 */ 327 static uint64_t 328 dt_sqrt_128(uint64_t *square) 329 { 330 uint64_t result[2] = { 0, 0 }; 331 uint64_t diff[2] = { 0, 0 }; 332 uint64_t one[2] = { 1, 0 }; 333 uint64_t next_pair[2]; 334 uint64_t next_try[2]; 335 uint64_t bit_pairs, pair_shift; 336 int i; 337 338 bit_pairs = dt_nbits_128(square) / 2; 339 pair_shift = bit_pairs * 2; 340 341 for (i = 0; i <= bit_pairs; i++) { 342 /* 343 * Bring down the next pair of bits. 
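         * (next_pair receives those two bits; diff then holds the running
         * remainder that next_try is compared against below.)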
344 */ 345 next_pair[0] = square[0]; 346 next_pair[1] = square[1]; 347 dt_shift_128(next_pair, -pair_shift); 348 next_pair[0] &= 0x3; 349 next_pair[1] = 0; 350 351 dt_shift_128(diff, 2); 352 dt_add_128(diff, next_pair, diff); 353 354 /* 355 * next_try = R << 2 + 1 356 */ 357 next_try[0] = result[0]; 358 next_try[1] = result[1]; 359 dt_shift_128(next_try, 2); 360 dt_add_128(next_try, one, next_try); 361 362 if (dt_le_128(next_try, diff)) { 363 dt_subtract_128(diff, next_try, diff); 364 dt_shift_128(result, 1); 365 dt_add_128(result, one, result); 366 } else { 367 dt_shift_128(result, 1); 368 } 369 370 pair_shift -= 2; 371 } 372 373 assert(result[1] == 0); 374 375 return (result[0]); 376 } 377 378 uint64_t 379 dt_stddev(uint64_t *data, uint64_t normal) 380 { 381 uint64_t avg_of_squares[2]; 382 uint64_t square_of_avg[2]; 383 int64_t norm_avg; 384 uint64_t diff[2]; 385 386 if (data[0] == 0) 387 return (0); 388 389 /* 390 * The standard approximation for standard deviation is 391 * sqrt(average(x**2) - average(x)**2), i.e. the square root 392 * of the average of the squares minus the square of the average. 393 */ 394 dt_divide_128(data + 2, normal, avg_of_squares); 395 dt_divide_128(avg_of_squares, data[0], avg_of_squares); 396 397 norm_avg = (int64_t)data[1] / (int64_t)normal / (int64_t)data[0]; 398 399 if (norm_avg < 0) 400 norm_avg = -norm_avg; 401 402 dt_multiply_128((uint64_t)norm_avg, (uint64_t)norm_avg, square_of_avg); 403 404 dt_subtract_128(avg_of_squares, square_of_avg, diff); 405 406 return (dt_sqrt_128(diff)); 407 } 408 409 static int 410 dt_flowindent(dtrace_hdl_t *dtp, dtrace_probedata_t *data, dtrace_epid_t last, 411 dtrace_bufdesc_t *buf, size_t offs) 412 { 413 dtrace_probedesc_t *pd = data->dtpda_pdesc, *npd; 414 dtrace_eprobedesc_t *epd = data->dtpda_edesc, *nepd; 415 char *p = pd->dtpd_provider, *n = pd->dtpd_name, *sub; 416 dtrace_flowkind_t flow = DTRACEFLOW_NONE; 417 const char *str = NULL; 418 static const char *e_str[2] = { " -> ", " => " }; 419 static const char *r_str[2] = { " <- ", " <= " }; 420 static const char *ent = "entry", *ret = "return"; 421 static int entlen = 0, retlen = 0; 422 dtrace_epid_t next, id = epd->dtepd_epid; 423 int rval; 424 425 if (entlen == 0) { 426 assert(retlen == 0); 427 entlen = strlen(ent); 428 retlen = strlen(ret); 429 } 430 431 /* 432 * If the name of the probe is "entry" or ends with "-entry", we 433 * treat it as an entry; if it is "return" or ends with "-return", 434 * we treat it as a return. (This allows application-provided probes 435 * like "method-entry" or "function-entry" to participate in flow 436 * indentation -- without accidentally misinterpreting popular probe 437 * names like "carpentry", "gentry" or "Coventry".) 438 */ 439 if ((sub = strstr(n, ent)) != NULL && sub[entlen] == '\0' && 440 (sub == n || sub[-1] == '-')) { 441 flow = DTRACEFLOW_ENTRY; 442 str = e_str[strcmp(p, "syscall") == 0]; 443 } else if ((sub = strstr(n, ret)) != NULL && sub[retlen] == '\0' && 444 (sub == n || sub[-1] == '-')) { 445 flow = DTRACEFLOW_RETURN; 446 str = r_str[strcmp(p, "syscall") == 0]; 447 } 448 449 /* 450 * If we're going to indent this, we need to check the ID of our last 451 * call. If we're looking at the same probe ID but a different EPID, 452 * we _don't_ want to indent. (Yes, there are some minor holes in 453 * this scheme -- it's a heuristic.) 
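     * For example, if two different enablings of the same entry probe fire
     * back to back, the second record keeps the current indentation rather
     * than nesting another level deeper.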
454 */ 455 if (flow == DTRACEFLOW_ENTRY) { 456 if ((last != DTRACE_EPIDNONE && id != last && 457 pd->dtpd_id == dtp->dt_pdesc[last]->dtpd_id)) 458 flow = DTRACEFLOW_NONE; 459 } 460 461 /* 462 * If we're going to unindent this, it's more difficult to see if 463 * we don't actually want to unindent it -- we need to look at the 464 * _next_ EPID. 465 */ 466 if (flow == DTRACEFLOW_RETURN) { 467 offs += epd->dtepd_size; 468 469 do { 470 if (offs >= buf->dtbd_size) 471 goto out; 472 473 next = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs); 474 475 if (next == DTRACE_EPIDNONE) 476 offs += sizeof (id); 477 } while (next == DTRACE_EPIDNONE); 478 479 if ((rval = dt_epid_lookup(dtp, next, &nepd, &npd)) != 0) 480 return (rval); 481 482 if (next != id && npd->dtpd_id == pd->dtpd_id) 483 flow = DTRACEFLOW_NONE; 484 } 485 486 out: 487 if (flow == DTRACEFLOW_ENTRY || flow == DTRACEFLOW_RETURN) { 488 data->dtpda_prefix = str; 489 } else { 490 data->dtpda_prefix = "| "; 491 } 492 493 if (flow == DTRACEFLOW_RETURN && data->dtpda_indent > 0) 494 data->dtpda_indent -= 2; 495 496 data->dtpda_flow = flow; 497 498 return (0); 499 } 500 501 static int 502 dt_nullprobe(void) 503 { 504 return (DTRACE_CONSUME_THIS); 505 } 506 507 static int 508 dt_nullrec(void) 509 { 510 return (DTRACE_CONSUME_NEXT); 511 } 512 513 static void 514 dt_quantize_total(dtrace_hdl_t *dtp, int64_t datum, long double *total) 515 { 516 long double val = dt_fabsl((long double)datum); 517 518 if (dtp->dt_options[DTRACEOPT_AGGZOOM] == DTRACEOPT_UNSET) { 519 *total += val; 520 return; 521 } 522 523 /* 524 * If we're zooming in on an aggregation, we want the height of the 525 * highest value to be approximately 95% of total bar height -- so we 526 * adjust up by the reciprocal of DTRACE_AGGZOOM_MAX when comparing to 527 * our highest value. 528 */ 529 val *= 1 / DTRACE_AGGZOOM_MAX; 530 531 if (*total < val) 532 *total = val; 533 } 534 535 static int 536 dt_print_quanthdr(dtrace_hdl_t *dtp, FILE *fp, int width) 537 { 538 return (dt_printf(dtp, fp, "\n%*s %41s %-9s\n", 539 width ? width : 16, width ? "key" : "value", 540 "------------- Distribution -------------", "count")); 541 } 542 543 static int 544 dt_print_quanthdr_packed(dtrace_hdl_t *dtp, FILE *fp, int width, 545 const dtrace_aggdata_t *aggdata, dtrace_actkind_t action) 546 { 547 int min = aggdata->dtada_minbin, max = aggdata->dtada_maxbin; 548 int minwidth, maxwidth, i; 549 550 assert(action == DTRACEAGG_QUANTIZE || action == DTRACEAGG_LQUANTIZE); 551 552 if (action == DTRACEAGG_QUANTIZE) { 553 if (min != 0 && min != DTRACE_QUANTIZE_ZEROBUCKET) 554 min--; 555 556 if (max < DTRACE_QUANTIZE_NBUCKETS - 1) 557 max++; 558 559 minwidth = dt_ndigits(DTRACE_QUANTIZE_BUCKETVAL(min)); 560 maxwidth = dt_ndigits(DTRACE_QUANTIZE_BUCKETVAL(max)); 561 } else { 562 maxwidth = 8; 563 minwidth = maxwidth - 1; 564 max++; 565 } 566 567 if (dt_printf(dtp, fp, "\n%*s %*s .", 568 width, width > 0 ? "key" : "", minwidth, "min") < 0) 569 return (-1); 570 571 for (i = min; i <= max; i++) { 572 if (dt_printf(dtp, fp, "-") < 0) 573 return (-1); 574 } 575 576 return (dt_printf(dtp, fp, ". %*s | count\n", -maxwidth, "max")); 577 } 578 579 /* 580 * We use a subset of the Unicode Block Elements (U+2588 through U+258F, 581 * inclusive) to represent aggregations via UTF-8 -- which are expressed via 582 * 3-byte UTF-8 sequences. 
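 *
 * For example, U+2588 (FULL BLOCK) encodes as the bytes 0xe2 0x96 0x88:
 * 0xe0 | (0x2588 >> 12), 0x80 | ((0x2588 >> 6) & 0x3f) and
 * 0x80 | (0x2588 & 0x3f), which is exactly what the three macros below
 * produce.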
583 */ 584 #define DTRACE_AGGUTF8_FULL 0x2588 585 #define DTRACE_AGGUTF8_BASE 0x258f 586 #define DTRACE_AGGUTF8_LEVELS 8 587 588 #define DTRACE_AGGUTF8_BYTE0(val) (0xe0 | ((val) >> 12)) 589 #define DTRACE_AGGUTF8_BYTE1(val) (0x80 | (((val) >> 6) & 0x3f)) 590 #define DTRACE_AGGUTF8_BYTE2(val) (0x80 | ((val) & 0x3f)) 591 592 static int 593 dt_print_quantline_utf8(dtrace_hdl_t *dtp, FILE *fp, int64_t val, 594 uint64_t normal, long double total) 595 { 596 uint_t len = 40, i, whole, partial; 597 long double f = (dt_fabsl((long double)val) * len) / total; 598 const char *spaces = " "; 599 600 whole = (uint_t)f; 601 partial = (uint_t)((f - (long double)(uint_t)f) * 602 (long double)DTRACE_AGGUTF8_LEVELS); 603 604 if (dt_printf(dtp, fp, "|") < 0) 605 return (-1); 606 607 for (i = 0; i < whole; i++) { 608 if (dt_printf(dtp, fp, "%c%c%c", 609 DTRACE_AGGUTF8_BYTE0(DTRACE_AGGUTF8_FULL), 610 DTRACE_AGGUTF8_BYTE1(DTRACE_AGGUTF8_FULL), 611 DTRACE_AGGUTF8_BYTE2(DTRACE_AGGUTF8_FULL)) < 0) 612 return (-1); 613 } 614 615 if (partial != 0) { 616 partial = DTRACE_AGGUTF8_BASE - (partial - 1); 617 618 if (dt_printf(dtp, fp, "%c%c%c", 619 DTRACE_AGGUTF8_BYTE0(partial), 620 DTRACE_AGGUTF8_BYTE1(partial), 621 DTRACE_AGGUTF8_BYTE2(partial)) < 0) 622 return (-1); 623 624 i++; 625 } 626 627 return (dt_printf(dtp, fp, "%s %-9lld\n", spaces + i, 628 (long long)val / normal)); 629 } 630 631 static int 632 dt_print_quantline(dtrace_hdl_t *dtp, FILE *fp, int64_t val, 633 uint64_t normal, long double total, char positives, char negatives) 634 { 635 long double f; 636 uint_t depth, len = 40; 637 638 const char *ats = "@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@"; 639 const char *spaces = " "; 640 641 assert(strlen(ats) == len && strlen(spaces) == len); 642 assert(!(total == 0 && (positives || negatives))); 643 assert(!(val < 0 && !negatives)); 644 assert(!(val > 0 && !positives)); 645 assert(!(val != 0 && total == 0)); 646 647 if (!negatives) { 648 if (positives) { 649 if (dtp->dt_encoding == DT_ENCODING_UTF8) { 650 return (dt_print_quantline_utf8(dtp, fp, val, 651 normal, total)); 652 } 653 654 f = (dt_fabsl((long double)val) * len) / total; 655 depth = (uint_t)(f + 0.5); 656 } else { 657 depth = 0; 658 } 659 660 return (dt_printf(dtp, fp, "|%s%s %-9lld\n", ats + len - depth, 661 spaces + depth, (long long)val / normal)); 662 } 663 664 if (!positives) { 665 f = (dt_fabsl((long double)val) * len) / total; 666 depth = (uint_t)(f + 0.5); 667 668 return (dt_printf(dtp, fp, "%s%s| %-9lld\n", spaces + depth, 669 ats + len - depth, (long long)val / normal)); 670 } 671 672 /* 673 * If we're here, we have both positive and negative bucket values. 674 * To express this graphically, we're going to generate both positive 675 * and negative bars separated by a centerline. These bars are half 676 * the size of normal quantize()/lquantize() bars, so we divide the 677 * length in half before calculating the bar length. 
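     * (With the length halved to 20 columns, the largest-magnitude bucket
     * draws a bar of at most 20 characters on its side of the centerline.)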
     */
    len /= 2;
    ats = &ats[len];
    spaces = &spaces[len];

    f = (dt_fabsl((long double)val) * len) / total;
    depth = (uint_t)(f + 0.5);

    if (val <= 0) {
        return (dt_printf(dtp, fp, "%s%s|%*s %-9lld\n", spaces + depth,
            ats + len - depth, len, "", (long long)val / normal));
    } else {
        return (dt_printf(dtp, fp, "%20s|%s%s %-9lld\n", "",
            ats + len - depth, spaces + depth,
            (long long)val / normal));
    }
}

/*
 * As with UTF-8 printing of aggregations, we use a subset of the Unicode
 * Block Elements (U+2581 through U+2588, inclusive) to represent our packed
 * aggregation.
 */
#define DTRACE_AGGPACK_BASE     0x2581
#define DTRACE_AGGPACK_LEVELS   8

static int
dt_print_packed(dtrace_hdl_t *dtp, FILE *fp,
    long double datum, long double total)
{
    static boolean_t utf8_checked = B_FALSE;
    static boolean_t utf8;
    char *ascii = "__xxxxXX";
    char *neg = "vvvvVV";
    unsigned int len;
    long double val;

    if (!utf8_checked) {
        char *term;

        /*
         * We want to determine if we can reasonably emit UTF-8 for our
         * packed aggregation.  To do this, we check for terminal types
         * that are known to be too primitive to display UTF-8, and fall
         * back to ASCII on those.
         */
        utf8_checked = B_TRUE;

        if (dtp->dt_encoding == DT_ENCODING_ASCII) {
            utf8 = B_FALSE;
        } else if (dtp->dt_encoding == DT_ENCODING_UTF8) {
            utf8 = B_TRUE;
        } else if ((term = getenv("TERM")) != NULL &&
            (strcmp(term, "sun") == 0 ||
            strcmp(term, "sun-color") == 0 ||
            strcmp(term, "dumb") == 0)) {
            utf8 = B_FALSE;
        } else {
            utf8 = B_TRUE;
        }
    }

    if (datum == 0)
        return (dt_printf(dtp, fp, " "));

    if (datum < 0) {
        len = strlen(neg);
        val = dt_fabsl(datum * (len - 1)) / total;
        return (dt_printf(dtp, fp, "%c", neg[(uint_t)(val + 0.5)]));
    }

    if (utf8) {
        int block = DTRACE_AGGPACK_BASE + (unsigned int)(((datum *
            (DTRACE_AGGPACK_LEVELS - 1)) / total) + 0.5);

        return (dt_printf(dtp, fp, "%c%c%c",
            DTRACE_AGGUTF8_BYTE0(block),
            DTRACE_AGGUTF8_BYTE1(block),
            DTRACE_AGGUTF8_BYTE2(block)));
    }

    len = strlen(ascii);
    val = (datum * (len - 1)) / total;
    return (dt_printf(dtp, fp, "%c", ascii[(uint_t)(val + 0.5)]));
}

int
dt_print_quantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr,
    size_t size, uint64_t normal)
{
    const int64_t *data = addr;
    int i, first_bin = 0, last_bin = DTRACE_QUANTIZE_NBUCKETS - 1;
    long double total = 0;
    char positives = 0, negatives = 0;

    if (size != DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t))
        return (dt_set_errno(dtp, EDT_DMISMATCH));

    while (first_bin < DTRACE_QUANTIZE_NBUCKETS - 1 && data[first_bin] == 0)
        first_bin++;

    if (first_bin == DTRACE_QUANTIZE_NBUCKETS - 1) {
        /*
         * There isn't any data. This is possible if the aggregation
         * has been clear()'d or if negative increment values have been
         * used. Regardless, we'll print the buckets around 0.
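         * (That is, the three buckets whose values are -1, 0 and 1.)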
783 */ 784 first_bin = DTRACE_QUANTIZE_ZEROBUCKET - 1; 785 last_bin = DTRACE_QUANTIZE_ZEROBUCKET + 1; 786 } else { 787 if (first_bin > 0) 788 first_bin--; 789 790 while (last_bin > 0 && data[last_bin] == 0) 791 last_bin--; 792 793 if (last_bin < DTRACE_QUANTIZE_NBUCKETS - 1) 794 last_bin++; 795 } 796 797 for (i = first_bin; i <= last_bin; i++) { 798 positives |= (data[i] > 0); 799 negatives |= (data[i] < 0); 800 dt_quantize_total(dtp, data[i], &total); 801 } 802 803 if (dt_print_quanthdr(dtp, fp, 0) < 0) 804 return (-1); 805 806 for (i = first_bin; i <= last_bin; i++) { 807 if (dt_printf(dtp, fp, "%16lld ", 808 (long long)DTRACE_QUANTIZE_BUCKETVAL(i)) < 0) 809 return (-1); 810 811 if (dt_print_quantline(dtp, fp, data[i], normal, total, 812 positives, negatives) < 0) 813 return (-1); 814 } 815 816 return (0); 817 } 818 819 static int 820 dt_print_quantize_packed(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 821 size_t size, const dtrace_aggdata_t *aggdata) 822 { 823 const int64_t *data = addr; 824 long double total = 0, count = 0; 825 int min = aggdata->dtada_minbin, max = aggdata->dtada_maxbin, i; 826 int64_t minval, maxval; 827 828 if (size != DTRACE_QUANTIZE_NBUCKETS * sizeof (uint64_t)) 829 return (dt_set_errno(dtp, EDT_DMISMATCH)); 830 831 if (min != 0 && min != DTRACE_QUANTIZE_ZEROBUCKET) 832 min--; 833 834 if (max < DTRACE_QUANTIZE_NBUCKETS - 1) 835 max++; 836 837 minval = DTRACE_QUANTIZE_BUCKETVAL(min); 838 maxval = DTRACE_QUANTIZE_BUCKETVAL(max); 839 840 if (dt_printf(dtp, fp, " %*lld :", dt_ndigits(minval), 841 (long long)minval) < 0) 842 return (-1); 843 844 for (i = min; i <= max; i++) { 845 dt_quantize_total(dtp, data[i], &total); 846 count += data[i]; 847 } 848 849 for (i = min; i <= max; i++) { 850 if (dt_print_packed(dtp, fp, data[i], total) < 0) 851 return (-1); 852 } 853 854 if (dt_printf(dtp, fp, ": %*lld | %lld\n", 855 -dt_ndigits(maxval), (long long)maxval, (long long)count) < 0) 856 return (-1); 857 858 return (0); 859 } 860 861 int 862 dt_print_lquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 863 size_t size, uint64_t normal) 864 { 865 const int64_t *data = addr; 866 int i, first_bin, last_bin, base; 867 uint64_t arg; 868 long double total = 0; 869 uint16_t step, levels; 870 char positives = 0, negatives = 0; 871 872 if (size < sizeof (uint64_t)) 873 return (dt_set_errno(dtp, EDT_DMISMATCH)); 874 875 arg = *data++; 876 size -= sizeof (uint64_t); 877 878 base = DTRACE_LQUANTIZE_BASE(arg); 879 step = DTRACE_LQUANTIZE_STEP(arg); 880 levels = DTRACE_LQUANTIZE_LEVELS(arg); 881 882 first_bin = 0; 883 last_bin = levels + 1; 884 885 if (size != sizeof (uint64_t) * (levels + 2)) 886 return (dt_set_errno(dtp, EDT_DMISMATCH)); 887 888 while (first_bin <= levels + 1 && data[first_bin] == 0) 889 first_bin++; 890 891 if (first_bin > levels + 1) { 892 first_bin = 0; 893 last_bin = 2; 894 } else { 895 if (first_bin > 0) 896 first_bin--; 897 898 while (last_bin > 0 && data[last_bin] == 0) 899 last_bin--; 900 901 if (last_bin < levels + 1) 902 last_bin++; 903 } 904 905 for (i = first_bin; i <= last_bin; i++) { 906 positives |= (data[i] > 0); 907 negatives |= (data[i] < 0); 908 dt_quantize_total(dtp, data[i], &total); 909 } 910 911 if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value", 912 "------------- Distribution -------------", "count") < 0) 913 return (-1); 914 915 for (i = first_bin; i <= last_bin; i++) { 916 char c[32]; 917 int err; 918 919 if (i == 0) { 920 (void) snprintf(c, sizeof (c), "< %d", base); 921 err = dt_printf(dtp, fp, "%16s ", c); 922 } else if (i == levels 
+ 1) { 923 (void) snprintf(c, sizeof (c), ">= %d", 924 base + (levels * step)); 925 err = dt_printf(dtp, fp, "%16s ", c); 926 } else { 927 err = dt_printf(dtp, fp, "%16d ", 928 base + (i - 1) * step); 929 } 930 931 if (err < 0 || dt_print_quantline(dtp, fp, data[i], normal, 932 total, positives, negatives) < 0) 933 return (-1); 934 } 935 936 return (0); 937 } 938 939 /*ARGSUSED*/ 940 static int 941 dt_print_lquantize_packed(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 942 size_t size, const dtrace_aggdata_t *aggdata) 943 { 944 const int64_t *data = addr; 945 long double total = 0, count = 0; 946 int min, max, base, err; 947 uint64_t arg; 948 uint16_t step, levels; 949 char c[32]; 950 unsigned int i; 951 952 if (size < sizeof (uint64_t)) 953 return (dt_set_errno(dtp, EDT_DMISMATCH)); 954 955 arg = *data++; 956 size -= sizeof (uint64_t); 957 958 base = DTRACE_LQUANTIZE_BASE(arg); 959 step = DTRACE_LQUANTIZE_STEP(arg); 960 levels = DTRACE_LQUANTIZE_LEVELS(arg); 961 962 if (size != sizeof (uint64_t) * (levels + 2)) 963 return (dt_set_errno(dtp, EDT_DMISMATCH)); 964 965 min = 0; 966 max = levels + 1; 967 968 if (min == 0) { 969 (void) snprintf(c, sizeof (c), "< %d", base); 970 err = dt_printf(dtp, fp, "%8s :", c); 971 } else { 972 err = dt_printf(dtp, fp, "%8d :", base + (min - 1) * step); 973 } 974 975 if (err < 0) 976 return (-1); 977 978 for (i = min; i <= max; i++) { 979 dt_quantize_total(dtp, data[i], &total); 980 count += data[i]; 981 } 982 983 for (i = min; i <= max; i++) { 984 if (dt_print_packed(dtp, fp, data[i], total) < 0) 985 return (-1); 986 } 987 988 (void) snprintf(c, sizeof (c), ">= %d", base + (levels * step)); 989 return (dt_printf(dtp, fp, ": %-8s | %lld\n", c, (long long)count)); 990 } 991 992 int 993 dt_print_llquantize(dtrace_hdl_t *dtp, FILE *fp, const void *addr, 994 size_t size, uint64_t normal) 995 { 996 int i, first_bin, last_bin, bin = 1, order, levels; 997 uint16_t factor, low, high, nsteps; 998 const int64_t *data = addr; 999 int64_t value = 1, next, step; 1000 char positives = 0, negatives = 0; 1001 long double total = 0; 1002 uint64_t arg; 1003 char c[32]; 1004 1005 if (size < sizeof (uint64_t)) 1006 return (dt_set_errno(dtp, EDT_DMISMATCH)); 1007 1008 arg = *data++; 1009 size -= sizeof (uint64_t); 1010 1011 factor = DTRACE_LLQUANTIZE_FACTOR(arg); 1012 low = DTRACE_LLQUANTIZE_LOW(arg); 1013 high = DTRACE_LLQUANTIZE_HIGH(arg); 1014 nsteps = DTRACE_LLQUANTIZE_NSTEP(arg); 1015 1016 /* 1017 * We don't expect to be handed invalid llquantize() parameters here, 1018 * but sanity check them (to a degree) nonetheless. 
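     * For example, a D clause using llquantize(x, 10, 0, 6, 20) arrives
     * here with a factor of 10, a low magnitude of 0, a high magnitude of
     * 6 and 20 steps per order of magnitude.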
1019 */ 1020 if (size > INT32_MAX || factor < 2 || low >= high || 1021 nsteps == 0 || factor > nsteps) 1022 return (dt_set_errno(dtp, EDT_DMISMATCH)); 1023 1024 levels = (int)size / sizeof (uint64_t); 1025 1026 first_bin = 0; 1027 last_bin = levels - 1; 1028 1029 while (first_bin < levels && data[first_bin] == 0) 1030 first_bin++; 1031 1032 if (first_bin == levels) { 1033 first_bin = 0; 1034 last_bin = 1; 1035 } else { 1036 if (first_bin > 0) 1037 first_bin--; 1038 1039 while (last_bin > 0 && data[last_bin] == 0) 1040 last_bin--; 1041 1042 if (last_bin < levels - 1) 1043 last_bin++; 1044 } 1045 1046 for (i = first_bin; i <= last_bin; i++) { 1047 positives |= (data[i] > 0); 1048 negatives |= (data[i] < 0); 1049 dt_quantize_total(dtp, data[i], &total); 1050 } 1051 1052 if (dt_printf(dtp, fp, "\n%16s %41s %-9s\n", "value", 1053 "------------- Distribution -------------", "count") < 0) 1054 return (-1); 1055 1056 for (order = 0; order < low; order++) 1057 value *= factor; 1058 1059 next = value * factor; 1060 step = next > nsteps ? next / nsteps : 1; 1061 1062 if (first_bin == 0) { 1063 (void) snprintf(c, sizeof (c), "< %lld", (long long)value); 1064 1065 if (dt_printf(dtp, fp, "%16s ", c) < 0) 1066 return (-1); 1067 1068 if (dt_print_quantline(dtp, fp, data[0], normal, 1069 total, positives, negatives) < 0) 1070 return (-1); 1071 } 1072 1073 while (order <= high) { 1074 if (bin >= first_bin && bin <= last_bin) { 1075 if (dt_printf(dtp, fp, "%16lld ", (long long)value) < 0) 1076 return (-1); 1077 1078 if (dt_print_quantline(dtp, fp, data[bin], 1079 normal, total, positives, negatives) < 0) 1080 return (-1); 1081 } 1082 1083 assert(value < next); 1084 bin++; 1085 1086 if ((value += step) != next) 1087 continue; 1088 1089 next = value * factor; 1090 step = next > nsteps ? next / nsteps : 1; 1091 order++; 1092 } 1093 1094 if (last_bin < bin) 1095 return (0); 1096 1097 assert(last_bin == bin); 1098 (void) snprintf(c, sizeof (c), ">= %lld", (long long)value); 1099 1100 if (dt_printf(dtp, fp, "%16s ", c) < 0) 1101 return (-1); 1102 1103 return (dt_print_quantline(dtp, fp, data[bin], normal, 1104 total, positives, negatives)); 1105 } 1106 1107 /*ARGSUSED*/ 1108 static int 1109 dt_print_average(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, 1110 size_t size, uint64_t normal) 1111 { 1112 /* LINTED - alignment */ 1113 int64_t *data = (int64_t *)addr; 1114 1115 return (dt_printf(dtp, fp, " %16lld", data[0] ? 1116 (long long)(data[1] / (int64_t)normal / data[0]) : 0)); 1117 } 1118 1119 /*ARGSUSED*/ 1120 static int 1121 dt_print_stddev(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, 1122 size_t size, uint64_t normal) 1123 { 1124 /* LINTED - alignment */ 1125 uint64_t *data = (uint64_t *)addr; 1126 1127 return (dt_printf(dtp, fp, " %16llu", data[0] ? 1128 (unsigned long long) dt_stddev(data, normal) : 0)); 1129 } 1130 1131 /*ARGSUSED*/ 1132 static int 1133 dt_print_bytes(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, 1134 size_t nbytes, int width, int quiet, int forceraw) 1135 { 1136 /* 1137 * If the byte stream is a series of printable characters, followed by 1138 * a terminating byte, we print it out as a string. Otherwise, we 1139 * assume that it's something else and just print the bytes. 
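     *
     * The raw form is a conventional hex dump: a header labelling the
     * sixteen byte columns 0-f, then one line per 16 bytes giving the
     * offset, the hex values and their printable-ASCII rendering (with
     * '.' standing in for non-printable bytes).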
1140 */ 1141 int i, j, margin = 5; 1142 char *c = (char *)addr; 1143 1144 if (nbytes == 0) 1145 return (0); 1146 1147 if (forceraw) 1148 goto raw; 1149 1150 if (dtp->dt_options[DTRACEOPT_RAWBYTES] != DTRACEOPT_UNSET) 1151 goto raw; 1152 1153 for (i = 0; i < nbytes; i++) { 1154 /* 1155 * We define a "printable character" to be one for which 1156 * isprint(3C) returns non-zero, isspace(3C) returns non-zero, 1157 * or a character which is either backspace or the bell. 1158 * Backspace and the bell are regrettably special because 1159 * they fail the first two tests -- and yet they are entirely 1160 * printable. These are the only two control characters that 1161 * have meaning for the terminal and for which isprint(3C) and 1162 * isspace(3C) return 0. 1163 */ 1164 if (isprint((unsigned char)c[i]) || 1165 isspace((unsigned char)c[i]) || 1166 c[i] == '\b' || c[i] == '\a') 1167 continue; 1168 1169 if (c[i] == '\0' && i > 0) { 1170 /* 1171 * This looks like it might be a string. Before we 1172 * assume that it is indeed a string, check the 1173 * remainder of the byte range; if it contains 1174 * additional non-nul characters, we'll assume that 1175 * it's a binary stream that just happens to look like 1176 * a string, and we'll print out the individual bytes. 1177 */ 1178 for (j = i + 1; j < nbytes; j++) { 1179 if (c[j] != '\0') 1180 break; 1181 } 1182 1183 if (j != nbytes) 1184 break; 1185 1186 if (quiet) { 1187 return (dt_printf(dtp, fp, "%s", c)); 1188 } else { 1189 return (dt_printf(dtp, fp, " %s%*s", 1190 width < 0 ? " " : "", width, c)); 1191 } 1192 } 1193 1194 break; 1195 } 1196 1197 if (i == nbytes) { 1198 /* 1199 * The byte range is all printable characters, but there is 1200 * no trailing nul byte. We'll assume that it's a string and 1201 * print it as such. 1202 */ 1203 char *s = alloca(nbytes + 1); 1204 bcopy(c, s, nbytes); 1205 s[nbytes] = '\0'; 1206 return (dt_printf(dtp, fp, " %-*s", width, s)); 1207 } 1208 1209 raw: 1210 if (dt_printf(dtp, fp, "\n%*s ", margin, "") < 0) 1211 return (-1); 1212 1213 for (i = 0; i < 16; i++) 1214 if (dt_printf(dtp, fp, " %c", "0123456789abcdef"[i]) < 0) 1215 return (-1); 1216 1217 if (dt_printf(dtp, fp, " 0123456789abcdef\n") < 0) 1218 return (-1); 1219 1220 1221 for (i = 0; i < nbytes; i += 16) { 1222 if (dt_printf(dtp, fp, "%*s%5x:", margin, "", i) < 0) 1223 return (-1); 1224 1225 for (j = i; j < i + 16 && j < nbytes; j++) { 1226 if (dt_printf(dtp, fp, " %02x", (uchar_t)c[j]) < 0) 1227 return (-1); 1228 } 1229 1230 while (j++ % 16) { 1231 if (dt_printf(dtp, fp, " ") < 0) 1232 return (-1); 1233 } 1234 1235 if (dt_printf(dtp, fp, " ") < 0) 1236 return (-1); 1237 1238 for (j = i; j < i + 16 && j < nbytes; j++) { 1239 if (dt_printf(dtp, fp, "%c", 1240 c[j] < ' ' || c[j] > '~' ? '.' 
: c[j]) < 0) 1241 return (-1); 1242 } 1243 1244 if (dt_printf(dtp, fp, "\n") < 0) 1245 return (-1); 1246 } 1247 1248 return (0); 1249 } 1250 1251 int 1252 dt_print_stack(dtrace_hdl_t *dtp, FILE *fp, const char *format, 1253 caddr_t addr, int depth, int size) 1254 { 1255 dtrace_syminfo_t dts; 1256 GElf_Sym sym; 1257 int i, indent; 1258 char c[PATH_MAX * 2]; 1259 uint64_t pc; 1260 1261 if (dt_printf(dtp, fp, "\n") < 0) 1262 return (-1); 1263 1264 if (format == NULL) 1265 format = "%s"; 1266 1267 if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET) 1268 indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT]; 1269 else 1270 indent = _dtrace_stkindent; 1271 1272 for (i = 0; i < depth; i++) { 1273 switch (size) { 1274 case sizeof (uint32_t): 1275 /* LINTED - alignment */ 1276 pc = *((uint32_t *)addr); 1277 break; 1278 1279 case sizeof (uint64_t): 1280 /* LINTED - alignment */ 1281 pc = *((uint64_t *)addr); 1282 break; 1283 1284 default: 1285 return (dt_set_errno(dtp, EDT_BADSTACKPC)); 1286 } 1287 1288 if (pc == 0) 1289 break; 1290 1291 addr += size; 1292 1293 if (dt_printf(dtp, fp, "%*s", indent, "") < 0) 1294 return (-1); 1295 1296 if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) { 1297 if (pc > sym.st_value) { 1298 (void) snprintf(c, sizeof (c), "%s`%s+0x%llx", 1299 dts.dts_object, dts.dts_name, 1300 (unsigned long long)(pc - sym.st_value)); 1301 } else { 1302 (void) snprintf(c, sizeof (c), "%s`%s", 1303 dts.dts_object, dts.dts_name); 1304 } 1305 } else { 1306 /* 1307 * We'll repeat the lookup, but this time we'll specify 1308 * a NULL GElf_Sym -- indicating that we're only 1309 * interested in the containing module. 1310 */ 1311 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) { 1312 (void) snprintf(c, sizeof (c), "%s`0x%llx", 1313 dts.dts_object, (unsigned long long)pc); 1314 } else { 1315 (void) snprintf(c, sizeof (c), "0x%llx", 1316 (unsigned long long)pc); 1317 } 1318 } 1319 1320 if (dt_printf(dtp, fp, format, c) < 0) 1321 return (-1); 1322 1323 if (dt_printf(dtp, fp, "\n") < 0) 1324 return (-1); 1325 } 1326 1327 return (0); 1328 } 1329 1330 int 1331 dt_print_ustack(dtrace_hdl_t *dtp, FILE *fp, const char *format, 1332 caddr_t addr, uint64_t arg) 1333 { 1334 /* LINTED - alignment */ 1335 uint64_t *pc = (uint64_t *)addr; 1336 uint32_t depth = DTRACE_USTACK_NFRAMES(arg); 1337 uint32_t strsize = DTRACE_USTACK_STRSIZE(arg); 1338 const char *strbase = addr + (depth + 1) * sizeof (uint64_t); 1339 const char *str = strsize ? strbase : NULL; 1340 int err = 0; 1341 1342 char name[PATH_MAX], objname[PATH_MAX], c[PATH_MAX * 2]; 1343 struct ps_prochandle *P; 1344 GElf_Sym sym; 1345 int i, indent; 1346 pid_t pid; 1347 1348 if (depth == 0) 1349 return (0); 1350 1351 pid = (pid_t)*pc++; 1352 1353 if (dt_printf(dtp, fp, "\n") < 0) 1354 return (-1); 1355 1356 if (format == NULL) 1357 format = "%s"; 1358 1359 if (dtp->dt_options[DTRACEOPT_STACKINDENT] != DTRACEOPT_UNSET) 1360 indent = (int)dtp->dt_options[DTRACEOPT_STACKINDENT]; 1361 else 1362 indent = _dtrace_stkindent; 1363 1364 /* 1365 * Ultimately, we need to add an entry point in the library vector for 1366 * determining <symbol, offset> from <pid, address>. For now, if 1367 * this is a vector open, we just print the raw address or string. 
1368 */ 1369 if (dtp->dt_vector == NULL) 1370 P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0); 1371 else 1372 P = NULL; 1373 1374 if (P != NULL) 1375 dt_proc_lock(dtp, P); /* lock handle while we perform lookups */ 1376 1377 for (i = 0; i < depth && pc[i] != 0; i++) { 1378 const prmap_t *map; 1379 1380 if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0) 1381 break; 1382 1383 if (P != NULL && Plookup_by_addr(P, pc[i], 1384 name, sizeof (name), &sym) == 0) { 1385 (void) Pobjname(P, pc[i], objname, sizeof (objname)); 1386 1387 if (pc[i] > sym.st_value) { 1388 (void) snprintf(c, sizeof (c), 1389 "%s`%s+0x%llx", dt_basename(objname), name, 1390 (unsigned long long)(pc[i] - sym.st_value)); 1391 } else { 1392 (void) snprintf(c, sizeof (c), 1393 "%s`%s", dt_basename(objname), name); 1394 } 1395 } else if (str != NULL && str[0] != '\0' && str[0] != '@' && 1396 (P != NULL && ((map = Paddr_to_map(P, pc[i])) == NULL || 1397 (map->pr_mflags & MA_WRITE)))) { 1398 /* 1399 * If the current string pointer in the string table 1400 * does not point to an empty string _and_ the program 1401 * counter falls in a writable region, we'll use the 1402 * string from the string table instead of the raw 1403 * address. This last condition is necessary because 1404 * some (broken) ustack helpers will return a string 1405 * even for a program counter that they can't 1406 * identify. If we have a string for a program 1407 * counter that falls in a segment that isn't 1408 * writable, we assume that we have fallen into this 1409 * case and we refuse to use the string. 1410 */ 1411 (void) snprintf(c, sizeof (c), "%s", str); 1412 } else { 1413 if (P != NULL && Pobjname(P, pc[i], objname, 1414 sizeof (objname)) != 0) { 1415 (void) snprintf(c, sizeof (c), "%s`0x%llx", 1416 dt_basename(objname), (unsigned long long)pc[i]); 1417 } else { 1418 (void) snprintf(c, sizeof (c), "0x%llx", 1419 (unsigned long long)pc[i]); 1420 } 1421 } 1422 1423 if ((err = dt_printf(dtp, fp, format, c)) < 0) 1424 break; 1425 1426 if ((err = dt_printf(dtp, fp, "\n")) < 0) 1427 break; 1428 1429 if (str != NULL && str[0] == '@') { 1430 /* 1431 * If the first character of the string is an "at" sign, 1432 * then the string is inferred to be an annotation -- 1433 * and it is printed out beneath the frame and offset 1434 * with brackets. 
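             * For example, a helper-returned string of "@myannotation"
             * (an illustrative name) would be printed bracketed on its
             * own line beneath the corresponding frame.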
1435 */ 1436 if ((err = dt_printf(dtp, fp, "%*s", indent, "")) < 0) 1437 break; 1438 1439 (void) snprintf(c, sizeof (c), " [ %s ]", &str[1]); 1440 1441 if ((err = dt_printf(dtp, fp, format, c)) < 0) 1442 break; 1443 1444 if ((err = dt_printf(dtp, fp, "\n")) < 0) 1445 break; 1446 } 1447 1448 if (str != NULL) { 1449 str += strlen(str) + 1; 1450 if (str - strbase >= strsize) 1451 str = NULL; 1452 } 1453 } 1454 1455 if (P != NULL) { 1456 dt_proc_unlock(dtp, P); 1457 dt_proc_release(dtp, P); 1458 } 1459 1460 return (err); 1461 } 1462 1463 static int 1464 dt_print_usym(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr, dtrace_actkind_t act) 1465 { 1466 /* LINTED - alignment */ 1467 uint64_t pid = ((uint64_t *)addr)[0]; 1468 /* LINTED - alignment */ 1469 uint64_t pc = ((uint64_t *)addr)[1]; 1470 const char *format = " %-50s"; 1471 char *s; 1472 int n, len = 256; 1473 1474 if (act == DTRACEACT_USYM && dtp->dt_vector == NULL) { 1475 struct ps_prochandle *P; 1476 1477 if ((P = dt_proc_grab(dtp, pid, 1478 PGRAB_RDONLY | PGRAB_FORCE, 0)) != NULL) { 1479 GElf_Sym sym; 1480 1481 dt_proc_lock(dtp, P); 1482 1483 if (Plookup_by_addr(P, pc, NULL, 0, &sym) == 0) 1484 pc = sym.st_value; 1485 1486 dt_proc_unlock(dtp, P); 1487 dt_proc_release(dtp, P); 1488 } 1489 } 1490 1491 do { 1492 n = len; 1493 s = alloca(n); 1494 } while ((len = dtrace_uaddr2str(dtp, pid, pc, s, n)) > n); 1495 1496 return (dt_printf(dtp, fp, format, s)); 1497 } 1498 1499 int 1500 dt_print_umod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr) 1501 { 1502 /* LINTED - alignment */ 1503 uint64_t pid = ((uint64_t *)addr)[0]; 1504 /* LINTED - alignment */ 1505 uint64_t pc = ((uint64_t *)addr)[1]; 1506 int err = 0; 1507 1508 char objname[PATH_MAX], c[PATH_MAX * 2]; 1509 struct ps_prochandle *P; 1510 1511 if (format == NULL) 1512 format = " %-50s"; 1513 1514 /* 1515 * See the comment in dt_print_ustack() for the rationale for 1516 * printing raw addresses in the vectored case. 
1517 */ 1518 if (dtp->dt_vector == NULL) 1519 P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0); 1520 else 1521 P = NULL; 1522 1523 if (P != NULL) 1524 dt_proc_lock(dtp, P); /* lock handle while we perform lookups */ 1525 1526 if (P != NULL && Pobjname(P, pc, objname, sizeof (objname)) != 0) { 1527 (void) snprintf(c, sizeof (c), "%s", dt_basename(objname)); 1528 } else { 1529 (void) snprintf(c, sizeof (c), "0x%llx", (unsigned long long)pc); 1530 } 1531 1532 err = dt_printf(dtp, fp, format, c); 1533 1534 if (P != NULL) { 1535 dt_proc_unlock(dtp, P); 1536 dt_proc_release(dtp, P); 1537 } 1538 1539 return (err); 1540 } 1541 1542 static int 1543 dt_print_memory(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr) 1544 { 1545 int quiet = (dtp->dt_options[DTRACEOPT_QUIET] != DTRACEOPT_UNSET); 1546 size_t nbytes = *((uintptr_t *) addr); 1547 1548 return (dt_print_bytes(dtp, fp, addr + sizeof(uintptr_t), 1549 nbytes, 50, quiet, 1)); 1550 } 1551 1552 typedef struct dt_type_cbdata { 1553 dtrace_hdl_t *dtp; 1554 dtrace_typeinfo_t dtt; 1555 caddr_t addr; 1556 caddr_t addrend; 1557 const char *name; 1558 int f_type; 1559 int indent; 1560 int type_width; 1561 int name_width; 1562 FILE *fp; 1563 } dt_type_cbdata_t; 1564 1565 static int dt_print_type_data(dt_type_cbdata_t *, ctf_id_t); 1566 1567 static int 1568 dt_print_type_member(const char *name, ctf_id_t type, ulong_t off, void *arg) 1569 { 1570 dt_type_cbdata_t cbdata; 1571 dt_type_cbdata_t *cbdatap = arg; 1572 ssize_t ssz; 1573 1574 if ((ssz = ctf_type_size(cbdatap->dtt.dtt_ctfp, type)) <= 0) 1575 return (0); 1576 1577 off /= 8; 1578 1579 cbdata = *cbdatap; 1580 cbdata.name = name; 1581 cbdata.addr += off; 1582 cbdata.addrend = cbdata.addr + ssz; 1583 1584 return (dt_print_type_data(&cbdata, type)); 1585 } 1586 1587 static int 1588 dt_print_type_width(const char *name, ctf_id_t type, ulong_t off, void *arg) 1589 { 1590 char buf[DT_TYPE_NAMELEN]; 1591 char *p; 1592 dt_type_cbdata_t *cbdatap = arg; 1593 size_t sz = strlen(name); 1594 1595 ctf_type_name(cbdatap->dtt.dtt_ctfp, type, buf, sizeof (buf)); 1596 1597 if ((p = strchr(buf, '[')) != NULL) 1598 p[-1] = '\0'; 1599 else 1600 p = __UNCONST(""); 1601 1602 sz += strlen(p); 1603 1604 if (sz > cbdatap->name_width) 1605 cbdatap->name_width = sz; 1606 1607 sz = strlen(buf); 1608 1609 if (sz > cbdatap->type_width) 1610 cbdatap->type_width = sz; 1611 1612 return (0); 1613 } 1614 1615 static int 1616 dt_print_type_data(dt_type_cbdata_t *cbdatap, ctf_id_t type) 1617 { 1618 caddr_t addr = cbdatap->addr; 1619 caddr_t addrend = cbdatap->addrend; 1620 char buf[DT_TYPE_NAMELEN]; 1621 char *p; 1622 int cnt = 0; 1623 uint_t kind = ctf_type_kind(cbdatap->dtt.dtt_ctfp, type); 1624 ssize_t ssz = ctf_type_size(cbdatap->dtt.dtt_ctfp, type); 1625 1626 ctf_type_name(cbdatap->dtt.dtt_ctfp, type, buf, sizeof (buf)); 1627 1628 if ((p = strchr(buf, '[')) != NULL) 1629 p[-1] = '\0'; 1630 else 1631 p = __UNCONST(""); 1632 1633 if (cbdatap->f_type) { 1634 int type_width = roundup(cbdatap->type_width + 1, 4); 1635 int name_width = roundup(cbdatap->name_width + 1, 4); 1636 1637 name_width -= strlen(cbdatap->name); 1638 1639 dt_printf(cbdatap->dtp, cbdatap->fp, "%*s%-*s%s%-*s = ",cbdatap->indent * 4,"",type_width,buf,cbdatap->name,name_width,p); 1640 } 1641 1642 while (addr < addrend) { 1643 dt_type_cbdata_t cbdata; 1644 ctf_arinfo_t arinfo; 1645 ctf_encoding_t cte; 1646 void *vp = addr; 1647 cbdata = *cbdatap; 1648 cbdata.name = ""; 1649 cbdata.addr = addr; 1650 cbdata.addrend = addr + ssz; 1651 cbdata.f_type = 0; 1652 
cbdata.indent++; 1653 cbdata.type_width = 0; 1654 cbdata.name_width = 0; 1655 1656 if (cnt > 0) 1657 dt_printf(cbdatap->dtp, cbdatap->fp, "%*s", cbdatap->indent * 4,""); 1658 1659 switch (kind) { 1660 case CTF_K_INTEGER: 1661 if (ctf_type_encoding(cbdatap->dtt.dtt_ctfp, type, &cte) != 0) 1662 return (-1); 1663 if ((cte.cte_format & CTF_INT_SIGNED) != 0) 1664 switch (cte.cte_bits) { 1665 case 8: 1666 if (isprint(*((unsigned char *) vp))) 1667 dt_printf(cbdatap->dtp, cbdatap->fp, "'%c', ", *((char *) vp)); 1668 dt_printf(cbdatap->dtp, cbdatap->fp, "%d (0x%x);\n", *((char *) vp), *((char *) vp)); 1669 break; 1670 case 16: 1671 dt_printf(cbdatap->dtp, cbdatap->fp, "%hd (0x%hx);\n", *((short *) vp), *((u_short *) vp)); 1672 break; 1673 case 32: 1674 dt_printf(cbdatap->dtp, cbdatap->fp, "%d (0x%x);\n", *((int *) vp), *((u_int *) vp)); 1675 break; 1676 case 64: 1677 dt_printf(cbdatap->dtp, cbdatap->fp, "%jd (0x%jx);\n", *((long long *) vp), *((unsigned long long *) vp)); 1678 break; 1679 default: 1680 dt_printf(cbdatap->dtp, cbdatap->fp, "CTF_K_INTEGER: format %x offset %u bits %u\n",cte.cte_format,cte.cte_offset,cte.cte_bits); 1681 break; 1682 } 1683 else 1684 switch (cte.cte_bits) { 1685 case 8: 1686 dt_printf(cbdatap->dtp, cbdatap->fp, "%u (0x%x);\n", *((uint8_t *) vp) & 0xff, *((uint8_t *) vp) & 0xff); 1687 break; 1688 case 16: 1689 dt_printf(cbdatap->dtp, cbdatap->fp, "%hu (0x%hx);\n", *((u_short *) vp), *((u_short *) vp)); 1690 break; 1691 case 32: 1692 dt_printf(cbdatap->dtp, cbdatap->fp, "%u (0x%x);\n", *((u_int *) vp), *((u_int *) vp)); 1693 break; 1694 case 64: 1695 dt_printf(cbdatap->dtp, cbdatap->fp, "%ju (0x%jx);\n", *((unsigned long long *) vp), *((unsigned long long *) vp)); 1696 break; 1697 default: 1698 dt_printf(cbdatap->dtp, cbdatap->fp, "CTF_K_INTEGER: format %x offset %u bits %u\n",cte.cte_format,cte.cte_offset,cte.cte_bits); 1699 break; 1700 } 1701 break; 1702 case CTF_K_FLOAT: 1703 dt_printf(cbdatap->dtp, cbdatap->fp, "CTF_K_FLOAT: format %x offset %u bits %u\n",cte.cte_format,cte.cte_offset,cte.cte_bits); 1704 break; 1705 case CTF_K_POINTER: 1706 dt_printf(cbdatap->dtp, cbdatap->fp, "%p;\n", *((void **) addr)); 1707 break; 1708 case CTF_K_ARRAY: 1709 if (ctf_array_info(cbdatap->dtt.dtt_ctfp, type, &arinfo) != 0) 1710 return (-1); 1711 dt_printf(cbdatap->dtp, cbdatap->fp, "{\n%*s",cbdata.indent * 4,""); 1712 dt_print_type_data(&cbdata, arinfo.ctr_contents); 1713 dt_printf(cbdatap->dtp, cbdatap->fp, "%*s};\n",cbdatap->indent * 4,""); 1714 break; 1715 case CTF_K_FUNCTION: 1716 dt_printf(cbdatap->dtp, cbdatap->fp, "CTF_K_FUNCTION:\n"); 1717 break; 1718 case CTF_K_STRUCT: 1719 cbdata.f_type = 1; 1720 if (ctf_member_iter(cbdatap->dtt.dtt_ctfp, type, 1721 dt_print_type_width, &cbdata) != 0) 1722 return (-1); 1723 dt_printf(cbdatap->dtp, cbdatap->fp, "{\n"); 1724 if (ctf_member_iter(cbdatap->dtt.dtt_ctfp, type, 1725 dt_print_type_member, &cbdata) != 0) 1726 return (-1); 1727 dt_printf(cbdatap->dtp, cbdatap->fp, "%*s};\n",cbdatap->indent * 4,""); 1728 break; 1729 case CTF_K_UNION: 1730 cbdata.f_type = 1; 1731 if (ctf_member_iter(cbdatap->dtt.dtt_ctfp, type, 1732 dt_print_type_width, &cbdata) != 0) 1733 return (-1); 1734 dt_printf(cbdatap->dtp, cbdatap->fp, "{\n"); 1735 if (ctf_member_iter(cbdatap->dtt.dtt_ctfp, type, 1736 dt_print_type_member, &cbdata) != 0) 1737 return (-1); 1738 dt_printf(cbdatap->dtp, cbdatap->fp, "%*s};\n",cbdatap->indent * 4,""); 1739 break; 1740 case CTF_K_ENUM: 1741 dt_printf(cbdatap->dtp, cbdatap->fp, "%s;\n", ctf_enum_name(cbdatap->dtt.dtt_ctfp, type, 
                *((int *) vp)));
            break;
        case CTF_K_TYPEDEF:
            dt_print_type_data(&cbdata, ctf_type_reference(cbdatap->dtt.dtt_ctfp, type));
            break;
        case CTF_K_VOLATILE:
            if (cbdatap->f_type)
                dt_printf(cbdatap->dtp, cbdatap->fp, "volatile ");
            dt_print_type_data(&cbdata, ctf_type_reference(cbdatap->dtt.dtt_ctfp, type));
            break;
        case CTF_K_CONST:
            if (cbdatap->f_type)
                dt_printf(cbdatap->dtp, cbdatap->fp, "const ");
            dt_print_type_data(&cbdata, ctf_type_reference(cbdatap->dtt.dtt_ctfp, type));
            break;
        case CTF_K_RESTRICT:
            if (cbdatap->f_type)
                dt_printf(cbdatap->dtp, cbdatap->fp, "restrict ");
            dt_print_type_data(&cbdata, ctf_type_reference(cbdatap->dtt.dtt_ctfp, type));
            break;
        default:
            break;
        }

        addr += ssz;
        cnt++;
    }

    return (0);
}

static int
dt_print_type(dtrace_hdl_t *dtp, FILE *fp, caddr_t addr)
{
    char *p;
    dtrace_typeinfo_t dtt;
    dt_type_cbdata_t cbdata;
    int num = 0;
    int quiet = (dtp->dt_options[DTRACEOPT_QUIET] != DTRACEOPT_UNSET);
    ssize_t ssz;

    if (!quiet)
        dt_printf(dtp, fp, "\n");

    /* Get the total number of bytes of data buffered. */
    size_t nbytes = *((uintptr_t *) addr);
    addr += sizeof(uintptr_t);

    /*
     * Get the size of the type so that we can check that it matches
     * the CTF data we look up and so that we can figure out how many
     * type elements are buffered.
     */
    size_t typs = *((uintptr_t *) addr);
    addr += sizeof(uintptr_t);

    /*
     * Point to the type string in the buffer. Get its string
     * length and round it up to become the offset to the start
     * of the buffered type data, which we would like to be aligned
     * for easy access.
     */
    char *strp = (char *) addr;
    int offset = roundup(strlen(strp) + 1, sizeof(uintptr_t));

    /*
     * The type string might have a format such as 'int [20]'.
     * Check if there is an array dimension present.
     */
    if ((p = strchr(strp, '[')) != NULL) {
        /* Strip off the array dimension. */
        *p++ = '\0';

        for (; *p != '\0' && *p != ']'; p++)
            num = num * 10 + *p - '0';
    } else
        /* No array dimension, so default. */
        num = 1;

    /* Look up the CTF type from the type string. */
    if (dtrace_lookup_by_type(dtp, DTRACE_OBJ_EVERY, strp, &dtt) < 0)
        return (-1);

    /* Offset the buffer address to the start of the data...
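     * (The buffered record is laid out as: total data size, element type
     * size, the NUL-terminated type string padded to a pointer-sized
     * boundary, and then the data itself.)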
*/ 1825 addr += offset; 1826 1827 ssz = ctf_type_size(dtt.dtt_ctfp, dtt.dtt_type); 1828 1829 if (typs != ssz) { 1830 printf("Expected type size from buffer (%lu) to match type size looked up now (%ld)\n", (u_long) typs, (long) ssz); 1831 return (-1); 1832 } 1833 1834 cbdata.dtp = dtp; 1835 cbdata.dtt = dtt; 1836 cbdata.name = ""; 1837 cbdata.addr = addr; 1838 cbdata.addrend = addr + nbytes; 1839 cbdata.indent = 1; 1840 cbdata.f_type = 1; 1841 cbdata.type_width = 0; 1842 cbdata.name_width = 0; 1843 cbdata.fp = fp; 1844 1845 return (dt_print_type_data(&cbdata, dtt.dtt_type)); 1846 } 1847 1848 static int 1849 dt_print_sym(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr) 1850 { 1851 /* LINTED - alignment */ 1852 uint64_t pc = *((uint64_t *)addr); 1853 dtrace_syminfo_t dts; 1854 GElf_Sym sym; 1855 char c[PATH_MAX * 2]; 1856 1857 if (format == NULL) 1858 format = " %-50s"; 1859 1860 if (dtrace_lookup_by_addr(dtp, pc, &sym, &dts) == 0) { 1861 (void) snprintf(c, sizeof (c), "%s`%s", 1862 dts.dts_object, dts.dts_name); 1863 } else { 1864 /* 1865 * We'll repeat the lookup, but this time we'll specify a 1866 * NULL GElf_Sym -- indicating that we're only interested in 1867 * the containing module. 1868 */ 1869 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) { 1870 (void) snprintf(c, sizeof (c), "%s`0x%llx", 1871 dts.dts_object, (unsigned long long)pc); 1872 } else { 1873 (void) snprintf(c, sizeof (c), "0x%llx", 1874 (unsigned long long)pc); 1875 } 1876 } 1877 1878 if (dt_printf(dtp, fp, format, c) < 0) 1879 return (-1); 1880 1881 return (0); 1882 } 1883 1884 int 1885 dt_print_mod(dtrace_hdl_t *dtp, FILE *fp, const char *format, caddr_t addr) 1886 { 1887 /* LINTED - alignment */ 1888 uint64_t pc = *((uint64_t *)addr); 1889 dtrace_syminfo_t dts; 1890 char c[PATH_MAX * 2]; 1891 1892 if (format == NULL) 1893 format = " %-50s"; 1894 1895 if (dtrace_lookup_by_addr(dtp, pc, NULL, &dts) == 0) { 1896 (void) snprintf(c, sizeof (c), "%s", dts.dts_object); 1897 } else { 1898 (void) snprintf(c, sizeof (c), "0x%llx", (unsigned long long)pc); 1899 } 1900 1901 if (dt_printf(dtp, fp, format, c) < 0) 1902 return (-1); 1903 1904 return (0); 1905 } 1906 1907 typedef struct dt_normal { 1908 dtrace_aggvarid_t dtnd_id; 1909 uint64_t dtnd_normal; 1910 } dt_normal_t; 1911 1912 static int 1913 dt_normalize_agg(const dtrace_aggdata_t *aggdata, void *arg) 1914 { 1915 dt_normal_t *normal = arg; 1916 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1917 dtrace_aggvarid_t id = normal->dtnd_id; 1918 1919 if (agg->dtagd_nrecs == 0) 1920 return (DTRACE_AGGWALK_NEXT); 1921 1922 if (agg->dtagd_varid != id) 1923 return (DTRACE_AGGWALK_NEXT); 1924 1925 ((dtrace_aggdata_t *)aggdata)->dtada_normal = normal->dtnd_normal; 1926 return (DTRACE_AGGWALK_NORMALIZE); 1927 } 1928 1929 static int 1930 dt_normalize(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec) 1931 { 1932 dt_normal_t normal; 1933 caddr_t addr; 1934 1935 /* 1936 * We (should) have two records: the aggregation ID followed by the 1937 * normalization value. 
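     * For example, normalize(@io, 1024) arrives here as one record carrying
     * @io's aggregation variable ID and a second record carrying the
     * normalization value 1024 (@io being an arbitrary example aggregation).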
1938 */ 1939 addr = base + rec->dtrd_offset; 1940 1941 if (rec->dtrd_size != sizeof (dtrace_aggvarid_t)) 1942 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1943 1944 /* LINTED - alignment */ 1945 normal.dtnd_id = *((dtrace_aggvarid_t *)addr); 1946 rec++; 1947 1948 if (rec->dtrd_action != DTRACEACT_LIBACT) 1949 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1950 1951 if (rec->dtrd_arg != DT_ACT_NORMALIZE) 1952 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1953 1954 addr = base + rec->dtrd_offset; 1955 1956 switch (rec->dtrd_size) { 1957 case sizeof (uint64_t): 1958 /* LINTED - alignment */ 1959 normal.dtnd_normal = *((uint64_t *)addr); 1960 break; 1961 case sizeof (uint32_t): 1962 /* LINTED - alignment */ 1963 normal.dtnd_normal = *((uint32_t *)addr); 1964 break; 1965 case sizeof (uint16_t): 1966 /* LINTED - alignment */ 1967 normal.dtnd_normal = *((uint16_t *)addr); 1968 break; 1969 case sizeof (uint8_t): 1970 normal.dtnd_normal = *((uint8_t *)addr); 1971 break; 1972 default: 1973 return (dt_set_errno(dtp, EDT_BADNORMAL)); 1974 } 1975 1976 (void) dtrace_aggregate_walk(dtp, dt_normalize_agg, &normal); 1977 1978 return (0); 1979 } 1980 1981 static int 1982 dt_denormalize_agg(const dtrace_aggdata_t *aggdata, void *arg) 1983 { 1984 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 1985 dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg); 1986 1987 if (agg->dtagd_nrecs == 0) 1988 return (DTRACE_AGGWALK_NEXT); 1989 1990 if (agg->dtagd_varid != id) 1991 return (DTRACE_AGGWALK_NEXT); 1992 1993 return (DTRACE_AGGWALK_DENORMALIZE); 1994 } 1995 1996 static int 1997 dt_clear_agg(const dtrace_aggdata_t *aggdata, void *arg) 1998 { 1999 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 2000 dtrace_aggvarid_t id = *((dtrace_aggvarid_t *)arg); 2001 2002 if (agg->dtagd_nrecs == 0) 2003 return (DTRACE_AGGWALK_NEXT); 2004 2005 if (agg->dtagd_varid != id) 2006 return (DTRACE_AGGWALK_NEXT); 2007 2008 return (DTRACE_AGGWALK_CLEAR); 2009 } 2010 2011 typedef struct dt_trunc { 2012 dtrace_aggvarid_t dttd_id; 2013 uint64_t dttd_remaining; 2014 } dt_trunc_t; 2015 2016 static int 2017 dt_trunc_agg(const dtrace_aggdata_t *aggdata, void *arg) 2018 { 2019 dt_trunc_t *trunc = arg; 2020 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 2021 dtrace_aggvarid_t id = trunc->dttd_id; 2022 2023 if (agg->dtagd_nrecs == 0) 2024 return (DTRACE_AGGWALK_NEXT); 2025 2026 if (agg->dtagd_varid != id) 2027 return (DTRACE_AGGWALK_NEXT); 2028 2029 if (trunc->dttd_remaining == 0) 2030 return (DTRACE_AGGWALK_REMOVE); 2031 2032 trunc->dttd_remaining--; 2033 return (DTRACE_AGGWALK_NEXT); 2034 } 2035 2036 static int 2037 dt_trunc(dtrace_hdl_t *dtp, caddr_t base, dtrace_recdesc_t *rec) 2038 { 2039 dt_trunc_t trunc; 2040 caddr_t addr; 2041 int64_t remaining; 2042 int (*func)(dtrace_hdl_t *, dtrace_aggregate_f *, void *); 2043 2044 /* 2045 * We (should) have two records: the aggregation ID followed by the 2046 * number of aggregation entries after which the aggregation is to be 2047 * truncated. 
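     * For example, trunc(@io, 10) keeps only the ten highest-valued entries
     * of an example aggregation @io, while a negative count keeps the
     * lowest instead; the sign also selects which value-sorted walk is used
     * below.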
 */
	addr = base + rec->dtrd_offset;

	if (rec->dtrd_size != sizeof (dtrace_aggvarid_t))
		return (dt_set_errno(dtp, EDT_BADTRUNC));

	/* LINTED - alignment */
	trunc.dttd_id = *((dtrace_aggvarid_t *)addr);
	rec++;

	if (rec->dtrd_action != DTRACEACT_LIBACT)
		return (dt_set_errno(dtp, EDT_BADTRUNC));

	if (rec->dtrd_arg != DT_ACT_TRUNC)
		return (dt_set_errno(dtp, EDT_BADTRUNC));

	addr = base + rec->dtrd_offset;

	switch (rec->dtrd_size) {
	case sizeof (uint64_t):
		/* LINTED - alignment */
		remaining = *((int64_t *)addr);
		break;
	case sizeof (uint32_t):
		/* LINTED - alignment */
		remaining = *((int32_t *)addr);
		break;
	case sizeof (uint16_t):
		/* LINTED - alignment */
		remaining = *((int16_t *)addr);
		break;
	case sizeof (uint8_t):
		remaining = *((int8_t *)addr);
		break;
	default:
		return (dt_set_errno(dtp, EDT_BADTRUNC));
	}

	if (remaining < 0) {
		func = dtrace_aggregate_walk_valsorted;
		remaining = -remaining;
	} else {
		func = dtrace_aggregate_walk_valrevsorted;
	}

	assert(remaining >= 0);
	trunc.dttd_remaining = remaining;

	(void) func(dtp, dt_trunc_agg, &trunc);

	return (0);
}

static int
dt_print_datum(dtrace_hdl_t *dtp, FILE *fp, dtrace_recdesc_t *rec,
    caddr_t addr, size_t size, const dtrace_aggdata_t *aggdata,
    uint64_t normal, dt_print_aggdata_t *pd)
{
	int err, width;
	dtrace_actkind_t act = rec->dtrd_action;
	boolean_t packed = pd->dtpa_agghist || pd->dtpa_aggpack;
	dtrace_aggdesc_t *agg = aggdata->dtada_desc;

	static struct {
		size_t size;
		int width;
		int packedwidth;
	} *fmt, fmttab[] = {
		{ sizeof (uint8_t), 3, 3 },
		{ sizeof (uint16_t), 5, 5 },
		{ sizeof (uint32_t), 8, 8 },
		{ sizeof (uint64_t), 16, 16 },
		{ 0, -50, 16 }
	};

	if (packed && pd->dtpa_agghisthdr != agg->dtagd_varid) {
		dtrace_recdesc_t *r;

		width = 0;

		/*
		 * To print our quantization header for either an agghist or
		 * aggpack aggregation, we need to iterate through all of our
		 * records to determine their width.
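		 * As a worked example (hypothetical key), a key made up of a
		 * 4-byte pid and an 8-byte timestamp would accumulate
		 *
		 *	width = (8 + 1) + (16 + 1) = 26
		 *
		 * from the packedwidth column of fmttab before the total is
		 * handed to dt_print_quanthdr() or dt_print_quanthdr_packed().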
2132 */ 2133 for (r = rec; !DTRACEACT_ISAGG(r->dtrd_action); r++) { 2134 for (fmt = fmttab; fmt->size && 2135 fmt->size != r->dtrd_size; fmt++) 2136 continue; 2137 2138 width += fmt->packedwidth + 1; 2139 } 2140 2141 if (pd->dtpa_agghist) { 2142 if (dt_print_quanthdr(dtp, fp, width) < 0) 2143 return (-1); 2144 } else { 2145 if (dt_print_quanthdr_packed(dtp, fp, 2146 width, aggdata, r->dtrd_action) < 0) 2147 return (-1); 2148 } 2149 2150 pd->dtpa_agghisthdr = agg->dtagd_varid; 2151 } 2152 2153 if (pd->dtpa_agghist && DTRACEACT_ISAGG(act)) { 2154 char positives = aggdata->dtada_flags & DTRACE_A_HASPOSITIVES; 2155 char negatives = aggdata->dtada_flags & DTRACE_A_HASNEGATIVES; 2156 int64_t val; 2157 2158 assert(act == DTRACEAGG_SUM || act == DTRACEAGG_COUNT); 2159 val = (long long)*((uint64_t *)addr); 2160 2161 if (dt_printf(dtp, fp, " ") < 0) 2162 return (-1); 2163 2164 return (dt_print_quantline(dtp, fp, val, normal, 2165 aggdata->dtada_total, positives, negatives)); 2166 } 2167 2168 if (pd->dtpa_aggpack && DTRACEACT_ISAGG(act)) { 2169 switch (act) { 2170 case DTRACEAGG_QUANTIZE: 2171 return (dt_print_quantize_packed(dtp, 2172 fp, addr, size, aggdata)); 2173 case DTRACEAGG_LQUANTIZE: 2174 return (dt_print_lquantize_packed(dtp, 2175 fp, addr, size, aggdata)); 2176 default: 2177 break; 2178 } 2179 } 2180 2181 switch (act) { 2182 case DTRACEACT_STACK: 2183 return (dt_print_stack(dtp, fp, NULL, addr, 2184 rec->dtrd_arg, rec->dtrd_size / rec->dtrd_arg)); 2185 2186 case DTRACEACT_USTACK: 2187 case DTRACEACT_JSTACK: 2188 return (dt_print_ustack(dtp, fp, NULL, addr, rec->dtrd_arg)); 2189 2190 case DTRACEACT_USYM: 2191 case DTRACEACT_UADDR: 2192 return (dt_print_usym(dtp, fp, addr, act)); 2193 2194 case DTRACEACT_UMOD: 2195 return (dt_print_umod(dtp, fp, NULL, addr)); 2196 2197 case DTRACEACT_SYM: 2198 return (dt_print_sym(dtp, fp, NULL, addr)); 2199 2200 case DTRACEACT_MOD: 2201 return (dt_print_mod(dtp, fp, NULL, addr)); 2202 2203 case DTRACEAGG_QUANTIZE: 2204 return (dt_print_quantize(dtp, fp, addr, size, normal)); 2205 2206 case DTRACEAGG_LQUANTIZE: 2207 return (dt_print_lquantize(dtp, fp, addr, size, normal)); 2208 2209 case DTRACEAGG_LLQUANTIZE: 2210 return (dt_print_llquantize(dtp, fp, addr, size, normal)); 2211 2212 case DTRACEAGG_AVG: 2213 return (dt_print_average(dtp, fp, addr, size, normal)); 2214 2215 case DTRACEAGG_STDDEV: 2216 return (dt_print_stddev(dtp, fp, addr, size, normal)); 2217 2218 default: 2219 break; 2220 } 2221 2222 for (fmt = fmttab; fmt->size && fmt->size != size; fmt++) 2223 continue; 2224 2225 width = packed ? 
fmt->packedwidth : fmt->width; 2226 2227 switch (size) { 2228 case sizeof (uint64_t): 2229 err = dt_printf(dtp, fp, " %*lld", width, 2230 /* LINTED - alignment */ 2231 (long long)*((uint64_t *)addr) / normal); 2232 break; 2233 case sizeof (uint32_t): 2234 /* LINTED - alignment */ 2235 err = dt_printf(dtp, fp, " %*d", width, *((uint32_t *)addr) / 2236 (uint32_t)normal); 2237 break; 2238 case sizeof (uint16_t): 2239 /* LINTED - alignment */ 2240 err = dt_printf(dtp, fp, " %*d", width, *((uint16_t *)addr) / 2241 (uint32_t)normal); 2242 break; 2243 case sizeof (uint8_t): 2244 err = dt_printf(dtp, fp, " %*d", width, *((uint8_t *)addr) / 2245 (uint32_t)normal); 2246 break; 2247 default: 2248 err = dt_print_bytes(dtp, fp, addr, size, width, 0, 0); 2249 break; 2250 } 2251 2252 return (err); 2253 } 2254 2255 static int 2256 dt_print_aggs(const dtrace_aggdata_t **aggsdata, int naggvars, void *arg) 2257 { 2258 int i, aggact = 0; 2259 dt_print_aggdata_t *pd = arg; 2260 const dtrace_aggdata_t *aggdata = aggsdata[0]; 2261 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 2262 FILE *fp = pd->dtpa_fp; 2263 dtrace_hdl_t *dtp = pd->dtpa_dtp; 2264 dtrace_recdesc_t *rec; 2265 dtrace_actkind_t act; 2266 caddr_t addr; 2267 size_t size; 2268 2269 pd->dtpa_agghist = (aggdata->dtada_flags & DTRACE_A_TOTAL); 2270 pd->dtpa_aggpack = (aggdata->dtada_flags & DTRACE_A_MINMAXBIN); 2271 2272 /* 2273 * Iterate over each record description in the key, printing the traced 2274 * data, skipping the first datum (the tuple member created by the 2275 * compiler). 2276 */ 2277 for (i = 1; i < agg->dtagd_nrecs; i++) { 2278 rec = &agg->dtagd_rec[i]; 2279 act = rec->dtrd_action; 2280 addr = aggdata->dtada_data + rec->dtrd_offset; 2281 size = rec->dtrd_size; 2282 2283 if (DTRACEACT_ISAGG(act)) { 2284 aggact = i; 2285 break; 2286 } 2287 2288 if (dt_print_datum(dtp, fp, rec, addr, 2289 size, aggdata, 1, pd) < 0) 2290 return (-1); 2291 2292 if (dt_buffered_flush(dtp, NULL, rec, aggdata, 2293 DTRACE_BUFDATA_AGGKEY) < 0) 2294 return (-1); 2295 } 2296 2297 assert(aggact != 0); 2298 2299 for (i = (naggvars == 1 ? 
0 : 1); i < naggvars; i++) { 2300 uint64_t normal; 2301 2302 aggdata = aggsdata[i]; 2303 agg = aggdata->dtada_desc; 2304 rec = &agg->dtagd_rec[aggact]; 2305 act = rec->dtrd_action; 2306 addr = aggdata->dtada_data + rec->dtrd_offset; 2307 size = rec->dtrd_size; 2308 2309 assert(DTRACEACT_ISAGG(act)); 2310 normal = aggdata->dtada_normal; 2311 2312 if (dt_print_datum(dtp, fp, rec, addr, 2313 size, aggdata, normal, pd) < 0) 2314 return (-1); 2315 2316 if (dt_buffered_flush(dtp, NULL, rec, aggdata, 2317 DTRACE_BUFDATA_AGGVAL) < 0) 2318 return (-1); 2319 2320 if (!pd->dtpa_allunprint) 2321 agg->dtagd_flags |= DTRACE_AGD_PRINTED; 2322 } 2323 2324 if (!pd->dtpa_agghist && !pd->dtpa_aggpack) { 2325 if (dt_printf(dtp, fp, "\n") < 0) 2326 return (-1); 2327 } 2328 2329 if (dt_buffered_flush(dtp, NULL, NULL, aggdata, 2330 DTRACE_BUFDATA_AGGFORMAT | DTRACE_BUFDATA_AGGLAST) < 0) 2331 return (-1); 2332 2333 return (0); 2334 } 2335 2336 int 2337 dt_print_agg(const dtrace_aggdata_t *aggdata, void *arg) 2338 { 2339 dt_print_aggdata_t *pd = arg; 2340 dtrace_aggdesc_t *agg = aggdata->dtada_desc; 2341 dtrace_aggvarid_t aggvarid = pd->dtpa_id; 2342 2343 if (pd->dtpa_allunprint) { 2344 if (agg->dtagd_flags & DTRACE_AGD_PRINTED) 2345 return (0); 2346 } else { 2347 /* 2348 * If we're not printing all unprinted aggregations, then the 2349 * aggregation variable ID denotes a specific aggregation 2350 * variable that we should print -- skip any other aggregations 2351 * that we encounter. 2352 */ 2353 if (agg->dtagd_nrecs == 0) 2354 return (0); 2355 2356 if (aggvarid != agg->dtagd_varid) 2357 return (0); 2358 } 2359 2360 return (dt_print_aggs(&aggdata, 1, arg)); 2361 } 2362 2363 static int 2364 dt_setopt(dtrace_hdl_t *dtp, const dtrace_probedata_t *data, 2365 const char *option, const char *value) 2366 { 2367 int len, rval; 2368 char *msg; 2369 const char *errstr; 2370 dtrace_setoptdata_t optdata; 2371 2372 bzero(&optdata, sizeof (optdata)); 2373 (void) dtrace_getopt(dtp, option, &optdata.dtsda_oldval); 2374 2375 if (dtrace_setopt(dtp, option, value) == 0) { 2376 (void) dtrace_getopt(dtp, option, &optdata.dtsda_newval); 2377 optdata.dtsda_probe = data; 2378 optdata.dtsda_option = option; 2379 optdata.dtsda_handle = dtp; 2380 2381 if ((rval = dt_handle_setopt(dtp, &optdata)) != 0) 2382 return (rval); 2383 2384 return (0); 2385 } 2386 2387 errstr = dtrace_errmsg(dtp, dtrace_errno(dtp)); 2388 len = strlen(option) + strlen(value) + strlen(errstr) + 80; 2389 msg = alloca(len); 2390 2391 (void) snprintf(msg, len, "couldn't set option \"%s\" to \"%s\": %s\n", 2392 option, value, errstr); 2393 2394 if ((rval = dt_handle_liberr(dtp, data, msg)) == 0) 2395 return (0); 2396 2397 return (rval); 2398 } 2399 2400 static int 2401 dt_consume_cpu(dtrace_hdl_t *dtp, FILE *fp, int cpu, 2402 dtrace_bufdesc_t *buf, boolean_t just_one, 2403 dtrace_consume_probe_f *efunc, dtrace_consume_rec_f *rfunc, void *arg) 2404 { 2405 dtrace_epid_t id; 2406 size_t offs; 2407 int flow = (dtp->dt_options[DTRACEOPT_FLOWINDENT] != DTRACEOPT_UNSET); 2408 int quiet = (dtp->dt_options[DTRACEOPT_QUIET] != DTRACEOPT_UNSET); 2409 int rval, i, n; 2410 uint64_t tracememsize = 0; 2411 dtrace_probedata_t data; 2412 uint64_t drops; 2413 2414 bzero(&data, sizeof (data)); 2415 data.dtpda_handle = dtp; 2416 data.dtpda_cpu = cpu; 2417 data.dtpda_flow = dtp->dt_flow; 2418 data.dtpda_indent = dtp->dt_indent; 2419 data.dtpda_prefix = dtp->dt_prefix; 2420 2421 for (offs = buf->dtbd_oldest; offs < buf->dtbd_size; ) { 2422 dtrace_eprobedesc_t *epd; 2423 2424 /* 2425 * We're 
guaranteed to have an ID. 2426 */ 2427 id = *(uint32_t *)((uintptr_t)buf->dtbd_data + offs); 2428 2429 if (id == DTRACE_EPIDNONE) { 2430 /* 2431 * This is filler to assure proper alignment of the 2432 * next record; we simply ignore it. 2433 */ 2434 offs += sizeof (id); 2435 continue; 2436 } 2437 2438 if ((rval = dt_epid_lookup(dtp, id, &data.dtpda_edesc, 2439 &data.dtpda_pdesc)) != 0) 2440 return (rval); 2441 2442 epd = data.dtpda_edesc; 2443 data.dtpda_data = buf->dtbd_data + offs; 2444 2445 if (data.dtpda_edesc->dtepd_uarg != DT_ECB_DEFAULT) { 2446 rval = dt_handle(dtp, &data); 2447 2448 if (rval == DTRACE_CONSUME_NEXT) 2449 goto nextepid; 2450 2451 if (rval == DTRACE_CONSUME_ERROR) 2452 return (-1); 2453 } 2454 2455 if (flow) 2456 (void) dt_flowindent(dtp, &data, dtp->dt_last_epid, 2457 buf, offs); 2458 2459 rval = (*efunc)(&data, arg); 2460 2461 if (flow) { 2462 if (data.dtpda_flow == DTRACEFLOW_ENTRY) 2463 data.dtpda_indent += 2; 2464 } 2465 2466 if (rval == DTRACE_CONSUME_NEXT) 2467 goto nextepid; 2468 2469 if (rval == DTRACE_CONSUME_ABORT) 2470 return (dt_set_errno(dtp, EDT_DIRABORT)); 2471 2472 if (rval != DTRACE_CONSUME_THIS) 2473 return (dt_set_errno(dtp, EDT_BADRVAL)); 2474 2475 for (i = 0; i < epd->dtepd_nrecs; i++) { 2476 caddr_t addr; 2477 dtrace_recdesc_t *rec = &epd->dtepd_rec[i]; 2478 dtrace_actkind_t act = rec->dtrd_action; 2479 2480 data.dtpda_data = buf->dtbd_data + offs + 2481 rec->dtrd_offset; 2482 addr = data.dtpda_data; 2483 2484 if (act == DTRACEACT_LIBACT) { 2485 uint64_t arg = rec->dtrd_arg; 2486 dtrace_aggvarid_t id; 2487 2488 switch (arg) { 2489 case DT_ACT_CLEAR: 2490 /* LINTED - alignment */ 2491 id = *((dtrace_aggvarid_t *)addr); 2492 (void) dtrace_aggregate_walk(dtp, 2493 dt_clear_agg, &id); 2494 continue; 2495 2496 case DT_ACT_DENORMALIZE: 2497 /* LINTED - alignment */ 2498 id = *((dtrace_aggvarid_t *)addr); 2499 (void) dtrace_aggregate_walk(dtp, 2500 dt_denormalize_agg, &id); 2501 continue; 2502 2503 case DT_ACT_FTRUNCATE: 2504 if (fp == NULL) 2505 continue; 2506 2507 (void) fflush(fp); 2508 (void) ftruncate(fileno(fp), 0); 2509 (void) fseeko(fp, 0, SEEK_SET); 2510 continue; 2511 2512 case DT_ACT_NORMALIZE: 2513 if (i == epd->dtepd_nrecs - 1) 2514 return (dt_set_errno(dtp, 2515 EDT_BADNORMAL)); 2516 2517 if (dt_normalize(dtp, 2518 buf->dtbd_data + offs, rec) != 0) 2519 return (-1); 2520 2521 i++; 2522 continue; 2523 2524 case DT_ACT_SETOPT: { 2525 uint64_t *opts = dtp->dt_options; 2526 dtrace_recdesc_t *valrec; 2527 uint32_t valsize; 2528 caddr_t val; 2529 int rv; 2530 2531 if (i == epd->dtepd_nrecs - 1) { 2532 return (dt_set_errno(dtp, 2533 EDT_BADSETOPT)); 2534 } 2535 2536 valrec = &epd->dtepd_rec[++i]; 2537 valsize = valrec->dtrd_size; 2538 2539 if (valrec->dtrd_action != act || 2540 valrec->dtrd_arg != arg) { 2541 return (dt_set_errno(dtp, 2542 EDT_BADSETOPT)); 2543 } 2544 2545 if (valsize > sizeof (uint64_t)) { 2546 val = buf->dtbd_data + offs + 2547 valrec->dtrd_offset; 2548 } else { 2549 val = "1"; 2550 } 2551 2552 rv = dt_setopt(dtp, &data, addr, val); 2553 2554 if (rv != 0) 2555 return (-1); 2556 2557 flow = (opts[DTRACEOPT_FLOWINDENT] != 2558 DTRACEOPT_UNSET); 2559 quiet = (opts[DTRACEOPT_QUIET] != 2560 DTRACEOPT_UNSET); 2561 2562 continue; 2563 } 2564 2565 case DT_ACT_TRUNC: 2566 if (i == epd->dtepd_nrecs - 1) 2567 return (dt_set_errno(dtp, 2568 EDT_BADTRUNC)); 2569 2570 if (dt_trunc(dtp, 2571 buf->dtbd_data + offs, rec) != 0) 2572 return (-1); 2573 2574 i++; 2575 continue; 2576 2577 default: 2578 continue; 2579 } 2580 } 2581 2582 if (act == 
DTRACEACT_TRACEMEM_DYNSIZE && 2583 rec->dtrd_size == sizeof (uint64_t)) { 2584 /* LINTED - alignment */ 2585 tracememsize = *((unsigned long long *)addr); 2586 continue; 2587 } 2588 2589 rval = (*rfunc)(&data, rec, arg); 2590 2591 if (rval == DTRACE_CONSUME_NEXT) 2592 continue; 2593 2594 if (rval == DTRACE_CONSUME_ABORT) 2595 return (dt_set_errno(dtp, EDT_DIRABORT)); 2596 2597 if (rval != DTRACE_CONSUME_THIS) 2598 return (dt_set_errno(dtp, EDT_BADRVAL)); 2599 2600 if (act == DTRACEACT_STACK) { 2601 int depth = rec->dtrd_arg; 2602 2603 if (dt_print_stack(dtp, fp, NULL, addr, depth, 2604 rec->dtrd_size / depth) < 0) 2605 return (-1); 2606 goto nextrec; 2607 } 2608 2609 if (act == DTRACEACT_USTACK || 2610 act == DTRACEACT_JSTACK) { 2611 if (dt_print_ustack(dtp, fp, NULL, 2612 addr, rec->dtrd_arg) < 0) 2613 return (-1); 2614 goto nextrec; 2615 } 2616 2617 if (act == DTRACEACT_SYM) { 2618 if (dt_print_sym(dtp, fp, NULL, addr) < 0) 2619 return (-1); 2620 goto nextrec; 2621 } 2622 2623 if (act == DTRACEACT_MOD) { 2624 if (dt_print_mod(dtp, fp, NULL, addr) < 0) 2625 return (-1); 2626 goto nextrec; 2627 } 2628 2629 if (act == DTRACEACT_USYM || act == DTRACEACT_UADDR) { 2630 if (dt_print_usym(dtp, fp, addr, act) < 0) 2631 return (-1); 2632 goto nextrec; 2633 } 2634 2635 if (act == DTRACEACT_UMOD) { 2636 if (dt_print_umod(dtp, fp, NULL, addr) < 0) 2637 return (-1); 2638 goto nextrec; 2639 } 2640 2641 if (act == DTRACEACT_PRINTM) { 2642 if (dt_print_memory(dtp, fp, addr) < 0) 2643 return (-1); 2644 goto nextrec; 2645 } 2646 2647 if (act == DTRACEACT_PRINTT) { 2648 if (dt_print_type(dtp, fp, addr) < 0) 2649 return (-1); 2650 goto nextrec; 2651 } 2652 2653 if (DTRACEACT_ISPRINTFLIKE(act)) { 2654 void *fmtdata; 2655 int (*func)(dtrace_hdl_t *, FILE *, void *, 2656 const dtrace_probedata_t *, 2657 const dtrace_recdesc_t *, uint_t, 2658 const void *buf, size_t); 2659 2660 if ((fmtdata = dt_format_lookup(dtp, 2661 rec->dtrd_format)) == NULL) 2662 goto nofmt; 2663 2664 switch (act) { 2665 case DTRACEACT_PRINTF: 2666 func = dtrace_fprintf; 2667 break; 2668 case DTRACEACT_PRINTA: 2669 func = dtrace_fprinta; 2670 break; 2671 case DTRACEACT_SYSTEM: 2672 func = dtrace_system; 2673 break; 2674 case DTRACEACT_FREOPEN: 2675 func = dtrace_freopen; 2676 break; 2677 default: 2678 return (dt_set_errno(dtp, EDT_BADAGG)); 2679 } 2680 2681 n = (*func)(dtp, fp, fmtdata, &data, 2682 rec, epd->dtepd_nrecs - i, 2683 (uchar_t *)buf->dtbd_data + offs, 2684 buf->dtbd_size - offs); 2685 2686 if (n < 0) 2687 return (-1); /* errno is set for us */ 2688 2689 if (n > 0) 2690 i += n - 1; 2691 goto nextrec; 2692 } 2693 2694 /* 2695 * If this is a DIF expression, and the record has a 2696 * format set, this indicates we have a CTF type name 2697 * associated with the data and we should try to print 2698 * it out by type. 2699 */ 2700 if (act == DTRACEACT_DIFEXPR) { 2701 const char *strdata = dt_strdata_lookup(dtp, 2702 rec->dtrd_format); 2703 if (strdata != NULL) { 2704 n = dtrace_print(dtp, fp, strdata, 2705 addr, rec->dtrd_size); 2706 2707 /* 2708 * dtrace_print() will return -1 on 2709 * error, or return the number of bytes 2710 * consumed. It will return 0 if the 2711 * type couldn't be determined, and we 2712 * should fall through to the normal 2713 * trace method. 
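					 * As an illustration: a clause
					 * that uses print() rather than
					 * trace() compiles to a
					 * DTRACEACT_DIFEXPR record whose
					 * format slot names the CTF type,
					 * which is what dt_strdata_lookup()
					 * handed back above; trace() leaves
					 * the format unset, so its records
					 * skip this path and are rendered
					 * by size further down.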
2714 */ 2715 if (n < 0) 2716 return (-1); 2717 2718 if (n > 0) 2719 goto nextrec; 2720 } 2721 } 2722 2723 nofmt: 2724 if (act == DTRACEACT_PRINTA) { 2725 dt_print_aggdata_t pd; 2726 dtrace_aggvarid_t *aggvars; 2727 int j, naggvars = 0; 2728 size_t size = ((epd->dtepd_nrecs - i) * 2729 sizeof (dtrace_aggvarid_t)); 2730 2731 if ((aggvars = dt_alloc(dtp, size)) == NULL) 2732 return (-1); 2733 2734 /* 2735 * This might be a printa() with multiple 2736 * aggregation variables. We need to scan 2737 * forward through the records until we find 2738 * a record from a different statement. 2739 */ 2740 for (j = i; j < epd->dtepd_nrecs; j++) { 2741 dtrace_recdesc_t *nrec; 2742 caddr_t naddr; 2743 2744 nrec = &epd->dtepd_rec[j]; 2745 2746 if (nrec->dtrd_uarg != rec->dtrd_uarg) 2747 break; 2748 2749 if (nrec->dtrd_action != act) { 2750 return (dt_set_errno(dtp, 2751 EDT_BADAGG)); 2752 } 2753 2754 naddr = buf->dtbd_data + offs + 2755 nrec->dtrd_offset; 2756 2757 aggvars[naggvars++] = 2758 /* LINTED - alignment */ 2759 *((dtrace_aggvarid_t *)naddr); 2760 } 2761 2762 i = j - 1; 2763 bzero(&pd, sizeof (pd)); 2764 pd.dtpa_dtp = dtp; 2765 pd.dtpa_fp = fp; 2766 2767 assert(naggvars >= 1); 2768 2769 if (naggvars == 1) { 2770 pd.dtpa_id = aggvars[0]; 2771 dt_free(dtp, aggvars); 2772 2773 if (dt_printf(dtp, fp, "\n") < 0 || 2774 dtrace_aggregate_walk_sorted(dtp, 2775 dt_print_agg, &pd) < 0) 2776 return (-1); 2777 goto nextrec; 2778 } 2779 2780 if (dt_printf(dtp, fp, "\n") < 0 || 2781 dtrace_aggregate_walk_joined(dtp, aggvars, 2782 naggvars, dt_print_aggs, &pd) < 0) { 2783 dt_free(dtp, aggvars); 2784 return (-1); 2785 } 2786 2787 dt_free(dtp, aggvars); 2788 goto nextrec; 2789 } 2790 2791 if (act == DTRACEACT_TRACEMEM) { 2792 if (tracememsize == 0 || 2793 tracememsize > rec->dtrd_size) { 2794 tracememsize = rec->dtrd_size; 2795 } 2796 2797 n = dt_print_bytes(dtp, fp, addr, 2798 tracememsize, -33, quiet, 1); 2799 2800 tracememsize = 0; 2801 2802 if (n < 0) 2803 return (-1); 2804 2805 goto nextrec; 2806 } 2807 2808 switch (rec->dtrd_size) { 2809 case sizeof (uint64_t): 2810 n = dt_printf(dtp, fp, 2811 quiet ? "%lld" : " %16lld", 2812 /* LINTED - alignment */ 2813 *((unsigned long long *)addr)); 2814 break; 2815 case sizeof (uint32_t): 2816 n = dt_printf(dtp, fp, quiet ? "%d" : " %8d", 2817 /* LINTED - alignment */ 2818 *((uint32_t *)addr)); 2819 break; 2820 case sizeof (uint16_t): 2821 n = dt_printf(dtp, fp, quiet ? "%d" : " %5d", 2822 /* LINTED - alignment */ 2823 *((uint16_t *)addr)); 2824 break; 2825 case sizeof (uint8_t): 2826 n = dt_printf(dtp, fp, quiet ? "%d" : " %3d", 2827 *((uint8_t *)addr)); 2828 break; 2829 default: 2830 n = dt_print_bytes(dtp, fp, addr, 2831 rec->dtrd_size, -33, quiet, 0); 2832 break; 2833 } 2834 2835 if (n < 0) 2836 return (-1); /* errno is set for us */ 2837 2838 nextrec: 2839 if (dt_buffered_flush(dtp, &data, rec, NULL, 0) < 0) 2840 return (-1); /* errno is set for us */ 2841 } 2842 2843 /* 2844 * Call the record callback with a NULL record to indicate 2845 * that we're done processing this EPID. 2846 */ 2847 rval = (*rfunc)(&data, NULL, arg); 2848 nextepid: 2849 offs += epd->dtepd_size; 2850 dtp->dt_last_epid = id; 2851 if (just_one) { 2852 buf->dtbd_oldest = offs; 2853 break; 2854 } 2855 } 2856 2857 dtp->dt_flow = data.dtpda_flow; 2858 dtp->dt_indent = data.dtpda_indent; 2859 dtp->dt_prefix = data.dtpda_prefix; 2860 2861 if ((drops = buf->dtbd_drops) == 0) 2862 return (0); 2863 2864 /* 2865 * Explicitly zero the drops to prevent us from processing them again. 
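	 * The count is not printed here; it is reported to the consumer's
	 * drop handler (if one was registered with dtrace_handle_drop()) by
	 * the dt_handle_cpudrop() call below, tagged as a DTRACEDROP_PRINCIPAL
	 * drop on this CPU.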
 */
	buf->dtbd_drops = 0;

	return (dt_handle_cpudrop(dtp, cpu, DTRACEDROP_PRINCIPAL, drops));
}

/*
 * Reduce memory usage by shrinking the buffer if it's no more than half full.
 * Note, we need to preserve the alignment of the data at dtbd_oldest, which is
 * only 4-byte aligned.
 */
static void
dt_realloc_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf, int cursize)
{
	uint64_t used = buf->dtbd_size - buf->dtbd_oldest;
	if (used < cursize / 2) {
		int misalign = buf->dtbd_oldest & (sizeof (uint64_t) - 1);
		char *newdata = dt_alloc(dtp, used + misalign);
		if (newdata == NULL)
			return;
		bzero(newdata, misalign);
		bcopy(buf->dtbd_data + buf->dtbd_oldest,
		    newdata + misalign, used);
		dt_free(dtp, buf->dtbd_data);
		buf->dtbd_oldest = misalign;
		buf->dtbd_size = used + misalign;
		buf->dtbd_data = newdata;
	}
}

/*
 * If the ring buffer has wrapped, the data is not in order.  Rearrange it
 * so that it is.  Note, we need to preserve the alignment of the data at
 * dtbd_oldest, which is only 4-byte aligned.
 */
static int
dt_unring_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf)
{
	int misalign;
	char *newdata, *ndp;

	if (buf->dtbd_oldest == 0)
		return (0);

	misalign = buf->dtbd_oldest & (sizeof (uint64_t) - 1);
	newdata = ndp = dt_alloc(dtp, buf->dtbd_size + misalign);

	if (newdata == NULL)
		return (-1);

	assert(0 == (buf->dtbd_size & (sizeof (uint64_t) - 1)));

	bzero(ndp, misalign);
	ndp += misalign;

	bcopy(buf->dtbd_data + buf->dtbd_oldest, ndp,
	    buf->dtbd_size - buf->dtbd_oldest);
	ndp += buf->dtbd_size - buf->dtbd_oldest;

	bcopy(buf->dtbd_data, ndp, buf->dtbd_oldest);

	dt_free(dtp, buf->dtbd_data);
	buf->dtbd_oldest = 0;
	buf->dtbd_data = newdata;
	buf->dtbd_size += misalign;

	return (0);
}

static void
dt_put_buf(dtrace_hdl_t *dtp, dtrace_bufdesc_t *buf)
{
	dt_free(dtp, buf->dtbd_data);
	dt_free(dtp, buf);
}

/*
 * Returns 0 on success, in which case *bufp is filled in with the retrieved
 * data, or set to NULL if there is no data for this CPU.
 * Returns -1 on failure and sets dt_errno.
 */
static int
dt_get_buf(dtrace_hdl_t *dtp, int cpu, dtrace_bufdesc_t **bufp)
{
	dtrace_optval_t size;
	dtrace_bufdesc_t *buf = dt_zalloc(dtp, sizeof (*buf));
	int error, rval;

	if (buf == NULL)
		return (-1);

	(void) dtrace_getopt(dtp, "bufsize", &size);
	buf->dtbd_data = dt_alloc(dtp, size);
	if (buf->dtbd_data == NULL) {
		dt_free(dtp, buf);
		return (-1);
	}
	buf->dtbd_size = size;
	buf->dtbd_cpu = cpu;

#ifdef illumos
	if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, buf) == -1) {
#else
	if (dt_ioctl(dtp, DTRACEIOC_BUFSNAP, &buf) == -1) {
#endif
		/*
		 * If we failed with ENOENT, it may be because the
		 * CPU was unconfigured -- this is okay.  Any other
		 * error, however, is unexpected.
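		 * Callers treat a NULL *bufp as "no data for this CPU" rather
		 * than as a failure; e.g. (sketch of the pattern used by
		 * dtrace_consume() below):
		 *
		 *	if (dt_get_buf(dtp, i, &buf) != 0)
		 *		return (-1);
		 *	if (buf == NULL)
		 *		continue;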
2975 */ 2976 if (errno == ENOENT) { 2977 *bufp = NULL; 2978 rval = 0; 2979 } else 2980 rval = dt_set_errno(dtp, errno); 2981 2982 dt_put_buf(dtp, buf); 2983 return (rval); 2984 } 2985 2986 error = dt_unring_buf(dtp, buf); 2987 if (error != 0) { 2988 dt_put_buf(dtp, buf); 2989 return (error); 2990 } 2991 dt_realloc_buf(dtp, buf, size); 2992 2993 *bufp = buf; 2994 return (0); 2995 } 2996 2997 typedef struct dt_begin { 2998 dtrace_consume_probe_f *dtbgn_probefunc; 2999 dtrace_consume_rec_f *dtbgn_recfunc; 3000 void *dtbgn_arg; 3001 dtrace_handle_err_f *dtbgn_errhdlr; 3002 void *dtbgn_errarg; 3003 int dtbgn_beginonly; 3004 } dt_begin_t; 3005 3006 static int 3007 dt_consume_begin_probe(const dtrace_probedata_t *data, void *arg) 3008 { 3009 dt_begin_t *begin = arg; 3010 dtrace_probedesc_t *pd = data->dtpda_pdesc; 3011 3012 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0); 3013 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0); 3014 3015 if (begin->dtbgn_beginonly) { 3016 if (!(r1 && r2)) 3017 return (DTRACE_CONSUME_NEXT); 3018 } else { 3019 if (r1 && r2) 3020 return (DTRACE_CONSUME_NEXT); 3021 } 3022 3023 /* 3024 * We have a record that we're interested in. Now call the underlying 3025 * probe function... 3026 */ 3027 return (begin->dtbgn_probefunc(data, begin->dtbgn_arg)); 3028 } 3029 3030 static int 3031 dt_consume_begin_record(const dtrace_probedata_t *data, 3032 const dtrace_recdesc_t *rec, void *arg) 3033 { 3034 dt_begin_t *begin = arg; 3035 3036 return (begin->dtbgn_recfunc(data, rec, begin->dtbgn_arg)); 3037 } 3038 3039 static int 3040 dt_consume_begin_error(const dtrace_errdata_t *data, void *arg) 3041 { 3042 dt_begin_t *begin = (dt_begin_t *)arg; 3043 dtrace_probedesc_t *pd = data->dteda_pdesc; 3044 3045 int r1 = (strcmp(pd->dtpd_provider, "dtrace") == 0); 3046 int r2 = (strcmp(pd->dtpd_name, "BEGIN") == 0); 3047 3048 if (begin->dtbgn_beginonly) { 3049 if (!(r1 && r2)) 3050 return (DTRACE_HANDLE_OK); 3051 } else { 3052 if (r1 && r2) 3053 return (DTRACE_HANDLE_OK); 3054 } 3055 3056 return (begin->dtbgn_errhdlr(data, begin->dtbgn_errarg)); 3057 } 3058 3059 static int 3060 dt_consume_begin(dtrace_hdl_t *dtp, FILE *fp, 3061 dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg) 3062 { 3063 /* 3064 * There's this idea that the BEGIN probe should be processed before 3065 * everything else, and that the END probe should be processed after 3066 * anything else. In the common case, this is pretty easy to deal 3067 * with. However, a situation may arise where the BEGIN enabling and 3068 * END enabling are on the same CPU, and some enabling in the middle 3069 * occurred on a different CPU. To deal with this (blech!) we need to 3070 * consume the BEGIN buffer up until the end of the BEGIN probe, and 3071 * then set it aside. We will then process every other CPU, and then 3072 * we'll return to the BEGIN CPU and process the rest of the data 3073 * (which will inevitably include the END probe, if any). Making this 3074 * even more complicated (!) is the library's ERROR enabling. Because 3075 * this enabling is processed before we even get into the consume call 3076 * back, any ERROR firing would result in the library's ERROR enabling 3077 * being processed twice -- once in our first pass (for BEGIN probes), 3078 * and again in our second pass (for everything but BEGIN probes). 
To 3079 * deal with this, we interpose on the ERROR handler to assure that we 3080 * only process ERROR enablings induced by BEGIN enablings in the 3081 * first pass, and that we only process ERROR enablings _not_ induced 3082 * by BEGIN enablings in the second pass. 3083 */ 3084 3085 dt_begin_t begin; 3086 processorid_t cpu = dtp->dt_beganon; 3087 int rval, i; 3088 static int max_ncpus; 3089 dtrace_bufdesc_t *buf; 3090 3091 dtp->dt_beganon = -1; 3092 3093 if (dt_get_buf(dtp, cpu, &buf) != 0) 3094 return (-1); 3095 if (buf == NULL) 3096 return (0); 3097 3098 if (!dtp->dt_stopped || buf->dtbd_cpu != dtp->dt_endedon) { 3099 /* 3100 * This is the simple case. We're either not stopped, or if 3101 * we are, we actually processed any END probes on another 3102 * CPU. We can simply consume this buffer and return. 3103 */ 3104 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE, 3105 pf, rf, arg); 3106 dt_put_buf(dtp, buf); 3107 return (rval); 3108 } 3109 3110 begin.dtbgn_probefunc = pf; 3111 begin.dtbgn_recfunc = rf; 3112 begin.dtbgn_arg = arg; 3113 begin.dtbgn_beginonly = 1; 3114 3115 /* 3116 * We need to interpose on the ERROR handler to be sure that we 3117 * only process ERRORs induced by BEGIN. 3118 */ 3119 begin.dtbgn_errhdlr = dtp->dt_errhdlr; 3120 begin.dtbgn_errarg = dtp->dt_errarg; 3121 dtp->dt_errhdlr = dt_consume_begin_error; 3122 dtp->dt_errarg = &begin; 3123 3124 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE, 3125 dt_consume_begin_probe, dt_consume_begin_record, &begin); 3126 3127 dtp->dt_errhdlr = begin.dtbgn_errhdlr; 3128 dtp->dt_errarg = begin.dtbgn_errarg; 3129 3130 if (rval != 0) { 3131 dt_put_buf(dtp, buf); 3132 return (rval); 3133 } 3134 3135 if (max_ncpus == 0) 3136 max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1; 3137 3138 for (i = 0; i < max_ncpus; i++) { 3139 dtrace_bufdesc_t *nbuf; 3140 if (i == cpu) 3141 continue; 3142 3143 if (dt_get_buf(dtp, i, &nbuf) != 0) { 3144 dt_put_buf(dtp, buf); 3145 return (-1); 3146 } 3147 if (nbuf == NULL) 3148 continue; 3149 3150 rval = dt_consume_cpu(dtp, fp, i, nbuf, B_FALSE, 3151 pf, rf, arg); 3152 dt_put_buf(dtp, nbuf); 3153 if (rval != 0) { 3154 dt_put_buf(dtp, buf); 3155 return (rval); 3156 } 3157 } 3158 3159 /* 3160 * Okay -- we're done with the other buffers. Now we want to 3161 * reconsume the first buffer -- but this time we're looking for 3162 * everything _but_ BEGIN. And of course, in order to only consume 3163 * those ERRORs _not_ associated with BEGIN, we need to reinstall our 3164 * ERROR interposition function... 3165 */ 3166 begin.dtbgn_beginonly = 0; 3167 3168 assert(begin.dtbgn_errhdlr == dtp->dt_errhdlr); 3169 assert(begin.dtbgn_errarg == dtp->dt_errarg); 3170 dtp->dt_errhdlr = dt_consume_begin_error; 3171 dtp->dt_errarg = &begin; 3172 3173 rval = dt_consume_cpu(dtp, fp, cpu, buf, B_FALSE, 3174 dt_consume_begin_probe, dt_consume_begin_record, &begin); 3175 3176 dtp->dt_errhdlr = begin.dtbgn_errhdlr; 3177 dtp->dt_errarg = begin.dtbgn_errarg; 3178 3179 return (rval); 3180 } 3181 3182 /* ARGSUSED */ 3183 static uint64_t 3184 dt_buf_oldest(void *elem, void *arg) 3185 { 3186 dtrace_bufdesc_t *buf = elem; 3187 size_t offs = buf->dtbd_oldest; 3188 3189 while (offs < buf->dtbd_size) { 3190 dtrace_rechdr_t *dtrh = 3191 /* LINTED - alignment */ 3192 (dtrace_rechdr_t *)(buf->dtbd_data + offs); 3193 if (dtrh->dtrh_epid == DTRACE_EPIDNONE) { 3194 offs += sizeof (dtrace_epid_t); 3195 } else { 3196 return (DTRACE_RECORD_LOAD_TIMESTAMP(dtrh)); 3197 } 3198 } 3199 3200 /* There are no records left; use the time the buffer was retrieved. 
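	 * Returning dtbd_timestamp here is what lets the temporal merge in
	 * dtrace_consume() detect that a buffer is exhausted: once the key
	 * computed for a buffer equals the buffer's own snapshot time, every
	 * record it can contribute to the current pass has been consumed.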
*/ 3201 return (buf->dtbd_timestamp); 3202 } 3203 3204 int 3205 dtrace_consume(dtrace_hdl_t *dtp, FILE *fp, 3206 dtrace_consume_probe_f *pf, dtrace_consume_rec_f *rf, void *arg) 3207 { 3208 dtrace_optval_t size; 3209 static int max_ncpus; 3210 int i, rval; 3211 dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_SWITCHRATE]; 3212 hrtime_t now = gethrtime(); 3213 3214 if (dtp->dt_lastswitch != 0) { 3215 if (now - dtp->dt_lastswitch < interval) 3216 return (0); 3217 3218 dtp->dt_lastswitch += interval; 3219 } else { 3220 dtp->dt_lastswitch = now; 3221 } 3222 3223 if (!dtp->dt_active) 3224 return (dt_set_errno(dtp, EINVAL)); 3225 3226 if (max_ncpus == 0) 3227 max_ncpus = dt_sysconf(dtp, _SC_CPUID_MAX) + 1; 3228 3229 if (pf == NULL) 3230 pf = (dtrace_consume_probe_f *)dt_nullprobe; 3231 3232 if (rf == NULL) 3233 rf = (dtrace_consume_rec_f *)dt_nullrec; 3234 3235 if (dtp->dt_options[DTRACEOPT_TEMPORAL] == DTRACEOPT_UNSET) { 3236 /* 3237 * The output will not be in the order it was traced. Rather, 3238 * we will consume all of the data from each CPU's buffer in 3239 * turn. We apply special handling for the records from BEGIN 3240 * and END probes so that they are consumed first and last, 3241 * respectively. 3242 * 3243 * If we have just begun, we want to first process the CPU that 3244 * executed the BEGIN probe (if any). 3245 */ 3246 if (dtp->dt_active && dtp->dt_beganon != -1 && 3247 (rval = dt_consume_begin(dtp, fp, pf, rf, arg)) != 0) 3248 return (rval); 3249 3250 for (i = 0; i < max_ncpus; i++) { 3251 dtrace_bufdesc_t *buf; 3252 3253 /* 3254 * If we have stopped, we want to process the CPU on 3255 * which the END probe was processed only _after_ we 3256 * have processed everything else. 3257 */ 3258 if (dtp->dt_stopped && (i == dtp->dt_endedon)) 3259 continue; 3260 3261 if (dt_get_buf(dtp, i, &buf) != 0) 3262 return (-1); 3263 if (buf == NULL) 3264 continue; 3265 3266 dtp->dt_flow = 0; 3267 dtp->dt_indent = 0; 3268 dtp->dt_prefix = NULL; 3269 rval = dt_consume_cpu(dtp, fp, i, 3270 buf, B_FALSE, pf, rf, arg); 3271 dt_put_buf(dtp, buf); 3272 if (rval != 0) 3273 return (rval); 3274 } 3275 if (dtp->dt_stopped) { 3276 dtrace_bufdesc_t *buf; 3277 3278 if (dt_get_buf(dtp, dtp->dt_endedon, &buf) != 0) 3279 return (-1); 3280 if (buf == NULL) 3281 return (0); 3282 3283 rval = dt_consume_cpu(dtp, fp, dtp->dt_endedon, 3284 buf, B_FALSE, pf, rf, arg); 3285 dt_put_buf(dtp, buf); 3286 return (rval); 3287 } 3288 } else { 3289 /* 3290 * The output will be in the order it was traced (or for 3291 * speculations, when it was committed). We retrieve a buffer 3292 * from each CPU and put it into a priority queue, which sorts 3293 * based on the first entry in the buffer. This is sufficient 3294 * because entries within a buffer are already sorted. 3295 * 3296 * We then consume records one at a time, always consuming the 3297 * oldest record, as determined by the priority queue. When 3298 * we reach the end of the time covered by these buffers, 3299 * we need to stop and retrieve more records on the next pass. 3300 * The kernel tells us the time covered by each buffer, in 3301 * dtbd_timestamp. The first buffer's timestamp tells us the 3302 * time covered by all buffers, as subsequently retrieved 3303 * buffers will cover to a more recent time. 
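		 * As a sketch with made-up numbers: suppose CPU 0's buffer
		 * holds records at t=5 and t=9 and was snapshotted at t=12,
		 * while CPU 1's buffer holds a record at t=7 and was
		 * snapshotted at t=14.  We consume t=5, t=7 and t=9 in that
		 * order; once CPU 0's buffer sorts by its own dtbd_timestamp
		 * (12) we stop, because CPU 0 may have traced a record after
		 * t=12 that is not in this snapshot, and consuming CPU 1's
		 * later records now could emit data out of time order.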
3304 */ 3305 3306 uint64_t *drops = alloca(max_ncpus * sizeof (uint64_t)); 3307 uint64_t first_timestamp = 0; 3308 uint_t cookie = 0; 3309 dtrace_bufdesc_t *buf; 3310 3311 bzero(drops, max_ncpus * sizeof (uint64_t)); 3312 3313 if (dtp->dt_bufq == NULL) { 3314 dtp->dt_bufq = dt_pq_init(dtp, max_ncpus * 2, 3315 dt_buf_oldest, NULL); 3316 if (dtp->dt_bufq == NULL) /* ENOMEM */ 3317 return (-1); 3318 } 3319 3320 /* Retrieve data from each CPU. */ 3321 (void) dtrace_getopt(dtp, "bufsize", &size); 3322 for (i = 0; i < max_ncpus; i++) { 3323 dtrace_bufdesc_t *buf; 3324 3325 if (dt_get_buf(dtp, i, &buf) != 0) 3326 return (-1); 3327 if (buf != NULL) { 3328 if (first_timestamp == 0) 3329 first_timestamp = buf->dtbd_timestamp; 3330 assert(buf->dtbd_timestamp >= first_timestamp); 3331 3332 dt_pq_insert(dtp->dt_bufq, buf); 3333 drops[i] = buf->dtbd_drops; 3334 buf->dtbd_drops = 0; 3335 } 3336 } 3337 3338 /* Consume records. */ 3339 for (;;) { 3340 dtrace_bufdesc_t *buf = dt_pq_pop(dtp->dt_bufq); 3341 uint64_t timestamp; 3342 3343 if (buf == NULL) 3344 break; 3345 3346 timestamp = dt_buf_oldest(buf, dtp); 3347 assert(timestamp >= dtp->dt_last_timestamp); 3348 dtp->dt_last_timestamp = timestamp; 3349 3350 if (timestamp == buf->dtbd_timestamp) { 3351 /* 3352 * We've reached the end of the time covered 3353 * by this buffer. If this is the oldest 3354 * buffer, we must do another pass 3355 * to retrieve more data. 3356 */ 3357 dt_put_buf(dtp, buf); 3358 if (timestamp == first_timestamp && 3359 !dtp->dt_stopped) 3360 break; 3361 continue; 3362 } 3363 3364 if ((rval = dt_consume_cpu(dtp, fp, 3365 buf->dtbd_cpu, buf, B_TRUE, pf, rf, arg)) != 0) 3366 return (rval); 3367 dt_pq_insert(dtp->dt_bufq, buf); 3368 } 3369 3370 /* Consume drops. */ 3371 for (i = 0; i < max_ncpus; i++) { 3372 if (drops[i] != 0) { 3373 int error = dt_handle_cpudrop(dtp, i, 3374 DTRACEDROP_PRINCIPAL, drops[i]); 3375 if (error != 0) 3376 return (error); 3377 } 3378 } 3379 3380 /* 3381 * Reduce memory usage by re-allocating smaller buffers 3382 * for the "remnants". 3383 */ 3384 while ((buf = dt_pq_walk(dtp->dt_bufq, &cookie)) != NULL) 3385 dt_realloc_buf(dtp, buf, buf->dtbd_size); 3386 } 3387 3388 return (0); 3389 } 3390
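/*
 * Usage sketch (illustrative only; the callback names are invented and the
 * error handling is elided): a consumer normally drives the code above from
 * its main loop, either through dtrace_work() -- which calls
 * dtrace_aggregate_snap() and dtrace_consume() on the caller's behalf -- or
 * by calling dtrace_consume() directly with the same pair of callbacks:
 *
 *	static int
 *	chew(const dtrace_probedata_t *data, void *arg)
 *	{
 *		return (DTRACE_CONSUME_THIS);
 *	}
 *
 *	static int
 *	chewrec(const dtrace_probedata_t *data, const dtrace_recdesc_t *rec,
 *	    void *arg)
 *	{
 *		if (rec == NULL)
 *			return (DTRACE_CONSUME_NEXT);
 *		return (DTRACE_CONSUME_THIS);
 *	}
 *
 *	...
 *	do {
 *		dtrace_sleep(dtp);
 *	} while (dtrace_work(dtp, stdout, chew, chewrec, NULL) ==
 *	    DTRACE_WORKSTATUS_OKAY);
 *
 * Returning DTRACE_CONSUME_NEXT when rec is NULL simply acknowledges the
 * end-of-EPID callback that dt_consume_cpu() makes above.
 */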