/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <stdlib.h>
#include <strings.h>
#include <errno.h>
#include <unistd.h>
#include <dt_impl.h>
#include <assert.h>
#include <alloca.h>
#include <limits.h>

#define	DTRACE_AHASHSIZE	32779		/* big 'ol prime */

/*
 * Because qsort(3C) does not allow an argument to be passed to a comparison
 * function, the variables that affect comparison must regrettably be global;
 * they are protected by a global static lock, dt_qsort_lock.
 */
static pthread_mutex_t dt_qsort_lock = PTHREAD_MUTEX_INITIALIZER;

static int dt_revsort;
static int dt_keysort;
static int dt_keypos;

#define	DT_LESSTHAN	(dt_revsort == 0 ? -1 : 1)
#define	DT_GREATERTHAN	(dt_revsort == 0 ? 1 : -1)

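/*
 * The aggregating actions below each fold a newly-snapped value into an
 * existing hash table entry. dt_aggregate_count() is a simple element-wise
 * sum; as the dispatch in dt_aggregate_snap_cpu() shows, it serves not only
 * count() but also sum(), avg(), stddev() and quantize(), all of which keep
 * intermediate state that combines additively.
 */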
static void
dt_aggregate_count(int64_t *existing, int64_t *new, size_t size)
{
	int i;

	for (i = 0; i < size / sizeof (int64_t); i++)
		existing[i] = existing[i] + new[i];
}

static int
dt_aggregate_countcmp(int64_t *lhs, int64_t *rhs)
{
	int64_t lvar = *lhs;
	int64_t rvar = *rhs;

	if (lvar < rvar)
		return (DT_LESSTHAN);

	if (lvar > rvar)
		return (DT_GREATERTHAN);

	return (0);
}

/*ARGSUSED*/
static void
dt_aggregate_min(int64_t *existing, int64_t *new, size_t size)
{
	if (*new < *existing)
		*existing = *new;
}

/*ARGSUSED*/
static void
dt_aggregate_max(int64_t *existing, int64_t *new, size_t size)
{
	if (*new > *existing)
		*existing = *new;
}

static int
dt_aggregate_averagecmp(int64_t *lhs, int64_t *rhs)
{
	int64_t lavg = lhs[0] ? (lhs[1] / lhs[0]) : 0;
	int64_t ravg = rhs[0] ? (rhs[1] / rhs[0]) : 0;

	if (lavg < ravg)
		return (DT_LESSTHAN);

	if (lavg > ravg)
		return (DT_GREATERTHAN);

	return (0);
}

static int
dt_aggregate_stddevcmp(int64_t *lhs, int64_t *rhs)
{
	uint64_t lsd = dt_stddev((uint64_t *)lhs, 1);
	uint64_t rsd = dt_stddev((uint64_t *)rhs, 1);

	if (lsd < rsd)
		return (DT_LESSTHAN);

	if (lsd > rsd)
		return (DT_GREATERTHAN);

	return (0);
}

/*ARGSUSED*/
static void
dt_aggregate_lquantize(int64_t *existing, int64_t *new, size_t size)
{
	int64_t arg = *existing++;
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	int i;

	for (i = 0; i <= levels + 1; i++)
		existing[i] = existing[i] + new[i + 1];
}

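/*
 * An lquantize() buffer is laid out as one 64-bit word encoding the
 * lquantize() parameters (base, step and number of levels), followed by
 * levels + 2 bucket counts: an underflow bucket, one bucket per level, and
 * an overflow bucket. dt_aggregate_lquantizedsum() collapses that histogram
 * into a single weighted total for comparison purposes, weighting the
 * underflow and overflow buckets by one less (and one more) than the lowest
 * (and highest) level.
 */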
static long double
dt_aggregate_lquantizedsum(int64_t *lquanta)
{
	int64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg), i;
	long double total = (long double)lquanta[0] * (long double)(base - 1);

	for (i = 0; i < levels; base += step, i++)
		total += (long double)lquanta[i + 1] * (long double)base;

	return (total + (long double)lquanta[levels + 1] *
	    (long double)(base + 1));
}

static int64_t
dt_aggregate_lquantizedzero(int64_t *lquanta)
{
	int64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg), i;

	if (base - 1 == 0)
		return (lquanta[0]);

	for (i = 0; i < levels; base += step, i++) {
		if (base != 0)
			continue;

		return (lquanta[i + 1]);
	}

	if (base + 1 == 0)
		return (lquanta[levels + 1]);

	return (0);
}

static int
dt_aggregate_lquantizedcmp(int64_t *lhs, int64_t *rhs)
{
	long double lsum = dt_aggregate_lquantizedsum(lhs);
	long double rsum = dt_aggregate_lquantizedsum(rhs);
	int64_t lzero, rzero;

	if (lsum < rsum)
		return (DT_LESSTHAN);

	if (lsum > rsum)
		return (DT_GREATERTHAN);

	/*
	 * If they're both equal, then we will compare based on the weights at
	 * zero. If the weights at zero are equal (or if zero is not within
	 * the range of the linear quantization), then this will be judged a
	 * tie and will be resolved based on the key comparison.
	 */
	lzero = dt_aggregate_lquantizedzero(lhs);
	rzero = dt_aggregate_lquantizedzero(rhs);

	if (lzero < rzero)
		return (DT_LESSTHAN);

	if (lzero > rzero)
		return (DT_GREATERTHAN);

	return (0);
}

static int
dt_aggregate_quantizedcmp(int64_t *lhs, int64_t *rhs)
{
	int nbuckets = DTRACE_QUANTIZE_NBUCKETS, i;
	long double ltotal = 0, rtotal = 0;
	int64_t lzero, rzero;

	for (i = 0; i < nbuckets; i++) {
		int64_t bucketval = DTRACE_QUANTIZE_BUCKETVAL(i);

		if (bucketval == 0) {
			lzero = lhs[i];
			rzero = rhs[i];
		}

		ltotal += (long double)bucketval * (long double)lhs[i];
		rtotal += (long double)bucketval * (long double)rhs[i];
	}

	if (ltotal < rtotal)
		return (DT_LESSTHAN);

	if (ltotal > rtotal)
		return (DT_GREATERTHAN);

	/*
	 * If they're both equal, then we will compare based on the weights at
	 * zero. If the weights at zero are equal, then this will be judged a
	 * tie and will be resolved based on the key comparison.
	 */
	if (lzero < rzero)
		return (DT_LESSTHAN);

	if (lzero > rzero)
		return (DT_GREATERTHAN);

	return (0);
}

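/*
 * The four helpers below normalize program counters in aggregation keys
 * before the data is hashed: each PC is rewritten to the start of its
 * enclosing symbol or mapping so that different addresses within the same
 * function (usym()/sym()) or module (umod()/mod()) aggregate to a single
 * key. The user-level variants must grab the target process to perform the
 * lookup, and all of them quietly skip vectored (e.g. postmortem) opens.
 */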
static void
dt_aggregate_usym(dtrace_hdl_t *dtp, uint64_t *data)
{
	uint64_t pid = data[0];
	uint64_t *pc = &data[1];
	struct ps_prochandle *P;
	GElf_Sym sym;

	if (dtp->dt_vector != NULL)
		return;

	if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
		return;

	dt_proc_lock(dtp, P);

	if (Plookup_by_addr(P, *pc, NULL, 0, &sym) == 0)
		*pc = sym.st_value;

	dt_proc_unlock(dtp, P);
	dt_proc_release(dtp, P);
}

static void
dt_aggregate_umod(dtrace_hdl_t *dtp, uint64_t *data)
{
	uint64_t pid = data[0];
	uint64_t *pc = &data[1];
	struct ps_prochandle *P;
	const prmap_t *map;

	if (dtp->dt_vector != NULL)
		return;

	if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
		return;

	dt_proc_lock(dtp, P);

	if ((map = Paddr_to_map(P, *pc)) != NULL)
		*pc = map->pr_vaddr;

	dt_proc_unlock(dtp, P);
	dt_proc_release(dtp, P);
}

static void
dt_aggregate_sym(dtrace_hdl_t *dtp, uint64_t *data)
{
	GElf_Sym sym;
	uint64_t *pc = data;

	if (dtrace_lookup_by_addr(dtp, *pc, &sym, NULL) == 0)
		*pc = sym.st_value;
}

static void
dt_aggregate_mod(dtrace_hdl_t *dtp, uint64_t *data)
{
	uint64_t *pc = data;
	dt_module_t *dmp;

	if (dtp->dt_vector != NULL) {
		/*
		 * We don't have a way of just getting the module for a
		 * vectored open, and it doesn't seem to be worth defining
		 * one. This means that use of mod() won't get true
		 * aggregation in the postmortem case (some modules may
		 * appear more than once in aggregation output). It seems
		 * unlikely that anyone will ever notice or care...
		 */
		return;
	}

	for (dmp = dt_list_next(&dtp->dt_modlist); dmp != NULL;
	    dmp = dt_list_next(dmp)) {
		if (*pc - dmp->dm_text_va < dmp->dm_text_size) {
			*pc = dmp->dm_text_va;
			return;
		}
	}
}

static dtrace_aggvarid_t
dt_aggregate_aggvarid(dt_ahashent_t *ent)
{
	dtrace_aggdesc_t *agg = ent->dtahe_data.dtada_desc;
	caddr_t data = ent->dtahe_data.dtada_data;
	dtrace_recdesc_t *rec = agg->dtagd_rec;

	/*
	 * First, we'll check the variable ID in the aggdesc. If it's valid,
	 * we'll return it. If not, we'll use the compiler-generated ID
	 * present as the first record.
	 */
	if (agg->dtagd_varid != DTRACE_AGGVARIDNONE)
		return (agg->dtagd_varid);

	agg->dtagd_varid = *((dtrace_aggvarid_t *)(uintptr_t)(data +
	    rec->dtrd_offset));

	return (agg->dtagd_varid);
}

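/*
 * Snapshot the aggregation buffer for a single CPU and merge its records
 * into the consumer-side hash table. Each record's key (every record but
 * the last, which holds the aggregated value) is hashed byte-by-byte; on a
 * hit, the entry's aggregating action is applied in place, and on a miss a
 * new entry is allocated and linked into both its hash chain and the list
 * of all entries.
 */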
static int
dt_aggregate_snap_cpu(dtrace_hdl_t *dtp, processorid_t cpu)
{
	dtrace_epid_t id;
	uint64_t hashval;
	size_t offs, roffs, size, ndx;
	int i, j, rval;
	caddr_t addr, data;
	dtrace_recdesc_t *rec;
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_aggdesc_t *agg;
	dt_ahash_t *hash = &agp->dtat_hash;
	dt_ahashent_t *h;
	dtrace_bufdesc_t b = agp->dtat_buf, *buf = &b;
	dtrace_aggdata_t *aggdata;
	int flags = agp->dtat_flags;

	buf->dtbd_cpu = cpu;

	if (dt_ioctl(dtp, DTRACEIOC_AGGSNAP, buf) == -1) {
		if (errno == ENOENT) {
			/*
			 * If that failed with ENOENT, it may be because the
			 * CPU was unconfigured. This is okay; we'll just
			 * do nothing but return success.
			 */
			return (0);
		}

		return (dt_set_errno(dtp, errno));
	}

	if (buf->dtbd_drops != 0) {
		if (dt_handle_cpudrop(dtp, cpu,
		    DTRACEDROP_AGGREGATION, buf->dtbd_drops) == -1)
			return (-1);
	}

	if (buf->dtbd_size == 0)
		return (0);

	if (hash->dtah_hash == NULL) {
		size_t size;

		hash->dtah_size = DTRACE_AHASHSIZE;
		size = hash->dtah_size * sizeof (dt_ahashent_t *);

		if ((hash->dtah_hash = malloc(size)) == NULL)
			return (dt_set_errno(dtp, EDT_NOMEM));

		bzero(hash->dtah_hash, size);
	}

	for (offs = 0; offs < buf->dtbd_size; ) {
		/*
		 * We're guaranteed to have an ID.
		 */
		id = *((dtrace_epid_t *)((uintptr_t)buf->dtbd_data +
		    (uintptr_t)offs));

		if (id == DTRACE_AGGIDNONE) {
			/*
			 * This is filler to assure proper alignment of the
			 * next record; we simply ignore it.
			 */
			offs += sizeof (id);
			continue;
		}

		if ((rval = dt_aggid_lookup(dtp, id, &agg)) != 0)
			return (rval);

		addr = buf->dtbd_data + offs;
		size = agg->dtagd_size;
		hashval = 0;

		for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
			rec = &agg->dtagd_rec[j];
			roffs = rec->dtrd_offset;

			switch (rec->dtrd_action) {
			case DTRACEACT_USYM:
				dt_aggregate_usym(dtp,
				    /* LINTED - alignment */
				    (uint64_t *)&addr[roffs]);
				break;

			case DTRACEACT_UMOD:
				dt_aggregate_umod(dtp,
				    /* LINTED - alignment */
				    (uint64_t *)&addr[roffs]);
				break;

			case DTRACEACT_SYM:
				/* LINTED - alignment */
				dt_aggregate_sym(dtp, (uint64_t *)&addr[roffs]);
				break;

			case DTRACEACT_MOD:
				/* LINTED - alignment */
				dt_aggregate_mod(dtp, (uint64_t *)&addr[roffs]);
				break;

			default:
				break;
			}

			for (i = 0; i < rec->dtrd_size; i++)
				hashval += addr[roffs + i];
		}

		ndx = hashval % hash->dtah_size;

		for (h = hash->dtah_hash[ndx]; h != NULL; h = h->dtahe_next) {
			if (h->dtahe_hashval != hashval)
				continue;

			if (h->dtahe_size != size)
				continue;

			aggdata = &h->dtahe_data;
			data = aggdata->dtada_data;

			for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
				rec = &agg->dtagd_rec[j];
				roffs = rec->dtrd_offset;

				for (i = 0; i < rec->dtrd_size; i++)
					if (addr[roffs + i] != data[roffs + i])
						goto hashnext;
			}

			/*
			 * We found it. Now we need to apply the aggregating
			 * action on the data here.
			 */
			rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
			roffs = rec->dtrd_offset;
			/* LINTED - alignment */
			h->dtahe_aggregate((int64_t *)&data[roffs],
			    /* LINTED - alignment */
			    (int64_t *)&addr[roffs], rec->dtrd_size);

			/*
			 * If we're keeping per CPU data, apply the aggregating
			 * action there as well.
			 */
			if (aggdata->dtada_percpu != NULL) {
				data = aggdata->dtada_percpu[cpu];

				/* LINTED - alignment */
				h->dtahe_aggregate((int64_t *)data,
				    /* LINTED - alignment */
				    (int64_t *)&addr[roffs], rec->dtrd_size);
			}

			goto bufnext;
hashnext:
			continue;
		}

		/*
		 * If we're here, we couldn't find an entry for this record.
		 */
		if ((h = malloc(sizeof (dt_ahashent_t))) == NULL)
			return (dt_set_errno(dtp, EDT_NOMEM));
		bzero(h, sizeof (dt_ahashent_t));
		aggdata = &h->dtahe_data;

		if ((aggdata->dtada_data = malloc(size)) == NULL) {
			free(h);
			return (dt_set_errno(dtp, EDT_NOMEM));
		}

		bcopy(addr, aggdata->dtada_data, size);
		aggdata->dtada_size = size;
		aggdata->dtada_desc = agg;
		aggdata->dtada_handle = dtp;
		(void) dt_epid_lookup(dtp, agg->dtagd_epid,
		    &aggdata->dtada_edesc, &aggdata->dtada_pdesc);
		aggdata->dtada_normal = 1;

		h->dtahe_hashval = hashval;
		h->dtahe_size = size;
		(void) dt_aggregate_aggvarid(h);

		rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];

		if (flags & DTRACE_A_PERCPU) {
			int max_cpus = agp->dtat_maxcpu;
			caddr_t *percpu = malloc(max_cpus * sizeof (caddr_t));

			if (percpu == NULL) {
				free(aggdata->dtada_data);
				free(h);
				return (dt_set_errno(dtp, EDT_NOMEM));
			}

			for (j = 0; j < max_cpus; j++) {
				percpu[j] = malloc(rec->dtrd_size);

				if (percpu[j] == NULL) {
					while (--j >= 0)
						free(percpu[j]);

					free(aggdata->dtada_data);
					free(h);
					return (dt_set_errno(dtp, EDT_NOMEM));
				}

				if (j == cpu) {
					bcopy(&addr[rec->dtrd_offset],
					    percpu[j], rec->dtrd_size);
				} else {
					bzero(percpu[j], rec->dtrd_size);
				}
			}

			aggdata->dtada_percpu = percpu;
		}

		switch (rec->dtrd_action) {
		case DTRACEAGG_MIN:
			h->dtahe_aggregate = dt_aggregate_min;
			break;

		case DTRACEAGG_MAX:
			h->dtahe_aggregate = dt_aggregate_max;
			break;

		case DTRACEAGG_LQUANTIZE:
			h->dtahe_aggregate = dt_aggregate_lquantize;
			break;

		case DTRACEAGG_COUNT:
		case DTRACEAGG_SUM:
		case DTRACEAGG_AVG:
		case DTRACEAGG_STDDEV:
		case DTRACEAGG_QUANTIZE:
			h->dtahe_aggregate = dt_aggregate_count;
			break;

		default:
			return (dt_set_errno(dtp, EDT_BADAGG));
		}

		if (hash->dtah_hash[ndx] != NULL)
			hash->dtah_hash[ndx]->dtahe_prev = h;

		h->dtahe_next = hash->dtah_hash[ndx];
		hash->dtah_hash[ndx] = h;

		if (hash->dtah_all != NULL)
			hash->dtah_all->dtahe_prevall = h;

		h->dtahe_nextall = hash->dtah_all;
		hash->dtah_all = h;
bufnext:
		offs += agg->dtagd_size;
	}

	return (0);
}

int
dtrace_aggregate_snap(dtrace_hdl_t *dtp)
{
	int i, rval;
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	hrtime_t now = gethrtime();
	dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_AGGRATE];

	if (dtp->dt_lastagg != 0) {
		if (now - dtp->dt_lastagg < interval)
			return (0);

		dtp->dt_lastagg += interval;
	} else {
		dtp->dt_lastagg = now;
	}

	if (!dtp->dt_active)
		return (dt_set_errno(dtp, EINVAL));

	if (agp->dtat_buf.dtbd_size == 0)
		return (0);

	for (i = 0; i < agp->dtat_ncpus; i++) {
		if (rval = dt_aggregate_snap_cpu(dtp, agp->dtat_cpus[i]))
			return (rval);
	}

	return (0);
}

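/*
 * The dt_aggregate_*cmp() functions below are qsort(3C) comparators over
 * arrays of dt_ahashent_t pointers, and they compose: the hash comparison
 * orders by record count, the variable comparison orders by aggregation
 * variable ID, and the key and value comparisons order by tuple and by
 * aggregated value. The compound variants (e.g. varvalcmp) fall through to
 * the next comparison on a tie, and the *revcmp variants simply swap their
 * arguments to reverse the sort.
 */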
static int
dt_aggregate_hashcmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;

	if (lagg->dtagd_nrecs < ragg->dtagd_nrecs)
		return (DT_LESSTHAN);

	if (lagg->dtagd_nrecs > ragg->dtagd_nrecs)
		return (DT_GREATERTHAN);

	return (0);
}

static int
dt_aggregate_varcmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
	dtrace_aggvarid_t lid, rid;

	lid = dt_aggregate_aggvarid(lh);
	rid = dt_aggregate_aggvarid(rh);

	if (lid < rid)
		return (DT_LESSTHAN);

	if (lid > rid)
		return (DT_GREATERTHAN);

	return (0);
}

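/*
 * Compare two entries by their tuple keys. Record 0 holds the aggregation
 * variable ID, so the loop below starts at record 1; the "aggsortkeypos"
 * option (latched into dt_keypos) rotates which key component is compared
 * first, with the remaining components consulted in order as tie-breakers.
 */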
static int
dt_aggregate_keycmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
	dtrace_recdesc_t *lrec, *rrec;
	char *ldata, *rdata;
	int rval, i, j, keypos, nrecs;

	if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0)
		return (rval);

	nrecs = lagg->dtagd_nrecs - 1;
	assert(nrecs == ragg->dtagd_nrecs - 1);

	keypos = dt_keypos + 1 >= nrecs ? 0 : dt_keypos;

	for (i = 1; i < nrecs; i++) {
		uint64_t lval, rval;
		int ndx = i + keypos;

		if (ndx >= nrecs)
			ndx = ndx - nrecs + 1;

		lrec = &lagg->dtagd_rec[ndx];
		rrec = &ragg->dtagd_rec[ndx];

		ldata = lh->dtahe_data.dtada_data + lrec->dtrd_offset;
		rdata = rh->dtahe_data.dtada_data + rrec->dtrd_offset;

		if (lrec->dtrd_size < rrec->dtrd_size)
			return (DT_LESSTHAN);

		if (lrec->dtrd_size > rrec->dtrd_size)
			return (DT_GREATERTHAN);

		switch (lrec->dtrd_size) {
		case sizeof (uint64_t):
			/* LINTED - alignment */
			lval = *((uint64_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint64_t *)rdata);
			break;

		case sizeof (uint32_t):
			/* LINTED - alignment */
			lval = *((uint32_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint32_t *)rdata);
			break;

		case sizeof (uint16_t):
			/* LINTED - alignment */
			lval = *((uint16_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint16_t *)rdata);
			break;

		case sizeof (uint8_t):
			lval = *((uint8_t *)ldata);
			rval = *((uint8_t *)rdata);
			break;

		default:
			switch (lrec->dtrd_action) {
			case DTRACEACT_UMOD:
			case DTRACEACT_UADDR:
			case DTRACEACT_USYM:
				for (j = 0; j < 2; j++) {
					/* LINTED - alignment */
					lval = ((uint64_t *)ldata)[j];
					/* LINTED - alignment */
					rval = ((uint64_t *)rdata)[j];

					if (lval < rval)
						return (DT_LESSTHAN);

					if (lval > rval)
						return (DT_GREATERTHAN);
				}

				break;

			default:
				for (j = 0; j < lrec->dtrd_size; j++) {
					lval = ((uint8_t *)ldata)[j];
					rval = ((uint8_t *)rdata)[j];

					if (lval < rval)
						return (DT_LESSTHAN);

					if (lval > rval)
						return (DT_GREATERTHAN);
				}
			}

			continue;
		}

		if (lval < rval)
			return (DT_LESSTHAN);

		if (lval > rval)
			return (DT_GREATERTHAN);
	}

	return (0);
}

static int
dt_aggregate_valcmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
	caddr_t ldata = lh->dtahe_data.dtada_data;
	caddr_t rdata = rh->dtahe_data.dtada_data;
	dtrace_recdesc_t *lrec, *rrec;
	int64_t *laddr, *raddr;
	int rval, i;

	if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0)
		return (rval);

	if (lagg->dtagd_nrecs > ragg->dtagd_nrecs)
		return (DT_GREATERTHAN);

	if (lagg->dtagd_nrecs < ragg->dtagd_nrecs)
		return (DT_LESSTHAN);

	for (i = 0; i < lagg->dtagd_nrecs; i++) {
		lrec = &lagg->dtagd_rec[i];
		rrec = &ragg->dtagd_rec[i];

		if (lrec->dtrd_offset < rrec->dtrd_offset)
			return (DT_LESSTHAN);

		if (lrec->dtrd_offset > rrec->dtrd_offset)
			return (DT_GREATERTHAN);

		if (lrec->dtrd_action < rrec->dtrd_action)
			return (DT_LESSTHAN);

		if (lrec->dtrd_action > rrec->dtrd_action)
			return (DT_GREATERTHAN);
	}

	laddr = (int64_t *)(uintptr_t)(ldata + lrec->dtrd_offset);
	raddr = (int64_t *)(uintptr_t)(rdata + rrec->dtrd_offset);

	switch (lrec->dtrd_action) {
	case DTRACEAGG_AVG:
		rval = dt_aggregate_averagecmp(laddr, raddr);
		break;

	case DTRACEAGG_STDDEV:
		rval = dt_aggregate_stddevcmp(laddr, raddr);
		break;

	case DTRACEAGG_QUANTIZE:
		rval = dt_aggregate_quantizedcmp(laddr, raddr);
		break;

	case DTRACEAGG_LQUANTIZE:
		rval = dt_aggregate_lquantizedcmp(laddr, raddr);
		break;

	case DTRACEAGG_COUNT:
	case DTRACEAGG_SUM:
	case DTRACEAGG_MIN:
	case DTRACEAGG_MAX:
		rval = dt_aggregate_countcmp(laddr, raddr);
		break;

	default:
		assert(0);
	}

	return (rval);
}

static int
dt_aggregate_valkeycmp(const void *lhs, const void *rhs)
{
	int rval;

	if ((rval = dt_aggregate_valcmp(lhs, rhs)) != 0)
		return (rval);

	/*
	 * If we're here, the values for the two aggregation elements are
	 * equal. We already know that the key layout is the same for the two
	 * elements; we must now compare the keys themselves as a tie-breaker.
	 */
	return (dt_aggregate_keycmp(lhs, rhs));
}

static int
dt_aggregate_keyvarcmp(const void *lhs, const void *rhs)
{
	int rval;

	if ((rval = dt_aggregate_keycmp(lhs, rhs)) != 0)
		return (rval);

	return (dt_aggregate_varcmp(lhs, rhs));
}

static int
dt_aggregate_varkeycmp(const void *lhs, const void *rhs)
{
	int rval;

	if ((rval = dt_aggregate_varcmp(lhs, rhs)) != 0)
		return (rval);

	return (dt_aggregate_keycmp(lhs, rhs));
}

static int
dt_aggregate_valvarcmp(const void *lhs, const void *rhs)
{
	int rval;

	if ((rval = dt_aggregate_valkeycmp(lhs, rhs)) != 0)
		return (rval);

	return (dt_aggregate_varcmp(lhs, rhs));
}

static int
dt_aggregate_varvalcmp(const void *lhs, const void *rhs)
{
	int rval;

	if ((rval = dt_aggregate_varcmp(lhs, rhs)) != 0)
		return (rval);

	return (dt_aggregate_valkeycmp(lhs, rhs));
}

static int
dt_aggregate_keyvarrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_keyvarcmp(rhs, lhs));
}

static int
dt_aggregate_varkeyrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_varkeycmp(rhs, lhs));
}

static int
dt_aggregate_valvarrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_valvarcmp(rhs, lhs));
}

static int
dt_aggregate_varvalrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_varvalcmp(rhs, lhs));
}

static int
dt_aggregate_bundlecmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t **lh = *((dt_ahashent_t ***)lhs);
	dt_ahashent_t **rh = *((dt_ahashent_t ***)rhs);
	int i, rval;

	if (dt_keysort) {
		/*
		 * If we're sorting on keys, we need to scan until we find the
		 * last entry -- that's the representative key. (The order of
		 * the bundle is values followed by key to accommodate the
		 * default behavior of sorting by value.) If the keys are
		 * equal, we'll fall into the value comparison loop, below.
		 */
		for (i = 0; lh[i + 1] != NULL; i++)
			continue;

		assert(i != 0);
		assert(rh[i + 1] == NULL);

		if ((rval = dt_aggregate_keycmp(&lh[i], &rh[i])) != 0)
			return (rval);
	}

	for (i = 0; ; i++) {
		if (lh[i + 1] == NULL) {
			/*
			 * All of the values are equal; if we're sorting on
			 * keys, then we're only here because the keys were
			 * found to be equal and these records are therefore
			 * equal. If we're not sorting on keys, we'll use the
			 * key comparison from the representative key as the
			 * tie-breaker.
			 */
			if (dt_keysort)
				return (0);

			assert(i != 0);
			assert(rh[i + 1] == NULL);
			return (dt_aggregate_keycmp(&lh[i], &rh[i]));
		} else {
			if ((rval = dt_aggregate_valcmp(&lh[i], &rh[i])) != 0)
				return (rval);
		}
	}
}

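/*
 * Prepare the consumer side for aggregation processing: size the snapshot
 * buffer from the "aggsize" option and build the list of CPUs that will be
 * snapped, honoring the "cpu" option when a single CPU was requested and
 * otherwise including every configured CPU.
 */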
int
dt_aggregate_go(dtrace_hdl_t *dtp)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_optval_t size, cpu;
	dtrace_bufdesc_t *buf = &agp->dtat_buf;
	int rval, i;

	assert(agp->dtat_maxcpu == 0);
	assert(agp->dtat_ncpu == 0);
	assert(agp->dtat_cpus == NULL);

	agp->dtat_maxcpu = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;
	agp->dtat_ncpu = dt_sysconf(dtp, _SC_NPROCESSORS_MAX);
	agp->dtat_cpus = malloc(agp->dtat_ncpu * sizeof (processorid_t));

	if (agp->dtat_cpus == NULL)
		return (dt_set_errno(dtp, EDT_NOMEM));

	/*
	 * Use the aggregation buffer size as reloaded from the kernel.
	 */
	size = dtp->dt_options[DTRACEOPT_AGGSIZE];

	rval = dtrace_getopt(dtp, "aggsize", &size);
	assert(rval == 0);

	if (size == 0 || size == DTRACEOPT_UNSET)
		return (0);

	buf = &agp->dtat_buf;
	buf->dtbd_size = size;

	if ((buf->dtbd_data = malloc(buf->dtbd_size)) == NULL)
		return (dt_set_errno(dtp, EDT_NOMEM));

	/*
	 * Now query for the CPUs enabled.
	 */
	rval = dtrace_getopt(dtp, "cpu", &cpu);
	assert(rval == 0 && cpu != DTRACEOPT_UNSET);

	if (cpu != DTRACE_CPUALL) {
		assert(cpu < agp->dtat_ncpu);
		agp->dtat_cpus[agp->dtat_ncpus++] = (processorid_t)cpu;

		return (0);
	}

	agp->dtat_ncpus = 0;
	for (i = 0; i < agp->dtat_maxcpu; i++) {
		if (dt_status(dtp, i) == -1)
			continue;

		agp->dtat_cpus[agp->dtat_ncpus++] = i;
	}

	return (0);
}

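/*
 * Act on the value returned by a walker callback: DTRACE_AGGWALK_NEXT simply
 * proceeds; CLEAR zeroes the entry's value in place (preserving both the key
 * and any lquantize() parameters); REMOVE unlinks the entry from its hash
 * chain and the all-entries list and frees it; NORMALIZE and DENORMALIZE
 * adjust the entry's normalization state; and ERROR and ABORT terminate the
 * walk with an appropriate errno.
 */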
static int
dt_aggwalk_rval(dtrace_hdl_t *dtp, dt_ahashent_t *h, int rval)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_aggdata_t *data;
	dtrace_aggdesc_t *aggdesc;
	dtrace_recdesc_t *rec;
	int i;

	switch (rval) {
	case DTRACE_AGGWALK_NEXT:
		break;

	case DTRACE_AGGWALK_CLEAR: {
		uint32_t size, offs = 0;

		aggdesc = h->dtahe_data.dtada_desc;
		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
		size = rec->dtrd_size;
		data = &h->dtahe_data;

		if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
			offs = sizeof (uint64_t);
			size -= sizeof (uint64_t);
		}

		bzero(&data->dtada_data[rec->dtrd_offset] + offs, size);

		if (data->dtada_percpu == NULL)
			break;

		for (i = 0; i < dtp->dt_aggregate.dtat_maxcpu; i++)
			bzero(data->dtada_percpu[i] + offs, size);
		break;
	}

	case DTRACE_AGGWALK_ERROR:
		/*
		 * We assume that errno is already set in this case.
		 */
		return (dt_set_errno(dtp, errno));

	case DTRACE_AGGWALK_ABORT:
		return (dt_set_errno(dtp, EDT_DIRABORT));

	case DTRACE_AGGWALK_DENORMALIZE:
		h->dtahe_data.dtada_normal = 1;
		return (0);

	case DTRACE_AGGWALK_NORMALIZE:
		if (h->dtahe_data.dtada_normal == 0) {
			h->dtahe_data.dtada_normal = 1;
			return (dt_set_errno(dtp, EDT_BADRVAL));
		}

		return (0);

	case DTRACE_AGGWALK_REMOVE: {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;
		int i, max_cpus = agp->dtat_maxcpu;

		/*
		 * First, remove this hash entry from its hash chain.
		 */
		if (h->dtahe_prev != NULL) {
			h->dtahe_prev->dtahe_next = h->dtahe_next;
		} else {
			dt_ahash_t *hash = &agp->dtat_hash;
			size_t ndx = h->dtahe_hashval % hash->dtah_size;

			assert(hash->dtah_hash[ndx] == h);
			hash->dtah_hash[ndx] = h->dtahe_next;
		}

		if (h->dtahe_next != NULL)
			h->dtahe_next->dtahe_prev = h->dtahe_prev;

		/*
		 * Now remove it from the list of all hash entries.
		 */
		if (h->dtahe_prevall != NULL) {
			h->dtahe_prevall->dtahe_nextall = h->dtahe_nextall;
		} else {
			dt_ahash_t *hash = &agp->dtat_hash;

			assert(hash->dtah_all == h);
			hash->dtah_all = h->dtahe_nextall;
		}

		if (h->dtahe_nextall != NULL)
			h->dtahe_nextall->dtahe_prevall = h->dtahe_prevall;

		/*
		 * We're unlinked. We can safely destroy the data.
		 */
		if (aggdata->dtada_percpu != NULL) {
			for (i = 0; i < max_cpus; i++)
				free(aggdata->dtada_percpu[i]);
			free(aggdata->dtada_percpu);
		}

		free(aggdata->dtada_data);
		free(h);

		return (0);
	}

	default:
		return (dt_set_errno(dtp, EDT_BADRVAL));
	}

	return (0);
}

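/*
 * Sort an array of aggregation entries with the sort parameters taken from
 * the handle's options. The options are latched into the comparison globals
 * (and the previous values restored afterward); callers are expected to
 * serialize on dt_qsort_lock, as dt_aggregate_walk_sorted() does.
 */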
void
dt_aggregate_qsort(dtrace_hdl_t *dtp, void *base, size_t nel, size_t width,
    int (*compar)(const void *, const void *))
{
	int rev = dt_revsort, key = dt_keysort, keypos = dt_keypos;
	dtrace_optval_t keyposopt = dtp->dt_options[DTRACEOPT_AGGSORTKEYPOS];

	dt_revsort = (dtp->dt_options[DTRACEOPT_AGGSORTREV] != DTRACEOPT_UNSET);
	dt_keysort = (dtp->dt_options[DTRACEOPT_AGGSORTKEY] != DTRACEOPT_UNSET);

	if (keyposopt != DTRACEOPT_UNSET && keyposopt <= INT_MAX) {
		dt_keypos = (int)keyposopt;
	} else {
		dt_keypos = 0;
	}

	if (compar == NULL) {
		if (!dt_keysort) {
			compar = dt_aggregate_varvalcmp;
		} else {
			compar = dt_aggregate_varkeycmp;
		}
	}

	qsort(base, nel, width, compar);

	dt_revsort = rev;
	dt_keysort = key;
	dt_keypos = keypos;
}

int
dtrace_aggregate_walk(dtrace_hdl_t *dtp, dtrace_aggregate_f *func, void *arg)
{
	dt_ahashent_t *h, *next;
	dt_ahash_t *hash = &dtp->dt_aggregate.dtat_hash;

	for (h = hash->dtah_all; h != NULL; h = next) {
		/*
		 * dt_aggwalk_rval() can potentially remove the current hash
		 * entry; we need to load the next hash entry before calling
		 * into it.
		 */
		next = h->dtahe_nextall;

		if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1)
			return (-1);
	}

	return (0);
}

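/*
 * Walk the aggregation entries in sorted order: snapshot all entries into a
 * temporary array, sort it under dt_qsort_lock (using either the supplied
 * comparator or the option-driven default), and then invoke the callback on
 * each entry in turn. The dtrace_aggregate_walk_*sorted() wrappers below
 * merely select the comparator.
 */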
static int
dt_aggregate_walk_sorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg,
    int (*sfunc)(const void *, const void *))
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahashent_t *h, **sorted;
	dt_ahash_t *hash = &agp->dtat_hash;
	size_t i, nentries = 0;

	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall)
		nentries++;

	sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));

	if (sorted == NULL)
		return (-1);

	for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall)
		sorted[i++] = h;

	(void) pthread_mutex_lock(&dt_qsort_lock);

	if (sfunc == NULL) {
		dt_aggregate_qsort(dtp, sorted, nentries,
		    sizeof (dt_ahashent_t *), NULL);
	} else {
		/*
		 * If we've been explicitly passed a sorting function,
		 * we'll use that -- ignoring the values of the "aggsortrev",
		 * "aggsortkey" and "aggsortkeypos" options.
		 */
		qsort(sorted, nentries, sizeof (dt_ahashent_t *), sfunc);
	}

	(void) pthread_mutex_unlock(&dt_qsort_lock);

	for (i = 0; i < nentries; i++) {
		h = sorted[i];

		if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1) {
			dt_free(dtp, sorted);
			return (-1);
		}
	}

	dt_free(dtp, sorted);
	return (0);
}

int
dtrace_aggregate_walk_sorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func, arg, NULL));
}

int
dtrace_aggregate_walk_keysorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varkeycmp));
}

int
dtrace_aggregate_walk_valsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varvalcmp));
}

int
dtrace_aggregate_walk_keyvarsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_keyvarcmp));
}

int
dtrace_aggregate_walk_valvarsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_valvarcmp));
}

int
dtrace_aggregate_walk_keyrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varkeyrevcmp));
}

int
dtrace_aggregate_walk_valrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varvalrevcmp));
}

int
dtrace_aggregate_walk_keyvarrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_keyvarrevcmp));
}

int
dtrace_aggregate_walk_valvarrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_valvarrevcmp));
}

int
dtrace_aggregate_walk_joined(dtrace_hdl_t *dtp, dtrace_aggvarid_t *aggvars,
    int naggvars, dtrace_aggregate_walk_joined_f *func, void *arg)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahashent_t *h, **sorted = NULL, ***bundle, **nbundle;
	const dtrace_aggdata_t **data;
	dt_ahashent_t *zaggdata = NULL;
	dt_ahash_t *hash = &agp->dtat_hash;
	size_t nentries = 0, nbundles = 0, start, zsize = 0, bundlesize;
	dtrace_aggvarid_t max = 0, aggvar;
	int rval = -1, *map, *remap = NULL;
	int i, j;
	dtrace_optval_t sortpos = dtp->dt_options[DTRACEOPT_AGGSORTPOS];

	/*
	 * If the sorting position is greater than the number of aggregation
	 * variable IDs, we silently set it to 0.
	 */
	if (sortpos == DTRACEOPT_UNSET || sortpos >= naggvars)
		sortpos = 0;

	/*
	 * First we need to translate the specified aggregation variable IDs
	 * into a linear map that will allow us to translate an aggregation
	 * variable ID into its position in the specified aggvars.
	 */
	for (i = 0; i < naggvars; i++) {
		if (aggvars[i] == DTRACE_AGGVARIDNONE || aggvars[i] < 0)
			return (dt_set_errno(dtp, EDT_BADAGGVAR));

		if (aggvars[i] > max)
			max = aggvars[i];
	}

	if ((map = dt_zalloc(dtp, (max + 1) * sizeof (int))) == NULL)
		return (-1);

	zaggdata = dt_zalloc(dtp, naggvars * sizeof (dt_ahashent_t));

	if (zaggdata == NULL)
		goto out;

	for (i = 0; i < naggvars; i++) {
		int ndx = i + sortpos;

		if (ndx >= naggvars)
			ndx -= naggvars;

		aggvar = aggvars[ndx];
		assert(aggvar <= max);

		if (map[aggvar]) {
			/*
			 * We have an aggregation variable that is present
			 * more than once in the array of aggregation
			 * variables. While it's unclear why one might want
			 * to do this, it's legal. To support this construct,
			 * we will allocate a remap that will indicate the
			 * position from which this aggregation variable
			 * should be pulled. (That is, where the remap will
			 * map from one position to another.)
			 */
			if (remap == NULL) {
				remap = dt_zalloc(dtp, naggvars * sizeof (int));

				if (remap == NULL)
					goto out;
			}

			/*
			 * Given that the variable is already present, assert
			 * that following through the mapping and adjusting
			 * for the sort position yields the same aggregation
			 * variable ID.
			 */
			assert(aggvars[(map[aggvar] - 1 + sortpos) %
			    naggvars] == aggvars[ndx]);

			remap[i] = map[aggvar];
			continue;
		}

		map[aggvar] = i + 1;
	}

	/*
	 * We need to take two passes over the data to size our allocation, so
	 * we'll use the first pass to also fill in the zero-filled data to be
	 * used to properly format a zero-valued aggregation.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;
		int ndx;

		if ((id = dt_aggregate_aggvarid(h)) > max || !(ndx = map[id]))
			continue;

		if (zaggdata[ndx - 1].dtahe_size == 0) {
			zaggdata[ndx - 1].dtahe_size = h->dtahe_size;
			zaggdata[ndx - 1].dtahe_data = h->dtahe_data;
		}

		nentries++;
	}

	if (nentries == 0) {
		/*
		 * We couldn't find any entries; there is nothing else to do.
		 */
		rval = 0;
		goto out;
	}

	/*
	 * Before we sort the data, we're going to look for any holes in our
	 * zero-filled data. This will occur if an aggregation variable that
	 * we are being asked to print has not yet been assigned the result of
	 * any aggregating action for _any_ tuple. The issue becomes that we
	 * would like a zero value to be printed for all columns for this
	 * aggregation, but without any record description, we don't know the
	 * aggregating action that corresponds to the aggregation variable. To
	 * try to find a match, we're simply going to look up aggregation IDs
	 * (which are guaranteed to be contiguous and to start from 1), looking
	 * for the specified aggregation variable ID. If we find a match,
	 * we'll use that. If we iterate over all aggregation IDs and don't
	 * find a match, then we must be an anonymous enabling. (Anonymous
	 * enablings can't currently derive either aggregation variable IDs or
	 * aggregation variable names given only an aggregation ID.) In this
	 * obscure case (anonymous enabling, multiple aggregation printa() with
	 * some aggregations not represented for any tuple), our defined
	 * behavior is that the zero will be printed in the format of the first
	 * aggregation variable that contains any non-zero value.
	 */
	for (i = 0; i < naggvars; i++) {
		if (zaggdata[i].dtahe_size == 0) {
			dtrace_aggvarid_t aggvar;

			aggvar = aggvars[(i - sortpos + naggvars) % naggvars];
			assert(zaggdata[i].dtahe_data.dtada_data == NULL);

			for (j = DTRACE_AGGIDNONE + 1; ; j++) {
				dtrace_aggdesc_t *agg;
				dtrace_aggdata_t *aggdata;

				if (dt_aggid_lookup(dtp, j, &agg) != 0)
					break;

				if (agg->dtagd_varid != aggvar)
					continue;

				/*
				 * We have our description -- now we need to
				 * cons up the zaggdata entry for it.
				 */
				aggdata = &zaggdata[i].dtahe_data;
				aggdata->dtada_size = agg->dtagd_size;
				aggdata->dtada_desc = agg;
				aggdata->dtada_handle = dtp;
				(void) dt_epid_lookup(dtp, agg->dtagd_epid,
				    &aggdata->dtada_edesc,
				    &aggdata->dtada_pdesc);
				aggdata->dtada_normal = 1;
				zaggdata[i].dtahe_hashval = 0;
				zaggdata[i].dtahe_size = agg->dtagd_size;
				break;
			}

			if (zaggdata[i].dtahe_size == 0) {
				caddr_t data;

				/*
				 * We couldn't find this aggregation, meaning
				 * that we have never seen it before for any
				 * tuple _and_ this is an anonymous enabling.
				 * That is, we're in the obscure case outlined
				 * above. In this case, our defined behavior
				 * is to format the data in the format of the
				 * first non-zero aggregation -- of which, of
				 * course, we know there to be at least one
				 * (or nentries would have been zero).
				 */
				for (j = 0; j < naggvars; j++) {
					if (zaggdata[j].dtahe_size != 0)
						break;
				}

				assert(j < naggvars);
				zaggdata[i] = zaggdata[j];

				data = zaggdata[i].dtahe_data.dtada_data;
				assert(data != NULL);
			}
		}
	}

	/*
	 * Now we need to allocate our zero-filled data for use for
	 * aggregations that don't have a value corresponding to a given key.
	 */
	for (i = 0; i < naggvars; i++) {
		dtrace_aggdata_t *aggdata = &zaggdata[i].dtahe_data;
		dtrace_aggdesc_t *aggdesc = aggdata->dtada_desc;
		dtrace_recdesc_t *rec;
		uint64_t larg;
		caddr_t zdata;

		zsize = zaggdata[i].dtahe_size;
		assert(zsize != 0);

		if ((zdata = dt_zalloc(dtp, zsize)) == NULL) {
			/*
			 * If we failed to allocate some zero-filled data, we
			 * need to zero out the remaining dtada_data pointers
			 * to prevent the wrong data from being freed below.
			 */
			for (j = i; j < naggvars; j++)
				zaggdata[j].dtahe_data.dtada_data = NULL;
			goto out;
		}

		aggvar = aggvars[(i - sortpos + naggvars) % naggvars];

		/*
		 * First, the easy bit. To maintain compatibility with
		 * consumers that pull the compiler-generated ID out of the
		 * data, we put that ID at the top of the zero-filled data.
		 */
		rec = &aggdesc->dtagd_rec[0];
		/* LINTED - alignment */
		*((dtrace_aggvarid_t *)(zdata + rec->dtrd_offset)) = aggvar;

		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];

		/*
		 * Now for the more complicated part. If (and only if) this
		 * is an lquantize() aggregating action, zero-filled data is
		 * not equivalent to an empty record: we must also get the
		 * parameters for the lquantize().
		 */
		if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
			if (aggdata->dtada_data != NULL) {
				/*
				 * The easier case here is if we actually have
				 * some prototype data -- in which case we
				 * manually dig it out of the aggregation
				 * record.
				 */
				/* LINTED - alignment */
				larg = *((uint64_t *)(aggdata->dtada_data +
				    rec->dtrd_offset));
			} else {
				/*
				 * We don't have any prototype data. As a
				 * result, we know that we _do_ have the
				 * compiler-generated information. (If this
				 * were an anonymous enabling, all of our
				 * zero-filled data would have prototype data
				 * -- either directly or indirectly.) So as
				 * gross as it is, we'll grovel around in the
				 * compiler-generated information to find the
				 * lquantize() parameters.
				 */
				dtrace_stmtdesc_t *sdp;
				dt_ident_t *aid;
				dt_idsig_t *isp;

				sdp = (dtrace_stmtdesc_t *)(uintptr_t)
				    aggdesc->dtagd_rec[0].dtrd_uarg;
				aid = sdp->dtsd_aggdata;
				isp = (dt_idsig_t *)aid->di_data;
				assert(isp->dis_auxinfo != 0);
				larg = isp->dis_auxinfo;
			}

			/* LINTED - alignment */
			*((uint64_t *)(zdata + rec->dtrd_offset)) = larg;
		}

		aggdata->dtada_data = zdata;
	}

	/*
	 * Now that we've dealt with setting up our zero-filled data, we can
	 * allocate our sorted array, and take another pass over the data to
	 * fill it.
	 */
	sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));

	if (sorted == NULL)
		goto out;

	for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;

		if ((id = dt_aggregate_aggvarid(h)) > max || !map[id])
			continue;

		sorted[i++] = h;
	}

	assert(i == nentries);

	/*
	 * We've loaded our array; now we need to sort by value to allow us
	 * to create bundles of like value. We're going to acquire the
	 * dt_qsort_lock here, and hold it across all of our subsequent
	 * comparison and sorting.
	 */
	(void) pthread_mutex_lock(&dt_qsort_lock);

	qsort(sorted, nentries, sizeof (dt_ahashent_t *),
	    dt_aggregate_keyvarcmp);

	/*
	 * Now we need to go through and create bundles. Because the number
	 * of bundles is bounded by the size of the sorted array, we're going
	 * to reuse the underlying storage. And note that "bundle" is an
	 * array of pointers to arrays of pointers to dt_ahashent_t -- making
	 * its type (regrettably) "dt_ahashent_t ***". (Regrettable because
	 * '*' -- like '_' and 'X' -- should never appear in triplicate in
	 * an ideal world.)
	 */
	bundle = (dt_ahashent_t ***)sorted;

	for (i = 1, start = 0; i <= nentries; i++) {
		if (i < nentries &&
		    dt_aggregate_keycmp(&sorted[i], &sorted[i - 1]) == 0)
			continue;

		/*
		 * We have a bundle boundary. Everything from start to
		 * (i - 1) belongs in one bundle.
		 */
		assert(i - start <= naggvars);
		bundlesize = (naggvars + 2) * sizeof (dt_ahashent_t *);

		if ((nbundle = dt_zalloc(dtp, bundlesize)) == NULL) {
			(void) pthread_mutex_unlock(&dt_qsort_lock);
			goto out;
		}

		for (j = start; j < i; j++) {
			dtrace_aggvarid_t id = dt_aggregate_aggvarid(sorted[j]);

			assert(id <= max);
			assert(map[id] != 0);
			assert(map[id] - 1 < naggvars);
			assert(nbundle[map[id] - 1] == NULL);
			nbundle[map[id] - 1] = sorted[j];

			if (nbundle[naggvars] == NULL)
				nbundle[naggvars] = sorted[j];
		}

		for (j = 0; j < naggvars; j++) {
			if (nbundle[j] != NULL)
				continue;

			/*
			 * Before we assume that this aggregation variable
			 * isn't present (and fall back to using the
			 * zero-filled data allocated earlier), check the
			 * remap. If we have a remapping, we'll drop it in
			 * here. Note that we might be remapping an
			 * aggregation variable that isn't present for this
			 * key; in this case, the aggregation data that we
			 * copy will point to the zeroed data.
			 */
			if (remap != NULL && remap[j]) {
				assert(remap[j] - 1 < j);
				assert(nbundle[remap[j] - 1] != NULL);
				nbundle[j] = nbundle[remap[j] - 1];
			} else {
				nbundle[j] = &zaggdata[j];
			}
		}

		bundle[nbundles++] = nbundle;
		start = i;
	}

	/*
	 * Now we need to re-sort based on the first value.
	 */
	dt_aggregate_qsort(dtp, bundle, nbundles, sizeof (dt_ahashent_t **),
	    dt_aggregate_bundlecmp);

	(void) pthread_mutex_unlock(&dt_qsort_lock);

	/*
	 * We're done! Now we just need to go back over the sorted bundles,
	 * calling the function.
	 */
	data = alloca((naggvars + 1) * sizeof (dtrace_aggdata_t *));

	for (i = 0; i < nbundles; i++) {
		for (j = 0; j < naggvars; j++)
			data[j + 1] = NULL;

		for (j = 0; j < naggvars; j++) {
			int ndx = j - sortpos;

			if (ndx < 0)
				ndx += naggvars;

			assert(bundle[i][ndx] != NULL);
			data[j + 1] = &bundle[i][ndx]->dtahe_data;
		}

		for (j = 0; j < naggvars; j++)
			assert(data[j + 1] != NULL);

		/*
		 * The representative key is the last element in the bundle.
		 * Assert that we have one, and then set it to be the first
		 * element of data.
		 */
		assert(bundle[i][j] != NULL);
		data[0] = &bundle[i][j]->dtahe_data;

		if ((rval = func(data, naggvars + 1, arg)) == -1)
			goto out;
	}

	rval = 0;
out:
	for (i = 0; i < nbundles; i++)
		dt_free(dtp, bundle[i]);

	if (zaggdata != NULL) {
		for (i = 0; i < naggvars; i++)
			dt_free(dtp, zaggdata[i].dtahe_data.dtada_data);
	}

	dt_free(dtp, zaggdata);
	dt_free(dtp, sorted);
	dt_free(dtp, remap);
	dt_free(dtp, map);

	return (rval);
}

int
dtrace_aggregate_print(dtrace_hdl_t *dtp, FILE *fp,
    dtrace_aggregate_walk_f *func)
{
	dt_print_aggdata_t pd;

	pd.dtpa_dtp = dtp;
	pd.dtpa_fp = fp;
	pd.dtpa_allunprint = 1;

	if (func == NULL)
		func = dtrace_aggregate_walk_sorted;

	if ((*func)(dtp, dt_print_agg, &pd) == -1)
		return (dt_set_errno(dtp, dtp->dt_errno));

	return (0);
}

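/*
 * Reset every aggregation to zero without discarding the keys: the final
 * (value) record of each entry is zeroed in place, along with any per-CPU
 * copies. Compare DTRACE_AGGWALK_REMOVE, which discards entries entirely.
 */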
void
dtrace_aggregate_clear(dtrace_hdl_t *dtp)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahash_t *hash = &agp->dtat_hash;
	dt_ahashent_t *h;
	dtrace_aggdata_t *data;
	dtrace_aggdesc_t *aggdesc;
	dtrace_recdesc_t *rec;
	int i, max_cpus = agp->dtat_maxcpu;

	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		aggdesc = h->dtahe_data.dtada_desc;
		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
		data = &h->dtahe_data;

		bzero(&data->dtada_data[rec->dtrd_offset], rec->dtrd_size);

		if (data->dtada_percpu == NULL)
			continue;

		for (i = 0; i < max_cpus; i++)
			bzero(data->dtada_percpu[i], rec->dtrd_size);
	}
}

void
dt_aggregate_destroy(dtrace_hdl_t *dtp)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahash_t *hash = &agp->dtat_hash;
	dt_ahashent_t *h, *next;
	dtrace_aggdata_t *aggdata;
	int i, max_cpus = agp->dtat_maxcpu;

	if (hash->dtah_hash == NULL) {
		assert(hash->dtah_all == NULL);
	} else {
		free(hash->dtah_hash);

		for (h = hash->dtah_all; h != NULL; h = next) {
			next = h->dtahe_nextall;

			aggdata = &h->dtahe_data;

			if (aggdata->dtada_percpu != NULL) {
				for (i = 0; i < max_cpus; i++)
					free(aggdata->dtada_percpu[i]);
				free(aggdata->dtada_percpu);
			}

			free(aggdata->dtada_data);
			free(h);
		}

		hash->dtah_hash = NULL;
		hash->dtah_all = NULL;
		hash->dtah_size = 0;
	}

	free(agp->dtat_buf.dtbd_data);
	free(agp->dtat_cpus);
}