xref: /netbsd-src/external/cddl/osnet/dist/lib/libdtrace/common/dt_aggregate.c (revision d909946ca08dceb44d7d0f22ec9488679695d976)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 /*
28  * Copyright (c) 2013, Joyent, Inc. All rights reserved.
29  * Copyright (c) 2012 by Delphix. All rights reserved.
30  */
31 
32 #include <stdlib.h>
33 #include <strings.h>
34 #include <errno.h>
35 #include <unistd.h>
36 #include <dt_impl.h>
37 #include <assert.h>
38 #ifdef illumos
39 #include <alloca.h>
40 #else
41 #include <sys/sysctl.h>
42 #if defined(__FreeBSD__) || defined(__NetBSD__)
43 #include <libproc_compat.h>
44 #endif
45 #endif
46 #include <limits.h>
47 
#define	DTRACE_AHASHSIZE	32779		/* big 'ol prime */

/*
 * Because qsort(3C) does not allow an argument to be passed to a comparison
 * function, the variables that affect comparison must regrettably be global;
 * they are protected by a global static lock, dt_qsort_lock.
 */
static pthread_mutex_t dt_qsort_lock = PTHREAD_MUTEX_INITIALIZER;

static int dt_revsort;		/* non-zero: sort in reverse (descending) order */
static int dt_keysort;		/* non-zero: sort on keys rather than values */
static int dt_keypos;		/* key position to prioritize (aggsortkeypos) */

/* Comparison results that honor the global sort direction (dt_revsort). */
#define	DT_LESSTHAN	(dt_revsort == 0 ? -1 : 1)
#define	DT_GREATERTHAN	(dt_revsort == 0 ? 1 : -1)
63 
/*
 * Aggregating action for count()/sum()/avg()/stddev()/quantize():  add each
 * 64-bit counter of the incoming data into the existing data, in place.
 */
static void
dt_aggregate_count(int64_t *existing, int64_t *new, size_t size)
{
	size_t i, n = size / sizeof (int64_t);

	for (i = 0; i < n; i++)
		existing[i] += new[i];
}
72 
73 static int
74 dt_aggregate_countcmp(int64_t *lhs, int64_t *rhs)
75 {
76 	int64_t lvar = *lhs;
77 	int64_t rvar = *rhs;
78 
79 	if (lvar < rvar)
80 		return (DT_LESSTHAN);
81 
82 	if (lvar > rvar)
83 		return (DT_GREATERTHAN);
84 
85 	return (0);
86 }
87 
/*ARGSUSED*/
/*
 * Aggregating action for min():  keep the smaller of the two values.
 */
static void
dt_aggregate_min(int64_t *existing, int64_t *new, size_t size)
{
	if (*existing > *new)
		*existing = *new;
}
95 
/*ARGSUSED*/
/*
 * Aggregating action for max():  keep the larger of the two values.
 */
static void
dt_aggregate_max(int64_t *existing, int64_t *new, size_t size)
{
	if (*existing < *new)
		*existing = *new;
}
103 
104 static int
105 dt_aggregate_averagecmp(int64_t *lhs, int64_t *rhs)
106 {
107 	int64_t lavg = lhs[0] ? (lhs[1] / lhs[0]) : 0;
108 	int64_t ravg = rhs[0] ? (rhs[1] / rhs[0]) : 0;
109 
110 	if (lavg < ravg)
111 		return (DT_LESSTHAN);
112 
113 	if (lavg > ravg)
114 		return (DT_GREATERTHAN);
115 
116 	return (0);
117 }
118 
119 static int
120 dt_aggregate_stddevcmp(int64_t *lhs, int64_t *rhs)
121 {
122 	uint64_t lsd = dt_stddev((uint64_t *)lhs, 1);
123 	uint64_t rsd = dt_stddev((uint64_t *)rhs, 1);
124 
125 	if (lsd < rsd)
126 		return (DT_LESSTHAN);
127 
128 	if (lsd > rsd)
129 		return (DT_GREATERTHAN);
130 
131 	return (0);
132 }
133 
/*ARGSUSED*/
/*
 * Aggregating action for lquantize():  the first int64_t encodes the
 * quantization parameters; the number of buckets to merge (underflow,
 * "levels" linear buckets, overflow) is derived from it.
 */
static void
dt_aggregate_lquantize(int64_t *existing, int64_t *new, size_t size)
{
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(*existing);
	int64_t *buckets = existing + 1;	/* skip the encoded argument */
	int i;

	for (i = 0; i <= levels + 1; i++)
		buckets[i] += new[i + 1];
}
145 
/*
 * Compute a weighted sum for an lquantize() aggregation:  each bucket's count
 * is weighted by a representative value (base - 1 for underflow, the bucket's
 * base for the linear range, and the final base + 1 for overflow).
 */
static long double
dt_aggregate_lquantizedsum(int64_t *lquanta)
{
	int64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	uint16_t i;
	long double total;

	/* Underflow bucket, weighted just below the base. */
	total = (long double)lquanta[0] * (long double)(base - 1);

	for (i = 0; i < levels; i++, base += step)
		total += (long double)lquanta[i + 1] * (long double)base;

	/* Overflow bucket, weighted just above the top of the range. */
	total += (long double)lquanta[levels + 1] * (long double)(base + 1);

	return (total);
}
161 
/*
 * Return the count recorded at value zero in an lquantize() aggregation, or
 * 0 if zero lies outside the quantized range.
 */
static int64_t
dt_aggregate_lquantizedzero(int64_t *lquanta)
{
	int64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	uint16_t i;

	/* Zero falls in the underflow bucket. */
	if (base == 1)
		return (lquanta[0]);

	/* Zero falls in one of the linear buckets. */
	for (i = 0; i < levels; i++, base += step) {
		if (base == 0)
			return (lquanta[i + 1]);
	}

	/* Zero falls in the overflow bucket. */
	if (base == -1)
		return (lquanta[levels + 1]);

	return (0);
}
185 
186 static int
187 dt_aggregate_lquantizedcmp(int64_t *lhs, int64_t *rhs)
188 {
189 	long double lsum = dt_aggregate_lquantizedsum(lhs);
190 	long double rsum = dt_aggregate_lquantizedsum(rhs);
191 	int64_t lzero, rzero;
192 
193 	if (lsum < rsum)
194 		return (DT_LESSTHAN);
195 
196 	if (lsum > rsum)
197 		return (DT_GREATERTHAN);
198 
199 	/*
200 	 * If they're both equal, then we will compare based on the weights at
201 	 * zero.  If the weights at zero are equal (or if zero is not within
202 	 * the range of the linear quantization), then this will be judged a
203 	 * tie and will be resolved based on the key comparison.
204 	 */
205 	lzero = dt_aggregate_lquantizedzero(lhs);
206 	rzero = dt_aggregate_lquantizedzero(rhs);
207 
208 	if (lzero < rzero)
209 		return (DT_LESSTHAN);
210 
211 	if (lzero > rzero)
212 		return (DT_GREATERTHAN);
213 
214 	return (0);
215 }
216 
217 static void
218 dt_aggregate_llquantize(int64_t *existing, int64_t *new, size_t size)
219 {
220 	int i;
221 
222 	for (i = 1; i < size / sizeof (int64_t); i++)
223 		existing[i] = existing[i] + new[i];
224 }
225 
/*
 * Compute a weighted sum for an llquantize() (log/linear) aggregation:  the
 * first int64_t encodes factor/low/high/nsteps; each bucket's count is
 * weighted by the bucket's lower-bound value as the loop walks the log/linear
 * bucket boundaries in order.
 */
static long double
dt_aggregate_llquantizedsum(int64_t *llquanta)
{
	int64_t arg = *llquanta++;
	uint16_t factor = DTRACE_LLQUANTIZE_FACTOR(arg);
	uint16_t low = DTRACE_LLQUANTIZE_LOW(arg);
	uint16_t high = DTRACE_LLQUANTIZE_HIGH(arg);
	uint16_t nsteps = DTRACE_LLQUANTIZE_NSTEP(arg);
	int bin = 0, order;
	int64_t value = 1, next, step;
	long double total;

	/* The encoding guarantees a whole number of steps per order. */
	assert(nsteps >= factor);
	assert(nsteps % factor == 0);

	/* Start at the bottom of the quantized range: factor^low. */
	for (order = 0; order < low; order++)
		value *= factor;

	/* Underflow bucket, weighted just below the range. */
	total = (long double)llquanta[bin++] * (long double)(value - 1);

	next = value * factor;
	step = next > nsteps ? next / nsteps : 1;

	/*
	 * Walk each order of magnitude, advancing by "step" within an order
	 * and recomputing the step when the next order is reached.
	 */
	while (order <= high) {
		assert(value < next);
		total += (long double)llquanta[bin++] * (long double)(value);

		if ((value += step) != next)
			continue;

		next = value * factor;
		step = next > nsteps ? next / nsteps : 1;
		order++;
	}

	/* Overflow bucket, weighted at the top of the range. */
	return (total + (long double)llquanta[bin] * (long double)value);
}
263 
264 static int
265 dt_aggregate_llquantizedcmp(int64_t *lhs, int64_t *rhs)
266 {
267 	long double lsum = dt_aggregate_llquantizedsum(lhs);
268 	long double rsum = dt_aggregate_llquantizedsum(rhs);
269 	int64_t lzero, rzero;
270 
271 	if (lsum < rsum)
272 		return (DT_LESSTHAN);
273 
274 	if (lsum > rsum)
275 		return (DT_GREATERTHAN);
276 
277 	/*
278 	 * If they're both equal, then we will compare based on the weights at
279 	 * zero.  If the weights at zero are equal, then this will be judged a
280 	 * tie and will be resolved based on the key comparison.
281 	 */
282 	lzero = lhs[1];
283 	rzero = rhs[1];
284 
285 	if (lzero < rzero)
286 		return (DT_LESSTHAN);
287 
288 	if (lzero > rzero)
289 		return (DT_GREATERTHAN);
290 
291 	return (0);
292 }
293 
294 static int
295 dt_aggregate_quantizedcmp(int64_t *lhs, int64_t *rhs)
296 {
297 	int nbuckets = DTRACE_QUANTIZE_NBUCKETS;
298 	long double ltotal = 0, rtotal = 0;
299 	int64_t lzero = 0, rzero = 0;
300 	uint_t i;
301 
302 	for (i = 0; i < nbuckets; i++) {
303 		int64_t bucketval = DTRACE_QUANTIZE_BUCKETVAL(i);
304 
305 		if (bucketval == 0) {
306 			lzero = lhs[i];
307 			rzero = rhs[i];
308 		}
309 
310 		ltotal += (long double)bucketval * (long double)lhs[i];
311 		rtotal += (long double)bucketval * (long double)rhs[i];
312 	}
313 
314 	if (ltotal < rtotal)
315 		return (DT_LESSTHAN);
316 
317 	if (ltotal > rtotal)
318 		return (DT_GREATERTHAN);
319 
320 	/*
321 	 * If they're both equal, then we will compare based on the weights at
322 	 * zero.  If the weights at zero are equal, then this will be judged a
323 	 * tie and will be resolved based on the key comparison.
324 	 */
325 	if (lzero < rzero)
326 		return (DT_LESSTHAN);
327 
328 	if (lzero > rzero)
329 		return (DT_GREATERTHAN);
330 
331 	return (0);
332 }
333 
334 static void
335 dt_aggregate_usym(dtrace_hdl_t *dtp, uint64_t *data)
336 {
337 	uint64_t pid = data[0];
338 	uint64_t *pc = &data[1];
339 	struct ps_prochandle *P;
340 	GElf_Sym sym;
341 
342 	if (dtp->dt_vector != NULL)
343 		return;
344 
345 	if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
346 		return;
347 
348 	dt_proc_lock(dtp, P);
349 
350 	if (Plookup_by_addr(P, *pc, NULL, 0, &sym) == 0)
351 		*pc = sym.st_value;
352 
353 	dt_proc_unlock(dtp, P);
354 	dt_proc_release(dtp, P);
355 }
356 
357 static void
358 dt_aggregate_umod(dtrace_hdl_t *dtp, uint64_t *data)
359 {
360 	uint64_t pid = data[0];
361 	uint64_t *pc = &data[1];
362 	struct ps_prochandle *P;
363 	const prmap_t *map;
364 
365 	if (dtp->dt_vector != NULL)
366 		return;
367 
368 	if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
369 		return;
370 
371 	dt_proc_lock(dtp, P);
372 
373 	if ((map = Paddr_to_map(P, *pc)) != NULL)
374 		*pc = map->pr_vaddr;
375 
376 	dt_proc_unlock(dtp, P);
377 	dt_proc_release(dtp, P);
378 }
379 
380 static void
381 dt_aggregate_sym(dtrace_hdl_t *dtp, uint64_t *data)
382 {
383 	GElf_Sym sym;
384 	uint64_t *pc = data;
385 
386 	if (dtrace_lookup_by_addr(dtp, *pc, &sym, NULL) == 0)
387 		*pc = sym.st_value;
388 }
389 
390 static void
391 dt_aggregate_mod(dtrace_hdl_t *dtp, uint64_t *data)
392 {
393 	uint64_t *pc = data;
394 	dt_module_t *dmp;
395 
396 	if (dtp->dt_vector != NULL) {
397 		/*
398 		 * We don't have a way of just getting the module for a
399 		 * vectored open, and it doesn't seem to be worth defining
400 		 * one.  This means that use of mod() won't get true
401 		 * aggregation in the postmortem case (some modules may
402 		 * appear more than once in aggregation output).  It seems
403 		 * unlikely that anyone will ever notice or care...
404 		 */
405 		return;
406 	}
407 
408 	for (dmp = dt_list_next(&dtp->dt_modlist); dmp != NULL;
409 	    dmp = dt_list_next(dmp)) {
410 		if (*pc - dmp->dm_text_va < dmp->dm_text_size) {
411 			*pc = dmp->dm_text_va;
412 			return;
413 		}
414 	}
415 }
416 
417 static dtrace_aggvarid_t
418 dt_aggregate_aggvarid(dt_ahashent_t *ent)
419 {
420 	dtrace_aggdesc_t *agg = ent->dtahe_data.dtada_desc;
421 	caddr_t data = ent->dtahe_data.dtada_data;
422 	dtrace_recdesc_t *rec = agg->dtagd_rec;
423 
424 	/*
425 	 * First, we'll check the variable ID in the aggdesc.  If it's valid,
426 	 * we'll return it.  If not, we'll use the compiler-generated ID
427 	 * present as the first record.
428 	 */
429 	if (agg->dtagd_varid != DTRACE_AGGVARIDNONE)
430 		return (agg->dtagd_varid);
431 
432 	agg->dtagd_varid = *((dtrace_aggvarid_t *)(uintptr_t)(data +
433 	    rec->dtrd_offset));
434 
435 	return (agg->dtagd_varid);
436 }
437 
438 
/*
 * Snapshot the in-kernel aggregation buffer for the given CPU and merge each
 * record into the consumer-side aggregation hash (agp->dtat_hash).  Matching
 * entries have their aggregating action applied in place; new entries are
 * allocated, linked into both the hash chain and the all-entries list, and
 * assigned the aggregating function for their action.  Returns 0 on success
 * (including the benign case of an unconfigured CPU) or an error via
 * dt_set_errno().
 */
static int
dt_aggregate_snap_cpu(dtrace_hdl_t *dtp, processorid_t cpu)
{
	dtrace_epid_t id;
	uint64_t hashval;
	size_t offs, roffs, size, ndx;
	int i, j, rval;
	caddr_t addr, data;
	dtrace_recdesc_t *rec;
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_aggdesc_t *agg;
	dt_ahash_t *hash = &agp->dtat_hash;
	dt_ahashent_t *h;
	dtrace_bufdesc_t b = agp->dtat_buf, *buf = &b;
	dtrace_aggdata_t *aggdata;
	int flags = agp->dtat_flags;

	buf->dtbd_cpu = cpu;

#ifdef illumos
	if (dt_ioctl(dtp, DTRACEIOC_AGGSNAP, buf) == -1) {
#else
	if (dt_ioctl(dtp, DTRACEIOC_AGGSNAP, &buf) == -1) {
#endif
		if (errno == ENOENT) {
			/*
			 * If that failed with ENOENT, it may be because the
			 * CPU was unconfigured.  This is okay; we'll just
			 * do nothing but return success.
			 */
			return (0);
		}

		return (dt_set_errno(dtp, errno));
	}

	if (buf->dtbd_drops != 0) {
		/* Report aggregation drops through the drop handler. */
		if (dt_handle_cpudrop(dtp, cpu,
		    DTRACEDROP_AGGREGATION, buf->dtbd_drops) == -1)
			return (-1);
	}

	if (buf->dtbd_size == 0)
		return (0);

	if (hash->dtah_hash == NULL) {
		size_t size;

		/* First use:  lazily allocate the hash-bucket array. */
		hash->dtah_size = DTRACE_AHASHSIZE;
		size = hash->dtah_size * sizeof (dt_ahashent_t *);

		if ((hash->dtah_hash = malloc(size)) == NULL)
			return (dt_set_errno(dtp, EDT_NOMEM));

		bzero(hash->dtah_hash, size);
	}

	for (offs = 0; offs < buf->dtbd_size; ) {
		/*
		 * We're guaranteed to have an ID.
		 */
		id = *((dtrace_epid_t *)((uintptr_t)buf->dtbd_data +
		    (uintptr_t)offs));

		if (id == DTRACE_AGGIDNONE) {
			/*
			 * This is filler to assure proper alignment of the
			 * next record; we simply ignore it.
			 */
			offs += sizeof (id);
			continue;
		}

		if ((rval = dt_aggid_lookup(dtp, id, &agg)) != 0)
			return (rval);

		addr = buf->dtbd_data + offs;
		size = agg->dtagd_size;
		hashval = 0;

		/*
		 * Normalize any symbol/module key records in place, then
		 * compute a byte-sum hash over all key records (everything
		 * but the final, aggregated record).
		 */
		for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
			rec = &agg->dtagd_rec[j];
			roffs = rec->dtrd_offset;

			switch (rec->dtrd_action) {
			case DTRACEACT_USYM:
				dt_aggregate_usym(dtp,
				    /* LINTED - alignment */
				    (uint64_t *)&addr[roffs]);
				break;

			case DTRACEACT_UMOD:
				dt_aggregate_umod(dtp,
				    /* LINTED - alignment */
				    (uint64_t *)&addr[roffs]);
				break;

			case DTRACEACT_SYM:
				/* LINTED - alignment */
				dt_aggregate_sym(dtp, (uint64_t *)&addr[roffs]);
				break;

			case DTRACEACT_MOD:
				/* LINTED - alignment */
				dt_aggregate_mod(dtp, (uint64_t *)&addr[roffs]);
				break;

			default:
				break;
			}

			for (i = 0; i < rec->dtrd_size; i++)
				hashval += addr[roffs + i];
		}

		ndx = hashval % hash->dtah_size;

		/*
		 * Look for an existing entry with the same hash, size, and
		 * byte-identical key records.
		 */
		for (h = hash->dtah_hash[ndx]; h != NULL; h = h->dtahe_next) {
			if (h->dtahe_hashval != hashval)
				continue;

			if (h->dtahe_size != size)
				continue;

			aggdata = &h->dtahe_data;
			data = aggdata->dtada_data;

			for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
				rec = &agg->dtagd_rec[j];
				roffs = rec->dtrd_offset;

				for (i = 0; i < rec->dtrd_size; i++)
					if (addr[roffs + i] != data[roffs + i])
						goto hashnext;
			}

			/*
			 * We found it.  Now we need to apply the aggregating
			 * action on the data here.
			 */
			rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
			roffs = rec->dtrd_offset;
			/* LINTED - alignment */
			h->dtahe_aggregate((int64_t *)&data[roffs],
			    /* LINTED - alignment */
			    (int64_t *)&addr[roffs], rec->dtrd_size);

			/*
			 * If we're keeping per CPU data, apply the aggregating
			 * action there as well.
			 */
			if (aggdata->dtada_percpu != NULL) {
				data = aggdata->dtada_percpu[cpu];

				/* LINTED - alignment */
				h->dtahe_aggregate((int64_t *)data,
				    /* LINTED - alignment */
				    (int64_t *)&addr[roffs], rec->dtrd_size);
			}

			goto bufnext;
hashnext:
			continue;
		}

		/*
		 * If we're here, we couldn't find an entry for this record.
		 */
		if ((h = malloc(sizeof (dt_ahashent_t))) == NULL)
			return (dt_set_errno(dtp, EDT_NOMEM));
		bzero(h, sizeof (dt_ahashent_t));
		aggdata = &h->dtahe_data;

		if ((aggdata->dtada_data = malloc(size)) == NULL) {
			free(h);
			return (dt_set_errno(dtp, EDT_NOMEM));
		}

		bcopy(addr, aggdata->dtada_data, size);
		aggdata->dtada_size = size;
		aggdata->dtada_desc = agg;
		aggdata->dtada_handle = dtp;
		(void) dt_epid_lookup(dtp, agg->dtagd_epid,
		    &aggdata->dtada_edesc, &aggdata->dtada_pdesc);
		aggdata->dtada_normal = 1;

		h->dtahe_hashval = hashval;
		h->dtahe_size = size;
		/* Resolve and cache the aggregation variable ID. */
		(void) dt_aggregate_aggvarid(h);

		rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];

		if (flags & DTRACE_A_PERCPU) {
			int max_cpus = agp->dtat_maxcpu;
			caddr_t *percpu = malloc(max_cpus * sizeof (caddr_t));

			if (percpu == NULL) {
				free(aggdata->dtada_data);
				free(h);
				return (dt_set_errno(dtp, EDT_NOMEM));
			}

			for (j = 0; j < max_cpus; j++) {
				percpu[j] = malloc(rec->dtrd_size);

				if (percpu[j] == NULL) {
					while (--j >= 0)
						free(percpu[j]);

					free(aggdata->dtada_data);
					free(h);
					return (dt_set_errno(dtp, EDT_NOMEM));
				}

				/*
				 * Only the snapshotted CPU has data yet; all
				 * other CPUs start zeroed.
				 */
				if (j == cpu) {
					bcopy(&addr[rec->dtrd_offset],
					    percpu[j], rec->dtrd_size);
				} else {
					bzero(percpu[j], rec->dtrd_size);
				}
			}

			aggdata->dtada_percpu = percpu;
		}

		/*
		 * Select the aggregating function for subsequent merges of
		 * this entry.
		 */
		switch (rec->dtrd_action) {
		case DTRACEAGG_MIN:
			h->dtahe_aggregate = dt_aggregate_min;
			break;

		case DTRACEAGG_MAX:
			h->dtahe_aggregate = dt_aggregate_max;
			break;

		case DTRACEAGG_LQUANTIZE:
			h->dtahe_aggregate = dt_aggregate_lquantize;
			break;

		case DTRACEAGG_LLQUANTIZE:
			h->dtahe_aggregate = dt_aggregate_llquantize;
			break;

		case DTRACEAGG_COUNT:
		case DTRACEAGG_SUM:
		case DTRACEAGG_AVG:
		case DTRACEAGG_STDDEV:
		case DTRACEAGG_QUANTIZE:
			h->dtahe_aggregate = dt_aggregate_count;
			break;

		default:
			return (dt_set_errno(dtp, EDT_BADAGG));
		}

		/* Link the new entry at the head of its hash chain... */
		if (hash->dtah_hash[ndx] != NULL)
			hash->dtah_hash[ndx]->dtahe_prev = h;

		h->dtahe_next = hash->dtah_hash[ndx];
		hash->dtah_hash[ndx] = h;

		/* ...and at the head of the all-entries list. */
		if (hash->dtah_all != NULL)
			hash->dtah_all->dtahe_prevall = h;

		h->dtahe_nextall = hash->dtah_all;
		hash->dtah_all = h;
bufnext:
		offs += agg->dtagd_size;
	}

	return (0);
}
710 
711 int
712 dtrace_aggregate_snap(dtrace_hdl_t *dtp)
713 {
714 	int i, rval;
715 	dt_aggregate_t *agp = &dtp->dt_aggregate;
716 	hrtime_t now = gethrtime();
717 	dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_AGGRATE];
718 
719 	if (dtp->dt_lastagg != 0) {
720 		if (now - dtp->dt_lastagg < interval)
721 			return (0);
722 
723 		dtp->dt_lastagg += interval;
724 	} else {
725 		dtp->dt_lastagg = now;
726 	}
727 
728 	if (!dtp->dt_active)
729 		return (dt_set_errno(dtp, EINVAL));
730 
731 	if (agp->dtat_buf.dtbd_size == 0)
732 		return (0);
733 
734 	for (i = 0; i < agp->dtat_ncpus; i++) {
735 		if ((rval = dt_aggregate_snap_cpu(dtp, agp->dtat_cpus[i])))
736 			return (rval);
737 	}
738 
739 	return (0);
740 }
741 
742 static int
743 dt_aggregate_hashcmp(const void *lhs, const void *rhs)
744 {
745 	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
746 	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
747 	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
748 	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
749 
750 	if (lagg->dtagd_nrecs < ragg->dtagd_nrecs)
751 		return (DT_LESSTHAN);
752 
753 	if (lagg->dtagd_nrecs > ragg->dtagd_nrecs)
754 		return (DT_GREATERTHAN);
755 
756 	return (0);
757 }
758 
759 static int
760 dt_aggregate_varcmp(const void *lhs, const void *rhs)
761 {
762 	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
763 	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
764 	dtrace_aggvarid_t lid, rid;
765 
766 	lid = dt_aggregate_aggvarid(lh);
767 	rid = dt_aggregate_aggvarid(rh);
768 
769 	if (lid < rid)
770 		return (DT_LESSTHAN);
771 
772 	if (lid > rid)
773 		return (DT_GREATERTHAN);
774 
775 	return (0);
776 }
777 
/*
 * Order hash entries by their key records (all records but the last,
 * aggregated one).  When dt_keypos is set, comparison starts at that key
 * position and wraps around, giving that key sorting priority.  Scalar keys
 * of 1/2/4/8 bytes are compared numerically; user symbol/module keys compare
 * their two 64-bit words; everything else compares bytewise.
 */
static int
dt_aggregate_keycmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
	dtrace_recdesc_t *lrec, *rrec;
	char *ldata, *rdata;
	int rval, i, j, keypos, nrecs;

	/* Differing record counts sort before any key comparison. */
	if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0)
		return (rval);

	nrecs = lagg->dtagd_nrecs - 1;
	assert(nrecs == ragg->dtagd_nrecs - 1);

	/* An out-of-range aggsortkeypos falls back to position 0. */
	keypos = dt_keypos + 1 >= nrecs ? 0 : dt_keypos;

	for (i = 1; i < nrecs; i++) {
		/* NOTE(review): this rval shadows the outer int rval. */
		uint64_t lval, rval;
		int ndx = i + keypos;

		/* Wrap around past the last key record. */
		if (ndx >= nrecs)
			ndx = ndx - nrecs + 1;

		lrec = &lagg->dtagd_rec[ndx];
		rrec = &ragg->dtagd_rec[ndx];

		ldata = lh->dtahe_data.dtada_data + lrec->dtrd_offset;
		rdata = rh->dtahe_data.dtada_data + rrec->dtrd_offset;

		if (lrec->dtrd_size < rrec->dtrd_size)
			return (DT_LESSTHAN);

		if (lrec->dtrd_size > rrec->dtrd_size)
			return (DT_GREATERTHAN);

		switch (lrec->dtrd_size) {
		case sizeof (uint64_t):
			/* LINTED - alignment */
			lval = *((uint64_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint64_t *)rdata);
			break;

		case sizeof (uint32_t):
			/* LINTED - alignment */
			lval = *((uint32_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint32_t *)rdata);
			break;

		case sizeof (uint16_t):
			/* LINTED - alignment */
			lval = *((uint16_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint16_t *)rdata);
			break;

		case sizeof (uint8_t):
			lval = *((uint8_t *)ldata);
			rval = *((uint8_t *)rdata);
			break;

		default:
			switch (lrec->dtrd_action) {
			case DTRACEACT_UMOD:
			case DTRACEACT_UADDR:
			case DTRACEACT_USYM:
				/* Compare the pid and address words. */
				for (j = 0; j < 2; j++) {
					/* LINTED - alignment */
					lval = ((uint64_t *)ldata)[j];
					/* LINTED - alignment */
					rval = ((uint64_t *)rdata)[j];

					if (lval < rval)
						return (DT_LESSTHAN);

					if (lval > rval)
						return (DT_GREATERTHAN);
				}

				break;

			default:
				/* Arbitrary-size key:  compare bytewise. */
				for (j = 0; j < lrec->dtrd_size; j++) {
					lval = ((uint8_t *)ldata)[j];
					rval = ((uint8_t *)rdata)[j];

					if (lval < rval)
						return (DT_LESSTHAN);

					if (lval > rval)
						return (DT_GREATERTHAN);
				}
			}

			continue;
		}

		if (lval < rval)
			return (DT_LESSTHAN);

		if (lval > rval)
			return (DT_GREATERTHAN);
	}

	return (0);
}
888 
889 static int
890 dt_aggregate_valcmp(const void *lhs, const void *rhs)
891 {
892 	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
893 	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
894 	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
895 	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
896 	caddr_t ldata = lh->dtahe_data.dtada_data;
897 	caddr_t rdata = rh->dtahe_data.dtada_data;
898 	dtrace_recdesc_t *lrec, *rrec;
899 	int64_t *laddr, *raddr;
900 	int rval;
901 
902 	assert(lagg->dtagd_nrecs == ragg->dtagd_nrecs);
903 
904 	lrec = &lagg->dtagd_rec[lagg->dtagd_nrecs - 1];
905 	rrec = &ragg->dtagd_rec[ragg->dtagd_nrecs - 1];
906 
907 	assert(lrec->dtrd_action == rrec->dtrd_action);
908 
909 	laddr = (int64_t *)(uintptr_t)(ldata + lrec->dtrd_offset);
910 	raddr = (int64_t *)(uintptr_t)(rdata + rrec->dtrd_offset);
911 
912 	switch (lrec->dtrd_action) {
913 	case DTRACEAGG_AVG:
914 		rval = dt_aggregate_averagecmp(laddr, raddr);
915 		break;
916 
917 	case DTRACEAGG_STDDEV:
918 		rval = dt_aggregate_stddevcmp(laddr, raddr);
919 		break;
920 
921 	case DTRACEAGG_QUANTIZE:
922 		rval = dt_aggregate_quantizedcmp(laddr, raddr);
923 		break;
924 
925 	case DTRACEAGG_LQUANTIZE:
926 		rval = dt_aggregate_lquantizedcmp(laddr, raddr);
927 		break;
928 
929 	case DTRACEAGG_LLQUANTIZE:
930 		rval = dt_aggregate_llquantizedcmp(laddr, raddr);
931 		break;
932 
933 	case DTRACEAGG_COUNT:
934 	case DTRACEAGG_SUM:
935 	case DTRACEAGG_MIN:
936 	case DTRACEAGG_MAX:
937 		rval = dt_aggregate_countcmp(laddr, raddr);
938 		break;
939 
940 	default:
941 		assert(0);
942 	}
943 
944 	return (rval);
945 }
946 
/*
 * Order by aggregated value; if the values for the two aggregation elements
 * are equal, the keys themselves serve as the tie-breaker (the key layouts
 * are already known to match).
 */
static int
dt_aggregate_valkeycmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_valcmp(lhs, rhs);

	return (rval != 0 ? rval : dt_aggregate_keycmp(lhs, rhs));
}
962 
/*
 * Order by key, breaking ties on the aggregation variable ID.
 */
static int
dt_aggregate_keyvarcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_keycmp(lhs, rhs);

	return (rval != 0 ? rval : dt_aggregate_varcmp(lhs, rhs));
}
973 
/*
 * Order by aggregation variable ID, breaking ties on the key.
 */
static int
dt_aggregate_varkeycmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_varcmp(lhs, rhs);

	return (rval != 0 ? rval : dt_aggregate_keycmp(lhs, rhs));
}
984 
/*
 * Order by value (with key tie-break), then by aggregation variable ID.
 */
static int
dt_aggregate_valvarcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_valkeycmp(lhs, rhs);

	return (rval != 0 ? rval : dt_aggregate_varcmp(lhs, rhs));
}
995 
/*
 * Order by aggregation variable ID, then by value (with key tie-break).
 */
static int
dt_aggregate_varvalcmp(const void *lhs, const void *rhs)
{
	int rval = dt_aggregate_varcmp(lhs, rhs);

	return (rval != 0 ? rval : dt_aggregate_valkeycmp(lhs, rhs));
}
1006 
/*
 * Descending variant of dt_aggregate_keyvarcmp():  swap the operands.
 */
static int
dt_aggregate_keyvarrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_keyvarcmp(rhs, lhs));
}
1012 
/*
 * Descending variant of dt_aggregate_varkeycmp():  swap the operands.
 */
static int
dt_aggregate_varkeyrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_varkeycmp(rhs, lhs));
}
1018 
/*
 * Descending variant of dt_aggregate_valvarcmp():  swap the operands.
 */
static int
dt_aggregate_valvarrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_valvarcmp(rhs, lhs));
}
1024 
/*
 * Descending variant of dt_aggregate_varvalcmp():  swap the operands.
 */
static int
dt_aggregate_varvalrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_varvalcmp(rhs, lhs));
}
1030 
/*
 * Compare two bundles of aggregation entries.  A bundle is a NULL-terminated
 * array of entry pointers laid out as values followed by a representative
 * key (accommodating the default sort-by-value behavior).  When key-sorting,
 * the representative keys are compared first; otherwise the values are
 * compared in order with the key as final tie-breaker.
 */
static int
dt_aggregate_bundlecmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t **lh = *((dt_ahashent_t ***)lhs);
	dt_ahashent_t **rh = *((dt_ahashent_t ***)rhs);
	int i, rval;

	if (dt_keysort) {
		/*
		 * If we're sorting on keys, we need to scan until we find the
		 * last entry -- that's the representative key.  (The order of
		 * the bundle is values followed by key to accommodate the
		 * default behavior of sorting by value.)  If the keys are
		 * equal, we'll fall into the value comparison loop, below.
		 */
		for (i = 0; lh[i + 1] != NULL; i++)
			continue;

		assert(i != 0);
		assert(rh[i + 1] == NULL);

		if ((rval = dt_aggregate_keycmp(&lh[i], &rh[i])) != 0)
			return (rval);
	}

	for (i = 0; ; i++) {
		if (lh[i + 1] == NULL) {
			/*
			 * All of the values are equal; if we're sorting on
			 * keys, then we're only here because the keys were
			 * found to be equal and these records are therefore
			 * equal.  If we're not sorting on keys, we'll use the
			 * key comparison from the representative key as the
			 * tie-breaker.
			 */
			if (dt_keysort)
				return (0);

			assert(i != 0);
			assert(rh[i + 1] == NULL);
			return (dt_aggregate_keycmp(&lh[i], &rh[i]));
		} else {
			if ((rval = dt_aggregate_valcmp(&lh[i], &rh[i])) != 0)
				return (rval);
		}
	}
}
1078 
/*
 * Initialize the consumer-side aggregation state:  discover the CPU count,
 * allocate the per-snapshot buffer sized by the aggsize option, and record
 * the set of CPUs to snapshot (a single CPU if the "cpu" option names one,
 * otherwise every online CPU).  Returns 0 on success or an error via
 * dt_set_errno().
 */
int
dt_aggregate_go(dtrace_hdl_t *dtp)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_optval_t size, cpu;
	dtrace_bufdesc_t *buf = &agp->dtat_buf;
	int rval, i;

	assert(agp->dtat_maxcpu == 0);
	assert(agp->dtat_ncpu == 0);
	assert(agp->dtat_cpus == NULL);

	agp->dtat_maxcpu = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;
	agp->dtat_ncpu = dt_sysconf(dtp, _SC_NPROCESSORS_MAX);
	agp->dtat_cpus = malloc(agp->dtat_ncpu * sizeof (processorid_t));

	if (agp->dtat_cpus == NULL)
		return (dt_set_errno(dtp, EDT_NOMEM));

	/*
	 * Use the aggregation buffer size as reloaded from the kernel.
	 */
	size = dtp->dt_options[DTRACEOPT_AGGSIZE];

	rval = dtrace_getopt(dtp, "aggsize", &size);
	assert(rval == 0);

	/* No aggregation buffer configured:  nothing to do. */
	if (size == 0 || size == DTRACEOPT_UNSET)
		return (0);

	/* NOTE(review): buf already points at dtat_buf; this is redundant. */
	buf = &agp->dtat_buf;
	buf->dtbd_size = size;

	if ((buf->dtbd_data = malloc(buf->dtbd_size)) == NULL)
		return (dt_set_errno(dtp, EDT_NOMEM));

	/*
	 * Now query for the CPUs enabled.
	 */
	rval = dtrace_getopt(dtp, "cpu", &cpu);
	assert(rval == 0 && cpu != DTRACEOPT_UNSET);

	if (cpu != DTRACE_CPUALL) {
		assert(cpu < agp->dtat_ncpu);
		agp->dtat_cpus[agp->dtat_ncpus++] = (processorid_t)cpu;

		return (0);
	}

	/* All CPUs:  record every CPU that reports status. */
	agp->dtat_ncpus = 0;
	for (i = 0; i < agp->dtat_maxcpu; i++) {
		if (dt_status(dtp, i) == -1)
			continue;

		agp->dtat_cpus[agp->dtat_ncpus++] = i;
	}

	return (0);
}
1138 
/*
 * Act on the return value of an aggregation-walk callback:  continue
 * (NEXT), zero the aggregated data (CLEAR), unlink and free the entry
 * (REMOVE), adjust normalization (NORMALIZE/DENORMALIZE), or fail
 * (ERROR/ABORT/anything unrecognized).  Returns 0 to continue the walk or
 * an error via dt_set_errno().
 */
static int
dt_aggwalk_rval(dtrace_hdl_t *dtp, dt_ahashent_t *h, int rval)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_aggdata_t *data;
	dtrace_aggdesc_t *aggdesc;
	dtrace_recdesc_t *rec;
	int i;

	switch (rval) {
	case DTRACE_AGGWALK_NEXT:
		break;

	case DTRACE_AGGWALK_CLEAR: {
		uint32_t size, offs = 0;

		aggdesc = h->dtahe_data.dtada_desc;
		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
		size = rec->dtrd_size;
		data = &h->dtahe_data;

		/*
		 * For lquantize(), the first 64 bits encode the quantization
		 * parameters and must survive the clear.
		 */
		if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
			offs = sizeof (uint64_t);
			size -= sizeof (uint64_t);
		}

		bzero(&data->dtada_data[rec->dtrd_offset] + offs, size);

		if (data->dtada_percpu == NULL)
			break;

		/* Clear the per-CPU copies as well. */
		for (i = 0; i < dtp->dt_aggregate.dtat_maxcpu; i++)
			bzero(data->dtada_percpu[i] + offs, size);
		break;
	}

	case DTRACE_AGGWALK_ERROR:
		/*
		 * We assume that errno is already set in this case.
		 */
		return (dt_set_errno(dtp, errno));

	case DTRACE_AGGWALK_ABORT:
		return (dt_set_errno(dtp, EDT_DIRABORT));

	case DTRACE_AGGWALK_DENORMALIZE:
		h->dtahe_data.dtada_normal = 1;
		return (0);

	case DTRACE_AGGWALK_NORMALIZE:
		/* NORMALIZE without a prior dtrace_aggregate_walk() setting
		 * a normal is an error; reset to the identity normal. */
		if (h->dtahe_data.dtada_normal == 0) {
			h->dtahe_data.dtada_normal = 1;
			return (dt_set_errno(dtp, EDT_BADRVAL));
		}

		return (0);

	case DTRACE_AGGWALK_REMOVE: {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;
		int max_cpus = agp->dtat_maxcpu;

		/*
		 * First, remove this hash entry from its hash chain.
		 */
		if (h->dtahe_prev != NULL) {
			h->dtahe_prev->dtahe_next = h->dtahe_next;
		} else {
			dt_ahash_t *hash = &agp->dtat_hash;
			size_t ndx = h->dtahe_hashval % hash->dtah_size;

			assert(hash->dtah_hash[ndx] == h);
			hash->dtah_hash[ndx] = h->dtahe_next;
		}

		if (h->dtahe_next != NULL)
			h->dtahe_next->dtahe_prev = h->dtahe_prev;

		/*
		 * Now remove it from the list of all hash entries.
		 */
		if (h->dtahe_prevall != NULL) {
			h->dtahe_prevall->dtahe_nextall = h->dtahe_nextall;
		} else {
			dt_ahash_t *hash = &agp->dtat_hash;

			assert(hash->dtah_all == h);
			hash->dtah_all = h->dtahe_nextall;
		}

		if (h->dtahe_nextall != NULL)
			h->dtahe_nextall->dtahe_prevall = h->dtahe_prevall;

		/*
		 * We're unlinked.  We can safely destroy the data.
		 */
		if (aggdata->dtada_percpu != NULL) {
			for (i = 0; i < max_cpus; i++)
				free(aggdata->dtada_percpu[i]);
			free(aggdata->dtada_percpu);
		}

		free(aggdata->dtada_data);
		free(h);

		return (0);
	}

	default:
		return (dt_set_errno(dtp, EDT_BADRVAL));
	}

	return (0);
}
1252 
1253 static void
1254 dt_aggregate_qsort(dtrace_hdl_t *dtp, void *base, size_t nel, size_t width,
1255     int (*compar)(const void *, const void *))
1256 {
1257 	int rev = dt_revsort, key = dt_keysort, keypos = dt_keypos;
1258 	dtrace_optval_t keyposopt = dtp->dt_options[DTRACEOPT_AGGSORTKEYPOS];
1259 
1260 	dt_revsort = (dtp->dt_options[DTRACEOPT_AGGSORTREV] != DTRACEOPT_UNSET);
1261 	dt_keysort = (dtp->dt_options[DTRACEOPT_AGGSORTKEY] != DTRACEOPT_UNSET);
1262 
1263 	if (keyposopt != DTRACEOPT_UNSET && keyposopt <= INT_MAX) {
1264 		dt_keypos = (int)keyposopt;
1265 	} else {
1266 		dt_keypos = 0;
1267 	}
1268 
1269 	if (compar == NULL) {
1270 		if (!dt_keysort) {
1271 			compar = dt_aggregate_varvalcmp;
1272 		} else {
1273 			compar = dt_aggregate_varkeycmp;
1274 		}
1275 	}
1276 
1277 	qsort(base, nel, width, compar);
1278 
1279 	dt_revsort = rev;
1280 	dt_keysort = key;
1281 	dt_keypos = keypos;
1282 }
1283 
1284 int
1285 dtrace_aggregate_walk(dtrace_hdl_t *dtp, dtrace_aggregate_f *func, void *arg)
1286 {
1287 	dt_ahashent_t *h, *next;
1288 	dt_ahash_t *hash = &dtp->dt_aggregate.dtat_hash;
1289 
1290 	for (h = hash->dtah_all; h != NULL; h = next) {
1291 		/*
1292 		 * dt_aggwalk_rval() can potentially remove the current hash
1293 		 * entry; we need to load the next hash entry before calling
1294 		 * into it.
1295 		 */
1296 		next = h->dtahe_nextall;
1297 
1298 		if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1)
1299 			return (-1);
1300 	}
1301 
1302 	return (0);
1303 }
1304 
/*
 * Compute per-aggregation-variable totals across all aggregate elements,
 * setting dtada_total and the DTRACE_A_TOTAL/DTRACE_A_HASNEGATIVES/
 * DTRACE_A_HASPOSITIVES flags on each element.  If "clear" is true, only
 * the clearing pass is performed (totals and flags are reset and the
 * function returns).  Returns 0 on success, or -1 on allocation failure.
 */
static int
dt_aggregate_total(dtrace_hdl_t *dtp, boolean_t clear)
{
	dt_ahashent_t *h;
	dtrace_aggdata_t **total;
	dtrace_aggid_t max = DTRACE_AGGVARIDNONE, id;
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahash_t *hash = &agp->dtat_hash;
	uint32_t tflags;

	tflags = DTRACE_A_TOTAL | DTRACE_A_HASNEGATIVES | DTRACE_A_HASPOSITIVES;

	/*
	 * If we need to deliver per-aggregation totals, we're going to take
	 * three passes over the aggregate:  one to clear everything out and
	 * determine our maximum aggregation ID, one to actually total
	 * everything up, and a final pass to assign the totals to the
	 * individual elements.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;

		if ((id = dt_aggregate_aggvarid(h)) > max)
			max = id;

		aggdata->dtada_total = 0;
		aggdata->dtada_flags &= ~tflags;
	}

	/* An empty aggregate (max still AGGVARIDNONE) has nothing to total. */
	if (clear || max == DTRACE_AGGVARIDNONE)
		return (0);

	/* One slot per variable ID; IDs index this array directly. */
	total = dt_zalloc(dtp, (max + 1) * sizeof (dtrace_aggdata_t *));

	if (total == NULL)
		return (-1);

	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;
		dtrace_aggdesc_t *agg = aggdata->dtada_desc;
		dtrace_recdesc_t *rec;
		caddr_t data;
		int64_t val, *addr;

		/* The aggregating action is always the final record. */
		rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
		data = aggdata->dtada_data;
		addr = (int64_t *)(uintptr_t)(data + rec->dtrd_offset);

		switch (rec->dtrd_action) {
		case DTRACEAGG_STDDEV:
			val = dt_stddev((uint64_t *)addr, 1);
			break;

		case DTRACEAGG_SUM:
		case DTRACEAGG_COUNT:
			val = *addr;
			break;

		case DTRACEAGG_AVG:
			/* addr[0] is the count, addr[1] the running sum. */
			val = addr[0] ? (addr[1] / addr[0]) : 0;
			break;

		default:
			/* Other aggregating actions don't contribute. */
			continue;
		}

		if (total[agg->dtagd_varid] == NULL) {
			/*
			 * The first element seen for a variable becomes the
			 * accumulator for that variable's total and flags.
			 */
			total[agg->dtagd_varid] = aggdata;
			aggdata->dtada_flags |= DTRACE_A_TOTAL;
		} else {
			aggdata = total[agg->dtagd_varid];
		}

		if (val > 0)
			aggdata->dtada_flags |= DTRACE_A_HASPOSITIVES;

		if (val < 0) {
			aggdata->dtada_flags |= DTRACE_A_HASNEGATIVES;
			val = -val;
		}

		if (dtp->dt_options[DTRACEOPT_AGGZOOM] != DTRACEOPT_UNSET) {
			/*
			 * When zooming, scale the value and track the
			 * maximum rather than summing.
			 */
			val = (int64_t)((long double)val *
			    (1 / DTRACE_AGGZOOM_MAX));

			if (val > aggdata->dtada_total)
				aggdata->dtada_total = val;
		} else {
			aggdata->dtada_total += val;
		}
	}

	/*
	 * And now one final pass to set everyone's total.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data, *t;
		dtrace_aggdesc_t *agg = aggdata->dtada_desc;

		if ((t = total[agg->dtagd_varid]) == NULL || aggdata == t)
			continue;

		aggdata->dtada_total = t->dtada_total;
		aggdata->dtada_flags |= (t->dtada_flags & tflags);
	}

	dt_free(dtp, total);

	return (0);
}
1415 
/*
 * Compute per-aggregation-variable minimum and maximum populated bins for
 * quantize()/lquantize() aggregations, setting dtada_minbin, dtada_maxbin
 * and the DTRACE_A_MINMAXBIN flag on each element.  If "clear" is true,
 * only the clearing pass is performed.  Used when the "aggpack" option is
 * set.  Returns 0 on success, or -1 on allocation failure.
 */
static int
dt_aggregate_minmaxbin(dtrace_hdl_t *dtp, boolean_t clear)
{
	dt_ahashent_t *h;
	dtrace_aggdata_t **minmax;
	dtrace_aggid_t max = DTRACE_AGGVARIDNONE, id;
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahash_t *hash = &agp->dtat_hash;

	/*
	 * First pass:  reset every element's bins/flag and determine the
	 * maximum aggregation variable ID.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;

		if ((id = dt_aggregate_aggvarid(h)) > max)
			max = id;

		aggdata->dtada_minbin = 0;
		aggdata->dtada_maxbin = 0;
		aggdata->dtada_flags &= ~DTRACE_A_MINMAXBIN;
	}

	if (clear || max == DTRACE_AGGVARIDNONE)
		return (0);

	/* One slot per variable ID; IDs index this array directly. */
	minmax = dt_zalloc(dtp, (max + 1) * sizeof (dtrace_aggdata_t *));

	if (minmax == NULL)
		return (-1);

	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;
		dtrace_aggdesc_t *agg = aggdata->dtada_desc;
		dtrace_recdesc_t *rec;
		caddr_t data;
		int64_t *addr;
		int minbin = -1, maxbin = -1, i;
		int start = 0, size;

		/* The aggregating action is always the final record. */
		rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
		size = rec->dtrd_size / sizeof (int64_t);
		data = aggdata->dtada_data;
		addr = (int64_t *)(uintptr_t)(data + rec->dtrd_offset);

		switch (rec->dtrd_action) {
		case DTRACEAGG_LQUANTIZE:
			/*
			 * For lquantize(), we always display the entire range
			 * of the aggregation when aggpack is set.
			 */
			start = 1;
			minbin = start;
			maxbin = size - 1 - start;
			break;

		case DTRACEAGG_QUANTIZE:
			/* Find the first and last non-zero buckets. */
			for (i = start; i < size; i++) {
				if (!addr[i])
					continue;

				if (minbin == -1)
					minbin = i - start;

				maxbin = i - start;
			}

			if (minbin == -1) {
				/*
				 * If we have no data (e.g., due to a clear()
				 * or negative increments), we'll use the
				 * zero bucket as both our min and max.
				 */
				minbin = maxbin = DTRACE_QUANTIZE_ZEROBUCKET;
			}

			break;

		default:
			/* Only quantizing actions carry bins. */
			continue;
		}

		if (minmax[agg->dtagd_varid] == NULL) {
			/*
			 * The first element seen for a variable accumulates
			 * that variable's min/max.
			 */
			minmax[agg->dtagd_varid] = aggdata;
			aggdata->dtada_flags |= DTRACE_A_MINMAXBIN;
			aggdata->dtada_minbin = minbin;
			aggdata->dtada_maxbin = maxbin;
			continue;
		}

		if (minbin < minmax[agg->dtagd_varid]->dtada_minbin)
			minmax[agg->dtagd_varid]->dtada_minbin = minbin;

		if (maxbin > minmax[agg->dtagd_varid]->dtada_maxbin)
			minmax[agg->dtagd_varid]->dtada_maxbin = maxbin;
	}

	/*
	 * And now one final pass to set everyone's minbin and maxbin.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggdata_t *aggdata = &h->dtahe_data, *mm;
		dtrace_aggdesc_t *agg = aggdata->dtada_desc;

		if ((mm = minmax[agg->dtagd_varid]) == NULL || aggdata == mm)
			continue;

		aggdata->dtada_minbin = mm->dtada_minbin;
		aggdata->dtada_maxbin = mm->dtada_maxbin;
		aggdata->dtada_flags |= DTRACE_A_MINMAXBIN;
	}

	dt_free(dtp, minmax);

	return (0);
}
1529 
/*
 * Walk all aggregate elements in sorted order, calling "func" on each.
 * If "sfunc" is non-NULL it is used as the sort comparator directly,
 * ignoring the aggsort* options; otherwise dt_aggregate_qsort() derives
 * the comparator from those options.  If the "agghist" or "aggpack"
 * options are set, totals and/or min/max bins are computed up front and
 * cleared again before returning.  Returns 0 on success, -1 on failure.
 */
static int
dt_aggregate_walk_sorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg,
    int (*sfunc)(const void *, const void *))
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahashent_t *h, **sorted;
	dt_ahash_t *hash = &agp->dtat_hash;
	size_t i, nentries = 0;
	int rval = -1;

	agp->dtat_flags &= ~(DTRACE_A_TOTAL | DTRACE_A_MINMAXBIN);

	if (dtp->dt_options[DTRACEOPT_AGGHIST] != DTRACEOPT_UNSET) {
		agp->dtat_flags |= DTRACE_A_TOTAL;

		if (dt_aggregate_total(dtp, B_FALSE) != 0)
			return (-1);
	}

	if (dtp->dt_options[DTRACEOPT_AGGPACK] != DTRACEOPT_UNSET) {
		agp->dtat_flags |= DTRACE_A_MINMAXBIN;

		if (dt_aggregate_minmaxbin(dtp, B_FALSE) != 0)
			return (-1);
	}

	/* Count the entries, then gather them into a flat array to sort. */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall)
		nentries++;

	sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));

	if (sorted == NULL)
		goto out;

	for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall)
		sorted[i++] = h;

	/*
	 * dt_qsort_lock protects the file-scope globals that the option-
	 * driven comparators consult; hold it across the sort.
	 */
	(void) pthread_mutex_lock(&dt_qsort_lock);

	if (sfunc == NULL) {
		dt_aggregate_qsort(dtp, sorted, nentries,
		    sizeof (dt_ahashent_t *), NULL);
	} else {
		/*
		 * If we've been explicitly passed a sorting function,
		 * we'll use that -- ignoring the values of the "aggsortrev",
		 * "aggsortkey" and "aggsortkeypos" options.
		 */
		qsort(sorted, nentries, sizeof (dt_ahashent_t *), sfunc);
	}

	(void) pthread_mutex_unlock(&dt_qsort_lock);

	for (i = 0; i < nentries; i++) {
		h = sorted[i];

		if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1)
			goto out;
	}

	rval = 0;
out:
	/* Undo any totals/bins computed above before returning. */
	if (agp->dtat_flags & DTRACE_A_TOTAL)
		(void) dt_aggregate_total(dtp, B_TRUE);

	if (agp->dtat_flags & DTRACE_A_MINMAXBIN)
		(void) dt_aggregate_minmaxbin(dtp, B_TRUE);

	dt_free(dtp, sorted);
	return (rval);
}
1602 
/*
 * Walk the aggregate sorted per the aggsort* options (default comparator).
 */
int
dtrace_aggregate_walk_sorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func, arg, NULL));
}
1609 
/*
 * Walk the aggregate sorted with the dt_aggregate_varkeycmp comparator.
 */
int
dtrace_aggregate_walk_keysorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varkeycmp));
}
1617 
/*
 * Walk the aggregate sorted with the dt_aggregate_varvalcmp comparator.
 */
int
dtrace_aggregate_walk_valsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varvalcmp));
}
1625 
/*
 * Walk the aggregate sorted with the dt_aggregate_keyvarcmp comparator.
 */
int
dtrace_aggregate_walk_keyvarsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_keyvarcmp));
}
1633 
/*
 * Walk the aggregate sorted with the dt_aggregate_valvarcmp comparator.
 */
int
dtrace_aggregate_walk_valvarsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_valvarcmp));
}
1641 
/*
 * Walk the aggregate sorted with the dt_aggregate_varkeyrevcmp comparator.
 */
int
dtrace_aggregate_walk_keyrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varkeyrevcmp));
}
1649 
/*
 * Walk the aggregate sorted with the dt_aggregate_varvalrevcmp comparator.
 */
int
dtrace_aggregate_walk_valrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varvalrevcmp));
}
1657 
/*
 * Walk the aggregate sorted with the dt_aggregate_keyvarrevcmp comparator.
 */
int
dtrace_aggregate_walk_keyvarrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_keyvarrevcmp));
}
1665 
/*
 * Walk the aggregate sorted with the dt_aggregate_valvarrevcmp comparator.
 */
int
dtrace_aggregate_walk_valvarrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_valvarrevcmp));
}
1673 
/*
 * Walk the aggregate "joined" on tuple key:  for each distinct key, build a
 * bundle containing one dtrace_aggdata_t per requested aggregation variable
 * (substituting zero-filled prototype data when a variable has no value for
 * that key), then invoke "func" once per bundle with the representative key
 * data as element 0 and per-variable data in elements 1..naggvars.  Bundles
 * are sorted per the "aggsortpos" and other aggsort* options.  Returns 0 on
 * success, or -1 with dt_errno set on failure.
 */
int
dtrace_aggregate_walk_joined(dtrace_hdl_t *dtp, dtrace_aggvarid_t *aggvars,
    int naggvars, dtrace_aggregate_walk_joined_f *func, void *arg)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahashent_t *h, **sorted = NULL, ***bundle, **nbundle;
	const dtrace_aggdata_t **data;
	dt_ahashent_t *zaggdata = NULL;
	dt_ahash_t *hash = &agp->dtat_hash;
	size_t nentries = 0, nbundles = 0, start, zsize = 0, bundlesize;
	dtrace_aggvarid_t max = 0, aggvar;
	int rval = -1, *map, *remap = NULL;
	int i, j;
	dtrace_optval_t sortpos = dtp->dt_options[DTRACEOPT_AGGSORTPOS];

	/*
	 * If the sorting position is greater than the number of aggregation
	 * variable IDs, we silently set it to 0.
	 */
	if (sortpos == DTRACEOPT_UNSET || sortpos >= naggvars)
		sortpos = 0;

	/*
	 * First we need to translate the specified aggregation variable IDs
	 * into a linear map that will allow us to translate an aggregation
	 * variable ID into its position in the specified aggvars.
	 */
	for (i = 0; i < naggvars; i++) {
		/*
		 * NOTE(review): if dtrace_aggvarid_t is unsigned, the < 0
		 * comparison below is vacuous -- confirm against the type's
		 * declaration.
		 */
		if (aggvars[i] == DTRACE_AGGVARIDNONE || aggvars[i] < 0)
			return (dt_set_errno(dtp, EDT_BADAGGVAR));

		if (aggvars[i] > max)
			max = aggvars[i];
	}

	if ((map = dt_zalloc(dtp, (max + 1) * sizeof (int))) == NULL)
		return (-1);

	zaggdata = dt_zalloc(dtp, naggvars * sizeof (dt_ahashent_t));

	if (zaggdata == NULL)
		goto out;

	for (i = 0; i < naggvars; i++) {
		/* Rotate by the sort position so position 0 sorts first. */
		int ndx = i + sortpos;

		if (ndx >= naggvars)
			ndx -= naggvars;

		aggvar = aggvars[ndx];
		assert(aggvar <= max);

		if (map[aggvar]) {
			/*
			 * We have an aggregation variable that is present
			 * more than once in the array of aggregation
			 * variables.  While it's unclear why one might want
			 * to do this, it's legal.  To support this construct,
			 * we will allocate a remap that will indicate the
			 * position from which this aggregation variable
			 * should be pulled.  (That is, where the remap will
			 * map from one position to another.)
			 */
			if (remap == NULL) {
				remap = dt_zalloc(dtp, naggvars * sizeof (int));

				if (remap == NULL)
					goto out;
			}

			/*
			 * Given that the variable is already present, assert
			 * that following through the mapping and adjusting
			 * for the sort position yields the same aggregation
			 * variable ID.
			 */
			assert(aggvars[(map[aggvar] - 1 + sortpos) %
			    naggvars] == aggvars[ndx]);

			remap[i] = map[aggvar];
			continue;
		}

		/* map[] is 1-based so that zero can denote "not present". */
		map[aggvar] = i + 1;
	}

	/*
	 * We need to take two passes over the data to size our allocation, so
	 * we'll use the first pass to also fill in the zero-filled data to be
	 * used to properly format a zero-valued aggregation.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;
		int ndx;

		if ((id = dt_aggregate_aggvarid(h)) > max || !(ndx = map[id]))
			continue;

		if (zaggdata[ndx - 1].dtahe_size == 0) {
			zaggdata[ndx - 1].dtahe_size = h->dtahe_size;
			zaggdata[ndx - 1].dtahe_data = h->dtahe_data;
		}

		nentries++;
	}

	if (nentries == 0) {
		/*
		 * We couldn't find any entries; there is nothing else to do.
		 */
		rval = 0;
		goto out;
	}

	/*
	 * Before we sort the data, we're going to look for any holes in our
	 * zero-filled data.  This will occur if an aggregation variable that
	 * we are being asked to print has not yet been assigned the result of
	 * any aggregating action for _any_ tuple.  The issue becomes that we
	 * would like a zero value to be printed for all columns for this
	 * aggregation, but without any record description, we don't know the
	 * aggregating action that corresponds to the aggregation variable.  To
	 * try to find a match, we're simply going to lookup aggregation IDs
	 * (which are guaranteed to be contiguous and to start from 1), looking
	 * for the specified aggregation variable ID.  If we find a match,
	 * we'll use that.  If we iterate over all aggregation IDs and don't
	 * find a match, then we must be an anonymous enabling.  (Anonymous
	 * enablings can't currently derive either aggregation variable IDs or
	 * aggregation variable names given only an aggregation ID.)  In this
	 * obscure case (anonymous enabling, multiple aggregation printa() with
	 * some aggregations not represented for any tuple), our defined
	 * behavior is that the zero will be printed in the format of the first
	 * aggregation variable that contains any non-zero value.
	 */
	for (i = 0; i < naggvars; i++) {
		if (zaggdata[i].dtahe_size == 0) {
			dtrace_aggvarid_t aggvar;

			aggvar = aggvars[(i - sortpos + naggvars) % naggvars];
			assert(zaggdata[i].dtahe_data.dtada_data == NULL);

			for (j = DTRACE_AGGIDNONE + 1; ; j++) {
				dtrace_aggdesc_t *agg;
				dtrace_aggdata_t *aggdata;

				if (dt_aggid_lookup(dtp, j, &agg) != 0)
					break;

				if (agg->dtagd_varid != aggvar)
					continue;

				/*
				 * We have our description -- now we need to
				 * cons up the zaggdata entry for it.
				 */
				aggdata = &zaggdata[i].dtahe_data;
				aggdata->dtada_size = agg->dtagd_size;
				aggdata->dtada_desc = agg;
				aggdata->dtada_handle = dtp;
				(void) dt_epid_lookup(dtp, agg->dtagd_epid,
				    &aggdata->dtada_edesc,
				    &aggdata->dtada_pdesc);
				aggdata->dtada_normal = 1;
				zaggdata[i].dtahe_hashval = 0;
				zaggdata[i].dtahe_size = agg->dtagd_size;
				break;
			}

			if (zaggdata[i].dtahe_size == 0) {
				caddr_t data;

				/*
				 * We couldn't find this aggregation, meaning
				 * that we have never seen it before for any
				 * tuple _and_ this is an anonymous enabling.
				 * That is, we're in the obscure case outlined
				 * above.  In this case, our defined behavior
				 * is to format the data in the format of the
				 * first non-zero aggregation -- of which, of
				 * course, we know there to be at least one
				 * (or nentries would have been zero).
				 */
				for (j = 0; j < naggvars; j++) {
					if (zaggdata[j].dtahe_size != 0)
						break;
				}

				assert(j < naggvars);
				zaggdata[i] = zaggdata[j];

				data = zaggdata[i].dtahe_data.dtada_data;
				assert(data != NULL);
			}
		}
	}

	/*
	 * Now we need to allocate our zero-filled data for use for
	 * aggregations that don't have a value corresponding to a given key.
	 */
	for (i = 0; i < naggvars; i++) {
		dtrace_aggdata_t *aggdata = &zaggdata[i].dtahe_data;
		dtrace_aggdesc_t *aggdesc = aggdata->dtada_desc;
		dtrace_recdesc_t *rec;
		uint64_t larg;
		caddr_t zdata;

		zsize = zaggdata[i].dtahe_size;
		assert(zsize != 0);

		if ((zdata = dt_zalloc(dtp, zsize)) == NULL) {
			/*
			 * If we failed to allocated some zero-filled data, we
			 * need to zero out the remaining dtada_data pointers
			 * to prevent the wrong data from being freed below.
			 */
			for (j = i; j < naggvars; j++)
				zaggdata[j].dtahe_data.dtada_data = NULL;
			goto out;
		}

		aggvar = aggvars[(i - sortpos + naggvars) % naggvars];

		/*
		 * First, the easy bit.  To maintain compatibility with
		 * consumers that pull the compiler-generated ID out of the
		 * data, we put that ID at the top of the zero-filled data.
		 */
		rec = &aggdesc->dtagd_rec[0];
		/* LINTED - alignment */
		*((dtrace_aggvarid_t *)(zdata + rec->dtrd_offset)) = aggvar;

		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];

		/*
		 * Now for the more complicated part.  If (and only if) this
		 * is an lquantize() aggregating action, zero-filled data is
		 * not equivalent to an empty record:  we must also get the
		 * parameters for the lquantize().
		 */
		if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
			if (aggdata->dtada_data != NULL) {
				/*
				 * The easier case here is if we actually have
				 * some prototype data -- in which case we
				 * manually dig it out of the aggregation
				 * record.
				 */
				/* LINTED - alignment */
				larg = *((uint64_t *)(aggdata->dtada_data +
				    rec->dtrd_offset));
			} else {
				/*
				 * We don't have any prototype data.  As a
				 * result, we know that we _do_ have the
				 * compiler-generated information.  (If this
				 * were an anonymous enabling, all of our
				 * zero-filled data would have prototype data
				 * -- either directly or indirectly.) So as
				 * gross as it is, we'll grovel around in the
				 * compiler-generated information to find the
				 * lquantize() parameters.
				 */
				dtrace_stmtdesc_t *sdp;
				dt_ident_t *aid;
				dt_idsig_t *isp;

				sdp = (dtrace_stmtdesc_t *)(uintptr_t)
				    aggdesc->dtagd_rec[0].dtrd_uarg;
				aid = sdp->dtsd_aggdata;
				isp = (dt_idsig_t *)aid->di_data;
				assert(isp->dis_auxinfo != 0);
				larg = isp->dis_auxinfo;
			}

			/* LINTED - alignment */
			*((uint64_t *)(zdata + rec->dtrd_offset)) = larg;
		}

		aggdata->dtada_data = zdata;
	}

	/*
	 * Now that we've dealt with setting up our zero-filled data, we can
	 * allocate our sorted array, and take another pass over the data to
	 * fill it.
	 */
	sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));

	if (sorted == NULL)
		goto out;

	for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;

		if ((id = dt_aggregate_aggvarid(h)) > max || !map[id])
			continue;

		sorted[i++] = h;
	}

	assert(i == nentries);

	/*
	 * We've loaded our array; now we need to sort by value to allow us
	 * to create bundles of like value.  We're going to acquire the
	 * dt_qsort_lock here, and hold it across all of our subsequent
	 * comparison and sorting.
	 */
	(void) pthread_mutex_lock(&dt_qsort_lock);

	qsort(sorted, nentries, sizeof (dt_ahashent_t *),
	    dt_aggregate_keyvarcmp);

	/*
	 * Now we need to go through and create bundles.  Because the number
	 * of bundles is bounded by the size of the sorted array, we're going
	 * to reuse the underlying storage.  And note that "bundle" is an
	 * array of pointers to arrays of pointers to dt_ahashent_t -- making
	 * its type (regrettably) "dt_ahashent_t ***".  (Regrettable because
	 * '*' -- like '_' and 'X' -- should never appear in triplicate in
	 * an ideal world.)
	 */
	bundle = (dt_ahashent_t ***)sorted;

	for (i = 1, start = 0; i <= nentries; i++) {
		if (i < nentries &&
		    dt_aggregate_keycmp(&sorted[i], &sorted[i - 1]) == 0)
			continue;

		/*
		 * We have a bundle boundary.  Everything from start to
		 * (i - 1) belongs in one bundle.
		 */
		assert(i - start <= naggvars);
		bundlesize = (naggvars + 2) * sizeof (dt_ahashent_t *);

		if ((nbundle = dt_zalloc(dtp, bundlesize)) == NULL) {
			(void) pthread_mutex_unlock(&dt_qsort_lock);
			goto out;
		}

		for (j = start; j < i; j++) {
			dtrace_aggvarid_t id = dt_aggregate_aggvarid(sorted[j]);

			assert(id <= max);
			assert(map[id] != 0);
			assert(map[id] - 1 < naggvars);
			assert(nbundle[map[id] - 1] == NULL);
			nbundle[map[id] - 1] = sorted[j];

			/* Slot naggvars holds the representative key entry. */
			if (nbundle[naggvars] == NULL)
				nbundle[naggvars] = sorted[j];
		}

		for (j = 0; j < naggvars; j++) {
			if (nbundle[j] != NULL)
				continue;

			/*
			 * Before we assume that this aggregation variable
			 * isn't present (and fall back to using the
			 * zero-filled data allocated earlier), check the
			 * remap.  If we have a remapping, we'll drop it in
			 * here.  Note that we might be remapping an
			 * aggregation variable that isn't present for this
			 * key; in this case, the aggregation data that we
			 * copy will point to the zeroed data.
			 */
			if (remap != NULL && remap[j]) {
				assert(remap[j] - 1 < j);
				assert(nbundle[remap[j] - 1] != NULL);
				nbundle[j] = nbundle[remap[j] - 1];
			} else {
				nbundle[j] = &zaggdata[j];
			}
		}

		bundle[nbundles++] = nbundle;
		start = i;
	}

	/*
	 * Now we need to re-sort based on the first value.
	 */
	dt_aggregate_qsort(dtp, bundle, nbundles, sizeof (dt_ahashent_t **),
	    dt_aggregate_bundlecmp);

	(void) pthread_mutex_unlock(&dt_qsort_lock);

	/*
	 * We're done!  Now we just need to go back over the sorted bundles,
	 * calling the function.
	 */
	data = alloca((naggvars + 1) * sizeof (dtrace_aggdata_t *));

	for (i = 0; i < nbundles; i++) {
		for (j = 0; j < naggvars; j++)
			data[j + 1] = NULL;

		for (j = 0; j < naggvars; j++) {
			/* Undo the sortpos rotation applied when mapping. */
			int ndx = j - sortpos;

			if (ndx < 0)
				ndx += naggvars;

			assert(bundle[i][ndx] != NULL);
			data[j + 1] = &bundle[i][ndx]->dtahe_data;
		}

		for (j = 0; j < naggvars; j++)
			assert(data[j + 1] != NULL);

		/*
		 * The representative key is the last element in the bundle.
		 * Assert that we have one, and then set it to be the first
		 * element of data.
		 */
		assert(bundle[i][j] != NULL);
		data[0] = &bundle[i][j]->dtahe_data;

		if ((rval = func(data, naggvars + 1, arg)) == -1)
			goto out;
	}

	rval = 0;
out:
	for (i = 0; i < nbundles; i++)
		dt_free(dtp, bundle[i]);

	if (zaggdata != NULL) {
		for (i = 0; i < naggvars; i++)
			dt_free(dtp, zaggdata[i].dtahe_data.dtada_data);
	}

	dt_free(dtp, zaggdata);
	dt_free(dtp, sorted);
	dt_free(dtp, remap);
	dt_free(dtp, map);

	return (rval);
}
2116 
2117 int
2118 dtrace_aggregate_print(dtrace_hdl_t *dtp, FILE *fp,
2119     dtrace_aggregate_walk_f *func)
2120 {
2121 	dt_print_aggdata_t pd;
2122 
2123 	bzero(&pd, sizeof (pd));
2124 
2125 	pd.dtpa_dtp = dtp;
2126 	pd.dtpa_fp = fp;
2127 	pd.dtpa_allunprint = 1;
2128 
2129 	if (func == NULL)
2130 		func = dtrace_aggregate_walk_sorted;
2131 
2132 	if ((*func)(dtp, dt_print_agg, &pd) == -1)
2133 		return (dt_set_errno(dtp, dtp->dt_errno));
2134 
2135 	return (0);
2136 }
2137 
2138 void
2139 dtrace_aggregate_clear(dtrace_hdl_t *dtp)
2140 {
2141 	dt_aggregate_t *agp = &dtp->dt_aggregate;
2142 	dt_ahash_t *hash = &agp->dtat_hash;
2143 	dt_ahashent_t *h;
2144 	dtrace_aggdata_t *data;
2145 	dtrace_aggdesc_t *aggdesc;
2146 	dtrace_recdesc_t *rec;
2147 	int i, max_cpus = agp->dtat_maxcpu;
2148 
2149 	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
2150 		aggdesc = h->dtahe_data.dtada_desc;
2151 		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
2152 		data = &h->dtahe_data;
2153 
2154 		bzero(&data->dtada_data[rec->dtrd_offset], rec->dtrd_size);
2155 
2156 		if (data->dtada_percpu == NULL)
2157 			continue;
2158 
2159 		for (i = 0; i < max_cpus; i++)
2160 			bzero(data->dtada_percpu[i], rec->dtrd_size);
2161 	}
2162 }
2163 
/*
 * Tear down the aggregate:  free the hash buckets, every hash entry and
 * its data (including per-CPU copies), the snapshot buffer, and the CPU
 * list.  The hash fields are reset so the structure is left in its empty
 * state.
 */
void
dt_aggregate_destroy(dtrace_hdl_t *dtp)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahash_t *hash = &agp->dtat_hash;
	dt_ahashent_t *h, *next;
	dtrace_aggdata_t *aggdata;
	int i, max_cpus = agp->dtat_maxcpu;

	if (hash->dtah_hash == NULL) {
		/* Never initialized:  there must be no entries either. */
		assert(hash->dtah_all == NULL);
	} else {
		free(hash->dtah_hash);

		/* Walk the all-entries list, freeing as we go. */
		for (h = hash->dtah_all; h != NULL; h = next) {
			next = h->dtahe_nextall;

			aggdata = &h->dtahe_data;

			if (aggdata->dtada_percpu != NULL) {
				for (i = 0; i < max_cpus; i++)
					free(aggdata->dtada_percpu[i]);
				free(aggdata->dtada_percpu);
			}

			free(aggdata->dtada_data);
			free(h);
		}

		hash->dtah_hash = NULL;
		hash->dtah_all = NULL;
		hash->dtah_size = 0;
	}

	free(agp->dtat_buf.dtbd_data);
	free(agp->dtat_cpus);
}
2201