/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <stdlib.h>
#include <strings.h>
#include <errno.h>
#include <unistd.h>
#include <dt_impl.h>
#include <assert.h>
#include <alloca.h>
#include <limits.h>

#define	DTRACE_AHASHSIZE	32779		/* big 'ol prime */

/*
 * Because qsort(3C) does not allow an argument to be passed to a comparison
 * function, the variables that affect comparison must regrettably be global;
 * they are protected by a global static lock, dt_qsort_lock.
 */
static pthread_mutex_t dt_qsort_lock = PTHREAD_MUTEX_INITIALIZER;

static int dt_revsort;
static int dt_keysort;
static int dt_keypos;

#define	DT_LESSTHAN	(dt_revsort == 0 ? -1 : 1)
#define	DT_GREATERTHAN	(dt_revsort == 0 ? 1 : -1)

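/*
 * Aggregating functions take the existing data for a key and the newly
 * snapped data and combine them in place.  dt_aggregate_count() simply adds
 * the signed 64-bit values pairwise; it serves count(), sum(), avg() and
 * quantize() records, all of which aggregate by addition.
 */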
static void
dt_aggregate_count(int64_t *existing, int64_t *new, size_t size)
{
	int i;

	for (i = 0; i < size / sizeof (int64_t); i++)
		existing[i] = existing[i] + new[i];
}

static int
dt_aggregate_countcmp(int64_t *lhs, int64_t *rhs)
{
	int64_t lvar = *lhs;
	int64_t rvar = *rhs;

	if (lvar < rvar)
		return (DT_LESSTHAN);

	if (lvar > rvar)
		return (DT_GREATERTHAN);

	return (0);
}

/*ARGSUSED*/
static void
dt_aggregate_min(int64_t *existing, int64_t *new, size_t size)
{
	if (*new < *existing)
		*existing = *new;
}

/*ARGSUSED*/
static void
dt_aggregate_max(int64_t *existing, int64_t *new, size_t size)
{
	if (*new > *existing)
		*existing = *new;
}

static int
dt_aggregate_averagecmp(int64_t *lhs, int64_t *rhs)
{
	int64_t lavg = lhs[0] ? (lhs[1] / lhs[0]) : 0;
	int64_t ravg = rhs[0] ? (rhs[1] / rhs[0]) : 0;

	if (lavg < ravg)
		return (DT_LESSTHAN);

	if (lavg > ravg)
		return (DT_GREATERTHAN);

	return (0);
}

/*ARGSUSED*/
static void
dt_aggregate_lquantize(int64_t *existing, int64_t *new, size_t size)
{
	int64_t arg = *existing++;
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg);
	int i;

	for (i = 0; i <= levels + 1; i++)
		existing[i] = existing[i] + new[i + 1];
}

static long double
dt_aggregate_lquantizedsum(int64_t *lquanta)
{
	int64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg), i;
	long double total = (long double)lquanta[0] * (long double)(base - 1);

	for (i = 0; i < levels; base += step, i++)
		total += (long double)lquanta[i + 1] * (long double)base;

	return (total + (long double)lquanta[levels + 1] *
	    (long double)(base + 1));
}

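/*
 * Return the weight at value zero for an lquantize() record (or 0 if zero
 * falls outside the quantization range); this is used as a tie-breaker when
 * two linear quantizations have equal weighted sums.
 */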
static int64_t
dt_aggregate_lquantizedzero(int64_t *lquanta)
{
	int64_t arg = *lquanta++;
	int32_t base = DTRACE_LQUANTIZE_BASE(arg);
	uint16_t step = DTRACE_LQUANTIZE_STEP(arg);
	uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg), i;

	if (base - 1 == 0)
		return (lquanta[0]);

	for (i = 0; i < levels; base += step, i++) {
		if (base != 0)
			continue;

		return (lquanta[i + 1]);
	}

	if (base + 1 == 0)
		return (lquanta[levels + 1]);

	return (0);
}

static int
dt_aggregate_lquantizedcmp(int64_t *lhs, int64_t *rhs)
{
	long double lsum = dt_aggregate_lquantizedsum(lhs);
	long double rsum = dt_aggregate_lquantizedsum(rhs);
	int64_t lzero, rzero;

	if (lsum < rsum)
		return (DT_LESSTHAN);

	if (lsum > rsum)
		return (DT_GREATERTHAN);

	/*
	 * If they're both equal, then we will compare based on the weights at
	 * zero.  If the weights at zero are equal (or if zero is not within
	 * the range of the linear quantization), then this will be judged a
	 * tie and will be resolved based on the key comparison.
	 */
	lzero = dt_aggregate_lquantizedzero(lhs);
	rzero = dt_aggregate_lquantizedzero(rhs);

	if (lzero < rzero)
		return (DT_LESSTHAN);

	if (lzero > rzero)
		return (DT_GREATERTHAN);

	return (0);
}

static int
dt_aggregate_quantizedcmp(int64_t *lhs, int64_t *rhs)
{
	int nbuckets = DTRACE_QUANTIZE_NBUCKETS, i;
	long double ltotal = 0, rtotal = 0;
	int64_t lzero, rzero;

	for (i = 0; i < nbuckets; i++) {
		int64_t bucketval = DTRACE_QUANTIZE_BUCKETVAL(i);

		if (bucketval == 0) {
			lzero = lhs[i];
			rzero = rhs[i];
		}

		ltotal += (long double)bucketval * (long double)lhs[i];
		rtotal += (long double)bucketval * (long double)rhs[i];
	}

	if (ltotal < rtotal)
		return (DT_LESSTHAN);

	if (ltotal > rtotal)
		return (DT_GREATERTHAN);

	/*
	 * If they're both equal, then we will compare based on the weights at
	 * zero.  If the weights at zero are equal, then this will be judged a
	 * tie and will be resolved based on the key comparison.
	 */
	if (lzero < rzero)
		return (DT_LESSTHAN);

	if (lzero > rzero)
		return (DT_GREATERTHAN);

	return (0);
}

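/*
 * The dt_aggregate_{usym,umod,sym,mod}() helpers rewrite the address in a
 * usym()/umod()/sym()/mod() key record to the start of the enclosing symbol
 * or module, so that addresses that resolve to the same symbol or module
 * hash and compare as equal when aggregated.
 */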
static void
dt_aggregate_usym(dtrace_hdl_t *dtp, uint64_t *data)
{
	uint64_t pid = data[0];
	uint64_t *pc = &data[1];
	struct ps_prochandle *P;
	GElf_Sym sym;

	if (dtp->dt_vector != NULL)
		return;

	if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
		return;

	dt_proc_lock(dtp, P);

	if (Plookup_by_addr(P, *pc, NULL, 0, &sym) == 0)
		*pc = sym.st_value;

	dt_proc_unlock(dtp, P);
	dt_proc_release(dtp, P);
}

static void
dt_aggregate_umod(dtrace_hdl_t *dtp, uint64_t *data)
{
	uint64_t pid = data[0];
	uint64_t *pc = &data[1];
	struct ps_prochandle *P;
	const prmap_t *map;

	if (dtp->dt_vector != NULL)
		return;

	if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL)
		return;

	dt_proc_lock(dtp, P);

	if ((map = Paddr_to_map(P, *pc)) != NULL)
		*pc = map->pr_vaddr;

	dt_proc_unlock(dtp, P);
	dt_proc_release(dtp, P);
}

static void
dt_aggregate_sym(dtrace_hdl_t *dtp, uint64_t *data)
{
	GElf_Sym sym;
	uint64_t *pc = data;

	if (dtrace_lookup_by_addr(dtp, *pc, &sym, NULL) == 0)
		*pc = sym.st_value;
}

static void
dt_aggregate_mod(dtrace_hdl_t *dtp, uint64_t *data)
{
	uint64_t *pc = data;
	dt_module_t *dmp;

	if (dtp->dt_vector != NULL) {
		/*
		 * We don't have a way of just getting the module for a
		 * vectored open, and it doesn't seem to be worth defining
		 * one.  This means that use of mod() won't get true
		 * aggregation in the postmortem case (some modules may
		 * appear more than once in aggregation output).  It seems
		 * unlikely that anyone will ever notice or care...
		 */
		return;
	}

	for (dmp = dt_list_next(&dtp->dt_modlist); dmp != NULL;
	    dmp = dt_list_next(dmp)) {
		if (*pc - dmp->dm_text_va < dmp->dm_text_size) {
			*pc = dmp->dm_text_va;
			return;
		}
	}
}

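/*
 * Return the aggregation variable ID for a hash entry, caching it in the
 * aggregation description if it was only present in the compiler-generated
 * first record.
 */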
static dtrace_aggvarid_t
dt_aggregate_aggvarid(dt_ahashent_t *ent)
{
	dtrace_aggdesc_t *agg = ent->dtahe_data.dtada_desc;
	caddr_t data = ent->dtahe_data.dtada_data;
	dtrace_recdesc_t *rec = agg->dtagd_rec;

	/*
	 * First, we'll check the variable ID in the aggdesc.  If it's valid,
	 * we'll return it.  If not, we'll use the compiler-generated ID
	 * present as the first record.
	 */
	if (agg->dtagd_varid != DTRACE_AGGVARIDNONE)
		return (agg->dtagd_varid);

	agg->dtagd_varid = *((dtrace_aggvarid_t *)(uintptr_t)(data +
	    rec->dtrd_offset));

	return (agg->dtagd_varid);
}


static int
dt_aggregate_snap_cpu(dtrace_hdl_t *dtp, processorid_t cpu)
{
	dtrace_epid_t id;
	uint64_t hashval;
	size_t offs, roffs, size, ndx;
	int i, j, rval;
	caddr_t addr, data;
	dtrace_recdesc_t *rec;
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_aggdesc_t *agg;
	dt_ahash_t *hash = &agp->dtat_hash;
	dt_ahashent_t *h;
	dtrace_bufdesc_t b = agp->dtat_buf, *buf = &b;
	dtrace_aggdata_t *aggdata;
	int flags = agp->dtat_flags;

	buf->dtbd_cpu = cpu;

	if (dt_ioctl(dtp, DTRACEIOC_AGGSNAP, buf) == -1) {
		if (errno == ENOENT) {
			/*
			 * If that failed with ENOENT, it may be because the
			 * CPU was unconfigured.  This is okay; we'll just
			 * do nothing but return success.
			 */
			return (0);
		}

		return (dt_set_errno(dtp, errno));
	}

	if (buf->dtbd_drops != 0) {
		if (dt_handle_cpudrop(dtp, cpu,
		    DTRACEDROP_AGGREGATION, buf->dtbd_drops) == -1)
			return (-1);
	}

	if (buf->dtbd_size == 0)
		return (0);

	if (hash->dtah_hash == NULL) {
		size_t size;

		hash->dtah_size = DTRACE_AHASHSIZE;
		size = hash->dtah_size * sizeof (dt_ahashent_t *);

		if ((hash->dtah_hash = malloc(size)) == NULL)
			return (dt_set_errno(dtp, EDT_NOMEM));

		bzero(hash->dtah_hash, size);
	}

	for (offs = 0; offs < buf->dtbd_size; ) {
		/*
		 * We're guaranteed to have an ID.
		 */
		id = *((dtrace_epid_t *)((uintptr_t)buf->dtbd_data +
		    (uintptr_t)offs));

		if (id == DTRACE_AGGIDNONE) {
			/*
			 * This is filler to assure proper alignment of the
			 * next record; we simply ignore it.
			 */
			offs += sizeof (id);
			continue;
		}

		if ((rval = dt_aggid_lookup(dtp, id, &agg)) != 0)
			return (rval);

		addr = buf->dtbd_data + offs;
		size = agg->dtagd_size;
		hashval = 0;

		for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
			rec = &agg->dtagd_rec[j];
			roffs = rec->dtrd_offset;

			switch (rec->dtrd_action) {
			case DTRACEACT_USYM:
				dt_aggregate_usym(dtp,
				    /* LINTED - alignment */
				    (uint64_t *)&addr[roffs]);
				break;

			case DTRACEACT_UMOD:
				dt_aggregate_umod(dtp,
				    /* LINTED - alignment */
				    (uint64_t *)&addr[roffs]);
				break;

			case DTRACEACT_SYM:
				/* LINTED - alignment */
				dt_aggregate_sym(dtp, (uint64_t *)&addr[roffs]);
				break;

			case DTRACEACT_MOD:
				/* LINTED - alignment */
				dt_aggregate_mod(dtp, (uint64_t *)&addr[roffs]);
				break;

			default:
				break;
			}

			for (i = 0; i < rec->dtrd_size; i++)
				hashval += addr[roffs + i];
		}

		ndx = hashval % hash->dtah_size;

		for (h = hash->dtah_hash[ndx]; h != NULL; h = h->dtahe_next) {
			if (h->dtahe_hashval != hashval)
				continue;

			if (h->dtahe_size != size)
				continue;

			aggdata = &h->dtahe_data;
			data = aggdata->dtada_data;

			for (j = 0; j < agg->dtagd_nrecs - 1; j++) {
				rec = &agg->dtagd_rec[j];
				roffs = rec->dtrd_offset;

				for (i = 0; i < rec->dtrd_size; i++)
					if (addr[roffs + i] != data[roffs + i])
						goto hashnext;
			}

			/*
			 * We found it.  Now we need to apply the aggregating
			 * action on the data here.
			 */
			rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];
			roffs = rec->dtrd_offset;
			/* LINTED - alignment */
			h->dtahe_aggregate((int64_t *)&data[roffs],
			    /* LINTED - alignment */
			    (int64_t *)&addr[roffs], rec->dtrd_size);

			/*
			 * If we're keeping per CPU data, apply the aggregating
			 * action there as well.
			 */
			if (aggdata->dtada_percpu != NULL) {
				data = aggdata->dtada_percpu[cpu];

				/* LINTED - alignment */
				h->dtahe_aggregate((int64_t *)data,
				    /* LINTED - alignment */
				    (int64_t *)&addr[roffs], rec->dtrd_size);
			}

			goto bufnext;
hashnext:
			continue;
		}

		/*
		 * If we're here, we couldn't find an entry for this record.
		 */
		if ((h = malloc(sizeof (dt_ahashent_t))) == NULL)
			return (dt_set_errno(dtp, EDT_NOMEM));
		bzero(h, sizeof (dt_ahashent_t));
		aggdata = &h->dtahe_data;

		if ((aggdata->dtada_data = malloc(size)) == NULL) {
			free(h);
			return (dt_set_errno(dtp, EDT_NOMEM));
		}

		bcopy(addr, aggdata->dtada_data, size);
		aggdata->dtada_size = size;
		aggdata->dtada_desc = agg;
		aggdata->dtada_handle = dtp;
		(void) dt_epid_lookup(dtp, agg->dtagd_epid,
		    &aggdata->dtada_edesc, &aggdata->dtada_pdesc);
		aggdata->dtada_normal = 1;

		h->dtahe_hashval = hashval;
		h->dtahe_size = size;
		(void) dt_aggregate_aggvarid(h);

		rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1];

		if (flags & DTRACE_A_PERCPU) {
			int max_cpus = agp->dtat_maxcpu;
			caddr_t *percpu = malloc(max_cpus * sizeof (caddr_t));

			if (percpu == NULL) {
				free(aggdata->dtada_data);
				free(h);
				return (dt_set_errno(dtp, EDT_NOMEM));
			}

			for (j = 0; j < max_cpus; j++) {
				percpu[j] = malloc(rec->dtrd_size);

				if (percpu[j] == NULL) {
					while (--j >= 0)
						free(percpu[j]);

					free(aggdata->dtada_data);
					free(h);
					return (dt_set_errno(dtp, EDT_NOMEM));
				}

				if (j == cpu) {
					bcopy(&addr[rec->dtrd_offset],
					    percpu[j], rec->dtrd_size);
				} else {
					bzero(percpu[j], rec->dtrd_size);
				}
			}

			aggdata->dtada_percpu = percpu;
		}

		switch (rec->dtrd_action) {
		case DTRACEAGG_MIN:
			h->dtahe_aggregate = dt_aggregate_min;
			break;

		case DTRACEAGG_MAX:
			h->dtahe_aggregate = dt_aggregate_max;
			break;

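		/*
		 * lquantize() carries its parameters as the leading 64-bit
		 * word of the data, so it gets an aggregator that skips that
		 * word rather than summing it.
		 */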
		case DTRACEAGG_LQUANTIZE:
			h->dtahe_aggregate = dt_aggregate_lquantize;
			break;

		case DTRACEAGG_COUNT:
		case DTRACEAGG_SUM:
		case DTRACEAGG_AVG:
		case DTRACEAGG_QUANTIZE:
			h->dtahe_aggregate = dt_aggregate_count;
			break;

		default:
			return (dt_set_errno(dtp, EDT_BADAGG));
		}

		if (hash->dtah_hash[ndx] != NULL)
			hash->dtah_hash[ndx]->dtahe_prev = h;

		h->dtahe_next = hash->dtah_hash[ndx];
		hash->dtah_hash[ndx] = h;

		if (hash->dtah_all != NULL)
			hash->dtah_all->dtahe_prevall = h;

		h->dtahe_nextall = hash->dtah_all;
		hash->dtah_all = h;
bufnext:
		offs += agg->dtagd_size;
	}

	return (0);
}

int
dtrace_aggregate_snap(dtrace_hdl_t *dtp)
{
	int i, rval;
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	hrtime_t now = gethrtime();
	dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_AGGRATE];

	if (dtp->dt_lastagg != 0) {
		if (now - dtp->dt_lastagg < interval)
			return (0);

		dtp->dt_lastagg += interval;
	} else {
		dtp->dt_lastagg = now;
	}

	if (!dtp->dt_active)
		return (dt_set_errno(dtp, EINVAL));

	if (agp->dtat_buf.dtbd_size == 0)
		return (0);

	for (i = 0; i < agp->dtat_ncpus; i++) {
		if (rval = dt_aggregate_snap_cpu(dtp, agp->dtat_cpus[i]))
			return (rval);
	}

	return (0);
}

static int
dt_aggregate_hashcmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;

	if (lagg->dtagd_nrecs < ragg->dtagd_nrecs)
		return (DT_LESSTHAN);

	if (lagg->dtagd_nrecs > ragg->dtagd_nrecs)
		return (DT_GREATERTHAN);

	return (0);
}

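/*
 * Compare two hash entries by their aggregation variable ID alone; this is
 * the building block for the "sort by aggregation variable" orderings below.
 */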
static int
dt_aggregate_varcmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
	dtrace_aggvarid_t lid, rid;

	lid = dt_aggregate_aggvarid(lh);
	rid = dt_aggregate_aggvarid(rh);

	if (lid < rid)
		return (DT_LESSTHAN);

	if (lid > rid)
		return (DT_GREATERTHAN);

	return (0);
}

static int
dt_aggregate_keycmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
	dtrace_recdesc_t *lrec, *rrec;
	char *ldata, *rdata;
	int rval, i, j, keypos, nrecs;

	if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0)
		return (rval);

	nrecs = lagg->dtagd_nrecs - 1;
	assert(nrecs == ragg->dtagd_nrecs - 1);

	keypos = dt_keypos + 1 >= nrecs ? 0 : dt_keypos;

	for (i = 1; i < nrecs; i++) {
		uint64_t lval, rval;
		int ndx = i + keypos;

		if (ndx >= nrecs)
			ndx = ndx - nrecs + 1;

		lrec = &lagg->dtagd_rec[ndx];
		rrec = &ragg->dtagd_rec[ndx];

		ldata = lh->dtahe_data.dtada_data + lrec->dtrd_offset;
		rdata = rh->dtahe_data.dtada_data + rrec->dtrd_offset;

		if (lrec->dtrd_size < rrec->dtrd_size)
			return (DT_LESSTHAN);

		if (lrec->dtrd_size > rrec->dtrd_size)
			return (DT_GREATERTHAN);

		switch (lrec->dtrd_size) {
		case sizeof (uint64_t):
			/* LINTED - alignment */
			lval = *((uint64_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint64_t *)rdata);
			break;

		case sizeof (uint32_t):
			/* LINTED - alignment */
			lval = *((uint32_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint32_t *)rdata);
			break;

		case sizeof (uint16_t):
			/* LINTED - alignment */
			lval = *((uint16_t *)ldata);
			/* LINTED - alignment */
			rval = *((uint16_t *)rdata);
			break;

		case sizeof (uint8_t):
			lval = *((uint8_t *)ldata);
			rval = *((uint8_t *)rdata);
			break;

		default:
			switch (lrec->dtrd_action) {
			case DTRACEACT_UMOD:
			case DTRACEACT_UADDR:
			case DTRACEACT_USYM:
				for (j = 0; j < 2; j++) {
					/* LINTED - alignment */
					lval = ((uint64_t *)ldata)[j];
					/* LINTED - alignment */
					rval = ((uint64_t *)rdata)[j];

					if (lval < rval)
						return (DT_LESSTHAN);

					if (lval > rval)
						return (DT_GREATERTHAN);
				}

				break;

			default:
				for (j = 0; j < lrec->dtrd_size; j++) {
					lval = ((uint8_t *)ldata)[j];
					rval = ((uint8_t *)rdata)[j];

					if (lval < rval)
						return (DT_LESSTHAN);

					if (lval > rval)
						return (DT_GREATERTHAN);
				}
			}

			continue;
		}

		if (lval < rval)
			return (DT_LESSTHAN);

		if (lval > rval)
			return (DT_GREATERTHAN);
	}

	return (0);
}

static int
dt_aggregate_valcmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t *lh = *((dt_ahashent_t **)lhs);
	dt_ahashent_t *rh = *((dt_ahashent_t **)rhs);
	dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc;
	dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc;
	caddr_t ldata = lh->dtahe_data.dtada_data;
	caddr_t rdata = rh->dtahe_data.dtada_data;
	dtrace_recdesc_t *lrec, *rrec;
	int64_t *laddr, *raddr;
	int rval, i;

	if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0)
		return (rval);

	if (lagg->dtagd_nrecs > ragg->dtagd_nrecs)
		return (DT_GREATERTHAN);

	if (lagg->dtagd_nrecs < ragg->dtagd_nrecs)
		return (DT_LESSTHAN);

	for (i = 0; i < lagg->dtagd_nrecs; i++) {
		lrec = &lagg->dtagd_rec[i];
		rrec = &ragg->dtagd_rec[i];

		if (lrec->dtrd_offset < rrec->dtrd_offset)
			return (DT_LESSTHAN);

		if (lrec->dtrd_offset > rrec->dtrd_offset)
			return (DT_GREATERTHAN);

		if (lrec->dtrd_action < rrec->dtrd_action)
			return (DT_LESSTHAN);

		if (lrec->dtrd_action > rrec->dtrd_action)
			return (DT_GREATERTHAN);
	}

	laddr = (int64_t *)(uintptr_t)(ldata + lrec->dtrd_offset);
	raddr = (int64_t *)(uintptr_t)(rdata + rrec->dtrd_offset);

	switch (lrec->dtrd_action) {
	case DTRACEAGG_AVG:
		rval = dt_aggregate_averagecmp(laddr, raddr);
		break;

	case DTRACEAGG_QUANTIZE:
		rval = dt_aggregate_quantizedcmp(laddr, raddr);
		break;

	case DTRACEAGG_LQUANTIZE:
		rval = dt_aggregate_lquantizedcmp(laddr, raddr);
		break;

	case DTRACEAGG_COUNT:
	case DTRACEAGG_SUM:
	case DTRACEAGG_MIN:
	case DTRACEAGG_MAX:
		rval = dt_aggregate_countcmp(laddr, raddr);
		break;

	default:
		assert(0);
	}

	return (rval);
}

static int
dt_aggregate_valkeycmp(const void *lhs, const void *rhs)
{
	int rval;

	if ((rval = dt_aggregate_valcmp(lhs, rhs)) != 0)
		return (rval);

	/*
	 * If we're here, the values for the two aggregation elements are
	 * equal.  We already know that the key layout is the same for the two
	 * elements; we must now compare the keys themselves as a tie-breaker.
	 */
	return (dt_aggregate_keycmp(lhs, rhs));
}

static int
dt_aggregate_keyvarcmp(const void *lhs, const void *rhs)
{
	int rval;

	if ((rval = dt_aggregate_keycmp(lhs, rhs)) != 0)
		return (rval);

	return (dt_aggregate_varcmp(lhs, rhs));
}

static int
dt_aggregate_varkeycmp(const void *lhs, const void *rhs)
{
	int rval;

	if ((rval = dt_aggregate_varcmp(lhs, rhs)) != 0)
		return (rval);

	return (dt_aggregate_keycmp(lhs, rhs));
}

static int
dt_aggregate_valvarcmp(const void *lhs, const void *rhs)
{
	int rval;

	if ((rval = dt_aggregate_valkeycmp(lhs, rhs)) != 0)
		return (rval);

	return (dt_aggregate_varcmp(lhs, rhs));
}

static int
dt_aggregate_varvalcmp(const void *lhs, const void *rhs)
{
	int rval;

	if ((rval = dt_aggregate_varcmp(lhs, rhs)) != 0)
		return (rval);

	return (dt_aggregate_valkeycmp(lhs, rhs));
}

static int
dt_aggregate_keyvarrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_keyvarcmp(rhs, lhs));
}

static int
dt_aggregate_varkeyrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_varkeycmp(rhs, lhs));
}

static int
dt_aggregate_valvarrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_valvarcmp(rhs, lhs));
}

static int
dt_aggregate_varvalrevcmp(const void *lhs, const void *rhs)
{
	return (dt_aggregate_varvalcmp(rhs, lhs));
}

static int
dt_aggregate_bundlecmp(const void *lhs, const void *rhs)
{
	dt_ahashent_t **lh = *((dt_ahashent_t ***)lhs);
	dt_ahashent_t **rh = *((dt_ahashent_t ***)rhs);
	int i, rval;

	if (dt_keysort) {
		/*
		 * If we're sorting on keys, we need to scan until we find the
		 * last entry -- that's the representative key.  (The order of
		 * the bundle is values followed by key to accommodate the
		 * default behavior of sorting by value.)  If the keys are
		 * equal, we'll fall into the value comparison loop, below.
		 */
		for (i = 0; lh[i + 1] != NULL; i++)
			continue;

		assert(i != 0);
		assert(rh[i + 1] == NULL);

		if ((rval = dt_aggregate_keycmp(&lh[i], &rh[i])) != 0)
			return (rval);
	}

	for (i = 0; ; i++) {
		if (lh[i + 1] == NULL) {
			/*
			 * All of the values are equal; if we're sorting on
			 * keys, then we're only here because the keys were
			 * found to be equal and these records are therefore
			 * equal.  If we're not sorting on keys, we'll use the
			 * key comparison from the representative key as the
			 * tie-breaker.
			 */
			if (dt_keysort)
				return (0);

			assert(i != 0);
			assert(rh[i + 1] == NULL);
			return (dt_aggregate_keycmp(&lh[i], &rh[i]));
		} else {
			if ((rval = dt_aggregate_valcmp(&lh[i], &rh[i])) != 0)
				return (rval);
		}
	}
}

int
dt_aggregate_go(dtrace_hdl_t *dtp)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_optval_t size, cpu;
	dtrace_bufdesc_t *buf = &agp->dtat_buf;
	int rval, i;

	assert(agp->dtat_maxcpu == 0);
	assert(agp->dtat_ncpu == 0);
	assert(agp->dtat_cpus == NULL);

	agp->dtat_maxcpu = dt_sysconf(dtp, _SC_CPUID_MAX) + 1;
	agp->dtat_ncpu = dt_sysconf(dtp, _SC_NPROCESSORS_MAX);
	agp->dtat_cpus = malloc(agp->dtat_ncpu * sizeof (processorid_t));

	if (agp->dtat_cpus == NULL)
		return (dt_set_errno(dtp, EDT_NOMEM));

	/*
	 * Use the aggregation buffer size as reloaded from the kernel.
	 */
	size = dtp->dt_options[DTRACEOPT_AGGSIZE];

	rval = dtrace_getopt(dtp, "aggsize", &size);
	assert(rval == 0);

	if (size == 0 || size == DTRACEOPT_UNSET)
		return (0);

	buf = &agp->dtat_buf;
	buf->dtbd_size = size;

	if ((buf->dtbd_data = malloc(buf->dtbd_size)) == NULL)
		return (dt_set_errno(dtp, EDT_NOMEM));

	/*
	 * Now query for the CPUs enabled.
	 */
	rval = dtrace_getopt(dtp, "cpu", &cpu);
	assert(rval == 0 && cpu != DTRACEOPT_UNSET);

	if (cpu != DTRACE_CPUALL) {
		assert(cpu < agp->dtat_ncpu);
		agp->dtat_cpus[agp->dtat_ncpus++] = (processorid_t)cpu;

		return (0);
	}

	agp->dtat_ncpus = 0;
	for (i = 0; i < agp->dtat_maxcpu; i++) {
		if (dt_status(dtp, i) == -1)
			continue;

		agp->dtat_cpus[agp->dtat_ncpus++] = i;
	}

	return (0);
}

static int
dt_aggwalk_rval(dtrace_hdl_t *dtp, dt_ahashent_t *h, int rval)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dtrace_aggdata_t *data;
	dtrace_aggdesc_t *aggdesc;
	dtrace_recdesc_t *rec;
	int i;

	switch (rval) {
	case DTRACE_AGGWALK_NEXT:
		break;

	case DTRACE_AGGWALK_CLEAR: {
		uint32_t size, offs = 0;

		aggdesc = h->dtahe_data.dtada_desc;
		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
		size = rec->dtrd_size;
		data = &h->dtahe_data;

		if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
			offs = sizeof (uint64_t);
			size -= sizeof (uint64_t);
		}

		bzero(&data->dtada_data[rec->dtrd_offset] + offs, size);

		if (data->dtada_percpu == NULL)
			break;

		for (i = 0; i < dtp->dt_aggregate.dtat_maxcpu; i++)
			bzero(data->dtada_percpu[i] + offs, size);
		break;
	}

	case DTRACE_AGGWALK_ERROR:
		/*
		 * We assume that errno is already set in this case.
		 */
		return (dt_set_errno(dtp, errno));

	case DTRACE_AGGWALK_ABORT:
		return (dt_set_errno(dtp, EDT_DIRABORT));

	case DTRACE_AGGWALK_DENORMALIZE:
		h->dtahe_data.dtada_normal = 1;
		return (0);

	case DTRACE_AGGWALK_NORMALIZE:
		if (h->dtahe_data.dtada_normal == 0) {
			h->dtahe_data.dtada_normal = 1;
			return (dt_set_errno(dtp, EDT_BADRVAL));
		}

		return (0);

	case DTRACE_AGGWALK_REMOVE: {
		dtrace_aggdata_t *aggdata = &h->dtahe_data;
		int i, max_cpus = agp->dtat_maxcpu;

		/*
		 * First, remove this hash entry from its hash chain.
		 */
		if (h->dtahe_prev != NULL) {
			h->dtahe_prev->dtahe_next = h->dtahe_next;
		} else {
			dt_ahash_t *hash = &agp->dtat_hash;
			size_t ndx = h->dtahe_hashval % hash->dtah_size;

			assert(hash->dtah_hash[ndx] == h);
			hash->dtah_hash[ndx] = h->dtahe_next;
		}

		if (h->dtahe_next != NULL)
			h->dtahe_next->dtahe_prev = h->dtahe_prev;

		/*
		 * Now remove it from the list of all hash entries.
		 */
		if (h->dtahe_prevall != NULL) {
			h->dtahe_prevall->dtahe_nextall = h->dtahe_nextall;
		} else {
			dt_ahash_t *hash = &agp->dtat_hash;

			assert(hash->dtah_all == h);
			hash->dtah_all = h->dtahe_nextall;
		}

		if (h->dtahe_nextall != NULL)
			h->dtahe_nextall->dtahe_prevall = h->dtahe_prevall;

		/*
		 * We're unlinked.  We can safely destroy the data.
		 */
		if (aggdata->dtada_percpu != NULL) {
			for (i = 0; i < max_cpus; i++)
				free(aggdata->dtada_percpu[i]);
			free(aggdata->dtada_percpu);
		}

		free(aggdata->dtada_data);
		free(h);

		return (0);
	}

	default:
		return (dt_set_errno(dtp, EDT_BADRVAL));
	}

	return (0);
}

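/*
 * Sort entries with qsort(3C), loading the global sort state (reverse sort,
 * key sort, key position) from the handle's options and restoring the
 * previous state when done; callers serialize on dt_qsort_lock.
 */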
void
dt_aggregate_qsort(dtrace_hdl_t *dtp, void *base, size_t nel, size_t width,
    int (*compar)(const void *, const void *))
{
	int rev = dt_revsort, key = dt_keysort, keypos = dt_keypos;
	dtrace_optval_t keyposopt = dtp->dt_options[DTRACEOPT_AGGSORTKEYPOS];

	dt_revsort = (dtp->dt_options[DTRACEOPT_AGGSORTREV] != DTRACEOPT_UNSET);
	dt_keysort = (dtp->dt_options[DTRACEOPT_AGGSORTKEY] != DTRACEOPT_UNSET);

	if (keyposopt != DTRACEOPT_UNSET && keyposopt <= INT_MAX) {
		dt_keypos = (int)keyposopt;
	} else {
		dt_keypos = 0;
	}

	if (compar == NULL) {
		if (!dt_keysort) {
			compar = dt_aggregate_varvalcmp;
		} else {
			compar = dt_aggregate_varkeycmp;
		}
	}

	qsort(base, nel, width, compar);

	dt_revsort = rev;
	dt_keysort = key;
	dt_keypos = keypos;
}

int
dtrace_aggregate_walk(dtrace_hdl_t *dtp, dtrace_aggregate_f *func, void *arg)
{
	dt_ahashent_t *h, *next;
	dt_ahash_t *hash = &dtp->dt_aggregate.dtat_hash;

	for (h = hash->dtah_all; h != NULL; h = next) {
		/*
		 * dt_aggwalk_rval() can potentially remove the current hash
		 * entry; we need to load the next hash entry before calling
		 * into it.
		 */
		next = h->dtahe_nextall;

		if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1)
			return (-1);
	}

	return (0);
}

static int
dt_aggregate_walk_sorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg,
    int (*sfunc)(const void *, const void *))
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahashent_t *h, **sorted;
	dt_ahash_t *hash = &agp->dtat_hash;
	size_t i, nentries = 0;

	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall)
		nentries++;

	sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));

	if (sorted == NULL)
		return (-1);

	for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall)
		sorted[i++] = h;

	(void) pthread_mutex_lock(&dt_qsort_lock);

	if (sfunc == NULL) {
		dt_aggregate_qsort(dtp, sorted, nentries,
		    sizeof (dt_ahashent_t *), NULL);
	} else {
		/*
		 * If we've been explicitly passed a sorting function,
		 * we'll use that -- ignoring the values of the "aggsortrev",
		 * "aggsortkey" and "aggsortkeypos" options.
		 */
		qsort(sorted, nentries, sizeof (dt_ahashent_t *), sfunc);
	}

	(void) pthread_mutex_unlock(&dt_qsort_lock);

	for (i = 0; i < nentries; i++) {
		h = sorted[i];

		if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1) {
			dt_free(dtp, sorted);
			return (-1);
		}
	}

	dt_free(dtp, sorted);
	return (0);
}

int
dtrace_aggregate_walk_sorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func, arg, NULL));
}

int
dtrace_aggregate_walk_keysorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varkeycmp));
}

int
dtrace_aggregate_walk_valsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varvalcmp));
}

int
dtrace_aggregate_walk_keyvarsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_keyvarcmp));
}

int
dtrace_aggregate_walk_valvarsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_valvarcmp));
}

int
dtrace_aggregate_walk_keyrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varkeyrevcmp));
}

int
dtrace_aggregate_walk_valrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_varvalrevcmp));
}

int
dtrace_aggregate_walk_keyvarrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_keyvarrevcmp));
}

int
dtrace_aggregate_walk_valvarrevsorted(dtrace_hdl_t *dtp,
    dtrace_aggregate_f *func, void *arg)
{
	return (dt_aggregate_walk_sorted(dtp, func,
	    arg, dt_aggregate_valvarrevcmp));
}

int
dtrace_aggregate_walk_joined(dtrace_hdl_t *dtp, dtrace_aggvarid_t *aggvars,
    int naggvars, dtrace_aggregate_walk_joined_f *func, void *arg)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahashent_t *h, **sorted = NULL, ***bundle, **nbundle;
	const dtrace_aggdata_t **data;
	dt_ahashent_t *zaggdata = NULL;
	dt_ahash_t *hash = &agp->dtat_hash;
	size_t nentries = 0, nbundles = 0, start, zsize = 0, bundlesize;
	dtrace_aggvarid_t max = 0, aggvar;
	int rval = -1, *map, *remap = NULL;
	int i, j;
	dtrace_optval_t sortpos = dtp->dt_options[DTRACEOPT_AGGSORTPOS];

	/*
	 * If the sorting position is greater than the number of aggregation
	 * variable IDs, we silently set it to 0.
	 */
	if (sortpos == DTRACEOPT_UNSET || sortpos >= naggvars)
		sortpos = 0;

	/*
	 * First we need to translate the specified aggregation variable IDs
	 * into a linear map that will allow us to translate an aggregation
	 * variable ID into its position in the specified aggvars.
	 */
	for (i = 0; i < naggvars; i++) {
		if (aggvars[i] == DTRACE_AGGVARIDNONE || aggvars[i] < 0)
			return (dt_set_errno(dtp, EDT_BADAGGVAR));

		if (aggvars[i] > max)
			max = aggvars[i];
	}

	if ((map = dt_zalloc(dtp, (max + 1) * sizeof (int))) == NULL)
		return (-1);

	zaggdata = dt_zalloc(dtp, naggvars * sizeof (dt_ahashent_t));

	if (zaggdata == NULL)
		goto out;

	for (i = 0; i < naggvars; i++) {
		int ndx = i + sortpos;

		if (ndx >= naggvars)
			ndx -= naggvars;

		aggvar = aggvars[ndx];
		assert(aggvar <= max);

		if (map[aggvar]) {
			/*
			 * We have an aggregation variable that is present
			 * more than once in the array of aggregation
			 * variables.  While it's unclear why one might want
			 * to do this, it's legal.  To support this construct,
			 * we will allocate a remap that will indicate the
			 * position from which this aggregation variable
			 * should be pulled.  (That is, where the remap will
			 * map from one position to another.)
			 */
			if (remap == NULL) {
				remap = dt_zalloc(dtp, naggvars * sizeof (int));

				if (remap == NULL)
					goto out;
			}

			/*
			 * Given that the variable is already present, assert
			 * that following through the mapping and adjusting
			 * for the sort position yields the same aggregation
			 * variable ID.
			 */
			assert(aggvars[(map[aggvar] - 1 + sortpos) %
			    naggvars] == aggvars[ndx]);

			remap[i] = map[aggvar];
			continue;
		}

		map[aggvar] = i + 1;
	}

	/*
	 * We need to take two passes over the data to size our allocation, so
	 * we'll use the first pass to also fill in the zero-filled data to be
	 * used to properly format a zero-valued aggregation.
	 */
	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;
		int ndx;

		if ((id = dt_aggregate_aggvarid(h)) > max || !(ndx = map[id]))
			continue;

		if (zaggdata[ndx - 1].dtahe_size == 0) {
			zaggdata[ndx - 1].dtahe_size = h->dtahe_size;
			zaggdata[ndx - 1].dtahe_data = h->dtahe_data;
		}

		nentries++;
	}

	if (nentries == 0) {
		/*
		 * We couldn't find any entries; there is nothing else to do.
		 */
		rval = 0;
		goto out;
	}

	/*
	 * Before we sort the data, we're going to look for any holes in our
	 * zero-filled data.  This will occur if an aggregation variable that
	 * we are being asked to print has not yet been assigned the result of
	 * any aggregating action for _any_ tuple.  The issue becomes that we
	 * would like a zero value to be printed for all columns for this
	 * aggregation, but without any record description, we don't know the
	 * aggregating action that corresponds to the aggregation variable.  To
	 * try to find a match, we're simply going to lookup aggregation IDs
	 * (which are guaranteed to be contiguous and to start from 1), looking
	 * for the specified aggregation variable ID.  If we find a match,
	 * we'll use that.  If we iterate over all aggregation IDs and don't
	 * find a match, then we must be an anonymous enabling.  (Anonymous
	 * enablings can't currently derive either aggregation variable IDs or
	 * aggregation variable names given only an aggregation ID.)  In this
	 * obscure case (anonymous enabling, multiple aggregation printa() with
	 * some aggregations not represented for any tuple), our defined
	 * behavior is that the zero will be printed in the format of the first
	 * aggregation variable that contains any non-zero value.
	 */
	for (i = 0; i < naggvars; i++) {
		if (zaggdata[i].dtahe_size == 0) {
			dtrace_aggvarid_t aggvar;

			aggvar = aggvars[(i - sortpos + naggvars) % naggvars];
			assert(zaggdata[i].dtahe_data.dtada_data == NULL);

			for (j = DTRACE_AGGIDNONE + 1; ; j++) {
				dtrace_aggdesc_t *agg;
				dtrace_aggdata_t *aggdata;

				if (dt_aggid_lookup(dtp, j, &agg) != 0)
					break;

				if (agg->dtagd_varid != aggvar)
					continue;

				/*
				 * We have our description -- now we need to
				 * cons up the zaggdata entry for it.
				 */
				aggdata = &zaggdata[i].dtahe_data;
				aggdata->dtada_size = agg->dtagd_size;
				aggdata->dtada_desc = agg;
				aggdata->dtada_handle = dtp;
				(void) dt_epid_lookup(dtp, agg->dtagd_epid,
				    &aggdata->dtada_edesc,
				    &aggdata->dtada_pdesc);
				aggdata->dtada_normal = 1;
				zaggdata[i].dtahe_hashval = 0;
				zaggdata[i].dtahe_size = agg->dtagd_size;
				break;
			}

			if (zaggdata[i].dtahe_size == 0) {
				caddr_t data;

				/*
				 * We couldn't find this aggregation, meaning
				 * that we have never seen it before for any
				 * tuple _and_ this is an anonymous enabling.
				 * That is, we're in the obscure case outlined
				 * above.  In this case, our defined behavior
				 * is to format the data in the format of the
				 * first non-zero aggregation -- of which, of
				 * course, we know there to be at least one
				 * (or nentries would have been zero).
				 */
				for (j = 0; j < naggvars; j++) {
					if (zaggdata[j].dtahe_size != 0)
						break;
				}

				assert(j < naggvars);
				zaggdata[i] = zaggdata[j];

				data = zaggdata[i].dtahe_data.dtada_data;
				assert(data != NULL);
			}
		}
	}

	/*
	 * Now we need to allocate our zero-filled data for use for
	 * aggregations that don't have a value corresponding to a given key.
	 */
	for (i = 0; i < naggvars; i++) {
		dtrace_aggdata_t *aggdata = &zaggdata[i].dtahe_data;
		dtrace_aggdesc_t *aggdesc = aggdata->dtada_desc;
		dtrace_recdesc_t *rec;
		uint64_t larg;
		caddr_t zdata;

		zsize = zaggdata[i].dtahe_size;
		assert(zsize != 0);

		if ((zdata = dt_zalloc(dtp, zsize)) == NULL) {
			/*
			 * If we failed to allocate some zero-filled data, we
			 * need to zero out the remaining dtada_data pointers
			 * to prevent the wrong data from being freed below.
			 */
			for (j = i; j < naggvars; j++)
				zaggdata[j].dtahe_data.dtada_data = NULL;
			goto out;
		}

		aggvar = aggvars[(i - sortpos + naggvars) % naggvars];

		/*
		 * First, the easy bit.  To maintain compatibility with
		 * consumers that pull the compiler-generated ID out of the
		 * data, we put that ID at the top of the zero-filled data.
		 */
		rec = &aggdesc->dtagd_rec[0];
		/* LINTED - alignment */
		*((dtrace_aggvarid_t *)(zdata + rec->dtrd_offset)) = aggvar;

		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];

		/*
		 * Now for the more complicated part.  If (and only if) this
		 * is an lquantize() aggregating action, zero-filled data is
		 * not equivalent to an empty record: we must also get the
		 * parameters for the lquantize().
		 */
		if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) {
			if (aggdata->dtada_data != NULL) {
				/*
				 * The easier case here is if we actually have
				 * some prototype data -- in which case we
				 * manually dig it out of the aggregation
				 * record.
				 */
				/* LINTED - alignment */
				larg = *((uint64_t *)(aggdata->dtada_data +
				    rec->dtrd_offset));
			} else {
				/*
				 * We don't have any prototype data.  As a
				 * result, we know that we _do_ have the
				 * compiler-generated information.  (If this
				 * were an anonymous enabling, all of our
				 * zero-filled data would have prototype data
				 * -- either directly or indirectly.)  So as
				 * gross as it is, we'll grovel around in the
				 * compiler-generated information to find the
				 * lquantize() parameters.
				 */
				dtrace_stmtdesc_t *sdp;
				dt_ident_t *aid;
				dt_idsig_t *isp;

				sdp = (dtrace_stmtdesc_t *)(uintptr_t)
				    aggdesc->dtagd_rec[0].dtrd_uarg;
				aid = sdp->dtsd_aggdata;
				isp = (dt_idsig_t *)aid->di_data;
				assert(isp->dis_auxinfo != 0);
				larg = isp->dis_auxinfo;
			}

			/* LINTED - alignment */
			*((uint64_t *)(zdata + rec->dtrd_offset)) = larg;
		}

		aggdata->dtada_data = zdata;
	}

	/*
	 * Now that we've dealt with setting up our zero-filled data, we can
	 * allocate our sorted array, and take another pass over the data to
	 * fill it.
	 */
	sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *));

	if (sorted == NULL)
		goto out;

	for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall) {
		dtrace_aggvarid_t id;

		if ((id = dt_aggregate_aggvarid(h)) > max || !map[id])
			continue;

		sorted[i++] = h;
	}

	assert(i == nentries);

	/*
	 * We've loaded our array; now we need to sort by value to allow us
	 * to create bundles of like value.  We're going to acquire the
	 * dt_qsort_lock here, and hold it across all of our subsequent
	 * comparison and sorting.
	 */
	(void) pthread_mutex_lock(&dt_qsort_lock);

	qsort(sorted, nentries, sizeof (dt_ahashent_t *),
	    dt_aggregate_keyvarcmp);

	/*
	 * Now we need to go through and create bundles.  Because the number
	 * of bundles is bounded by the size of the sorted array, we're going
	 * to reuse the underlying storage.  And note that "bundle" is an
	 * array of pointers to arrays of pointers to dt_ahashent_t -- making
	 * its type (regrettably) "dt_ahashent_t ***".  (Regrettable because
	 * '*' -- like '_' and 'X' -- should never appear in triplicate in
	 * an ideal world.)
	 */
	bundle = (dt_ahashent_t ***)sorted;

	for (i = 1, start = 0; i <= nentries; i++) {
		if (i < nentries &&
		    dt_aggregate_keycmp(&sorted[i], &sorted[i - 1]) == 0)
			continue;

		/*
		 * We have a bundle boundary.  Everything from start to
		 * (i - 1) belongs in one bundle.
		 */
		assert(i - start <= naggvars);
		bundlesize = (naggvars + 2) * sizeof (dt_ahashent_t *);

		if ((nbundle = dt_zalloc(dtp, bundlesize)) == NULL) {
			(void) pthread_mutex_unlock(&dt_qsort_lock);
			goto out;
		}

		for (j = start; j < i; j++) {
			dtrace_aggvarid_t id = dt_aggregate_aggvarid(sorted[j]);

			assert(id <= max);
			assert(map[id] != 0);
			assert(map[id] - 1 < naggvars);
			assert(nbundle[map[id] - 1] == NULL);
			nbundle[map[id] - 1] = sorted[j];

			if (nbundle[naggvars] == NULL)
				nbundle[naggvars] = sorted[j];
		}

		for (j = 0; j < naggvars; j++) {
			if (nbundle[j] != NULL)
				continue;

			/*
			 * Before we assume that this aggregation variable
			 * isn't present (and fall back to using the
			 * zero-filled data allocated earlier), check the
			 * remap.  If we have a remapping, we'll drop it in
			 * here.  Note that we might be remapping an
			 * aggregation variable that isn't present for this
			 * key; in this case, the aggregation data that we
			 * copy will point to the zeroed data.
			 */
			if (remap != NULL && remap[j]) {
				assert(remap[j] - 1 < j);
				assert(nbundle[remap[j] - 1] != NULL);
				nbundle[j] = nbundle[remap[j] - 1];
			} else {
				nbundle[j] = &zaggdata[j];
			}
		}

		bundle[nbundles++] = nbundle;
		start = i;
	}

	/*
	 * Now we need to re-sort based on the first value.
	 */
	dt_aggregate_qsort(dtp, bundle, nbundles, sizeof (dt_ahashent_t **),
	    dt_aggregate_bundlecmp);

	(void) pthread_mutex_unlock(&dt_qsort_lock);

	/*
	 * We're done!  Now we just need to go back over the sorted bundles,
	 * calling the function.
	 */
	data = alloca((naggvars + 1) * sizeof (dtrace_aggdata_t *));

	for (i = 0; i < nbundles; i++) {
		for (j = 0; j < naggvars; j++)
			data[j + 1] = NULL;

		for (j = 0; j < naggvars; j++) {
			int ndx = j - sortpos;

			if (ndx < 0)
				ndx += naggvars;

			assert(bundle[i][ndx] != NULL);
			data[j + 1] = &bundle[i][ndx]->dtahe_data;
		}

		for (j = 0; j < naggvars; j++)
			assert(data[j + 1] != NULL);

		/*
		 * The representative key is the last element in the bundle.
		 * Assert that we have one, and then set it to be the first
		 * element of data.
		 */
		assert(bundle[i][j] != NULL);
		data[0] = &bundle[i][j]->dtahe_data;

		if ((rval = func(data, naggvars + 1, arg)) == -1)
			goto out;
	}

	rval = 0;
out:
	for (i = 0; i < nbundles; i++)
		dt_free(dtp, bundle[i]);

	if (zaggdata != NULL) {
		for (i = 0; i < naggvars; i++)
			dt_free(dtp, zaggdata[i].dtahe_data.dtada_data);
	}

	dt_free(dtp, zaggdata);
	dt_free(dtp, sorted);
	dt_free(dtp, remap);
	dt_free(dtp, map);

	return (rval);
}
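
/*
 * Illustrative sketch (not part of this file): one way a consumer might use
 * dtrace_aggregate_walk_joined() to walk several aggregations joined by key,
 * much as dtrace(1M) does for printa() with multiple aggregation arguments.
 * The callback and helper names are hypothetical, and the aggregation
 * variable IDs are assumed to have been gathered elsewhere (typically from
 * the records of a printa() statement).  The callback sees the representative
 * key in data[0] and one dtrace_aggdata_t per requested variable -- zero-
 * filled if that variable has no value for the key -- in the remaining slots.
 */
#if 0
static int
print_joined(const dtrace_aggdata_t **data, int ndata, void *arg)
{
	FILE *fp = arg;

	/*
	 * data[0] is the representative key; data[1] through data[ndata - 1]
	 * are the per-variable aggregation data.  A real consumer would
	 * format these using the record descriptions in each dtada_desc;
	 * here we only report how many joined values were presented.
	 */
	(void) fprintf(fp, "key with %d joined aggregation values\n",
	    ndata - 1);

	return (0);
}

static int
walk_joined_example(dtrace_hdl_t *dtp, dtrace_aggvarid_t *aggvars,
    int naggvars)
{
	/*
	 * A return value of -1 means that either the walk itself or the
	 * callback failed; dtrace_errno(dtp) then describes the error.
	 */
	return (dtrace_aggregate_walk_joined(dtp, aggvars, naggvars,
	    print_joined, stdout));
}
#endif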

int
dtrace_aggregate_print(dtrace_hdl_t *dtp, FILE *fp,
    dtrace_aggregate_walk_f *func)
{
	dt_print_aggdata_t pd;

	pd.dtpa_dtp = dtp;
	pd.dtpa_fp = fp;
	pd.dtpa_allunprint = 1;

	if (func == NULL)
		func = dtrace_aggregate_walk_sorted;

	if ((*func)(dtp, dt_print_agg, &pd) == -1)
		return (dt_set_errno(dtp, dtp->dt_errno));

	return (0);
}

void
dtrace_aggregate_clear(dtrace_hdl_t *dtp)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahash_t *hash = &agp->dtat_hash;
	dt_ahashent_t *h;
	dtrace_aggdata_t *data;
	dtrace_aggdesc_t *aggdesc;
	dtrace_recdesc_t *rec;
	int i, max_cpus = agp->dtat_maxcpu;

	for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) {
		aggdesc = h->dtahe_data.dtada_desc;
		rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1];
		data = &h->dtahe_data;

		bzero(&data->dtada_data[rec->dtrd_offset], rec->dtrd_size);

		if (data->dtada_percpu == NULL)
			continue;

		for (i = 0; i < max_cpus; i++)
			bzero(data->dtada_percpu[i], rec->dtrd_size);
	}
}

void
dt_aggregate_destroy(dtrace_hdl_t *dtp)
{
	dt_aggregate_t *agp = &dtp->dt_aggregate;
	dt_ahash_t *hash = &agp->dtat_hash;
	dt_ahashent_t *h, *next;
	dtrace_aggdata_t *aggdata;
	int i, max_cpus = agp->dtat_maxcpu;

	if (hash->dtah_hash == NULL) {
		assert(hash->dtah_all == NULL);
	} else {
		free(hash->dtah_hash);

		for (h = hash->dtah_all; h != NULL; h = next) {
			next = h->dtahe_nextall;

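			/*
			 * For each entry on the global list, free the
			 * per-CPU snapshots (if any), then the copied
			 * aggregation data, and finally the entry itself.
			 */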
			aggdata = &h->dtahe_data;

			if (aggdata->dtada_percpu != NULL) {
				for (i = 0; i < max_cpus; i++)
					free(aggdata->dtada_percpu[i]);
				free(aggdata->dtada_percpu);
			}

			free(aggdata->dtada_data);
			free(h);
		}

		hash->dtah_hash = NULL;
		hash->dtah_all = NULL;
		hash->dtah_size = 0;
	}

	free(agp->dtat_buf.dtbd_data);
	free(agp->dtat_cpus);
}
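
/*
 * Illustrative sketch (not part of this file): the typical consumer pattern
 * is to snapshot the kernel's aggregation buffers and then print them,
 * letting dtrace_aggregate_print() fall back to dtrace_aggregate_walk_sorted()
 * when no walker is supplied.  The helper name is hypothetical and error
 * handling is abbreviated.
 */
#if 0
static int
print_aggregations(dtrace_hdl_t *dtp)
{
	/* Pull the current aggregation data into libdtrace's hash. */
	if (dtrace_aggregate_snap(dtp) == -1)
		return (-1);

	/* Passing NULL selects the default (sorted) walker. */
	return (dtrace_aggregate_print(dtp, stdout, NULL));
}
#endif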