/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
250Sstevel@tonic-gate */ 260Sstevel@tonic-gate 270Sstevel@tonic-gate #pragma ident "%Z%%M% %I% %E% SMI" 280Sstevel@tonic-gate 290Sstevel@tonic-gate #include <stdlib.h> 300Sstevel@tonic-gate #include <strings.h> 310Sstevel@tonic-gate #include <errno.h> 320Sstevel@tonic-gate #include <unistd.h> 330Sstevel@tonic-gate #include <dt_impl.h> 340Sstevel@tonic-gate #include <assert.h> 35*1017Sbmc #include <alloca.h> 36*1017Sbmc #include <limits.h> 370Sstevel@tonic-gate 380Sstevel@tonic-gate #define DTRACE_AHASHSIZE 32779 /* big 'ol prime */ 390Sstevel@tonic-gate 40*1017Sbmc /* 41*1017Sbmc * Because qsort(3C) does not allow an argument to be passed to a comparison 42*1017Sbmc * function, the variables that affect comparison must regrettably be global; 43*1017Sbmc * they are protected by a global static lock, dt_qsort_lock. 44*1017Sbmc */ 45*1017Sbmc static pthread_mutex_t dt_qsort_lock = PTHREAD_MUTEX_INITIALIZER; 46*1017Sbmc 47*1017Sbmc static int dt_revsort; 48*1017Sbmc static int dt_keysort; 49*1017Sbmc static int dt_keypos; 50*1017Sbmc 51*1017Sbmc #define DT_LESSTHAN (dt_revsort == 0 ? -1 : 1) 52*1017Sbmc #define DT_GREATERTHAN (dt_revsort == 0 ? 
1 : -1) 53*1017Sbmc 540Sstevel@tonic-gate static void 55491Sbmc dt_aggregate_count(int64_t *existing, int64_t *new, size_t size) 560Sstevel@tonic-gate { 570Sstevel@tonic-gate int i; 580Sstevel@tonic-gate 59491Sbmc for (i = 0; i < size / sizeof (int64_t); i++) 600Sstevel@tonic-gate existing[i] = existing[i] + new[i]; 610Sstevel@tonic-gate } 620Sstevel@tonic-gate 630Sstevel@tonic-gate static int 64491Sbmc dt_aggregate_countcmp(int64_t *lhs, int64_t *rhs) 650Sstevel@tonic-gate { 66491Sbmc int64_t lvar = *lhs; 67491Sbmc int64_t rvar = *rhs; 680Sstevel@tonic-gate 69*1017Sbmc if (lvar < rvar) 70*1017Sbmc return (DT_LESSTHAN); 710Sstevel@tonic-gate 72*1017Sbmc if (lvar > rvar) 73*1017Sbmc return (DT_GREATERTHAN); 740Sstevel@tonic-gate 750Sstevel@tonic-gate return (0); 760Sstevel@tonic-gate } 770Sstevel@tonic-gate 780Sstevel@tonic-gate /*ARGSUSED*/ 790Sstevel@tonic-gate static void 80491Sbmc dt_aggregate_min(int64_t *existing, int64_t *new, size_t size) 810Sstevel@tonic-gate { 820Sstevel@tonic-gate if (*new < *existing) 830Sstevel@tonic-gate *existing = *new; 840Sstevel@tonic-gate } 850Sstevel@tonic-gate 860Sstevel@tonic-gate /*ARGSUSED*/ 870Sstevel@tonic-gate static void 88491Sbmc dt_aggregate_max(int64_t *existing, int64_t *new, size_t size) 890Sstevel@tonic-gate { 900Sstevel@tonic-gate if (*new > *existing) 910Sstevel@tonic-gate *existing = *new; 920Sstevel@tonic-gate } 930Sstevel@tonic-gate 940Sstevel@tonic-gate static int 95491Sbmc dt_aggregate_averagecmp(int64_t *lhs, int64_t *rhs) 960Sstevel@tonic-gate { 97491Sbmc int64_t lavg = lhs[0] ? (lhs[1] / lhs[0]) : 0; 98491Sbmc int64_t ravg = rhs[0] ? 
(rhs[1] / rhs[0]) : 0; 990Sstevel@tonic-gate 100*1017Sbmc if (lavg < ravg) 101*1017Sbmc return (DT_LESSTHAN); 1020Sstevel@tonic-gate 103*1017Sbmc if (lavg > ravg) 104*1017Sbmc return (DT_GREATERTHAN); 1050Sstevel@tonic-gate 1060Sstevel@tonic-gate return (0); 1070Sstevel@tonic-gate } 1080Sstevel@tonic-gate 1090Sstevel@tonic-gate /*ARGSUSED*/ 1100Sstevel@tonic-gate static void 111491Sbmc dt_aggregate_lquantize(int64_t *existing, int64_t *new, size_t size) 1120Sstevel@tonic-gate { 113491Sbmc int64_t arg = *existing++; 1140Sstevel@tonic-gate uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg); 1150Sstevel@tonic-gate int i; 1160Sstevel@tonic-gate 1170Sstevel@tonic-gate for (i = 0; i <= levels + 1; i++) 1180Sstevel@tonic-gate existing[i] = existing[i] + new[i + 1]; 1190Sstevel@tonic-gate } 1200Sstevel@tonic-gate 121457Sbmc static long double 122491Sbmc dt_aggregate_lquantizedsum(int64_t *lquanta) 1230Sstevel@tonic-gate { 124491Sbmc int64_t arg = *lquanta++; 1250Sstevel@tonic-gate int32_t base = DTRACE_LQUANTIZE_BASE(arg); 1260Sstevel@tonic-gate uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 1270Sstevel@tonic-gate uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg), i; 128457Sbmc long double total = (long double)lquanta[0] * (long double)(base - 1); 1290Sstevel@tonic-gate 1300Sstevel@tonic-gate for (i = 0; i < levels; base += step, i++) 131457Sbmc total += (long double)lquanta[i + 1] * (long double)base; 1320Sstevel@tonic-gate 133457Sbmc return (total + (long double)lquanta[levels + 1] * 134457Sbmc (long double)(base + 1)); 1350Sstevel@tonic-gate } 1360Sstevel@tonic-gate 137491Sbmc static int64_t 138491Sbmc dt_aggregate_lquantizedzero(int64_t *lquanta) 139491Sbmc { 140491Sbmc int64_t arg = *lquanta++; 141491Sbmc int32_t base = DTRACE_LQUANTIZE_BASE(arg); 142491Sbmc uint16_t step = DTRACE_LQUANTIZE_STEP(arg); 143491Sbmc uint16_t levels = DTRACE_LQUANTIZE_LEVELS(arg), i; 144491Sbmc 145491Sbmc if (base - 1 == 0) 146491Sbmc return (lquanta[0]); 147491Sbmc 148491Sbmc for (i = 0; i < 
levels; base += step, i++) { 149491Sbmc if (base != 0) 150491Sbmc continue; 151491Sbmc 152491Sbmc return (lquanta[i + 1]); 153491Sbmc } 154491Sbmc 155491Sbmc if (base + 1 == 0) 156491Sbmc return (lquanta[levels + 1]); 157491Sbmc 158491Sbmc return (0); 159491Sbmc } 160491Sbmc 1610Sstevel@tonic-gate static int 162491Sbmc dt_aggregate_lquantizedcmp(int64_t *lhs, int64_t *rhs) 1630Sstevel@tonic-gate { 164457Sbmc long double lsum = dt_aggregate_lquantizedsum(lhs); 165457Sbmc long double rsum = dt_aggregate_lquantizedsum(rhs); 166491Sbmc int64_t lzero, rzero; 1670Sstevel@tonic-gate 168*1017Sbmc if (lsum < rsum) 169*1017Sbmc return (DT_LESSTHAN); 1700Sstevel@tonic-gate 171*1017Sbmc if (lsum > rsum) 172*1017Sbmc return (DT_GREATERTHAN); 1730Sstevel@tonic-gate 174491Sbmc /* 175491Sbmc * If they're both equal, then we will compare based on the weights at 176491Sbmc * zero. If the weights at zero are equal (or if zero is not within 177491Sbmc * the range of the linear quantization), then this will be judged a 178491Sbmc * tie and will be resolved based on the key comparison. 
179491Sbmc */ 180491Sbmc lzero = dt_aggregate_lquantizedzero(lhs); 181491Sbmc rzero = dt_aggregate_lquantizedzero(rhs); 182491Sbmc 183*1017Sbmc if (lzero < rzero) 184*1017Sbmc return (DT_LESSTHAN); 185491Sbmc 186*1017Sbmc if (lzero > rzero) 187*1017Sbmc return (DT_GREATERTHAN); 188491Sbmc 1890Sstevel@tonic-gate return (0); 1900Sstevel@tonic-gate } 1910Sstevel@tonic-gate 1920Sstevel@tonic-gate static int 193491Sbmc dt_aggregate_quantizedcmp(int64_t *lhs, int64_t *rhs) 1940Sstevel@tonic-gate { 1950Sstevel@tonic-gate int nbuckets = DTRACE_QUANTIZE_NBUCKETS, i; 196457Sbmc long double ltotal = 0, rtotal = 0; 197491Sbmc int64_t lzero, rzero; 1980Sstevel@tonic-gate 1990Sstevel@tonic-gate for (i = 0; i < nbuckets; i++) { 2000Sstevel@tonic-gate int64_t bucketval = DTRACE_QUANTIZE_BUCKETVAL(i); 2010Sstevel@tonic-gate 202491Sbmc if (bucketval == 0) { 203491Sbmc lzero = lhs[i]; 204491Sbmc rzero = rhs[i]; 205491Sbmc } 206491Sbmc 207457Sbmc ltotal += (long double)bucketval * (long double)lhs[i]; 208457Sbmc rtotal += (long double)bucketval * (long double)rhs[i]; 2090Sstevel@tonic-gate } 2100Sstevel@tonic-gate 211*1017Sbmc if (ltotal < rtotal) 212*1017Sbmc return (DT_LESSTHAN); 2130Sstevel@tonic-gate 214*1017Sbmc if (ltotal > rtotal) 215*1017Sbmc return (DT_GREATERTHAN); 2160Sstevel@tonic-gate 217491Sbmc /* 218491Sbmc * If they're both equal, then we will compare based on the weights at 219491Sbmc * zero. If the weights at zero are equal, then this will be judged a 220491Sbmc * tie and will be resolved based on the key comparison. 
221491Sbmc */ 222*1017Sbmc if (lzero < rzero) 223*1017Sbmc return (DT_LESSTHAN); 224491Sbmc 225*1017Sbmc if (lzero > rzero) 226*1017Sbmc return (DT_GREATERTHAN); 227491Sbmc 2280Sstevel@tonic-gate return (0); 2290Sstevel@tonic-gate } 2300Sstevel@tonic-gate 231457Sbmc static void 232457Sbmc dt_aggregate_usym(dtrace_hdl_t *dtp, uint64_t *data) 233457Sbmc { 234457Sbmc uint64_t pid = data[0]; 235457Sbmc uint64_t *pc = &data[1]; 236457Sbmc struct ps_prochandle *P; 237457Sbmc GElf_Sym sym; 238457Sbmc 239457Sbmc if (dtp->dt_vector != NULL) 240457Sbmc return; 241457Sbmc 242457Sbmc if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL) 243457Sbmc return; 244457Sbmc 245457Sbmc dt_proc_lock(dtp, P); 246457Sbmc 247457Sbmc if (Plookup_by_addr(P, *pc, NULL, 0, &sym) == 0) 248457Sbmc *pc = sym.st_value; 249457Sbmc 250457Sbmc dt_proc_unlock(dtp, P); 251457Sbmc dt_proc_release(dtp, P); 252457Sbmc } 253457Sbmc 254457Sbmc static void 255457Sbmc dt_aggregate_umod(dtrace_hdl_t *dtp, uint64_t *data) 256457Sbmc { 257457Sbmc uint64_t pid = data[0]; 258457Sbmc uint64_t *pc = &data[1]; 259457Sbmc struct ps_prochandle *P; 260457Sbmc const prmap_t *map; 261457Sbmc 262457Sbmc if (dtp->dt_vector != NULL) 263457Sbmc return; 264457Sbmc 265457Sbmc if ((P = dt_proc_grab(dtp, pid, PGRAB_RDONLY | PGRAB_FORCE, 0)) == NULL) 266457Sbmc return; 267457Sbmc 268457Sbmc dt_proc_lock(dtp, P); 269457Sbmc 270457Sbmc if ((map = Paddr_to_map(P, *pc)) != NULL) 271457Sbmc *pc = map->pr_vaddr; 272457Sbmc 273457Sbmc dt_proc_unlock(dtp, P); 274457Sbmc dt_proc_release(dtp, P); 275457Sbmc } 276457Sbmc 277457Sbmc static void 278457Sbmc dt_aggregate_sym(dtrace_hdl_t *dtp, uint64_t *data) 279457Sbmc { 280457Sbmc GElf_Sym sym; 281457Sbmc uint64_t *pc = data; 282457Sbmc 283457Sbmc if (dtrace_lookup_by_addr(dtp, *pc, &sym, NULL) == 0) 284457Sbmc *pc = sym.st_value; 285457Sbmc } 286457Sbmc 287457Sbmc static void 288457Sbmc dt_aggregate_mod(dtrace_hdl_t *dtp, uint64_t *data) 289457Sbmc { 290457Sbmc uint64_t *pc 
= data; 291457Sbmc dt_module_t *dmp; 292457Sbmc 293457Sbmc if (dtp->dt_vector != NULL) { 294457Sbmc /* 295457Sbmc * We don't have a way of just getting the module for a 296457Sbmc * vectored open, and it doesn't seem to be worth defining 297457Sbmc * one. This means that use of mod() won't get true 298457Sbmc * aggregation in the postmortem case (some modules may 299457Sbmc * appear more than once in aggregation output). It seems 300457Sbmc * unlikely that anyone will ever notice or care... 301457Sbmc */ 302457Sbmc return; 303457Sbmc } 304457Sbmc 305457Sbmc for (dmp = dt_list_next(&dtp->dt_modlist); dmp != NULL; 306457Sbmc dmp = dt_list_next(dmp)) { 307457Sbmc if (*pc - dmp->dm_text_va < dmp->dm_text_size) { 308457Sbmc *pc = dmp->dm_text_va; 309457Sbmc return; 310457Sbmc } 311457Sbmc } 312457Sbmc } 313457Sbmc 314*1017Sbmc static dtrace_aggvarid_t 315*1017Sbmc dt_aggregate_aggvarid(dt_ahashent_t *ent) 316*1017Sbmc { 317*1017Sbmc dtrace_aggdesc_t *agg = ent->dtahe_data.dtada_desc; 318*1017Sbmc caddr_t data = ent->dtahe_data.dtada_data; 319*1017Sbmc dtrace_recdesc_t *rec = agg->dtagd_rec; 320*1017Sbmc 321*1017Sbmc /* 322*1017Sbmc * First, we'll check the variable ID in the aggdesc. If it's valid, 323*1017Sbmc * we'll return it. If not, we'll use the compiler-generated ID 324*1017Sbmc * present as the first record. 
325*1017Sbmc */ 326*1017Sbmc if (agg->dtagd_varid != DTRACE_AGGVARIDNONE) 327*1017Sbmc return (agg->dtagd_varid); 328*1017Sbmc 329*1017Sbmc agg->dtagd_varid = *((dtrace_aggvarid_t *)(uintptr_t)(data + 330*1017Sbmc rec->dtrd_offset)); 331*1017Sbmc 332*1017Sbmc return (agg->dtagd_varid); 333*1017Sbmc } 334*1017Sbmc 335*1017Sbmc 3360Sstevel@tonic-gate static int 3370Sstevel@tonic-gate dt_aggregate_snap_cpu(dtrace_hdl_t *dtp, processorid_t cpu) 3380Sstevel@tonic-gate { 3390Sstevel@tonic-gate dtrace_epid_t id; 3400Sstevel@tonic-gate uint64_t hashval; 3410Sstevel@tonic-gate size_t offs, roffs, size, ndx; 3420Sstevel@tonic-gate int i, j, rval; 3430Sstevel@tonic-gate caddr_t addr, data; 3440Sstevel@tonic-gate dtrace_recdesc_t *rec; 3450Sstevel@tonic-gate dt_aggregate_t *agp = &dtp->dt_aggregate; 3460Sstevel@tonic-gate dtrace_aggdesc_t *agg; 3470Sstevel@tonic-gate dt_ahash_t *hash = &agp->dtat_hash; 3480Sstevel@tonic-gate dt_ahashent_t *h; 3490Sstevel@tonic-gate dtrace_bufdesc_t b = agp->dtat_buf, *buf = &b; 3500Sstevel@tonic-gate dtrace_aggdata_t *aggdata; 3510Sstevel@tonic-gate int flags = agp->dtat_flags; 3520Sstevel@tonic-gate 3530Sstevel@tonic-gate buf->dtbd_cpu = cpu; 3540Sstevel@tonic-gate 3550Sstevel@tonic-gate if (dt_ioctl(dtp, DTRACEIOC_AGGSNAP, buf) == -1) { 3560Sstevel@tonic-gate if (errno == ENOENT) { 3570Sstevel@tonic-gate /* 3580Sstevel@tonic-gate * If that failed with ENOENT, it may be because the 3590Sstevel@tonic-gate * CPU was unconfigured. This is okay; we'll just 3600Sstevel@tonic-gate * do nothing but return success. 
3610Sstevel@tonic-gate */ 3620Sstevel@tonic-gate return (0); 3630Sstevel@tonic-gate } 3640Sstevel@tonic-gate 3650Sstevel@tonic-gate return (dt_set_errno(dtp, errno)); 3660Sstevel@tonic-gate } 3670Sstevel@tonic-gate 3680Sstevel@tonic-gate if (buf->dtbd_drops != 0) { 3690Sstevel@tonic-gate if (dt_handle_cpudrop(dtp, cpu, 3700Sstevel@tonic-gate DTRACEDROP_AGGREGATION, buf->dtbd_drops) == -1) 3710Sstevel@tonic-gate return (-1); 3720Sstevel@tonic-gate } 3730Sstevel@tonic-gate 3740Sstevel@tonic-gate if (buf->dtbd_size == 0) 3750Sstevel@tonic-gate return (0); 3760Sstevel@tonic-gate 3770Sstevel@tonic-gate if (hash->dtah_hash == NULL) { 3780Sstevel@tonic-gate size_t size; 3790Sstevel@tonic-gate 3800Sstevel@tonic-gate hash->dtah_size = DTRACE_AHASHSIZE; 3810Sstevel@tonic-gate size = hash->dtah_size * sizeof (dt_ahashent_t *); 3820Sstevel@tonic-gate 3830Sstevel@tonic-gate if ((hash->dtah_hash = malloc(size)) == NULL) 3840Sstevel@tonic-gate return (dt_set_errno(dtp, EDT_NOMEM)); 3850Sstevel@tonic-gate 3860Sstevel@tonic-gate bzero(hash->dtah_hash, size); 3870Sstevel@tonic-gate } 3880Sstevel@tonic-gate 3890Sstevel@tonic-gate for (offs = 0; offs < buf->dtbd_size; ) { 3900Sstevel@tonic-gate /* 3910Sstevel@tonic-gate * We're guaranteed to have an ID. 3920Sstevel@tonic-gate */ 3930Sstevel@tonic-gate id = *((dtrace_epid_t *)((uintptr_t)buf->dtbd_data + 3940Sstevel@tonic-gate (uintptr_t)offs)); 3950Sstevel@tonic-gate 3960Sstevel@tonic-gate if (id == DTRACE_AGGIDNONE) { 3970Sstevel@tonic-gate /* 3980Sstevel@tonic-gate * This is filler to assure proper alignment of the 3990Sstevel@tonic-gate * next record; we simply ignore it. 
4000Sstevel@tonic-gate */ 4010Sstevel@tonic-gate offs += sizeof (id); 4020Sstevel@tonic-gate continue; 4030Sstevel@tonic-gate } 4040Sstevel@tonic-gate 4050Sstevel@tonic-gate if ((rval = dt_aggid_lookup(dtp, id, &agg)) != 0) 4060Sstevel@tonic-gate return (rval); 4070Sstevel@tonic-gate 4080Sstevel@tonic-gate addr = buf->dtbd_data + offs; 4090Sstevel@tonic-gate size = agg->dtagd_size; 4100Sstevel@tonic-gate hashval = 0; 4110Sstevel@tonic-gate 4120Sstevel@tonic-gate for (j = 0; j < agg->dtagd_nrecs - 1; j++) { 4130Sstevel@tonic-gate rec = &agg->dtagd_rec[j]; 4140Sstevel@tonic-gate roffs = rec->dtrd_offset; 4150Sstevel@tonic-gate 416457Sbmc switch (rec->dtrd_action) { 417457Sbmc case DTRACEACT_USYM: 418457Sbmc dt_aggregate_usym(dtp, 419457Sbmc /* LINTED - alignment */ 420457Sbmc (uint64_t *)&addr[roffs]); 421457Sbmc break; 422457Sbmc 423457Sbmc case DTRACEACT_UMOD: 424457Sbmc dt_aggregate_umod(dtp, 425457Sbmc /* LINTED - alignment */ 426457Sbmc (uint64_t *)&addr[roffs]); 427457Sbmc break; 428457Sbmc 429457Sbmc case DTRACEACT_SYM: 430457Sbmc /* LINTED - alignment */ 431457Sbmc dt_aggregate_sym(dtp, (uint64_t *)&addr[roffs]); 432457Sbmc break; 433457Sbmc 434457Sbmc case DTRACEACT_MOD: 435457Sbmc /* LINTED - alignment */ 436457Sbmc dt_aggregate_mod(dtp, (uint64_t *)&addr[roffs]); 437457Sbmc break; 438457Sbmc 439457Sbmc default: 440457Sbmc break; 441457Sbmc } 442457Sbmc 4430Sstevel@tonic-gate for (i = 0; i < rec->dtrd_size; i++) 4440Sstevel@tonic-gate hashval += addr[roffs + i]; 4450Sstevel@tonic-gate } 4460Sstevel@tonic-gate 4470Sstevel@tonic-gate ndx = hashval % hash->dtah_size; 4480Sstevel@tonic-gate 4490Sstevel@tonic-gate for (h = hash->dtah_hash[ndx]; h != NULL; h = h->dtahe_next) { 4500Sstevel@tonic-gate if (h->dtahe_hashval != hashval) 4510Sstevel@tonic-gate continue; 4520Sstevel@tonic-gate 4530Sstevel@tonic-gate if (h->dtahe_size != size) 4540Sstevel@tonic-gate continue; 4550Sstevel@tonic-gate 4560Sstevel@tonic-gate aggdata = &h->dtahe_data; 4570Sstevel@tonic-gate 
data = aggdata->dtada_data; 4580Sstevel@tonic-gate 4590Sstevel@tonic-gate for (j = 0; j < agg->dtagd_nrecs - 1; j++) { 4600Sstevel@tonic-gate rec = &agg->dtagd_rec[j]; 4610Sstevel@tonic-gate roffs = rec->dtrd_offset; 4620Sstevel@tonic-gate 4630Sstevel@tonic-gate for (i = 0; i < rec->dtrd_size; i++) 4640Sstevel@tonic-gate if (addr[roffs + i] != data[roffs + i]) 4650Sstevel@tonic-gate goto hashnext; 4660Sstevel@tonic-gate } 4670Sstevel@tonic-gate 4680Sstevel@tonic-gate /* 4690Sstevel@tonic-gate * We found it. Now we need to apply the aggregating 4700Sstevel@tonic-gate * action on the data here. 4710Sstevel@tonic-gate */ 4720Sstevel@tonic-gate rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1]; 4730Sstevel@tonic-gate roffs = rec->dtrd_offset; 4740Sstevel@tonic-gate /* LINTED - alignment */ 475491Sbmc h->dtahe_aggregate((int64_t *)&data[roffs], 4760Sstevel@tonic-gate /* LINTED - alignment */ 477491Sbmc (int64_t *)&addr[roffs], rec->dtrd_size); 4780Sstevel@tonic-gate 4790Sstevel@tonic-gate /* 4800Sstevel@tonic-gate * If we're keeping per CPU data, apply the aggregating 4810Sstevel@tonic-gate * action there as well. 4820Sstevel@tonic-gate */ 4830Sstevel@tonic-gate if (aggdata->dtada_percpu != NULL) { 4840Sstevel@tonic-gate data = aggdata->dtada_percpu[cpu]; 4850Sstevel@tonic-gate 4860Sstevel@tonic-gate /* LINTED - alignment */ 487491Sbmc h->dtahe_aggregate((int64_t *)data, 4880Sstevel@tonic-gate /* LINTED - alignment */ 489491Sbmc (int64_t *)&addr[roffs], rec->dtrd_size); 4900Sstevel@tonic-gate } 4910Sstevel@tonic-gate 4920Sstevel@tonic-gate goto bufnext; 4930Sstevel@tonic-gate hashnext: 4940Sstevel@tonic-gate continue; 4950Sstevel@tonic-gate } 4960Sstevel@tonic-gate 4970Sstevel@tonic-gate /* 4980Sstevel@tonic-gate * If we're here, we couldn't find an entry for this record. 
4990Sstevel@tonic-gate */ 5000Sstevel@tonic-gate if ((h = malloc(sizeof (dt_ahashent_t))) == NULL) 5010Sstevel@tonic-gate return (dt_set_errno(dtp, EDT_NOMEM)); 5020Sstevel@tonic-gate bzero(h, sizeof (dt_ahashent_t)); 5030Sstevel@tonic-gate aggdata = &h->dtahe_data; 5040Sstevel@tonic-gate 5050Sstevel@tonic-gate if ((aggdata->dtada_data = malloc(size)) == NULL) { 5060Sstevel@tonic-gate free(h); 5070Sstevel@tonic-gate return (dt_set_errno(dtp, EDT_NOMEM)); 5080Sstevel@tonic-gate } 5090Sstevel@tonic-gate 5100Sstevel@tonic-gate bcopy(addr, aggdata->dtada_data, size); 5110Sstevel@tonic-gate aggdata->dtada_size = size; 5120Sstevel@tonic-gate aggdata->dtada_desc = agg; 5130Sstevel@tonic-gate aggdata->dtada_handle = dtp; 5140Sstevel@tonic-gate (void) dt_epid_lookup(dtp, agg->dtagd_epid, 5150Sstevel@tonic-gate &aggdata->dtada_edesc, &aggdata->dtada_pdesc); 5160Sstevel@tonic-gate aggdata->dtada_normal = 1; 5170Sstevel@tonic-gate 5180Sstevel@tonic-gate h->dtahe_hashval = hashval; 5190Sstevel@tonic-gate h->dtahe_size = size; 520*1017Sbmc (void) dt_aggregate_aggvarid(h); 5210Sstevel@tonic-gate 5220Sstevel@tonic-gate rec = &agg->dtagd_rec[agg->dtagd_nrecs - 1]; 5230Sstevel@tonic-gate 5240Sstevel@tonic-gate if (flags & DTRACE_A_PERCPU) { 5250Sstevel@tonic-gate int max_cpus = agp->dtat_maxcpu; 5260Sstevel@tonic-gate caddr_t *percpu = malloc(max_cpus * sizeof (caddr_t)); 5270Sstevel@tonic-gate 5280Sstevel@tonic-gate if (percpu == NULL) { 5290Sstevel@tonic-gate free(aggdata->dtada_data); 5300Sstevel@tonic-gate free(h); 5310Sstevel@tonic-gate return (dt_set_errno(dtp, EDT_NOMEM)); 5320Sstevel@tonic-gate } 5330Sstevel@tonic-gate 5340Sstevel@tonic-gate for (j = 0; j < max_cpus; j++) { 5350Sstevel@tonic-gate percpu[j] = malloc(rec->dtrd_size); 5360Sstevel@tonic-gate 5370Sstevel@tonic-gate if (percpu[j] == NULL) { 5380Sstevel@tonic-gate while (--j >= 0) 5390Sstevel@tonic-gate free(percpu[j]); 5400Sstevel@tonic-gate 5410Sstevel@tonic-gate free(aggdata->dtada_data); 5420Sstevel@tonic-gate 
free(h); 5430Sstevel@tonic-gate return (dt_set_errno(dtp, EDT_NOMEM)); 5440Sstevel@tonic-gate } 5450Sstevel@tonic-gate 5460Sstevel@tonic-gate if (j == cpu) { 5470Sstevel@tonic-gate bcopy(&addr[rec->dtrd_offset], 5480Sstevel@tonic-gate percpu[j], rec->dtrd_size); 5490Sstevel@tonic-gate } else { 5500Sstevel@tonic-gate bzero(percpu[j], rec->dtrd_size); 5510Sstevel@tonic-gate } 5520Sstevel@tonic-gate } 5530Sstevel@tonic-gate 5540Sstevel@tonic-gate aggdata->dtada_percpu = percpu; 5550Sstevel@tonic-gate } 5560Sstevel@tonic-gate 5570Sstevel@tonic-gate switch (rec->dtrd_action) { 5580Sstevel@tonic-gate case DTRACEAGG_MIN: 5590Sstevel@tonic-gate h->dtahe_aggregate = dt_aggregate_min; 5600Sstevel@tonic-gate break; 5610Sstevel@tonic-gate 5620Sstevel@tonic-gate case DTRACEAGG_MAX: 5630Sstevel@tonic-gate h->dtahe_aggregate = dt_aggregate_max; 5640Sstevel@tonic-gate break; 5650Sstevel@tonic-gate 5660Sstevel@tonic-gate case DTRACEAGG_LQUANTIZE: 5670Sstevel@tonic-gate h->dtahe_aggregate = dt_aggregate_lquantize; 5680Sstevel@tonic-gate break; 5690Sstevel@tonic-gate 5700Sstevel@tonic-gate case DTRACEAGG_COUNT: 5710Sstevel@tonic-gate case DTRACEAGG_SUM: 5720Sstevel@tonic-gate case DTRACEAGG_AVG: 5730Sstevel@tonic-gate case DTRACEAGG_QUANTIZE: 5740Sstevel@tonic-gate h->dtahe_aggregate = dt_aggregate_count; 5750Sstevel@tonic-gate break; 5760Sstevel@tonic-gate 5770Sstevel@tonic-gate default: 5780Sstevel@tonic-gate return (dt_set_errno(dtp, EDT_BADAGG)); 5790Sstevel@tonic-gate } 5800Sstevel@tonic-gate 5810Sstevel@tonic-gate if (hash->dtah_hash[ndx] != NULL) 5820Sstevel@tonic-gate hash->dtah_hash[ndx]->dtahe_prev = h; 5830Sstevel@tonic-gate 5840Sstevel@tonic-gate h->dtahe_next = hash->dtah_hash[ndx]; 5850Sstevel@tonic-gate hash->dtah_hash[ndx] = h; 5860Sstevel@tonic-gate 5870Sstevel@tonic-gate if (hash->dtah_all != NULL) 5880Sstevel@tonic-gate hash->dtah_all->dtahe_prevall = h; 5890Sstevel@tonic-gate 5900Sstevel@tonic-gate h->dtahe_nextall = hash->dtah_all; 5910Sstevel@tonic-gate 
hash->dtah_all = h; 5920Sstevel@tonic-gate bufnext: 5930Sstevel@tonic-gate offs += agg->dtagd_size; 5940Sstevel@tonic-gate } 5950Sstevel@tonic-gate 5960Sstevel@tonic-gate return (0); 5970Sstevel@tonic-gate } 5980Sstevel@tonic-gate 5990Sstevel@tonic-gate int 6000Sstevel@tonic-gate dtrace_aggregate_snap(dtrace_hdl_t *dtp) 6010Sstevel@tonic-gate { 6020Sstevel@tonic-gate int i, rval; 6030Sstevel@tonic-gate dt_aggregate_t *agp = &dtp->dt_aggregate; 6040Sstevel@tonic-gate hrtime_t now = gethrtime(); 6050Sstevel@tonic-gate dtrace_optval_t interval = dtp->dt_options[DTRACEOPT_AGGRATE]; 6060Sstevel@tonic-gate 6070Sstevel@tonic-gate if (dtp->dt_lastagg != 0) { 6080Sstevel@tonic-gate if (now - dtp->dt_lastagg < interval) 6090Sstevel@tonic-gate return (0); 6100Sstevel@tonic-gate 6110Sstevel@tonic-gate dtp->dt_lastagg += interval; 6120Sstevel@tonic-gate } else { 6130Sstevel@tonic-gate dtp->dt_lastagg = now; 6140Sstevel@tonic-gate } 6150Sstevel@tonic-gate 6160Sstevel@tonic-gate if (!dtp->dt_active) 6170Sstevel@tonic-gate return (dt_set_errno(dtp, EINVAL)); 6180Sstevel@tonic-gate 6190Sstevel@tonic-gate if (agp->dtat_buf.dtbd_size == 0) 6200Sstevel@tonic-gate return (0); 6210Sstevel@tonic-gate 6220Sstevel@tonic-gate for (i = 0; i < agp->dtat_ncpus; i++) { 6230Sstevel@tonic-gate if (rval = dt_aggregate_snap_cpu(dtp, agp->dtat_cpus[i])) 6240Sstevel@tonic-gate return (rval); 6250Sstevel@tonic-gate } 6260Sstevel@tonic-gate 6270Sstevel@tonic-gate return (0); 6280Sstevel@tonic-gate } 6290Sstevel@tonic-gate 6300Sstevel@tonic-gate static int 6310Sstevel@tonic-gate dt_aggregate_hashcmp(const void *lhs, const void *rhs) 6320Sstevel@tonic-gate { 6330Sstevel@tonic-gate dt_ahashent_t *lh = *((dt_ahashent_t **)lhs); 6340Sstevel@tonic-gate dt_ahashent_t *rh = *((dt_ahashent_t **)rhs); 6350Sstevel@tonic-gate dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc; 6360Sstevel@tonic-gate dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc; 6370Sstevel@tonic-gate 6380Sstevel@tonic-gate if 
(lagg->dtagd_nrecs < ragg->dtagd_nrecs) 639*1017Sbmc return (DT_LESSTHAN); 6400Sstevel@tonic-gate 6410Sstevel@tonic-gate if (lagg->dtagd_nrecs > ragg->dtagd_nrecs) 642*1017Sbmc return (DT_GREATERTHAN); 6430Sstevel@tonic-gate 6440Sstevel@tonic-gate return (0); 6450Sstevel@tonic-gate } 6460Sstevel@tonic-gate 6470Sstevel@tonic-gate static int 6480Sstevel@tonic-gate dt_aggregate_varcmp(const void *lhs, const void *rhs) 6490Sstevel@tonic-gate { 6500Sstevel@tonic-gate dt_ahashent_t *lh = *((dt_ahashent_t **)lhs); 6510Sstevel@tonic-gate dt_ahashent_t *rh = *((dt_ahashent_t **)rhs); 652*1017Sbmc dtrace_aggvarid_t lid, rid; 6530Sstevel@tonic-gate 654*1017Sbmc lid = dt_aggregate_aggvarid(lh); 655*1017Sbmc rid = dt_aggregate_aggvarid(rh); 6560Sstevel@tonic-gate 6570Sstevel@tonic-gate if (lid < rid) 658*1017Sbmc return (DT_LESSTHAN); 6590Sstevel@tonic-gate 6600Sstevel@tonic-gate if (lid > rid) 661*1017Sbmc return (DT_GREATERTHAN); 6620Sstevel@tonic-gate 6630Sstevel@tonic-gate return (0); 6640Sstevel@tonic-gate } 6650Sstevel@tonic-gate 6660Sstevel@tonic-gate static int 6670Sstevel@tonic-gate dt_aggregate_keycmp(const void *lhs, const void *rhs) 6680Sstevel@tonic-gate { 6690Sstevel@tonic-gate dt_ahashent_t *lh = *((dt_ahashent_t **)lhs); 6700Sstevel@tonic-gate dt_ahashent_t *rh = *((dt_ahashent_t **)rhs); 6710Sstevel@tonic-gate dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc; 6720Sstevel@tonic-gate dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc; 6730Sstevel@tonic-gate dtrace_recdesc_t *lrec, *rrec; 6740Sstevel@tonic-gate char *ldata, *rdata; 675*1017Sbmc int rval, i, j, keypos, nrecs; 6760Sstevel@tonic-gate 6770Sstevel@tonic-gate if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0) 6780Sstevel@tonic-gate return (rval); 6790Sstevel@tonic-gate 680*1017Sbmc nrecs = lagg->dtagd_nrecs - 1; 681*1017Sbmc assert(nrecs == ragg->dtagd_nrecs - 1); 682*1017Sbmc 683*1017Sbmc keypos = dt_keypos + 1 >= nrecs ? 
0 : dt_keypos; 6840Sstevel@tonic-gate 685*1017Sbmc for (i = 1; i < nrecs; i++) { 686*1017Sbmc uint64_t lval, rval; 687*1017Sbmc int ndx = i + keypos; 688*1017Sbmc 689*1017Sbmc if (ndx >= nrecs) 690*1017Sbmc ndx = ndx - nrecs + 1; 691*1017Sbmc 692*1017Sbmc lrec = &lagg->dtagd_rec[ndx]; 693*1017Sbmc rrec = &ragg->dtagd_rec[ndx]; 6940Sstevel@tonic-gate 6950Sstevel@tonic-gate ldata = lh->dtahe_data.dtada_data + lrec->dtrd_offset; 6960Sstevel@tonic-gate rdata = rh->dtahe_data.dtada_data + rrec->dtrd_offset; 6970Sstevel@tonic-gate 6980Sstevel@tonic-gate if (lrec->dtrd_size < rrec->dtrd_size) 699*1017Sbmc return (DT_LESSTHAN); 7000Sstevel@tonic-gate 7010Sstevel@tonic-gate if (lrec->dtrd_size > rrec->dtrd_size) 702*1017Sbmc return (DT_GREATERTHAN); 7030Sstevel@tonic-gate 7040Sstevel@tonic-gate switch (lrec->dtrd_size) { 7050Sstevel@tonic-gate case sizeof (uint64_t): 7060Sstevel@tonic-gate /* LINTED - alignment */ 7070Sstevel@tonic-gate lval = *((uint64_t *)ldata); 7080Sstevel@tonic-gate /* LINTED - alignment */ 7090Sstevel@tonic-gate rval = *((uint64_t *)rdata); 7100Sstevel@tonic-gate break; 7110Sstevel@tonic-gate 7120Sstevel@tonic-gate case sizeof (uint32_t): 7130Sstevel@tonic-gate /* LINTED - alignment */ 7140Sstevel@tonic-gate lval = *((uint32_t *)ldata); 7150Sstevel@tonic-gate /* LINTED - alignment */ 7160Sstevel@tonic-gate rval = *((uint32_t *)rdata); 7170Sstevel@tonic-gate break; 7180Sstevel@tonic-gate 7190Sstevel@tonic-gate case sizeof (uint16_t): 7200Sstevel@tonic-gate /* LINTED - alignment */ 7210Sstevel@tonic-gate lval = *((uint16_t *)ldata); 7220Sstevel@tonic-gate /* LINTED - alignment */ 7230Sstevel@tonic-gate rval = *((uint16_t *)rdata); 7240Sstevel@tonic-gate break; 7250Sstevel@tonic-gate 7260Sstevel@tonic-gate case sizeof (uint8_t): 7270Sstevel@tonic-gate lval = *((uint8_t *)ldata); 7280Sstevel@tonic-gate rval = *((uint8_t *)rdata); 7290Sstevel@tonic-gate break; 7300Sstevel@tonic-gate 7310Sstevel@tonic-gate default: 732403Sbmc for (j = 0; j < 
lrec->dtrd_size; j++) { 733403Sbmc lval = ((uint8_t *)ldata)[j]; 734403Sbmc rval = ((uint8_t *)rdata)[j]; 7350Sstevel@tonic-gate 7360Sstevel@tonic-gate if (lval < rval) 737*1017Sbmc return (DT_LESSTHAN); 7380Sstevel@tonic-gate 7390Sstevel@tonic-gate if (lval > rval) 740*1017Sbmc return (DT_GREATERTHAN); 741*1017Sbmc 7420Sstevel@tonic-gate } 7430Sstevel@tonic-gate 7440Sstevel@tonic-gate continue; 7450Sstevel@tonic-gate } 7460Sstevel@tonic-gate 7470Sstevel@tonic-gate if (lval < rval) 748*1017Sbmc return (DT_LESSTHAN); 7490Sstevel@tonic-gate 7500Sstevel@tonic-gate if (lval > rval) 751*1017Sbmc return (DT_GREATERTHAN); 7520Sstevel@tonic-gate } 7530Sstevel@tonic-gate 7540Sstevel@tonic-gate return (0); 7550Sstevel@tonic-gate } 7560Sstevel@tonic-gate 7570Sstevel@tonic-gate static int 7580Sstevel@tonic-gate dt_aggregate_valcmp(const void *lhs, const void *rhs) 7590Sstevel@tonic-gate { 7600Sstevel@tonic-gate dt_ahashent_t *lh = *((dt_ahashent_t **)lhs); 7610Sstevel@tonic-gate dt_ahashent_t *rh = *((dt_ahashent_t **)rhs); 7620Sstevel@tonic-gate dtrace_aggdesc_t *lagg = lh->dtahe_data.dtada_desc; 7630Sstevel@tonic-gate dtrace_aggdesc_t *ragg = rh->dtahe_data.dtada_desc; 7640Sstevel@tonic-gate caddr_t ldata = lh->dtahe_data.dtada_data; 7650Sstevel@tonic-gate caddr_t rdata = rh->dtahe_data.dtada_data; 7660Sstevel@tonic-gate dtrace_recdesc_t *lrec, *rrec; 767491Sbmc int64_t *laddr, *raddr; 7680Sstevel@tonic-gate int rval, i; 7690Sstevel@tonic-gate 7700Sstevel@tonic-gate if ((rval = dt_aggregate_hashcmp(lhs, rhs)) != 0) 7710Sstevel@tonic-gate return (rval); 7720Sstevel@tonic-gate 773*1017Sbmc if (lagg->dtagd_nrecs > ragg->dtagd_nrecs) 774*1017Sbmc return (DT_GREATERTHAN); 7750Sstevel@tonic-gate 776*1017Sbmc if (lagg->dtagd_nrecs < ragg->dtagd_nrecs) 777*1017Sbmc return (DT_LESSTHAN); 7780Sstevel@tonic-gate 7790Sstevel@tonic-gate for (i = 0; i < lagg->dtagd_nrecs; i++) { 7800Sstevel@tonic-gate lrec = &lagg->dtagd_rec[i]; 7810Sstevel@tonic-gate rrec = &ragg->dtagd_rec[i]; 
7820Sstevel@tonic-gate 7830Sstevel@tonic-gate if (lrec->dtrd_offset < rrec->dtrd_offset) 784*1017Sbmc return (DT_LESSTHAN); 7850Sstevel@tonic-gate 7860Sstevel@tonic-gate if (lrec->dtrd_offset > rrec->dtrd_offset) 787*1017Sbmc return (DT_GREATERTHAN); 7880Sstevel@tonic-gate 7890Sstevel@tonic-gate if (lrec->dtrd_action < rrec->dtrd_action) 790*1017Sbmc return (DT_LESSTHAN); 7910Sstevel@tonic-gate 7920Sstevel@tonic-gate if (lrec->dtrd_action > rrec->dtrd_action) 793*1017Sbmc return (DT_GREATERTHAN); 7940Sstevel@tonic-gate } 7950Sstevel@tonic-gate 796491Sbmc laddr = (int64_t *)(uintptr_t)(ldata + lrec->dtrd_offset); 797491Sbmc raddr = (int64_t *)(uintptr_t)(rdata + rrec->dtrd_offset); 7980Sstevel@tonic-gate 7990Sstevel@tonic-gate switch (lrec->dtrd_action) { 8000Sstevel@tonic-gate case DTRACEAGG_AVG: 8010Sstevel@tonic-gate rval = dt_aggregate_averagecmp(laddr, raddr); 8020Sstevel@tonic-gate break; 8030Sstevel@tonic-gate 8040Sstevel@tonic-gate case DTRACEAGG_QUANTIZE: 8050Sstevel@tonic-gate rval = dt_aggregate_quantizedcmp(laddr, raddr); 8060Sstevel@tonic-gate break; 8070Sstevel@tonic-gate 8080Sstevel@tonic-gate case DTRACEAGG_LQUANTIZE: 8090Sstevel@tonic-gate rval = dt_aggregate_lquantizedcmp(laddr, raddr); 8100Sstevel@tonic-gate break; 8110Sstevel@tonic-gate 8120Sstevel@tonic-gate case DTRACEAGG_COUNT: 8130Sstevel@tonic-gate case DTRACEAGG_SUM: 8140Sstevel@tonic-gate case DTRACEAGG_MIN: 8150Sstevel@tonic-gate case DTRACEAGG_MAX: 8160Sstevel@tonic-gate rval = dt_aggregate_countcmp(laddr, raddr); 8170Sstevel@tonic-gate break; 8180Sstevel@tonic-gate 8190Sstevel@tonic-gate default: 8200Sstevel@tonic-gate assert(0); 8210Sstevel@tonic-gate } 8220Sstevel@tonic-gate 823*1017Sbmc return (rval); 824*1017Sbmc } 825*1017Sbmc 826*1017Sbmc static int 827*1017Sbmc dt_aggregate_valkeycmp(const void *lhs, const void *rhs) 828*1017Sbmc { 829*1017Sbmc int rval; 830*1017Sbmc 831*1017Sbmc if ((rval = dt_aggregate_valcmp(lhs, rhs)) != 0) 8320Sstevel@tonic-gate return (rval); 
8330Sstevel@tonic-gate 8340Sstevel@tonic-gate /* 8350Sstevel@tonic-gate * If we're here, the values for the two aggregation elements are 8360Sstevel@tonic-gate * equal. We already know that the key layout is the same for the two 8370Sstevel@tonic-gate * elements; we must now compare the keys themselves as a tie-breaker. 8380Sstevel@tonic-gate */ 8390Sstevel@tonic-gate return (dt_aggregate_keycmp(lhs, rhs)); 8400Sstevel@tonic-gate } 8410Sstevel@tonic-gate 8420Sstevel@tonic-gate static int 8430Sstevel@tonic-gate dt_aggregate_keyvarcmp(const void *lhs, const void *rhs) 8440Sstevel@tonic-gate { 8450Sstevel@tonic-gate int rval; 8460Sstevel@tonic-gate 8470Sstevel@tonic-gate if ((rval = dt_aggregate_keycmp(lhs, rhs)) != 0) 8480Sstevel@tonic-gate return (rval); 8490Sstevel@tonic-gate 8500Sstevel@tonic-gate return (dt_aggregate_varcmp(lhs, rhs)); 8510Sstevel@tonic-gate } 8520Sstevel@tonic-gate 8530Sstevel@tonic-gate static int 8540Sstevel@tonic-gate dt_aggregate_varkeycmp(const void *lhs, const void *rhs) 8550Sstevel@tonic-gate { 8560Sstevel@tonic-gate int rval; 8570Sstevel@tonic-gate 8580Sstevel@tonic-gate if ((rval = dt_aggregate_varcmp(lhs, rhs)) != 0) 8590Sstevel@tonic-gate return (rval); 8600Sstevel@tonic-gate 8610Sstevel@tonic-gate return (dt_aggregate_keycmp(lhs, rhs)); 8620Sstevel@tonic-gate } 8630Sstevel@tonic-gate 8640Sstevel@tonic-gate static int 8650Sstevel@tonic-gate dt_aggregate_valvarcmp(const void *lhs, const void *rhs) 8660Sstevel@tonic-gate { 8670Sstevel@tonic-gate int rval; 8680Sstevel@tonic-gate 869*1017Sbmc if ((rval = dt_aggregate_valkeycmp(lhs, rhs)) != 0) 8700Sstevel@tonic-gate return (rval); 8710Sstevel@tonic-gate 8720Sstevel@tonic-gate return (dt_aggregate_varcmp(lhs, rhs)); 8730Sstevel@tonic-gate } 8740Sstevel@tonic-gate 8750Sstevel@tonic-gate static int 8760Sstevel@tonic-gate dt_aggregate_varvalcmp(const void *lhs, const void *rhs) 8770Sstevel@tonic-gate { 8780Sstevel@tonic-gate int rval; 8790Sstevel@tonic-gate 8800Sstevel@tonic-gate if ((rval = 
dt_aggregate_varcmp(lhs, rhs)) != 0) 8810Sstevel@tonic-gate return (rval); 8820Sstevel@tonic-gate 883*1017Sbmc return (dt_aggregate_valkeycmp(lhs, rhs)); 8840Sstevel@tonic-gate } 8850Sstevel@tonic-gate 8860Sstevel@tonic-gate static int 8870Sstevel@tonic-gate dt_aggregate_keyvarrevcmp(const void *lhs, const void *rhs) 8880Sstevel@tonic-gate { 8890Sstevel@tonic-gate return (dt_aggregate_keyvarcmp(rhs, lhs)); 8900Sstevel@tonic-gate } 8910Sstevel@tonic-gate 8920Sstevel@tonic-gate static int 8930Sstevel@tonic-gate dt_aggregate_varkeyrevcmp(const void *lhs, const void *rhs) 8940Sstevel@tonic-gate { 8950Sstevel@tonic-gate return (dt_aggregate_varkeycmp(rhs, lhs)); 8960Sstevel@tonic-gate } 8970Sstevel@tonic-gate 8980Sstevel@tonic-gate static int 8990Sstevel@tonic-gate dt_aggregate_valvarrevcmp(const void *lhs, const void *rhs) 9000Sstevel@tonic-gate { 9010Sstevel@tonic-gate return (dt_aggregate_valvarcmp(rhs, lhs)); 9020Sstevel@tonic-gate } 9030Sstevel@tonic-gate 9040Sstevel@tonic-gate static int 9050Sstevel@tonic-gate dt_aggregate_varvalrevcmp(const void *lhs, const void *rhs) 9060Sstevel@tonic-gate { 9070Sstevel@tonic-gate return (dt_aggregate_varvalcmp(rhs, lhs)); 9080Sstevel@tonic-gate } 9090Sstevel@tonic-gate 910*1017Sbmc static int 911*1017Sbmc dt_aggregate_bundlecmp(const void *lhs, const void *rhs) 912*1017Sbmc { 913*1017Sbmc dt_ahashent_t **lh = *((dt_ahashent_t ***)lhs); 914*1017Sbmc dt_ahashent_t **rh = *((dt_ahashent_t ***)rhs); 915*1017Sbmc int i, rval; 916*1017Sbmc 917*1017Sbmc if (dt_keysort) { 918*1017Sbmc /* 919*1017Sbmc * If we're sorting on keys, we need to scan until we find the 920*1017Sbmc * last entry -- that's the representative key. (The order of 921*1017Sbmc * the bundle is values followed by key to accommodate the 922*1017Sbmc * default behavior of sorting by value.) If the keys are 923*1017Sbmc * equal, we'll fall into the value comparison loop, below. 
924*1017Sbmc */ 925*1017Sbmc for (i = 0; lh[i + 1] != NULL; i++) 926*1017Sbmc continue; 927*1017Sbmc 928*1017Sbmc assert(i != 0); 929*1017Sbmc assert(rh[i + 1] == NULL); 930*1017Sbmc 931*1017Sbmc if ((rval = dt_aggregate_keycmp(&lh[i], &rh[i])) != 0) 932*1017Sbmc return (rval); 933*1017Sbmc } 934*1017Sbmc 935*1017Sbmc for (i = 0; ; i++) { 936*1017Sbmc if (lh[i + 1] == NULL) { 937*1017Sbmc /* 938*1017Sbmc * All of the values are equal; if we're sorting on 939*1017Sbmc * keys, then we're only here because the keys were 940*1017Sbmc * found to be equal and these records are therefore 941*1017Sbmc * equal. If we're not sorting on keys, we'll use the 942*1017Sbmc * key comparison from the representative key as the 943*1017Sbmc * tie-breaker. 944*1017Sbmc */ 945*1017Sbmc if (dt_keysort) 946*1017Sbmc return (0); 947*1017Sbmc 948*1017Sbmc assert(i != 0); 949*1017Sbmc assert(rh[i + 1] == NULL); 950*1017Sbmc return (dt_aggregate_keycmp(&lh[i], &rh[i])); 951*1017Sbmc } else { 952*1017Sbmc if ((rval = dt_aggregate_valcmp(&lh[i], &rh[i])) != 0) 953*1017Sbmc return (rval); 954*1017Sbmc } 955*1017Sbmc } 956*1017Sbmc } 957*1017Sbmc 9580Sstevel@tonic-gate int 9590Sstevel@tonic-gate dt_aggregate_go(dtrace_hdl_t *dtp) 9600Sstevel@tonic-gate { 9610Sstevel@tonic-gate dt_aggregate_t *agp = &dtp->dt_aggregate; 9620Sstevel@tonic-gate dtrace_optval_t size, cpu; 9630Sstevel@tonic-gate dtrace_bufdesc_t *buf = &agp->dtat_buf; 9640Sstevel@tonic-gate int rval, i; 9650Sstevel@tonic-gate 9660Sstevel@tonic-gate assert(agp->dtat_maxcpu == 0); 9670Sstevel@tonic-gate assert(agp->dtat_ncpu == 0); 9680Sstevel@tonic-gate assert(agp->dtat_cpus == NULL); 9690Sstevel@tonic-gate 9700Sstevel@tonic-gate agp->dtat_maxcpu = dt_sysconf(dtp, _SC_CPUID_MAX) + 1; 9710Sstevel@tonic-gate agp->dtat_ncpu = dt_sysconf(dtp, _SC_NPROCESSORS_MAX); 9720Sstevel@tonic-gate agp->dtat_cpus = malloc(agp->dtat_ncpu * sizeof (processorid_t)); 9730Sstevel@tonic-gate 9740Sstevel@tonic-gate if (agp->dtat_cpus == NULL) 
9750Sstevel@tonic-gate return (dt_set_errno(dtp, EDT_NOMEM)); 9760Sstevel@tonic-gate 9770Sstevel@tonic-gate /* 9780Sstevel@tonic-gate * Use the aggregation buffer size as reloaded from the kernel. 9790Sstevel@tonic-gate */ 9800Sstevel@tonic-gate size = dtp->dt_options[DTRACEOPT_AGGSIZE]; 9810Sstevel@tonic-gate 9820Sstevel@tonic-gate rval = dtrace_getopt(dtp, "aggsize", &size); 9830Sstevel@tonic-gate assert(rval == 0); 9840Sstevel@tonic-gate 9850Sstevel@tonic-gate if (size == 0 || size == DTRACEOPT_UNSET) 9860Sstevel@tonic-gate return (0); 9870Sstevel@tonic-gate 9880Sstevel@tonic-gate buf = &agp->dtat_buf; 9890Sstevel@tonic-gate buf->dtbd_size = size; 9900Sstevel@tonic-gate 9910Sstevel@tonic-gate if ((buf->dtbd_data = malloc(buf->dtbd_size)) == NULL) 9920Sstevel@tonic-gate return (dt_set_errno(dtp, EDT_NOMEM)); 9930Sstevel@tonic-gate 9940Sstevel@tonic-gate /* 9950Sstevel@tonic-gate * Now query for the CPUs enabled. 9960Sstevel@tonic-gate */ 9970Sstevel@tonic-gate rval = dtrace_getopt(dtp, "cpu", &cpu); 9980Sstevel@tonic-gate assert(rval == 0 && cpu != DTRACEOPT_UNSET); 9990Sstevel@tonic-gate 10000Sstevel@tonic-gate if (cpu != DTRACE_CPUALL) { 10010Sstevel@tonic-gate assert(cpu < agp->dtat_ncpu); 10020Sstevel@tonic-gate agp->dtat_cpus[agp->dtat_ncpus++] = (processorid_t)cpu; 10030Sstevel@tonic-gate 10040Sstevel@tonic-gate return (0); 10050Sstevel@tonic-gate } 10060Sstevel@tonic-gate 10070Sstevel@tonic-gate agp->dtat_ncpus = 0; 10080Sstevel@tonic-gate for (i = 0; i < agp->dtat_maxcpu; i++) { 10090Sstevel@tonic-gate if (dt_status(dtp, i) == -1) 10100Sstevel@tonic-gate continue; 10110Sstevel@tonic-gate 10120Sstevel@tonic-gate agp->dtat_cpus[agp->dtat_ncpus++] = i; 10130Sstevel@tonic-gate } 10140Sstevel@tonic-gate 10150Sstevel@tonic-gate return (0); 10160Sstevel@tonic-gate } 10170Sstevel@tonic-gate 10180Sstevel@tonic-gate static int 10190Sstevel@tonic-gate dt_aggwalk_rval(dtrace_hdl_t *dtp, dt_ahashent_t *h, int rval) 10200Sstevel@tonic-gate { 10210Sstevel@tonic-gate 
dt_aggregate_t *agp = &dtp->dt_aggregate; 10220Sstevel@tonic-gate dtrace_aggdata_t *data; 10230Sstevel@tonic-gate dtrace_aggdesc_t *aggdesc; 10240Sstevel@tonic-gate dtrace_recdesc_t *rec; 10250Sstevel@tonic-gate int i; 10260Sstevel@tonic-gate 10270Sstevel@tonic-gate switch (rval) { 10280Sstevel@tonic-gate case DTRACE_AGGWALK_NEXT: 10290Sstevel@tonic-gate break; 10300Sstevel@tonic-gate 10310Sstevel@tonic-gate case DTRACE_AGGWALK_CLEAR: { 10320Sstevel@tonic-gate uint32_t size, offs = 0; 10330Sstevel@tonic-gate 10340Sstevel@tonic-gate aggdesc = h->dtahe_data.dtada_desc; 10350Sstevel@tonic-gate rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1]; 10360Sstevel@tonic-gate size = rec->dtrd_size; 10370Sstevel@tonic-gate data = &h->dtahe_data; 10380Sstevel@tonic-gate 10390Sstevel@tonic-gate if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) { 10400Sstevel@tonic-gate offs = sizeof (uint64_t); 10410Sstevel@tonic-gate size -= sizeof (uint64_t); 10420Sstevel@tonic-gate } 10430Sstevel@tonic-gate 10440Sstevel@tonic-gate bzero(&data->dtada_data[rec->dtrd_offset] + offs, size); 10450Sstevel@tonic-gate 10460Sstevel@tonic-gate if (data->dtada_percpu == NULL) 10470Sstevel@tonic-gate break; 10480Sstevel@tonic-gate 10490Sstevel@tonic-gate for (i = 0; i < dtp->dt_aggregate.dtat_maxcpu; i++) 10500Sstevel@tonic-gate bzero(data->dtada_percpu[i] + offs, size); 10510Sstevel@tonic-gate break; 10520Sstevel@tonic-gate } 10530Sstevel@tonic-gate 10540Sstevel@tonic-gate case DTRACE_AGGWALK_ERROR: 10550Sstevel@tonic-gate /* 10560Sstevel@tonic-gate * We assume that errno is already set in this case. 
10570Sstevel@tonic-gate */ 10580Sstevel@tonic-gate return (dt_set_errno(dtp, errno)); 10590Sstevel@tonic-gate 10600Sstevel@tonic-gate case DTRACE_AGGWALK_ABORT: 10610Sstevel@tonic-gate return (dt_set_errno(dtp, EDT_DIRABORT)); 10620Sstevel@tonic-gate 10630Sstevel@tonic-gate case DTRACE_AGGWALK_DENORMALIZE: 10640Sstevel@tonic-gate h->dtahe_data.dtada_normal = 1; 10650Sstevel@tonic-gate return (0); 10660Sstevel@tonic-gate 10670Sstevel@tonic-gate case DTRACE_AGGWALK_NORMALIZE: 10680Sstevel@tonic-gate if (h->dtahe_data.dtada_normal == 0) { 10690Sstevel@tonic-gate h->dtahe_data.dtada_normal = 1; 10700Sstevel@tonic-gate return (dt_set_errno(dtp, EDT_BADRVAL)); 10710Sstevel@tonic-gate } 10720Sstevel@tonic-gate 10730Sstevel@tonic-gate return (0); 10740Sstevel@tonic-gate 10750Sstevel@tonic-gate case DTRACE_AGGWALK_REMOVE: { 10760Sstevel@tonic-gate dtrace_aggdata_t *aggdata = &h->dtahe_data; 10770Sstevel@tonic-gate int i, max_cpus = agp->dtat_maxcpu; 10780Sstevel@tonic-gate 10790Sstevel@tonic-gate /* 10800Sstevel@tonic-gate * First, remove this hash entry from its hash chain. 10810Sstevel@tonic-gate */ 10820Sstevel@tonic-gate if (h->dtahe_prev != NULL) { 10830Sstevel@tonic-gate h->dtahe_prev->dtahe_next = h->dtahe_next; 10840Sstevel@tonic-gate } else { 10850Sstevel@tonic-gate dt_ahash_t *hash = &agp->dtat_hash; 10860Sstevel@tonic-gate size_t ndx = h->dtahe_hashval % hash->dtah_size; 10870Sstevel@tonic-gate 10880Sstevel@tonic-gate assert(hash->dtah_hash[ndx] == h); 10890Sstevel@tonic-gate hash->dtah_hash[ndx] = h->dtahe_next; 10900Sstevel@tonic-gate } 10910Sstevel@tonic-gate 10920Sstevel@tonic-gate if (h->dtahe_next != NULL) 10930Sstevel@tonic-gate h->dtahe_next->dtahe_prev = h->dtahe_prev; 10940Sstevel@tonic-gate 10950Sstevel@tonic-gate /* 10960Sstevel@tonic-gate * Now remove it from the list of all hash entries. 
10970Sstevel@tonic-gate */ 10980Sstevel@tonic-gate if (h->dtahe_prevall != NULL) { 10990Sstevel@tonic-gate h->dtahe_prevall->dtahe_nextall = h->dtahe_nextall; 11000Sstevel@tonic-gate } else { 11010Sstevel@tonic-gate dt_ahash_t *hash = &agp->dtat_hash; 11020Sstevel@tonic-gate 11030Sstevel@tonic-gate assert(hash->dtah_all == h); 11040Sstevel@tonic-gate hash->dtah_all = h->dtahe_nextall; 11050Sstevel@tonic-gate } 11060Sstevel@tonic-gate 11070Sstevel@tonic-gate if (h->dtahe_nextall != NULL) 11080Sstevel@tonic-gate h->dtahe_nextall->dtahe_prevall = h->dtahe_prevall; 11090Sstevel@tonic-gate 11100Sstevel@tonic-gate /* 11110Sstevel@tonic-gate * We're unlinked. We can safely destroy the data. 11120Sstevel@tonic-gate */ 11130Sstevel@tonic-gate if (aggdata->dtada_percpu != NULL) { 11140Sstevel@tonic-gate for (i = 0; i < max_cpus; i++) 11150Sstevel@tonic-gate free(aggdata->dtada_percpu[i]); 11160Sstevel@tonic-gate free(aggdata->dtada_percpu); 11170Sstevel@tonic-gate } 11180Sstevel@tonic-gate 11190Sstevel@tonic-gate free(aggdata->dtada_data); 11200Sstevel@tonic-gate free(h); 11210Sstevel@tonic-gate 11220Sstevel@tonic-gate return (0); 11230Sstevel@tonic-gate } 11240Sstevel@tonic-gate 11250Sstevel@tonic-gate default: 11260Sstevel@tonic-gate return (dt_set_errno(dtp, EDT_BADRVAL)); 11270Sstevel@tonic-gate } 11280Sstevel@tonic-gate 11290Sstevel@tonic-gate return (0); 11300Sstevel@tonic-gate } 11310Sstevel@tonic-gate 1132*1017Sbmc void 1133*1017Sbmc dt_aggregate_qsort(dtrace_hdl_t *dtp, void *base, size_t nel, size_t width, 1134*1017Sbmc int (*compar)(const void *, const void *)) 1135*1017Sbmc { 1136*1017Sbmc int rev = dt_revsort, key = dt_keysort, keypos = dt_keypos; 1137*1017Sbmc dtrace_optval_t keyposopt = dtp->dt_options[DTRACEOPT_AGGSORTKEYPOS]; 1138*1017Sbmc 1139*1017Sbmc dt_revsort = (dtp->dt_options[DTRACEOPT_AGGSORTREV] != DTRACEOPT_UNSET); 1140*1017Sbmc dt_keysort = (dtp->dt_options[DTRACEOPT_AGGSORTKEY] != DTRACEOPT_UNSET); 1141*1017Sbmc 1142*1017Sbmc if (keyposopt != 
DTRACEOPT_UNSET && keyposopt <= INT_MAX) { 1143*1017Sbmc dt_keypos = (int)keyposopt; 1144*1017Sbmc } else { 1145*1017Sbmc dt_keypos = 0; 1146*1017Sbmc } 1147*1017Sbmc 1148*1017Sbmc if (compar == NULL) { 1149*1017Sbmc if (!dt_keysort) { 1150*1017Sbmc compar = dt_aggregate_varvalcmp; 1151*1017Sbmc } else { 1152*1017Sbmc compar = dt_aggregate_varkeycmp; 1153*1017Sbmc } 1154*1017Sbmc } 1155*1017Sbmc 1156*1017Sbmc qsort(base, nel, width, compar); 1157*1017Sbmc 1158*1017Sbmc dt_revsort = rev; 1159*1017Sbmc dt_keysort = key; 1160*1017Sbmc dt_keypos = keypos; 1161*1017Sbmc } 1162*1017Sbmc 11630Sstevel@tonic-gate int 11640Sstevel@tonic-gate dtrace_aggregate_walk(dtrace_hdl_t *dtp, dtrace_aggregate_f *func, void *arg) 11650Sstevel@tonic-gate { 11660Sstevel@tonic-gate dt_ahashent_t *h, *next; 11670Sstevel@tonic-gate dt_ahash_t *hash = &dtp->dt_aggregate.dtat_hash; 11680Sstevel@tonic-gate 11690Sstevel@tonic-gate for (h = hash->dtah_all; h != NULL; h = next) { 11700Sstevel@tonic-gate /* 11710Sstevel@tonic-gate * dt_aggwalk_rval() can potentially remove the current hash 11720Sstevel@tonic-gate * entry; we need to load the next hash entry before calling 11730Sstevel@tonic-gate * into it. 
11740Sstevel@tonic-gate */ 11750Sstevel@tonic-gate next = h->dtahe_nextall; 11760Sstevel@tonic-gate 11770Sstevel@tonic-gate if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1) 11780Sstevel@tonic-gate return (-1); 11790Sstevel@tonic-gate } 11800Sstevel@tonic-gate 11810Sstevel@tonic-gate return (0); 11820Sstevel@tonic-gate } 11830Sstevel@tonic-gate 11840Sstevel@tonic-gate static int 11850Sstevel@tonic-gate dt_aggregate_walk_sorted(dtrace_hdl_t *dtp, 11860Sstevel@tonic-gate dtrace_aggregate_f *func, void *arg, 11870Sstevel@tonic-gate int (*sfunc)(const void *, const void *)) 11880Sstevel@tonic-gate { 11890Sstevel@tonic-gate dt_aggregate_t *agp = &dtp->dt_aggregate; 11900Sstevel@tonic-gate dt_ahashent_t *h, **sorted; 11910Sstevel@tonic-gate dt_ahash_t *hash = &agp->dtat_hash; 11920Sstevel@tonic-gate size_t i, nentries = 0; 11930Sstevel@tonic-gate 11940Sstevel@tonic-gate for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) 11950Sstevel@tonic-gate nentries++; 11960Sstevel@tonic-gate 1197*1017Sbmc sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *)); 11980Sstevel@tonic-gate 11990Sstevel@tonic-gate if (sorted == NULL) 1200*1017Sbmc return (-1); 12010Sstevel@tonic-gate 12020Sstevel@tonic-gate for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall) 12030Sstevel@tonic-gate sorted[i++] = h; 12040Sstevel@tonic-gate 1205*1017Sbmc (void) pthread_mutex_lock(&dt_qsort_lock); 1206*1017Sbmc 1207*1017Sbmc if (sfunc == NULL) { 1208*1017Sbmc dt_aggregate_qsort(dtp, sorted, nentries, 1209*1017Sbmc sizeof (dt_ahashent_t *), NULL); 1210*1017Sbmc } else { 1211*1017Sbmc /* 1212*1017Sbmc * If we've been explicitly passed a sorting function, 1213*1017Sbmc * we'll use that -- ignoring the values of the "aggsortrev", 1214*1017Sbmc * "aggsortkey" and "aggsortkeypos" options. 
1215*1017Sbmc */ 1216*1017Sbmc qsort(sorted, nentries, sizeof (dt_ahashent_t *), sfunc); 1217*1017Sbmc } 1218*1017Sbmc 1219*1017Sbmc (void) pthread_mutex_unlock(&dt_qsort_lock); 12200Sstevel@tonic-gate 12210Sstevel@tonic-gate for (i = 0; i < nentries; i++) { 12220Sstevel@tonic-gate h = sorted[i]; 12230Sstevel@tonic-gate 1224*1017Sbmc if (dt_aggwalk_rval(dtp, h, func(&h->dtahe_data, arg)) == -1) { 1225*1017Sbmc dt_free(dtp, sorted); 12260Sstevel@tonic-gate return (-1); 1227*1017Sbmc } 12280Sstevel@tonic-gate } 12290Sstevel@tonic-gate 1230*1017Sbmc dt_free(dtp, sorted); 12310Sstevel@tonic-gate return (0); 12320Sstevel@tonic-gate } 12330Sstevel@tonic-gate 12340Sstevel@tonic-gate int 1235*1017Sbmc dtrace_aggregate_walk_sorted(dtrace_hdl_t *dtp, 1236*1017Sbmc dtrace_aggregate_f *func, void *arg) 1237*1017Sbmc { 1238*1017Sbmc return (dt_aggregate_walk_sorted(dtp, func, arg, NULL)); 1239*1017Sbmc } 1240*1017Sbmc 1241*1017Sbmc int 12420Sstevel@tonic-gate dtrace_aggregate_walk_keysorted(dtrace_hdl_t *dtp, 12430Sstevel@tonic-gate dtrace_aggregate_f *func, void *arg) 12440Sstevel@tonic-gate { 12450Sstevel@tonic-gate return (dt_aggregate_walk_sorted(dtp, func, 12460Sstevel@tonic-gate arg, dt_aggregate_varkeycmp)); 12470Sstevel@tonic-gate } 12480Sstevel@tonic-gate 12490Sstevel@tonic-gate int 12500Sstevel@tonic-gate dtrace_aggregate_walk_valsorted(dtrace_hdl_t *dtp, 12510Sstevel@tonic-gate dtrace_aggregate_f *func, void *arg) 12520Sstevel@tonic-gate { 12530Sstevel@tonic-gate return (dt_aggregate_walk_sorted(dtp, func, 12540Sstevel@tonic-gate arg, dt_aggregate_varvalcmp)); 12550Sstevel@tonic-gate } 12560Sstevel@tonic-gate 12570Sstevel@tonic-gate int 12580Sstevel@tonic-gate dtrace_aggregate_walk_keyvarsorted(dtrace_hdl_t *dtp, 12590Sstevel@tonic-gate dtrace_aggregate_f *func, void *arg) 12600Sstevel@tonic-gate { 12610Sstevel@tonic-gate return (dt_aggregate_walk_sorted(dtp, func, 12620Sstevel@tonic-gate arg, dt_aggregate_keyvarcmp)); 12630Sstevel@tonic-gate } 
12640Sstevel@tonic-gate 12650Sstevel@tonic-gate int 12660Sstevel@tonic-gate dtrace_aggregate_walk_valvarsorted(dtrace_hdl_t *dtp, 12670Sstevel@tonic-gate dtrace_aggregate_f *func, void *arg) 12680Sstevel@tonic-gate { 12690Sstevel@tonic-gate return (dt_aggregate_walk_sorted(dtp, func, 12700Sstevel@tonic-gate arg, dt_aggregate_valvarcmp)); 12710Sstevel@tonic-gate } 12720Sstevel@tonic-gate 12730Sstevel@tonic-gate int 12740Sstevel@tonic-gate dtrace_aggregate_walk_keyrevsorted(dtrace_hdl_t *dtp, 12750Sstevel@tonic-gate dtrace_aggregate_f *func, void *arg) 12760Sstevel@tonic-gate { 12770Sstevel@tonic-gate return (dt_aggregate_walk_sorted(dtp, func, 12780Sstevel@tonic-gate arg, dt_aggregate_varkeyrevcmp)); 12790Sstevel@tonic-gate } 12800Sstevel@tonic-gate 12810Sstevel@tonic-gate int 12820Sstevel@tonic-gate dtrace_aggregate_walk_valrevsorted(dtrace_hdl_t *dtp, 12830Sstevel@tonic-gate dtrace_aggregate_f *func, void *arg) 12840Sstevel@tonic-gate { 12850Sstevel@tonic-gate return (dt_aggregate_walk_sorted(dtp, func, 12860Sstevel@tonic-gate arg, dt_aggregate_varvalrevcmp)); 12870Sstevel@tonic-gate } 12880Sstevel@tonic-gate 12890Sstevel@tonic-gate int 12900Sstevel@tonic-gate dtrace_aggregate_walk_keyvarrevsorted(dtrace_hdl_t *dtp, 12910Sstevel@tonic-gate dtrace_aggregate_f *func, void *arg) 12920Sstevel@tonic-gate { 12930Sstevel@tonic-gate return (dt_aggregate_walk_sorted(dtp, func, 12940Sstevel@tonic-gate arg, dt_aggregate_keyvarrevcmp)); 12950Sstevel@tonic-gate } 12960Sstevel@tonic-gate 12970Sstevel@tonic-gate int 12980Sstevel@tonic-gate dtrace_aggregate_walk_valvarrevsorted(dtrace_hdl_t *dtp, 12990Sstevel@tonic-gate dtrace_aggregate_f *func, void *arg) 13000Sstevel@tonic-gate { 13010Sstevel@tonic-gate return (dt_aggregate_walk_sorted(dtp, func, 13020Sstevel@tonic-gate arg, dt_aggregate_valvarrevcmp)); 13030Sstevel@tonic-gate } 13040Sstevel@tonic-gate 13050Sstevel@tonic-gate int 1306*1017Sbmc dtrace_aggregate_walk_joined(dtrace_hdl_t *dtp, dtrace_aggvarid_t *aggvars, 
1307*1017Sbmc int naggvars, dtrace_aggregate_walk_joined_f *func, void *arg) 1308*1017Sbmc { 1309*1017Sbmc dt_aggregate_t *agp = &dtp->dt_aggregate; 1310*1017Sbmc dt_ahashent_t *h, **sorted = NULL, ***bundle, **nbundle; 1311*1017Sbmc const dtrace_aggdata_t **data; 1312*1017Sbmc dt_ahashent_t *zaggdata = NULL; 1313*1017Sbmc dt_ahash_t *hash = &agp->dtat_hash; 1314*1017Sbmc size_t nentries = 0, nbundles = 0, start, zsize = 0, bundlesize; 1315*1017Sbmc dtrace_aggvarid_t max = 0, aggvar; 1316*1017Sbmc int rval = -1, *map, *remap = NULL; 1317*1017Sbmc int i, j; 1318*1017Sbmc dtrace_optval_t sortpos = dtp->dt_options[DTRACEOPT_AGGSORTPOS]; 1319*1017Sbmc 1320*1017Sbmc /* 1321*1017Sbmc * If the sorting position is greater than the number of aggregation 1322*1017Sbmc * variable IDs, we silently set it to 0. 1323*1017Sbmc */ 1324*1017Sbmc if (sortpos == DTRACEOPT_UNSET || sortpos >= naggvars) 1325*1017Sbmc sortpos = 0; 1326*1017Sbmc 1327*1017Sbmc /* 1328*1017Sbmc * First we need to translate the specified aggregation variable IDs 1329*1017Sbmc * into a linear map that will allow us to translate an aggregation 1330*1017Sbmc * variable ID into its position in the specified aggvars. 
1331*1017Sbmc */ 1332*1017Sbmc for (i = 0; i < naggvars; i++) { 1333*1017Sbmc if (aggvars[i] == DTRACE_AGGVARIDNONE || aggvars[i] < 0) 1334*1017Sbmc return (dt_set_errno(dtp, EDT_BADAGGVAR)); 1335*1017Sbmc 1336*1017Sbmc if (aggvars[i] > max) 1337*1017Sbmc max = aggvars[i]; 1338*1017Sbmc } 1339*1017Sbmc 1340*1017Sbmc if ((map = dt_zalloc(dtp, (max + 1) * sizeof (int))) == NULL) 1341*1017Sbmc return (-1); 1342*1017Sbmc 1343*1017Sbmc zaggdata = dt_zalloc(dtp, naggvars * sizeof (dt_ahashent_t)); 1344*1017Sbmc 1345*1017Sbmc if (zaggdata == NULL) 1346*1017Sbmc goto out; 1347*1017Sbmc 1348*1017Sbmc for (i = 0; i < naggvars; i++) { 1349*1017Sbmc int ndx = i + sortpos; 1350*1017Sbmc 1351*1017Sbmc if (ndx >= naggvars) 1352*1017Sbmc ndx -= naggvars; 1353*1017Sbmc 1354*1017Sbmc aggvar = aggvars[ndx]; 1355*1017Sbmc assert(aggvar <= max); 1356*1017Sbmc 1357*1017Sbmc if (map[aggvar]) { 1358*1017Sbmc /* 1359*1017Sbmc * We have an aggregation variable that is present 1360*1017Sbmc * more than once in the array of aggregation 1361*1017Sbmc * variables. While it's unclear why one might want 1362*1017Sbmc * to do this, it's legal. To support this construct, 1363*1017Sbmc * we will allocate a remap that will indicate the 1364*1017Sbmc * position from which this aggregation variable 1365*1017Sbmc * should be pulled. (That is, where the remap will 1366*1017Sbmc * map from one position to another.) 1367*1017Sbmc */ 1368*1017Sbmc if (remap == NULL) { 1369*1017Sbmc remap = dt_zalloc(dtp, naggvars * sizeof (int)); 1370*1017Sbmc 1371*1017Sbmc if (remap == NULL) 1372*1017Sbmc goto out; 1373*1017Sbmc } 1374*1017Sbmc 1375*1017Sbmc /* 1376*1017Sbmc * Given that the variable is already present, assert 1377*1017Sbmc * that following through the mapping and adjusting 1378*1017Sbmc * for the sort position yields the same aggregation 1379*1017Sbmc * variable ID. 
1380*1017Sbmc */ 1381*1017Sbmc assert(aggvars[(map[aggvar] - 1 + sortpos) % 1382*1017Sbmc naggvars] == aggvars[ndx]); 1383*1017Sbmc 1384*1017Sbmc remap[i] = map[aggvar]; 1385*1017Sbmc continue; 1386*1017Sbmc } 1387*1017Sbmc 1388*1017Sbmc map[aggvar] = i + 1; 1389*1017Sbmc } 1390*1017Sbmc 1391*1017Sbmc /* 1392*1017Sbmc * We need to take two passes over the data to size our allocation, so 1393*1017Sbmc * we'll use the first pass to also fill in the zero-filled data to be 1394*1017Sbmc * used to properly format a zero-valued aggregation. 1395*1017Sbmc */ 1396*1017Sbmc for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) { 1397*1017Sbmc dtrace_aggvarid_t id; 1398*1017Sbmc int ndx; 1399*1017Sbmc 1400*1017Sbmc if ((id = dt_aggregate_aggvarid(h)) > max || !(ndx = map[id])) 1401*1017Sbmc continue; 1402*1017Sbmc 1403*1017Sbmc if (zaggdata[ndx - 1].dtahe_size == 0) { 1404*1017Sbmc zaggdata[ndx - 1].dtahe_size = h->dtahe_size; 1405*1017Sbmc zaggdata[ndx - 1].dtahe_data = h->dtahe_data; 1406*1017Sbmc } 1407*1017Sbmc 1408*1017Sbmc nentries++; 1409*1017Sbmc } 1410*1017Sbmc 1411*1017Sbmc if (nentries == 0) { 1412*1017Sbmc /* 1413*1017Sbmc * We couldn't find any entries; there is nothing else to do. 1414*1017Sbmc */ 1415*1017Sbmc rval = 0; 1416*1017Sbmc goto out; 1417*1017Sbmc } 1418*1017Sbmc 1419*1017Sbmc /* 1420*1017Sbmc * Before we sort the data, we're going to look for any holes in our 1421*1017Sbmc * zero-filled data. This will occur if an aggregation variable that 1422*1017Sbmc * we are being asked to print has not yet been assigned the result of 1423*1017Sbmc * any aggregating action for _any_ tuple. The issue becomes that we 1424*1017Sbmc * would like a zero value to be printed for all columns for this 1425*1017Sbmc * aggregation, but without any record description, we don't know the 1426*1017Sbmc * aggregating action that corresponds to the aggregation variable. 
To 1427*1017Sbmc * try to find a match, we're simply going to lookup aggregation IDs 1428*1017Sbmc * (which are guaranteed to be contiguous and to start from 1), looking 1429*1017Sbmc * for the specified aggregation variable ID. If we find a match, 1430*1017Sbmc * we'll use that. If we iterate over all aggregation IDs and don't 1431*1017Sbmc * find a match, then we must be an anonymous enabling. (Anonymous 1432*1017Sbmc * enablings can't currently derive either aggregation variable IDs or 1433*1017Sbmc * aggregation variable names given only an aggregation ID.) In this 1434*1017Sbmc * obscure case (anonymous enabling, multiple aggregation printa() with 1435*1017Sbmc * some aggregations not represented for any tuple), our defined 1436*1017Sbmc * behavior is that the zero will be printed in the format of the first 1437*1017Sbmc * aggregation variable that contains any non-zero value. 1438*1017Sbmc */ 1439*1017Sbmc for (i = 0; i < naggvars; i++) { 1440*1017Sbmc if (zaggdata[i].dtahe_size == 0) { 1441*1017Sbmc dtrace_aggvarid_t aggvar; 1442*1017Sbmc 1443*1017Sbmc aggvar = aggvars[(i - sortpos + naggvars) % naggvars]; 1444*1017Sbmc assert(zaggdata[i].dtahe_data.dtada_data == NULL); 1445*1017Sbmc 1446*1017Sbmc for (j = DTRACE_AGGIDNONE + 1; ; j++) { 1447*1017Sbmc dtrace_aggdesc_t *agg; 1448*1017Sbmc dtrace_aggdata_t *aggdata; 1449*1017Sbmc 1450*1017Sbmc if (dt_aggid_lookup(dtp, j, &agg) != 0) 1451*1017Sbmc break; 1452*1017Sbmc 1453*1017Sbmc if (agg->dtagd_varid != aggvar) 1454*1017Sbmc continue; 1455*1017Sbmc 1456*1017Sbmc /* 1457*1017Sbmc * We have our description -- now we need to 1458*1017Sbmc * cons up the zaggdata entry for it. 
1459*1017Sbmc */ 1460*1017Sbmc aggdata = &zaggdata[i].dtahe_data; 1461*1017Sbmc aggdata->dtada_size = agg->dtagd_size; 1462*1017Sbmc aggdata->dtada_desc = agg; 1463*1017Sbmc aggdata->dtada_handle = dtp; 1464*1017Sbmc (void) dt_epid_lookup(dtp, agg->dtagd_epid, 1465*1017Sbmc &aggdata->dtada_edesc, 1466*1017Sbmc &aggdata->dtada_pdesc); 1467*1017Sbmc aggdata->dtada_normal = 1; 1468*1017Sbmc zaggdata[i].dtahe_hashval = 0; 1469*1017Sbmc zaggdata[i].dtahe_size = agg->dtagd_size; 1470*1017Sbmc break; 1471*1017Sbmc } 1472*1017Sbmc 1473*1017Sbmc if (zaggdata[i].dtahe_size == 0) { 1474*1017Sbmc caddr_t data; 1475*1017Sbmc 1476*1017Sbmc /* 1477*1017Sbmc * We couldn't find this aggregation, meaning 1478*1017Sbmc * that we have never seen it before for any 1479*1017Sbmc * tuple _and_ this is an anonymous enabling. 1480*1017Sbmc * That is, we're in the obscure case outlined 1481*1017Sbmc * above. In this case, our defined behavior 1482*1017Sbmc * is to format the data in the format of the 1483*1017Sbmc * first non-zero aggregation -- of which, of 1484*1017Sbmc * course, we know there to be at least one 1485*1017Sbmc * (or nentries would have been zero). 1486*1017Sbmc */ 1487*1017Sbmc for (j = 0; j < naggvars; j++) { 1488*1017Sbmc if (zaggdata[j].dtahe_size != 0) 1489*1017Sbmc break; 1490*1017Sbmc } 1491*1017Sbmc 1492*1017Sbmc assert(j < naggvars); 1493*1017Sbmc zaggdata[i] = zaggdata[j]; 1494*1017Sbmc 1495*1017Sbmc data = zaggdata[i].dtahe_data.dtada_data; 1496*1017Sbmc assert(data != NULL); 1497*1017Sbmc } 1498*1017Sbmc } 1499*1017Sbmc } 1500*1017Sbmc 1501*1017Sbmc /* 1502*1017Sbmc * Now we need to allocate our zero-filled data for use for 1503*1017Sbmc * aggregations that don't have a value corresponding to a given key. 
1504*1017Sbmc */ 1505*1017Sbmc for (i = 0; i < naggvars; i++) { 1506*1017Sbmc dtrace_aggdata_t *aggdata = &zaggdata[i].dtahe_data; 1507*1017Sbmc dtrace_aggdesc_t *aggdesc = aggdata->dtada_desc; 1508*1017Sbmc dtrace_recdesc_t *rec; 1509*1017Sbmc uint64_t larg; 1510*1017Sbmc caddr_t zdata; 1511*1017Sbmc 1512*1017Sbmc zsize = zaggdata[i].dtahe_size; 1513*1017Sbmc assert(zsize != 0); 1514*1017Sbmc 1515*1017Sbmc if ((zdata = dt_zalloc(dtp, zsize)) == NULL) { 1516*1017Sbmc /* 1517*1017Sbmc * If we failed to allocated some zero-filled data, we 1518*1017Sbmc * need to zero out the remaining dtada_data pointers 1519*1017Sbmc * to prevent the wrong data from being freed below. 1520*1017Sbmc */ 1521*1017Sbmc for (j = i; j < naggvars; j++) 1522*1017Sbmc zaggdata[j].dtahe_data.dtada_data = NULL; 1523*1017Sbmc goto out; 1524*1017Sbmc } 1525*1017Sbmc 1526*1017Sbmc aggvar = aggvars[(i - sortpos + naggvars) % naggvars]; 1527*1017Sbmc 1528*1017Sbmc /* 1529*1017Sbmc * First, the easy bit. To maintain compatibility with 1530*1017Sbmc * consumers that pull the compiler-generated ID out of the 1531*1017Sbmc * data, we put that ID at the top of the zero-filled data. 1532*1017Sbmc */ 1533*1017Sbmc rec = &aggdesc->dtagd_rec[0]; 1534*1017Sbmc /* LINTED - alignment */ 1535*1017Sbmc *((dtrace_aggvarid_t *)(zdata + rec->dtrd_offset)) = aggvar; 1536*1017Sbmc 1537*1017Sbmc rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1]; 1538*1017Sbmc 1539*1017Sbmc /* 1540*1017Sbmc * Now for the more complicated part. If (and only if) this 1541*1017Sbmc * is an lquantize() aggregating action, zero-filled data is 1542*1017Sbmc * not equivalent to an empty record: we must also get the 1543*1017Sbmc * parameters for the lquantize(). 
1544*1017Sbmc */ 1545*1017Sbmc if (rec->dtrd_action == DTRACEAGG_LQUANTIZE) { 1546*1017Sbmc if (aggdata->dtada_data != NULL) { 1547*1017Sbmc /* 1548*1017Sbmc * The easier case here is if we actually have 1549*1017Sbmc * some prototype data -- in which case we 1550*1017Sbmc * manually dig it out of the aggregation 1551*1017Sbmc * record. 1552*1017Sbmc */ 1553*1017Sbmc /* LINTED - alignment */ 1554*1017Sbmc larg = *((uint64_t *)(aggdata->dtada_data + 1555*1017Sbmc rec->dtrd_offset)); 1556*1017Sbmc } else { 1557*1017Sbmc /* 1558*1017Sbmc * We don't have any prototype data. As a 1559*1017Sbmc * result, we know that we _do_ have the 1560*1017Sbmc * compiler-generated information. (If this 1561*1017Sbmc * were an anonymous enabling, all of our 1562*1017Sbmc * zero-filled data would have prototype data 1563*1017Sbmc * -- either directly or indirectly.) So as 1564*1017Sbmc * gross as it is, we'll grovel around in the 1565*1017Sbmc * compiler-generated information to find the 1566*1017Sbmc * lquantize() parameters. 1567*1017Sbmc */ 1568*1017Sbmc dtrace_stmtdesc_t *sdp; 1569*1017Sbmc dt_ident_t *aid; 1570*1017Sbmc dt_idsig_t *isp; 1571*1017Sbmc 1572*1017Sbmc sdp = (dtrace_stmtdesc_t *)(uintptr_t) 1573*1017Sbmc aggdesc->dtagd_rec[0].dtrd_uarg; 1574*1017Sbmc aid = sdp->dtsd_aggdata; 1575*1017Sbmc isp = (dt_idsig_t *)aid->di_data; 1576*1017Sbmc assert(isp->dis_auxinfo != 0); 1577*1017Sbmc larg = isp->dis_auxinfo; 1578*1017Sbmc } 1579*1017Sbmc 1580*1017Sbmc /* LINTED - alignment */ 1581*1017Sbmc *((uint64_t *)(zdata + rec->dtrd_offset)) = larg; 1582*1017Sbmc } 1583*1017Sbmc 1584*1017Sbmc aggdata->dtada_data = zdata; 1585*1017Sbmc } 1586*1017Sbmc 1587*1017Sbmc /* 1588*1017Sbmc * Now that we've dealt with setting up our zero-filled data, we can 1589*1017Sbmc * allocate our sorted array, and take another pass over the data to 1590*1017Sbmc * fill it. 
1591*1017Sbmc */ 1592*1017Sbmc sorted = dt_alloc(dtp, nentries * sizeof (dt_ahashent_t *)); 1593*1017Sbmc 1594*1017Sbmc if (sorted == NULL) 1595*1017Sbmc goto out; 1596*1017Sbmc 1597*1017Sbmc for (h = hash->dtah_all, i = 0; h != NULL; h = h->dtahe_nextall) { 1598*1017Sbmc dtrace_aggvarid_t id; 1599*1017Sbmc 1600*1017Sbmc if ((id = dt_aggregate_aggvarid(h)) > max || !map[id]) 1601*1017Sbmc continue; 1602*1017Sbmc 1603*1017Sbmc sorted[i++] = h; 1604*1017Sbmc } 1605*1017Sbmc 1606*1017Sbmc assert(i == nentries); 1607*1017Sbmc 1608*1017Sbmc /* 1609*1017Sbmc * We've loaded our array; now we need to sort by value to allow us 1610*1017Sbmc * to create bundles of like value. We're going to acquire the 1611*1017Sbmc * dt_qsort_lock here, and hold it across all of our subsequent 1612*1017Sbmc * comparison and sorting. 1613*1017Sbmc */ 1614*1017Sbmc (void) pthread_mutex_lock(&dt_qsort_lock); 1615*1017Sbmc 1616*1017Sbmc qsort(sorted, nentries, sizeof (dt_ahashent_t *), 1617*1017Sbmc dt_aggregate_keyvarcmp); 1618*1017Sbmc 1619*1017Sbmc /* 1620*1017Sbmc * Now we need to go through and create bundles. Because the number 1621*1017Sbmc * of bundles is bounded by the size of the sorted array, we're going 1622*1017Sbmc * to reuse the underlying storage. And note that "bundle" is an 1623*1017Sbmc * array of pointers to arrays of pointers to dt_ahashent_t -- making 1624*1017Sbmc * its type (regrettably) "dt_ahashent_t ***". (Regrettable because 1625*1017Sbmc * '*' -- like '_' and 'X' -- should never appear in triplicate in 1626*1017Sbmc * an ideal world.) 1627*1017Sbmc */ 1628*1017Sbmc bundle = (dt_ahashent_t ***)sorted; 1629*1017Sbmc 1630*1017Sbmc for (i = 1, start = 0; i <= nentries; i++) { 1631*1017Sbmc if (i < nentries && 1632*1017Sbmc dt_aggregate_keycmp(&sorted[i], &sorted[i - 1]) == 0) 1633*1017Sbmc continue; 1634*1017Sbmc 1635*1017Sbmc /* 1636*1017Sbmc * We have a bundle boundary. Everything from start to 1637*1017Sbmc * (i - 1) belongs in one bundle. 
1638*1017Sbmc */ 1639*1017Sbmc assert(i - start <= naggvars); 1640*1017Sbmc bundlesize = (naggvars + 2) * sizeof (dt_ahashent_t *); 1641*1017Sbmc 1642*1017Sbmc if ((nbundle = dt_zalloc(dtp, bundlesize)) == NULL) { 1643*1017Sbmc (void) pthread_mutex_unlock(&dt_qsort_lock); 1644*1017Sbmc goto out; 1645*1017Sbmc } 1646*1017Sbmc 1647*1017Sbmc for (j = start; j < i; j++) { 1648*1017Sbmc dtrace_aggvarid_t id = dt_aggregate_aggvarid(sorted[j]); 1649*1017Sbmc 1650*1017Sbmc assert(id <= max); 1651*1017Sbmc assert(map[id] != 0); 1652*1017Sbmc assert(map[id] - 1 < naggvars); 1653*1017Sbmc assert(nbundle[map[id] - 1] == NULL); 1654*1017Sbmc nbundle[map[id] - 1] = sorted[j]; 1655*1017Sbmc 1656*1017Sbmc if (nbundle[naggvars] == NULL) 1657*1017Sbmc nbundle[naggvars] = sorted[j]; 1658*1017Sbmc } 1659*1017Sbmc 1660*1017Sbmc for (j = 0; j < naggvars; j++) { 1661*1017Sbmc if (nbundle[j] != NULL) 1662*1017Sbmc continue; 1663*1017Sbmc 1664*1017Sbmc /* 1665*1017Sbmc * Before we assume that this aggregation variable 1666*1017Sbmc * isn't present (and fall back to using the 1667*1017Sbmc * zero-filled data allocated earlier), check the 1668*1017Sbmc * remap. If we have a remapping, we'll drop it in 1669*1017Sbmc * here. Note that we might be remapping an 1670*1017Sbmc * aggregation variable that isn't present for this 1671*1017Sbmc * key; in this case, the aggregation data that we 1672*1017Sbmc * copy will point to the zeroed data. 1673*1017Sbmc */ 1674*1017Sbmc if (remap != NULL && remap[j]) { 1675*1017Sbmc assert(remap[j] - 1 < j); 1676*1017Sbmc assert(nbundle[remap[j] - 1] != NULL); 1677*1017Sbmc nbundle[j] = nbundle[remap[j] - 1]; 1678*1017Sbmc } else { 1679*1017Sbmc nbundle[j] = &zaggdata[j]; 1680*1017Sbmc } 1681*1017Sbmc } 1682*1017Sbmc 1683*1017Sbmc bundle[nbundles++] = nbundle; 1684*1017Sbmc start = i; 1685*1017Sbmc } 1686*1017Sbmc 1687*1017Sbmc /* 1688*1017Sbmc * Now we need to re-sort based on the first value. 
1689*1017Sbmc */ 1690*1017Sbmc dt_aggregate_qsort(dtp, bundle, nbundles, sizeof (dt_ahashent_t **), 1691*1017Sbmc dt_aggregate_bundlecmp); 1692*1017Sbmc 1693*1017Sbmc (void) pthread_mutex_unlock(&dt_qsort_lock); 1694*1017Sbmc 1695*1017Sbmc /* 1696*1017Sbmc * We're done! Now we just need to go back over the sorted bundles, 1697*1017Sbmc * calling the function. 1698*1017Sbmc */ 1699*1017Sbmc data = alloca((naggvars + 1) * sizeof (dtrace_aggdata_t *)); 1700*1017Sbmc 1701*1017Sbmc for (i = 0; i < nbundles; i++) { 1702*1017Sbmc for (j = 0; j < naggvars; j++) 1703*1017Sbmc data[j + 1] = NULL; 1704*1017Sbmc 1705*1017Sbmc for (j = 0; j < naggvars; j++) { 1706*1017Sbmc int ndx = j - sortpos; 1707*1017Sbmc 1708*1017Sbmc if (ndx < 0) 1709*1017Sbmc ndx += naggvars; 1710*1017Sbmc 1711*1017Sbmc assert(bundle[i][ndx] != NULL); 1712*1017Sbmc data[j + 1] = &bundle[i][ndx]->dtahe_data; 1713*1017Sbmc } 1714*1017Sbmc 1715*1017Sbmc for (j = 0; j < naggvars; j++) 1716*1017Sbmc assert(data[j + 1] != NULL); 1717*1017Sbmc 1718*1017Sbmc /* 1719*1017Sbmc * The representative key is the last element in the bundle. 1720*1017Sbmc * Assert that we have one, and then set it to be the first 1721*1017Sbmc * element of data. 
1722*1017Sbmc */ 1723*1017Sbmc assert(bundle[i][j] != NULL); 1724*1017Sbmc data[0] = &bundle[i][j]->dtahe_data; 1725*1017Sbmc 1726*1017Sbmc if ((rval = func(data, naggvars + 1, arg)) == -1) 1727*1017Sbmc goto out; 1728*1017Sbmc } 1729*1017Sbmc 1730*1017Sbmc rval = 0; 1731*1017Sbmc out: 1732*1017Sbmc for (i = 0; i < nbundles; i++) 1733*1017Sbmc dt_free(dtp, bundle[i]); 1734*1017Sbmc 1735*1017Sbmc if (zaggdata != NULL) { 1736*1017Sbmc for (i = 0; i < naggvars; i++) 1737*1017Sbmc dt_free(dtp, zaggdata[i].dtahe_data.dtada_data); 1738*1017Sbmc } 1739*1017Sbmc 1740*1017Sbmc dt_free(dtp, zaggdata); 1741*1017Sbmc dt_free(dtp, sorted); 1742*1017Sbmc dt_free(dtp, remap); 1743*1017Sbmc dt_free(dtp, map); 1744*1017Sbmc 1745*1017Sbmc return (rval); 1746*1017Sbmc } 1747*1017Sbmc 1748*1017Sbmc int 17490Sstevel@tonic-gate dtrace_aggregate_print(dtrace_hdl_t *dtp, FILE *fp, 17500Sstevel@tonic-gate dtrace_aggregate_walk_f *func) 17510Sstevel@tonic-gate { 17520Sstevel@tonic-gate dt_print_aggdata_t pd; 17530Sstevel@tonic-gate 17540Sstevel@tonic-gate pd.dtpa_dtp = dtp; 17550Sstevel@tonic-gate pd.dtpa_fp = fp; 17560Sstevel@tonic-gate pd.dtpa_allunprint = 1; 17570Sstevel@tonic-gate 17580Sstevel@tonic-gate if (func == NULL) 1759*1017Sbmc func = dtrace_aggregate_walk_sorted; 17600Sstevel@tonic-gate 17610Sstevel@tonic-gate if ((*func)(dtp, dt_print_agg, &pd) == -1) 17620Sstevel@tonic-gate return (dt_set_errno(dtp, dtp->dt_errno)); 17630Sstevel@tonic-gate 17640Sstevel@tonic-gate return (0); 17650Sstevel@tonic-gate } 17660Sstevel@tonic-gate 17670Sstevel@tonic-gate void 17680Sstevel@tonic-gate dtrace_aggregate_clear(dtrace_hdl_t *dtp) 17690Sstevel@tonic-gate { 17700Sstevel@tonic-gate dt_aggregate_t *agp = &dtp->dt_aggregate; 17710Sstevel@tonic-gate dt_ahash_t *hash = &agp->dtat_hash; 17720Sstevel@tonic-gate dt_ahashent_t *h; 17730Sstevel@tonic-gate dtrace_aggdata_t *data; 17740Sstevel@tonic-gate dtrace_aggdesc_t *aggdesc; 17750Sstevel@tonic-gate dtrace_recdesc_t *rec; 17760Sstevel@tonic-gate 
int i, max_cpus = agp->dtat_maxcpu; 17770Sstevel@tonic-gate 17780Sstevel@tonic-gate for (h = hash->dtah_all; h != NULL; h = h->dtahe_nextall) { 17790Sstevel@tonic-gate aggdesc = h->dtahe_data.dtada_desc; 17800Sstevel@tonic-gate rec = &aggdesc->dtagd_rec[aggdesc->dtagd_nrecs - 1]; 17810Sstevel@tonic-gate data = &h->dtahe_data; 17820Sstevel@tonic-gate 17830Sstevel@tonic-gate bzero(&data->dtada_data[rec->dtrd_offset], rec->dtrd_size); 17840Sstevel@tonic-gate 17850Sstevel@tonic-gate if (data->dtada_percpu == NULL) 17860Sstevel@tonic-gate continue; 17870Sstevel@tonic-gate 17880Sstevel@tonic-gate for (i = 0; i < max_cpus; i++) 17890Sstevel@tonic-gate bzero(data->dtada_percpu[i], rec->dtrd_size); 17900Sstevel@tonic-gate } 17910Sstevel@tonic-gate } 17920Sstevel@tonic-gate 17930Sstevel@tonic-gate void 17940Sstevel@tonic-gate dt_aggregate_destroy(dtrace_hdl_t *dtp) 17950Sstevel@tonic-gate { 17960Sstevel@tonic-gate dt_aggregate_t *agp = &dtp->dt_aggregate; 17970Sstevel@tonic-gate dt_ahash_t *hash = &agp->dtat_hash; 17980Sstevel@tonic-gate dt_ahashent_t *h, *next; 17990Sstevel@tonic-gate dtrace_aggdata_t *aggdata; 18000Sstevel@tonic-gate int i, max_cpus = agp->dtat_maxcpu; 18010Sstevel@tonic-gate 18020Sstevel@tonic-gate if (hash->dtah_hash == NULL) { 18030Sstevel@tonic-gate assert(hash->dtah_all == NULL); 18040Sstevel@tonic-gate } else { 18050Sstevel@tonic-gate free(hash->dtah_hash); 18060Sstevel@tonic-gate 18070Sstevel@tonic-gate for (h = hash->dtah_all; h != NULL; h = next) { 18080Sstevel@tonic-gate next = h->dtahe_nextall; 18090Sstevel@tonic-gate 18100Sstevel@tonic-gate aggdata = &h->dtahe_data; 18110Sstevel@tonic-gate 18120Sstevel@tonic-gate if (aggdata->dtada_percpu != NULL) { 18130Sstevel@tonic-gate for (i = 0; i < max_cpus; i++) 18140Sstevel@tonic-gate free(aggdata->dtada_percpu[i]); 18150Sstevel@tonic-gate free(aggdata->dtada_percpu); 18160Sstevel@tonic-gate } 18170Sstevel@tonic-gate 18180Sstevel@tonic-gate free(aggdata->dtada_data); 18190Sstevel@tonic-gate free(h); 
18200Sstevel@tonic-gate } 18210Sstevel@tonic-gate 18220Sstevel@tonic-gate hash->dtah_hash = NULL; 18230Sstevel@tonic-gate hash->dtah_all = NULL; 18240Sstevel@tonic-gate hash->dtah_size = 0; 18250Sstevel@tonic-gate } 18260Sstevel@tonic-gate 18270Sstevel@tonic-gate free(agp->dtat_buf.dtbd_data); 18280Sstevel@tonic-gate free(agp->dtat_cpus); 18290Sstevel@tonic-gate } 1830