/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <sys/param.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/inttypes.h>
#include <sys/cmn_err.h>
#include <sys/time.h>
#include <sys/mutex.h>
#include <sys/systm.h>
#include <sys/kcpc.h>
#include <sys/cpc_impl.h>
#include <sys/cpc_pcbe.h>
#include <sys/atomic.h>
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/sdt.h>
#if defined(__x86)
#include <asm/clock.h>
#endif

kmutex_t	kcpc_ctx_llock[CPC_HASH_BUCKETS];	/* protects ctx_list */
kcpc_ctx_t	*kcpc_ctx_list[CPC_HASH_BUCKETS];	/* head of list */


krwlock_t	kcpc_cpuctx_lock;	/* lock for 'kcpc_cpuctx' below */
int		kcpc_cpuctx;		/* number of cpu-specific contexts */

int kcpc_counts_include_idle = 1; /* Project Private /etc/system variable */

/*
 * These are set when a PCBE module is loaded.
 */
uint_t		cpc_ncounters = 0;
pcbe_ops_t	*pcbe_ops = NULL;

/*
 * Statistics on (mis)behavior
 */
static uint32_t kcpc_intrctx_count;	/* # overflows in an interrupt handler */
static uint32_t kcpc_nullctx_count;	/* # overflows in a thread with no ctx */

/*
 * Is misbehaviour (overflow in a thread with no context) fatal?
 */
#ifdef DEBUG
static int kcpc_nullctx_panic = 1;
#else
static int kcpc_nullctx_panic = 0;
#endif

static void kcpc_lwp_create(kthread_t *t, kthread_t *ct);
static void kcpc_restore(kcpc_ctx_t *ctx);
static void kcpc_save(kcpc_ctx_t *ctx);
static void kcpc_free(kcpc_ctx_t *ctx, int isexec);
static int kcpc_configure_reqs(kcpc_ctx_t *ctx, kcpc_set_t *set, int *subcode);
static void kcpc_free_configs(kcpc_set_t *set);
static kcpc_ctx_t *kcpc_ctx_alloc(void);
static void kcpc_ctx_clone(kcpc_ctx_t *ctx, kcpc_ctx_t *cctx);
static void kcpc_ctx_free(kcpc_ctx_t *ctx);
static int kcpc_assign_reqs(kcpc_set_t *set, kcpc_ctx_t *ctx);
static int kcpc_tryassign(kcpc_set_t *set, int starting_req, int *scratch);
static kcpc_set_t *kcpc_dup_set(kcpc_set_t *set);

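/*
 * Called by a PCBE (performance counter backend) module when it loads;
 * publishes the backend's ops vector and caches the number of counters
 * the backend exposes.
 */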
void
kcpc_register_pcbe(pcbe_ops_t *ops)
{
	pcbe_ops = ops;
	cpc_ncounters = pcbe_ops->pcbe_ncounters();
}

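/*
 * Bind a set to the given CPU. The calling thread must already be bound to
 * that CPU; the binding is re-checked under cpu_cpc_ctxlock before the
 * hardware is programmed. Returns 0 on success, EINVAL (with *subcode set)
 * if the requests cannot be assigned to counters, or EAGAIN if the CPU has
 * been DR'd out, already has a bound set, or the thread's binding changed.
 */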
int
kcpc_bind_cpu(kcpc_set_t *set, processorid_t cpuid, int *subcode)
{
	cpu_t		*cp;
	kcpc_ctx_t	*ctx;
	int		error;

	ctx = kcpc_ctx_alloc();

	if (kcpc_assign_reqs(set, ctx) != 0) {
		kcpc_ctx_free(ctx);
		*subcode = CPC_RESOURCE_UNAVAIL;
		return (EINVAL);
	}

	ctx->kc_cpuid = cpuid;
	ctx->kc_thread = curthread;

	set->ks_data = kmem_zalloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP);

	if ((error = kcpc_configure_reqs(ctx, set, subcode)) != 0) {
		kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
		kcpc_ctx_free(ctx);
		return (error);
	}

	set->ks_ctx = ctx;
	ctx->kc_set = set;

	/*
	 * We must hold cpu_lock to prevent DR, offlining, or unbinding while
	 * we are manipulating the cpu_t and programming the hardware, else
	 * the cpu_t could go away while we're looking at it.
	 */
	mutex_enter(&cpu_lock);
	cp = cpu_get(cpuid);

	if (cp == NULL)
		/*
		 * The CPU could have been DRd out while we were getting set up.
		 */
		goto unbound;

	mutex_enter(&cp->cpu_cpc_ctxlock);

	if (cp->cpu_cpc_ctx != NULL) {
		/*
		 * If this CPU already has a bound set, return an error.
		 */
		mutex_exit(&cp->cpu_cpc_ctxlock);
		goto unbound;
	}

	if (curthread->t_bind_cpu != cpuid) {
		mutex_exit(&cp->cpu_cpc_ctxlock);
		goto unbound;
	}
	cp->cpu_cpc_ctx = ctx;

	/*
	 * Kernel preemption must be disabled while fiddling with the hardware
	 * registers to prevent partial updates.
	 */
	kpreempt_disable();
	ctx->kc_rawtick = KCPC_GET_TICK();
	pcbe_ops->pcbe_program(ctx);
	kpreempt_enable();

	mutex_exit(&cp->cpu_cpc_ctxlock);
	mutex_exit(&cpu_lock);

	return (0);

unbound:
	mutex_exit(&cpu_lock);
	set->ks_ctx = NULL;
	kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
	kcpc_ctx_free(ctx);
	return (EAGAIN);
}

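/*
 * Bind a set to the given thread. The new context begins life frozen and is
 * only thawed once the hardware has been programmed (for curthread), or
 * immediately when an agent LWP is binding a stopped victim LWP.
 * Returns EEXIST if the thread already has a bound context.
 */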
int
kcpc_bind_thread(kcpc_set_t *set, kthread_t *t, int *subcode)
{
	kcpc_ctx_t	*ctx;
	int		error;

	/*
	 * Only one set is allowed per context, so ensure there is no
	 * existing context.
	 */

	if (t->t_cpc_ctx != NULL)
		return (EEXIST);

	ctx = kcpc_ctx_alloc();

	/*
	 * The context must begin life frozen until it has been properly
	 * programmed onto the hardware. This prevents the context ops from
	 * worrying about it until we're ready.
	 */
	ctx->kc_flags |= KCPC_CTX_FREEZE;
	ctx->kc_hrtime = gethrtime();

	if (kcpc_assign_reqs(set, ctx) != 0) {
		kcpc_ctx_free(ctx);
		*subcode = CPC_RESOURCE_UNAVAIL;
		return (EINVAL);
	}

	ctx->kc_cpuid = -1;
	if (set->ks_flags & CPC_BIND_LWP_INHERIT)
		ctx->kc_flags |= KCPC_CTX_LWPINHERIT;
	ctx->kc_thread = t;
	t->t_cpc_ctx = ctx;
	/*
	 * Permit threads to look at their own hardware counters from userland.
	 */
	ctx->kc_flags |= KCPC_CTX_NONPRIV;

	/*
	 * Create the data store for this set.
	 */
	set->ks_data = kmem_alloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP);

	if ((error = kcpc_configure_reqs(ctx, set, subcode)) != 0) {
		kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
		kcpc_ctx_free(ctx);
		t->t_cpc_ctx = NULL;
		return (error);
	}

	set->ks_ctx = ctx;
	ctx->kc_set = set;

	/*
	 * Add a device context to the subject thread.
	 */
	installctx(t, ctx, kcpc_save, kcpc_restore, NULL,
	    kcpc_lwp_create, NULL, kcpc_free);

	/*
	 * Ask the backend to program the hardware.
	 */
	if (t == curthread) {
		kpreempt_disable();
		ctx->kc_rawtick = KCPC_GET_TICK();
		atomic_and_uint(&ctx->kc_flags, ~KCPC_CTX_FREEZE);
		pcbe_ops->pcbe_program(ctx);
		kpreempt_enable();
	} else
		/*
		 * Since we are the agent LWP, we know the victim LWP is stopped
		 * until we're done here; no need to worry about preemption or
		 * migration here. We still use an atomic op to clear the flag
		 * to ensure the flags are always self-consistent; they can
		 * still be accessed from, for instance, another CPU doing a
		 * kcpc_invalidate_all().
		 */
		atomic_and_uint(&ctx->kc_flags, ~KCPC_CTX_FREEZE);


	return (0);
}

/*
 * Walk through each request in the set and ask the PCBE to configure a
 * corresponding counter.
 */
static int
kcpc_configure_reqs(kcpc_ctx_t *ctx, kcpc_set_t *set, int *subcode)
{
	int i;
	int ret;
	kcpc_request_t *rp;

	for (i = 0; i < set->ks_nreqs; i++) {
		int n;
		rp = &set->ks_req[i];

		n = rp->kr_picnum;

		ASSERT(n >= 0 && n < cpc_ncounters);

		ASSERT(ctx->kc_pics[n].kp_req == NULL);

		if (rp->kr_flags & CPC_OVF_NOTIFY_EMT) {
			if ((pcbe_ops->pcbe_caps & CPC_CAP_OVERFLOW_INTERRUPT)
			    == 0) {
				*subcode = -1;
				return (ENOTSUP);
			}
			/*
			 * If any of the counters have requested overflow
			 * notification, we flag the context as being one that
			 * cares about overflow.
			 */
			ctx->kc_flags |= KCPC_CTX_SIGOVF;
		}

		rp->kr_config = NULL;
		if ((ret = pcbe_ops->pcbe_configure(n, rp->kr_event,
		    rp->kr_preset, rp->kr_flags, rp->kr_nattrs, rp->kr_attr,
		    &(rp->kr_config), (void *)ctx)) != 0) {
			kcpc_free_configs(set);
			*subcode = ret;
			if (ret == CPC_ATTR_REQUIRES_PRIVILEGE)
				return (EACCES);
			return (EINVAL);
		}

		ctx->kc_pics[n].kp_req = rp;
		rp->kr_picp = &ctx->kc_pics[n];
		rp->kr_data = set->ks_data + rp->kr_index;
		*rp->kr_data = rp->kr_preset;
	}

	return (0);
}

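/*
 * Free any PCBE configurations that were successfully created for the
 * requests in this set.
 */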
static void
kcpc_free_configs(kcpc_set_t *set)
{
	int i;

	for (i = 0; i < set->ks_nreqs; i++)
		if (set->ks_req[i].kr_config != NULL)
			pcbe_ops->pcbe_free(set->ks_req[i].kr_config);
}

/*
 * buf points to a user address and the data should be copied out to that
 * address in the current process.
 */
int
kcpc_sample(kcpc_set_t *set, uint64_t *buf, hrtime_t *hrtime, uint64_t *tick)
{
	kcpc_ctx_t	*ctx = set->ks_ctx;
	uint64_t	curtick = KCPC_GET_TICK();

	if (ctx == NULL)
		return (EINVAL);
	else if (ctx->kc_flags & KCPC_CTX_INVALID)
		return (EAGAIN);

	if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0) {
		/*
		 * Kernel preemption must be disabled while reading the
		 * hardware regs, and if this is a CPU-bound context, while
		 * checking the CPU binding of the current thread.
		 */
		kpreempt_disable();

		if (ctx->kc_cpuid != -1) {
			if (curthread->t_bind_cpu != ctx->kc_cpuid) {
				kpreempt_enable();
				return (EAGAIN);
			}
		}

		if (ctx->kc_thread == curthread) {
			ctx->kc_hrtime = gethrtime();
			pcbe_ops->pcbe_sample(ctx);
			ctx->kc_vtick += curtick - ctx->kc_rawtick;
			ctx->kc_rawtick = curtick;
		}

		kpreempt_enable();
	}

	if (copyout(set->ks_data, buf,
	    set->ks_nreqs * sizeof (uint64_t)) == -1)
		return (EFAULT);
	if (copyout(&ctx->kc_hrtime, hrtime, sizeof (uint64_t)) == -1)
		return (EFAULT);
	if (copyout(&ctx->kc_vtick, tick, sizeof (uint64_t)) == -1)
		return (EFAULT);

	return (0);
}

/*
 * Stop the counters on the CPU this context is bound to.
 */
static void
kcpc_stop_hw(kcpc_ctx_t *ctx)
{
	cpu_t *cp;

	ASSERT((ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED))
	    == KCPC_CTX_INVALID);

	kpreempt_disable();

	cp = cpu_get(ctx->kc_cpuid);
	ASSERT(cp != NULL);

	if (cp == CPU) {
		pcbe_ops->pcbe_allstop();
		atomic_or_uint(&ctx->kc_flags,
		    KCPC_CTX_INVALID_STOPPED);
	} else
		kcpc_remote_stop(cp);
	kpreempt_enable();
}

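/*
 * Invalidate a bound set, stop the hardware counting on its behalf, and tear
 * down the thread or CPU association that was established at bind time.
 */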
int
kcpc_unbind(kcpc_set_t *set)
{
	kcpc_ctx_t	*ctx = set->ks_ctx;
	kthread_t	*t;

	if (ctx == NULL)
		return (EINVAL);

	atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID);

	if (ctx->kc_cpuid == -1) {
		t = ctx->kc_thread;
		/*
		 * The context is thread-bound and therefore has a device
		 * context. It will be freed via removectx() calling
		 * freectx() calling kcpc_free().
		 */
		if (t == curthread &&
		    (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0) {
			kpreempt_disable();
			pcbe_ops->pcbe_allstop();
			atomic_or_uint(&ctx->kc_flags,
			    KCPC_CTX_INVALID_STOPPED);
			kpreempt_enable();
		}
#ifdef DEBUG
		if (removectx(t, ctx, kcpc_save, kcpc_restore, NULL,
		    kcpc_lwp_create, NULL, kcpc_free) == 0)
			panic("kcpc_unbind: context %p not present on "
			    "thread %p", ctx, t);
#else
		(void) removectx(t, ctx, kcpc_save, kcpc_restore, NULL,
		    kcpc_lwp_create, NULL, kcpc_free);
#endif /* DEBUG */
		t->t_cpc_set = NULL;
		t->t_cpc_ctx = NULL;
	} else {
		/*
		 * If we are unbinding a CPU-bound set from a remote CPU, the
		 * native CPU's idle thread could be in the midst of programming
		 * this context onto the CPU. We grab the context's lock here to
		 * ensure that the idle thread is done with it. When we release
		 * the lock, the CPU no longer has a context and the idle thread
		 * will move on.
		 *
		 * cpu_lock must be held to prevent the CPU from being DR'd out
		 * while we disassociate the context from the cpu_t.
		 */
		cpu_t *cp;
		mutex_enter(&cpu_lock);
		cp = cpu_get(ctx->kc_cpuid);
		if (cp != NULL) {
			/*
			 * The CPU may have been DR'd out of the system.
			 */
			mutex_enter(&cp->cpu_cpc_ctxlock);
			if ((ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0)
				kcpc_stop_hw(ctx);
			ASSERT(ctx->kc_flags & KCPC_CTX_INVALID_STOPPED);
			cp->cpu_cpc_ctx = NULL;
			mutex_exit(&cp->cpu_cpc_ctxlock);
		}
		mutex_exit(&cpu_lock);
		if (ctx->kc_thread == curthread) {
			kcpc_free(ctx, 0);
			curthread->t_cpc_set = NULL;
		}
	}

	return (0);
}

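/*
 * Update the preset (starting value) of the request with the given index in
 * a thread-bound set owned by the current thread.
 */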
int
kcpc_preset(kcpc_set_t *set, int index, uint64_t preset)
{
	int i;

	ASSERT(set != NULL);
	ASSERT(set->ks_ctx != NULL);
	ASSERT(set->ks_ctx->kc_thread == curthread);
	ASSERT(set->ks_ctx->kc_cpuid == -1);

	if (index < 0 || index >= set->ks_nreqs)
		return (EINVAL);

	for (i = 0; i < set->ks_nreqs; i++)
		if (set->ks_req[i].kr_index == index)
			break;
	ASSERT(i != set->ks_nreqs);

	set->ks_req[i].kr_preset = preset;
	return (0);
}

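/*
 * Restart a thread-bound set from its presets: stop the counters if they are
 * running, reset each request's data store to its preset, push the preset
 * back into the existing PCBE configuration, and reprogram the hardware.
 */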
int
kcpc_restart(kcpc_set_t *set)
{
	kcpc_ctx_t	*ctx = set->ks_ctx;
	int		i;

	ASSERT(ctx != NULL);
	ASSERT(ctx->kc_thread == curthread);
	ASSERT(ctx->kc_cpuid == -1);

	kpreempt_disable();

	/*
	 * If the user is doing this on a running set, make sure the counters
	 * are stopped first.
	 */
	if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0)
		pcbe_ops->pcbe_allstop();

	for (i = 0; i < set->ks_nreqs; i++) {
		*(set->ks_req[i].kr_data) = set->ks_req[i].kr_preset;
		pcbe_ops->pcbe_configure(0, NULL, set->ks_req[i].kr_preset,
		    0, 0, NULL, &set->ks_req[i].kr_config, NULL);
	}

	/*
	 * Ask the backend to program the hardware.
	 */
	ctx->kc_rawtick = KCPC_GET_TICK();
	atomic_and_uint(&ctx->kc_flags, ~KCPC_CTX_FREEZE);
	pcbe_ops->pcbe_program(ctx);
	kpreempt_enable();

	return (0);
}

/*
 * Caller must hold kcpc_cpuctx_lock.
 */
int
kcpc_enable(kthread_t *t, int cmd, int enable)
{
	kcpc_ctx_t	*ctx = t->t_cpc_ctx;
	kcpc_set_t	*set = t->t_cpc_set;
	kcpc_set_t	*newset;
	int		i;
	int		flag;
	int		err;

	ASSERT(RW_READ_HELD(&kcpc_cpuctx_lock));

	if (ctx == NULL) {
		/*
		 * This thread has a set but no context; it must be a
		 * CPU-bound set.
		 */
		ASSERT(t->t_cpc_set != NULL);
		ASSERT(t->t_cpc_set->ks_ctx->kc_cpuid != -1);
		return (EINVAL);
	} else if (ctx->kc_flags & KCPC_CTX_INVALID)
		return (EAGAIN);

	if (cmd == CPC_ENABLE) {
		if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0)
			return (EINVAL);
		kpreempt_disable();
		atomic_and_uint(&ctx->kc_flags, ~KCPC_CTX_FREEZE);
		kcpc_restore(ctx);
		kpreempt_enable();
	} else if (cmd == CPC_DISABLE) {
		if (ctx->kc_flags & KCPC_CTX_FREEZE)
			return (EINVAL);
		kpreempt_disable();
		kcpc_save(ctx);
		atomic_or_uint(&ctx->kc_flags, KCPC_CTX_FREEZE);
		kpreempt_enable();
	} else if (cmd == CPC_USR_EVENTS || cmd == CPC_SYS_EVENTS) {
		/*
		 * Strategy for usr/sys: stop counters and update set's presets
		 * with current counter values, unbind, update requests with
		 * new config, then re-bind.
		 */
		flag = (cmd == CPC_USR_EVENTS) ?
		    CPC_COUNT_USER: CPC_COUNT_SYSTEM;

		kpreempt_disable();
		atomic_or_uint(&ctx->kc_flags,
		    KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED);
		pcbe_ops->pcbe_allstop();
		kpreempt_enable();
		for (i = 0; i < set->ks_nreqs; i++) {
			set->ks_req[i].kr_preset = *(set->ks_req[i].kr_data);
			if (enable)
				set->ks_req[i].kr_flags |= flag;
			else
				set->ks_req[i].kr_flags &= ~flag;
		}
		newset = kcpc_dup_set(set);
		if (kcpc_unbind(set) != 0)
			return (EINVAL);
		t->t_cpc_set = newset;
		if (kcpc_bind_thread(newset, t, &err) != 0) {
			t->t_cpc_set = NULL;
			kcpc_free_set(newset);
			return (EINVAL);
		}
	} else
		return (EINVAL);

	return (0);
}

/*
 * Provide PCBEs with a way of obtaining the configs of every counter which will
 * be programmed together.
 *
 * If current is NULL, provide the first config.
 *
 * If data != NULL, caller wants to know where the data store associated with
 * the config we return is located.
 */
void *
kcpc_next_config(void *token, void *current, uint64_t **data)
{
	int		i;
	kcpc_pic_t	*pic;
	kcpc_ctx_t	*ctx = (kcpc_ctx_t *)token;

	if (current == NULL) {
		/*
		 * Client would like the first config, which may not be in
		 * counter 0; we need to search through the counters for the
		 * first config.
		 */
		for (i = 0; i < cpc_ncounters; i++)
			if (ctx->kc_pics[i].kp_req != NULL)
				break;
		/*
		 * There are no counters configured for the given context.
		 */
		if (i == cpc_ncounters)
			return (NULL);
	} else {
		/*
		 * There surely is a faster way to do this.
		 */
		for (i = 0; i < cpc_ncounters; i++) {
			pic = &ctx->kc_pics[i];

			if (pic->kp_req != NULL &&
			    current == pic->kp_req->kr_config)
				break;
		}

		/*
		 * We found the current config at picnum i. Now search for the
		 * next configured PIC.
		 */
		for (i++; i < cpc_ncounters; i++) {
			pic = &ctx->kc_pics[i];
			if (pic->kp_req != NULL)
				break;
		}

		if (i == cpc_ncounters)
			return (NULL);
	}

	if (data != NULL) {
		*data = ctx->kc_pics[i].kp_req->kr_data;
	}

	return (ctx->kc_pics[i].kp_req->kr_config);
}


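/*
 * Allocate a new context and hash it onto the global context list so that
 * kcpc_invalidate_all() can find it.
 */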
static kcpc_ctx_t *
kcpc_ctx_alloc(void)
{
	kcpc_ctx_t	*ctx;
	long		hash;

	ctx = (kcpc_ctx_t *)kmem_alloc(sizeof (kcpc_ctx_t), KM_SLEEP);

	hash = CPC_HASH_CTX(ctx);
	mutex_enter(&kcpc_ctx_llock[hash]);
	ctx->kc_next = kcpc_ctx_list[hash];
	kcpc_ctx_list[hash] = ctx;
	mutex_exit(&kcpc_ctx_llock[hash]);

	ctx->kc_pics = (kcpc_pic_t *)kmem_zalloc(sizeof (kcpc_pic_t) *
	    cpc_ncounters, KM_SLEEP);

	ctx->kc_flags = 0;
	ctx->kc_vtick = 0;
	ctx->kc_rawtick = 0;
	ctx->kc_cpuid = -1;

	return (ctx);
}

/*
 * Copy set from ctx to the child context, cctx, if it has CPC_BIND_LWP_INHERIT
 * in the flags.
 */
static void
kcpc_ctx_clone(kcpc_ctx_t *ctx, kcpc_ctx_t *cctx)
{
	kcpc_set_t	*ks = ctx->kc_set, *cks;
	int		i, j;
	int		code;

	ASSERT(ks != NULL);

	if ((ks->ks_flags & CPC_BIND_LWP_INHERIT) == 0)
		return;

	cks = kmem_alloc(sizeof (*cks), KM_SLEEP);
	cctx->kc_set = cks;
	cks->ks_flags = ks->ks_flags;
	cks->ks_nreqs = ks->ks_nreqs;
	cks->ks_req = kmem_alloc(cks->ks_nreqs *
	    sizeof (kcpc_request_t), KM_SLEEP);
	cks->ks_data = kmem_alloc(cks->ks_nreqs * sizeof (uint64_t),
	    KM_SLEEP);
	cks->ks_ctx = cctx;

	for (i = 0; i < cks->ks_nreqs; i++) {
		cks->ks_req[i].kr_index = ks->ks_req[i].kr_index;
		cks->ks_req[i].kr_picnum = ks->ks_req[i].kr_picnum;
		(void) strncpy(cks->ks_req[i].kr_event,
		    ks->ks_req[i].kr_event, CPC_MAX_EVENT_LEN);
		cks->ks_req[i].kr_preset = ks->ks_req[i].kr_preset;
		cks->ks_req[i].kr_flags = ks->ks_req[i].kr_flags;
		cks->ks_req[i].kr_nattrs = ks->ks_req[i].kr_nattrs;
		if (ks->ks_req[i].kr_nattrs > 0) {
			cks->ks_req[i].kr_attr =
			    kmem_alloc(ks->ks_req[i].kr_nattrs *
			    sizeof (kcpc_attr_t), KM_SLEEP);
		}
		for (j = 0; j < ks->ks_req[i].kr_nattrs; j++) {
			(void) strncpy(cks->ks_req[i].kr_attr[j].ka_name,
			    ks->ks_req[i].kr_attr[j].ka_name,
			    CPC_MAX_ATTR_LEN);
			cks->ks_req[i].kr_attr[j].ka_val =
			    ks->ks_req[i].kr_attr[j].ka_val;
		}
	}
	if (kcpc_configure_reqs(cctx, cks, &code) != 0)
		panic("kcpc_ctx_clone: configure of context %p with "
		    "set %p failed with subcode %d", cctx, cks, code);
}


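/*
 * Unlink a context from its hash chain and free it, along with its PIC array.
 */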
static void
kcpc_ctx_free(kcpc_ctx_t *ctx)
{
	kcpc_ctx_t	**loc;
	long		hash = CPC_HASH_CTX(ctx);

	mutex_enter(&kcpc_ctx_llock[hash]);
	loc = &kcpc_ctx_list[hash];
	ASSERT(*loc != NULL);
	while (*loc != ctx)
		loc = &(*loc)->kc_next;
	*loc = ctx->kc_next;
	mutex_exit(&kcpc_ctx_llock[hash]);

	kmem_free(ctx->kc_pics, cpc_ncounters * sizeof (kcpc_pic_t));
	kmem_free(ctx, sizeof (*ctx));
}

/*
 * Generic interrupt handler used on hardware that generates
 * overflow interrupts.
 *
 * Note: executed at high-level interrupt context!
 */
/*ARGSUSED*/
kcpc_ctx_t *
kcpc_overflow_intr(caddr_t arg, uint64_t bitmap)
{
	kcpc_ctx_t	*ctx;
	kthread_t	*t = curthread;
	int		i;

	/*
	 * On both x86 and UltraSPARC, we may deliver the high-level
	 * interrupt in kernel mode, just after we've started to run an
	 * interrupt thread. (That's because the hardware helpfully
	 * delivers the overflow interrupt some random number of cycles
	 * after the instruction that caused the overflow, by which time
	 * we're in some part of the kernel, not necessarily running on
	 * the right thread).
	 *
	 * Check for this case here -- find the pinned thread
	 * that was running when the interrupt went off.
	 */
	if (t->t_flag & T_INTR_THREAD) {
		klwp_t *lwp;

		atomic_add_32(&kcpc_intrctx_count, 1);

		/*
		 * Note that t_lwp is always set to point at the underlying
		 * thread, thus this will work in the presence of nested
		 * interrupts.
		 */
		ctx = NULL;
		if ((lwp = t->t_lwp) != NULL) {
			t = lwptot(lwp);
			ctx = t->t_cpc_ctx;
		}
	} else
		ctx = t->t_cpc_ctx;

	if (ctx == NULL) {
		/*
		 * This can easily happen if we're using the counters in
		 * "shared" mode, for example, and an overflow interrupt
		 * occurs while we are running cpustat. In that case, the
		 * bound thread that has the context that belongs to this
		 * CPU is almost certainly sleeping (if it was running on
		 * the CPU we'd have found it above), and the actual
		 * interrupted thread has no knowledge of performance counters!
		 */
		ctx = curthread->t_cpu->cpu_cpc_ctx;
		if (ctx != NULL) {
			/*
			 * Return the bound context for this CPU to
			 * the interrupt handler so that it can synchronously
			 * sample the hardware counters and restart them.
			 */
			return (ctx);
		}

		/*
		 * As long as the overflow interrupt really is delivered early
		 * enough after trapping into the kernel to avoid switching
		 * threads, we must always be able to find the cpc context,
		 * or something went terribly wrong, i.e. we ended up
		 * running a passivated interrupt thread, a kernel
		 * thread or we interrupted idle, all of which are Very Bad.
		 */
		if (kcpc_nullctx_panic)
			panic("null cpc context, thread %p", (void *)t);
		atomic_add_32(&kcpc_nullctx_count, 1);
	} else if ((ctx->kc_flags & KCPC_CTX_INVALID) == 0) {
		/*
		 * Schedule an ast to sample the counters, which will
		 * propagate any overflow into the virtualized performance
		 * counter(s), and may deliver a signal.
		 */
		ttolwp(t)->lwp_pcb.pcb_flags |= CPC_OVERFLOW;
		/*
		 * If a counter has overflowed while counting on behalf of
		 * a request which specified CPC_OVF_NOTIFY_EMT, send the
		 * process a signal.
		 */
		for (i = 0; i < cpc_ncounters; i++) {
			if (ctx->kc_pics[i].kp_req != NULL &&
			    bitmap & (1 << i) &&
			    ctx->kc_pics[i].kp_req->kr_flags &
			    CPC_OVF_NOTIFY_EMT) {
				/*
				 * A signal has been requested for this PIC,
				 * so freeze the context. The interrupt handler
				 * has already stopped the counter hardware.
				 */
				atomic_or_uint(&ctx->kc_flags, KCPC_CTX_FREEZE);
				atomic_or_uint(&ctx->kc_pics[i].kp_flags,
				    KCPC_PIC_OVERFLOWED);
			}
		}
		aston(t);
	}
	return (NULL);
}

/*
 * The current thread context had an overflow interrupt; we're
 * executing here in high-level interrupt context.
 */
/*ARGSUSED*/
uint_t
kcpc_hw_overflow_intr(caddr_t arg1, caddr_t arg2)
{
	kcpc_ctx_t	*ctx;
	uint64_t	bitmap;

	if (pcbe_ops == NULL ||
	    (bitmap = pcbe_ops->pcbe_overflow_bitmap()) == 0)
		return (DDI_INTR_UNCLAIMED);

	/*
	 * Prevent any further interrupts.
	 */
	pcbe_ops->pcbe_allstop();

	/*
	 * Invoke the "generic" handler.
	 *
	 * If the interrupt has occurred in the context of an lwp owning
	 * the counters, then the handler posts an AST to the lwp to
	 * trigger the actual sampling, and optionally deliver a signal or
	 * restart the counters, on the way out of the kernel using
	 * kcpc_overflow_ast() (see below).
	 *
	 * On the other hand, if the handler returns the context to us
	 * directly, then it means that there are no other threads in
	 * the middle of updating it, no AST has been posted, and so we
	 * should sample the counters here, and restart them with no
	 * further fuss.
	 */
	if ((ctx = kcpc_overflow_intr(arg1, bitmap)) != NULL) {
		uint64_t curtick = KCPC_GET_TICK();

		ctx->kc_hrtime = gethrtime_waitfree();
		ctx->kc_vtick += curtick - ctx->kc_rawtick;
		ctx->kc_rawtick = curtick;
		pcbe_ops->pcbe_sample(ctx);
		pcbe_ops->pcbe_program(ctx);
	}

	return (DDI_INTR_CLAIMED);
}

/*
 * Called from trap() when processing the ast posted by the high-level
 * interrupt handler.
 */
int
kcpc_overflow_ast()
{
	kcpc_ctx_t	*ctx = curthread->t_cpc_ctx;
	int		i;
	int		found = 0;
	uint64_t	curtick = KCPC_GET_TICK();

	ASSERT(ctx != NULL);	/* Beware of interrupt skid. */

	/*
	 * An overflow happened: sample the context to ensure that
	 * the overflow is propagated into the upper bits of the
	 * virtualized 64-bit counter(s).
	 */
	kpreempt_disable();
	ctx->kc_hrtime = gethrtime_waitfree();
	pcbe_ops->pcbe_sample(ctx);
	kpreempt_enable();

	ctx->kc_vtick += curtick - ctx->kc_rawtick;

	/*
	 * The interrupt handler has marked any pics with KCPC_PIC_OVERFLOWED
	 * if that pic generated an overflow and if the request it was counting
	 * on behalf of had CPC_OVF_NOTIFY_EMT specified. We go through all
	 * pics in the context and clear the KCPC_PIC_OVERFLOWED flags. If we
	 * found any overflowed pics, keep the context frozen and return true
	 * (thus causing a signal to be sent).
	 */
	for (i = 0; i < cpc_ncounters; i++) {
		if (ctx->kc_pics[i].kp_flags & KCPC_PIC_OVERFLOWED) {
			atomic_and_uint(&ctx->kc_pics[i].kp_flags,
			    ~KCPC_PIC_OVERFLOWED);
			found = 1;
		}
	}
	if (found)
		return (1);

	/*
	 * Otherwise, re-enable the counters and continue life as before.
	 */
	kpreempt_disable();
	atomic_and_uint(&ctx->kc_flags, ~KCPC_CTX_FREEZE);
	pcbe_ops->pcbe_program(ctx);
	kpreempt_enable();
	return (0);
}

/*
 * Called when switching away from current thread.
 */
static void
kcpc_save(kcpc_ctx_t *ctx)
{
	if (ctx->kc_flags & KCPC_CTX_INVALID) {
		if (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED)
			return;
		/*
		 * This context has been invalidated but the counters have not
		 * been stopped. Stop them here and mark the context stopped.
		 */
		pcbe_ops->pcbe_allstop();
		atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID_STOPPED);
		return;
	}

	pcbe_ops->pcbe_allstop();
	if (ctx->kc_flags & KCPC_CTX_FREEZE)
		return;

	/*
	 * Need to sample for all reqs into each req's current mpic.
	 */
	ctx->kc_hrtime = gethrtime();
	ctx->kc_vtick += KCPC_GET_TICK() - ctx->kc_rawtick;
	pcbe_ops->pcbe_sample(ctx);
}

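/*
 * Called when switching to the current thread; reprograms the hardware
 * unless the context has been invalidated or frozen.
 */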
static void
kcpc_restore(kcpc_ctx_t *ctx)
{
	if ((ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED)) ==
	    KCPC_CTX_INVALID)
		/*
		 * The context is invalidated but has not been marked stopped.
		 * We mark it as such here because we will not start the
		 * counters during this context switch.
		 */
		atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID_STOPPED);


	if (ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_FREEZE))
		return;

	/*
	 * While programming the hardware, the counters should be stopped. We
	 * don't do an explicit pcbe_allstop() here because they should have
	 * been stopped already by the last consumer.
	 */
	ctx->kc_rawtick = KCPC_GET_TICK();
	pcbe_ops->pcbe_program(ctx);
}

/*
 * If kcpc_counts_include_idle is set to 0 by the sys admin, we add the
 * following context operators to the idle thread on each CPU. They stop the
 * counters when the idle thread is switched on, and they start them again
 * when it is switched off.
 */

/*ARGSUSED*/
void
kcpc_idle_save(struct cpu *cp)
{
	/*
	 * The idle thread shouldn't be run anywhere else.
	 */
	ASSERT(CPU == cp);

	/*
	 * We must hold the CPU's context lock to ensure the context isn't
	 * freed while we're looking at it.
	 */
	mutex_enter(&cp->cpu_cpc_ctxlock);

	if ((cp->cpu_cpc_ctx == NULL) ||
	    (cp->cpu_cpc_ctx->kc_flags & KCPC_CTX_INVALID)) {
		mutex_exit(&cp->cpu_cpc_ctxlock);
		return;
	}

	pcbe_ops->pcbe_program(cp->cpu_cpc_ctx);
	mutex_exit(&cp->cpu_cpc_ctxlock);
}

void
kcpc_idle_restore(struct cpu *cp)
{
	/*
	 * The idle thread shouldn't be run anywhere else.
	 */
	ASSERT(CPU == cp);

	/*
	 * We must hold the CPU's context lock to ensure the context isn't
	 * freed while we're looking at it.
	 */
	mutex_enter(&cp->cpu_cpc_ctxlock);

	if ((cp->cpu_cpc_ctx == NULL) ||
	    (cp->cpu_cpc_ctx->kc_flags & KCPC_CTX_INVALID)) {
		mutex_exit(&cp->cpu_cpc_ctxlock);
		return;
	}

	pcbe_ops->pcbe_allstop();
	mutex_exit(&cp->cpu_cpc_ctxlock);
}

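/*
 * Fork handler: if the parent thread's context has the LWPINHERIT flag,
 * clone its set onto the new LWP's thread and, if SIGOVF is also set,
 * arrange for the child to take an immediate overflow signal.
 */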
/*ARGSUSED*/
static void
kcpc_lwp_create(kthread_t *t, kthread_t *ct)
{
	kcpc_ctx_t	*ctx = t->t_cpc_ctx, *cctx;
	int		i;

	if (ctx == NULL || (ctx->kc_flags & KCPC_CTX_LWPINHERIT) == 0)
		return;

	rw_enter(&kcpc_cpuctx_lock, RW_READER);
	if (ctx->kc_flags & KCPC_CTX_INVALID) {
		rw_exit(&kcpc_cpuctx_lock);
		return;
	}
	cctx = kcpc_ctx_alloc();
	kcpc_ctx_clone(ctx, cctx);
	rw_exit(&kcpc_cpuctx_lock);

	cctx->kc_flags = ctx->kc_flags;
	cctx->kc_thread = ct;
	cctx->kc_cpuid = -1;
	ct->t_cpc_set = cctx->kc_set;
	ct->t_cpc_ctx = cctx;

	if (cctx->kc_flags & KCPC_CTX_SIGOVF) {
		kcpc_set_t *ks = cctx->kc_set;
		/*
		 * Our contract with the user requires us to immediately send
		 * an overflow signal to all children if we have the LWPINHERIT
		 * and SIGOVF flags set. In addition, all counters should be
		 * set to UINT64_MAX, and their pic's overflow flag turned on
		 * so that our trap() processing knows to send a signal.
		 */
		atomic_or_uint(&cctx->kc_flags, KCPC_CTX_FREEZE);
		for (i = 0; i < ks->ks_nreqs; i++) {
			kcpc_request_t *kr = &ks->ks_req[i];

			if (kr->kr_flags & CPC_OVF_NOTIFY_EMT) {
				*(kr->kr_data) = UINT64_MAX;
				kr->kr_picp->kp_flags |= KCPC_PIC_OVERFLOWED;
			}
		}
		ttolwp(ct)->lwp_pcb.pcb_flags |= CPC_OVERFLOW;
		aston(ct);
	}

	installctx(ct, cctx, kcpc_save, kcpc_restore,
	    NULL, kcpc_lwp_create, NULL, kcpc_free);
}

/*
 * Counter Stoppage Theory
 *
 * The counters may need to be stopped properly at the following occasions:
 *
 * 1) An LWP exits.
 * 2) A thread exits.
 * 3) An LWP performs an exec().
 * 4) A bound set is unbound.
 *
 * In addition to stopping the counters, the CPC context (a kcpc_ctx_t) may
 * need to be freed as well.
 *
 * Case 1: kcpc_passivate(), called via lwp_exit(), stops the counters. Later
 * on, when the thread is freed, kcpc_free(), called by freectx(), frees the
 * context.
 *
 * Case 2: same as case 1 except kcpc_passivate is called from thread_exit().
 *
 * Case 3: kcpc_free(), called via freectx() via exec(), recognizes that it
 * has been called from exec. It stops the counters _and_ frees the context.
 *
 * Case 4: kcpc_unbind() stops the hardware _and_ frees the context.
 *
 * CPU-bound counters are always stopped via kcpc_unbind().
 */

/*
 * We're being called to delete the context; we ensure that all associated
 * data structures are freed, and that the hardware is passivated if this is
 * an exec.
 */

/*ARGSUSED*/
static void
kcpc_free(kcpc_ctx_t *ctx, int isexec)
{
	int		i;
	kcpc_set_t	*set = ctx->kc_set;

	ASSERT(set != NULL);

	atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID);

	if (isexec) {
		/*
		 * This thread is execing, and after the exec it should not have
		 * any performance counter context. Stop the counters properly
		 * here so the system isn't surprised by an overflow interrupt
		 * later.
		 */
		if (ctx->kc_cpuid != -1) {
			cpu_t *cp;
			/*
			 * CPU-bound context; stop the appropriate CPU's ctrs.
			 * Hold cpu_lock while examining the CPU to ensure it
			 * doesn't go away.
			 */
			mutex_enter(&cpu_lock);
			cp = cpu_get(ctx->kc_cpuid);
			/*
			 * The CPU could have been DR'd out, so only stop the
			 * CPU and clear its context pointer if the CPU still
			 * exists.
			 */
			if (cp != NULL) {
				mutex_enter(&cp->cpu_cpc_ctxlock);
				kcpc_stop_hw(ctx);
				cp->cpu_cpc_ctx = NULL;
				mutex_exit(&cp->cpu_cpc_ctxlock);
			}
			mutex_exit(&cpu_lock);
			ASSERT(curthread->t_cpc_ctx == NULL);
		} else {
			/*
			 * Thread-bound context; stop _this_ CPU's counters.
			 */
			kpreempt_disable();
			pcbe_ops->pcbe_allstop();
			atomic_or_uint(&ctx->kc_flags,
			    KCPC_CTX_INVALID_STOPPED);
			kpreempt_enable();
			curthread->t_cpc_ctx = NULL;
		}

		/*
		 * Since we are being called from an exec and we know that
		 * exec is not permitted via the agent thread, we should clean
		 * up this thread's CPC state completely, and not leave dangling
		 * CPC pointers behind.
		 */
		ASSERT(ctx->kc_thread == curthread);
		curthread->t_cpc_set = NULL;
	}

	/*
	 * Walk through each request in this context's set and free the PCBE's
	 * configuration if it exists.
	 */
	for (i = 0; i < set->ks_nreqs; i++) {
		if (set->ks_req[i].kr_config != NULL)
			pcbe_ops->pcbe_free(set->ks_req[i].kr_config);
	}

	kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
	kcpc_ctx_free(ctx);
	kcpc_free_set(set);
}

	for (i = 0; i < set->ks_nreqs; i++) {
		if (set->ks_req[i].kr_config != NULL)
			pcbe_ops->pcbe_free(set->ks_req[i].kr_config);
	}

	kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
	kcpc_ctx_free(ctx);
	kcpc_free_set(set);
}

/*
 * Free the memory associated with a request set.
 */
void
kcpc_free_set(kcpc_set_t *set)
{
	int		i;
	kcpc_request_t	*req;

	ASSERT(set->ks_req != NULL);

	for (i = 0; i < set->ks_nreqs; i++) {
		req = &set->ks_req[i];

		if (req->kr_nattrs != 0) {
			kmem_free(req->kr_attr,
			    req->kr_nattrs * sizeof (kcpc_attr_t));
		}
	}

	kmem_free(set->ks_req, sizeof (kcpc_request_t) * set->ks_nreqs);
	kmem_free(set, sizeof (kcpc_set_t));
}

/*
 * Grab every existing context and mark it as invalid.
 */
void
kcpc_invalidate_all(void)
{
	kcpc_ctx_t	*ctx;
	long		hash;

	for (hash = 0; hash < CPC_HASH_BUCKETS; hash++) {
		mutex_enter(&kcpc_ctx_llock[hash]);
		for (ctx = kcpc_ctx_list[hash]; ctx; ctx = ctx->kc_next)
			atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID);
		mutex_exit(&kcpc_ctx_llock[hash]);
	}
}

/*
 * Called from lwp_exit() and thread_exit().
 */
void
kcpc_passivate(void)
{
	kcpc_ctx_t	*ctx = curthread->t_cpc_ctx;
	kcpc_set_t	*set = curthread->t_cpc_set;

	if (set == NULL)
		return;

	/*
	 * We're cleaning up after this thread; ensure there are no dangling
	 * CPC pointers left behind. The context and set will be freed by
	 * freectx() in the case of an LWP-bound set, and by kcpc_unbind() in
	 * the case of a CPU-bound set.
	 */
	curthread->t_cpc_ctx = NULL;

	if (ctx == NULL) {
		/*
		 * This thread has a set but no context; it must be a
		 * CPU-bound set.
		 * The hardware will be stopped via kcpc_unbind() when the
		 * process exits and closes its file descriptors with
		 * kcpc_close(). Our only job here is to clean up this
		 * thread's state; the set will be freed with the unbind().
		 */
		(void) kcpc_unbind(set);
		/*
		 * Unbinding a set belonging to the current thread should
		 * clear its set pointer.
		 */
		ASSERT(curthread->t_cpc_set == NULL);
		return;
	}

	curthread->t_cpc_set = NULL;

	/*
	 * This thread/LWP is exiting, but context switches will continue to
	 * happen for a bit as the exit proceeds. Kernel preemption must be
	 * disabled here to prevent a race between checking or setting the
	 * INVALID_STOPPED flag here and kcpc_restore() setting the flag
	 * during a context switch.
	 */
	kpreempt_disable();
	if ((ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0) {
		pcbe_ops->pcbe_allstop();
		atomic_or_uint(&ctx->kc_flags,
		    KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED);
	}
	kpreempt_enable();
}

/*
 * Assign the requests in the given set to the PICs in the context.
 * Returns 0 if successful, -1 on failure.
 */
/*ARGSUSED*/
static int
kcpc_assign_reqs(kcpc_set_t *set, kcpc_ctx_t *ctx)
{
	int	i;
	int	*picnum_save;

	ASSERT(set->ks_nreqs <= cpc_ncounters);

	/*
	 * Provide kcpc_tryassign() with scratch space to avoid doing an
	 * alloc/free with every invocation.
	 */
	picnum_save = kmem_alloc(set->ks_nreqs * sizeof (int), KM_SLEEP);
	/*
	 * kcpc_tryassign() blindly walks through each request in the set,
	 * seeing if a counter can count its event. If yes, it assigns that
	 * counter. However, that counter may have been the only capable
	 * counter for _another_ request's event. The solution is to retry
	 * the walk with each request as the starting point (a toy
	 * demonstration follows kcpc_tryassign() below). Note that this does
	 * not cover all solutions, as that would require trying all unique
	 * orderings of requests, an n! operation which would be unacceptable
	 * for architectures with many counters.
	 */
	for (i = 0; i < set->ks_nreqs; i++)
		if (kcpc_tryassign(set, i, picnum_save) == 0)
			break;

	kmem_free(picnum_save, set->ks_nreqs * sizeof (int));
	if (i == set->ks_nreqs)
		return (-1);
	return (0);
}

static int
kcpc_tryassign(kcpc_set_t *set, int starting_req, int *scratch)
{
	int		i;
	int		j;
	uint64_t	bitmap = 0, resmap = 0;
	uint64_t	ctrmap;

	/*
	 * We are attempting to assign the reqs to pics, but we may fail. If
	 * we fail, we need to restore the state of the requests to what it
	 * was when we found it, as some reqs may have been explicitly
	 * assigned to a specific PIC beforehand. We do this by snapshotting
	 * the assignments now and restoring from the snapshot later if we
	 * fail.
	 *
	 * We also note here which counters have already been claimed by
	 * requests with explicit counter assignments.
	 */
	for (i = 0; i < set->ks_nreqs; i++) {
		scratch[i] = set->ks_req[i].kr_picnum;
		if (set->ks_req[i].kr_picnum != -1)
			resmap |= (1ULL << set->ks_req[i].kr_picnum);
	}

	/*
	 * Walk through the requests, assigning each to the first PIC that is
	 * capable.
	 */
	i = starting_req;
	do {
		if (set->ks_req[i].kr_picnum != -1) {
			ASSERT((bitmap &
			    (1ULL << set->ks_req[i].kr_picnum)) == 0);
			bitmap |= (1ULL << set->ks_req[i].kr_picnum);
			if (++i == set->ks_nreqs)
				i = 0;
			continue;
		}

		ctrmap = pcbe_ops->pcbe_event_coverage(
		    set->ks_req[i].kr_event);
		for (j = 0; j < cpc_ncounters; j++) {
			if ((ctrmap & (1ULL << j)) != 0 &&
			    (bitmap & (1ULL << j)) == 0 &&
			    (resmap & (1ULL << j)) == 0) {
				/*
				 * We can assign this counter because:
				 *
				 * 1. It can count the event (ctrmap)
				 * 2. It hasn't been assigned yet (bitmap)
				 * 3. It wasn't reserved by a request (resmap)
				 */
				bitmap |= (1ULL << j);
				break;
			}
		}
		if (j == cpc_ncounters) {
			for (i = 0; i < set->ks_nreqs; i++)
				set->ks_req[i].kr_picnum = scratch[i];
			return (-1);
		}
		set->ks_req[i].kr_picnum = j;

		if (++i == set->ks_nreqs)
			i = 0;
	} while (i != starting_req);

	return (0);
}
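/*
 * A toy demonstration (illustrative only, not part of this file's build) of
 * why kcpc_assign_reqs() retries kcpc_tryassign() from every starting
 * request. The two-counter coverage table below is hypothetical: counter 0
 * can count both events, counter 1 only the first. A greedy walk starting at
 * request 0 grabs counter 0 for it and strands request 1; starting at
 * request 1 succeeds.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	NCTRS	2
#define	NREQS	2

/* coverage[r]: bitmask of counters able to count request r's event. */
static const uint64_t coverage[NREQS] = {
	0x3,	/* request 0: counters 0 and 1 */
	0x1	/* request 1: counter 0 only */
};

/* Greedy walk starting at 'start'; returns 0 on success, -1 on failure. */
static int
try_assign(int start, int *picnum)
{
	uint64_t bitmap = 0;
	int i = start;

	do {
		int j;

		for (j = 0; j < NCTRS; j++) {
			if ((coverage[i] & (1ULL << j)) != 0 &&
			    (bitmap & (1ULL << j)) == 0) {
				bitmap |= (1ULL << j);
				picnum[i] = j;
				break;
			}
		}
		if (j == NCTRS)
			return (-1);	/* no free, capable counter */
		if (++i == NREQS)
			i = 0;
	} while (i != start);

	return (0);
}

int
main(void)
{
	int picnum[NREQS];
	int start;

	/* Prints "start=0: fails" then "start=1: fits". */
	for (start = 0; start < NREQS; start++)
		(void) printf("start=%d: %s\n", start,
		    try_assign(start, picnum) == 0 ? "fits" : "fails");
	return (0);
}
#endif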
kcpc_set_t *
kcpc_dup_set(kcpc_set_t *set)
{
	kcpc_set_t	*new;
	int		i;
	int		j;

	new = kmem_alloc(sizeof (*new), KM_SLEEP);
	new->ks_flags = set->ks_flags;
	new->ks_nreqs = set->ks_nreqs;
	new->ks_req = kmem_alloc(set->ks_nreqs * sizeof (kcpc_request_t),
	    KM_SLEEP);
	new->ks_data = NULL;
	new->ks_ctx = NULL;

	for (i = 0; i < new->ks_nreqs; i++) {
		new->ks_req[i].kr_config = NULL;
		new->ks_req[i].kr_index = set->ks_req[i].kr_index;
		new->ks_req[i].kr_picnum = set->ks_req[i].kr_picnum;
		new->ks_req[i].kr_picp = NULL;
		new->ks_req[i].kr_data = NULL;
		(void) strncpy(new->ks_req[i].kr_event,
		    set->ks_req[i].kr_event, CPC_MAX_EVENT_LEN);
		new->ks_req[i].kr_preset = set->ks_req[i].kr_preset;
		new->ks_req[i].kr_flags = set->ks_req[i].kr_flags;
		new->ks_req[i].kr_nattrs = set->ks_req[i].kr_nattrs;
		new->ks_req[i].kr_attr = kmem_alloc(new->ks_req[i].kr_nattrs *
		    sizeof (kcpc_attr_t), KM_SLEEP);
		for (j = 0; j < new->ks_req[i].kr_nattrs; j++) {
			new->ks_req[i].kr_attr[j].ka_val =
			    set->ks_req[i].kr_attr[j].ka_val;
			(void) strncpy(new->ks_req[i].kr_attr[j].ka_name,
			    set->ks_req[i].kr_attr[j].ka_name,
			    CPC_MAX_ATTR_LEN);
		}
	}

	return (new);
}

int
kcpc_allow_nonpriv(void *token)
{
	return (((kcpc_ctx_t *)token)->kc_flags & KCPC_CTX_NONPRIV);
}

void
kcpc_invalidate(kthread_t *t)
{
	kcpc_ctx_t	*ctx = t->t_cpc_ctx;

	if (ctx != NULL)
		atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID);
}
/*
 * Given a PCBE ID, attempt to load a matching PCBE module. The prefix string
 * and numeric IDs given are used to construct PCBE module names, starting
 * with the most specific, "pcbe.<prefix>.<first>.<second>.<third>", and
 * ending with the least specific, "pcbe.<prefix>" (or "pcbe.<first>" when no
 * prefix is given).
 *
 * Returns 0 if a PCBE was successfully loaded and -1 upon error.
 */
int
kcpc_pcbe_tryload(const char *prefix, uint_t first, uint_t second, uint_t third)
{
	char modname[PCBE_NAMELEN];
	char stub[PCBE_NAMELEN];

	if (prefix != NULL)
		(void) snprintf(stub, PCBE_NAMELEN, "pcbe.%s", prefix);
	else
		(void) snprintf(stub, PCBE_NAMELEN, "pcbe");

	(void) snprintf(modname, PCBE_NAMELEN, "%s.%u.%u.%u",
	    stub, first, second, third);

	DTRACE_PROBE1(kcpc__pcbe__spec, char *, modname);

	if (modload("pcbe", modname) >= 0)
		return (0);

	(void) snprintf(modname, PCBE_NAMELEN, "%s.%u.%u",
	    stub, first, second);
	if (modload("pcbe", modname) >= 0)
		return (0);

	(void) snprintf(modname, PCBE_NAMELEN, "%s.%u", stub, first);
	if (modload("pcbe", modname) >= 0)
		return (0);

	if (prefix == NULL) {
		/*
		 * If no prefix was given, we have tried all possible
		 * PCBE names.
		 */
		return (-1);
	}

	(void) snprintf(modname, PCBE_NAMELEN, "%s", stub);
	if (modload("pcbe", modname) >= 0)
		return (0);

	return (-1);
}
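/*
 * An illustrative sketch (not part of this file's build) of the order in
 * which kcpc_pcbe_tryload() tries module names. The vendor string and ID
 * values below are hypothetical; a caller might pass, e.g., a CPUID vendor
 * string with family/model/stepping on x86.
 */
#if 0
#include <stdio.h>

int
main(void)
{
	/* kcpc_pcbe_tryload("GenuineIntel", 6, 15, 2) would try, in order: */
	(void) printf("pcbe.GenuineIntel.6.15.2\n");	/* most specific */
	(void) printf("pcbe.GenuineIntel.6.15\n");
	(void) printf("pcbe.GenuineIntel.6\n");
	(void) printf("pcbe.GenuineIntel\n");		/* least specific */
	return (0);
}
#endif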