1 /* $OpenBSD: subr_prof.c,v 1.35 2023/06/02 17:44:29 cheloha Exp $ */ 2 /* $NetBSD: subr_prof.c,v 1.12 1996/04/22 01:38:50 christos Exp $ */ 3 4 /*- 5 * Copyright (c) 1982, 1986, 1993 6 * The Regents of the University of California. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. Neither the name of the University nor the names of its contributors 17 * may be used to endorse or promote products derived from this software 18 * without specific prior written permission. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 
31 * 32 * @(#)subr_prof.c 8.3 (Berkeley) 9/23/93 33 */ 34 35 #include <sys/param.h> 36 #include <sys/systm.h> 37 #include <sys/pledge.h> 38 #include <sys/proc.h> 39 #include <sys/resourcevar.h> 40 #include <sys/mount.h> 41 #include <sys/sysctl.h> 42 #include <sys/syscallargs.h> 43 44 45 #if defined(GPROF) || defined(DDBPROF) 46 #include <sys/malloc.h> 47 #include <sys/gmon.h> 48 49 #include <uvm/uvm_extern.h> 50 51 #include <machine/db_machdep.h> 52 #include <ddb/db_extern.h> 53 54 /* 55 * Flag to prevent CPUs from executing the mcount() monitor function 56 * until we're sure they are in a sane state. 57 */ 58 int gmoninit = 0; 59 u_int gmon_cpu_count; /* [K] number of CPUs with profiling enabled */ 60 61 extern char etext[]; 62 63 void 64 prof_init(void) 65 { 66 CPU_INFO_ITERATOR cii; 67 struct cpu_info *ci; 68 struct gmonparam *p; 69 u_long lowpc, highpc, textsize; 70 u_long kcountsize, fromssize, tossize; 71 long tolimit; 72 char *cp; 73 int size; 74 75 /* 76 * Round lowpc and highpc to multiples of the density we're using 77 * so the rest of the scaling (here and in gprof) stays in ints. 78 */ 79 lowpc = ROUNDDOWN(KERNBASE, HISTFRACTION * sizeof(HISTCOUNTER)); 80 highpc = ROUNDUP((u_long)etext, HISTFRACTION * sizeof(HISTCOUNTER)); 81 textsize = highpc - lowpc; 82 #ifdef GPROF 83 printf("Profiling kernel, textsize=%ld [%lx..%lx]\n", 84 textsize, lowpc, highpc); 85 #endif 86 kcountsize = textsize / HISTFRACTION; 87 fromssize = textsize / HASHFRACTION; 88 tolimit = textsize * ARCDENSITY / 100; 89 if (tolimit < MINARCS) 90 tolimit = MINARCS; 91 else if (tolimit > MAXARCS) 92 tolimit = MAXARCS; 93 tossize = tolimit * sizeof(struct tostruct); 94 size = sizeof(*p) + kcountsize + fromssize + tossize; 95 96 /* Allocate and initialize one profiling buffer per CPU. 
*/ 97 CPU_INFO_FOREACH(cii, ci) { 98 cp = km_alloc(round_page(size), &kv_any, &kp_zero, &kd_nowait); 99 if (cp == NULL) { 100 printf("No memory for profiling.\n"); 101 return; 102 } 103 104 p = (struct gmonparam *)cp; 105 cp += sizeof(*p); 106 p->tos = (struct tostruct *)cp; 107 cp += tossize; 108 p->kcount = (u_short *)cp; 109 cp += kcountsize; 110 p->froms = (u_short *)cp; 111 112 p->state = GMON_PROF_OFF; 113 p->lowpc = lowpc; 114 p->highpc = highpc; 115 p->textsize = textsize; 116 p->hashfraction = HASHFRACTION; 117 p->kcountsize = kcountsize; 118 p->fromssize = fromssize; 119 p->tolimit = tolimit; 120 p->tossize = tossize; 121 122 ci->ci_gmon = p; 123 } 124 } 125 126 int 127 prof_state_toggle(struct gmonparam *gp, int oldstate) 128 { 129 int error = 0; 130 131 KERNEL_ASSERT_LOCKED(); 132 133 if (gp->state == oldstate) 134 return (0); 135 136 switch (gp->state) { 137 case GMON_PROF_ON: 138 #if !defined(GPROF) 139 /* 140 * If this is not a profiling kernel, we need to patch 141 * all symbols that can be instrumented. 142 */ 143 error = db_prof_enable(); 144 #endif 145 if (error == 0) { 146 if (++gmon_cpu_count == 1) 147 startprofclock(&process0); 148 } 149 break; 150 default: 151 error = EINVAL; 152 gp->state = GMON_PROF_OFF; 153 /* FALLTHROUGH */ 154 case GMON_PROF_OFF: 155 if (--gmon_cpu_count == 0) 156 stopprofclock(&process0); 157 #if !defined(GPROF) 158 db_prof_disable(); 159 #endif 160 break; 161 } 162 163 return (error); 164 } 165 166 /* 167 * Return kernel profiling information. 
168 */ 169 int 170 sysctl_doprof(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp, 171 size_t newlen) 172 { 173 CPU_INFO_ITERATOR cii; 174 struct cpu_info *ci; 175 struct gmonparam *gp = NULL; 176 int error, cpuid, op, state; 177 178 /* all sysctl names at this level are name and field */ 179 if (namelen != 2) 180 return (ENOTDIR); /* overloaded */ 181 182 op = name[0]; 183 cpuid = name[1]; 184 185 CPU_INFO_FOREACH(cii, ci) { 186 if (cpuid == CPU_INFO_UNIT(ci)) { 187 gp = ci->ci_gmon; 188 break; 189 } 190 } 191 192 if (gp == NULL) 193 return (EOPNOTSUPP); 194 195 /* Assume that if we're here it is safe to execute profiling. */ 196 gmoninit = 1; 197 198 switch (op) { 199 case GPROF_STATE: 200 state = gp->state; 201 error = sysctl_int(oldp, oldlenp, newp, newlen, &gp->state); 202 if (error) 203 return (error); 204 return (prof_state_toggle(gp, state)); 205 case GPROF_COUNT: 206 return (sysctl_struct(oldp, oldlenp, newp, newlen, 207 gp->kcount, gp->kcountsize)); 208 case GPROF_FROMS: 209 return (sysctl_struct(oldp, oldlenp, newp, newlen, 210 gp->froms, gp->fromssize)); 211 case GPROF_TOS: 212 return (sysctl_struct(oldp, oldlenp, newp, newlen, 213 gp->tos, gp->tossize)); 214 case GPROF_GMONPARAM: 215 return (sysctl_rdstruct(oldp, oldlenp, newp, gp, sizeof *gp)); 216 default: 217 return (EOPNOTSUPP); 218 } 219 /* NOTREACHED */ 220 } 221 #endif /* GPROF || DDBPROF */ 222 223 /* 224 * Profiling system call. 225 * 226 * The scale factor is a fixed point number with 16 bits of fraction, so that 227 * 1.0 is represented as 0x10000. A scale factor of 0 turns off profiling. 
228 */ 229 int 230 sys_profil(struct proc *p, void *v, register_t *retval) 231 { 232 struct sys_profil_args /* { 233 syscallarg(caddr_t) samples; 234 syscallarg(size_t) size; 235 syscallarg(u_long) offset; 236 syscallarg(u_int) scale; 237 } */ *uap = v; 238 struct process *pr = p->p_p; 239 struct uprof *upp; 240 int error, s; 241 242 error = pledge_profil(p, SCARG(uap, scale)); 243 if (error) 244 return error; 245 246 if (SCARG(uap, scale) > (1 << 16)) 247 return (EINVAL); 248 if (SCARG(uap, scale) == 0) { 249 stopprofclock(pr); 250 return (0); 251 } 252 upp = &pr->ps_prof; 253 254 /* Block profile interrupts while changing state. */ 255 s = splstatclock(); 256 upp->pr_off = SCARG(uap, offset); 257 upp->pr_scale = SCARG(uap, scale); 258 upp->pr_base = (caddr_t)SCARG(uap, samples); 259 upp->pr_size = SCARG(uap, size); 260 startprofclock(pr); 261 splx(s); 262 263 return (0); 264 } 265 266 /* 267 * Scale is a fixed-point number with the binary point 16 bits 268 * into the value, and is <= 1.0. pc is at most 32 bits, so the 269 * intermediate result is at most 48 bits. 270 */ 271 #define PC_TO_INDEX(pc, prof) \ 272 ((int)(((u_quad_t)((pc) - (prof)->pr_off) * \ 273 (u_quad_t)((prof)->pr_scale)) >> 16) & ~1) 274 275 /* 276 * Collect user-level profiling statistics; called on a profiling tick, 277 * when a process is running in user-mode. This routine may be called 278 * from an interrupt context. Schedule an AST that will vector us to 279 * trap() with a context in which copyin and copyout will work. 280 * Trap will then call addupc_task(). 
 */
void
addupc_intr(struct proc *p, u_long pc, u_long nticks)
{
	struct uprof *prof;

	prof = &p->p_p->ps_prof;
	if (pc < prof->pr_off || PC_TO_INDEX(pc, prof) >= prof->pr_size)
		return;			/* out of range; ignore */

	/*
	 * Stash the sample and raise P_OWEUPC so the AST path calls
	 * addupc_task() from a context where copyin/copyout work.
	 * NOTE(review): the fields are written before the flag is set;
	 * a pending earlier pc in p_prof_addr is overwritten here —
	 * presumably acceptable sample loss, confirm against trap().
	 */
	p->p_prof_addr = pc;
	p->p_prof_ticks += nticks;
	atomic_setbits_int(&p->p_flag, P_OWEUPC);
	need_proftick(p);
}


/*
 * Much like before, but we can afford to take faults here. If the
 * update fails, we simply turn off profiling.
 *
 * Increment the 16-bit sample counter in the user-space profiling
 * buffer that corresponds to pc, via copyin/copyout.  On any copy
 * failure, profiling is stopped for the whole process.
 */
void
addupc_task(struct proc *p, u_long pc, u_int nticks)
{
	struct process *pr = p->p_p;
	struct uprof *prof;
	caddr_t addr;
	u_int i;
	u_short v;

	/* Testing PS_PROFIL may be unnecessary, but is certainly safe. */
	if ((pr->ps_flags & PS_PROFIL) == 0 || nticks == 0)
		return;

	/* Drop samples outside the range covered by the user buffer. */
	prof = &pr->ps_prof;
	if (pc < prof->pr_off ||
	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
		return;

	/* Read-modify-write the counter; counter overflow wraps silently. */
	addr = prof->pr_base + i;
	if (copyin(addr, (caddr_t)&v, sizeof(v)) == 0) {
		v += nticks;
		if (copyout((caddr_t)&v, addr, sizeof(v)) == 0)
			return;
	}
	/* The user buffer is unreadable or unwritable: stop profiling. */
	stopprofclock(pr);
}