xref: /netbsd-src/sys/kern/subr_prof.c (revision 001c68bd94f75ce9270b69227c4199fbf34ee396)
1 /*	$NetBSD: subr_prof.c,v 1.27 2003/02/01 06:23:44 thorpej Exp $	*/
2 
3 /*-
4  * Copyright (c) 1982, 1986, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  * 3. All advertising materials mentioning features or use of this software
16  *    must display the following acknowledgement:
17  *	This product includes software developed by the University of
18  *	California, Berkeley and its contributors.
19  * 4. Neither the name of the University nor the names of its contributors
20  *    may be used to endorse or promote products derived from this software
21  *    without specific prior written permission.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  *	@(#)subr_prof.c	8.4 (Berkeley) 2/14/95
36  */
37 
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: subr_prof.c,v 1.27 2003/02/01 06:23:44 thorpej Exp $");
40 
41 #include <sys/param.h>
42 #include <sys/systm.h>
43 #include <sys/kernel.h>
44 #include <sys/proc.h>
45 #include <sys/user.h>
46 #include <sys/mount.h>
47 #include <sys/sa.h>
48 #include <sys/syscallargs.h>
49 #include <sys/sysctl.h>
50 
51 #include <machine/cpu.h>
52 
53 #ifdef GPROF
54 #include <sys/malloc.h>
55 #include <sys/gmon.h>
56 
57 MALLOC_DEFINE(M_GPROF, "gprof", "kernel profiling buffer");
58 
59 /*
60  * Froms is actually a bunch of unsigned shorts indexing tos
61  */
62 struct gmonparam _gmonparam = { GMON_PROF_OFF };
63 
64 /* Actual start of the kernel text segment. */
65 extern char kernel_text[];
66 
67 extern char etext[];
68 
69 
70 void
71 kmstartup()
72 {
73 	char *cp;
74 	struct gmonparam *p = &_gmonparam;
75 	/*
76 	 * Round lowpc and highpc to multiples of the density we're using
77 	 * so the rest of the scaling (here and in gprof) stays in ints.
78 	 */
79 	p->lowpc = ROUNDDOWN(((u_long)kernel_text),
80 		HISTFRACTION * sizeof(HISTCOUNTER));
81 	p->highpc = ROUNDUP((u_long)etext,
82 		HISTFRACTION * sizeof(HISTCOUNTER));
83 	p->textsize = p->highpc - p->lowpc;
84 	printf("Profiling kernel, textsize=%ld [%lx..%lx]\n",
85 	       p->textsize, p->lowpc, p->highpc);
86 	p->kcountsize = p->textsize / HISTFRACTION;
87 	p->hashfraction = HASHFRACTION;
88 	p->fromssize = p->textsize / HASHFRACTION;
89 	p->tolimit = p->textsize * ARCDENSITY / 100;
90 	if (p->tolimit < MINARCS)
91 		p->tolimit = MINARCS;
92 	else if (p->tolimit > MAXARCS)
93 		p->tolimit = MAXARCS;
94 	p->tossize = p->tolimit * sizeof(struct tostruct);
95 	cp = (char *)malloc(p->kcountsize + p->fromssize + p->tossize,
96 	    M_GPROF, M_NOWAIT);
97 	if (cp == 0) {
98 		printf("No memory for profiling.\n");
99 		return;
100 	}
101 	memset(cp, 0, p->kcountsize + p->tossize + p->fromssize);
102 	p->tos = (struct tostruct *)cp;
103 	cp += p->tossize;
104 	p->kcount = (u_short *)cp;
105 	cp += p->kcountsize;
106 	p->froms = (u_short *)cp;
107 }
108 
109 /*
110  * Return kernel profiling information.
111  */
112 int
113 sysctl_doprof(name, namelen, oldp, oldlenp, newp, newlen)
114 	int *name;
115 	u_int namelen;
116 	void *oldp;
117 	size_t *oldlenp;
118 	void *newp;
119 	size_t newlen;
120 {
121 	struct gmonparam *gp = &_gmonparam;
122 	int error;
123 
124 	/* all sysctl names at this level are terminal */
125 	if (namelen != 1)
126 		return (ENOTDIR);		/* overloaded */
127 
128 	/* Check we got the necessary memory at startup. */
129 	if (gp->kcount == NULL)
130 		return (EOPNOTSUPP);
131 
132 	switch (name[0]) {
133 	case GPROF_STATE:
134 		error = sysctl_int(oldp, oldlenp, newp, newlen, &gp->state);
135 		if (error)
136 			return (error);
137 		if (gp->state == GMON_PROF_OFF)
138 			stopprofclock(&proc0);
139 		else
140 			startprofclock(&proc0);
141 		return (0);
142 	case GPROF_COUNT:
143 		return (sysctl_struct(oldp, oldlenp, newp, newlen,
144 		    gp->kcount, gp->kcountsize));
145 	case GPROF_FROMS:
146 		return (sysctl_struct(oldp, oldlenp, newp, newlen,
147 		    gp->froms, gp->fromssize));
148 	case GPROF_TOS:
149 		return (sysctl_struct(oldp, oldlenp, newp, newlen,
150 		    gp->tos, gp->tossize));
151 	case GPROF_GMONPARAM:
152 		return (sysctl_rdstruct(oldp, oldlenp, newp, gp, sizeof(*gp)));
153 	default:
154 		return (EOPNOTSUPP);
155 	}
156 	/* NOTREACHED */
157 }
158 #endif /* GPROF */
159 
160 /*
161  * Profiling system call.
162  *
163  * The scale factor is a fixed point number with 16 bits of fraction, so that
164  * 1.0 is represented as 0x10000.  A scale factor of 0 turns off profiling.
165  */
166 /* ARGSUSED */
167 int
168 sys_profil(l, v, retval)
169 	struct lwp *l;
170 	void *v;
171 	register_t *retval;
172 {
173 	struct sys_profil_args /* {
174 		syscallarg(caddr_t) samples;
175 		syscallarg(u_int) size;
176 		syscallarg(u_int) offset;
177 		syscallarg(u_int) scale;
178 	} */ *uap = v;
179 	struct proc *p = l->l_proc;
180 	struct uprof *upp;
181 	int s;
182 
183 	if (SCARG(uap, scale) > (1 << 16))
184 		return (EINVAL);
185 	if (SCARG(uap, scale) == 0) {
186 		stopprofclock(p);
187 		return (0);
188 	}
189 	upp = &p->p_stats->p_prof;
190 
191 	/* Block profile interrupts while changing state. */
192 	s = splstatclock();
193 	upp->pr_off = SCARG(uap, offset);
194 	upp->pr_scale = SCARG(uap, scale);
195 	upp->pr_base = SCARG(uap, samples);
196 	upp->pr_size = SCARG(uap, size);
197 	startprofclock(p);
198 	splx(s);
199 
200 	return (0);
201 }
202 
/*
 * Convert a profiled pc to an even byte index into the sample buffer
 * by scaling (pc - pr_off) with pr_scale.  Scale is a fixed-point
 * number with the binary point 16 bits into the value, and is
 * <= 1.0.  pc is at most 32 bits, so the intermediate result is at
 * most 48 bits.  The low bit of the result is masked off (& ~1) so
 * the index is aligned for the u_short counters it selects.
 */
#define	PC_TO_INDEX(pc, prof) \
	((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
	    (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
211 
212 /*
213  * Collect user-level profiling statistics; called on a profiling tick,
214  * when a process is running in user-mode.  This routine may be called
215  * from an interrupt context.  We try to update the user profiling buffers
216  * cheaply with fuswintr() and suswintr().  If that fails, we revert to
217  * an AST that will vector us to trap() with a context in which copyin
218  * and copyout will work.  Trap will then call addupc_task().
219  *
220  * Note that we may (rarely) not get around to the AST soon enough, and
221  * lose profile ticks when the next tick overwrites this one, but in this
222  * case the system is overloaded and the profile is probably already
223  * inaccurate.
224  */
225 void
226 addupc_intr(p, pc)
227 	struct proc *p;
228 	u_long pc;
229 {
230 	struct uprof *prof;
231 	caddr_t addr;
232 	u_int i;
233 	int v;
234 
235 	prof = &p->p_stats->p_prof;
236 	if (pc < prof->pr_off ||
237 	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
238 		return;			/* out of range; ignore */
239 
240 	addr = prof->pr_base + i;
241 	if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + 1) == -1) {
242 		prof->pr_addr = pc;
243 		prof->pr_ticks++;
244 		need_proftick(p);
245 	}
246 }
247 
248 /*
249  * Much like before, but we can afford to take faults here.  If the
250  * update fails, we simply turn off profiling.
251  */
252 void
253 addupc_task(p, pc, ticks)
254 	struct proc *p;
255 	u_long pc;
256 	u_int ticks;
257 {
258 	struct uprof *prof;
259 	caddr_t addr;
260 	u_int i;
261 	u_short v;
262 
263 	/* Testing P_PROFIL may be unnecessary, but is certainly safe. */
264 	if ((p->p_flag & P_PROFIL) == 0 || ticks == 0)
265 		return;
266 
267 	prof = &p->p_stats->p_prof;
268 	if (pc < prof->pr_off ||
269 	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
270 		return;
271 
272 	addr = prof->pr_base + i;
273 	if (copyin(addr, (caddr_t)&v, sizeof(v)) == 0) {
274 		v += ticks;
275 		if (copyout((caddr_t)&v, addr, sizeof(v)) == 0)
276 			return;
277 	}
278 	stopprofclock(p);
279 }
280