xref: /csrg-svn/sys/kern/subr_prof.c (revision 54887)
/*-
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)subr_prof.c	7.16 (Berkeley) 07/10/92
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/user.h>
#include <machine/cpu.h>

#ifdef GPROF
#include <sys/malloc.h>
#include <sys/gmon.h>

/*
 * Froms is actually a bunch of unsigned shorts indexing tos
 */
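/*
 * (Rough sketch, from the gmon structures declared in <sys/gmon.h>, not
 * this file: froms[] is keyed by call-site pc and holds indices into
 * tos[]; each tostruct records a callee pc, an arc count, and a link to
 * the next arc that hashed to the same froms[] slot.  mcount() fills
 * these in at run time.)
 */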
struct gmonparam _gmonparam = { GMON_PROF_OFF };

u_short	*kcount;
extern char etext[];

kmstartup()
{
	char *cp;
	int fsize, tsize, ksize;
	struct gmonparam *p = &_gmonparam;
	/*
	 * Round lowpc and highpc to multiples of the density we're using
	 * so the rest of the scaling (here and in gprof) stays in ints.
	 */
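	/*
	 * (Illustrative example, assuming the usual HISTFRACTION of 2 and
	 * 2-byte HISTCOUNTERs from <sys/gmon.h>: both bounds get rounded
	 * to 4-byte multiples, so textsize is a whole number of 4-byte
	 * sample bins and textsize / HISTFRACTION below is an exact byte
	 * count for the counter array.)
	 */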
	p->lowpc = ROUNDDOWN(KERNBASE, HISTFRACTION * sizeof(HISTCOUNTER));
	p->highpc = ROUNDUP((u_long)etext, HISTFRACTION * sizeof(HISTCOUNTER));
	p->textsize = p->highpc - p->lowpc;
	printf("Profiling kernel, textsize=%d [%x..%x]\n",
	       p->textsize, p->lowpc, p->highpc);
	ksize = p->textsize / HISTFRACTION;
	fsize = p->textsize / HASHFRACTION;
	p->tolimit = p->textsize * ARCDENSITY / 100;
	if (p->tolimit < MINARCS)
		p->tolimit = MINARCS;
	else if (p->tolimit > MAXARCS)
		p->tolimit = MAXARCS;
	tsize = p->tolimit * sizeof(struct tostruct);
	cp = (char *)malloc(ksize + fsize + tsize, M_GPROF, M_NOWAIT);
	if (cp == 0) {
		printf("No memory for profiling.\n");
		return;
	}
	bzero(cp, ksize + tsize + fsize);
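	/*
	 * Carve the single allocation into its three pieces, in order:
	 * the arc records (tos), the pc-sample counters (kcount), and
	 * the call-site hash (froms).
	 */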
	p->tos = (struct tostruct *)cp;
	cp += tsize;
	kcount = (u_short *)cp;
	cp += ksize;
	p->froms = (u_short *)cp;
	startprofclock(&proc0);
}
#endif

/*
 * Profiling system call.
 *
 * The scale factor is a fixed point number with 16 bits of fraction, so that
 * 1.0 is represented as 0x10000.  A scale factor of 0 turns off profiling.
 */
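/*
 * (Illustrative numbers, not from this file: with a scale of 0x8000, i.e.
 * one half, PC_TO_INDEX below maps every 4 bytes of profiled text onto one
 * 16-bit counter, so the buffer need only be half the size of the profiled
 * region; a scale of 0x10000 devotes one counter to every 2 bytes of text.)
 */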
/* ARGSUSED */
profil(p, uap, retval)
	struct proc *p;
	register struct args {
		caddr_t	buf;
		u_int	bufsize;
		u_int	offset;
		u_int	scale;
	} *uap;
	int *retval;
{
	register struct uprof *upp;
	int s;

	if (uap->scale > (1 << 16))
		return (EINVAL);
	if (uap->scale == 0) {
		stopprofclock(p);
		return (0);
	}
	upp = &p->p_stats->p_prof;
	s = splstatclock(); /* block profile interrupts while changing state */
	upp->pr_base = uap->buf;
	upp->pr_size = uap->bufsize;
	upp->pr_off = uap->offset;
	upp->pr_scale = uap->scale;
	startprofclock(p);
	splx(s);
	return (0);
}

/*
 * Scale is a fixed-point number with the binary point 16 bits
 * into the value, and is <= 1.0.  pc is at most 32 bits, so the
 * intermediate result is at most 48 bits.
 */
#define	PC_TO_INDEX(pc, prof) \
	((int)(((u_quad_t)((pc) - (prof)->pr_off) * \
	    (u_quad_t)((prof)->pr_scale)) >> 16) & ~1)
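/*
 * (Worked example with made-up values: pr_off = 0x1000, pr_scale = 0x8000,
 * pc = 0x1106.  Then (0x106 * 0x8000) >> 16 = 0x83, and masking with ~1
 * gives index 0x82, the byte offset of an even-aligned u_short counter
 * within the pr_size-byte buffer.)
 */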

/*
 * Collect user-level profiling statistics; called on a profiling tick,
 * when a process is running in user-mode.  This routine may be called
 * from an interrupt context.  We try to update the user profiling buffers
 * cheaply with fuswintr() and suswintr().  If that fails, we revert to
 * an AST that will vector us to trap() with a context in which copyin
 * and copyout will work.  Trap will then call addupc_task().
 *
 * Note that we may (rarely) not get around to the AST soon enough, and
 * lose profile ticks when the next tick overwrites this one, but in this
 * case the system is overloaded and the profile is probably already
 * inaccurate.
 */
void
addupc_intr(p, pc, ticks)
	register struct proc *p;
	register u_long pc;
	u_int ticks;
{
	register struct uprof *prof;
	register caddr_t addr;
	register u_int i;
	register int v;

	if (ticks == 0)
		return;
	prof = &p->p_stats->p_prof;
	if (pc < prof->pr_off ||
	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
		return;			/* out of range; ignore */

	addr = prof->pr_base + i;
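	/*
	 * If the interrupt-safe fetch/store fails (returns -1), stash the
	 * sample in pr_addr/pr_ticks and post an AST; trap() redoes the
	 * update via addupc_task(), where copyin/copyout may fault.
	 */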
	if ((v = fuswintr(addr)) == -1 || suswintr(addr, v + ticks) == -1) {
		prof->pr_addr = pc;
		prof->pr_ticks = ticks;
		need_proftick(p);
	}
}

/*
 * Much like before, but we can afford to take faults here.  If the
 * update fails, we simply turn off profiling.
 */
void
addupc_task(p, pc, ticks)
	register struct proc *p;
	register u_long pc;
	u_int ticks;
{
	register struct uprof *prof;
	register caddr_t addr;
	register u_int i;
	u_short v;

	/* testing SPROFIL may be unnecessary, but is certainly safe */
	if ((p->p_flag & SPROFIL) == 0 || ticks == 0)
		return;

	prof = &p->p_stats->p_prof;
	if (pc < prof->pr_off ||
	    (i = PC_TO_INDEX(pc, prof)) >= prof->pr_size)
		return;

	addr = prof->pr_base + i;
	if (copyin(addr, (caddr_t)&v, sizeof(v)) == 0) {
		v += ticks;
		if (copyout((caddr_t)&v, addr, sizeof(v)) == 0)
			return;
	}
	stopprofclock(p);
}