/*-
 * Copyright (c) 1983, 1992, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * %sccs.include.redist.c%
 */

#if !defined(lint) && !defined(KERNEL) && defined(LIBC_SCCS)
static char sccsid[] = "@(#)mcount.c	8.1 (Berkeley) 06/04/93";
#endif

#include <sys/param.h>
#include <sys/gmon.h>
1454905Smckusick
1554905Smckusick /*
1654905Smckusick * mcount is called on entry to each function compiled with the profiling
1754905Smckusick * switch set. _mcount(), which is declared in a machine-dependent way
1854905Smckusick * with _MCOUNT_DECL, does the actual work and is either inlined into a
1954905Smckusick * C routine or called by an assembly stub. In any case, this magic is
2055179Storek * taken care of by the MCOUNT definition in <machine/profile.h>.
2154905Smckusick *
2254905Smckusick * _mcount updates data structures that represent traversals of the
2354905Smckusick * program's call graph edges. frompc and selfpc are the return
2454905Smckusick * address and function address that represents the given call graph edge.
2554905Smckusick *
2654905Smckusick * Note: the original BSD code used the same variable (frompcindex) for
2754905Smckusick * both frompcindex and frompc. Any reasonable, modern compiler will
2854905Smckusick * perform this optimization.
2954905Smckusick */
/*
 * _mcount: record one traversal of the call-graph arc frompc -> selfpc.
 *
 * p->froms[] hashes the caller's pc offset to the index (into p->tos[])
 * of the head of a chain of tostruct arc records; each record holds one
 * callee address, its traversal count, and a link to the next arc that
 * hashed to the same froms[] slot.
 */
_MCOUNT_DECL(frompc, selfpc)	/* _mcount; may be static, inline, etc */
	register u_long frompc, selfpc;
{
	register u_short *frompcindex;
	register struct tostruct *top, *prevtop;
	register struct gmonparam *p;
	register long toindex;
#ifdef KERNEL
	/*
	 * NOTE(review): s is referenced only inside the machine-dependent
	 * MCOUNT_ENTER/MCOUNT_EXIT macros (presumably the saved interrupt
	 * priority) -- confirm against <machine/profile.h>.
	 */
	register int s;
#endif

	p = &_gmonparam;
	/*
	 * check that we are profiling
	 * and that we aren't recursively invoked.
	 */
	if (p->state != GMON_PROF_ON)
		return;
#ifdef KERNEL
	MCOUNT_ENTER;
#else
	/* mark busy so the recursion check above rejects reentry */
	p->state = GMON_PROF_BUSY;
#endif
	/*
	 * check that frompcindex is a reasonable pc value.
	 * for example:	signal catchers get called from the stack,
	 *		not from text space.  too bad.
	 * (frompc is unsigned, so a caller below lowpc wraps around
	 * and is likewise rejected by the textsize comparison.)
	 */
	frompc -= p->lowpc;
	if (frompc > p->textsize)
		goto done;

	/* hash the caller's offset to its chain-head slot in froms[] */
	frompcindex = &p->froms[frompc / (p->hashfraction * sizeof(*p->froms))];
	toindex = *frompcindex;
	if (toindex == 0) {
		/*
		 * first time traversing this arc.
		 * tos[0].link holds the index of the last tostruct
		 * handed out, so the pre-increment allocates a new one.
		 */
		toindex = ++p->tos[0].link;
		if (toindex >= p->tolimit)
			/* halt further profiling */
			goto overflow;

		*frompcindex = toindex;
		top = &p->tos[toindex];
		top->selfpc = selfpc;
		top->count = 1;
		top->link = 0;
		goto done;
	}
	top = &p->tos[toindex];
	if (top->selfpc == selfpc) {
		/*
		 * arc at front of chain; usual case.
		 */
		top->count++;
		goto done;
	}
	/*
	 * have to go looking down chain for it.
	 * top points to what we are looking at,
	 * prevtop points to previous top.
	 * we know it is not at the head of the chain.
	 */
	for (; /* goto done */; ) {
		if (top->link == 0) {
			/*
			 * top is end of the chain and none of the chain
			 * had top->selfpc == selfpc.
			 * so we allocate a new tostruct
			 * and link it to the head of the chain.
			 */
			toindex = ++p->tos[0].link;
			if (toindex >= p->tolimit)
				goto overflow;

			top = &p->tos[toindex];
			top->selfpc = selfpc;
			top->count = 1;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}
		/*
		 * otherwise, check the next arc on the chain.
		 */
		prevtop = top;
		top = &p->tos[top->link];
		if (top->selfpc == selfpc) {
			/*
			 * there it is.
			 * increment its count
			 * move it to the head of the chain
			 * (unlink it here, relink it at the front).
			 */
			top->count++;
			toindex = prevtop->link;
			prevtop->link = top->link;
			top->link = *frompcindex;
			*frompcindex = toindex;
			goto done;
		}

	}
done:
	/* common exit: clear the busy flag / release the kernel guard */
#ifdef KERNEL
	MCOUNT_EXIT;
#else
	p->state = GMON_PROF_ON;
#endif
	return;
overflow:
	/* tos[] is full: latch the error state so profiling stays off */
	p->state = GMON_PROF_ERROR;
#ifdef KERNEL
	MCOUNT_EXIT;
#endif
	return;
}
14754905Smckusick
/*
 * Actual definition of the mcount function.  MCOUNT is defined in
 * <machine/profile.h> (included by <sys/gmon.h>) and expands to the
 * machine-dependent entry point -- either a C wrapper that inlines
 * _mcount() above or a stub calling it (see the comment at the top
 * of this file).
 */
MCOUNT
153