/*
 * Copyright (c) 1989 The Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)kern_ktrace.c	7.4 (Berkeley) 05/10/90
 */

#ifdef KTRACE

#include "param.h"
#include "user.h"
#include "proc.h"
#include "file.h"
#include "vnode.h"
#include "ktrace.h"
#include "malloc.h"

#include "syscalls.c"

extern int nsysent;
extern char *syscallnames[];

int ktrace_nocheck = 1;

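/*
 * Allocate a ktrace record header and fill in the fields common to all
 * records: the record type, a timestamp, and the pid and command name
 * of the current process.
 */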
struct ktr_header *
ktrgetheader(type)
{
	register struct ktr_header *kth;

	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
		M_TEMP, M_WAITOK);
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = u.u_procp->p_pid;
	bcopy(u.u_procp->p_comm, kth->ktr_comm, MAXCOMLEN);
	return (kth);
}

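/*
 * Log a system call entry: the syscall code, the argument count, and
 * the argument words copied from u.u_arg.
 */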
ktrsyscall(vp, code, narg)
	struct vnode *vp;
{
	struct	ktr_header *kth = ktrgetheader(KTR_SYSCALL);
	struct	ktr_syscall *ktp;
	register len = sizeof(struct ktr_syscall) + (narg * sizeof(int));
	int	*argp, i;

	if (kth == NULL)
		return;
	MALLOC(ktp, struct ktr_syscall *, len, M_TEMP, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	argp = (int *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < narg; i++)
		*argp++ = u.u_arg[i];
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = len;
	ktrwrite(vp, kth);
	FREE(ktp, M_TEMP);
	FREE(kth, M_TEMP);
}

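/*
 * Log a system call return: the syscall code, the error status from
 * u.u_error, and the primary return value.
 */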
ktrsysret(vp, code)
	struct vnode *vp;
{
	struct ktr_header *kth = ktrgetheader(KTR_SYSRET);
	struct ktr_sysret ktp;

	if (kth == NULL)
		return;
	ktp.ktr_code = code;
	ktp.ktr_error = u.u_error;
	ktp.ktr_retval = u.u_r.r_val1;		/* what about val2 ? */

	kth->ktr_buf = (caddr_t)&ktp;
	kth->ktr_len = sizeof(struct ktr_sysret);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
}

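/*
 * Log a pathname looked up by namei().
 */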
ktrnamei(vp, path)
	struct vnode *vp;
	char *path;
{
	struct ktr_header *kth = ktrgetheader(KTR_NAMEI);

	if (kth == NULL)
		return;
	kth->ktr_len = strlen(path);
	kth->ktr_buf = path;

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
}

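/*
 * Log general file I/O: the descriptor, the transfer direction, and a
 * copy of the user data described by the iovec array.
 */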
ktrgenio(vp, fd, rw, iov, len)
	struct vnode *vp;
	enum uio_rw rw;
	register struct iovec *iov;
{
	struct ktr_header *kth = ktrgetheader(KTR_GENIO);
	register struct ktr_genio *ktp;
	register caddr_t cp;
	register int resid = len, cnt;

	if (kth == NULL)
		return;
	if (u.u_error) {
		/* nothing to trace on error; free the header allocated above */
		FREE(kth, M_TEMP);
		return;
	}
	MALLOC(ktp, struct ktr_genio *, sizeof(struct ktr_genio) + len,
		M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;
	cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio));
	while (resid > 0) {
		if ((cnt = iov->iov_len) > resid)
			cnt = resid;
		if (copyin(iov->iov_base, cp, cnt))
			goto done;
		cp += cnt;
		resid -= cnt;
		iov++;
	}
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = sizeof (struct ktr_genio) + len;

	ktrwrite(vp, kth);
done:
	FREE(kth, M_TEMP);
	FREE(ktp, M_TEMP);
}

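/*
 * Log a processed signal: the signal number, the action taken, the
 * signal mask, and the signal-specific code.
 */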
ktrpsig(vp, sig, action, mask, code)
	struct	vnode *vp;
	sig_t	action;
{
	struct ktr_header *kth = ktrgetheader(KTR_PSIG);
	struct ktr_psig	kp;

	if (kth == NULL)
		return;
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = mask;
	kp.code = code;
	kth->ktr_buf = (caddr_t)&kp;
	kth->ktr_len = sizeof (struct ktr_psig);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
}

/* Interface and common routines */

/*
 * ktrace system call
 */
ktrace()
{
	register struct a {
		char	*fname;
		int	ops;
		int	facs;
		int	pid;
	} *uap = (struct a *)u.u_ap;
	register struct vnode *vp = NULL;
	register struct nameidata *ndp = &u.u_nd;
	register struct proc *p;
	register ops = KTROP(uap->ops);
	struct pgrp *pg;
	register int facs = uap->facs;
	register int ret = 0;

	/*
	 * Until security implications are thought through,
	 * limit tracing to root (unless ktrace_nocheck is set).
	 */
	if (!ktrace_nocheck && (u.u_error = suser(u.u_cred, &u.u_acflag)))
		return;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		ndp->ni_segflg = UIO_USERSPACE;
		ndp->ni_dirp = uap->fname;
		if (u.u_error = vn_open(ndp, FREAD|FWRITE, 0))
			return;
		vp = ndp->ni_vp;
		if (vp->v_type != VREG) {
			u.u_error = EACCES;
			vrele(vp);
			return;
		}
	}
	/*
	 * Clear all uses of the tracefile
	 */
	if (ops == KTROP_CLEARFILE) {
		for (p = allproc; p != NULL; p = p->p_nxt) {
			if (p->p_tracep == vp) {
				p->p_tracep = NULL;
				p->p_traceflag = 0;
				vrele(vp);
			}
		}
		goto done;
	}
	/*
	 * need something to (un)trace
	 */
	if (!facs) {
		u.u_error = EINVAL;
		goto done;
	}
	/*
	 * doit
	 */
	if (uap->pid < 0) {
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			u.u_error = ESRCH;
			goto done;
		}
		for (p = pg->pg_mem; p != NULL; p = p->p_pgrpnxt)
			if (uap->ops&KTRFLAG_DESCEND)
				ret |= ktrsetchildren(p, ops, facs, vp);
			else
				ret |= ktrops(p, ops, facs, vp);

	} else {
		p = pfind(uap->pid);
		if (p == NULL) {
			u.u_error = ESRCH;
			goto done;
		}
		if (ops&KTRFLAG_DESCEND)
			ret |= ktrsetchildren(p, ops, facs, vp);
		else
			ret |= ktrops(p, ops, facs, vp);
	}
	if (!ret)
		u.u_error = EPERM;
done:
	if (vp != NULL)
		vrele(vp);
}

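/*
 * Set or clear the requested trace facilities on a single process,
 * taking or dropping a reference on the trace vnode as needed.
 * Returns 0 if the caller is not permitted to trace the process,
 * 1 otherwise.
 */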
ktrops(p, ops, facs, vp)
	struct proc *p;
	struct vnode *vp;
{

	if (u.u_uid && u.u_uid != p->p_uid)
		return 0;
	if (ops == KTROP_SET) {
		if (p->p_tracep != vp) {
			/*
			 * if trace file already in use, relinquish
			 */
			if (p->p_tracep != NULL)
				vrele(p->p_tracep);
			VREF(vp);
			p->p_tracep = vp;
		}
		p->p_traceflag |= facs;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & ~KTRFAC_INHERIT) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			if (p->p_tracep != NULL) {
				vrele(p->p_tracep);
				p->p_tracep = NULL;
			}
		}
	}

	return 1;
}

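/*
 * Apply ktrops() to a process and all of its descendants, walking the
 * process tree iteratively through the child, sibling, and parent links
 * (never ascending past top).
 */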
ktrsetchildren(top, ops, facs, vp)
	struct proc *top;
	struct vnode *vp;
{
	register struct proc *p;
	register int ndx;
	register int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (p->p_cptr)
			p = p->p_cptr;
		else if (p == top)
			return ret;
		else if (p->p_osptr)
			p = p->p_osptr;
		else for (;;) {
			p = p->p_pptr;
			if (p == top)
				return ret;
			if (p->p_osptr) {
				p = p->p_osptr;
				break;
			}
		}
	}
	/*NOTREACHED*/
}

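/*
 * Append one trace record to the trace vnode: the header followed by
 * any variable-length data it points to, written as a single appending
 * write.  Write errors are silently ignored.
 */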
ktrwrite(vp, kth)
	struct vnode *vp;
	register struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	int error;

	if (vp == NULL)
		return;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}
	VOP_LOCK(vp);
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, u.u_cred);
	VOP_UNLOCK(vp);
}
#endif