/*	$NetBSD: kern_ktrace.c,v 1.15 1994/12/14 19:07:08 mycroft Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#ifdef KTRACE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

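/*
 * Allocate a trace record header and fill in the fields common to
 * every record type: type code, timestamp, and the pid and command
 * name of the traced process.
 */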
struct ktr_header *
ktrgetheader(type)
	int type;
{
	register struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
		M_TEMP, M_WAITOK);
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
	return (kth);
}

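/*
 * Emit a KTR_SYSCALL record: the system call number followed by a
 * copy of its arguments.
 */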
ktrsyscall(vp, code, narg, argsize, args)
	struct vnode *vp;
	int code, narg, argsize, args[];
{
	struct	ktr_header *kth;
	struct	ktr_syscall *ktp;
	register len = sizeof(struct ktr_syscall) + argsize;
	struct proc *p = curproc;	/* XXX */
	int 	*argp, i;

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSCALL);
	MALLOC(ktp, struct ktr_syscall *, len, M_TEMP, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	argp = (int *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < (argsize / sizeof *argp); i++)
		*argp++ = args[i];
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = len;
	ktrwrite(vp, kth);
	FREE(ktp, M_TEMP);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

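/*
 * Emit a KTR_SYSRET record: the system call number together with its
 * error status and first return value.
 */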
ktrsysret(vp, code, error, retval)
	struct vnode *vp;
	int code, error, retval;
{
	struct ktr_header *kth;
	struct ktr_sysret ktp;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSRET);
	ktp.ktr_code = code;
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */

	kth->ktr_buf = (caddr_t)&ktp;
	kth->ktr_len = sizeof(struct ktr_sysret);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

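/*
 * Emit a KTR_NAMEI record containing the pathname being looked up.
 */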
ktrnamei(vp, path)
	struct vnode *vp;
	char *path;
{
	struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_NAMEI);
	kth->ktr_len = strlen(path);
	kth->ktr_buf = path;

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

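/*
 * Emit a KTR_GENIO record: descriptor, transfer direction, and a copy
 * of the transferred data gathered from the caller's iovec list.
 * Nothing is logged if the I/O itself failed.
 */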
ktrgenio(vp, fd, rw, iov, len, error)
	struct vnode *vp;
	int fd;
	enum uio_rw rw;
	register struct iovec *iov;
	int len, error;
{
	struct ktr_header *kth;
	register struct ktr_genio *ktp;
	register caddr_t cp;
	register int resid = len, cnt;
	struct proc *p = curproc;	/* XXX */

	if (error)
		return;
	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_GENIO);
	MALLOC(ktp, struct ktr_genio *, sizeof(struct ktr_genio) + len,
		M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;
	cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio));
	while (resid > 0) {
		if ((cnt = iov->iov_len) > resid)
			cnt = resid;
		if (copyin(iov->iov_base, cp, (unsigned)cnt))
			goto done;
		cp += cnt;
		resid -= cnt;
		iov++;
	}
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = sizeof (struct ktr_genio) + len;

	ktrwrite(vp, kth);
done:
	FREE(kth, M_TEMP);
	FREE(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

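/*
 * Emit a KTR_PSIG record describing a delivered signal: signal number,
 * handler, signal mask, and signal-specific code.
 */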
ktrpsig(vp, sig, action, mask, code)
	struct vnode *vp;
	int sig;
	sig_t action;
	int mask, code;
{
	struct ktr_header *kth;
	struct ktr_psig	kp;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_PSIG);
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = mask;
	kp.code = code;
	kth->ktr_buf = (caddr_t)&kp;
	kth->ktr_len = sizeof (struct ktr_psig);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

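/*
 * Emit a KTR_CSW record noting a context switch, recording the out
 * and user flags supplied by the caller.
 */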
ktrcsw(vp, out, user)
	struct vnode *vp;
	int out, user;
{
	struct ktr_header *kth;
	struct	ktr_csw kc;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_CSW);
	kc.out = out;
	kc.user = user;
	kth->ktr_buf = (caddr_t)&kc;
	kth->ktr_len = sizeof (struct ktr_csw);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/* Interface and common routines */

/*
 * ktrace system call
 */
/* ARGSUSED */
ktrace(curp, uap, retval)
	struct proc *curp;
	register struct ktrace_args /* {
		syscallarg(char *) fname;
		syscallarg(int) ops;
		syscallarg(int) facs;
		syscallarg(int) pid;
	} */ *uap;
	register_t *retval;
{
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = SCARG(uap, facs) & ~KTRFAC_ROOT;
	int ops = KTROP(SCARG(uap, ops));
	int descend = SCARG(uap, ops) & KTRFLAG_DESCEND;
	int ret = 0;
	int error = 0;
	struct nameidata nd;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, fname),
		    curp);
		if (error = vn_open(&nd, FREAD|FWRITE, 0)) {
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (error);
		}
		vp = nd.ni_vp;
		VOP_UNLOCK(vp);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (EACCES);
		}
	}
	/*
	 * Clear all uses of the tracefile
	 */
	if (ops == KTROP_CLEARFILE) {
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			if (p->p_tracep == vp) {
				if (ktrcanset(curp, p)) {
					p->p_tracep = NULL;
					p->p_traceflag = 0;
					(void) vn_close(vp, FREAD|FWRITE,
						p->p_ucred, p);
				} else
					error = EPERM;
			}
		}
		goto done;
	}
	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * do it
	 */
	if (SCARG(uap, pid) < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-SCARG(uap, pid));
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		for (p = pg->pg_members.lh_first; p != 0; p = p->p_pglist.le_next)
			if (descend)
				ret |= ktrsetchildren(curp, p, ops, facs, vp);
			else
				ret |= ktrops(curp, p, ops, facs, vp);

	} else {
		/*
		 * by pid
		 */
		p = pfind(SCARG(uap, pid));
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(curp, p, ops, facs, vp);
		else
			ret |= ktrops(curp, p, ops, facs, vp);
	}
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL)
		(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
	curp->p_traceflag &= ~KTRFAC_ACTIVE;
	return (error);
}

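/*
 * Apply a single KTROP_SET or KTROP_CLEAR operation to process p on
 * behalf of curp.  Returns 1 on success, 0 if curp lacks permission.
 */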
int
ktrops(curp, p, ops, facs, vp)
	struct proc *p, *curp;
	int ops, facs;
	struct vnode *vp;
{

	if (!ktrcanset(curp, p))
		return (0);
	if (ops == KTROP_SET) {
		if (p->p_tracep != vp) {
			/*
			 * if trace file already in use, relinquish
			 */
			if (p->p_tracep != NULL)
				vrele(p->p_tracep);
			VREF(vp);
			p->p_tracep = vp;
		}
		p->p_traceflag |= facs;
		if (curp->p_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			if (p->p_tracep != NULL) {
				vrele(p->p_tracep);
				p->p_tracep = NULL;
			}
		}
	}

	return (1);
}

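/*
 * Apply a trace operation to top and every process descended from it,
 * walking the process tree without recursion.
 */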
ktrsetchildren(curp, top, ops, facs, vp)
	struct proc *curp, *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (p->p_children.lh_first)
			p = p->p_children.lh_first;
		else for (;;) {
			if (p == top)
				return (ret);
			if (p->p_sibling.le_next) {
				p = p->p_sibling.le_next;
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

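/*
 * Write a completed trace record (header plus optional payload) to the
 * trace vnode.  On a write error, tracing to that vnode is disabled
 * for every process using it.
 */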
ktrwrite(vp, kth)
	struct vnode *vp;
	register struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	register struct proc *p = curproc;	/* XXX */
	int error;

	if (vp == NULL)
		return;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = (struct proc *)0;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}
	VOP_LOCK(vp);
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, p->p_ucred);
	VOP_UNLOCK(vp);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_tracep == vp) {
			p->p_tracep = NULL;
			p->p_traceflag = 0;
			vrele(vp);
		}
	}
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
ktrcanset(callp, targetp)
	struct proc *callp, *targetp;
{
	register struct pcred *caller = callp->p_cred;
	register struct pcred *target = targetp->p_cred;

	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
	     target->p_ruid == target->p_svuid &&
	     caller->p_rgid == target->p_rgid &&	/* XXX */
	     target->p_rgid == target->p_svgid &&
	     (targetp->p_traceflag & KTRFAC_ROOT) == 0) ||
	     caller->pc_ucred->cr_uid == 0)
		return (1);

	return (0);
}

#endif