/*	$OpenBSD: kern_ktrace.c,v 1.24 2001/06/26 06:27:38 aaron Exp $	*/
/*	$NetBSD: kern_ktrace.c,v 1.23 1996/02/09 18:59:36 christos Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#ifdef KTRACE

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

#include <vm/vm.h>

void ktrinitheader __P((struct ktr_header *, struct proc *, int));
int ktrops __P((struct proc *, struct proc *, int, int, struct vnode *));
int ktrsetchildren __P((struct proc *, struct proc *, int, int,
			struct vnode *));
int ktrwrite __P((struct proc *, struct ktr_header *));
int ktrcanset __P((struct proc *, struct proc *));

/*
 * Change the trace vnode in a correct way (to avoid races).
 *
 * Takes a new reference on newvp (if any) before installing it in
 * p->p_tracep, and only releases the old vnode afterwards, so the
 * process never points at a vnode without a reference held.
 */
void
ktrsettracevnode(p, newvp)
	struct proc *p;
	struct vnode *newvp;
{
	struct vnode *vp;

	if (p->p_tracep == newvp)	/* avoid work */
		return;

	if (newvp != NULL)
		VREF(newvp);

	vp = p->p_tracep;
	p->p_tracep = newvp;

	if (vp != NULL)
		vrele(vp);
}

/*
 * Initialize a ktrace record header for process p: record type,
 * timestamp, pid and command name.
 */
void
ktrinitheader(kth, p, type)
	struct ktr_header *kth;
	struct proc *p;
	int type;
{
	bzero(kth, sizeof (struct ktr_header));
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
}

/*
 * Emit a KTR_SYSCALL record: syscall code plus a copy of the argument
 * words.  KTRFAC_ACTIVE is set around the write so tracing of the
 * trace machinery itself is suppressed.
 */
void
ktrsyscall(p, code, argsize, args)
	struct proc *p;
	register_t code;
	size_t argsize;
	register_t args[];
{
	struct ktr_header kth;
	struct ktr_syscall *ktp;
	unsigned int len = sizeof(struct ktr_syscall) + argsize;
	register_t *argp;
	int i;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_SYSCALL);
	ktp = malloc(len, M_TEMP, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_argsize = argsize;
	/* argument words are stored immediately after the record header */
	argp = (register_t *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < (argsize / sizeof *argp); i++)
		*argp++ = args[i];
	kth.ktr_buf = (caddr_t)ktp;
	kth.ktr_len = len;
	ktrwrite(p, &kth);
	free(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Emit a KTR_SYSRET record: syscall code, error and first return value.
 */
void
ktrsysret(p, code, error, retval)
	struct proc *p;
	register_t code;
	int error;
	register_t retval;
{
	struct ktr_header kth;
	struct ktr_sysret ktp;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_SYSRET);
	ktp.ktr_code = code;
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;	/* what about val2 ? */

	kth.ktr_buf = (caddr_t)&ktp;
	kth.ktr_len = sizeof(struct ktr_sysret);

	ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Emit a KTR_NAMEI record containing the looked-up pathname.
 */
void
ktrnamei(p, path)
	struct proc *p;
	char *path;
{
	struct ktr_header kth;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_NAMEI);
	kth.ktr_len = strlen(path);
	kth.ktr_buf = path;

	ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Emit a KTR_EMUL record containing the name of the process emulation.
 */
void
ktremul(p, emul)
	struct proc *p;
	char *emul;
{
	struct ktr_header kth;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_EMUL);
	kth.ktr_len = strlen(emul);
	kth.ktr_buf = emul;

	ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Emit KTR_GENIO records for an I/O operation, copying the user data
 * described by iov in chunks of at most one page (minus header).  Large
 * transfers produce multiple records; if a copyin or write fails the
 * remainder is silently dropped.  Called only on success (error == 0).
 */
void
ktrgenio(p, fd, rw, iov, len, error)
	struct proc *p;
	int fd;
	enum uio_rw rw;
	struct iovec *iov;
	int len, error;
{
	struct ktr_header kth;
	struct ktr_genio *ktp;
	caddr_t cp;
	int resid = len, count;
	int buflen;

	if (error)
		return;

	p->p_traceflag |= KTRFAC_ACTIVE;

	/* one page bounds the record: header plus data chunk */
	buflen = min(PAGE_SIZE, len + sizeof(struct ktr_genio));

	ktrinitheader(&kth, p, KTR_GENIO);
	ktp = malloc(buflen, M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;

	kth.ktr_buf = (caddr_t)ktp;

	cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio));
	buflen -= sizeof(struct ktr_genio);

	while (resid > 0) {
		/*
		 * Don't allow this process to hog the cpu when doing
		 * huge I/O.
		 */
		if (p->p_schedflags & PSCHED_SHOULDYIELD)
			preempt(NULL);

		count = min(iov->iov_len, buflen);
		if (count > resid)
			count = resid;
		if (copyin(iov->iov_base, cp, count))
			break;

		kth.ktr_len = count + sizeof(struct ktr_genio);

		if (ktrwrite(p, &kth) != 0)
			break;

		/* consume this iovec; advance to the next when drained */
		iov->iov_len -= count;
		iov->iov_base = (caddr_t)iov->iov_base + count;

		if (iov->iov_len == 0)
			iov++;

		resid -= count;
	}

	free(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;

}

/*
 * Emit a KTR_PSIG record describing a posted signal: number, handler,
 * blocked mask and code.
 */
void
ktrpsig(p, sig, action, mask, code)
	struct proc *p;
	int sig;
	sig_t action;
	int mask, code;
{
	struct ktr_header kth;
	struct ktr_psig kp;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_PSIG);
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = mask;
	kp.code = code;
	kth.ktr_buf = (caddr_t)&kp;
	kth.ktr_len = sizeof (struct ktr_psig);

	ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/*
 * Emit a KTR_CSW record for a context switch (out/in, user/system).
 */
void
ktrcsw(p, out, user)
	struct proc *p;
	int out, user;
{
	struct ktr_header kth;
	struct ktr_csw kc;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_CSW);
	kc.out = out;
	kc.user = user;
	kth.ktr_buf = (caddr_t)&kc;
	kth.ktr_len = sizeof (struct ktr_csw);

	ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/* Interface and common routines */

/*
 * ktrace system call
 *
 * Opens the trace file (unless clearing), then applies the requested
 * trace operation to the target process, process group, or - with
 * KTRFLAG_DESCEND - the whole subtree below the target.
 */
/* ARGSUSED */
int
sys_ktrace(curp, v, retval)
	struct proc *curp;
	void *v;
	register_t *retval;
{
	struct sys_ktrace_args /* {
		syscallarg(char *) fname;
		syscallarg(int) ops;
		syscallarg(int) facs;
		syscallarg(int) pid;
	} */ *uap = v;
	struct vnode *vp = NULL;
	struct proc *p = NULL;
	struct pgrp *pg;
	/* KTRFAC_ROOT may only be set internally, never from userland */
	int facs = SCARG(uap, facs) & ~((unsigned) KTRFAC_ROOT);
	int ops = KTROP(SCARG(uap, ops));
	int descend = SCARG(uap, ops) & KTRFLAG_DESCEND;
	int ret = 0;
	int error = 0;
	struct nameidata nd;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, fname),
		    curp);
		if ((error = vn_open(&nd, FREAD|FWRITE|O_NOFOLLOW, 0)) != 0) {
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (error);
		}
		vp = nd.ni_vp;

		VOP_UNLOCK(vp, 0, curp);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (EACCES);
		}
	}
	/*
	 * Clear all uses of the tracefile
	 */
	if (ops == KTROP_CLEARFILE) {
		for (p = LIST_FIRST(&allproc); p; p = LIST_NEXT(p, p_list)) {
			if (p->p_tracep == vp) {
				if (ktrcanset(curp, p)) {
					p->p_traceflag = 0;
					ktrsettracevnode(p, NULL);
				} else
					error = EPERM;
			}
		}
		goto done;
	}
	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * do it
	 */
	if (SCARG(uap, pid) < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-SCARG(uap, pid));
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		for (p = pg->pg_members.lh_first; p != 0; p = p->p_pglist.le_next)
			if (descend)
				ret |= ktrsetchildren(curp, p, ops, facs, vp);
			else
				ret |= ktrops(curp, p, ops, facs, vp);

	} else {
		/*
		 * by pid
		 */
		p = pfind(SCARG(uap, pid));
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(curp, p, ops, facs, vp);
		else
			ret |= ktrops(curp, p, ops, facs, vp);
	}
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL)
		/* close with the same flags the file was opened with */
		(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
	curp->p_traceflag &= ~KTRFAC_ACTIVE;
	return (error);
}

/*
 * Apply a single trace operation (set or clear facs) to process p,
 * on behalf of curp.  Returns 1 on success, 0 if curp lacks
 * permission.  Emits a KTR_EMUL record after every change/attach.
 */
int
ktrops(curp, p, ops, facs, vp)
	struct proc *p, *curp;
	int ops, facs;
	struct vnode *vp;
{

	if (!ktrcanset(curp, p))
		return (0);
	if (ops == KTROP_SET) {
		ktrsettracevnode(p, vp);
		p->p_traceflag |= facs;
		/* remember a root-initiated trace; see ktrcanset() */
		if (curp->p_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			ktrsettracevnode(p, NULL);
		}
	}

	/*
	 * Emit an emulation record, every time there is a ktrace
	 * change/attach request.
	 */
	if (KTRPOINT(p, KTR_EMUL))
		ktremul(p, p->p_emul->e_name);

	return (1);
}

/*
 * Apply a trace operation to top and every process in the subtree
 * below it, via an explicit depth-first walk of p_children/p_sibling.
 * Returns the OR of the per-process ktrops() results.
 */
int
ktrsetchildren(curp, top, ops, facs, vp)
	struct proc *curp, *top;
	int ops, facs;
	struct vnode *vp;
{
	struct proc *p;
	int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (p->p_children.lh_first)
			p = p->p_children.lh_first;
		else for (;;) {
			if (p == top)
				return (ret);
			if (p->p_sibling.le_next) {
				p = p->p_sibling.le_next;
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

/*
 * Append one ktrace record (header plus optional payload) to the
 * trace vnode of p.  On a write error, tracing to that vnode is
 * disabled for every process in the system.
 */
int
ktrwrite(p, kth)
	struct proc *p;
	struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	int error;
	struct vnode *vp = p->p_tracep;

	if (vp == NULL)
		return 0;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = (struct proc *)0;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, p->p_ucred);
	VOP_UNLOCK(vp, 0, p);
	if (!error)
		return 0;
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	for (p = LIST_FIRST(&allproc); p != NULL; p = LIST_NEXT(p, p_list)) {
		if (p->p_tracep == vp) {
			p->p_traceflag = 0;
			ktrsettracevnode(p, NULL);
		}
	}

	return error;
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
int
ktrcanset(callp, targetp)
	struct proc *callp, *targetp;
{
	struct pcred *caller = callp->p_cred;
	struct pcred *target = targetp->p_cred;

	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
	     target->p_ruid == target->p_svuid &&
	     caller->p_rgid == target->p_rgid &&	/* XXX */
	     target->p_rgid == target->p_svgid &&
	     (targetp->p_traceflag & KTRFAC_ROOT) == 0) ||
	     caller->pc_ucred->cr_uid == 0)
		return (1);

	return (0);
}

#endif