/*	$NetBSD: kern_ktrace.c,v 1.12 1994/08/30 03:05:37 mycroft Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#ifdef KTRACE

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>

struct ktr_header *
ktrgetheader(type)
	int type;
{
	register struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	MALLOC(kth, struct ktr_header *, sizeof (struct ktr_header),
	    M_TEMP, M_WAITOK);
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN);
	return (kth);
}

ktrsyscall(vp, code, narg, args)
	struct vnode *vp;
	int code, narg, args[];
{
	struct ktr_header *kth;
	struct ktr_syscall *ktp;
	register len = sizeof(struct ktr_syscall) + (narg * sizeof(int));
	struct proc *p = curproc;	/* XXX */
	int *argp, i;

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSCALL);
	MALLOC(ktp, struct ktr_syscall *, len, M_TEMP, M_WAITOK);
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	argp = (int *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < narg; i++)
		*argp++ = args[i];
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = len;
	ktrwrite(vp, kth);
	FREE(ktp, M_TEMP);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

ktrsysret(vp, code, error, retval)
	struct vnode *vp;
	int code, error, retval;
{
	struct ktr_header *kth;
	struct ktr_sysret ktp;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_SYSRET);
	ktp.ktr_code = code;
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;	/* what about val2 ? */

	kth->ktr_buf = (caddr_t)&ktp;
	kth->ktr_len = sizeof(struct ktr_sysret);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

ktrnamei(vp, path)
	struct vnode *vp;
	char *path;
{
	struct ktr_header *kth;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_NAMEI);
	kth->ktr_len = strlen(path);
	kth->ktr_buf = path;

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

ktrgenio(vp, fd, rw, iov, len, error)
	struct vnode *vp;
	int fd;
	enum uio_rw rw;
	register struct iovec *iov;
	int len, error;
{
	struct ktr_header *kth;
	register struct ktr_genio *ktp;
	register caddr_t cp;
	register int resid = len, cnt;
	struct proc *p = curproc;	/* XXX */

	if (error)
		return;
	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_GENIO);
	MALLOC(ktp, struct ktr_genio *, sizeof(struct ktr_genio) + len,
	    M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;
	cp = (caddr_t)((char *)ktp + sizeof (struct ktr_genio));
	while (resid > 0) {
		if ((cnt = iov->iov_len) > resid)
			cnt = resid;
		if (copyin(iov->iov_base, cp, (unsigned)cnt))
			goto done;
		cp += cnt;
		resid -= cnt;
		iov++;
	}
	kth->ktr_buf = (caddr_t)ktp;
	kth->ktr_len = sizeof (struct ktr_genio) + len;

	ktrwrite(vp, kth);
done:
	FREE(kth, M_TEMP);
	FREE(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

ktrpsig(vp, sig, action, mask, code)
	struct vnode *vp;
	int sig;
	sig_t action;
	int mask, code;
{
	struct ktr_header *kth;
	struct ktr_psig kp;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_PSIG);
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = mask;
	kp.code = code;
	kth->ktr_buf = (caddr_t)&kp;
	kth->ktr_len = sizeof (struct ktr_psig);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

ktrcsw(vp, out, user)
	struct vnode *vp;
	int out, user;
{
	struct ktr_header *kth;
	struct ktr_csw kc;
	struct proc *p = curproc;	/* XXX */

	p->p_traceflag |= KTRFAC_ACTIVE;
	kth = ktrgetheader(KTR_CSW);
	kc.out = out;
	kc.user = user;
	kth->ktr_buf = (caddr_t)&kc;
	kth->ktr_len = sizeof (struct ktr_csw);

	ktrwrite(vp, kth);
	FREE(kth, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/* Interface and common routines */

/*
 * ktrace system call
 */
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
/* ARGSUSED */
ktrace(curp, uap, retval)
	struct proc *curp;
	register struct ktrace_args *uap;
	int *retval;
{
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int error = 0;
	struct nameidata nd;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, uap->fname, curp);
		if (error = vn_open(&nd, FREAD|FWRITE, 0)) {
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (error);
		}
		vp = nd.ni_vp;
		VOP_UNLOCK(vp);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (EACCES);
		}
	}
	/*
	 * Clear all uses of the tracefile
	 */
	if (ops == KTROP_CLEARFILE) {
		for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
			if (p->p_tracep == vp) {
				if (ktrcanset(curp, p)) {
					p->p_tracep = NULL;
					p->p_traceflag = 0;
					(void) vn_close(vp, FREAD|FWRITE,
					    p->p_ucred, p);
				} else
					error = EPERM;
			}
		}
		goto done;
	}
	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * do it
	 */
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		for (p = pg->pg_members.lh_first; p != 0; p = p->p_pglist.le_next)
			if (descend)
				ret |= ktrsetchildren(curp, p, ops, facs, vp);
			else
				ret |= ktrops(curp, p, ops, facs, vp);

	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(curp, p, ops, facs, vp);
		else
			ret |= ktrops(curp, p, ops, facs, vp);
	}
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL)
		(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
	curp->p_traceflag &= ~KTRFAC_ACTIVE;
	return (error);
}

int
ktrops(curp, p, ops, facs, vp)
	struct proc *p, *curp;
	int ops, facs;
	struct vnode *vp;
{

	if (!ktrcanset(curp, p))
		return (0);
	if (ops == KTROP_SET) {
		if (p->p_tracep != vp) {
			/*
			 * if trace file already in use, relinquish
			 */
			if (p->p_tracep != NULL)
				vrele(p->p_tracep);
			VREF(vp);
			p->p_tracep = vp;
		}
		p->p_traceflag |= facs;
		if (curp->p_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			if (p->p_tracep != NULL) {
				vrele(p->p_tracep);
				p->p_tracep = NULL;
			}
		}
	}

	return (1);
}

ktrsetchildren(curp, top, ops, facs, vp)
	struct proc *curp, *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (p->p_children.lh_first)
			p = p->p_children.lh_first;
		else for (;;) {
			if (p == top)
				return (ret);
			if (p->p_sibling.le_next) {
				p = p->p_sibling.le_next;
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

ktrwrite(vp, kth)
	struct vnode *vp;
	register struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	register struct proc *p = curproc;	/* XXX */
	int error;

	if (vp == NULL)
		return;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = (struct proc *)0;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}
	VOP_LOCK(vp);
	error = VOP_WRITE(vp, &auio, IO_UNIT|IO_APPEND, p->p_ucred);
	VOP_UNLOCK(vp);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {
		if (p->p_tracep == vp) {
			p->p_tracep = NULL;
			p->p_traceflag = 0;
			vrele(vp);
		}
	}
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
ktrcanset(callp, targetp)
	struct proc *callp, *targetp;
{
	register struct pcred *caller = callp->p_cred;
	register struct pcred *target = targetp->p_cred;

	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
	     target->p_ruid == target->p_svuid &&
	     caller->p_rgid == target->p_rgid &&	/* XXX */
	     target->p_rgid == target->p_svgid &&
	     (targetp->p_traceflag & KTRFAC_ROOT) == 0) ||
	     caller->pc_ucred->cr_uid == 0)
		return (1);

	return (0);
}

#endif
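
/*
 * Illustrative example (editor's sketch, not part of the kernel source):
 * a minimal userland program driving the code above through the ktrace(2)
 * system call, assuming the prototype declared in <sys/ktrace.h>.
 * KTROP_SET with a facility mask enables the corresponding ktr* routines
 * for the given pid via ktrace()/ktrops(); KTROP_CLEAR disables them, and
 * the file argument is ignored in that case (see the ops != KTROP_CLEAR
 * test above).  The trace file name "ktrace.out" is arbitrary.
 *
 *	#include <sys/param.h>
 *	#include <sys/time.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		int facs = KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI;
 *
 *		if (ktrace("ktrace.out", KTROP_SET, facs, getpid()) < 0) {
 *			perror("ktrace");
 *			return (1);
 *		}
 *		(void) getpid();
 *		if (ktrace(NULL, KTROP_CLEAR, facs, getpid()) < 0)
 *			perror("ktrace");
 *		return (0);
 *	}
 *
 * The getpid() call between set and clear exists only to generate
 * KTR_SYSCALL and KTR_SYSRET records that can then be inspected with
 * kdump(1).
 */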