/*	$NetBSD: kern_ktrace.c,v 1.67 2003/01/18 10:06:27 thorpej Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.5 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_ktrace.c,v 1.67 2003/01/18 10:06:27 thorpej Exp $");

#include "opt_ktrace.h"
#include "opt_compat_mach.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
#include <sys/filedesc.h>
#include <sys/ioctl.h>

#include <sys/mount.h>
#include <sys/sa.h>
#include <sys/syscallargs.h>

#ifdef KTRACE

int	ktrace_common(struct proc *, int, int, int, struct file *);
void	ktrinitheader(struct ktr_header *, struct proc *, int);
int	ktrops(struct proc *, struct proc *, int, int, struct file *);
int	ktrsetchildren(struct proc *, struct proc *, int, int,
	    struct file *);
int	ktrwrite(struct proc *, struct ktr_header *);
int	ktrcanset(struct proc *, struct proc *);
int	ktrsamefile(struct file *, struct file *);

/*
 * "deep" compare of two files for the purposes of clearing a trace.
 * Returns true if they're the same open file, or if they point at the
 * same underlying vnode/socket.
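 * (KTROP_CLEARFILE has to stop tracing in every process writing to the
 * trace file, even when it was attached through a different descriptor.)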
 */

int
ktrsamefile(f1, f2)
	struct file *f1;
	struct file *f2;
{
	return ((f1 == f2) ||
	    ((f1 != NULL) && (f2 != NULL) &&
		(f1->f_type == f2->f_type) &&
		(f1->f_data == f2->f_data)));
}

void
ktrderef(p)
	struct proc *p;
{
	struct file *fp = p->p_tracep;
	p->p_traceflag = 0;
	if (fp == NULL)
		return;
	FILE_USE(fp);

	/*
	 * The ktrace file descriptor can't be watched (it is not visible
	 * to userspace), so no kqueue handling is needed here.
	 */
	closef(fp, NULL);

	p->p_tracep = NULL;
}

void
ktradref(p)
	struct proc *p;
{
	struct file *fp = p->p_tracep;

	fp->f_count++;
}

void
ktrinitheader(kth, p, type)
	struct ktr_header *kth;
	struct proc *p;
	int type;
{

	memset(kth, 0, sizeof(*kth));
	kth->ktr_type = type;
	microtime(&kth->ktr_time);
	kth->ktr_pid = p->p_pid;
	memcpy(kth->ktr_comm, p->p_comm, MAXCOMLEN);
}

void
ktrsyscall(p, code, realcode, callp, args)
	struct proc *p;
	register_t code;
	register_t realcode;
	const struct sysent *callp;
	register_t args[];
{
	struct ktr_header kth;
	struct ktr_syscall *ktp;
	register_t *argp;
	int argsize;
	size_t len;
	u_int i;

	if (callp == NULL)
		callp = p->p_emul->e_sysent;

	argsize = callp[code].sy_narg * sizeof (register_t);
	len = sizeof(struct ktr_syscall) + argsize;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_SYSCALL);
	ktp = malloc(len, M_TEMP, M_WAITOK);
	ktp->ktr_code = realcode;
	ktp->ktr_argsize = argsize;
	argp = (register_t *)((char *)ktp + sizeof(struct ktr_syscall));
	for (i = 0; i < (argsize / sizeof(*argp)); i++)
		*argp++ = args[i];
	kth.ktr_buf = (caddr_t)ktp;
	kth.ktr_len = len;
	(void) ktrwrite(p, &kth);
	free(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktrsysret(p, code, error, retval)
	struct proc *p;
	register_t code;
	int error;
	register_t retval;
{
	struct ktr_header kth;
	struct ktr_sysret ktp;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_SYSRET);
	ktp.ktr_code = code;
	ktp.ktr_eosys = 0;			/* XXX unused */
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */
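	/* Only the first return value is recorded; retval[1] is lost. */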

	kth.ktr_buf = (caddr_t)&ktp;
	kth.ktr_len = sizeof(struct ktr_sysret);

	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktrnamei(p, path)
	struct proc *p;
	char *path;
{
	struct ktr_header kth;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_NAMEI);
	kth.ktr_len = strlen(path);
	kth.ktr_buf = path;

	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktremul(p)
	struct proc *p;
{
	struct ktr_header kth;
	const char *emul = p->p_emul->e_name;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_EMUL);
	kth.ktr_len = strlen(emul);
	kth.ktr_buf = (caddr_t)emul;

	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktrgenio(p, fd, rw, iov, len, error)
	struct proc *p;
	int fd;
	enum uio_rw rw;
	struct iovec *iov;
	int len;
	int error;
{
	struct ktr_header kth;
	struct ktr_genio *ktp;
	caddr_t cp;
	int resid = len, cnt;
	int buflen;

	if (error)
		return;

	p->p_traceflag |= KTRFAC_ACTIVE;

	buflen = min(PAGE_SIZE, len + sizeof(struct ktr_genio));

	ktrinitheader(&kth, p, KTR_GENIO);
	ktp = malloc(buflen, M_TEMP, M_WAITOK);
	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;

	kth.ktr_buf = (caddr_t)ktp;

	cp = (caddr_t)((char *)ktp + sizeof(struct ktr_genio));
	buflen -= sizeof(struct ktr_genio);

	while (resid > 0) {
#if 0 /* XXX NJWLWP */
		KDASSERT(p->p_cpu != NULL);
		KDASSERT(p->p_cpu == curcpu());
#endif
		/* XXX NJWLWP */
		if (curcpu()->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
			preempt(1);

		cnt = min(iov->iov_len, buflen);
		if (cnt > resid)
			cnt = resid;
		if (copyin(iov->iov_base, cp, cnt))
			break;

		kth.ktr_len = cnt + sizeof(struct ktr_genio);

		if (__predict_false(ktrwrite(p, &kth) != 0))
			break;

		iov->iov_base = (caddr_t)iov->iov_base + cnt;
		iov->iov_len -= cnt;

		if (iov->iov_len == 0)
			iov++;

		resid -= cnt;
	}

	free(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktrpsig(p, sig, action, mask, code)
	struct proc *p;
	int sig;
	sig_t action;
	sigset_t *mask;
	int code;
{
	struct ktr_header kth;
	struct ktr_psig kp;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_PSIG);
	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = *mask;
	kp.code = code;
	kth.ktr_buf = (caddr_t)&kp;
	kth.ktr_len = sizeof(struct ktr_psig);

	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

void
ktrcsw(p, out, user)
	struct proc *p;
	int out;
	int user;
{
	struct ktr_header kth;
	struct ktr_csw kc;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_CSW);
	kc.out = out;
	kc.user = user;
	kth.ktr_buf = (caddr_t)&kc;
	kth.ktr_len = sizeof(struct ktr_csw);

	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

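/*
 * Record a user-supplied (utrace(2)) entry.  The label is copied in from
 * user space when "ustr" is set, otherwise it is taken from kernel space.
 */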
void
ktruser(p, id, addr, len, ustr)
	struct proc *p;
	const char *id;
	void *addr;
	size_t len;
	int ustr;
{
	struct ktr_header kth;
	struct ktr_user *ktp;
	caddr_t user_dta;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_USER);
	ktp = malloc(sizeof(struct ktr_user) + len, M_TEMP, M_WAITOK);
	if (ustr) {
		if (copyinstr(id, ktp->ktr_id, KTR_USER_MAXIDLEN, NULL) != 0)
			ktp->ktr_id[0] = '\0';
	} else
		strncpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN);
	ktp->ktr_id[KTR_USER_MAXIDLEN-1] = '\0';

	user_dta = (caddr_t)((char *)ktp + sizeof(struct ktr_user));
	if (copyin(addr, (void *)user_dta, len) != 0)
		len = 0;

	kth.ktr_buf = (void *)ktp;
	kth.ktr_len = sizeof(struct ktr_user) + len;
	(void) ktrwrite(p, &kth);

	free(ktp, M_TEMP);
	p->p_traceflag &= ~KTRFAC_ACTIVE;

}

void
ktrmmsg(p, msgh, size)
	struct proc *p;
	const void *msgh;
	size_t size;
{
	struct ktr_header kth;
	struct ktr_mmsg *kp;

	p->p_traceflag |= KTRFAC_ACTIVE;
	ktrinitheader(&kth, p, KTR_MMSG);

	kp = (struct ktr_mmsg *)msgh;
	kth.ktr_buf = (caddr_t)kp;
	kth.ktr_len = size;
	(void) ktrwrite(p, &kth);
	p->p_traceflag &= ~KTRFAC_ACTIVE;
}

/* Interface and common routines */

int
ktrace_common(curp, ops, facs, pid, fp)
	struct proc *curp;
	int ops;
	int facs;
	int pid;
	struct file *fp;
{
	int ret = 0;
	int error = 0;
	int one = 1;
	int descend;
	struct proc *p;
	struct pgrp *pg;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	descend = ops & KTRFLAG_DESCEND;
	facs = facs & ~((unsigned) KTRFAC_ROOT);

	/*
	 * Clear all uses of the tracefile
	 */
	if (KTROP(ops) == KTROP_CLEARFILE) {
		proclist_lock_read();
		for (p = LIST_FIRST(&allproc); p != NULL;
		     p = LIST_NEXT(p, p_list)) {
			if (ktrsamefile(p->p_tracep, fp)) {
				if (ktrcanset(curp, p))
					ktrderef(p);
				else
					error = EPERM;
			}
		}
		proclist_unlock_read();
		goto done;
	}

	/*
	 * Mark fp non-blocking, to avoid problems from possible deadlocks.
	 */

	if (fp != NULL) {
		fp->f_flag |= FNONBLOCK;
		(*fp->f_ops->fo_ioctl)(fp, FIONBIO, (caddr_t)&one, curp);
	}

	/*
	 * need something to (un)trace (XXX - why is this here?)
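	 * (facs can be zero here either because the caller passed no
	 * facilities at all, or passed only KTRFAC_ROOT, which was
	 * masked off above.)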
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}
	/*
	 * do it
	 */
	if (pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		for (p = LIST_FIRST(&pg->pg_members); p != NULL;
		     p = LIST_NEXT(p, p_pglist)) {
			if (descend)
				ret |= ktrsetchildren(curp, p, ops, facs, fp);
			else
				ret |= ktrops(curp, p, ops, facs, fp);
		}

	} else {
		/*
		 * by pid
		 */
		p = pfind(pid);
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(curp, p, ops, facs, fp);
		else
			ret |= ktrops(curp, p, ops, facs, fp);
	}
	if (!ret)
		error = EPERM;
done:
	curp->p_traceflag &= ~KTRFAC_ACTIVE;
	return (error);
}

/*
 * fktrace system call
 */
/* ARGSUSED */
int
sys_fktrace(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_fktrace_args /* {
		syscallarg(int) fd;
		syscallarg(int) ops;
		syscallarg(int) facs;
		syscallarg(int) pid;
	} */ *uap = v;
	struct proc *curp = l->l_proc;
	struct file *fp = NULL;
	struct filedesc *fdp = curp->p_fd;

	if ((fp = fd_getfile(fdp, SCARG(uap, fd))) == NULL)
		return (EBADF);

	if ((fp->f_flag & FWRITE) == 0)
		return (EBADF);

	return ktrace_common(curp, SCARG(uap, ops),
	    SCARG(uap, facs), SCARG(uap, pid), fp);
}

/*
 * ktrace system call
 */
/* ARGSUSED */
int
sys_ktrace(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
	struct sys_ktrace_args /* {
		syscallarg(const char *) fname;
		syscallarg(int) ops;
		syscallarg(int) facs;
		syscallarg(int) pid;
	} */ *uap = v;
	struct proc *curp = l->l_proc;
	struct vnode *vp = NULL;
	struct file *fp = NULL;
	int fd;
	int ops = SCARG(uap, ops);
	int error = 0;
	struct nameidata nd;

	ops = KTROP(ops) | (ops & KTRFLAG_DESCEND);

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, SCARG(uap, fname),
		    curp);
		if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0) {
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (error);
		}
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, curp->p_ucred, curp);
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			return (EACCES);
		}
		/*
		 * XXX This uses up a file descriptor slot in the
		 * tracing process for the duration of this syscall.
		 * This is not expected to be a problem.  If
		 * falloc(NULL, ...) DTRT we could skip that part, but
		 * that would require changing its interface to allow
		 * the caller to pass in a ucred..
		 *
		 * This will FILE_USE the fp it returns, if any.
		 * Keep it in use until we return.
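		 * The descriptor slot itself is released again with
		 * fdrelease() in the "done:" path below.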
		 */
		if ((error = falloc(curp, &fp, &fd)) != 0)
			goto done;

		fp->f_flag = FWRITE|FAPPEND;
		fp->f_type = DTYPE_VNODE;
		fp->f_ops = &vnops;
		fp->f_data = (caddr_t)vp;
		FILE_SET_MATURE(fp);
		vp = NULL;
	}
	error = ktrace_common(curp, SCARG(uap, ops), SCARG(uap, facs),
	    SCARG(uap, pid), fp);
done:
	if (vp != NULL)
		(void) vn_close(vp, FWRITE, curp->p_ucred, curp);
	if (fp != NULL) {
		FILE_UNUSE(fp, curp);	/* release file */
		fdrelease(curp, fd);	/* release fd table slot */
	}
	return (error);
}

int
ktrops(curp, p, ops, facs, fp)
	struct proc *curp;
	struct proc *p;
	int ops;
	int facs;
	struct file *fp;
{

	if (!ktrcanset(curp, p))
		return (0);
	if (KTROP(ops) == KTROP_SET) {
		if (p->p_tracep != fp) {
			/*
			 * if trace file already in use, relinquish
			 */
			ktrderef(p);
			p->p_tracep = fp;
			ktradref(p);
		}
		p->p_traceflag |= facs;
		if (curp->p_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			ktrderef(p);
		}
	}

	/*
	 * Emit an emulation record every time there is a ktrace
	 * change/attach request.
	 */
	if (KTRPOINT(p, KTR_EMUL))
		ktremul(p);
#ifdef	__HAVE_SYSCALL_INTERN
	(*p->p_emul->e_syscall_intern)(p);
#endif

	return (1);
}

int
ktrsetchildren(curp, top, ops, facs, fp)
	struct proc *curp;
	struct proc *top;
	int ops;
	int facs;
	struct file *fp;
{
	struct proc *p;
	int ret = 0;

	p = top;
	for (;;) {
		ret |= ktrops(curp, p, ops, facs, fp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (LIST_FIRST(&p->p_children) != NULL)
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling) != NULL) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

int
ktrwrite(p, kth)
	struct proc *p;
	struct ktr_header *kth;
{
	struct uio auio;
	struct iovec aiov[2];
	int error, tries;
	struct file *fp = p->p_tracep;

	if (fp == NULL)
		return 0;

	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_procp = (struct proc *)0;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
	}

	FILE_USE(fp);

	tries = 0;
	do {
		error = (*fp->f_ops->fo_write)(fp, &fp->f_offset, &auio,
		    fp->f_cred, FOF_UPDATE_OFFSET);
		tries++;
		if (error == EWOULDBLOCK)
			preempt(1);
	} while ((error == EWOULDBLOCK) && (tries < 3));
	FILE_UNUSE(fp, NULL);

	if (__predict_true(error == 0))
		return (0);
	/*
	 * If error encountered, give up tracing on this vnode.  Don't report
	 * EPIPE as this can easily happen with fktrace()/ktruss.
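	 * In either case the trace file is dropped from every process
	 * still using it.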
	 */
	if (error != EPIPE)
		log(LOG_NOTICE,
		    "ktrace write failed, errno %d, tracing stopped\n",
		    error);
	proclist_lock_read();
	for (p = LIST_FIRST(&allproc); p != NULL; p = LIST_NEXT(p, p_list)) {
		if (ktrsamefile(p->p_tracep, fp))
			ktrderef(p);
	}
	proclist_unlock_read();

	return (error);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
int
ktrcanset(callp, targetp)
	struct proc *callp;
	struct proc *targetp;
{
	struct pcred *caller = callp->p_cred;
	struct pcred *target = targetp->p_cred;

	if ((caller->pc_ucred->cr_uid == target->p_ruid &&
	     target->p_ruid == target->p_svuid &&
	     caller->p_rgid == target->p_rgid &&	/* XXX */
	     target->p_rgid == target->p_svgid &&
	     (targetp->p_traceflag & KTRFAC_ROOT) == 0 &&
	     (targetp->p_flag & P_SUGID) == 0) ||
	     caller->pc_ucred->cr_uid == 0)
		return (1);

	return (0);
}
#endif /* KTRACE */

/*
 * Add a user-defined entry to the ktrace records.
 */
int
sys_utrace(l, v, retval)
	struct lwp *l;
	void *v;
	register_t *retval;
{
#ifdef KTRACE
	struct sys_utrace_args /* {
		syscallarg(const char *) label;
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */ *uap = v;
	struct proc *p = l->l_proc;

	if (!KTRPOINT(p, KTR_USER))
		return (0);

	if (SCARG(uap, len) > KTR_USER_MAXLEN)
		return (EINVAL);

	ktruser(p, SCARG(uap, label), SCARG(uap, addr), SCARG(uap, len), 1);

	return (0);
#else /* !KTRACE */
	return ENOSYS;
#endif /* KTRACE */
}