/*	$NetBSD: kern_ktrace.c,v 1.182 2022/07/01 01:07:56 riastradh Exp $	*/

/*-
 * Copyright (c) 2006, 2007, 2008, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.5 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: kern_ktrace.c,v 1.182 2022/07/01 01:07:56 riastradh Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktrace.h>
#include <sys/kmem.h>
#include <sys/syslog.h>
#include <sys/filedesc.h>
#include <sys/ioctl.h>
#include <sys/callout.h>
#include <sys/kauth.h>
#include <sys/cpu.h>

#include <sys/mount.h>
#include <sys/syscallargs.h>

/*
 * TODO:
 *	- need better error reporting?
 *	- userland utility to sort ktrace.out by timestamp.
 *	- keep minimum information in ktrace_entry when rest of alloc failed.
 *	- per trace control of configurable parameters.
 */

struct ktrace_entry {
	TAILQ_ENTRY(ktrace_entry) kte_list;
	struct	ktr_header kte_kth;
	void	*kte_buf;
	size_t	kte_bufsz;
#define	KTE_SPACE		32
	uint8_t kte_space[KTE_SPACE] __aligned(sizeof(register_t));
};

struct ktr_desc {
	TAILQ_ENTRY(ktr_desc) ktd_list;
	int ktd_flags;
#define	KTDF_WAIT		0x0001
#define	KTDF_DONE		0x0002
#define	KTDF_BLOCKING		0x0004
#define	KTDF_INTERACTIVE	0x0008
	int ktd_error;
#define	KTDE_ENOMEM		0x0001
#define	KTDE_ENOSPC		0x0002
	int ktd_errcnt;
	int ktd_ref;			/* # of references */
	int ktd_qcount;			/* # of entries in the queue */

	/*
	 * Parameters to control behaviour.
	 */
	int ktd_delayqcnt;		/* # of entries allowed to delay */
	int ktd_wakedelay;		/* delay of wakeup in *ticks* */
	int ktd_intrwakdl;		/* ditto, but when interactive */

	file_t *ktd_fp;			/* trace output file */
	lwp_t *ktd_lwp;			/* our kernel thread */
	TAILQ_HEAD(, ktrace_entry) ktd_queue;
	callout_t ktd_wakch;		/* delayed wakeup */
	kcondvar_t ktd_sync_cv;
	kcondvar_t ktd_cv;
};

static void	ktrwrite(struct ktr_desc *, struct ktrace_entry *);
static int	ktrops(lwp_t *, struct proc *, int, int,
		    struct ktr_desc *);
static int	ktrsetchildren(lwp_t *, struct proc *, int, int,
		    struct ktr_desc *);
static int	ktrcanset(lwp_t *, struct proc *);
static int	ktrsamefile(file_t *, file_t *);
static void	ktr_kmem(lwp_t *, int, const void *, size_t);
static void	ktr_io(lwp_t *, int, enum uio_rw, struct iovec *, size_t);

static struct ktr_desc *
		ktd_lookup(file_t *);
static void	ktdrel(struct ktr_desc *);
static void	ktdref(struct ktr_desc *);
static void	ktefree(struct ktrace_entry *);
static void	ktd_logerrl(struct ktr_desc *, int);
static void	ktrace_thread(void *);
static int	ktrderefall(struct ktr_desc *, int);

/*
 * Default values.
 */
#define	KTD_MAXENTRY		1000	/* XXX: tune */
#define	KTD_TIMEOUT		5	/* XXX: tune */
#define	KTD_DELAYQCNT		100	/* XXX: tune */
#define	KTD_WAKEDELAY		5000	/* XXX: tune */
#define	KTD_INTRWAKDL		100	/* XXX: tune */

/*
 * Patchable variables.
 */
int ktd_maxentry = KTD_MAXENTRY;	/* max # of entries in the queue */
int ktd_timeout = KTD_TIMEOUT;		/* timeout in seconds */
int ktd_delayqcnt = KTD_DELAYQCNT;	/* # of entries allowed to delay */
int ktd_wakedelay = KTD_WAKEDELAY;	/* delay of wakeup in *ms* */
int ktd_intrwakdl = KTD_INTRWAKDL;	/* ditto, but when interactive */

kmutex_t ktrace_lock;
int ktrace_on;
static TAILQ_HEAD(, ktr_desc) ktdq = TAILQ_HEAD_INITIALIZER(ktdq);
static pool_cache_t kte_cache;

static kauth_listener_t ktrace_listener;

static void
ktd_wakeup(struct ktr_desc *ktd)
{

	callout_stop(&ktd->ktd_wakch);
	cv_signal(&ktd->ktd_cv);
}

static void
ktd_callout(void *arg)
{

	mutex_enter(&ktrace_lock);
	ktd_wakeup(arg);
	mutex_exit(&ktrace_lock);
}

static void
ktd_logerrl(struct ktr_desc *ktd, int error)
{

	ktd->ktd_error |= error;
	ktd->ktd_errcnt++;
}

#if 0
static void
ktd_logerr(struct proc *p, int error)
{
	struct ktr_desc *ktd;

	KASSERT(mutex_owned(&ktrace_lock));

	ktd = p->p_tracep;
	if (ktd == NULL)
		return;

	ktd_logerrl(ktd, error);
}
#endif

static int
ktrace_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    void *arg0, void *arg1, void *arg2, void *arg3)
{
	struct proc *p;
	int result;
	enum kauth_process_req req;

	result = KAUTH_RESULT_DEFER;
	p = arg0;

	if (action != KAUTH_PROCESS_KTRACE)
		return result;

	req = (enum kauth_process_req)(uintptr_t)arg1;

	/* Privileged; secmodel should handle these. */
	if (req == KAUTH_REQ_PROCESS_KTRACE_PERSISTENT)
		return result;

	if ((p->p_traceflag & KTRFAC_PERSISTENT) ||
	    (p->p_flag & PK_SUGID))
		return result;

	if (kauth_cred_geteuid(cred) == kauth_cred_getuid(p->p_cred) &&
	    kauth_cred_getuid(cred) == kauth_cred_getsvuid(p->p_cred) &&
	    kauth_cred_getgid(cred) == kauth_cred_getgid(p->p_cred) &&
	    kauth_cred_getgid(cred) == kauth_cred_getsvgid(p->p_cred))
		result = KAUTH_RESULT_ALLOW;

	return result;
}

/*
 * Initialise the ktrace system.
 */
void
ktrinit(void)
{

	mutex_init(&ktrace_lock, MUTEX_DEFAULT, IPL_NONE);
	kte_cache = pool_cache_init(sizeof(struct ktrace_entry), 0, 0, 0,
	    "ktrace", &pool_allocator_nointr, IPL_NONE, NULL, NULL, NULL);

	ktrace_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
	    ktrace_listener_cb, NULL);
}

/*
 * Release a reference.  Called with ktrace_lock held.
 */
static void
ktdrel(struct ktr_desc *ktd)
{

	KASSERT(mutex_owned(&ktrace_lock));

	KDASSERT(ktd->ktd_ref != 0);
	KASSERT(ktd->ktd_ref > 0);
	KASSERT(ktrace_on > 0);
	ktrace_on--;
	if (--ktd->ktd_ref <= 0) {
		ktd->ktd_flags |= KTDF_DONE;
		cv_signal(&ktd->ktd_cv);
	}
}

static void
ktdref(struct ktr_desc *ktd)
{

	KASSERT(mutex_owned(&ktrace_lock));

	ktd->ktd_ref++;
	ktrace_on++;
}

static struct ktr_desc *
ktd_lookup(file_t *fp)
{
	struct ktr_desc *ktd;

	KASSERT(mutex_owned(&ktrace_lock));

	for (ktd = TAILQ_FIRST(&ktdq); ktd != NULL;
	    ktd = TAILQ_NEXT(ktd, ktd_list)) {
		if (ktrsamefile(ktd->ktd_fp, fp)) {
			ktdref(ktd);
			break;
		}
	}

	return (ktd);
}

void
ktraddentry(lwp_t *l, struct ktrace_entry *kte, int flags)
{
	struct proc *p = l->l_proc;
	struct ktr_desc *ktd;
#ifdef DEBUG
	struct timeval t1, t2;
#endif

	mutex_enter(&ktrace_lock);

	if (p->p_traceflag & KTRFAC_TRC_EMUL) {
		/* Add emulation trace before first entry for this process */
		p->p_traceflag &= ~KTRFAC_TRC_EMUL;
		mutex_exit(&ktrace_lock);
		ktrexit(l);
		ktremul();
		(void)ktrenter(l);
		mutex_enter(&ktrace_lock);
	}

	/* Tracing may have been cancelled. */
	ktd = p->p_tracep;
	if (ktd == NULL)
		goto freekte;

	/*
	 * Bump the reference count so that the object will remain while
	 * we are here.  Note that the trace is controlled by another
	 * process.
	 */
	ktdref(ktd);

	if (ktd->ktd_flags & KTDF_DONE)
		goto relktd;

	if (ktd->ktd_qcount > ktd_maxentry) {
		ktd_logerrl(ktd, KTDE_ENOSPC);
		goto relktd;
	}
	TAILQ_INSERT_TAIL(&ktd->ktd_queue, kte, kte_list);
	ktd->ktd_qcount++;
	if (ktd->ktd_flags & KTDF_BLOCKING)
		goto skip_sync;

	if (flags & KTA_WAITOK &&
	    (/* flags & KTA_LARGE */0 || ktd->ktd_flags & KTDF_WAIT ||
	    ktd->ktd_qcount > ktd_maxentry >> 1))
		/*
		 * Sync with the writer thread, since we're requesting a
		 * rather big entry or many requests are pending.
		 */
		do {
			ktd->ktd_flags |= KTDF_WAIT;
			ktd_wakeup(ktd);
#ifdef DEBUG
			getmicrouptime(&t1);
#endif
			if (cv_timedwait(&ktd->ktd_sync_cv, &ktrace_lock,
			    ktd_timeout * hz) != 0) {
				ktd->ktd_flags |= KTDF_BLOCKING;
				/*
				 * Maybe the writer thread is blocking
				 * completely for some reason, but
				 * don't stop the target process forever.
				 */
				log(LOG_NOTICE, "ktrace timeout\n");
				break;
			}
#ifdef DEBUG
			getmicrouptime(&t2);
			timersub(&t2, &t1, &t2);
			if (t2.tv_sec > 0)
				log(LOG_NOTICE,
				    "ktrace long wait: %lld.%06ld\n",
				    (long long)t2.tv_sec, (long)t2.tv_usec);
#endif
		} while (p->p_tracep == ktd &&
		    (ktd->ktd_flags & (KTDF_WAIT | KTDF_DONE)) == KTDF_WAIT);
	else {
		/* Schedule delayed wakeup */
		if (ktd->ktd_qcount > ktd->ktd_delayqcnt)
			ktd_wakeup(ktd);	/* Wakeup now */
		else if (!callout_pending(&ktd->ktd_wakch))
			callout_reset(&ktd->ktd_wakch,
			    ktd->ktd_flags & KTDF_INTERACTIVE ?
			    ktd->ktd_intrwakdl : ktd->ktd_wakedelay,
			    ktd_callout, ktd);
	}

skip_sync:
	ktdrel(ktd);
	mutex_exit(&ktrace_lock);
	ktrexit(l);
	return;

relktd:
	ktdrel(ktd);

freekte:
	mutex_exit(&ktrace_lock);
	ktefree(kte);
	ktrexit(l);
}

static void
ktefree(struct ktrace_entry *kte)
{

	if (kte->kte_buf != kte->kte_space)
		kmem_free(kte->kte_buf, kte->kte_bufsz);
	pool_cache_put(kte_cache, kte);
}

/*
 * "deep" compare of two files for the purposes of clearing a trace.
 * Returns true if they're the same open file, or if they point at the
 * same underlying vnode/socket.
 */
static int
ktrsamefile(file_t *f1, file_t *f2)
{

	return ((f1 == f2) ||
	    ((f1 != NULL) && (f2 != NULL) &&
	    (f1->f_type == f2->f_type) &&
	    (f1->f_data == f2->f_data)));
}

void
ktrderef(struct proc *p)
{
	struct ktr_desc *ktd = p->p_tracep;

	KASSERT(mutex_owned(&ktrace_lock));

	p->p_traceflag = 0;
	if (ktd == NULL)
		return;
	p->p_tracep = NULL;

	cv_broadcast(&ktd->ktd_sync_cv);
	ktdrel(ktd);
}

void
ktradref(struct proc *p)
{
	struct ktr_desc *ktd = p->p_tracep;

	KASSERT(mutex_owned(&ktrace_lock));

	ktdref(ktd);
}

static int
ktrderefall(struct ktr_desc *ktd, int auth)
{
	lwp_t *curl = curlwp;
	struct proc *p;
	int error = 0;

	mutex_enter(&proc_lock);
	PROCLIST_FOREACH(p, &allproc) {
		if (p->p_tracep != ktd)
			continue;
		mutex_enter(p->p_lock);
		mutex_enter(&ktrace_lock);
		if (p->p_tracep == ktd) {
			if (!auth || ktrcanset(curl, p))
				ktrderef(p);
			else
				error = EPERM;
		}
		mutex_exit(&ktrace_lock);
		mutex_exit(p->p_lock);
	}
	mutex_exit(&proc_lock);

	return error;
}

int
ktealloc(struct ktrace_entry **ktep, void **bufp, lwp_t *l, int type,
    size_t sz)
{
	struct proc *p = l->l_proc;
	struct ktrace_entry *kte;
	struct ktr_header *kth;
	void *buf;

	if (ktrenter(l))
		return EAGAIN;

	kte = pool_cache_get(kte_cache, PR_WAITOK);
	if (sz > sizeof(kte->kte_space)) {
		buf = kmem_alloc(sz, KM_SLEEP);
	} else
		buf = kte->kte_space;

	kte->kte_bufsz = sz;
	kte->kte_buf = buf;

	kth = &kte->kte_kth;
	(void)memset(kth, 0, sizeof(*kth));
	kth->ktr_len = sz;
	kth->ktr_type = type;
	kth->ktr_pid = p->p_pid;
	memcpy(kth->ktr_comm, p->p_comm, MAXCOMLEN);
	kth->ktr_version = KTRFAC_VERSION(p->p_traceflag);
	kth->ktr_lid = l->l_lid;
	nanotime(&kth->ktr_ts);

	*ktep = kte;
	*bufp = buf;

	return 0;
}

void
ktesethdrlen(struct ktrace_entry *kte, size_t l)
{
	kte->kte_kth.ktr_len = l;
}

void
ktr_syscall(register_t code, const register_t args[], int narg)
{
	lwp_t *l = curlwp;
	struct proc *p = l->l_proc;
	struct ktrace_entry *kte;
	struct ktr_syscall *ktp;
	register_t *argp;
	size_t len;
	u_int i;

	if (!KTRPOINT(p, KTR_SYSCALL))
		return;

	len = sizeof(struct ktr_syscall) + narg * sizeof argp[0];

	if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSCALL, len))
		return;

	ktp->ktr_code = code;
	ktp->ktr_argsize = narg * sizeof argp[0];
	argp = (register_t *)(ktp + 1);
	for (i = 0; i < narg; i++)
		*argp++ = args[i];

	ktraddentry(l, kte, KTA_WAITOK);
}

void
ktr_sysret(register_t code, int error, register_t *retval)
{
	lwp_t *l = curlwp;
	struct ktrace_entry *kte;
	struct ktr_sysret *ktp;

	if (!KTRPOINT(l->l_proc, KTR_SYSRET))
		return;

	if (ktealloc(&kte, (void *)&ktp, l, KTR_SYSRET,
	    sizeof(struct ktr_sysret)))
		return;

	ktp->ktr_code = code;
	ktp->ktr_eosys = 0;			/* XXX unused */
	ktp->ktr_error = error;
	ktp->ktr_retval = retval && error == 0 ? retval[0] : 0;
	ktp->ktr_retval_1 = retval && error == 0 ? retval[1] : 0;

	ktraddentry(l, kte, KTA_WAITOK);
}

void
ktr_namei(const char *path, size_t pathlen)
{
	lwp_t *l = curlwp;

	if (!KTRPOINT(l->l_proc, KTR_NAMEI))
		return;

	ktr_kmem(l, KTR_NAMEI, path, pathlen);
}

void
ktr_namei2(const char *eroot, size_t erootlen,
    const char *path, size_t pathlen)
{
	lwp_t *l = curlwp;
	struct ktrace_entry *kte;
	void *buf;

	if (!KTRPOINT(l->l_proc, KTR_NAMEI))
		return;

	if (ktealloc(&kte, &buf, l, KTR_NAMEI, erootlen + pathlen))
		return;
	memcpy(buf, eroot, erootlen);
	buf = (char *)buf + erootlen;
	memcpy(buf, path, pathlen);
	ktraddentry(l, kte, KTA_WAITOK);
}

void
ktr_emul(void)
{
	lwp_t *l = curlwp;
	const char *emul = l->l_proc->p_emul->e_name;

	if (!KTRPOINT(l->l_proc, KTR_EMUL))
		return;

	ktr_kmem(l, KTR_EMUL, emul, strlen(emul));
}

void
ktr_execarg(const void *bf, size_t len)
{
	lwp_t *l = curlwp;

	if (!KTRPOINT(l->l_proc, KTR_EXEC_ARG))
		return;

	ktr_kmem(l, KTR_EXEC_ARG, bf, len);
}

void
ktr_execenv(const void *bf, size_t len)
{
	lwp_t *l = curlwp;

	if (!KTRPOINT(l->l_proc, KTR_EXEC_ENV))
		return;

	ktr_kmem(l, KTR_EXEC_ENV, bf, len);
}

void
ktr_execfd(int fd, u_int dtype)
{
	struct ktrace_entry *kte;
	struct ktr_execfd *ktp;
	lwp_t *l = curlwp;

	if (!KTRPOINT(l->l_proc, KTR_EXEC_FD))
		return;

	if (ktealloc(&kte, (void *)&ktp, l, KTR_EXEC_FD, sizeof(*ktp)))
		return;

	ktp->ktr_fd = fd;
	ktp->ktr_dtype = dtype;
	ktraddentry(l, kte, KTA_WAITOK);
}

static void
ktr_kmem(lwp_t *l, int type, const void *bf, size_t len)
{
	struct ktrace_entry *kte;
	void *buf;

	if (ktealloc(&kte, &buf, l, type, len))
		return;
	memcpy(buf, bf, len);
	ktraddentry(l, kte, KTA_WAITOK);
}

static void
ktr_io(lwp_t *l, int fd, enum uio_rw rw, struct iovec *iov, size_t len)
{
	struct ktrace_entry *kte;
	struct ktr_genio *ktp;
	size_t resid = len, cnt, buflen;
	char *cp;

next:
	buflen = uimin(PAGE_SIZE, resid + sizeof(struct ktr_genio));

	if (ktealloc(&kte, (void *)&ktp, l, KTR_GENIO, buflen))
		return;

	ktp->ktr_fd = fd;
	ktp->ktr_rw = rw;

	cp = (void *)(ktp + 1);
	buflen -= sizeof(struct ktr_genio);
	kte->kte_kth.ktr_len = sizeof(struct ktr_genio);

	while (buflen > 0) {
		cnt = uimin(iov->iov_len, buflen);
		if (copyin(iov->iov_base, cp, cnt) != 0)
			goto out;
		kte->kte_kth.ktr_len += cnt;
		cp += cnt;
		buflen -= cnt;
		resid -= cnt;
		iov->iov_len -= cnt;
		if (iov->iov_len == 0)
			iov++;
		else
			iov->iov_base = (char *)iov->iov_base + cnt;
	}

	/*
	 * Don't push too many entries at once; it would cause kmem
	 * map shortage.
	 */
	ktraddentry(l, kte, KTA_WAITOK | KTA_LARGE);
	if (resid > 0) {
		if (preempt_needed()) {
			(void)ktrenter(l);
			preempt();
			ktrexit(l);
		}

		goto next;
	}

	return;

out:
	ktefree(kte);
	ktrexit(l);
}

void
ktr_genio(int fd, enum uio_rw rw, const void *addr, size_t len, int error)
{
	lwp_t *l = curlwp;
	struct iovec iov;

	if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0)
		return;
	iov.iov_base = __UNCONST(addr);
	iov.iov_len = len;
	ktr_io(l, fd, rw, &iov, len);
}

void
ktr_geniov(int fd, enum uio_rw rw, struct iovec *iov, size_t len, int error)
{
	lwp_t *l = curlwp;

	if (!KTRPOINT(l->l_proc, KTR_GENIO) || error != 0)
		return;
	ktr_io(l, fd, rw, iov, len);
}

void
ktr_mibio(int fd, enum uio_rw rw, const void *addr, size_t len, int error)
{
	lwp_t *l = curlwp;
	struct iovec iov;

	if (!KTRPOINT(l->l_proc, KTR_MIB) || error != 0)
		return;
	iov.iov_base = __UNCONST(addr);
	iov.iov_len = len;
	ktr_io(l, fd, rw, &iov, len);
}

void
ktr_psig(int sig, sig_t action, const sigset_t *mask,
    const ksiginfo_t *ksi)
{
	struct ktrace_entry *kte;
	lwp_t *l = curlwp;
	struct {
		struct ktr_psig	kp;
		siginfo_t	si;
	} *kbuf;

	if (!KTRPOINT(l->l_proc, KTR_PSIG))
		return;

	if (ktealloc(&kte, (void *)&kbuf, l, KTR_PSIG, sizeof(*kbuf)))
		return;

	memset(&kbuf->kp, 0, sizeof(kbuf->kp));
	kbuf->kp.signo = (char)sig;
	kbuf->kp.action = action;
	kbuf->kp.mask = *mask;

	if (ksi) {
		kbuf->kp.code = KSI_TRAPCODE(ksi);
		(void)memset(&kbuf->si, 0, sizeof(kbuf->si));
		kbuf->si._info = ksi->ksi_info;
		kte->kte_kth.ktr_len = sizeof(*kbuf);
	} else {
		kbuf->kp.code = 0;
		kte->kte_kth.ktr_len = sizeof(struct ktr_psig);
	}

	ktraddentry(l, kte, KTA_WAITOK);
}

void
ktr_csw(int out, int user, const struct syncobj *syncobj)
{
	lwp_t *l = curlwp;
	struct proc *p = l->l_proc;
	struct ktrace_entry *kte;
	struct ktr_csw *kc;

	if (!KTRPOINT(p, KTR_CSW))
		return;

	/*
	 * Don't record context switches resulting from blocking on
	 * locks; the results are not useful, and the mutex may be in a
	 * softint, which would lead us to ktealloc in softint context,
	 * which is forbidden.
	 */
	if (syncobj == &mutex_syncobj || syncobj == &rw_syncobj)
		return;
	KASSERT(!cpu_intr_p());
	KASSERT(!cpu_softintr_p());

	/*
	 * We can't sleep if we're already going to sleep (if the
	 * original condition is met during the sleep, we hang up).
	 *
	 * XXX This is not ideal: it would be better to maintain a pool
	 * of ktes and actually push this to the kthread when context
	 * switch happens, however given the points where we are called
	 * from that is difficult to do.
	 */
	if (out) {
		if (ktrenter(l))
			return;

		nanotime(&l->l_ktrcsw);
		l->l_pflag |= LP_KTRCSW;
		if (user)
			l->l_pflag |= LP_KTRCSWUSER;
		else
			l->l_pflag &= ~LP_KTRCSWUSER;

		ktrexit(l);
		return;
	}

	/*
	 * On the way back in, we need to record twice: once for entry, and
	 * once for exit.
	 */
	if ((l->l_pflag & LP_KTRCSW) != 0) {
		struct timespec *ts;
		l->l_pflag &= ~LP_KTRCSW;

		if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc)))
			return;

		kc->out = 1;
		kc->user = ((l->l_pflag & LP_KTRCSWUSER) != 0);

		ts = &l->l_ktrcsw;
		switch (KTRFAC_VERSION(p->p_traceflag)) {
		case 0:
			kte->kte_kth.ktr_otv.tv_sec = ts->tv_sec;
			kte->kte_kth.ktr_otv.tv_usec = ts->tv_nsec / 1000;
			break;
		case 1:
			kte->kte_kth.ktr_ots.tv_sec = ts->tv_sec;
			kte->kte_kth.ktr_ots.tv_nsec = ts->tv_nsec;
			break;
		case 2:
			kte->kte_kth.ktr_ts.tv_sec = ts->tv_sec;
			kte->kte_kth.ktr_ts.tv_nsec = ts->tv_nsec;
			break;
		default:
			break;
		}

		ktraddentry(l, kte, KTA_WAITOK);
	}

	if (ktealloc(&kte, (void *)&kc, l, KTR_CSW, sizeof(*kc)))
		return;

	kc->out = 0;
	kc->user = user;

	ktraddentry(l, kte, KTA_WAITOK);
}

bool
ktr_point(int fac_bit)
{
	return curlwp->l_proc->p_traceflag & fac_bit;
}

int
ktruser(const char *id, void *addr, size_t len, int ustr)
{
	struct ktrace_entry *kte;
	struct ktr_user *ktp;
	lwp_t *l = curlwp;
	void *user_dta;
	int error;

	if (!KTRPOINT(l->l_proc, KTR_USER))
		return 0;

	if (len > KTR_USER_MAXLEN)
		return ENOSPC;

	error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len);
	if (error != 0)
		return error;

	if (ustr) {
		if (copyinstr(id, ktp->ktr_id, KTR_USER_MAXIDLEN, NULL) != 0)
			ktp->ktr_id[0] = '\0';
	} else
		strncpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN);
	ktp->ktr_id[KTR_USER_MAXIDLEN-1] = '\0';

	user_dta = (void *)(ktp + 1);
	if ((error = copyin(addr, user_dta, len)) != 0)
		kte->kte_kth.ktr_len = 0;

	ktraddentry(l, kte, KTA_WAITOK);
	return error;
}

void
ktr_kuser(const char *id, const void *addr, size_t len)
{
	struct ktrace_entry *kte;
	struct ktr_user *ktp;
	lwp_t *l = curlwp;
	int error;

	if (!KTRPOINT(l->l_proc, KTR_USER))
		return;

	if (len > KTR_USER_MAXLEN)
		return;

	error = ktealloc(&kte, (void *)&ktp, l, KTR_USER, sizeof(*ktp) + len);
	if (error != 0)
		return;

	strncpy(ktp->ktr_id, id, KTR_USER_MAXIDLEN - 1);
	ktp->ktr_id[KTR_USER_MAXIDLEN - 1] = '\0';

	memcpy(ktp + 1, addr, len);

	ktraddentry(l, kte, KTA_WAITOK);
}
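
/*
 * Example (editor's addition, a minimal hypothetical sketch): kernel
 * code can tag its own records into an active trace via the
 * ktr_kuser() interface above.  The "mydrv" label and record layout
 * below are invented for illustration; the real constraints are only
 * that the label fits in KTR_USER_MAXIDLEN and the payload in
 * KTR_USER_MAXLEN.
 *
 *	struct mydrv_rec {
 *		int	unit;
 *		int	state;
 *	} rec = { .unit = 0, .state = 1 };
 *
 *	ktr_kuser("mydrv", &rec, sizeof(rec));
 *
 * kdump(1) prints such entries as USER records under the given label.
 */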

void
ktr_mib(const int *name, u_int namelen)
{
	struct ktrace_entry *kte;
	int *namep;
	size_t size;
	lwp_t *l = curlwp;

	if (!KTRPOINT(l->l_proc, KTR_MIB))
		return;

	size = namelen * sizeof(*name);

	if (ktealloc(&kte, (void *)&namep, l, KTR_MIB, size))
		return;

	(void)memcpy(namep, name, namelen * sizeof(*name));

	ktraddentry(l, kte, KTA_WAITOK);
}

/* Interface and common routines */

int
ktrace_common(lwp_t *curl, int ops, int facs, int pid, file_t **fpp)
{
	struct proc *p;
	struct pgrp *pg;
	struct ktr_desc *ktd = NULL, *nktd;
	file_t *fp = *fpp;
	int ret = 0;
	int error = 0;
	int descend;

	descend = ops & KTRFLAG_DESCEND;
	facs = facs & ~((unsigned) KTRFAC_PERSISTENT);

	(void)ktrenter(curl);

	switch (KTROP(ops)) {

	case KTROP_CLEARFILE:
		/*
		 * Clear all uses of the tracefile
		 */
		mutex_enter(&ktrace_lock);
		ktd = ktd_lookup(fp);
		mutex_exit(&ktrace_lock);
		if (ktd == NULL)
			goto done;
		error = ktrderefall(ktd, 1);
		goto done;

	case KTROP_SET:
		mutex_enter(&ktrace_lock);
		ktd = ktd_lookup(fp);
		mutex_exit(&ktrace_lock);
		if (ktd == NULL) {
			nktd = kmem_alloc(sizeof(*nktd), KM_SLEEP);
			TAILQ_INIT(&nktd->ktd_queue);
			callout_init(&nktd->ktd_wakch, CALLOUT_MPSAFE);
			cv_init(&nktd->ktd_cv, "ktrwait");
			cv_init(&nktd->ktd_sync_cv, "ktrsync");
			nktd->ktd_flags = 0;
			nktd->ktd_qcount = 0;
			nktd->ktd_error = 0;
			nktd->ktd_errcnt = 0;
			nktd->ktd_delayqcnt = ktd_delayqcnt;
			nktd->ktd_wakedelay = mstohz(ktd_wakedelay);
			nktd->ktd_intrwakdl = mstohz(ktd_intrwakdl);
			nktd->ktd_ref = 0;
			nktd->ktd_fp = fp;
			mutex_enter(&ktrace_lock);
			ktdref(nktd);
			mutex_exit(&ktrace_lock);

			/*
			 * XXX: not correct.  needs a way to detect
			 * whether ktruss or ktrace.
			 */
			if (fp->f_type == DTYPE_PIPE)
				nktd->ktd_flags |= KTDF_INTERACTIVE;

			mutex_enter(&fp->f_lock);
			fp->f_count++;
			mutex_exit(&fp->f_lock);
			error = kthread_create(PRI_NONE, KTHREAD_MPSAFE, NULL,
			    ktrace_thread, nktd, &nktd->ktd_lwp, "ktrace");
			if (error != 0) {
				kmem_free(nktd, sizeof(*nktd));
				nktd = NULL;
				mutex_enter(&fp->f_lock);
				fp->f_count--;
				mutex_exit(&fp->f_lock);
				goto done;
			}

			mutex_enter(&ktrace_lock);
			ktd = ktd_lookup(fp);
			if (ktd != NULL) {
				ktdrel(nktd);
				nktd = NULL;
			} else {
				TAILQ_INSERT_TAIL(&ktdq, nktd, ktd_list);
				ktd = nktd;
			}
			mutex_exit(&ktrace_lock);
		}
		break;

	case KTROP_CLEAR:
		break;
	}

	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		*fpp = NULL;
		goto done;
	}

	/*
	 * do it
	 */
	mutex_enter(&proc_lock);
	if (pid < 0) {
		/*
		 * by process group
		 */
		pg = pgrp_find(-pid);
		if (pg == NULL)
			error = ESRCH;
		else {
			LIST_FOREACH(p, &pg->pg_members, p_pglist) {
				if (descend)
					ret |= ktrsetchildren(curl, p, ops,
					    facs, ktd);
				else
					ret |= ktrops(curl, p, ops, facs,
					    ktd);
			}
		}

	} else {
		/*
		 * by pid
		 */
		p = proc_find(pid);
		if (p == NULL)
			error = ESRCH;
		else if (descend)
			ret |= ktrsetchildren(curl, p, ops, facs, ktd);
		else
			ret |= ktrops(curl, p, ops, facs, ktd);
	}
	mutex_exit(&proc_lock);
	if (error == 0 && !ret)
		error = EPERM;
	*fpp = NULL;
done:
	if (ktd != NULL) {
		mutex_enter(&ktrace_lock);
		if (error != 0) {
			/*
			 * Wake up the thread so that it can die if we
			 * can't trace any process.
			 */
			ktd_wakeup(ktd);
		}
		if (KTROP(ops) == KTROP_SET || KTROP(ops) == KTROP_CLEARFILE)
			ktdrel(ktd);
		mutex_exit(&ktrace_lock);
	}
	ktrexit(curl);
	return (error);
}

/*
 * fktrace system call
 */
/* ARGSUSED */
int
sys_fktrace(struct lwp *l, const struct sys_fktrace_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(int) ops;
		syscallarg(int) facs;
		syscallarg(int) pid;
	} */
	file_t *fp;
	int error, fd;

	fd = SCARG(uap, fd);
	if ((fp = fd_getfile(fd)) == NULL)
		return (EBADF);
	if ((fp->f_flag & FWRITE) == 0)
		error = EBADF;
	else
		error = ktrace_common(l, SCARG(uap, ops),
		    SCARG(uap, facs), SCARG(uap, pid), &fp);
	fd_putfile(fd);
	return error;
}
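
/*
 * Example (editor's addition, a minimal userland sketch; error
 * handling omitted): unlike ktrace(2), which takes a path, fktrace(2)
 * takes an already-open descriptor, so output can also go to a pipe,
 * which is what the DTYPE_PIPE/KTDF_INTERACTIVE heuristic above tries
 * to detect.
 *
 *	#include <sys/ktrace.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("ktrace.out", O_WRONLY | O_CREAT | O_TRUNC, 0600);
 *	fktrace(fd, KTROP_SET | KTRFLAG_DESCEND,
 *	    KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI, getpid());
 */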

static int
ktrops(lwp_t *curl, struct proc *p, int ops, int facs,
    struct ktr_desc *ktd)
{
	int vers = ops & KTRFAC_VER_MASK;
	int error = 0;

	mutex_enter(p->p_lock);
	mutex_enter(&ktrace_lock);

	if (!ktrcanset(curl, p))
		goto out;

	switch (vers) {
	case KTRFACv0:
	case KTRFACv1:
	case KTRFACv2:
		break;
	default:
		error = EINVAL;
		goto out;
	}

	if (KTROP(ops) == KTROP_SET) {
		if (p->p_tracep != ktd) {
			/*
			 * if trace file already in use, relinquish
			 */
			ktrderef(p);
			p->p_tracep = ktd;
			ktradref(p);
		}
		p->p_traceflag |= facs;
		if (kauth_authorize_process(curl->l_cred, KAUTH_PROCESS_KTRACE,
		    p, KAUTH_ARG(KAUTH_REQ_PROCESS_KTRACE_PERSISTENT), NULL,
		    NULL) == 0)
			p->p_traceflag |= KTRFAC_PERSISTENT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			ktrderef(p);
		}
	}

	if (p->p_traceflag)
		p->p_traceflag |= vers;
	/*
	 * Emit an emulation record every time there is a ktrace
	 * change/attach request.
	 */
	if (KTRPOINT(p, KTR_EMUL))
		p->p_traceflag |= KTRFAC_TRC_EMUL;

	p->p_trace_enabled = trace_is_enabled(p);
#ifdef __HAVE_SYSCALL_INTERN
	(*p->p_emul->e_syscall_intern)(p);
#endif

out:
	mutex_exit(&ktrace_lock);
	mutex_exit(p->p_lock);

	return error ? 0 : 1;
}

static int
ktrsetchildren(lwp_t *curl, struct proc *top, int ops, int facs,
    struct ktr_desc *ktd)
{
	struct proc *p;
	int ret = 0;

	KASSERT(mutex_owned(&proc_lock));

	p = top;
	for (;;) {
		ret |= ktrops(curl, p, ops, facs, ktd);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (LIST_FIRST(&p->p_children) != NULL) {
			p = LIST_FIRST(&p->p_children);
			continue;
		}
		for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling) != NULL) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

static void
ktrwrite(struct ktr_desc *ktd, struct ktrace_entry *kte)
{
	size_t hlen;
	struct uio auio;
	struct iovec aiov[64], *iov;
	struct ktrace_entry *top = kte;
	struct ktr_header *kth;
	file_t *fp = ktd->ktd_fp;
	int error;
next:
	auio.uio_iov = iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_rw = UIO_WRITE;
	auio.uio_resid = 0;
	auio.uio_iovcnt = 0;
	UIO_SETUP_SYSSPACE(&auio);
	do {
		struct timespec ts;
		lwpid_t lid;
		kth = &kte->kte_kth;

		hlen = sizeof(struct ktr_header);
		switch (kth->ktr_version) {
		case 0:
			ts = kth->ktr_time;

			kth->ktr_otv.tv_sec = ts.tv_sec;
			kth->ktr_otv.tv_usec = ts.tv_nsec / 1000;
			kth->ktr_unused = NULL;
			hlen -= sizeof(kth->_v) -
			    MAX(sizeof(kth->_v._v0), sizeof(kth->_v._v1));
			break;
		case 1:
			ts = kth->ktr_time;
			lid = kth->ktr_lid;

			kth->ktr_ots.tv_sec = ts.tv_sec;
			kth->ktr_ots.tv_nsec = ts.tv_nsec;
			kth->ktr_olid = lid;
			hlen -= sizeof(kth->_v) -
			    MAX(sizeof(kth->_v._v0), sizeof(kth->_v._v1));
			break;
		}
		iov->iov_base = (void *)kth;
		iov++->iov_len = hlen;
		auio.uio_resid += hlen;
		auio.uio_iovcnt++;
		if (kth->ktr_len > 0) {
			iov->iov_base = kte->kte_buf;
			iov++->iov_len = kth->ktr_len;
			auio.uio_resid += kth->ktr_len;
			auio.uio_iovcnt++;
		}
	} while ((kte = TAILQ_NEXT(kte, kte_list)) != NULL &&
	    auio.uio_iovcnt < sizeof(aiov) / sizeof(aiov[0]) - 1);

again:
	error = (*fp->f_ops->fo_write)(fp, &fp->f_offset, &auio,
	    fp->f_cred, FOF_UPDATE_OFFSET);
	switch (error) {

	case 0:
		if (auio.uio_resid > 0)
			goto again;
		if (kte != NULL)
			goto next;
		break;

	case EWOULDBLOCK:
		kpause("ktrzzz", false, 1, NULL);
		goto again;

	default:
		/*
		 * If an error is encountered, give up tracing on this
		 * vnode.  Don't report EPIPE, as this can easily
		 * happen with fktrace()/ktruss.
		 */
#ifndef DEBUG
		if (error != EPIPE)
#endif
			log(LOG_NOTICE,
			    "ktrace write failed, errno %d, tracing stopped\n",
			    error);
		(void)ktrderefall(ktd, 0);
	}

	while ((kte = top) != NULL) {
		top = TAILQ_NEXT(top, kte_list);
		ktefree(kte);
	}
}

static void
ktrace_thread(void *arg)
{
	struct ktr_desc *ktd = arg;
	file_t *fp = ktd->ktd_fp;
	struct ktrace_entry *kte;
	int ktrerr, errcnt;

	mutex_enter(&ktrace_lock);
	for (;;) {
		kte = TAILQ_FIRST(&ktd->ktd_queue);
		if (kte == NULL) {
			if (ktd->ktd_flags & KTDF_WAIT) {
				ktd->ktd_flags &= ~(KTDF_WAIT | KTDF_BLOCKING);
				cv_broadcast(&ktd->ktd_sync_cv);
			}
			if (ktd->ktd_ref == 0)
				break;
			cv_wait(&ktd->ktd_cv, &ktrace_lock);
			continue;
		}
		TAILQ_INIT(&ktd->ktd_queue);
		ktd->ktd_qcount = 0;
		ktrerr = ktd->ktd_error;
		errcnt = ktd->ktd_errcnt;
		ktd->ktd_error = ktd->ktd_errcnt = 0;
		mutex_exit(&ktrace_lock);

		if (ktrerr) {
			log(LOG_NOTICE,
			    "ktrace failed, fp %p, error 0x%x, total %d\n",
			    fp, ktrerr, errcnt);
		}
		ktrwrite(ktd, kte);
		mutex_enter(&ktrace_lock);
	}

	if (ktd_lookup(ktd->ktd_fp) == ktd) {
		TAILQ_REMOVE(&ktdq, ktd, ktd_list);
	} else {
		/* nothing, collision in KTROP_SET */
	}

	callout_halt(&ktd->ktd_wakch, &ktrace_lock);
	callout_destroy(&ktd->ktd_wakch);
	mutex_exit(&ktrace_lock);

	/*
	 * ktrace file descriptors can't be watched (they are not
	 * visible to userspace), so no kqueue stuff here.
	 * XXX: The above comment is wrong, because the fktrace file
	 * descriptor is available in userland.
	 */
	closef(fp);

	cv_destroy(&ktd->ktd_sync_cv);
	cv_destroy(&ktd->ktd_cv);

	kmem_free(ktd, sizeof(*ktd));

	kthread_exit(0);
}

/*
 * Return true if the caller has permission to set the ktracing state
 * of the target.  Essentially, the target can't possess any more
 * permissions than the caller.  KTRFAC_PERSISTENT signifies that the
 * tracing will persist on sugid processes during exec; it is only
 * settable by a process with appropriate credentials.
 *
 * TODO: check groups.  use caller effective gid.
 */
static int
ktrcanset(lwp_t *calll, struct proc *targetp)
{
	KASSERT(mutex_owned(targetp->p_lock));
	KASSERT(mutex_owned(&ktrace_lock));

	if (kauth_authorize_process(calll->l_cred, KAUTH_PROCESS_KTRACE,
	    targetp, NULL, NULL, NULL) == 0)
		return (1);

	return (0);
}

/*
 * Put a user-defined entry into the ktrace records.
 */
int
sys_utrace(struct lwp *l, const struct sys_utrace_args *uap, register_t *retval)
{
	/* {
		syscallarg(const char *) label;
		syscallarg(void *) addr;
		syscallarg(size_t) len;
	} */

	return ktruser(SCARG(uap, label), SCARG(uap, addr),
	    SCARG(uap, len), 1);
}
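
/*
 * Example (editor's addition, a minimal userland sketch): a traced
 * program can inject its own labelled records with utrace(2), which
 * ends up in ktruser() above; kdump(1) shows them as USER records.
 * The "myapp" label and payload are arbitrary, within the
 * KTR_USER_MAXIDLEN and KTR_USER_MAXLEN limits enforced above.
 *
 *	#include <sys/ktrace.h>
 *
 *	char msg[] = "checkpoint reached";
 *	utrace("myapp", msg, sizeof(msg));
 */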