/*
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/sys_process.c,v 1.51.2.6 2003/01/08 03:06:45 kan Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/reg.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <vfs/procfs/procfs.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

/* use the equivalent procfs code */
#if 0
static int
pread (struct proc *procp, unsigned int addr, unsigned int *retval)
{
	int rv;
	vm_map_t map, tmap;
	vm_object_t object;
	vm_map_backing_t ba;
	vm_offset_t kva = 0;
	int page_offset;	/* offset into page */
	vm_offset_t pageno;	/* page number */
	vm_map_entry_t out_entry;
	vm_prot_t out_prot;
	int wflags;
	vm_pindex_t pindex;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_READ, &out_entry,
			   &ba, &pindex, &out_prot, &wflags);
	if (ba)
		object = ba->object;
	else
		object = NULL;

	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_map_lookup_done (tmap, out_entry, 0);

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);

	if (!rv) {
		vm_object_reference XXX (object);

		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			*retval = 0;
			bcopy ((caddr_t)kva + page_offset,
			       retval, sizeof *retval);
		}
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	return rv;
}

static int
pwrite (struct proc *procp, unsigned int addr, unsigned int datum)
{
	int rv;
	vm_map_t map, tmap;
	vm_object_t object;
	vm_map_backing_t ba;
	vm_offset_t kva = 0;
	int page_offset;	/* offset into page */
	vm_offset_t pageno;	/* page number */
	vm_map_entry_t out_entry;
	vm_prot_t out_prot;
	int wflags;
	vm_pindex_t pindex;
	boolean_t fix_prot = 0;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/*
	 * Check the permissions for the area we're interested in.
	 */

	if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE,
				     VM_PROT_WRITE, FALSE) == FALSE) {
		/*
		 * If the page was not writable, we make it so.
		 * XXX It is possible a page may *not* be read/executable,
		 * if a process changes that!
		 */
		fix_prot = 1;
		/* The page isn't writable, so let's try making it so... */
		if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE,
					  VM_PROT_ALL, 0)) != KERN_SUCCESS)
			return EFAULT;	/* I guess... */
	}

	/*
	 * Now we need to get the page.  out_entry, out_prot, wflags, and
	 * single_use aren't used.  One would think the vm code would be
	 * a *bit* nicer...  We use tmap because vm_map_lookup() can
	 * change the map argument.
	 */

	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_WRITE, &out_entry,
			   &ba, &pindex, &out_prot, &wflags);
	if (ba)
		object = ba->object;
	else
		object = NULL;

	if (rv != KERN_SUCCESS)
		return EINVAL;

	/*
	 * Okay, we've got the page.  Let's release tmap.
	 */
	vm_map_lookup_done (tmap, out_entry, 0);

	/*
	 * Fault the page in...
	 */
	rv = vm_fault(map, pageno, VM_PROT_WRITE|VM_PROT_READ, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);
	if (!rv) {
		vm_object_reference XXX (object);

		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum);
		}
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	if (fix_prot)
		vm_map_protect (map, pageno, pageno + PAGE_SIZE,
				VM_PROT_READ|VM_PROT_EXECUTE, 0);
	return rv;
}
#endif

/*
 * Process debugging system call.
 *
 * MPALMOSTSAFE
 */
int
sys_ptrace(struct ptrace_args *uap)
{
	struct proc *p = curproc;

	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
	} r;
	void *addr;
	int error = 0;

	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
		break;
	case PT_SETREGS:
		error = copyin(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
#endif
	case PT_IO:
		error = copyin(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		addr = uap->addr;
	}
	if (error)
		return (error);

	error = kern_ptrace(p, uap->req, uap->pid, addr, uap->data,
			    &uap->sysmsg_result);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = copyout(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
#endif
	}

	return (error);
}

int
kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr,
	    int data, int *res)
{
	struct proc *p, *pp;
	struct lwp *lp;
	struct iovec iov;
	struct uio uio;
	struct ptrace_io_desc *piod;
	int error = 0;
	int write, tmp;
	int t;

	write = 0;
	if (req == PT_TRACE_ME) {
		p = curp;
		PHOLD(p);
	} else {
		if ((p = pfind(pid)) == NULL)
			return ESRCH;
	}
	if (!PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
		PRELE(p);
		return (ESRCH);
	}
	if (p->p_flags & P_SYSTEM) {
		PRELE(p);
		return EINVAL;
	}

	lwkt_gettoken(&p->p_token);
	/* Can't trace a process that's currently exec'ing. */
	if ((p->p_flags & P_INEXEC) != 0) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EAGAIN;
	}

	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == curp->p_pid) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		/* Already traced */
		if (p->p_flags & P_TRACED) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		if (curp->p_flags & P_TRACED)
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr)
				if (pp == p) {
					lwkt_reltoken(&p->p_token);
					PRELE(p);
					return (EINVAL);
				}

		/* not owned by you, has done setuid (unless you're root) */
		if ((p->p_ucred->cr_ruid != curp->p_ucred->cr_ruid) ||
		    (p->p_flags & P_SUGID)) {
			if ((error = priv_check_cred(curp->p_ucred, PRIV_ROOT, 0)) != 0) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

		/* can't trace init when securelevel > 0 */
		if (securelevel > 0 && p->p_pid == 1) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
#ifdef PT_GETREGS
	case PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
#endif
		/* not being traced... */
		if ((p->p_flags & P_TRACED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* not being traced by YOU */
		if (p->p_pptr != curp) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* not currently stopped */
		if (p->p_stat != SSTOP ||
		    (p->p_flags & P_WAITED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* OK */
		break;

	default:
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EINVAL;
	}

	/* XXX lwp */
	lp = FIRST_LWP_IN_PROC(p);
#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(lp);
#endif

	/*
	 * Actually do the requests
	 */

	*res = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_ATTACH:
		/* security check done above */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		proc_reparent(p, curp);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		LWPHOLD(lp);

		if (req == PT_STEP) {
			if ((error = ptrace_single_step (lp))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

		if (addr != (void *)1) {
			if ((error = ptrace_set_pc (lp, (u_long)addr))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}
		LWPRELE(lp);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				pp = pfind(p->p_oppid);
				if (pp) {
					proc_reparent(p, pp);
					PRELE(pp);
				}
			}

			p->p_flags &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		/*
		 * Deliver or queue signal.  If the process is stopped
		 * force it to be SACTIVE again.
		 */
		crit_enter();
		if (p->p_stat == SSTOP) {
			p->p_xstat = data;
			proc_unstop(p, SSTOP);
		} else if (data) {
			ksignal(p, data);
		}
		crit_exit();
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* fallthrough */
	case PT_READ_I:
	case PT_READ_D:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process (though for a SYSSPACE transfer it doesn't
		 * really matter).
		 */
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = curthread;
		error = procfs_domem(curp, lp, NULL, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX procfs_domem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX procfs_domem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			*res = tmp;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

	case PT_IO:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process.
		 */
		piod = addr;
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = curthread;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return (EINVAL);
		}
		error = procfs_domem(curp, lp, NULL, &uio);
		piod->piod_len -= uio.uio_resid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

#ifdef PT_SETREGS
	case PT_SETREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETREGS */
#ifdef PT_GETREGS
	case PT_GETREGS:
		/* write = 0 above */
#endif /* PT_SETREGS */
#if defined(PT_SETREGS) || defined(PT_GETREGS)
		if (!procfs_validregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct reg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct reg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_doregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETFPREGS */
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		/* write = 0 above */
#endif /* PT_SETFPREGS */
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
		if (!procfs_validfpregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct fpreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct fpreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dofpregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */

#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETDBREGS */
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		/* write = 0 above */
#endif /* PT_SETDBREGS */
#if defined(PT_SETDBREGS) || defined(PT_GETDBREGS)
		if (!procfs_validdbregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct dbreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct dbreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dodbregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETDBREGS) || defined(PT_GETDBREGS) */

	default:
		break;
	}

	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return 0;
}

int
trace_req(struct proc *p)
{
	return 1;
}

/*
 * stopevent()
 *
 *	Stop a process because of a procfs event.  Stay stopped until p->p_step
 *	is cleared (cleared by PIOCCONT in procfs).
 *
 * MPSAFE
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{
	/*
	 * Set event info.  Recheck p_stops in case we are
	 * racing a close() on procfs.
	 */
	spin_lock(&p->p_spin);
	if ((p->p_stops & event) == 0) {
		spin_unlock(&p->p_spin);
		return;
	}
	p->p_xstat = val;
	p->p_stype = event;
	p->p_step = 1;
	tsleep_interlock(&p->p_step, 0);
	spin_unlock(&p->p_spin);

	/*
	 * Wakeup any PIOCWAITing procs and wait for p_step to
	 * be cleared.
	 */
	for (;;) {
		wakeup(&p->p_stype);
		tsleep(&p->p_step, PINTERLOCKED, "stopevent", 0);
		spin_lock(&p->p_spin);
		if (p->p_step == 0) {
			spin_unlock(&p->p_spin);
			break;
		}
		tsleep_interlock(&p->p_step, 0);
		spin_unlock(&p->p_spin);
	}
}
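
/*
 * Editorial note: the block below is an illustrative, non-compiled userland
 * sketch of how a debugger might drive the requests handled above: attach
 * with PT_ATTACH, wait for the SIGSTOP-induced stop, read a word of the
 * target's memory with PT_IO (struct ptrace_io_desc), then detach.  It is
 * guarded with #if 0, following this file's convention for reference-only
 * code, and is not part of the kernel build; the target pid and address are
 * placeholders supplied on the command line.
 */
#if 0
#include <sys/types.h>
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

static int
read_word(pid_t pid, void *remote_addr, int *value)
{
	struct ptrace_io_desc piod;

	/*
	 * PT_IO: piod_offs is the address in the traced process,
	 * piod_addr is a buffer in the tracing process.
	 */
	piod.piod_op = PIOD_READ_D;
	piod.piod_offs = remote_addr;
	piod.piod_addr = value;
	piod.piod_len = sizeof(*value);
	return ptrace(PT_IO, pid, (caddr_t)&piod, 0);
}

int
main(int argc, char **argv)
{
	pid_t pid;
	void *addr;
	int status, word;

	if (argc != 3)
		errx(1, "usage: %s pid addr", argv[0]);
	pid = (pid_t)strtol(argv[1], NULL, 0);
	addr = (void *)strtoul(argv[2], NULL, 0);

	/* PT_ATTACH reparents the target to us and stops it with SIGSTOP */
	if (ptrace(PT_ATTACH, pid, NULL, 0) == -1)
		err(1, "PT_ATTACH");
	if (waitpid(pid, &status, 0) == -1)
		err(1, "waitpid");

	if (read_word(pid, addr, &word) == -1)
		warn("PT_IO");
	else
		printf("0x%08x\n", word);

	/* addr of (caddr_t)1 means resume at the current pc, data 0 = no signal */
	if (ptrace(PT_DETACH, pid, (caddr_t)1, 0) == -1)
		err(1, "PT_DETACH");
	return 0;
}
#endif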