/*
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_glue.c	7.25 (Berkeley) 06/02/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

int	avefree = 0;		/* XXX */
unsigned maxdmap = MAXDSIZ;	/* XXX */
int	readbuffers = 0;	/* XXX allow kgdb to read kernel buffer pool */

/*
 * Check whether [addr, addr+len) in the kernel map is accessible with
 * the protection implied by rw (B_READ => read, otherwise write).
 * Returns non-zero if accessible.
 */
int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr+len-1);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	/*
	 * XXX there are still some things (e.g. the buffer cache) that
	 * are managed behind the VM system's back so even though an
	 * address is accessible in the mind of the VM system, there may
	 * not be physical pages where the VM thinks there is.  This can
	 * lead to bogus allocation of pages in the kernel address space
	 * or worse, inconsistencies at the pmap level.  We only worry
	 * about the buffer cache for now.
	 */
	/* deny any range overlapping the buffer pool unless kgdb asked */
	if (!readbuffers && rv && (eaddr > (vm_offset_t)buffers &&
		   saddr < (vm_offset_t)buffers + MAXBSIZE * nbuf))
		rv = FALSE;
	return(rv == TRUE);
}

/*
 * Check whether [addr, addr+len) in the current process' map is
 * accessible with the protection implied by rw.  Returns non-zero
 * if accessible.
 */
int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr+len-1), prot);
	return(rv == TRUE);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect a change to allow writing would be lazily-
 * applied meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
chgkprot(addr, len, rw)
	register caddr_t addr;
	int len, rw;
{
	vm_prot_t prot;
	vm_offset_t pa, sva, eva;

	prot = rw == B_READ ?
	    VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page(addr + len - 1);
	for (sva = trunc_page(addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 * We use a cheezy hack to differentiate physical
		 * page 0 from an invalid mapping, not that it
		 * really matters...
		 */
		/* low bit set marks "valid"; stripped again below */
		pa = pmap_extract(kernel_pmap, sva|1);
		if (pa == 0)
			panic("chgkprot: invalid page");
		pmap_enter(kernel_pmap, sva, pa&~1, prot, TRUE);
	}
}
#endif

/*
 * Wire the pages spanning [addr, addr+len) in the current process'
 * map so they cannot be paged out (e.g. during physio).
 */
void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr+len-1), FALSE);
}

/*
 * Undo vslock: unwire the pages spanning [addr, addr+len).
 * The `dirtied' argument is currently unused (referenced only
 * to placate lint).
 */
void
vsunlock(addr, len, dirtied)
	caddr_t addr;
	u_int len;
	int dirtied;
{
#ifdef	lint
	dirtied++;
#endif
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr+len-1), TRUE);
}

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
int
vm_fork(p1, p2, isvfork)
	register struct proc *p1, *p2;
	int isvfork;
{
	register struct user *up;
	vm_offset_t addr;

#ifdef i386
	/*
	 * avoid copying any of the parent's pagetables or other per-process
	 * objects that reside in the map by marking all of them non-inheritable
	 */
	(void)vm_map_inherit(&p1->p_vmspace->vm_map,
		UPT_MIN_ADDRESS-UPAGES*NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
#endif
	/* duplicate (or share, per inheritance attributes) the parent's map */
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

#ifdef SYSVSHM
	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2, isvfork);
#endif

#ifndef	i386
	/*
	 * Allocate a wired-down (for now) pcb and kernel stack for the process
	 */
	addr = kmem_alloc_pageable(kernel_map, ctob(UPAGES));
	if (addr == 0)
		panic("vm_fork: no more kernel virtual memory");
	vm_map_pageable(kernel_map, addr, addr + ctob(UPAGES), FALSE);
#else
/* XXX somehow, on 386, ocassionally pageout removes active, wired down kstack,
and pagetables, WITHOUT going thru vm_page_unwire! Why this appears to work is
not yet clear, yet it does... */
	addr = kmem_alloc(kernel_map, ctob(UPAGES));
	if (addr == 0)
		panic("vm_fork: no more kernel virtual memory");
#endif
	up = (struct user *)addr;
	p2->p_addr = up;

	/*
	 * p_stats and p_sigacts currently point at fields
	 * in the user struct but not at &u, instead at p_addr.
	 * Copy p_sigacts and parts of p_stats; zero the rest
	 * of p_stats (statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t)&up->u_stats.pstat_endzero -
	    (caddr_t)&up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t)&up->u_stats.pstat_endcopy -
	     (caddr_t)&up->u_stats.pstat_startcopy));

#ifdef i386
	/*
	 * Rebuild the child's page-table region from scratch: throw away
	 * whatever vmspace_fork left there and re-allocate it uninherited.
	 * NOTE(review): local `addr' here deliberately shadows the outer one.
	 */
	{ u_int addr = UPT_MIN_ADDRESS - UPAGES*NBPG; struct vm_map *vp;

	  vp = &p2->p_vmspace->vm_map;
	  (void)vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);
	  (void)vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);
	  (void)vm_map_inherit(vp, addr, UPT_MAX_ADDRESS, VM_INHERIT_NONE);
	}
#endif
	/*
	 * cpu_fork will copy and update the kernel stack and pcb,
	 * and make the child ready to run.  It marks the child
	 * so that it can return differently than the parent.
	 * It returns twice, once in the parent process and
	 * once in the child.
	 */
	return (cpu_fork(p1, p2));
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 */
void
vm_init_limits(p)
	register struct proc *p;
{

	/*
	 * Set up the initial limits on process VM.
	 * Set the maximum resident set size to be all
	 * of (reasonably) available memory.  This causes
	 * any single, large process to start random page
	 * replacement once it fills memory.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(cnt.v_free_count);
}

#include <vm/vm_pageout.h>

#ifdef DEBUG
int	enableswap = 1;		/* 0 disables the swapper entirely */
int	swapdebug = 0;		/* bitmask of SDB_* trace flags below */
#define	SDB_FOLLOW	1
#define	SDB_SWAPIN	2
#define	SDB_SWAPOUT	4
#endif

/*
 * Brutally simple:
 *	1. Attempt to swapin every swaped-out, runnable process in
 *	   order of priority.
 *	2. If not enough memory, wake the pageout daemon and let it
 *	   clear some space.
 */
void
sched()
{
	register struct proc *p;
	register int pri;
	struct proc *pp;	/* best swap-in candidate so far */
	int ppri;		/* its priority */
	vm_offset_t addr;
	vm_size_t size;

loop:
#ifdef DEBUG
	while (!enableswap)
		sleep((caddr_t)&proc0, PVM);
#endif
	/*
	 * Scan the whole process list for the highest-priority process
	 * that is runnable but not resident (SRUN and !SLOAD).
	 */
	pp = NULL;
	ppri = INT_MIN;
	for (p = (struct proc *)allproc; p != NULL; p = p->p_nxt) {
		if (p->p_stat == SRUN && (p->p_flag & SLOAD) == 0) {
			/* longer swapped-out/asleep and nicer => higher pri */
			pri = p->p_time + p->p_slptime - p->p_nice * 8;
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("sched: running, procp %x pri %d\n", pp, ppri);
#endif
	/*
	 * Nothing to do, back to sleep
	 */
	if ((p = pp) == NULL) {
		sleep((caddr_t)&proc0, PVM);
		goto loop;
	}

	/*
	 * We would like to bring someone in.
	 * This part is really bogus cuz we could deadlock on memory
	 * despite our feeble check.
	 */
	size = round_page(ctob(UPAGES));
	addr = (vm_offset_t) p->p_addr;
	if (cnt.v_free_count > atop(size)) {
#ifdef DEBUG
		if (swapdebug & SDB_SWAPIN)
			printf("swapin: pid %d(%s)@%x, pri %d free %d\n",
			       p->p_pid, p->p_comm, p->p_addr,
			       ppri, cnt.v_free_count);
#endif
		/* wire the u-area/kstack back in, then mark resident */
		vm_map_pageable(kernel_map, addr, addr+size, FALSE);
		(void) splstatclock();
		if (p->p_stat == SRUN)
			setrq(p);
		p->p_flag |= SLOAD;
		(void) spl0();
		p->p_time = 0;
		goto loop;
	}
	/*
	 * Not enough memory, jab the pageout daemon and wait til the
	 * coast is clear.
	 */
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("sched: no room for pid %d(%s), free %d\n",
		       p->p_pid, p->p_comm, cnt.v_free_count);
#endif
	(void) splhigh();
	VM_WAIT;
	(void) spl0();
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("sched: room again, free %d\n", cnt.v_free_count);
#endif
	goto loop;
}

/* a proc may be swapped iff it is resident and none of the sticky bits set */
#define	swappable(p)							\
	(((p)->p_flag & (SSYS|SLOAD|SKEEP|SWEXIT|SPHYSIO)) == SLOAD)

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped process,
 * if any, otherwise the longest-resident process.
 */
void
swapout_threads()
{
	register struct proc *p;
	struct proc *outp, *outp2;	/* best sleeping / best running candidates */
	int outpri, outpri2;		/* their respective scores */
	int didswap = 0;
	extern int maxslp;

#ifdef DEBUG
	if (!enableswap)
		return;
#endif
	outp = outp2 = NULL;
	outpri = outpri2 = 0;
	for (p = (struct proc *)allproc; p != NULL; p = p->p_nxt) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		case SRUN:
			/* running: remember the longest-resident one */
			if (p->p_time > outpri2) {
				outp2 = p;
				outpri2 = p->p_time;
			}
			continue;

		case SSLEEP:
		case SSTOP:
			/* asleep too long: swap now; else remember longest */
			if (p->p_slptime >= maxslp) {
				swapout(p);
				didswap++;
			} else if (p->p_slptime > outpri) {
				outp = p;
				outpri = p->p_slptime;
			}
			continue;
		}
	}
	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are real low on memory since we don't gain much by doing
	 * it (UPAGES pages).
	 */
	if (didswap == 0 &&
	    cnt.v_free_count <= atop(round_page(ctob(UPAGES)))) {
		if ((p = outp) == 0)
			p = outp2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("swapout_threads: no duds, try procp %x\n", p);
#endif
		if (p)
			swapout(p);
	}
}

/*
 * "Swap out" process p: unwire its u-area/kernel stack so the pageout
 * daemon may reclaim those pages, and mark it non-resident (!SLOAD).
 */
void
swapout(p)
	register struct proc *p;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("swapout: pid %d(%s)@%x, stat %x pri %d free %d\n",
		       p->p_pid, p->p_comm, p->p_addr, p->p_stat,
		       p->p_slptime, cnt.v_free_count);
#endif
	size = round_page(ctob(UPAGES));
	addr = (vm_offset_t) p->p_addr;
#if defined(hp300) || defined(luna68k)
	/*
	 * Ugh! u-area is double mapped to a fixed address behind the
	 * back of the VM system and accesses are usually through that
	 * address rather than the per-process address.  Hence reference
	 * and modify information are recorded at the fixed address and
	 * lost at context switch time.  We assume the u-struct and
	 * kernel stack are always accessed/modified and force it to be so.
	 */
	{
		register int i;
		volatile long tmp;

		/* touch (read+write) the first word of each u-area page so
		 * the pmap records reference/modify bits before unwiring */
		for (i = 0; i < UPAGES; i++) {
			tmp = *(long *)addr; *(long *)addr = tmp;
			addr += NBPG;
		}
		addr = (vm_offset_t) p->p_addr;
	}
#endif
#ifdef mips
	/*
	 * Be sure to save the floating point coprocessor state before
	 * paging out the u-struct.
	 */
	{
		extern struct proc *machFPCurProcPtr;

		if (p == machFPCurProcPtr) {
			MachSaveCurFPState(p);
			machFPCurProcPtr = (struct proc *)0;
		}
	}
#endif
#ifndef	i386 /* temporary measure till we find spontaineous unwire of kstack */
	vm_map_pageable(kernel_map, addr, addr+size, TRUE);
	pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
#endif
	/* mark non-resident and pull it off the run queue, atomically */
	(void) splhigh();
	p->p_flag &= ~SLOAD;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();
	p->p_time = 0;
}

/*
 * The rest of these routines fake thread handling
 */

/*
 * Record the event the current process intends to wait on.
 * `ruptible' (interruptible) is ignored; referenced only for lint.
 */
void
assert_wait(event, ruptible)
	int event;
	boolean_t ruptible;
{
#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
}

/*
 * Block the current process on the event previously recorded by
 * assert_wait, if any.
 */
void
thread_block()
{
	int s = splhigh();

	if (curproc->p_thread)
		sleep((caddr_t)curproc->p_thread, PVM);
	splx(s);
}

/*
 * Atomically release `lock' and sleep on `event'.
 * NOTE(review): the declaration of `s' follows the #ifdef lint statement,
 * which is a C89 decl-after-statement violation when lint is defined.
 */
void
thread_sleep(event, lock, ruptible)
	int event;
	simple_lock_t lock;
	boolean_t ruptible;
{
#ifdef lint
	ruptible++;
#endif
	int s = splhigh();

	curproc->p_thread = event;
	simple_unlock(lock);
	if (curproc->p_thread)
		sleep((caddr_t)event, PVM);
	splx(s);
}

/*
 * Wake all processes sleeping on `event'.
 */
void
thread_wakeup(event)
	int event;
{
	int s = splhigh();

	wakeup((caddr_t)event);
	splx(s);
}

/*
 * DEBUG stuff
 */

int indent = 0;		/* current indentation depth for iprintf, in spaces */

#include <machine/stdarg.h>		/* see subr_prf.c */

/*
 * Indented printf: emit `indent' spaces (tabs where possible), then
 * format via the kernel printf's "%r" recursive-format extension
 * (see subr_prf.c).
 */
/*ARGSUSED2*/
void
#if __STDC__
iprintf(const char *fmt, ...)
#else
iprintf(fmt /* , va_alist */)
	char *fmt;
	/* va_dcl */
#endif
{
	register int i;
	va_list ap;

	for (i = indent; i >= 8; i -= 8)
		printf("\t");
	while (--i >= 0)
		printf(" ");
	va_start(ap, fmt);
	printf("%r", fmt, ap);
	va_end(ap);
}