/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 01/05/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <machine/cpu.h>

int	avefree = 0;		/* XXX */
unsigned maxdmap = MAXDSIZ;	/* XXX */
int	readbuffers = 0;	/* XXX allow kgdb to read kernel buffer pool */

int
kernacc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page(addr);
	eaddr = round_page(addr+len);
	rv = vm_map_check_protection(kernel_map, saddr, eaddr, prot);
	/*
	 * XXX there are still some things (e.g. the buffer cache) that
	 * are managed behind the VM system's back, so even though an
	 * address is accessible in the mind of the VM system, there may
	 * not be physical pages where the VM thinks there are.  This can
	 * lead to bogus allocation of pages in the kernel address space
	 * or worse, inconsistencies at the pmap level.  We only worry
	 * about the buffer cache for now.
	 */
	if (!readbuffers && rv && (eaddr > (vm_offset_t)buffers &&
	    saddr < (vm_offset_t)buffers + MAXBSIZE * nbuf))
		rv = FALSE;
	return (rv == TRUE);
}
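
/*
 * Illustrative use (a sketch, not from this file): a driver would
 * typically validate a kernel-space buffer before touching it, e.g.
 *
 *	if (!kernacc(addr, len, B_READ))
 *		return (EFAULT);
 */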

int
useracc(addr, len, rw)
	caddr_t addr;
	int len, rw;
{
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	rv = vm_map_check_protection(&curproc->p_vmspace->vm_map,
	    trunc_page(addr), round_page(addr+len), prot);
	return (rv == TRUE);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so the debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect, a change to allow writing would be lazily
 * applied, meaning we would still take a protection fault --
 * something we really don't want to do.  It would also fragment
 * the kernel map unnecessarily.  We cannot use pmap_protect since
 * it also won't enforce a write-enable request.  Using pmap_enter
 * is the only way we can ensure the change takes place properly.
 */
void
chgkprot(addr, len, rw)
	register caddr_t addr;
	int len, rw;
{
	vm_prot_t prot;
	vm_offset_t pa, sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page(addr + len);
	for (sva = trunc_page(addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.  We use a
		 * cheezy hack to differentiate physical page 0 from an
		 * invalid mapping (the low bit passes through
		 * pmap_extract as a page offset), not that it really
		 * matters...
		 */
		pa = pmap_extract(kernel_pmap, sva|1);
		if (pa == 0)
			panic("chgkprot: invalid page");
		pmap_enter(kernel_pmap, sva, pa&~1, prot, TRUE);
	}
}
#endif

void
vslock(addr, len)
	caddr_t addr;
	u_int len;
{
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr+len), FALSE);
}

void
vsunlock(addr, len, dirtied)
	caddr_t addr;
	u_int len;
	int dirtied;
{
#ifdef lint
	dirtied++;
#endif
	vm_map_pageable(&curproc->p_vmspace->vm_map, trunc_page(addr),
	    round_page(addr+len), TRUE);
}
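
/*
 * Illustrative use (a sketch, not from this file): physical I/O wires
 * the pages backing a user buffer for the duration of the transfer,
 * pairing the two calls along these lines:
 *
 *	vslock(base, todo);
 *	... perform the raw transfer ...
 *	vsunlock(base, todo, B_READ);
 */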

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.
 * NOTE: the kernel stack may be at a different location in the child
 * process, and thus addresses of automatic variables may be invalid
 * after cpu_fork returns in the child process.  We do nothing here
 * after cpu_fork returns.
 */
int
vm_fork(p1, p2, isvfork)
	register struct proc *p1, *p2;
	int isvfork;
{
	register struct user *up;
	vm_offset_t addr;

#ifdef i386
	/*
	 * Avoid copying any of the parent's pagetables or other
	 * per-process objects that reside in the map by marking all of
	 * them non-inheritable.
	 */
	(void)vm_map_inherit(&p1->p_vmspace->vm_map,
	    UPT_MIN_ADDRESS-UPAGES*NBPG, VM_MAX_ADDRESS, VM_INHERIT_NONE);
#endif
	p2->p_vmspace = vmspace_fork(p1->p_vmspace);

#ifdef SYSVSHM
	if (p1->p_vmspace->vm_shm)
		shmfork(p1, p2, isvfork);
#endif

#ifndef i386
	/*
	 * Allocate a wired-down (for now) pcb and kernel stack for the
	 * process.
	 */
	addr = kmem_alloc_pageable(kernel_map, ctob(UPAGES));
	if (addr == 0)
		panic("vm_fork: no more kernel virtual memory");
	vm_map_pageable(kernel_map, addr, addr + ctob(UPAGES), FALSE);
#else
	/*
	 * XXX somehow, on the 386, pageout occasionally removes the
	 * active, wired-down kstack and pagetables WITHOUT going through
	 * vm_page_unwire!  Why this appears to work is not yet clear,
	 * yet it does...
	 */
	addr = kmem_alloc(kernel_map, ctob(UPAGES));
	if (addr == 0)
		panic("vm_fork: no more kernel virtual memory");
#endif
	up = (struct user *)addr;
	p2->p_addr = up;

	/*
	 * p_stats and p_sigacts currently point at fields in the user
	 * struct but not at &u, instead at p_addr.  Copy p_sigacts and
	 * parts of p_stats; zero the rest of p_stats (statistics).
	 */
	p2->p_stats = &up->u_stats;
	p2->p_sigacts = &up->u_sigacts;
	up->u_sigacts = *p1->p_sigacts;
	bzero(&up->u_stats.pstat_startzero,
	    (unsigned) ((caddr_t)&up->u_stats.pstat_endzero -
	    (caddr_t)&up->u_stats.pstat_startzero));
	bcopy(&p1->p_stats->pstat_startcopy, &up->u_stats.pstat_startcopy,
	    ((caddr_t)&up->u_stats.pstat_endcopy -
	     (caddr_t)&up->u_stats.pstat_startcopy));
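
	/*
	 * The pstat_startzero/pstat_endzero and pstat_startcopy/
	 * pstat_endcopy names above are markers defined in struct
	 * pstats (sys/resourcevar.h) delimiting the region to be
	 * cleared and the region to be inherited, so the bzero/bcopy
	 * need no per-field knowledge.
	 */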

#ifdef i386
	{ u_int addr = UPT_MIN_ADDRESS - UPAGES*NBPG; struct vm_map *vp;

	  vp = &p2->p_vmspace->vm_map;
	  /*
	   * Discard the pagetable/u-area range copied from the parent
	   * and replace it with fresh, non-inherited space.
	   */
	  (void)vm_deallocate(vp, addr, UPT_MAX_ADDRESS - addr);
	  (void)vm_allocate(vp, &addr, UPT_MAX_ADDRESS - addr, FALSE);
	  (void)vm_map_inherit(vp, addr, UPT_MAX_ADDRESS, VM_INHERIT_NONE);
	}
#endif
	/*
	 * cpu_fork will copy and update the kernel stack and pcb,
	 * and make the child ready to run.  It marks the child
	 * so that it can return differently than the parent.
	 * It returns twice, once in the parent process and
	 * once in the child.
	 */
	return (cpu_fork(p1, p2));
}

/*
 * Set default limits for VM system.
 * Called for proc 0, and then inherited by all others.
 */
void
vm_init_limits(p)
	register struct proc *p;
{

	/*
	 * Set up the initial limits on process VM.
	 * Set the maximum resident set size to be all
	 * of (reasonably) available memory.  This causes
	 * any single, large process to start random page
	 * replacement once it fills memory.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(cnt.v_free_count);
}

#include <vm/vm_pageout.h>

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define	SDB_SWAPIN	2
#define	SDB_SWAPOUT	4
#endif
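
/*
 * Illustrative use: the swapdebug bits may be OR'd together (e.g. from
 * kgdb) to trace this code on the console:
 *
 *	swapdebug = SDB_FOLLOW|SDB_SWAPIN|SDB_SWAPOUT;
 */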

/*
 * Brutally simple:
 *	1. Attempt to swap in every swapped-out, runnable process in
 *	   order of priority.
 *	2. If not enough memory, wake the pageout daemon and let it
 *	   clear some space.
 */
void
scheduler()
{
	register struct proc *p;
	register int pri;
	struct proc *pp;
	int ppri;
	vm_offset_t addr;
	vm_size_t size;

loop:
#ifdef DEBUG
	while (!enableswap)
		sleep((caddr_t)&proc0, PVM);
#endif
	pp = NULL;
	ppri = INT_MIN;
	for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
		if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
			/*
			 * Candidate priority: time swapped out plus time
			 * asleep, weighted against niceness.
			 */
			pri = p->p_swtime + p->p_slptime - p->p_nice * 8;
			if (pri > ppri) {
				pp = p;
				ppri = pri;
			}
		}
	}
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("sched: running, procp %x pri %d\n", pp, ppri);
#endif
	/*
	 * Nothing to do, back to sleep
	 */
	if ((p = pp) == NULL) {
		sleep((caddr_t)&proc0, PVM);
		goto loop;
	}

	/*
	 * We would like to bring someone in.
	 * This part is really bogus because we could deadlock on memory
	 * despite our feeble check.
	 */
	size = round_page(ctob(UPAGES));
	addr = (vm_offset_t) p->p_addr;
	if (cnt.v_free_count > atop(size)) {
#ifdef DEBUG
		if (swapdebug & SDB_SWAPIN)
			printf("swapin: pid %d(%s)@%x, pri %d free %d\n",
			    p->p_pid, p->p_comm, p->p_addr,
			    ppri, cnt.v_free_count);
#endif
		vm_map_pageable(kernel_map, addr, addr+size, FALSE);
		/*
		 * Some architectures need to be notified when the
		 * user area has moved to new physical page(s) (e.g.
		 * see pmax/pmax/vm_machdep.c).
		 */
		cpu_swapin(p);
		(void) splstatclock();
		if (p->p_stat == SRUN)
			setrunqueue(p);
		p->p_flag |= P_INMEM;
		(void) spl0();
		p->p_swtime = 0;
		goto loop;
	}
	/*
	 * Not enough memory, jab the pageout daemon and wait until the
	 * coast is clear.
	 */
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("sched: no room for pid %d(%s), free %d\n",
		    p->p_pid, p->p_comm, cnt.v_free_count);
#endif
	(void) splhigh();
	VM_WAIT;
	(void) spl0();
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("sched: room again, free %d\n", cnt.v_free_count);
#endif
	goto loop;
}

#define	swappable(p)							\
	(((p)->p_flag &							\
	    (P_SYSTEM | P_INMEM | P_NOSWAP | P_WEXIT | P_PHYSIO)) == P_INMEM)
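
/*
 * That is, a process may be swapped only if it is resident (P_INMEM set)
 * and is not a system process, is not locked in core (P_NOSWAP), is not
 * exiting, and is not in the middle of physical I/O.
 */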

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and unwire their u-areas.  We try to always "swap" at least one
 * process in case we need the room for a swapin.
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped
 * process, if any, otherwise the longest-resident process.
 */
void
swapout_threads()
{
	register struct proc *p;
	struct proc *outp, *outp2;
	int outpri, outpri2;
	int didswap = 0;
	extern int maxslp;

#ifdef DEBUG
	if (!enableswap)
		return;
#endif
	outp = outp2 = NULL;
	outpri = outpri2 = 0;
	for (p = (struct proc *)allproc; p != NULL; p = p->p_next) {
		if (!swappable(p))
			continue;
		switch (p->p_stat) {
		case SRUN:
			if (p->p_swtime > outpri2) {
				outp2 = p;
				outpri2 = p->p_swtime;
			}
			continue;

		case SSLEEP:
		case SSTOP:
			if (p->p_slptime >= maxslp) {
				swapout(p);
				didswap++;
			} else if (p->p_slptime > outpri) {
				outp = p;
				outpri = p->p_slptime;
			}
			continue;
		}
	}
	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are really low on memory since we don't gain much by
	 * doing it (UPAGES pages).
	 */
	if (didswap == 0 &&
	    cnt.v_free_count <= atop(round_page(ctob(UPAGES)))) {
		if ((p = outp) == 0)
			p = outp2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("swapout_threads: no duds, try procp %x\n", p);
#endif
		if (p)
			swapout(p);
	}
}
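
/*
 * Illustrative example: with the traditional maxslp of 20, a process
 * asleep or stopped for 20 seconds is evicted outright; failing that,
 * and only when free memory is down to one u-area's worth, the longest
 * sleeper (or, as a last resort, the longest-resident runnable process)
 * is chosen.
 */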

void
swapout(p)
	register struct proc *p;
{
	vm_offset_t addr;
	vm_size_t size;

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("swapout: pid %d(%s)@%x, stat %x pri %d free %d\n",
		    p->p_pid, p->p_comm, p->p_addr, p->p_stat,
		    p->p_slptime, cnt.v_free_count);
#endif
	size = round_page(ctob(UPAGES));
	addr = (vm_offset_t) p->p_addr;
#if defined(hp300) || defined(luna68k)
	/*
	 * Ugh!  The u-area is double mapped to a fixed address behind
	 * the back of the VM system and accesses are usually through
	 * that address rather than the per-process address.  Hence
	 * reference and modify information are recorded at the fixed
	 * address and lost at context switch time.  We assume the
	 * u-struct and kernel stack are always accessed/modified and
	 * force it to be so.
	 */
	{
		register int i;
		volatile long tmp;

		/*
		 * Touch each page read/write so the pmap sees it as
		 * referenced and modified.
		 */
		for (i = 0; i < UPAGES; i++) {
			tmp = *(long *)addr; *(long *)addr = tmp;
			addr += NBPG;
		}
		addr = (vm_offset_t) p->p_addr;
	}
#endif
#ifdef mips
	/*
	 * Be sure to save the floating point coprocessor state before
	 * paging out the u-struct.
	 */
	{
		extern struct proc *machFPCurProcPtr;

		if (p == machFPCurProcPtr) {
			MachSaveCurFPState(p);
			machFPCurProcPtr = (struct proc *)0;
		}
	}
#endif
#ifndef i386 /* temporary measure till we find spontaneous unwire of kstack */
	vm_map_pageable(kernel_map, addr, addr+size, TRUE);
	pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map));
#endif
	(void) splhigh();
	p->p_flag &= ~P_INMEM;
	if (p->p_stat == SRUN)
		remrq(p);
	(void) spl0();
	p->p_swtime = 0;
}

/*
 * The rest of these routines fake thread handling
 */

void
assert_wait(event, ruptible)
	int event;
	boolean_t ruptible;
{
#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
}

void
thread_block()
{
	int s = splhigh();

	if (curproc->p_thread)
		sleep((caddr_t)curproc->p_thread, PVM);
	splx(s);
}

void
thread_sleep(event, lock, ruptible)
	int event;
	simple_lock_t lock;
	boolean_t ruptible;
{
	int s = splhigh();

#ifdef lint
	ruptible++;
#endif
	curproc->p_thread = event;
	simple_unlock(lock);
	if (curproc->p_thread)
		sleep((caddr_t)event, PVM);
	splx(s);
}

void
thread_wakeup(event)
	int event;
{
	int s = splhigh();

	wakeup((caddr_t)event);
	splx(s);
}
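
/*
 * Illustrative use: the VM_WAIT invocation in scheduler() above is
 * built from these shims, waking the pageout daemon and then sleeping
 * until free memory reappears, roughly:
 *
 *	thread_wakeup((int)&vm_pages_needed);
 *	thread_sleep((int)&cnt.v_free_count, &vm_pages_needed_lock, FALSE);
 */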

/*
 * DEBUG stuff
 */

int indent = 0;

#include <machine/stdarg.h>		/* see subr_prf.c */

/*ARGSUSED2*/
void
#if __STDC__
iprintf(const char *fmt, ...)
#else
iprintf(fmt /* , va_alist */)
	char *fmt;
	/* va_dcl */
#endif
{
	register int i;
	va_list ap;

	/* emit "indent" columns of whitespace: tabs, then spaces */
	for (i = indent; i >= 8; i -= 8)
		printf("\t");
	while (--i >= 0)
		printf(" ");
	va_start(ap, fmt);
	printf("%r", fmt, ap);
	va_end(ap);
}
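
/*
 * Illustrative use (a sketch): the map and object print routines
 * indent nested structures by adjusting "indent" around recursive
 * calls:
 *
 *	indent += 2;
 *	iprintf("map %x\n", map);
 *	indent -= 2;
 */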