/*	$NetBSD: uvm_glue.c,v 1.38 2000/06/27 17:29:22 mrg Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */
#include "opt_uvmhist.h"
#include "opt_sysv.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>
#ifdef SYSVSHM
#include <sys/shm.h>
#endif

#include <uvm/uvm.h>

#include <machine/cpu.h>

/*
 * local prototypes
 */

static void uvm_swapout __P((struct proc *));

/*
 * XXXCDC: do these really belong here?
 */

unsigned maxdmap = MAXDSIZ;	/* kern_resource.c: RLIMIT_DATA max */
unsigned maxsmap = MAXSSIZ;	/* kern_resource.c: RLIMIT_STACK max */

int readbuffers = 0;		/* allow KGDB to read kern buffer pool */
				/* XXX: see uvm_kernacc */


/*
 * uvm_kernacc: can the kernel access a region of memory
 *
 * - called from malloc [DIAGNOSTIC], and /dev/kmem driver (mem.c)
 */

boolean_t
uvm_kernacc(addr, len, rw)
	caddr_t addr;
	size_t len;
	int rw;
{
	boolean_t rv;
	vaddr_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page((vaddr_t)addr);
	eaddr = round_page((vaddr_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	/*
	 * XXX there are still some things (e.g. the buffer cache) that
	 * are managed behind the VM system's back, so even though an
	 * address is accessible in the mind of the VM system, there may
	 * not be physical pages where the VM thinks there are.  This can
	 * lead to bogus allocation of pages in the kernel address space
	 * or worse, inconsistencies at the pmap level.  We only worry
	 * about the buffer cache for now.
	 */
	if (!readbuffers && rv && (eaddr > (vaddr_t)buffers &&
	    saddr < (vaddr_t)buffers + MAXBSIZE * nbuf))
		rv = FALSE;
	return (rv);
}

/*
 * uvm_useracc: can the user access it?
 *
 * - called from physio() and sys___sysctl().
 */

boolean_t
uvm_useracc(addr, len, rw)
	caddr_t addr;
	size_t len;
	int rw;
{
	vm_map_t map;
	boolean_t rv;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	/* XXX curproc */
	map = &curproc->p_vmspace->vm_map;

	vm_map_lock_read(map);
	rv = uvm_map_checkprot(map, trunc_page((vaddr_t)addr),
	    round_page((vaddr_t)addr + len), prot);
	vm_map_unlock_read(map);

	return (rv);
}
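
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * driver write path could use uvm_useracc() to sanity-check a user
 * buffer before starting raw I/O.  example_rawwrite() is invented for
 * illustration; real callers such as physio() perform this check
 * themselves.  Note that uvm_useracc() always checks curproc's map.
 */
#if 0	/* example only, not compiled */
int
example_rawwrite(uaddr, len)
	caddr_t uaddr;
	size_t len;
{

	/* the kernel will read from the user's buffer, so check B_READ */
	if (!uvm_useracc(uaddr, len, B_READ))
		return (EFAULT);
	/* ... transfer the data out of uaddr ... */
	return (0);
}
#endif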
#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect a change to allow writing would be lazily
 * applied, meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
uvm_chgkprot(addr, len, rw)
	caddr_t addr;
	size_t len;
	int rw;
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page((vaddr_t)addr + len);
	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract the physical address for the page.
		 * We use a cheezy hack to differentiate physical
		 * page 0 from an invalid mapping, not that it
		 * really matters...
		 */
		if (pmap_extract(pmap_kernel(), sva, &pa) == FALSE)
			panic("chgkprot: invalid page");
		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
	}
}
#endif

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(p, addr, len, access_type)
	struct proc *p;
	caddr_t addr;
	size_t len;
	vm_prot_t access_type;
{
	vm_map_t map;
	vaddr_t start, end;
	int rv;

	map = &p->p_vmspace->vm_map;
	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);

	rv = uvm_fault_wire(map, start, end, access_type);

	return (rv);
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(p, addr, len)
	struct proc *p;
	caddr_t addr;
	size_t len;
{
	uvm_fault_unwire(&p->p_vmspace->vm_map, trunc_page((vaddr_t)addr),
	    round_page((vaddr_t)addr + len));
}
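
/*
 * Illustrative sketch (not part of the original file): the
 * wire/transfer/unwire pairing that callers such as physio() follow.
 * example_wired_io() and example_transfer() are invented placeholders
 * for a real consumer and its device I/O.
 */
#if 0	/* example only, not compiled */
int
example_wired_io(p, uaddr, len)
	struct proc *p;
	caddr_t uaddr;
	size_t len;
{
	int rv;

	/* fault the pages in and wire them so they stay resident */
	rv = uvm_vslock(p, uaddr, len, VM_PROT_READ | VM_PROT_WRITE);
	if (rv != KERN_SUCCESS)
		return (EFAULT);
	example_transfer(uaddr, len);	/* hypothetical device transfer */
	uvm_vsunlock(p, uaddr, len);	/* must balance the uvm_vslock */
	return (0);
}
#endif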
/*
 * uvm_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 * - a new "user" structure is allocated for the child process
 *   [filled in by MD layer...]
 * - if specified, the child gets a new user stack described by
 *   "stack" and "stacksize"
 * - NOTE: the kernel stack may be at a different location in the child
 *   process, and thus addresses of automatic variables may be invalid
 *   after cpu_fork returns in the child process.  We do nothing here
 *   after cpu_fork returns.
 * - XXXCDC: we need a way for this to return a failure value rather
 *   than just hang
 */
void
uvm_fork(p1, p2, shared, stack, stacksize, func, arg)
	struct proc *p1, *p2;
	boolean_t shared;
	void *stack;
	size_t stacksize;
	void (*func) __P((void *));
	void *arg;
{
	struct user *up = p2->p_addr;
	int rv;

	if (shared == TRUE)
		uvmspace_share(p1, p2);			/* share vmspace */
	else
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace); /* fork vmspace */

	/*
	 * Wire down the U-area for the process, which contains the PCB
	 * and the kernel stack.  Wired state is stored in p->p_flag's
	 * P_INMEM bit rather than in the vm_map_entry's wired count
	 * to prevent kernel_map fragmentation.
	 *
	 * Note the kernel stack gets read/write access right off
	 * the bat.
	 */
	rv = uvm_fault_wire(kernel_map, (vaddr_t)up,
	    (vaddr_t)up + USPACE, VM_PROT_READ | VM_PROT_WRITE);
	if (rv != KERN_SUCCESS)
		panic("uvm_fork: uvm_fault_wire failed: %d", rv);

	/*
	 * p_stats currently points at a field in the user struct.  Copy
	 * parts of p_stats, and zero out the rest.
	 */
	p2->p_stats = &up->u_stats;
	memset(&up->u_stats.pstat_startzero, 0,
	    (unsigned) ((caddr_t)&up->u_stats.pstat_endzero -
		(caddr_t)&up->u_stats.pstat_startzero));
	memcpy(&up->u_stats.pstat_startcopy, &p1->p_stats->pstat_startcopy,
	    ((caddr_t)&up->u_stats.pstat_endcopy -
		(caddr_t)&up->u_stats.pstat_startcopy));

	/*
	 * cpu_fork() copies and updates the pcb and makes the child ready
	 * to run.  If this is a normal user fork, the child will exit
	 * directly to user mode via child_return() on its first time
	 * slice and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_fork(p1, p2, stack, stacksize, func, arg);
}
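
/*
 * Illustrative sketch (not part of the original file): how a caller
 * might hand a kernel-thread entry point to uvm_fork().  Both
 * example_kthread_main() and example_spawn() are invented; p2 is
 * assumed to already have its U-area allocated in p2->p_addr before
 * uvm_fork() is called.
 */
#if 0	/* example only, not compiled */
static void
example_kthread_main(arg)
	void *arg;
{

	/* runs in the child on its first time slice; never returns */
	for (;;)
		tsleep((caddr_t)arg, PVM, "exwait", 0);
}

static void
example_spawn(p1, p2)
	struct proc *p1, *p2;
{

	/* share the parent's vmspace; a kernel thread needs no user stack */
	uvm_fork(p1, p2, TRUE, NULL, 0, example_kthread_main, NULL);
}
#endif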
/*
 * uvm_exit: exit a virtual address space
 *
 * - the process passed to us is a dead (pre-zombie) process; we
 *   are running on a different context now (the reaper).
 * - we must run in a separate thread because freeing the vmspace
 *   of the dead process may block.
 */
void
uvm_exit(p)
	struct proc *p;
{

	uvmspace_free(p->p_vmspace);
	uvm_km_free(kernel_map, (vaddr_t)p->p_addr, USPACE);
	p->p_addr = NULL;
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */
void
uvm_init_limits(p)
	struct proc *p;
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = MAXSSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = MAXDSIZ;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
}

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define	SDB_SWAPIN	2
#define	SDB_SWAPOUT	4
#endif

/*
 * uvm_swapin: swap in a process's u-area.
 */

void
uvm_swapin(p)
	struct proc *p;
{
	vaddr_t addr;
	int s;

	addr = (vaddr_t)p->p_addr;
	/* make P_INMEM true */
	uvm_fault_wire(kernel_map, addr, addr + USPACE,
	    VM_PROT_READ | VM_PROT_WRITE);

	/*
	 * Some architectures need to be notified when the user area has
	 * moved to new physical page(s) (e.g. see mips/mips/vm_machdep.c).
	 */
	cpu_swapin(p);
	s = splstatclock();
	if (p->p_stat == SRUN)
		setrunqueue(p);
	p->p_flag |= P_INMEM;
	splx(s);
	p->p_swtime = 0;
	++uvmexp.swapins;
}

/*
 * uvm_scheduler: process zero main loop
 *
 * - attempt to swap in every swapped-out, runnable process in order
 *   of priority.
 * - if not enough memory, wake the pagedaemon and let it clear space.
 */

void
uvm_scheduler()
{
	struct proc *p;
	int pri;
	struct proc *pp;
	int ppri;
	UVMHIST_FUNC("uvm_scheduler"); UVMHIST_CALLED(maphist);

loop:
#ifdef DEBUG
	while (!enableswap)
		tsleep((caddr_t)&proc0, PVM, "noswap", 0);
#endif
	pp = NULL;		/* process to choose */
	ppri = INT_MIN;		/* its priority */
	proclist_lock_read();
	for (p = allproc.lh_first; p != 0; p = p->p_list.le_next) {

		/* is it a runnable swapped out process? */
		if (p->p_stat == SRUN && (p->p_flag & P_INMEM) == 0) {
			pri = p->p_swtime + p->p_slptime -
			    (p->p_nice - NZERO) * 8;
			if (pri > ppri) {   /* higher priority?  remember it. */
				pp = p;
				ppri = pri;
			}
		}
	}
	proclist_unlock_read();

#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: running, procp %p pri %d\n", pp, ppri);
#endif
	/*
	 * Nothing to do, back to sleep
	 */
	if ((p = pp) == NULL) {
		tsleep((caddr_t)&proc0, PVM, "scheduler", 0);
		goto loop;
	}

	/*
	 * we have found a swapped-out process which we would like to
	 * bring back in.
	 *
	 * XXX: this part is really bogus because we could deadlock on
	 * memory despite our feeble check
	 */
	if (uvmexp.free > atop(USPACE)) {
#ifdef DEBUG
		if (swapdebug & SDB_SWAPIN)
			printf("swapin: pid %d(%s)@%p, pri %d free %d\n",
			    p->p_pid, p->p_comm, p->p_addr, ppri, uvmexp.free);
#endif
		uvm_swapin(p);
		goto loop;
	}
	/*
	 * not enough memory, jab the pageout daemon and wait until the
	 * coast is clear
	 */
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: no room for pid %d(%s), free %d\n",
		    p->p_pid, p->p_comm, uvmexp.free);
#endif
	(void) splhigh();
	uvm_wait("schedpwait");
	(void) spl0();
#ifdef DEBUG
	if (swapdebug & SDB_FOLLOW)
		printf("scheduler: room again, free %d\n", uvmexp.free);
#endif
	goto loop;
}

/*
 * swappable: is process "p" swappable?
 */

#define	swappable(p)							\
	(((p)->p_flag & (P_SYSTEM | P_INMEM | P_WEXIT)) == P_INMEM &&	\
	 (p)->p_holdcnt == 0)
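
/*
 * Illustrative sketch (not part of the original file): the swap-in
 * priority computed by uvm_scheduler() above favours processes that
 * have been swapped out or asleep the longest and penalizes nice'd
 * (lower-priority) processes.  example_swapin_pri() merely restates
 * that formula for a single process.
 */
#if 0	/* example only, not compiled */
static int
example_swapin_pri(p)
	struct proc *p;
{

	/* higher value == better swap-in candidate */
	return (p->p_swtime + p->p_slptime - (p->p_nice - NZERO) * 8);
}
#endif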
559 */ 560 if (didswap == 0 && uvmexp.free <= atop(round_page(USPACE))) { 561 if ((p = outp) == NULL) 562 p = outp2; 563 #ifdef DEBUG 564 if (swapdebug & SDB_SWAPOUT) 565 printf("swapout_threads: no duds, try procp %p\n", p); 566 #endif 567 if (p) 568 uvm_swapout(p); 569 } 570 } 571 572 /* 573 * uvm_swapout: swap out process "p" 574 * 575 * - currently "swapout" means "unwire U-area" and "pmap_collect()" 576 * the pmap. 577 * - XXXCDC: should deactivate all process' private anonymous memory 578 */ 579 580 static void 581 uvm_swapout(p) 582 struct proc *p; 583 { 584 vaddr_t addr; 585 int s; 586 587 #ifdef DEBUG 588 if (swapdebug & SDB_SWAPOUT) 589 printf("swapout: pid %d(%s)@%p, stat %x pri %d free %d\n", 590 p->p_pid, p->p_comm, p->p_addr, p->p_stat, 591 p->p_slptime, uvmexp.free); 592 #endif 593 594 /* 595 * Do any machine-specific actions necessary before swapout. 596 * This can include saving floating point state, etc. 597 */ 598 cpu_swapout(p); 599 600 /* 601 * Unwire the to-be-swapped process's user struct and kernel stack. 602 */ 603 addr = (vaddr_t)p->p_addr; 604 uvm_fault_unwire(kernel_map, addr, addr + USPACE); /* !P_INMEM */ 605 pmap_collect(vm_map_pmap(&p->p_vmspace->vm_map)); 606 607 /* 608 * Mark it as (potentially) swapped out. 609 */ 610 s = splstatclock(); 611 p->p_flag &= ~P_INMEM; 612 if (p->p_stat == SRUN) 613 remrunqueue(p); 614 splx(s); 615 p->p_swtime = 0; 616 ++uvmexp.swapouts; 617 } 618 619