/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_glue.c,v 1.94.2.4 2003/01/13 22:51:17 dillon Exp $
 * $DragonFly: src/sys/vm/vm_glue.c,v 1.56 2008/07/01 02:02:56 dillon Exp $
 */
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/shm.h>
#include <sys/vmmeter.h>
#include <sys/sysctl.h>

#include <sys/kernel.h>
#include <sys/unistd.h>

#include <machine/limits.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <sys/lock.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>

#include <sys/user.h>
#include <vm/vm_page2.h>
#include <sys/thread2.h>
#include <sys/sysref2.h>

/*
 * THIS MUST BE THE LAST INITIALIZATION ITEM!!!
 *
 * Note: run scheduling should be divorced from the vm system.
 */
static void scheduler (void *);
SYSINIT(scheduler, SI_SUB_RUN_SCHEDULER, SI_ORDER_FIRST, scheduler, NULL)

#ifdef INVARIANTS

static int swap_debug = 0;
SYSCTL_INT(_vm, OID_AUTO, swap_debug,
	CTLFLAG_RW, &swap_debug, 0, "");

#endif

static int scheduler_notify;

static void swapout (struct proc *);

/*
 * No requirements.
 */
int
kernacc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_offset_t saddr, eaddr;
	vm_prot_t prot;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to kernacc (%x)\n", rw));

	/*
	 * The globaldata space is not part of the kernel_map proper,
	 * check access separately.
	 */
	if (is_globaldata_space((vm_offset_t)addr, (vm_offset_t)(addr + len)))
		return (TRUE);

	/*
	 * Nominal kernel memory access - check access via kernel_map.
	 */
	if ((vm_offset_t)addr + len > kernel_map.max_offset ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	prot = rw;
	saddr = trunc_page((vm_offset_t)addr);
	eaddr = round_page((vm_offset_t)addr + len);
	rv = vm_map_check_protection(&kernel_map, saddr, eaddr, prot, FALSE);

	return (rv == TRUE);
}

/*
 * No requirements.
 */
int
useracc(c_caddr_t addr, int len, int rw)
{
	boolean_t rv;
	vm_prot_t prot;
	vm_map_t map;
	vm_map_entry_t save_hint;

	KASSERT((rw & (~VM_PROT_ALL)) == 0,
	    ("illegal ``rw'' argument to useracc (%x)\n", rw));
	prot = rw;
	/*
	 * XXX - check separately to disallow access to user area and user
	 * page tables - they are in the map.
	 *
	 * XXX - VM_MAX_USER_ADDRESS is an end address, not a max.  It was
	 * once only used (as an end address) in trap.c.  Use it as an end
	 * address here too.  This bogusness has spread.  I just fixed where
	 * it was used as a max in vm_mmap.c.
	 */
	if ((vm_offset_t)addr + len > /* XXX */ VM_MAX_USER_ADDRESS ||
	    (vm_offset_t)addr + len < (vm_offset_t)addr) {
		return (FALSE);
	}
	map = &curproc->p_vmspace->vm_map;
	vm_map_lock_read(map);
	/*
	 * We save the map hint, and restore it.  Useracc appears to distort
	 * the map hint unnecessarily.
	 */
	save_hint = map->hint;
	rv = vm_map_check_protection(map, trunc_page((vm_offset_t)addr),
				     round_page((vm_offset_t)addr + len),
				     prot, TRUE);
	map->hint = save_hint;
	vm_map_unlock_read(map);

	return (rv == TRUE);
}
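
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * caller would typically use useracc() to validate a user buffer before
 * operating on it, e.g.:
 *
 *	if (!useracc(uaddr, len, VM_PROT_WRITE))
 *		return (EFAULT);
 *
 * The result is only a snapshot of the map; the mapping can change
 * immediately afterwards, so copyin()/copyout() must still be prepared
 * to handle faults.
 */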

/*
 * No requirements.
 */
void
vslock(caddr_t addr, u_int len)
{
	if (len) {
		vm_map_wire(&curproc->p_vmspace->vm_map,
			    trunc_page((vm_offset_t)addr),
			    round_page((vm_offset_t)addr + len), 0);
	}
}

/*
 * No requirements.
 */
void
vsunlock(caddr_t addr, u_int len)
{
	if (len) {
		vm_map_wire(&curproc->p_vmspace->vm_map,
			    trunc_page((vm_offset_t)addr),
			    round_page((vm_offset_t)addr + len),
			    KM_PAGEABLE);
	}
}
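
/*
 * Illustrative sketch (not part of the original file): vslock() and
 * vsunlock() are meant to bracket a region while the kernel operates
 * directly on user memory, e.g. a hypothetical caller might do:
 *
 *	vslock(udata, ulen);
 *	error = do_io(udata, ulen);	(do_io is a hypothetical helper)
 *	vsunlock(udata, ulen);
 *
 * Both calls must be passed the same range, since vsunlock() simply
 * rewires the same page-aligned span with KM_PAGEABLE.
 */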

/*
 * Implement fork's actions on an address space.
 * Here we arrange for the address space to be copied or referenced,
 * allocate a user struct (pcb and kernel stack), then call the
 * machine-dependent layer to fill those in and make the new process
 * ready to run.  The new process is set up so that it returns directly
 * to user mode to avoid stack copying and relocation problems.
 *
 * No requirements.
 */
void
vm_fork(struct proc *p1, struct proc *p2, int flags)
{
	if ((flags & RFPROC) == 0) {
		/*
		 * Divorce the memory, if it is shared.  Essentially
		 * this changes memory shared amongst threads into
		 * COW locally.
		 */
		if ((flags & RFMEM) == 0) {
			if (p1->p_vmspace->vm_sysref.refcnt > 1) {
				vmspace_unshare(p1);
			}
		}
		cpu_fork(ONLY_LWP_IN_PROC(p1), NULL, flags);
		return;
	}

	if (flags & RFMEM) {
		p2->p_vmspace = p1->p_vmspace;
		sysref_get(&p1->p_vmspace->vm_sysref);
	}

	while (vm_page_count_severe()) {
		vm_wait(0);
	}

	if ((flags & RFMEM) == 0) {
		p2->p_vmspace = vmspace_fork(p1->p_vmspace);

		pmap_pinit2(vmspace_pmap(p2->p_vmspace));

		if (p1->p_vmspace->vm_shm)
			shmfork(p1, p2);
	}

	pmap_init_proc(p2);
}

/*
 * Called after a process has been wait(2)'ed upon and is being reaped.
 * The idea is to reclaim resources that we could not reclaim while
 * the process was still executing.
 *
 * No requirements.
 */
void
vm_waitproc(struct proc *p)
{
	cpu_proc_wait(p);
	vmspace_exitfree(p);	/* and clean-out the vmspace */
}

/*
 * Set default limits for VM system.  Called during proc0's initialization.
 *
 * Called from the low level boot code only.
 */
void
vm_init_limits(struct proc *p)
{
	int rss_limit;

	/*
	 * Set up the initial limits on process VM.  Set the maximum resident
	 * set size to be half of (reasonably) available memory.  Since this
	 * is a soft limit, it comes into effect only when the system is out
	 * of memory - half of main memory helps to favor smaller processes,
	 * and reduces thrashing of the object cache.
	 */
	p->p_rlimit[RLIMIT_STACK].rlim_cur = dflssiz;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxssiz;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = dfldsiz;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdsiz;
	/* limit the limit to no less than 2MB */
	rss_limit = max(vmstats.v_free_count, 512);
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(rss_limit);
	p->p_rlimit[RLIMIT_RSS].rlim_max = RLIM_INFINITY;
}

/*
 * Fault in the specified process.  Note that the process can be in any
 * state.  Just clear P_SWAPPEDOUT and call wakeup in case the process is
 * sleeping.
 *
 * No requirements.
 */
void
faultin(struct proc *p)
{
	if (p->p_flag & P_SWAPPEDOUT) {
		/*
		 * The process is waiting in the kernel to return to user
		 * mode but cannot until P_SWAPPEDOUT gets cleared.
		 */
		lwkt_gettoken(&p->p_token);
		p->p_flag &= ~(P_SWAPPEDOUT | P_SWAPWAIT);
#ifdef INVARIANTS
		if (swap_debug)
			kprintf("swapping in %d (%s)\n", p->p_pid, p->p_comm);
#endif
		wakeup(p);
		lwkt_reltoken(&p->p_token);
	}
}

/*
 * Kernel initialization eventually falls through to this function,
 * which is process 0.
 *
 * This swapin algorithm attempts to swap-in processes only if there
 * is enough space for them.  Of course, if a process waits for a long
 * time, it will be swapped in anyway.
 */
struct scheduler_info {
	struct proc *pp;
	int ppri;
};

static int scheduler_callback(struct proc *p, void *data);

static void
scheduler(void *dummy)
{
	struct scheduler_info info;
	struct proc *p;

	KKASSERT(!IN_CRITICAL_SECT(curthread));
loop:
	scheduler_notify = 0;
	/*
	 * Don't try to swap anything in if we are low on memory.
	 */
	if (vm_page_count_severe()) {
		vm_wait(0);
		goto loop;
	}

	/*
	 * Look for a good candidate to wake up.
	 */
	info.pp = NULL;
	info.ppri = INT_MIN;
	allproc_scan(scheduler_callback, &info);

	/*
	 * Nothing to do, back to sleep for at least 1/10 of a second.  If
	 * we are woken up, immediately process the next request.  If
	 * multiple requests have built up the first is processed
	 * immediately and the rest are staggered.
	 */
	if ((p = info.pp) == NULL) {
		tsleep(&proc0, 0, "nowork", hz / 10);
		if (scheduler_notify == 0)
			tsleep(&scheduler_notify, 0, "nowork", 0);
		goto loop;
	}

	/*
	 * Fault the selected process in, then wait for a short period of
	 * time and loop back up.
	 *
	 * XXX we need a heuristic to get a measure of system stress and
	 * then adjust our stagger wakeup delay accordingly.
	 */
	lwkt_gettoken(&proc_token);
	faultin(p);
	p->p_swtime = 0;
	PRELE(p);
	lwkt_reltoken(&proc_token);
	tsleep(&proc0, 0, "swapin", hz / 10);
	goto loop;
}

/*
 * The caller must hold proc_token.
 */
static int
scheduler_callback(struct proc *p, void *data)
{
	struct scheduler_info *info = data;
	struct lwp *lp;
	segsz_t pgs;
	int pri;

	if (p->p_flag & P_SWAPWAIT) {
		pri = 0;
		FOREACH_LWP_IN_PROC(lp, p) {
			/* XXX lwp might need a different metric */
			pri += lp->lwp_slptime;
		}
		pri += p->p_swtime - p->p_nice * 8;

		/*
		 * The more pages paged out while we were swapped,
		 * the more work we have to do to get up and running
		 * again and the lower our wakeup priority.
		 *
		 * Each second of sleep time is worth ~1MB.
		 */
		lwkt_gettoken(&p->p_vmspace->vm_map.token);
		pgs = vmspace_resident_count(p->p_vmspace);
		if (pgs < p->p_vmspace->vm_swrss) {
			pri -= (p->p_vmspace->vm_swrss - pgs) /
			       (1024 * 1024 / PAGE_SIZE);
		}
		lwkt_reltoken(&p->p_vmspace->vm_map.token);

		/*
		 * If this process is higher priority and there is
		 * enough space, then select this process instead of
		 * the previous selection.
		 */
		if (pri > info->ppri) {
			if (info->pp)
				PRELE(info->pp);
			PHOLD(p);
			info->pp = p;
			info->ppri = pri;
		}
	}
	return (0);
}
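
/*
 * Illustrative note (not part of the original file): the wakeup
 * priority computed above works out to roughly
 *
 *	pri = sum(lwp_slptime) + p_swtime - 8 * p_nice
 *	      - (pages lost while swapped) / (pages per MB)
 *
 * so processes that have slept and been swapped out the longest, and
 * lost the fewest resident pages, win the race to be faulted back in
 * first, weighted against their nice value.
 */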

/*
 * SMP races ok.
 * No requirements.
 */
void
swapin_request(void)
{
	if (scheduler_notify == 0) {
		scheduler_notify = 1;
		wakeup(&scheduler_notify);
	}
}

#ifndef NO_SWAPPING

#define	swappable(p) \
	(((p)->p_lock == 0) && \
	 ((p)->p_flag & (P_TRACED|P_SYSTEM|P_SWAPPEDOUT|P_WEXIT)) == 0)

/*
 * Swap_idle_threshold1 is the guaranteed swapped-in time for a process.
 */
static int swap_idle_threshold1 = 15;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold1,
	CTLFLAG_RW, &swap_idle_threshold1, 0,
	"Guaranteed process resident time (sec)");

/*
 * Swap_idle_threshold2 is the time that a process can be idle before
 * it will be swapped out, if idle swapping is enabled.  The default is
 * one minute.
 */
static int swap_idle_threshold2 = 60;
SYSCTL_INT(_vm, OID_AUTO, swap_idle_threshold2,
	CTLFLAG_RW, &swap_idle_threshold2, 0,
	"Time (sec) a process can idle before being swapped");

/*
 * Swapout is driven by the pageout daemon.  Very simple, we find eligible
 * procs and mark them as being swapped out.  This will cause the kernel
 * to prefer to pageout those procs' pages first and the procs in question
 * will not return to user mode until the swapper tells them they can.
 *
 * If any procs have been sleeping/stopped for at least maxslp seconds,
 * they are swapped.  Else, we swap the longest-sleeping or stopped
 * process, if any, otherwise the longest-resident process.
 */

static int swapout_procs_callback(struct proc *p, void *data);

/*
 * No requirements.
 */
void
swapout_procs(int action)
{
	allproc_scan(swapout_procs_callback, &action);
}
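
/*
 * Illustrative sketch (not part of the original file): the pageout
 * daemon drives this with an action mask, conceptually something like:
 *
 *	swapout_procs(VM_SWAP_NORMAL);			(memory stress)
 *	swapout_procs(VM_SWAP_NORMAL | VM_SWAP_IDLE);	(also reap idlers)
 *
 * As the callback below implements it, VM_SWAP_NORMAL swaps any
 * eligible process past swap_idle_threshold1, while VM_SWAP_IDLE by
 * itself only swaps processes idle for more than swap_idle_threshold2
 * seconds.
 */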

/*
 * The caller must hold proc_token.
 */
static int
swapout_procs_callback(struct proc *p, void *data)
{
	struct vmspace *vm;
	struct lwp *lp;
	int action = *(int *)data;
	int minslp = -1;

	if (!swappable(p))
		return (0);

	lwkt_gettoken(&p->p_token);
	vm = p->p_vmspace;

	/*
	 * We only consider active processes.
	 */
	if (p->p_stat != SACTIVE && p->p_stat != SSTOP) {
		lwkt_reltoken(&p->p_token);
		return (0);
	}

	FOREACH_LWP_IN_PROC(lp, p) {
		/*
		 * Do not swap out a realtime process.
		 */
		if (RTP_PRIO_IS_REALTIME(lp->lwp_rtprio.type)) {
			lwkt_reltoken(&p->p_token);
			return (0);
		}

		/*
		 * Guarantee swap_idle_threshold1 time in memory.
		 */
		if (lp->lwp_slptime < swap_idle_threshold1) {
			lwkt_reltoken(&p->p_token);
			return (0);
		}

		/*
		 * If the system is under memory stress, or if we
		 * are swapping idle processes >= swap_idle_threshold2,
		 * then swap the process out.
		 */
		if (((action & VM_SWAP_NORMAL) == 0) &&
		    (((action & VM_SWAP_IDLE) == 0) ||
		     (lp->lwp_slptime < swap_idle_threshold2))) {
			lwkt_reltoken(&p->p_token);
			return (0);
		}

		if (minslp == -1 || lp->lwp_slptime < minslp)
			minslp = lp->lwp_slptime;
	}

	sysref_get(&vm->vm_sysref);

	/*
	 * If the process has been asleep for a while, swap
	 * it out.
	 */
	if ((action & VM_SWAP_NORMAL) ||
	    ((action & VM_SWAP_IDLE) &&
	     (minslp > swap_idle_threshold2))) {
		swapout(p);
	}

	/*
	 * Clean up our reference.
	 */
	sysref_put(&vm->vm_sysref);
	lwkt_reltoken(&p->p_token);

	return (0);
}

/*
 * The caller must hold proc_token and p->p_token.
 */
static void
swapout(struct proc *p)
{
#ifdef INVARIANTS
	if (swap_debug)
		kprintf("swapping out %d (%s)\n", p->p_pid, p->p_comm);
#endif
	++p->p_ru.ru_nswap;

	/*
	 * Remember the process resident count.
	 */
	lwkt_gettoken(&p->p_vmspace->vm_map.token);
	p->p_vmspace->vm_swrss = vmspace_resident_count(p->p_vmspace);
	lwkt_reltoken(&p->p_vmspace->vm_map.token);
	p->p_flag |= P_SWAPPEDOUT;
	p->p_swtime = 0;
}

#endif /* !NO_SWAPPING */