/*	$NetBSD: uvm_glue.c,v 1.138 2009/06/28 15:18:51 rmind Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *	Washington University, the University of California, Berkeley and
 *	its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_glue.c	8.6 (Berkeley) 1/5/94
 * from: Id: uvm_glue.c,v 1.1.2.8 1998/02/07 01:16:54 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_glue.c,v 1.138 2009/06/28 15:18:51 rmind Exp $");

#include "opt_kgdb.h"
#include "opt_kstack.h"
#include "opt_uvmhist.h"

/*
 * uvm_glue.c: glue functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/resourcevar.h>
#include <sys/buf.h>
#include <sys/user.h>
#include <sys/syncobj.h>
#include <sys/cpu.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>

/*
 * local prototypes
 */

static void uvm_swapout(struct lwp *);
static int uarea_swapin(vaddr_t);

/*
 * XXXCDC: do these really belong here?
 */

/*
 * uvm_kernacc: can the kernel access a region of memory
 *
 * - used only by /dev/kmem driver (mem.c)
 */

bool
uvm_kernacc(void *addr, size_t len, int rw)
{
	bool rv;
	vaddr_t saddr, eaddr;
	vm_prot_t prot = rw == B_READ ? VM_PROT_READ : VM_PROT_WRITE;

	saddr = trunc_page((vaddr_t)addr);
	eaddr = round_page((vaddr_t)addr + len);
	vm_map_lock_read(kernel_map);
	rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
	vm_map_unlock_read(kernel_map);

	return(rv);
}

#ifdef KGDB
/*
 * Change protections on kernel pages from addr to addr+len
 * (presumably so debugger can plant a breakpoint).
 *
 * We force the protection change at the pmap level.  If we were
 * to use vm_map_protect a change to allow writing would be lazily-
 * applied meaning we would still take a protection fault, something
 * we really don't want to do.  It would also fragment the kernel
 * map unnecessarily.  We cannot use pmap_protect since it also won't
 * enforce a write-enable request.  Using pmap_enter is the only way
 * we can ensure the change takes place properly.
 */
void
uvm_chgkprot(void *addr, size_t len, int rw)
{
	vm_prot_t prot;
	paddr_t pa;
	vaddr_t sva, eva;

	prot = rw == B_READ ? VM_PROT_READ : VM_PROT_READ|VM_PROT_WRITE;
	eva = round_page((vaddr_t)addr + len);
	for (sva = trunc_page((vaddr_t)addr); sva < eva; sva += PAGE_SIZE) {
		/*
		 * Extract physical address for the page.
		 */
		if (pmap_extract(pmap_kernel(), sva, &pa) == false)
			panic("%s: invalid page", __func__);
		pmap_enter(pmap_kernel(), sva, pa, prot, PMAP_WIRED);
	}
	pmap_update(pmap_kernel());
}
#endif

/*
 * uvm_vslock: wire user memory for I/O
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

int
uvm_vslock(struct vmspace *vs, void *addr, size_t len, vm_prot_t access_type)
{
	struct vm_map *map;
	vaddr_t start, end;
	int error;

	map = &vs->vm_map;
	start = trunc_page((vaddr_t)addr);
	end = round_page((vaddr_t)addr + len);
	error = uvm_fault_wire(map, start, end, access_type, 0);
	return error;
}

/*
 * uvm_vsunlock: unwire user memory wired by uvm_vslock()
 *
 * - called from physio and sys___sysctl
 * - XXXCDC: consider nuking this (or making it a macro?)
 */

void
uvm_vsunlock(struct vmspace *vs, void *addr, size_t len)
{
	uvm_fault_unwire(&vs->vm_map, trunc_page((vaddr_t)addr),
	    round_page((vaddr_t)addr + len));
}
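
/*
 * Typical pairing of the two functions above (a minimal sketch, not code
 * taken from physio; "uva" and "len" stand in for the caller's user
 * buffer): wire the buffer before doing I/O directly on it, then unwire
 * it.  Note that a device-to-memory transfer needs VM_PROT_WRITE, since
 * the kernel will be storing into the user pages.
 *
 *	error = uvm_vslock(p->p_vmspace, uva, len, VM_PROT_WRITE);
 *	if (error != 0)
 *		return error;
 *	... transfer to/from the now-wired pages ...
 *	uvm_vsunlock(p->p_vmspace, uva, len);
 */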

/*
 * uvm_proc_fork: fork a virtual address space
 *
 * - the address space is copied as per parent map's inherit values
 */
void
uvm_proc_fork(struct proc *p1, struct proc *p2, bool shared)
{

	if (shared == true) {
		p2->p_vmspace = NULL;
		uvmspace_share(p1, p2);
	} else {
		p2->p_vmspace = uvmspace_fork(p1->p_vmspace);
	}

	cpu_proc_fork(p1, p2);
}


/*
 * uvm_lwp_fork: fork a thread
 *
 * - a new "user" structure is allocated for the child process
 *	[filled in by MD layer...]
 * - if specified, the child gets a new user stack described by
 *	stack and stacksize
 * - NOTE: the kernel stack may be at a different location in the child
 *	process, and thus addresses of automatic variables may be invalid
 *	after cpu_lwp_fork returns in the child process.  We do nothing here
 *	after cpu_lwp_fork returns.
 * - XXXCDC: we need a way for this to return a failure value rather
 *   than just hang
 */
void
uvm_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	int error;

	/*
	 * Wire down the U-area for the process, which contains the PCB
	 * and the kernel stack.  Wired state is stored in the LW_INMEM
	 * bit of l->l_flag rather than in the vm_map_entry's wired count
	 * to prevent kernel_map fragmentation.  If we reused a cached
	 * U-area, LW_INMEM will already be set and we don't need to do
	 * anything.
	 *
	 * Note the kernel stack gets read/write accesses right off the bat.
	 */

	if ((l2->l_flag & LW_INMEM) == 0) {
		vaddr_t uarea = USER_TO_UAREA(l2->l_addr);

		if ((error = uarea_swapin(uarea)) != 0)
			panic("%s: uvm_fault_wire failed: %d", __func__, error);
#ifdef PMAP_UAREA
		/* Tell the pmap this is a u-area mapping */
		PMAP_UAREA(uarea);
#endif
		l2->l_flag |= LW_INMEM;
	}

	/* Fill stack with magic number. */
	kstack_setup_magic(l2);

	/*
	 * cpu_lwp_fork() copies and updates the pcb, and makes the child
	 * ready to run.  If this is a normal user fork, the child will exit
	 * directly to user mode via child_return() on its first time
	 * slice and will not return here.  If this is a kernel thread,
	 * the specified entry point will be executed.
	 */
	cpu_lwp_fork(l1, l2, stack, stacksize, func, arg);

	/* Inactive emap for new LWP. */
	l2->l_emap_gen = UVM_EMAP_INACTIVE;
}

static int
uarea_swapin(vaddr_t addr)
{

	return uvm_fault_wire(kernel_map, addr, addr + USPACE,
	    VM_PROT_READ | VM_PROT_WRITE, 0);
}

static void
uarea_swapout(vaddr_t addr)
{

	uvm_fault_unwire(kernel_map, addr, addr + USPACE);
}
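
/*
 * U-area pool cache.  A rough sketch of how the pieces below fit together
 * (inferred from this file): uarea_poolpage_alloc() obtains pageable
 * virtual addresses from kernel_map, uarea_ctor() wires them via
 * uarea_swapin(), and the pool cache keeps constructed objects around,
 * so a u-area handed out by uvm_uarea_alloc() is normally already wired;
 * see the LW_INMEM short-cut in uvm_lwp_fork() above.
 */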

#ifndef USPACE_ALIGN
#define	USPACE_ALIGN	0
#endif

static pool_cache_t uvm_uarea_cache;

static int
uarea_ctor(void *arg, void *obj, int flags)
{

	KASSERT((flags & PR_WAITOK) != 0);
	return uarea_swapin((vaddr_t)obj);
}

static void *
uarea_poolpage_alloc(struct pool *pp, int flags)
{

	return (void *)uvm_km_alloc(kernel_map, pp->pr_alloc->pa_pagesz,
	    USPACE_ALIGN, UVM_KMF_PAGEABLE |
	    ((flags & PR_WAITOK) != 0 ? UVM_KMF_WAITVA :
	    (UVM_KMF_NOWAIT | UVM_KMF_TRYLOCK)));
}

static void
uarea_poolpage_free(struct pool *pp, void *addr)
{

	uvm_km_free(kernel_map, (vaddr_t)addr, pp->pr_alloc->pa_pagesz,
	    UVM_KMF_PAGEABLE);
}

static struct pool_allocator uvm_uarea_allocator = {
	.pa_alloc = uarea_poolpage_alloc,
	.pa_free = uarea_poolpage_free,
	.pa_pagesz = USPACE,
};

void
uvm_uarea_init(void)
{
	int flags = PR_NOTOUCH;

	/*
	 * specify PR_NOALIGN unless the alignment provided by
	 * the backend (USPACE_ALIGN) is sufficient to provide
	 * pool page size (USPACE) alignment.
	 */

	if ((USPACE_ALIGN == 0 && USPACE != PAGE_SIZE) ||
	    (USPACE_ALIGN % USPACE) != 0) {
		flags |= PR_NOALIGN;
	}

	uvm_uarea_cache = pool_cache_init(USPACE, USPACE_ALIGN, 0, flags,
	    "uarea", &uvm_uarea_allocator, IPL_NONE, uarea_ctor, NULL, NULL);
}

/*
 * uvm_uarea_alloc: allocate a u-area
 */

bool
uvm_uarea_alloc(vaddr_t *uaddrp)
{

	*uaddrp = (vaddr_t)pool_cache_get(uvm_uarea_cache, PR_WAITOK);
	return true;
}

/*
 * uvm_uarea_free: free a u-area
 */

void
uvm_uarea_free(vaddr_t uaddr, struct cpu_info *ci)
{

	pool_cache_put(uvm_uarea_cache, (void *)uaddr);
}

/*
 * uvm_proc_exit: exit a virtual address space
 *
 * - borrow proc0's address space because freeing the vmspace
 *   of the dead process may block.
 */

void
uvm_proc_exit(struct proc *p)
{
	struct lwp *l = curlwp;	/* XXX */
	struct vmspace *ovm;

	KASSERT(p == l->l_proc);
	ovm = p->p_vmspace;

	/*
	 * borrow proc0's address space.
	 */
	KPREEMPT_DISABLE(l);
	pmap_deactivate(l);
	p->p_vmspace = proc0.p_vmspace;
	pmap_activate(l);
	KPREEMPT_ENABLE(l);

	uvmspace_free(ovm);
}

void
uvm_lwp_exit(struct lwp *l)
{
	vaddr_t va = USER_TO_UAREA(l->l_addr);

	l->l_flag &= ~LW_INMEM;
	uvm_uarea_free(va, l->l_cpu);
	l->l_addr = NULL;
}

/*
 * uvm_init_limits: init per-process VM limits
 *
 * - called for process 0 and then inherited by all others.
 */

void
uvm_init_limits(struct proc *p)
{

	/*
	 * Set up the initial limits on process VM.  Set the maximum
	 * resident set size to be all of (reasonably) available memory.
	 * This causes any single, large process to start random page
	 * replacement once it fills memory.
	 */

	p->p_rlimit[RLIMIT_STACK].rlim_cur = DFLSSIZ;
	p->p_rlimit[RLIMIT_STACK].rlim_max = maxsmap;
	p->p_rlimit[RLIMIT_DATA].rlim_cur = DFLDSIZ;
	p->p_rlimit[RLIMIT_DATA].rlim_max = maxdmap;
	p->p_rlimit[RLIMIT_AS].rlim_cur = RLIM_INFINITY;
	p->p_rlimit[RLIMIT_AS].rlim_max = RLIM_INFINITY;
	p->p_rlimit[RLIMIT_RSS].rlim_cur = ptoa(uvmexp.free);
}

#ifdef DEBUG
int	enableswap = 1;
int	swapdebug = 0;
#define	SDB_FOLLOW	1
#define	SDB_SWAPIN	2
#define	SDB_SWAPOUT	4
#endif

/*
 * uvm_swapin: swap in an lwp's u-area.
 *
 * - must be called with the LWP's swap lock held.
 * - naturally, must not be called with l == curlwp
 */

void
uvm_swapin(struct lwp *l)
{
	int error;

	KASSERT(mutex_owned(&l->l_swaplock));
	KASSERT(l != curlwp);

	error = uarea_swapin(USER_TO_UAREA(l->l_addr));
	if (error) {
		panic("%s: rewiring stack failed: %d", __func__, error);
	}

	/*
	 * Some architectures need to be notified when the user area has
	 * moved to new physical page(s) (e.g. see mips/mips/vm_machdep.c).
	 */
	cpu_swapin(l);
	lwp_lock(l);
	if (l->l_stat == LSRUN)
		sched_enqueue(l, false);
	l->l_flag |= LW_INMEM;
	l->l_swtime = 0;
	lwp_unlock(l);
	++uvmexp.swapins;
}

/*
 * uvm_kick_scheduler: kick the scheduler into action if not running.
 *
 * - called when swapped out processes have been awoken.
 */

void
uvm_kick_scheduler(void)
{

	if (uvm.swap_running == false)
		return;

	mutex_enter(&uvm_scheduler_mutex);
	uvm.scheduler_kicked = true;
	cv_signal(&uvm.scheduler_cv);
	mutex_exit(&uvm_scheduler_mutex);
}
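
/*
 * Note on candidate selection in uvm_scheduler() below (the numbers are
 * illustrative only, not from the source): a runnable, swapped-out LWP is
 * scored as l_swtime + l_slptime - (p_nice - NZERO) * 8.  For example, a
 * candidate with l_swtime + l_slptime == 30 and the default nice value
 * (p_nice == NZERO) scores 30, while the same candidate niced four steps
 * down scores 30 - 4 * 8 = -2, so the un-niced LWP is swapped in first.
 */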

/*
 * uvm_scheduler: process zero main loop
 *
 * - attempt to swap in every swapped-out, runnable process in order of
 *   priority.
 * - if not enough memory, wake the pagedaemon and let it clear space.
 */

void
uvm_scheduler(void)
{
	struct lwp *l, *ll;
	int pri;
	int ppri;

	l = curlwp;
	lwp_lock(l);
	l->l_priority = PRI_VM;
	l->l_class = SCHED_FIFO;
	lwp_unlock(l);

	for (;;) {
#ifdef DEBUG
		mutex_enter(&uvm_scheduler_mutex);
		while (!enableswap)
			cv_wait(&uvm.scheduler_cv, &uvm_scheduler_mutex);
		mutex_exit(&uvm_scheduler_mutex);
#endif
		ll = NULL;		/* process to choose */
		ppri = INT_MIN;		/* its priority */

		mutex_enter(proc_lock);
		LIST_FOREACH(l, &alllwp, l_list) {
			/* is it a runnable swapped out process? */
			if (l->l_stat == LSRUN && !(l->l_flag & LW_INMEM)) {
				pri = l->l_swtime + l->l_slptime -
				    (l->l_proc->p_nice - NZERO) * 8;
				if (pri > ppri) {	/* higher priority? */
					ll = l;
					ppri = pri;
				}
			}
		}
#ifdef DEBUG
		if (swapdebug & SDB_FOLLOW)
			printf("%s: running, procp %p pri %d\n", __func__, ll,
			    ppri);
#endif
		/*
		 * Nothing to do, back to sleep
		 */
		if ((l = ll) == NULL) {
			mutex_exit(proc_lock);
			mutex_enter(&uvm_scheduler_mutex);
			if (uvm.scheduler_kicked == false)
				cv_wait(&uvm.scheduler_cv,
				    &uvm_scheduler_mutex);
			uvm.scheduler_kicked = false;
			mutex_exit(&uvm_scheduler_mutex);
			continue;
		}

		/*
		 * we have found a swapped-out process which we would like
		 * to bring back in.
		 *
		 * XXX: this part is really bogus because we could deadlock
		 * on memory despite our feeble check
		 */
		if (uvmexp.free > atop(USPACE)) {
#ifdef DEBUG
			if (swapdebug & SDB_SWAPIN)
				printf("swapin: pid %d(%s)@%p, pri %d "
				    "free %d\n", l->l_proc->p_pid,
				    l->l_proc->p_comm, l->l_addr, ppri,
				    uvmexp.free);
#endif
			mutex_enter(&l->l_swaplock);
			mutex_exit(proc_lock);
			uvm_swapin(l);
			mutex_exit(&l->l_swaplock);
			continue;
		} else {
			/*
			 * not enough memory, jab the pageout daemon and
			 * wait until the coast is clear
			 */
			mutex_exit(proc_lock);
#ifdef DEBUG
			if (swapdebug & SDB_FOLLOW)
				printf("%s: no room for pid %d(%s),"
				    " free %d\n", __func__, l->l_proc->p_pid,
				    l->l_proc->p_comm, uvmexp.free);
#endif
			uvm_wait("schedpwait");
#ifdef DEBUG
			if (swapdebug & SDB_FOLLOW)
				printf("%s: room again, free %d\n", __func__,
				    uvmexp.free);
#endif
		}
	}
}

/*
 * swappable: is LWP "l" swappable?
 */

static bool
swappable(struct lwp *l)
{

	/* must be resident, not a system LWP, and not already exiting */
	if ((l->l_flag & (LW_INMEM|LW_SYSTEM|LW_WEXIT)) != LW_INMEM)
		return false;
	/* must not be running on a CPU right now */
	if ((l->l_pflag & LP_RUNNING) != 0)
		return false;
	/* must not be held in memory via uvm_lwp_hold() */
	if (l->l_holdcnt != 0)
		return false;
	/* only time-sharing LWPs are swapped */
	if (l->l_class != SCHED_OTHER)
		return false;
	/* must not be blocked on a kernel rwlock or mutex */
	if (l->l_syncobj == &rw_syncobj || l->l_syncobj == &mutex_syncobj)
		return false;
	/* the owning process must be active or stopped */
	if (l->l_proc->p_stat != SACTIVE && l->l_proc->p_stat != SSTOP)
		return false;
	return true;
}

/*
 * uvm_swapout_threads: find threads that can be swapped and unwire their
 *	u-areas.
 *
 * - called by the pagedaemon
 * - try to swap out at least one process
 * - processes that are sleeping or stopped for maxslp or more seconds
 *   are swapped... otherwise the longest-sleeping or stopped process
 *   is swapped, otherwise the longest resident process...
 */

void
uvm_swapout_threads(void)
{
	struct lwp *l;
	struct lwp *outl, *outl2;
	int outpri, outpri2;
	int didswap = 0;
	extern int maxslp;
	bool gotit;

	/* XXXCDC: should move off to uvmexp. or uvm., also in uvm_meter */

#ifdef DEBUG
	if (!enableswap)
		return;
#endif

	/*
	 * outl/outpri  : stop/sleep thread with largest sleeptime < maxslp
	 * outl2/outpri2: the longest resident thread (its swap time)
	 */
	outl = outl2 = NULL;
	outpri = outpri2 = 0;

 restart:
	mutex_enter(proc_lock);
	LIST_FOREACH(l, &alllwp, l_list) {
		KASSERT(l->l_proc != NULL);
		if (!mutex_tryenter(&l->l_swaplock))
			continue;
		if (!swappable(l)) {
			mutex_exit(&l->l_swaplock);
			continue;
		}
		switch (l->l_stat) {
		case LSONPROC:
			break;

		case LSRUN:
			if (l->l_swtime > outpri2) {
				outl2 = l;
				outpri2 = l->l_swtime;
			}
			break;

		case LSSLEEP:
		case LSSTOP:
			if (l->l_slptime >= maxslp) {
				mutex_exit(proc_lock);
				uvm_swapout(l);
				/*
				 * Locking in the wrong direction -
				 * try to prevent the LWP from exiting.
				 */
				gotit = mutex_tryenter(proc_lock);
				mutex_exit(&l->l_swaplock);
				didswap++;
				if (!gotit)
					goto restart;
				continue;
			} else if (l->l_slptime > outpri) {
				outl = l;
				outpri = l->l_slptime;
			}
			break;
		}
		mutex_exit(&l->l_swaplock);
	}

	/*
	 * If we didn't get rid of any real duds, toss out the next most
	 * likely sleeping/stopped or running candidate.  We only do this
	 * if we are real low on memory since we don't gain much by doing
	 * it (USPACE bytes).
	 */
	if (didswap == 0 && uvmexp.free <= atop(round_page(USPACE))) {
		if ((l = outl) == NULL)
			l = outl2;
#ifdef DEBUG
		if (swapdebug & SDB_SWAPOUT)
			printf("%s: no duds, try procp %p\n", __func__, l);
#endif
		if (l) {
			mutex_enter(&l->l_swaplock);
			mutex_exit(proc_lock);
			if (swappable(l))
				uvm_swapout(l);
			mutex_exit(&l->l_swaplock);
			return;
		}
	}

	mutex_exit(proc_lock);
}

/*
 * uvm_swapout: swap out lwp "l"
 *
 * - currently "swapout" means "unwire U-area" and "pmap_collect()"
 *   the pmap.
 * - must be called with l->l_swaplock held.
 * - XXXCDC: should deactivate all process' private anonymous memory
 */

static void
uvm_swapout(struct lwp *l)
{
	struct vm_map *map;

	KASSERT(mutex_owned(&l->l_swaplock));

#ifdef DEBUG
	if (swapdebug & SDB_SWAPOUT)
		printf("%s: lid %d.%d(%s)@%p, stat %x pri %d free %d\n",
		    __func__, l->l_proc->p_pid, l->l_lid, l->l_proc->p_comm,
		    l->l_addr, l->l_stat, l->l_slptime, uvmexp.free);
#endif

	/*
	 * Mark it as (potentially) swapped out.
	 */
	lwp_lock(l);
	if (!swappable(l)) {
		KDASSERT(l->l_cpu != curcpu());
		lwp_unlock(l);
		return;
	}
	l->l_flag &= ~LW_INMEM;
	l->l_swtime = 0;
	if (l->l_stat == LSRUN)
		sched_dequeue(l);
	lwp_unlock(l);
	l->l_ru.ru_nswap++;
	++uvmexp.swapouts;

	/*
	 * Do any machine-specific actions necessary before swapout.
	 * This can include saving floating point state, etc.
	 */
	cpu_swapout(l);

	/*
	 * Unwire the to-be-swapped process's user struct and kernel stack.
	 */
	uarea_swapout(USER_TO_UAREA(l->l_addr));
	map = &l->l_proc->p_vmspace->vm_map;
	if (vm_map_lock_try(map)) {
		pmap_collect(vm_map_pmap(map));
		vm_map_unlock(map);
	}
}

/*
 * uvm_lwp_hold: prevent lwp "l" from being swapped out, and bring it
 * back into memory if it is currently swapped.
 */

void
uvm_lwp_hold(struct lwp *l)
{

	if (l == curlwp) {
		atomic_inc_uint(&l->l_holdcnt);
	} else {
		mutex_enter(&l->l_swaplock);
		if (atomic_inc_uint_nv(&l->l_holdcnt) == 1 &&
		    (l->l_flag & LW_INMEM) == 0)
			uvm_swapin(l);
		mutex_exit(&l->l_swaplock);
	}
}

/*
 * uvm_lwp_rele: release a hold on lwp "l".  when the holdcount
 * drops to zero, it's eligible to be swapped.
 */

void
uvm_lwp_rele(struct lwp *l)
{

	KASSERT(l->l_holdcnt != 0);

	atomic_dec_uint(&l->l_holdcnt);
}