1 /* $NetBSD: uvm_anon.c,v 1.7 2000/06/27 17:29:18 mrg Exp $ */ 2 3 /* 4 * 5 * Copyright (c) 1997 Charles D. Cranor and Washington University. 6 * All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 3. All advertising materials mentioning features or use of this software 17 * must display the following acknowledgement: 18 * This product includes software developed by Charles D. Cranor and 19 * Washington University. 20 * 4. The name of the author may not be used to endorse or promote products 21 * derived from this software without specific prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 24 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 25 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 26 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 27 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 28 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 32 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 */ 34 35 /* 36 * uvm_anon.c: uvm anon ops 37 */ 38 39 #include "opt_uvmhist.h" 40 41 #include <sys/param.h> 42 #include <sys/systm.h> 43 #include <sys/proc.h> 44 #include <sys/malloc.h> 45 #include <sys/pool.h> 46 #include <sys/kernel.h> 47 48 #include <uvm/uvm.h> 49 #include <uvm/uvm_swap.h> 50 51 /* 52 * anonblock_list: global list of anon blocks, 53 * locked by swap_syscall_lock (since we never remove 54 * anything from this list and we only add to it via swapctl(2)). 55 */ 56 57 struct uvm_anonblock { 58 LIST_ENTRY(uvm_anonblock) list; 59 int count; 60 struct vm_anon *anons; 61 }; 62 static LIST_HEAD(anonlist, uvm_anonblock) anonblock_list; 63 64 65 static boolean_t anon_pagein __P((struct vm_anon *)); 66 67 68 /* 69 * allocate anons 70 */ 71 void 72 uvm_anon_init() 73 { 74 int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */ 75 76 simple_lock_init(&uvm.afreelock); 77 LIST_INIT(&anonblock_list); 78 79 /* 80 * Allocate the initial anons. 81 */ 82 uvm_anon_add(nanon); 83 } 84 85 /* 86 * add some more anons to the free pool. called when we add 87 * more swap space. 88 * 89 * => swap_syscall_lock should be held (protects anonblock_list). 90 */ 91 void 92 uvm_anon_add(count) 93 int count; 94 { 95 struct uvm_anonblock *anonblock; 96 struct vm_anon *anon; 97 int lcv, needed; 98 99 simple_lock(&uvm.afreelock); 100 uvmexp.nanonneeded += count; 101 needed = uvmexp.nanonneeded - uvmexp.nanon; 102 simple_unlock(&uvm.afreelock); 103 104 if (needed <= 0) { 105 return; 106 } 107 108 MALLOC(anonblock, void *, sizeof(*anonblock), M_UVMAMAP, M_WAITOK); 109 anon = (void *)uvm_km_alloc(kernel_map, sizeof(*anon) * needed); 110 111 /* XXX Should wait for VM to free up. */ 112 if (anonblock == NULL || anon == NULL) { 113 printf("uvm_anon_add: can not allocate %d anons\n", needed); 114 panic("uvm_anon_add"); 115 } 116 117 anonblock->count = needed; 118 anonblock->anons = anon; 119 LIST_INSERT_HEAD(&anonblock_list, anonblock, list); 120 memset(anon, 0, sizeof(*anon) * needed); 121 122 simple_lock(&uvm.afreelock); 123 uvmexp.nanon += needed; 124 uvmexp.nfreeanon += needed; 125 for (lcv = 0; lcv < needed; lcv++) { 126 simple_lock_init(&anon->an_lock); 127 anon[lcv].u.an_nxt = uvm.afree; 128 uvm.afree = &anon[lcv]; 129 simple_lock_init(&uvm.afree->an_lock); 130 } 131 simple_unlock(&uvm.afreelock); 132 } 133 134 /* 135 * remove anons from the free pool. 136 */ 137 void 138 uvm_anon_remove(count) 139 int count; 140 { 141 /* 142 * we never actually free any anons, to avoid allocation overhead. 143 * XXX someday we might want to try to free anons. 144 */ 145 146 simple_lock(&uvm.afreelock); 147 uvmexp.nanonneeded -= count; 148 simple_unlock(&uvm.afreelock); 149 } 150 151 /* 152 * allocate an anon 153 */ 154 struct vm_anon * 155 uvm_analloc() 156 { 157 struct vm_anon *a; 158 159 simple_lock(&uvm.afreelock); 160 a = uvm.afree; 161 if (a) { 162 uvm.afree = a->u.an_nxt; 163 uvmexp.nfreeanon--; 164 a->an_ref = 1; 165 a->an_swslot = 0; 166 a->u.an_page = NULL; /* so we can free quickly */ 167 } 168 simple_unlock(&uvm.afreelock); 169 return(a); 170 } 171 172 /* 173 * uvm_anfree: free a single anon structure 174 * 175 * => caller must remove anon from its amap before calling (if it was in 176 * an amap). 177 * => anon must be unlocked and have a zero reference count. 178 * => we may lock the pageq's. 179 */ 180 void 181 uvm_anfree(anon) 182 struct vm_anon *anon; 183 { 184 struct vm_page *pg; 185 UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist); 186 UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0); 187 188 /* 189 * get page 190 */ 191 192 pg = anon->u.an_page; 193 194 /* 195 * if there is a resident page and it is loaned, then anon may not 196 * own it. call out to uvm_anon_lockpage() to ensure the real owner 197 * of the page has been identified and locked. 198 */ 199 200 if (pg && pg->loan_count) 201 pg = uvm_anon_lockloanpg(anon); 202 203 /* 204 * if we have a resident page, we must dispose of it before freeing 205 * the anon. 206 */ 207 208 if (pg) { 209 210 /* 211 * if the page is owned by a uobject (now locked), then we must 212 * kill the loan on the page rather than free it. 213 */ 214 215 if (pg->uobject) { 216 217 /* kill loan */ 218 uvm_lock_pageq(); 219 #ifdef DIAGNOSTIC 220 if (pg->loan_count < 1) 221 panic("uvm_anfree: obj owned page " 222 "with no loan count"); 223 #endif 224 pg->loan_count--; 225 pg->uanon = NULL; 226 uvm_unlock_pageq(); 227 simple_unlock(&pg->uobject->vmobjlock); 228 229 } else { 230 231 /* 232 * page has no uobject, so we must be the owner of it. 233 * 234 * if page is busy then we just mark it as released 235 * (who ever has it busy must check for this when they 236 * wake up). if the page is not busy then we can 237 * free it now. 238 */ 239 240 if ((pg->flags & PG_BUSY) != 0) { 241 /* tell them to dump it when done */ 242 pg->flags |= PG_RELEASED; 243 UVMHIST_LOG(maphist, 244 " anon 0x%x, page 0x%x: BUSY (released!)", 245 anon, pg, 0, 0); 246 return; 247 } 248 249 pmap_page_protect(pg, VM_PROT_NONE); 250 uvm_lock_pageq(); /* lock out pagedaemon */ 251 uvm_pagefree(pg); /* bye bye */ 252 uvm_unlock_pageq(); /* free the daemon */ 253 254 UVMHIST_LOG(maphist," anon 0x%x, page 0x%x: freed now!", 255 anon, pg, 0, 0); 256 } 257 } 258 259 /* 260 * free any swap resources. 261 */ 262 uvm_anon_dropswap(anon); 263 264 /* 265 * now that we've stripped the data areas from the anon, free the anon 266 * itself! 267 */ 268 simple_lock(&uvm.afreelock); 269 anon->u.an_nxt = uvm.afree; 270 uvm.afree = anon; 271 uvmexp.nfreeanon++; 272 simple_unlock(&uvm.afreelock); 273 UVMHIST_LOG(maphist,"<- done!",0,0,0,0); 274 } 275 276 /* 277 * uvm_anon_dropswap: release any swap resources from this anon. 278 * 279 * => anon must be locked or have a reference count of 0. 280 */ 281 void 282 uvm_anon_dropswap(anon) 283 struct vm_anon *anon; 284 { 285 UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist); 286 if (anon->an_swslot == 0) { 287 return; 288 } 289 290 UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x", 291 anon, anon->an_swslot, 0, 0); 292 uvm_swap_free(anon->an_swslot, 1); 293 anon->an_swslot = 0; 294 295 if (anon->u.an_page == NULL) { 296 /* this page is no longer only in swap. */ 297 simple_lock(&uvm.swap_data_lock); 298 uvmexp.swpgonly--; 299 simple_unlock(&uvm.swap_data_lock); 300 } 301 } 302 303 /* 304 * uvm_anon_lockloanpg: given a locked anon, lock its resident page 305 * 306 * => anon is locked by caller 307 * => on return: anon is locked 308 * if there is a resident page: 309 * if it has a uobject, it is locked by us 310 * if it is ownerless, we take over as owner 311 * we return the resident page (it can change during 312 * this function) 313 * => note that the only time an anon has an ownerless resident page 314 * is if the page was loaned from a uvm_object and the uvm_object 315 * disowned it 316 * => this only needs to be called when you want to do an operation 317 * on an anon's resident page and that page has a non-zero loan 318 * count. 319 */ 320 struct vm_page * 321 uvm_anon_lockloanpg(anon) 322 struct vm_anon *anon; 323 { 324 struct vm_page *pg; 325 boolean_t locked = FALSE; 326 327 /* 328 * loop while we have a resident page that has a non-zero loan count. 329 * if we successfully get our lock, we will "break" the loop. 330 * note that the test for pg->loan_count is not protected -- this 331 * may produce false positive results. note that a false positive 332 * result may cause us to do more work than we need to, but it will 333 * not produce an incorrect result. 334 */ 335 336 while (((pg = anon->u.an_page) != NULL) && pg->loan_count != 0) { 337 338 /* 339 * quickly check to see if the page has an object before 340 * bothering to lock the page queues. this may also produce 341 * a false positive result, but that's ok because we do a real 342 * check after that. 343 * 344 * XXX: quick check -- worth it? need volatile? 345 */ 346 347 if (pg->uobject) { 348 349 uvm_lock_pageq(); 350 if (pg->uobject) { /* the "real" check */ 351 locked = 352 simple_lock_try(&pg->uobject->vmobjlock); 353 } else { 354 /* object disowned before we got PQ lock */ 355 locked = TRUE; 356 } 357 uvm_unlock_pageq(); 358 359 /* 360 * if we didn't get a lock (try lock failed), then we 361 * toggle our anon lock and try again 362 */ 363 364 if (!locked) { 365 simple_unlock(&anon->an_lock); 366 /* 367 * someone locking the object has a chance to 368 * lock us right now 369 */ 370 simple_lock(&anon->an_lock); 371 continue; /* start over */ 372 } 373 } 374 375 /* 376 * if page is un-owned [i.e. the object dropped its ownership], 377 * then we can take over as owner! 378 */ 379 380 if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) { 381 uvm_lock_pageq(); 382 pg->pqflags |= PQ_ANON; /* take ownership... */ 383 pg->loan_count--; /* ... and drop our loan */ 384 uvm_unlock_pageq(); 385 } 386 387 /* 388 * we did it! break the loop 389 */ 390 break; 391 } 392 393 /* 394 * done! 395 */ 396 397 return(pg); 398 } 399 400 401 402 /* 403 * page in every anon that is paged out to a range of swslots. 404 * 405 * swap_syscall_lock should be held (protects anonblock_list). 406 */ 407 408 boolean_t 409 anon_swap_off(startslot, endslot) 410 int startslot, endslot; 411 { 412 struct uvm_anonblock *anonblock; 413 414 for (anonblock = LIST_FIRST(&anonblock_list); 415 anonblock != NULL; 416 anonblock = LIST_NEXT(anonblock, list)) { 417 int i; 418 419 /* 420 * loop thru all the anons in the anonblock, 421 * paging in where needed. 422 */ 423 424 for (i = 0; i < anonblock->count; i++) { 425 struct vm_anon *anon = &anonblock->anons[i]; 426 int slot; 427 428 /* 429 * lock anon to work on it. 430 */ 431 432 simple_lock(&anon->an_lock); 433 434 /* 435 * is this anon's swap slot in range? 436 */ 437 438 slot = anon->an_swslot; 439 if (slot >= startslot && slot < endslot) { 440 boolean_t rv; 441 442 /* 443 * yup, page it in. 444 */ 445 446 /* locked: anon */ 447 rv = anon_pagein(anon); 448 /* unlocked: anon */ 449 450 if (rv) { 451 return rv; 452 } 453 } else { 454 455 /* 456 * nope, unlock and proceed. 457 */ 458 459 simple_unlock(&anon->an_lock); 460 } 461 } 462 } 463 return FALSE; 464 } 465 466 467 /* 468 * fetch an anon's page. 469 * 470 * => anon must be locked, and is unlocked upon return. 471 * => returns TRUE if pagein was aborted due to lack of memory. 472 */ 473 474 static boolean_t 475 anon_pagein(anon) 476 struct vm_anon *anon; 477 { 478 struct vm_page *pg; 479 struct uvm_object *uobj; 480 int rv; 481 UVMHIST_FUNC("anon_pagein"); UVMHIST_CALLED(pdhist); 482 483 /* locked: anon */ 484 rv = uvmfault_anonget(NULL, NULL, anon); 485 /* unlocked: anon */ 486 487 switch (rv) { 488 case VM_PAGER_OK: 489 break; 490 491 case VM_PAGER_ERROR: 492 case VM_PAGER_REFAULT: 493 494 /* 495 * nothing more to do on errors. 496 * VM_PAGER_REFAULT can only mean that the anon was freed, 497 * so again there's nothing to do. 498 */ 499 500 return FALSE; 501 502 #ifdef DIAGNOSTIC 503 default: 504 panic("anon_pagein: uvmfault_anonget -> %d", rv); 505 #endif 506 } 507 508 /* 509 * ok, we've got the page now. 510 * mark it as dirty, clear its swslot and un-busy it. 511 */ 512 513 pg = anon->u.an_page; 514 uobj = pg->uobject; 515 uvm_swap_free(anon->an_swslot, 1); 516 anon->an_swslot = 0; 517 pg->flags &= ~(PG_CLEAN); 518 519 /* 520 * deactivate the page (to put it on a page queue) 521 */ 522 523 pmap_clear_reference(pg); 524 pmap_page_protect(pg, VM_PROT_NONE); 525 uvm_lock_pageq(); 526 uvm_pagedeactivate(pg); 527 uvm_unlock_pageq(); 528 529 /* 530 * unlock the anon and we're done. 531 */ 532 533 simple_unlock(&anon->an_lock); 534 if (uobj) { 535 simple_unlock(&uobj->vmobjlock); 536 } 537 return FALSE; 538 } 539