/*	$NetBSD: uvm_anon.c,v 1.18 2001/09/15 20:36:44 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

/*
 * anonblock_list: global list of anon blocks,
 * locked by swap_syscall_lock (since we never remove
 * anything from this list and we only add to it via swapctl(2)).
 */

struct uvm_anonblock {
	LIST_ENTRY(uvm_anonblock) list;
	int count;
	struct vm_anon *anons;
};
static LIST_HEAD(anonlist, uvm_anonblock) anonblock_list;


static boolean_t anon_pagein __P((struct vm_anon *));


/*
 * allocate anons
 */
void
uvm_anon_init()
{
	int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */

	simple_lock_init(&uvm.afreelock);
	LIST_INIT(&anonblock_list);

	/*
	 * Allocate the initial anons.
	 */
	uvm_anon_add(nanon);
}
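/*
 * Note (illustrative only): free anons are kept on a singly-linked
 * list headed by uvm.afree and chained through the u.an_nxt field,
 * so uvm_analloc() and uvm_anfree() below are O(1) list pushes and
 * pops under uvm.afreelock, e.g.:
 *
 *	uvm.afree --> anon[7] --> anon[3] --> anon[0] --> NULL
 *
 * anonblock_list only records the backing allocations for the sake
 * of anon_swap_off(); it is never walked on the alloc/free fast path.
 */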
/*
 * add some more anons to the free pool.  called when we add
 * more swap space.
 *
 * => swap_syscall_lock should be held (protects anonblock_list).
 */
int
uvm_anon_add(count)
	int	count;
{
	struct uvm_anonblock *anonblock;
	struct vm_anon *anon;
	int lcv, needed;

	simple_lock(&uvm.afreelock);
	uvmexp.nanonneeded += count;
	needed = uvmexp.nanonneeded - uvmexp.nanon;
	simple_unlock(&uvm.afreelock);

	if (needed <= 0) {
		return 0;
	}
	anon = (void *)uvm_km_alloc(kernel_map, sizeof(*anon) * needed);
	if (anon == NULL) {
		simple_lock(&uvm.afreelock);
		uvmexp.nanonneeded -= count;
		simple_unlock(&uvm.afreelock);
		return ENOMEM;
	}
	MALLOC(anonblock, void *, sizeof(*anonblock), M_UVMAMAP, M_WAITOK);

	anonblock->count = needed;
	anonblock->anons = anon;
	LIST_INSERT_HEAD(&anonblock_list, anonblock, list);
	memset(anon, 0, sizeof(*anon) * needed);

	simple_lock(&uvm.afreelock);
	uvmexp.nanon += needed;
	uvmexp.nfreeanon += needed;

	/* initialize each new anon's lock and push it on the free list */
	for (lcv = 0; lcv < needed; lcv++) {
		simple_lock_init(&anon[lcv].an_lock);
		anon[lcv].u.an_nxt = uvm.afree;
		uvm.afree = &anon[lcv];
	}
	simple_unlock(&uvm.afreelock);
	return 0;
}

/*
 * remove anons from the free pool.
 */
void
uvm_anon_remove(count)
	int count;
{
	/*
	 * we never actually free any anons, to avoid allocation overhead.
	 * XXX someday we might want to try to free anons.
	 */

	simple_lock(&uvm.afreelock);
	uvmexp.nanonneeded -= count;
	simple_unlock(&uvm.afreelock);
}

/*
 * allocate an anon
 *
 * => new anon is returned locked!
 */
struct vm_anon *
uvm_analloc()
{
	struct vm_anon *a;

	simple_lock(&uvm.afreelock);
	a = uvm.afree;
	if (a) {
		uvm.afree = a->u.an_nxt;
		uvmexp.nfreeanon--;
		a->an_ref = 1;
		a->an_swslot = 0;
		a->u.an_page = NULL;		/* so we can free quickly */
		LOCK_ASSERT(simple_lock_held(&a->an_lock) == 0);
		simple_lock(&a->an_lock);
	}
	simple_unlock(&uvm.afreelock);
	return(a);
}
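/*
 * Usage sketch (hypothetical caller, not part of this file): the
 * allocation and free routines above pair up like this for an anon
 * that never makes it into an amap:
 *
 *	struct vm_anon *a;
 *
 *	a = uvm_analloc();		(returned locked, an_ref == 1)
 *	if (a == NULL)
 *		return ENOMEM;
 *	... give the anon a page or swap slot ...
 *	a->an_ref = 0;			(drop the last reference)
 *	simple_unlock(&a->an_lock);	(uvm_anfree wants it unlocked)
 *	uvm_anfree(a);
 */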
/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *	an amap).
 * => anon must be unlocked and have a zero reference count.
 * => we may lock the pageq's.
 */
void
uvm_anfree(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);

	KASSERT(anon->an_ref == 0);
	LOCK_ASSERT(!simple_lock_held(&anon->an_lock));

	/*
	 * get page
	 */

	pg = anon->u.an_page;

	/*
	 * if there is a resident page and it is loaned, then anon may not
	 * own it.  call out to uvm_anon_lockloanpg() to ensure the real owner
	 * of the page has been identified and locked.
	 */

	if (pg && pg->loan_count)
		pg = uvm_anon_lockloanpg(anon);

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */

	if (pg) {

		/*
		 * if the page is owned by a uobject (now locked), then we must
		 * kill the loan on the page rather than free it.
		 */

		if (pg->uobject) {
			uvm_lock_pageq();
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->uanon = NULL;
			uvm_unlock_pageq();
			simple_unlock(&pg->uobject->vmobjlock);
		} else {

			/*
			 * page has no uobject, so we must be the owner of it.
			 * if page is busy then we wait until it is not busy,
			 * and then free it.
			 */

			KASSERT((pg->flags & PG_RELEASED) == 0);
			pmap_page_protect(pg, VM_PROT_NONE);
			while ((pg = anon->u.an_page) &&
			       (pg->flags & PG_BUSY) != 0) {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, &anon->an_lock, 0,
				    "anfree", 0);
				simple_lock(&anon->an_lock);
			}
			if (pg) {
				uvm_lock_pageq();
				uvm_pagefree(pg);
				uvm_unlock_pageq();
			}
			UVMHIST_LOG(maphist, "anon 0x%x, page 0x%x: "
			    "freed now!", anon, pg, 0, 0);
		}
	}

	/*
	 * free any swap resources.
	 */

	uvm_anon_dropswap(anon);

	/*
	 * now that we've stripped the data areas from the anon,
	 * free the anon itself.
	 */

	simple_lock(&uvm.afreelock);
	anon->u.an_nxt = uvm.afree;
	uvm.afree = anon;
	uvmexp.nfreeanon++;
	simple_unlock(&uvm.afreelock);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}

/*
 * uvm_anon_dropswap: release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(anon)
	struct vm_anon *anon;
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);

	if (anon->an_swslot == 0)
		return;

	UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
	    anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;

	if (anon->u.an_page == NULL) {
		/* this page is no longer only in swap. */
		simple_lock(&uvm.swap_data_lock);
		uvmexp.swpgonly--;
		simple_unlock(&uvm.swap_data_lock);
	}
}

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *	if there is a resident page:
 *		if it has a uobject, it is locked by us
 *		if it is ownerless, we take over as owner
 *	we return the resident page (it can change during
 *	this function)
 * => note that the only time an anon has an ownerless resident page
 *	is if the page was loaned from a uvm_object and the uvm_object
 *	disowned it
 * => this only needs to be called when you want to do an operation
 *	on an anon's resident page and that page has a non-zero loan
 *	count.
 */
struct vm_page *
uvm_anon_lockloanpg(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	boolean_t locked = FALSE;

	LOCK_ASSERT(simple_lock_held(&anon->an_lock));

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.  note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->u.an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.  this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 */

		if (pg->uobject) {
			uvm_lock_pageq();
			if (pg->uobject) {
				locked =
				    simple_lock_try(&pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = TRUE;
			}
			uvm_unlock_pageq();

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				simple_unlock(&anon->an_lock);

				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 */

				simple_lock(&anon->an_lock);
				continue;
			}
		}

		/*
		 * if page is un-owned [i.e. the object dropped its ownership],
		 * then we can take over as owner!
		 */

		if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
			uvm_lock_pageq();
			pg->pqflags |= PQ_ANON;		/* take ownership */
			pg->loan_count--;	/* ... and drop our loan */
			uvm_unlock_pageq();
		}
		break;
	}
	return(pg);
}
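/*
 * Note on the simple_lock_try() dance above (a sketch of the
 * reasoning, not a statement of the global lock order): other paths
 * presumably take the object lock before the anon lock, so taking
 * the object lock while already holding the anon lock could
 * deadlock.  uvm_anon_lockloanpg() therefore only *tries* the object
 * lock, and on failure briefly drops and retakes the anon lock so
 * that a thread holding the object lock can lock the anon and make
 * progress, i.e.:
 *
 *	if (!simple_lock_try(&pg->uobject->vmobjlock)) {
 *		simple_unlock(&anon->an_lock);	(let the other side in)
 *		simple_lock(&anon->an_lock);
 *		continue;			(re-check everything)
 *	}
 */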
/*
 * page in every anon that is paged out to a range of swslots.
 *
 * swap_syscall_lock should be held (protects anonblock_list).
 */

boolean_t
anon_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_anonblock *anonblock;

	LIST_FOREACH(anonblock, &anonblock_list, list) {
		int i;

		/*
		 * loop thru all the anons in the anonblock,
		 * paging in where needed.
		 */

		for (i = 0; i < anonblock->count; i++) {
			struct vm_anon *anon = &anonblock->anons[i];
			int slot;

			/*
			 * lock anon to work on it.
			 */

			simple_lock(&anon->an_lock);

			/*
			 * is this anon's swap slot in range?
			 */

			slot = anon->an_swslot;
			if (slot >= startslot && slot < endslot) {
				boolean_t rv;

				/*
				 * yup, page it in.
				 */

				/* locked: anon */
				rv = anon_pagein(anon);
				/* unlocked: anon */

				if (rv) {
					return rv;
				}
			} else {

				/*
				 * nope, unlock and proceed.
				 */

				simple_unlock(&anon->an_lock);
			}
		}
	}
	return FALSE;
}


/*
 * fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */

static boolean_t
anon_pagein(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int rv;

	/* locked: anon */
	LOCK_ASSERT(simple_lock_held(&anon->an_lock));

	rv = uvmfault_anonget(NULL, NULL, anon);

	/*
	 * if rv == 0, anon is still locked, else anon
	 * is unlocked
	 */

	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return FALSE;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty and clear its swslot.
	 */

	pg = anon->u.an_page;
	uobj = pg->uobject;
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
	pg->flags &= ~(PG_CLEAN);

	/*
	 * deactivate the page (to put it on a page queue)
	 */

	pmap_clear_reference(pg);
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	/*
	 * unlock the anon and we're done.
	 */

	simple_unlock(&anon->an_lock);
	if (uobj) {
		simple_unlock(&uobj->vmobjlock);
	}
	return FALSE;
}
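/*
 * Caller sketch (hypothetical; names assumed from uvm_swap.c): the
 * swapctl(2) removal path is expected to drive anon_swap_off() over
 * the slot range of the device being drained, aborting the removal
 * if some anon could not be paged in:
 *
 *	if (anon_swap_off(sdp->swd_drumoffset,
 *	    sdp->swd_drumoffset + sdp->swd_drumsize))
 *		return ENOMEM;
 */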