/*	$NetBSD: uvm_anon.c,v 1.51 2008/01/18 10:48:23 yamt Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.51 2008/01/18 10:48:23 yamt Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>
#include <uvm/uvm_pdpolicy.h>

static struct pool_cache uvm_anon_cache;

static int uvm_anon_ctor(void *, void *, int);
static void uvm_anon_dtor(void *, void *);

/*
 * uvm_anon_init: set up the anon subsystem (bootstrap the anon pool cache).
 */
void
uvm_anon_init(void)
{

	pool_cache_bootstrap(&uvm_anon_cache, sizeof(struct vm_anon), 0, 0,
	    PR_LARGECACHE, "anonpl", NULL, IPL_NONE, uvm_anon_ctor,
	    uvm_anon_dtor, NULL);
}

static int
uvm_anon_ctor(void *arg, void *object, int flags)
{
	struct vm_anon *anon = object;

	anon->an_ref = 0;
	mutex_init(&anon->an_lock, MUTEX_DEFAULT, IPL_NONE);
	anon->an_page = NULL;
#if defined(VMSWAP)
	anon->an_swslot = 0;
#endif /* defined(VMSWAP) */

	return 0;
}

static void
uvm_anon_dtor(void *arg, void *object)
{
	struct vm_anon *anon = object;

	mutex_destroy(&anon->an_lock);
}

/*
 * uvm_analloc: allocate a new anon
 *
 * => new anon is returned locked!
 */
struct vm_anon *
uvm_analloc(void)
{
	struct vm_anon *anon;

	anon = pool_cache_get(&uvm_anon_cache, PR_NOWAIT);
	if (anon) {
		KASSERT(anon->an_ref == 0);
		KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
		KASSERT(anon->an_swslot == 0);
#endif /* defined(VMSWAP) */
		anon->an_ref = 1;
		mutex_enter(&anon->an_lock);
	}
	return anon;
}
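
/*
 * Illustrative sketch only, not compiled: a minimal caller pattern for
 * uvm_analloc().  The allocation can fail because the pool is accessed
 * with PR_NOWAIT, and on success the anon comes back locked with a
 * reference count of 1, so the caller must drop the lock once it has
 * finished installing the anon.  example_get_anon() is a hypothetical
 * helper, not part of UVM.
 */
#if 0
static int
example_get_anon(struct vm_anon **anonp)
{
	struct vm_anon *anon;

	anon = uvm_analloc();
	if (anon == NULL) {
		/* out of anons; the caller must unwind or wait and retry. */
		return ENOMEM;
	}
	/* anon->an_lock is held and anon->an_ref == 1 here. */
	/* ... install the anon, e.g. into an amap slot ... */
	mutex_exit(&anon->an_lock);
	*anonp = anon;
	return 0;
}
#endif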

/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *	an amap).
 * => anon must be unlocked and have a zero reference count.
 * => we may lock the pageq's.
 */

void
uvm_anfree(struct vm_anon *anon)
{
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);

	KASSERT(anon->an_ref == 0);
	KASSERT(!mutex_owned(&anon->an_lock));

	/*
	 * get page
	 */

	pg = anon->an_page;

	/*
	 * if there is a resident page and it is loaned, then anon may not
	 * own it.  call out to uvm_anon_lockloanpg() to ensure the real owner
	 * of the page has been identified and locked.
	 */

	if (pg && pg->loan_count) {
		mutex_enter(&anon->an_lock);
		pg = uvm_anon_lockloanpg(anon);
		mutex_exit(&anon->an_lock);
	}

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */

	if (pg) {

		/*
		 * if the page is owned by a uobject (now locked), then we must
		 * kill the loan on the page rather than free it.
		 */

		if (pg->uobject) {
			mutex_enter(&uvm_pageqlock);
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->uanon = NULL;
			mutex_exit(&uvm_pageqlock);
			mutex_exit(&pg->uobject->vmobjlock);
		} else {

			/*
			 * page has no uobject, so we must be the owner of it.
			 */

			KASSERT((pg->flags & PG_RELEASED) == 0);
			mutex_enter(&anon->an_lock);
			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * if the page is busy, mark it as PG_RELEASED
			 * so that uvm_anon_release will release it later.
			 */

			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_RELEASED;
				mutex_exit(&anon->an_lock);
				return;
			}
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
			mutex_exit(&anon->an_lock);
			UVMHIST_LOG(maphist, "anon 0x%x, page 0x%x: "
			    "freed now!", anon, pg, 0, 0);
		}
	}
#if defined(VMSWAP)
	if (pg == NULL && anon->an_swslot > 0) {
		/* this page is no longer only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly > 0);
		uvmexp.swpgonly--;
		mutex_exit(&uvm_swap_data_lock);
	}
#endif /* defined(VMSWAP) */

	/*
	 * free any swap resources.
	 */

	uvm_anon_dropswap(anon);

	/*
	 * give a page replacement hint.
	 */

	uvmpdpol_anfree(anon);

	/*
	 * now that we've stripped the data areas from the anon,
	 * free the anon itself.
	 */

	KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
	KASSERT(anon->an_swslot == 0);
#endif /* defined(VMSWAP) */

	pool_cache_put(&uvm_anon_cache, anon);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}

#if defined(VMSWAP)

/*
 * uvm_anon_dropswap: release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(struct vm_anon *anon)
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);

	if (anon->an_swslot == 0)
		return;

	UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
	    anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
}

#endif /* defined(VMSWAP) */
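
/*
 * Illustrative sketch only, not compiled: how a caller is expected to
 * drop its last reference before handing the anon to uvm_anfree().  As
 * documented above, the reference count must already be zero and the
 * anon must be unlocked when uvm_anfree() runs; this mirrors the pattern
 * the amap code typically uses when tearing down anon slots.
 * example_drop_ref() is a hypothetical helper, not part of UVM.
 */
#if 0
static void
example_drop_ref(struct vm_anon *anon)
{
	int refs;

	mutex_enter(&anon->an_lock);
	refs = --anon->an_ref;
	mutex_exit(&anon->an_lock);

	if (refs == 0) {
		/* we held the final reference; free the anon. */
		uvm_anfree(anon);
	}
}
#endif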

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *    if there is a resident page:
 *	if it has a uobject, it is locked by us
 *	if it is ownerless, we take over as owner
 *    we return the resident page (it can change during
 *    this function)
 * => note that the only time an anon has an ownerless resident page
 *    is if the page was loaned from a uvm_object and the uvm_object
 *    disowned it
 * => this only needs to be called when you want to do an operation
 *    on an anon's resident page and that page has a non-zero loan
 *    count.
 */
struct vm_page *
uvm_anon_lockloanpg(struct vm_anon *anon)
{
	struct vm_page *pg;
	bool locked = false;

	KASSERT(mutex_owned(&anon->an_lock));

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.  note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.  this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 */

		if (pg->uobject) {
			mutex_enter(&uvm_pageqlock);
			if (pg->uobject) {
				locked =
				    mutex_tryenter(&pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = true;
			}
			mutex_exit(&uvm_pageqlock);

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				mutex_exit(&anon->an_lock);

				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 */
				/* XXX Better than yielding but inadequate. */
				kpause("livelock", false, 1, NULL);

				mutex_enter(&anon->an_lock);
				continue;
			}
		}

		/*
		 * if page is un-owned [i.e. the object dropped its ownership],
		 * then we can take over as owner!
		 */

		if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
			mutex_enter(&uvm_pageqlock);
			pg->pqflags |= PQ_ANON;
			pg->loan_count--;
			mutex_exit(&uvm_pageqlock);
		}
		break;
	}
	return(pg);
}
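
/*
 * Illustrative sketch only, not compiled: the caller pattern for
 * uvm_anon_lockloanpg(), mirroring its use in uvm_anfree() above.  It is
 * only needed when the anon's resident page has a non-zero loan count;
 * on return, if the page is owned by a uobject, that object's lock is
 * held in addition to the anon lock and must be dropped by the caller.
 * example_touch_loaned_page() is a hypothetical helper, not part of UVM.
 */
#if 0
static void
example_touch_loaned_page(struct vm_anon *anon)
{
	struct vm_page *pg;

	mutex_enter(&anon->an_lock);
	pg = anon->an_page;
	if (pg != NULL && pg->loan_count != 0)
		pg = uvm_anon_lockloanpg(anon);
	/* ... operate on pg (it may be NULL if it went away) ... */
	if (pg != NULL && pg->uobject != NULL) {
		/* we locked the owning object; drop it when done. */
		mutex_exit(&pg->uobject->vmobjlock);
	}
	mutex_exit(&anon->an_lock);
}
#endif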

#if defined(VMSWAP)

/*
 * uvm_anon_pagein: fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uvm_anon_pagein(struct vm_anon *anon)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int rv;

	/* locked: anon */
	KASSERT(mutex_owned(&anon->an_lock));

	rv = uvmfault_anonget(NULL, NULL, anon);

	/*
	 * if rv == 0, anon is still locked, else anon
	 * is unlocked
	 */

	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return false;

	default:
		return true;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */

	pg = anon->an_page;
	uobj = pg->uobject;
	if (anon->an_swslot > 0)
		uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
	pg->flags &= ~(PG_CLEAN);

	/*
	 * deactivate the page (to put it on a page queue)
	 */

	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count == 0)
		uvm_pagedeactivate(pg);
	mutex_exit(&uvm_pageqlock);

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
		pg->flags &= ~(PG_WANTED);
	}

	/*
	 * unlock the anon and we're done.
	 */

	mutex_exit(&anon->an_lock);
	if (uobj) {
		mutex_exit(&uobj->vmobjlock);
	}
	return false;
}

#endif /* defined(VMSWAP) */

/*
 * uvm_anon_release: release an anon and its page.
 *
 * => caller must lock the anon.
 */

void
uvm_anon_release(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;

	KASSERT(mutex_owned(&anon->an_lock));
	KASSERT(pg != NULL);
	KASSERT((pg->flags & PG_RELEASED) != 0);
	KASSERT((pg->flags & PG_BUSY) != 0);
	KASSERT(pg->uobject == NULL);
	KASSERT(pg->uanon == anon);
	KASSERT(pg->loan_count == 0);
	KASSERT(anon->an_ref == 0);

	mutex_enter(&uvm_pageqlock);
	uvm_pagefree(pg);
	mutex_exit(&uvm_pageqlock);
	mutex_exit(&anon->an_lock);

	KASSERT(anon->an_page == NULL);

	uvm_anfree(anon);
}
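
/*
 * Illustrative sketch only, not compiled: how uvm_anon_release() fits
 * together with the PG_RELEASED handling in uvm_anfree() above.  When
 * uvm_anfree() finds the anon's page busy it only sets PG_RELEASED;
 * whoever holds the page busy is then expected to notice the flag once
 * it is done with the page and call uvm_anon_release() with the anon
 * locked, which frees both the page and the now-unreferenced anon.
 * example_unbusy_anon_page() is a hypothetical helper, not part of UVM.
 */
#if 0
static void
example_unbusy_anon_page(struct vm_anon *anon, struct vm_page *pg)
{
	KASSERT(mutex_owned(&anon->an_lock));
	KASSERT((pg->flags & PG_BUSY) != 0);

	if (pg->flags & PG_RELEASED) {
		/* the anon was freed while we had the page busy. */
		uvm_anon_release(anon);		/* unlocks the anon */
		return;
	}
	if (pg->flags & PG_WANTED)
		wakeup(pg);
	pg->flags &= ~(PG_WANTED|PG_BUSY);
	UVM_PAGE_OWN(pg, NULL);
	mutex_exit(&anon->an_lock);
}
#endif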