/*	$NetBSD: uvm_anon.c,v 1.63 2013/10/25 20:08:11 martin Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.63 2013/10/25 20:08:11 martin Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>
#include <uvm/uvm_pdpolicy.h>

static struct pool_cache	uvm_anon_cache;

static int			uvm_anon_ctor(void *, void *, int);

/*
 * uvm_anon_init: set up the pool cache used to allocate anons.
 */
void
uvm_anon_init(void)
{

	pool_cache_bootstrap(&uvm_anon_cache, sizeof(struct vm_anon), 0, 0,
	    PR_LARGECACHE, "anonpl", NULL, IPL_NONE, uvm_anon_ctor,
	    NULL, NULL);
}

/*
 * uvm_anon_ctor: pool cache constructor, initialize a fresh vm_anon.
 */
static int
uvm_anon_ctor(void *arg, void *object, int flags)
{
	struct vm_anon *anon = object;

	anon->an_ref = 0;
	anon->an_lock = NULL;
	anon->an_page = NULL;
#if defined(VMSWAP)
	anon->an_swslot = 0;
#endif
	return 0;
}

/*
 * uvm_analloc: allocate a new anon.
 *
 * => anon will have no lock associated.
 */
struct vm_anon *
uvm_analloc(void)
{
	struct vm_anon *anon;

	anon = pool_cache_get(&uvm_anon_cache, PR_NOWAIT);
	if (anon) {
		KASSERT(anon->an_ref == 0);
		KASSERT(anon->an_lock == NULL);
		KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
		KASSERT(anon->an_swslot == 0);
#endif
		anon->an_ref = 1;
	}
	return anon;
}

/*
 * uvm_anon_dispose: free any resident page or swap resources of anon.
 *
 * => anon must be removed from the amap (if anon was in an amap).
 * => amap must be locked; we may drop and re-acquire the lock here.
 */
static bool
uvm_anon_dispose(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;

	UVMHIST_FUNC("uvm_anon_dispose"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);

	KASSERT(mutex_owned(anon->an_lock));

	/*
	 * If there is a resident page and it is loaned, then anon may not
	 * own it.  Call out to uvm_anon_lockloanpg() to identify and lock
	 * the real owner of the page.
	 */

	if (pg && pg->loan_count) {
		KASSERT(anon->an_lock != NULL);
		pg = uvm_anon_lockloanpg(anon);
	}

	/*
	 * Dispose the page, if it is resident.
	 */

	if (pg) {
		KASSERT(anon->an_lock != NULL);

		/*
		 * If the page is owned by a UVM object (now locked),
		 * then kill the loan on the page rather than free it,
		 * and release the object lock.
		 */

		if (pg->uobject) {
			mutex_enter(&uvm_pageqlock);
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->uanon = NULL;
			mutex_exit(&uvm_pageqlock);
			mutex_exit(pg->uobject->vmobjlock);
		} else {

			/*
			 * If page has no UVM object, then anon is the owner,
			 * and it is already locked.
			 */

			KASSERT((pg->flags & PG_RELEASED) == 0);
			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * If the page is busy, mark it as PG_RELEASED, so
			 * that uvm_anon_release(9) would release it later.
			 */

			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_RELEASED;
				mutex_obj_hold(anon->an_lock);
				return false;
			}
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
			UVMHIST_LOG(maphist, "anon 0x%x, page 0x%x: "
			    "freed now!", anon, pg, 0, 0);
		}
	}

#if defined(VMSWAP)
	if (pg == NULL && anon->an_swslot > 0) {
		/* This page is no longer only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly > 0);
		uvmexp.swpgonly--;
		mutex_exit(&uvm_swap_data_lock);
	}
#endif

	/*
	 * Free any swap resources, leave a page replacement hint.
	 */

	uvm_anon_dropswap(anon);
	uvmpdpol_anfree(anon);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
	return true;
}

/*
 * uvm_anon_free: free a single anon.
 *
 * => anon must be already disposed.
 */
void
uvm_anon_free(struct vm_anon *anon)
{

	KASSERT(anon->an_ref == 0);
	KASSERT(anon->an_lock == NULL);
	KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
	KASSERT(anon->an_swslot == 0);
#endif
	pool_cache_put(&uvm_anon_cache, anon);
}

/*
 * uvm_anon_freelst: free a linked list of anon structures.
 *
 * => anon must be locked, we will unlock it.
 */
void
uvm_anon_freelst(struct vm_amap *amap, struct vm_anon *anonlst)
{
	struct vm_anon *anon;
	struct vm_anon **anonp = &anonlst;

	KASSERT(mutex_owned(amap->am_lock));
	while ((anon = *anonp) != NULL) {
		if (!uvm_anon_dispose(anon)) {
			/* Do not free this anon. */
			*anonp = anon->an_link;
			/* Note: clears an_ref as well. */
			anon->an_link = NULL;
		} else {
			anonp = &anon->an_link;
		}
	}
	amap_unlock(amap);

	while (anonlst) {
		anon = anonlst->an_link;
		/* Note: clears an_ref as well. */
		anonlst->an_link = NULL;
		anonlst->an_lock = NULL;
		uvm_anon_free(anonlst);
		anonlst = anon;
	}
}

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page owner.
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *		 if there is a resident page:
 *			if it has a uobject, it is locked by us
 *			if it is ownerless, we take over as owner
 *		 we return the resident page (it can change during
 *		 this function)
 * => note that the only time an anon has an ownerless resident page
 *	is if the page was loaned from a uvm_object and the uvm_object
 *	disowned it
 * => this only needs to be called when you want to do an operation
 *	on an anon's resident page and that page has a non-zero loan
 *	count.
 */
struct vm_page *
uvm_anon_lockloanpg(struct vm_anon *anon)
{
	struct vm_page *pg;
	bool locked = false;

	KASSERT(mutex_owned(anon->an_lock));

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.  note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.  this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 */

		if (pg->uobject) {
			mutex_enter(&uvm_pageqlock);
			if (pg->uobject) {
				locked =
				    mutex_tryenter(pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = true;
			}
			mutex_exit(&uvm_pageqlock);

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 *
				 * XXX Better than yielding but inadequate.
				 */
				kpause("livelock", false, 1, anon->an_lock);
				continue;
			}
		}

		/*
		 * If page is un-owned i.e. the object dropped its ownership,
		 * then we have to take the ownership.
		 */

		if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
			mutex_enter(&uvm_pageqlock);
			pg->pqflags |= PQ_ANON;
			pg->loan_count--;
			mutex_exit(&uvm_pageqlock);
		}
		break;
	}
	return pg;
}

#if defined(VMSWAP)

/*
 * uvm_anon_pagein: fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uvm_anon_pagein(struct vm_amap *amap, struct vm_anon *anon)
{
	struct vm_page *pg;
	struct uvm_object *uobj;

	KASSERT(mutex_owned(anon->an_lock));
	KASSERT(anon->an_lock == amap->am_lock);

	/*
	 * Get the page of the anon.
	 */

	switch (uvmfault_anonget(NULL, amap, anon)) {
	case 0:
		/* Success - we have the page. */
		KASSERT(mutex_owned(anon->an_lock));
		break;
	case EIO:
	case ERESTART:
		/*
		 * Nothing more to do on errors.  ERESTART means that the
		 * anon was freed.
		 */
		return false;
	default:
		return true;
	}

	/*
	 * Mark the page as dirty, clear its swslot and un-busy it.
	 */

	pg = anon->an_page;
	uobj = pg->uobject;
	if (anon->an_swslot > 0) {
		uvm_swap_free(anon->an_swslot, 1);
	}
	anon->an_swslot = 0;
	pg->flags &= ~PG_CLEAN;

	/*
	 * Deactivate the page (to put it on a page queue).
	 */

	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count == 0) {
		uvm_pagedeactivate(pg);
	}
	mutex_exit(&uvm_pageqlock);

	if (pg->flags & PG_WANTED) {
		pg->flags &= ~PG_WANTED;
		wakeup(pg);
	}

	mutex_exit(anon->an_lock);
	if (uobj) {
		mutex_exit(uobj->vmobjlock);
	}
	return false;
}

/*
 * uvm_anon_dropswap: release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(struct vm_anon *anon)
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);

	if (anon->an_swslot == 0)
		return;

	UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
	    anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
}

#endif

/*
 * uvm_anon_release: release an anon and its page.
 *
 * => anon should not have any references.
 * => anon must be locked.
 */

void
uvm_anon_release(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;
	bool success __diagused;

	KASSERT(mutex_owned(anon->an_lock));
	KASSERT(pg != NULL);
	KASSERT((pg->flags & PG_RELEASED) != 0);
	KASSERT((pg->flags & PG_BUSY) != 0);
	KASSERT(pg->uobject == NULL);
	KASSERT(pg->uanon == anon);
	KASSERT(pg->loan_count == 0);
	KASSERT(anon->an_ref == 0);

	mutex_enter(&uvm_pageqlock);
	uvm_pagefree(pg);
	mutex_exit(&uvm_pageqlock);
	KASSERT(anon->an_page == NULL);
	/* dispose should succeed as no one can reach this anon anymore. */
	success = uvm_anon_dispose(anon);
	KASSERT(success);
	mutex_exit(anon->an_lock);
	/* Note: extra reference is held for PG_RELEASED case. */
	mutex_obj_free(anon->an_lock);
	anon->an_lock = NULL;
	uvm_anon_free(anon);
}