/*	$NetBSD: uvm_anon.c,v 1.64 2017/10/28 00:37:13 pgoyette Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.64 2017/10/28 00:37:13 pgoyette Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>
#include <uvm/uvm_pdpolicy.h>

static struct pool_cache uvm_anon_cache;

static int uvm_anon_ctor(void *, void *, int);

void
uvm_anon_init(void)
{

	pool_cache_bootstrap(&uvm_anon_cache, sizeof(struct vm_anon), 0, 0,
	    PR_LARGECACHE, "anonpl", NULL, IPL_NONE, uvm_anon_ctor,
	    NULL, NULL);
}

static int
uvm_anon_ctor(void *arg, void *object, int flags)
{
	struct vm_anon *anon = object;

	anon->an_ref = 0;
	anon->an_lock = NULL;
	anon->an_page = NULL;
#if defined(VMSWAP)
	anon->an_swslot = 0;
#endif
	return 0;
}

/*
 * uvm_analloc: allocate a new anon.
 *
 * => anon will have no lock associated.
 */
struct vm_anon *
uvm_analloc(void)
{
	struct vm_anon *anon;

	anon = pool_cache_get(&uvm_anon_cache, PR_NOWAIT);
	if (anon) {
		KASSERT(anon->an_ref == 0);
		KASSERT(anon->an_lock == NULL);
		KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
		KASSERT(anon->an_swslot == 0);
#endif
		anon->an_ref = 1;
	}
	return anon;
}
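
/*
 * Illustrative sketch (not part of this file): uvm_analloc() returns an
 * anon holding one reference but no lock, so a typical caller is
 * expected to point an_lock at the owning amap's lock before the anon
 * becomes visible to anyone else.  The surrounding names (amap, the
 * error handling) are hypothetical and only show the intended pattern:
 *
 *	struct vm_anon *anon;
 *
 *	anon = uvm_analloc();
 *	if (anon == NULL)
 *		return ENOMEM;			// PR_NOWAIT: may fail
 *	KASSERT(amap->am_lock != NULL);
 *	anon->an_lock = amap->am_lock;		// share the amap's lock
 */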

/*
 * uvm_anon_dispose: free any resident page or swap resources of anon.
 *
 * => anon must be removed from the amap (if anon was in an amap).
 * => amap must be locked; we may drop and re-acquire the lock here.
 */
static bool
uvm_anon_dispose(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;

	UVMHIST_FUNC("uvm_anon_dispose"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=%#jx)", (uintptr_t)anon, 0,0,0);

	KASSERT(mutex_owned(anon->an_lock));

	/*
	 * If there is a resident page and it is loaned, then anon may not
	 * own it.  Call out to uvm_anon_lockloanpg() to identify and lock
	 * the real owner of the page.
	 */

	if (pg && pg->loan_count) {
		KASSERT(anon->an_lock != NULL);
		pg = uvm_anon_lockloanpg(anon);
	}

	/*
	 * Dispose the page, if it is resident.
	 */

	if (pg) {
		KASSERT(anon->an_lock != NULL);

		/*
		 * If the page is owned by a UVM object (now locked),
		 * then kill the loan on the page rather than free it,
		 * and release the object lock.
		 */

		if (pg->uobject) {
			mutex_enter(&uvm_pageqlock);
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->uanon = NULL;
			mutex_exit(&uvm_pageqlock);
			mutex_exit(pg->uobject->vmobjlock);
		} else {

			/*
			 * If page has no UVM object, then anon is the owner,
			 * and it is already locked.
			 */

			KASSERT((pg->flags & PG_RELEASED) == 0);
			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * If the page is busy, mark it as PG_RELEASED, so
			 * that uvm_anon_release(9) will release it later.
			 */

			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_RELEASED;
				mutex_obj_hold(anon->an_lock);
				return false;
			}
			mutex_enter(&uvm_pageqlock);
			uvm_pagefree(pg);
			mutex_exit(&uvm_pageqlock);
			UVMHIST_LOG(maphist, "anon %#jx, page %#jx: "
			    "freed now!", (uintptr_t)anon, (uintptr_t)pg,
			    0, 0);
		}
	}

#if defined(VMSWAP)
	if (pg == NULL && anon->an_swslot > 0) {
		/* This page is no longer only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly > 0);
		uvmexp.swpgonly--;
		mutex_exit(&uvm_swap_data_lock);
	}
#endif

	/*
	 * Free any swap resources, leave a page replacement hint.
	 */

	uvm_anon_dropswap(anon);
	uvmpdpol_anfree(anon);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
	return true;
}

/*
 * uvm_anon_free: free a single anon.
 *
 * => anon must already be disposed.
 */
void
uvm_anon_free(struct vm_anon *anon)
{

	KASSERT(anon->an_ref == 0);
	KASSERT(anon->an_lock == NULL);
	KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
	KASSERT(anon->an_swslot == 0);
#endif
	pool_cache_put(&uvm_anon_cache, anon);
}

/*
 * uvm_anon_freelst: free a linked list of anon structures.
 *
 * => amap must be locked; we will unlock it before returning.
 */
void
uvm_anon_freelst(struct vm_amap *amap, struct vm_anon *anonlst)
{
	struct vm_anon *anon;
	struct vm_anon **anonp = &anonlst;

	KASSERT(mutex_owned(amap->am_lock));
	while ((anon = *anonp) != NULL) {
		if (!uvm_anon_dispose(anon)) {
			/* Do not free this anon. */
			*anonp = anon->an_link;
			/* Note: clears an_ref as well. */
			anon->an_link = NULL;
		} else {
			anonp = &anon->an_link;
		}
	}
	amap_unlock(amap);

	while (anonlst) {
		anon = anonlst->an_link;
		/* Note: clears an_ref as well. */
		anonlst->an_link = NULL;
		anonlst->an_lock = NULL;
		uvm_anon_free(anonlst);
		anonlst = anon;
	}
}
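
/*
 * Illustrative sketch (not part of this file): a caller tearing down an
 * amap can batch the anons whose reference count drops to zero by
 * chaining them through an_link and handing the list to
 * uvm_anon_freelst(), paying for the amap lock only once.  This is a
 * hypothetical outline of that pattern, not a copy of the amap code:
 *
 *	struct vm_anon *tofree = NULL;
 *
 *	// with amap->am_lock held, for each anon in the amap:
 *	if (--anon->an_ref == 0) {
 *		// an_ref and an_link share storage; the reference
 *		// count is dead once it reaches zero.
 *		anon->an_link = tofree;
 *		tofree = anon;
 *	}
 *	...
 *	uvm_anon_freelst(amap, tofree);	// disposes, unlocks amap, frees
 */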

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page owner.
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *    if there is a resident page:
 *	if it has a uobject, it is locked by us
 *	if it is ownerless, we take over as owner
 *    we return the resident page (it can change during
 *    this function)
 * => note that the only time an anon has an ownerless resident page
 *    is if the page was loaned from a uvm_object and the uvm_object
 *    disowned it
 * => this only needs to be called when you want to do an operation
 *    on an anon's resident page and that page has a non-zero loan
 *    count.
 */
struct vm_page *
uvm_anon_lockloanpg(struct vm_anon *anon)
{
	struct vm_page *pg;
	bool locked = false;

	KASSERT(mutex_owned(anon->an_lock));

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.  note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.  this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 */

		if (pg->uobject) {
			mutex_enter(&uvm_pageqlock);
			if (pg->uobject) {
				locked =
				    mutex_tryenter(pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = true;
			}
			mutex_exit(&uvm_pageqlock);

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 *
				 * XXX Better than yielding but inadequate.
				 */
				kpause("livelock", false, 1, anon->an_lock);
				continue;
			}
		}

		/*
		 * If the page is un-owned, i.e. the object dropped its
		 * ownership, then we take over as the owner.
		 */

		if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
			mutex_enter(&uvm_pageqlock);
			pg->pqflags |= PQ_ANON;
			pg->loan_count--;
			mutex_exit(&uvm_pageqlock);
		}
		break;
	}
	return pg;
}
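
/*
 * Illustrative sketch (not part of this file): the try-lock and backoff
 * idiom above is the general way to take locks against their usual
 * order.  The normal order is "object, then anon"; here the anon lock
 * is already held, so the object lock can only be polled.  A
 * hypothetical reduction of the core loop, with objlock/anonlock
 * standing in for the real locks:
 *
 *	for (;;) {
 *		if (mutex_tryenter(objlock))
 *			break;		// got it out of order, no deadlock
 *		// Failed: drop anonlock for a tick so whoever holds
 *		// objlock can take anonlock and make progress, then
 *		// retry.  kpause() re-acquires anonlock before returning.
 *		kpause("livelock", false, 1, anonlock);
 *	}
 */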

#if defined(VMSWAP)

/*
 * uvm_anon_pagein: fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uvm_anon_pagein(struct vm_amap *amap, struct vm_anon *anon)
{
	struct vm_page *pg;
	struct uvm_object *uobj;

	KASSERT(mutex_owned(anon->an_lock));
	KASSERT(anon->an_lock == amap->am_lock);

	/*
	 * Get the page of the anon.
	 */

	switch (uvmfault_anonget(NULL, amap, anon)) {
	case 0:
		/* Success - we have the page. */
		KASSERT(mutex_owned(anon->an_lock));
		break;
	case EIO:
	case ERESTART:
		/*
		 * Nothing more to do on errors.  ERESTART means that the
		 * anon was freed.
		 */
		return false;
	default:
		return true;
	}

	/*
	 * Mark the page as dirty, clear its swslot and un-busy it.
	 */

	pg = anon->an_page;
	uobj = pg->uobject;
	if (anon->an_swslot > 0) {
		uvm_swap_free(anon->an_swslot, 1);
	}
	anon->an_swslot = 0;
	pg->flags &= ~PG_CLEAN;

	/*
	 * Deactivate the page (to put it on a page queue).
	 */

	mutex_enter(&uvm_pageqlock);
	if (pg->wire_count == 0) {
		uvm_pagedeactivate(pg);
	}
	mutex_exit(&uvm_pageqlock);

	if (pg->flags & PG_WANTED) {
		pg->flags &= ~PG_WANTED;
		wakeup(pg);
	}

	mutex_exit(anon->an_lock);
	if (uobj) {
		mutex_exit(uobj->vmobjlock);
	}
	return false;
}

/*
 * uvm_anon_dropswap: release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(struct vm_anon *anon)
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);

	if (anon->an_swslot == 0)
		return;

	UVMHIST_LOG(maphist,"freeing swap for anon %#jx, paged to swslot 0x%jx",
	    (uintptr_t)anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
}

#endif

/*
 * uvm_anon_release: release an anon and its page.
 *
 * => anon should not have any references.
 * => anon must be locked.
 */

void
uvm_anon_release(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;
	bool success __diagused;

	KASSERT(mutex_owned(anon->an_lock));
	KASSERT(pg != NULL);
	KASSERT((pg->flags & PG_RELEASED) != 0);
	KASSERT((pg->flags & PG_BUSY) != 0);
	KASSERT(pg->uobject == NULL);
	KASSERT(pg->uanon == anon);
	KASSERT(pg->loan_count == 0);
	KASSERT(anon->an_ref == 0);

	mutex_enter(&uvm_pageqlock);
	uvm_pagefree(pg);
	mutex_exit(&uvm_pageqlock);
	KASSERT(anon->an_page == NULL);
	/* dispose should succeed as no one can reach this anon anymore. */
	success = uvm_anon_dispose(anon);
	KASSERT(success);
	mutex_exit(anon->an_lock);
	/* Note: extra reference is held for PG_RELEASED case. */
	mutex_obj_free(anon->an_lock);
	anon->an_lock = NULL;
	uvm_anon_free(anon);
}
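
/*
 * Illustrative sketch (not part of this file): the PG_RELEASED handoff.
 * When uvm_anon_dispose() finds the page busy, it sets PG_RELEASED,
 * takes an extra reference on an_lock, and leaves the final cleanup to
 * whichever thread owns PG_BUSY.  A hypothetical outline of what that
 * owner does once it re-locks the anon and is done with the page:
 *
 *	mutex_enter(anon->an_lock);
 *	if ((pg->flags & PG_RELEASED) != 0 && pg->uanon == anon) {
 *		// The anon was torn down while we held the page busy;
 *		// free both.  uvm_anon_release() drops and frees
 *		// an_lock, consuming the extra reference.
 *		uvm_anon_release(anon);
 *	} else {
 *		mutex_exit(anon->an_lock);
 *	}
 */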