/*	$NetBSD: uvm_anon.c,v 1.75 2020/03/14 20:23:51 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.75 2020/03/14 20:23:51 ad Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>
#include <uvm/uvm_pdpolicy.h>

static struct pool_cache uvm_anon_cache;

static int uvm_anon_ctor(void *, void *, int);

void
uvm_anon_init(void)
{

	pool_cache_bootstrap(&uvm_anon_cache, sizeof(struct vm_anon), 0, 0,
	    PR_LARGECACHE, "anonpl", NULL, IPL_NONE, uvm_anon_ctor,
	    NULL, NULL);
}

static int
uvm_anon_ctor(void *arg, void *object, int flags)
{
	struct vm_anon *anon = object;

	anon->an_ref = 0;
	anon->an_lock = NULL;
	anon->an_page = NULL;
#if defined(VMSWAP)
	anon->an_swslot = 0;
#endif
	return 0;
}

/*
 * uvm_analloc: allocate a new anon.
 *
 * => anon will have no lock associated.
 */
struct vm_anon *
uvm_analloc(void)
{
	struct vm_anon *anon;

	anon = pool_cache_get(&uvm_anon_cache, PR_NOWAIT);
	if (anon) {
		KASSERT(anon->an_ref == 0);
		KASSERT(anon->an_lock == NULL);
		KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
		KASSERT(anon->an_swslot == 0);
#endif
		anon->an_ref = 1;
	}
	return anon;
}

/*
 * uvm_anon_dispose: free any resident page or swap resources of anon.
 *
 * => anon must be removed from the amap (if anon was in an amap).
 * => amap must be locked; we may drop and re-acquire the lock here.
 */
static bool
uvm_anon_dispose(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;

	UVMHIST_FUNC("uvm_anon_dispose"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=%#jx)", (uintptr_t)anon, 0,0,0);

	KASSERT(rw_write_held(anon->an_lock));

	/*
	 * Dispose the page, if it is resident.
	 */

	if (pg) {
		KASSERT(anon->an_lock != NULL);

		/*
		 * If there is a resident page and it is loaned, then anon
		 * may not own it.  Call out to uvm_anon_lockloanpg() to
		 * identify and lock the real owner of the page.
		 */

		if (pg->loan_count) {
			pg = uvm_anon_lockloanpg(anon);
		}

		/*
		 * If the page is owned by a UVM object (now locked),
		 * then kill the loan on the page rather than free it,
		 * and release the object lock.
		 */

		if (pg->uobject) {
			mutex_enter(&pg->interlock);
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->uanon = NULL;
			mutex_exit(&pg->interlock);
			rw_exit(pg->uobject->vmobjlock);
		} else {

			/*
			 * If page has no UVM object, then anon is the owner,
			 * and it is already locked.
			 */

			KASSERT((pg->flags & PG_RELEASED) == 0);
			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * If the page is busy, mark it as PG_RELEASED, so
			 * that uvm_anon_release(9) would release it later.
			 */

			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_RELEASED;
				rw_obj_hold(anon->an_lock);
				return false;
			}
			uvm_pagefree(pg);
			UVMHIST_LOG(maphist, "anon %#jx, page %#jx: "
			    "freed now!", (uintptr_t)anon, (uintptr_t)pg,
			    0, 0);
		}
	}

#if defined(VMSWAP)
	if (pg == NULL && anon->an_swslot > 0) {
		/* This page is no longer only in swap. */
		KASSERT(uvmexp.swpgonly > 0);
		atomic_dec_uint(&uvmexp.swpgonly);
	}
#endif

	/*
	 * Free any swap resources, leave a page replacement hint.
	 */

	uvm_anon_dropswap(anon);
	uvmpdpol_anfree(anon);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
	return true;
}

/*
 * uvm_anon_free: free a single anon.
 *
 * => anon must be already disposed.
 */
void
uvm_anon_free(struct vm_anon *anon)
{

	KASSERT(anon->an_ref == 0);
	KASSERT(anon->an_lock == NULL);
	KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
	KASSERT(anon->an_swslot == 0);
#endif
	pool_cache_put(&uvm_anon_cache, anon);
}

/*
 * uvm_anon_freelst: free a linked list of anon structures.
 *
 * => amap must be locked, we will unlock it.
 */
void
uvm_anon_freelst(struct vm_amap *amap, struct vm_anon *anonlst)
{
	struct vm_anon *next;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(rw_write_held(amap->am_lock));

	for (; anonlst != NULL; anonlst = next) {
		next = anonlst->an_link;
		/* Note: clears an_ref as well. */
		anonlst->an_link = NULL;
		if (uvm_anon_dispose(anonlst)) {
			anonlst->an_lock = NULL;
			uvm_anon_free(anonlst);
		}
	}
	amap_unlock(amap);
}

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page owner.
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *    if there is a resident page:
 *	if it has a uobject, it is locked by us
 *	if it is ownerless, we take over as owner
 *    we return the resident page (it can change during
 *    this function)
 * => note that the only time an anon has an ownerless resident page
 *    is if the page was loaned from a uvm_object and the uvm_object
 *    disowned it
 * => this only needs to be called when you want to do an operation
 *    on an anon's resident page and that page has a non-zero loan
 *    count.
 */
struct vm_page *
uvm_anon_lockloanpg(struct vm_anon *anon)
{
	struct vm_page *pg;
	krw_t op;

	KASSERT(rw_lock_held(anon->an_lock));

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.  note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {
		mutex_enter(&pg->interlock);
		if (pg->uobject) {
			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!rw_tryenter(pg->uobject->vmobjlock, RW_WRITER)) {
				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 *
				 * XXX Better than yielding but inadequate.
				 */
				mutex_exit(&pg->interlock);
				op = rw_lock_op(anon->an_lock);
				rw_exit(anon->an_lock);
				kpause("lkloanpg", false, 1, NULL);
				rw_enter(anon->an_lock, op);
				continue;
			}
		}

		/*
		 * If page is un-owned i.e. the object dropped its ownership,
		 * then we have to take the ownership.
		 */

		if (pg->uobject == NULL && (pg->flags & PG_ANON) == 0) {
			pg->flags |= PG_ANON;
			pg->loan_count--;
		}
		mutex_exit(&pg->interlock);
		break;
	}
	return pg;
}

#if defined(VMSWAP)

/*
 * uvm_anon_pagein: fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uvm_anon_pagein(struct vm_amap *amap, struct vm_anon *anon)
{
	struct vm_page *pg;
	struct uvm_object *uobj;

	KASSERT(rw_write_held(anon->an_lock));
	KASSERT(anon->an_lock == amap->am_lock);

	/*
	 * Get the page of the anon.
	 */

	switch (uvmfault_anonget(NULL, amap, anon)) {
	case 0:
		/* Success - we have the page. */
		KASSERT(rw_write_held(anon->an_lock));
		break;
	case EIO:
	case ERESTART:
		/*
		 * Nothing more to do on errors.  ERESTART means that the
		 * anon was freed.
		 */
		return false;
	default:
		return true;
	}

	/*
	 * Mark the page as dirty and clear its swslot.
	 */

	pg = anon->an_page;
	uobj = pg->uobject;
	if (anon->an_swslot > 0) {
		uvm_swap_free(anon->an_swslot, 1);
	}
	anon->an_swslot = 0;
	uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);

	/*
	 * Deactivate the page (to put it on a page queue).
	 */

	uvm_pagelock(pg);
	uvm_pagedeactivate(pg);
	uvm_pageunlock(pg);
	rw_exit(anon->an_lock);
	if (uobj) {
		rw_exit(uobj->vmobjlock);
	}
	return false;
}

/*
 * uvm_anon_dropswap: release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(struct vm_anon *anon)
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);

	if (anon->an_swslot == 0)
		return;

	UVMHIST_LOG(maphist,"freeing swap for anon %#jx, paged to swslot %#jx",
	    (uintptr_t)anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
}

#endif

/*
 * uvm_anon_release: release an anon and its page.
 *
 * => anon should not have any references.
 * => anon must be locked.
 */

void
uvm_anon_release(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;
	bool success __diagused;

	KASSERT(rw_write_held(anon->an_lock));
	KASSERT(pg != NULL);
	KASSERT((pg->flags & PG_RELEASED) != 0);
	KASSERT((pg->flags & PG_BUSY) != 0);
	KASSERT(pg->uobject == NULL);
	KASSERT(pg->uanon == anon);
	KASSERT(pg->loan_count == 0);
	KASSERT(anon->an_ref == 0);

	uvm_pagefree(pg);
	KASSERT(anon->an_page == NULL);
	/* dispose should succeed as no one can reach this anon anymore. */
	success = uvm_anon_dispose(anon);
	KASSERT(success);
	rw_exit(anon->an_lock);
	/* Note: extra reference is held for PG_RELEASED case. */
	rw_obj_free(anon->an_lock);
	anon->an_lock = NULL;
	uvm_anon_free(anon);
}
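
/*
 * Illustrative sketch (not part of the original source): a minimal example
 * of the allocation/teardown contract documented for uvm_analloc() and
 * uvm_anon_free() above.  The function name is hypothetical and no real
 * caller looks like this; actual users go through the amap and fault code.
 * It only shows that a fresh anon carries one reference and no lock, and
 * that the reference must be dropped again before the anon may be returned
 * to the pool cache.  Compiled out so the file's behaviour is unchanged.
 */
#if 0
static int
uvm_anon_alloc_example(void)
{
	struct vm_anon *anon;

	/* A new anon comes back with an_ref == 1 and an_lock == NULL. */
	anon = uvm_analloc();
	if (anon == NULL) {
		/* pool_cache_get(PR_NOWAIT) failed; the caller must cope. */
		return ENOMEM;
	}

	/*
	 * Unwind path before the anon has been published in an amap:
	 * drop the initial reference so that uvm_anon_free()'s assertions
	 * (an_ref == 0, an_lock == NULL, an_page == NULL) hold.
	 */
	anon->an_ref--;
	uvm_anon_free(anon);
	return 0;
}
#endif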