/*	$OpenBSD: uvm_anon.c,v 1.48 2016/09/15 02:00:18 dlg Exp $	*/
/*	$NetBSD: uvm_anon.c,v 1.10 2000/11/25 06:27:59 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

struct pool uvm_anon_pool;

/*
 * allocate anons
 */
void
uvm_anon_init(void)
{
	pool_init(&uvm_anon_pool, sizeof(struct vm_anon), 0, IPL_NONE,
	    PR_WAITOK, "anonpl", NULL);
	pool_sethiwat(&uvm_anon_pool, uvmexp.free / 16);
}

/*
 * allocate an anon
 */
struct vm_anon *
uvm_analloc(void)
{
	struct vm_anon *anon;

	anon = pool_get(&uvm_anon_pool, PR_NOWAIT);
	if (anon) {
		anon->an_ref = 1;
		anon->an_page = NULL;
		anon->an_swslot = 0;
	}
	return(anon);
}

/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *	an amap).
 * => we may lock the pageq's.
 */
void
uvm_anfree(struct vm_anon *anon)
{
	struct vm_page *pg;

	/* get page */
	pg = anon->an_page;

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */
	if (pg) {
		/*
		 * if page is busy then we just mark it as released (who ever
		 * has it busy must check for this when they wake up). if the
		 * page is not busy then we can free it now.
		 */
		if ((pg->pg_flags & PG_BUSY) != 0) {
			/* tell them to dump it when done */
			atomic_setbits_int(&pg->pg_flags, PG_RELEASED);
			return;
		}
		pmap_page_protect(pg, PROT_NONE);
		uvm_lock_pageq();	/* lock out pagedaemon */
		uvm_pagefree(pg);	/* bye bye */
		uvm_unlock_pageq();	/* free the daemon */
	}
	if (pg == NULL && anon->an_swslot != 0) {
		/* this page is no longer only in swap. */
		KASSERT(uvmexp.swpgonly > 0);
		uvmexp.swpgonly--;
	}

	/* free any swap resources. */
	uvm_anon_dropswap(anon);

	/*
	 * now that we've stripped the data areas from the anon, free the anon
	 * itself!
	 */
	KASSERT(anon->an_page == NULL);
	KASSERT(anon->an_swslot == 0);

	pool_put(&uvm_anon_pool, anon);
}

/*
 * uvm_anwait: wait for memory to become available to allocate an anon.
 */
void
uvm_anwait(void)
{
	struct vm_anon *anon;

	/* XXX: Want something like pool_wait()? */
	anon = pool_get(&uvm_anon_pool, PR_WAITOK);
	pool_put(&uvm_anon_pool, anon);
}

/*
 * uvm_anon_dropswap: release any swap resources from this anon.
 */
void
uvm_anon_dropswap(struct vm_anon *anon)
{

	if (anon->an_swslot == 0)
		return;

	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
}

/*
 * fetch an anon's page.
 *
 * => returns TRUE if pagein was aborted due to lack of memory.
 */

boolean_t
uvm_anon_pagein(struct vm_anon *anon)
{
	struct vm_page *pg;
	int rv;

	rv = uvmfault_anonget(NULL, NULL, anon);

	switch (rv) {
	case VM_PAGER_OK:
		break;
	case VM_PAGER_ERROR:
	case VM_PAGER_REFAULT:
		/*
		 * nothing more to do on errors.
		 * VM_PAGER_REFAULT can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */
		return FALSE;
	default:
#ifdef DIAGNOSTIC
		panic("anon_pagein: uvmfault_anonget -> %d", rv);
#else
		return FALSE;
#endif
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */
	pg = anon->an_page;
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
	atomic_clearbits_int(&pg->pg_flags, PG_CLEAN);

	/* deactivate the page (to put it on a page queue) */
	pmap_clear_reference(pg);
	pmap_page_protect(pg, PROT_NONE);
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	return FALSE;
}
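/*
 * Illustrative sketch only, not part of this file: a hypothetical caller of
 * the interfaces above, kept inside this comment so nothing is added to the
 * compilation unit.  It assumes the conventions documented above:
 * uvm_analloc() allocates with PR_NOWAIT and so may return NULL, a caller
 * that must succeed can uvm_anwait() and retry, and uvm_anfree() expects the
 * anon to have been removed from its amap first.
 *
 *	struct vm_anon *anon;
 *
 *	while ((anon = uvm_analloc()) == NULL)
 *		uvm_anwait();		(* wait until an anon can be allocated *)
 *
 *	(* ... install the anon in an amap, fault in or attach a page ... *)
 *
 *	(* later, after removing the anon from its amap: *)
 *	uvm_anfree(anon);		(* drops page and swap, then frees anon *)
 */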