/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)vm_pageout.c	8.3 (Berkeley) 12/30/93
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 *	The proverbial page-out daemon.
 */

#include <sys/param.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>

int	vm_pages_needed;	/* Event on which pageout daemon sleeps */

/*
 * Upper bound on the auto-computed cnt.v_free_min (see vm_pageout());
 * prevents the 5% heuristic from reserving an excessive number of free
 * pages on large-memory machines.
 */
int	vm_page_free_min_sanity = 40;

int	vm_page_max_wired = 0;	/* XXX max # of wired pages system-wide */

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 *
 *	One pass of the daemon: free clean inactive pages and start
 *	pageouts of dirty ones until cnt.v_free_target is reached (or the
 *	inactive queue is exhausted), then refill the inactive queue from
 *	the active queue up to cnt.v_inactive_target.
 *
 *	Called from vm_pageout() with no locks held; acquires and releases
 *	the page-queue, free-queue and per-object locks internally.
 */
void
vm_pageout_scan()
{
	register vm_page_t	m, next;
	register int		page_shortage;
	register int		s;
	register int		pages_freed;
	int			free;

	/*
	 * Only continue when we want more pages to be "free".
	 *
	 * The free count is shared with interrupt-level allocators, so it
	 * is sampled under splimp() and the free-queue simple lock.
	 */

	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
	free = cnt.v_free_count;
	simple_unlock(&vm_page_queue_free_lock);
	splx(s);

	if (free < cnt.v_free_target) {
		swapout_threads();

		/*
		 * Be sure the pmap system is updated so
		 * we can scan the inactive queue.
		 */

		pmap_update();
	}

	/*
	 * Acquire the resident page system lock,
	 * as we may be changing what's resident quite a bit.
	 */
	vm_page_lock_queues();

	/*
	 * Start scanning the inactive queue for pages we can free.
	 * We keep scanning until we have enough free pages or
	 * we have scanned through the entire queue.  If we
	 * encounter dirty pages, we start cleaning them.
	 */

	pages_freed = 0;
	for (m = vm_page_queue_inactive.tqh_first; m != NULL; m = next) {
		vm_object_t	object;
		vm_pager_t	pager;
		int		pageout_status;

		/*
		 * Capture the successor now: m may be freed, reactivated
		 * or otherwise unlinked from the inactive queue below.
		 */
		next = m->pageq.tqe_next;
		s = splimp();
		simple_lock(&vm_page_queue_free_lock);
		free = cnt.v_free_count;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);

		/* Re-check the target each iteration; stop when satisfied. */
		if (free >= cnt.v_free_target)
			break;

		/*
		 * If the page has been referenced, move it back to the
		 * active queue.
		 */
		if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
			vm_page_activate(m);
			cnt.v_reactivated++;
			continue;
		}

		/*
		 * If the page is clean, free it up.
		 *
		 * The object lock is only tried, never waited for: sleeping
		 * here with the page queues locked could deadlock, so a
		 * contended page is simply skipped this pass.
		 */
		if (m->flags & PG_CLEAN) {
			object = m->object;
			if (vm_object_lock_try(object)) {
				/* Remove all mappings before freeing. */
				pmap_page_protect(VM_PAGE_TO_PHYS(m),
						  VM_PROT_NONE);
				vm_page_free(m);
				pages_freed++;
				vm_object_unlock(object);
			}
			continue;
		}

		/*
		 * If the page is dirty but already being washed, skip it.
		 */
		if ((m->flags & PG_LAUNDRY) == 0)
			continue;

		/*
		 * Otherwise the page is dirty and still in the laundry,
		 * so we start the cleaning operation and remove it from
		 * the laundry.
		 *
		 * We set the busy bit to cause potential page faults on
		 * this page to block.
		 *
		 * We also set pageout-in-progress to keep the object from
		 * disappearing during pageout.  This guarantees that the
		 * page won't move from the inactive queue.  (However, any
		 * other page on the inactive queue may move!)
		 */
		object = m->object;
		if (!vm_object_lock_try(object))
			continue;
		pmap_page_protect(VM_PAGE_TO_PHYS(m), VM_PROT_NONE);
		m->flags |= PG_BUSY;
		cnt.v_pageouts++;

		/*
		 * Try to collapse the object before making a pager for it.
		 * We must unlock the page queues first (lock ordering:
		 * the collapse may manipulate pages itself).
		 */
		vm_page_unlock_queues();
		vm_object_collapse(object);

		object->paging_in_progress++;
		vm_object_unlock(object);

		/*
		 * Do a wakeup here in case the following operations block.
		 */
		thread_wakeup((int) &cnt.v_free_count);

		/*
		 * If there is no pager for the page, use the default pager.
		 * If there is no place to put the page at the moment,
		 * leave it in the laundry and hope that there will be
		 * paging space later.
		 */
		if ((pager = object->pager) == NULL) {
			pager = vm_pager_allocate(PG_DFLT, (caddr_t)0,
						  object->size, VM_PROT_ALL,
						  (vm_offset_t)0);
			if (pager != NULL)
				vm_object_setpager(object, pager, 0, FALSE);
		}
		/* FALSE: do not wait for a pending (async) pageout. */
		pageout_status = pager ?
			vm_pager_put(pager, m, FALSE) : VM_PAGER_FAIL;
		vm_object_lock(object);
		vm_page_lock_queues();

		switch (pageout_status) {
		case VM_PAGER_OK:
		case VM_PAGER_PEND:
			/* Written (or write in flight): no longer dirty laundry. */
			m->flags &= ~PG_LAUNDRY;
			break;
		case VM_PAGER_BAD:
			/*
			 * Page outside of range of object.  Right now we
			 * essentially lose the changes by pretending it
			 * worked.
			 *
			 * XXX dubious, what should we do?
			 */
			m->flags &= ~PG_LAUNDRY;
			m->flags |= PG_CLEAN;
			pmap_clear_modify(VM_PAGE_TO_PHYS(m));
			break;
		case VM_PAGER_FAIL:
		case VM_PAGER_ERROR:
			/*
			 * If the page couldn't be paged out, then reactivate
			 * it so that it doesn't clog the inactive list.
			 * (We will try paging it out again later.)
			 */
			vm_page_activate(m);
			break;
		}

		/*
		 * Clear the reference bit so the page starts its next stay
		 * on the inactive queue unreferenced.
		 */
		pmap_clear_reference(VM_PAGE_TO_PHYS(m));

		/*
		 * If the operation is still going, leave the page busy
		 * to block all other accesses.  Also, leave the paging
		 * in progress indicator set so that we don't attempt an
		 * object collapse.  (For VM_PAGER_PEND the pager completion
		 * path is presumably responsible for clearing these —
		 * verify against the pager implementations.)
		 */
		if (pageout_status != VM_PAGER_PEND) {
			m->flags &= ~PG_BUSY;
			PAGE_WAKEUP(m);
			object->paging_in_progress--;
		}
		thread_wakeup((int) object);
		vm_object_unlock(object);
	}

	/*
	 * Compute the page shortage.  If we are still very low on memory
	 * be sure that we will move a minimal amount of pages from active
	 * to inactive (at least one, when nothing was freed above).
	 */

	page_shortage = cnt.v_inactive_target - cnt.v_inactive_count;
	if (page_shortage <= 0 && pages_freed == 0)
		page_shortage = 1;

	while (page_shortage > 0) {
		/*
		 * Move some more pages from active to inactive.
		 */

		if ((m = vm_page_queue_active.tqh_first) == NULL)
			break;
		vm_page_deactivate(m);
		page_shortage--;
	}

	vm_page_unlock_queues();
}

/*
 *	vm_pageout is the high level pageout daemon.
 *
 *	Kernel-thread entry point: computes the paging thresholds once
 *	(unless already set), then loops forever, sleeping on
 *	vm_pages_needed and running vm_pageout_scan() each time it is
 *	woken.  Never returns.
 */

void vm_pageout()
{
	/* Daemon runs at base priority level; allow all interrupts. */
	(void) spl0();

	/*
	 * Initialize some paging parameters.
	 *
	 * v_free_min defaults to 5% of memory, clamped to
	 * [3, vm_page_free_min_sanity]; v_free_target to 4/3 of that.
	 * Each is computed only if not already configured (nonzero).
	 */

	if (cnt.v_free_min == 0) {
		cnt.v_free_min = cnt.v_free_count / 20;
		if (cnt.v_free_min < 3)
			cnt.v_free_min = 3;

		if (cnt.v_free_min > vm_page_free_min_sanity)
			cnt.v_free_min = vm_page_free_min_sanity;
	}

	if (cnt.v_free_target == 0)
		cnt.v_free_target = (cnt.v_free_min * 4) / 3;

	/* The target must exceed the minimum for the scan to make progress. */
	if (cnt.v_free_target <= cnt.v_free_min)
		cnt.v_free_target = cnt.v_free_min + 1;

	/* XXX does not really belong here */
	if (vm_page_max_wired == 0)
		vm_page_max_wired = cnt.v_free_count / 3;

	/*
	 * The pageout daemon is never done, so loop
	 * forever.
	 *
	 * The lock is taken before the loop because thread_sleep()
	 * expects to release it; it is re-acquired at the bottom of each
	 * iteration before sleeping again.
	 */

	simple_lock(&vm_pages_needed_lock);
	while (TRUE) {
		thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
			     FALSE);
		/*
		 * Compute the inactive target for this scan.
		 * We need to keep a reasonable amount of memory in the
		 * inactive list to better simulate LRU behavior.
		 */
		cnt.v_inactive_target =
			(cnt.v_active_count + cnt.v_inactive_count) / 3;
		if (cnt.v_inactive_target <= cnt.v_free_target)
			cnt.v_inactive_target = cnt.v_free_target + 1;

		vm_pageout_scan();
		/* Push any pageouts the pagers have buffered. */
		vm_pager_sync();
		simple_lock(&vm_pages_needed_lock);
		/* Tell sleepers in VM_WAIT that free pages may be available. */
		thread_wakeup((int) &cnt.v_free_count);
	}
}