/*
 * Copyright (c) 1985, Avadis Tevanian, Jr., Michael Wayne Young
 * Copyright (c) 1987 Carnegie-Mellon University
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * The CMU software License Agreement specifies the terms and conditions
 * for use and redistribution.
 *
 *	@(#)vm_pageout.c	7.2 (Berkeley) 04/20/91
 */

/*
 *	The proverbial page-out daemon.
 */

#include "param.h"

#include "vm.h"
#include "vm_page.h"
#include "vm_pageout.h"

int	vm_pages_needed;		/* Event on which pageout daemon sleeps */
int	vm_pageout_free_min = 0;	/* Stop pageout to wait for pagers at this free level */

int	vm_page_free_min_sanity = 40;	/* Upper bound on the auto-computed vm_page_free_min */

/*
 *	vm_pageout_scan does the dirty work for the pageout daemon.
 *
 *	One pass of the daemon: scan the inactive queue, freeing clean
 *	unreferenced pages and starting pageout (laundering) of dirty
 *	ones, until vm_page_free_count reaches vm_page_free_target or
 *	the queue is exhausted.  Afterwards, deactivate enough active
 *	pages to refill the inactive queue toward vm_page_inactive_target.
 */
vm_pageout_scan()
{
	register vm_page_t	m;
	register int		page_shortage;
	register int		s;
	register int		pages_freed;
	int			free;

	/*
	 *	Only continue when we want more pages to be "free".
	 *
	 *	The free count is sampled under splimp plus the free-queue
	 *	simple lock; it may of course change as soon as the lock
	 *	is dropped, so it is only a hint.
	 */

	s = splimp();
	simple_lock(&vm_page_queue_free_lock);
	free = vm_page_free_count;
	simple_unlock(&vm_page_queue_free_lock);
	splx(s);

	if (free < vm_page_free_target) {
		swapout_threads();

		/*
		 *	Be sure the pmap system is updated so
		 *	we can scan the inactive queue.
		 */

		pmap_update();
	}

	/*
	 *	Acquire the resident page system lock,
	 *	as we may be changing what's resident quite a bit.
	 */
	vm_page_lock_queues();

	/*
	 *	Start scanning the inactive queue for pages we can free.
	 *	We keep scanning until we have enough free pages or
	 *	we have scanned through the entire queue.  If we
	 *	encounter dirty pages, we start cleaning them.
	 */

	pages_freed = 0;
	m = (vm_page_t) queue_first(&vm_page_queue_inactive);
	while (!queue_end(&vm_page_queue_inactive, (queue_entry_t) m)) {
		vm_page_t	next;

		/*
		 *	Re-sample the free count each iteration so we stop
		 *	as soon as the target has been reached.
		 */
		s = splimp();
		simple_lock(&vm_page_queue_free_lock);
		free = vm_page_free_count;
		simple_unlock(&vm_page_queue_free_lock);
		splx(s);

		if (free >= vm_page_free_target)
			break;

		if (m->clean) {
			/*
			 *	Record the successor now: if the page is
			 *	freed below it is dequeued and its pageq
			 *	links become unusable.
			 */
			next = (vm_page_t) queue_next(&m->pageq);
			if (pmap_is_referenced(VM_PAGE_TO_PHYS(m))) {
				/*
				 *	Recently referenced -- give the page
				 *	another chance on the active queue.
				 */
				vm_page_activate(m);
				vm_stat.reactivations++;
			}
			else {
				register vm_object_t	object;
				object = m->object;
				/*
				 *	Use the non-blocking lock attempt:
				 *	we hold the page-queue lock, so we
				 *	must not sleep waiting for an object.
				 */
				if (!vm_object_lock_try(object)) {
					/*
					 *	Can't lock object -
					 *	skip page.
					 */
					m = next;
					continue;
				}
				pmap_remove_all(VM_PAGE_TO_PHYS(m));
				vm_page_free(m);	/* will dequeue */
				pages_freed++;
				vm_object_unlock(object);
			}
			m = next;
		}
		else {
			/*
			 *	If a page is dirty, then it is either
			 *	being washed (but not yet cleaned)
			 *	or it is still in the laundry.  If it is
			 *	still in the laundry, then we start the
			 *	cleaning operation.
			 */

			if (m->laundry) {
				/*
				 *	Clean the page and remove it from the
				 *	laundry.
				 *
				 *	We set the busy bit to cause
				 *	potential page faults on this page to
				 *	block.
				 *
				 *	And we set pageout-in-progress to keep
				 *	the object from disappearing during
				 *	pageout.  This guarantees that the
				 *	page won't move from the inactive
				 *	queue.  (However, any other page on
				 *	the inactive queue may move!)
				 */

				register vm_object_t	object;
				register vm_pager_t	pager;
				int			pageout_status;

				object = m->object;
				if (!vm_object_lock_try(object)) {
					/*
					 *	Skip page if we can't lock
					 *	its object
					 */
					m = (vm_page_t) queue_next(&m->pageq);
					continue;
				}

				pmap_remove_all(VM_PAGE_TO_PHYS(m));
				m->busy = TRUE;
				vm_stat.pageouts++;

				/*
				 *	Try to collapse the object before
				 *	making a pager for it.  We must
				 *	unlock the page queues first.
				 */
				vm_page_unlock_queues();

				vm_object_collapse(object);

				object->paging_in_progress++;
				vm_object_unlock(object);

				/*
				 *	Do a wakeup here in case the following
				 *	operations block.
				 */
				thread_wakeup((int) &vm_page_free_count);

				/*
				 *	If there is no pager for the page,
				 *	use the default pager.  If there's
				 *	no place to put the page at the
				 *	moment, leave it in the laundry and
				 *	hope that there will be paging space
				 *	later.
				 */

				if ((pager = object->pager) == NULL) {
					pager = vm_pager_allocate(PG_DFLT,
								  (caddr_t)0,
								  object->size,
								  VM_PROT_ALL);
					if (pager != NULL) {
						vm_object_setpager(object,
								pager, 0, FALSE);
					}
				}
				pageout_status = pager ?
					vm_pager_put(pager, m, FALSE) :
					VM_PAGER_FAIL;
				/*
				 *	Reacquire locks dropped for the pager
				 *	operation, then pick up our queue
				 *	successor -- the queue may have been
				 *	rearranged while it was unlocked.
				 */
				vm_object_lock(object);
				vm_page_lock_queues();
				next = (vm_page_t) queue_next(&m->pageq);

				switch (pageout_status) {
				case VM_PAGER_OK:
				case VM_PAGER_PEND:
					m->laundry = FALSE;
					break;
				case VM_PAGER_BAD:
					/*
					 *	Page outside of range of object.
					 *	Right now we essentially lose the
					 *	changes by pretending it worked.
					 *	XXX dubious, what should we do?
					 */
					m->laundry = FALSE;
					m->clean = TRUE;
					pmap_clear_modify(VM_PAGE_TO_PHYS(m));
					break;
				case VM_PAGER_FAIL:
					/*
					 *	If page couldn't be paged out, then
					 *	reactivate the page so it doesn't
					 *	clog the inactive list.  (We will
					 *	try paging it out again later).
					 */
					vm_page_activate(m);
					break;
				}

				pmap_clear_reference(VM_PAGE_TO_PHYS(m));
				m->busy = FALSE;
				PAGE_WAKEUP(m);

				/*
				 *	If the operation is still going, leave the
				 *	paging in progress indicator set so that we
				 *	don't attempt an object collapse.
				 */
				if (pageout_status != VM_PAGER_PEND)
					object->paging_in_progress--;
				thread_wakeup((int) object);
				vm_object_unlock(object);
				m = next;
			}
			else
				m = (vm_page_t) queue_next(&m->pageq);
		}
	}

	/*
	 *	Compute the page shortage.  If we are still very low on memory
	 *	be sure that we will move a minimal amount of pages from active
	 *	to inactive.
	 */

	page_shortage = vm_page_inactive_target - vm_page_inactive_count;
	page_shortage -= vm_page_free_count;

	if ((page_shortage <= 0) && (pages_freed == 0))
		page_shortage = 1;

	while (page_shortage > 0) {
		/*
		 *	Move some more pages from active to inactive.
		 */

		if (queue_empty(&vm_page_queue_active)) {
			break;
		}
		m = (vm_page_t) queue_first(&vm_page_queue_active);
		vm_page_deactivate(m);
		page_shortage--;
	}

	vm_page_unlock_queues();
}

/*
 *	vm_pageout is the high level pageout daemon.
 *
 *	Runs as a kernel thread: derives the paging thresholds from the
 *	initial free-page count (each one only if not already set by
 *	configuration, i.e. still zero), then loops forever, sleeping on
 *	vm_pages_needed and running a scan each time it is woken.
 */

void vm_pageout()
{
	(void) spl0();

	/*
	 *	Initialize some paging parameters.
	 */

	if (vm_page_free_min == 0) {
		/* 5% of memory, clamped to [3, vm_page_free_min_sanity] */
		vm_page_free_min = vm_page_free_count / 20;
		if (vm_page_free_min < 3)
			vm_page_free_min = 3;

		if (vm_page_free_min > vm_page_free_min_sanity)
			vm_page_free_min = vm_page_free_min_sanity;
	}

	if (vm_page_free_reserved == 0) {
		if ((vm_page_free_reserved = vm_page_free_min / 2) < 10)
			vm_page_free_reserved = 10;
	}
	if (vm_pageout_free_min == 0) {
		if ((vm_pageout_free_min = vm_page_free_reserved / 2) > 10)
			vm_pageout_free_min = 10;
	}

	if (vm_page_free_target == 0)
		vm_page_free_target = (vm_page_free_min * 4) / 3;

	if (vm_page_inactive_target == 0)
		vm_page_inactive_target = vm_page_free_min * 2;

	/*
	 *	Enforce the ordering free_min < free_target <
	 *	inactive_target even if the values above came from
	 *	configuration.
	 */
	if (vm_page_free_target <= vm_page_free_min)
		vm_page_free_target = vm_page_free_min + 1;

	if (vm_page_inactive_target <= vm_page_free_target)
		vm_page_inactive_target = vm_page_free_target + 1;

	/*
	 *	The pageout daemon is never done, so loop
	 *	forever.
	 *
	 *	thread_sleep releases vm_pages_needed_lock, so it is
	 *	reacquired after each scan before sleeping again.
	 */

	simple_lock(&vm_pages_needed_lock);
	while (TRUE) {
		thread_sleep((int) &vm_pages_needed, &vm_pages_needed_lock,
			     FALSE);
		vm_pageout_scan();
		vm_pager_sync();
		simple_lock(&vm_pages_needed_lock);
		/* Tell anyone waiting for free pages that a scan completed. */
		thread_wakeup((int) &vm_page_free_count);
	}
}