/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pager.c,v 1.54.2.2 2001/11/18 07:11:00 dillon Exp $
 * $DragonFly: src/sys/vm/vm_pager.c,v 1.24 2007/11/06 03:50:01 dillon Exp $
 */

/*
 * Paging space routine stubs.  Emulates a matchmaker-like interface
 * for builtin pagers.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/dsched.h>
#include <sys/proc.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/buf2.h>

MALLOC_DEFINE(M_VMPGDATA, "VM pgdata", "XXX: VM pager private data");

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;
extern struct pagerops physpagerops;

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

static int dead_pager_getpage (vm_object_t, vm_page_t *, int);
static void dead_pager_putpages (vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage (vm_object_t, vm_pindex_t);
static void dead_pager_dealloc (vm_object_t);

/*
 * No requirements.
 */
static int
dead_pager_getpage(vm_object_t obj, vm_page_t *mpp, int seqaccess)
{
	return VM_PAGER_FAIL;
}

/*
 * No requirements.
 */
static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
		    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}
}

/*
 * No requirements.
 */
static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	return FALSE;
}

/*
 * No requirements.
 */
static void
dead_pager_dealloc(vm_object_t object)
{
	KKASSERT(object->swblock_count == 0);
}

static struct pagerops deadpagerops = {
	dead_pager_dealloc,
	dead_pager_getpage,
	dead_pager_putpages,
	dead_pager_haspage
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops		/* OBJT_DEAD */
};

int npagers = sizeof(pagertab) / sizeof(pagertab[0]);
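/*
 * Illustrative sketch (not built): how a caller reaches a pager through
 * the pagertab[] dispatch table above.  The object's type (OBJT_DEFAULT,
 * OBJT_SWAP, ...) indexes the table and selects the pagerops vector for
 * that backing store.  example_object_haspage() is a hypothetical name,
 * not part of this file; the real inline wrappers live in vm/vm_pager.h.
 */
#if 0
static boolean_t
example_object_haspage(vm_object_t object, vm_pindex_t pindex)
{
	/* dispatch to the pager backing this object type */
	return ((*pagertab[object->type]->pgo_haspage)(object, pindex));
}
#endif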
/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)

int pager_map_size = PAGER_MAP_SIZE;
struct vm_map pager_map;

static int bswneeded;
static vm_offset_t swapbkva;		/* swap buffers kva */
static TAILQ_HEAD(swqueue, buf) bswlist;
static struct spinlock bswspin = SPINLOCK_INITIALIZER(&bswspin);

/*
 * Initialize the swap buffer list.
 *
 * Called from the low level boot code only.
 */
static void
vm_pager_init(void *arg __unused)
{
	TAILQ_INIT(&bswlist);
}
SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_SECOND, vm_pager_init, NULL)

/*
 * Called from the low level boot code only.
 */
void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	int i;

	/*
	 * Reserve KVM space for pbuf data.
	 */
	swapbkva = kmem_alloc_pageable(&pager_map, nswbuf * MAXPHYS);
	if (!swapbkva)
		panic("Not enough pager_map VM space for physical buffers");

	/*
	 * Initial pbuf setup.
	 */
	bp = swbuf;
	for (i = 0; i < nswbuf; ++i, ++bp) {
		bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva;
		bp->b_kvasize = MAXPHYS;
		TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
	}

	/*
	 * Allow the clustering code to use half of our pbufs.
	 */
	cluster_pbuf_freecnt = nswbuf / 2;
}

/*
 * No requirements.
 */
void
vm_pager_deallocate(vm_object_t object)
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}

/*
 * vm_pager_get_pages() - inline, see vm/vm_pager.h
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 * vm_pager_page_inserted() - inline, see vm/vm_pager.h
 * vm_pager_page_removed() - inline, see vm/vm_pager.h
 */

#if 0
/*
 * vm_pager_sync:
 *
 *	Called by pageout daemon before going back to sleep.
 *	Gives pagers a chance to clean up any completed async paging
 *	operations.
 */
void
vm_pager_sync(void)
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_sync != NULL))
			(*(*pgops)->pgo_sync) ();
}

#endif

/*
 * Initialize a physical buffer.
 *
 * No requirements.
 */
static void
initpbuf(struct buf *bp)
{
	bp->b_qindex = 0;		/* BQUEUE_NONE */
	bp->b_data = bp->b_kvabase;
	bp->b_flags = B_PAGING;
	bp->b_cmd = BUF_CMD_DONE;
	bp->b_error = 0;
	bp->b_bcount = 0;
	bp->b_bufsize = MAXPHYS;
	initbufbio(bp);
	xio_init(&bp->b_xio);
	BUF_LOCK(bp, LK_EXCLUSIVE);
}

/*
 * Allocate a physical buffer.
 *
 * There are a limited number (nswbuf) of physical buffers.  We need
 * to make sure that no single subsystem is able to hog all of them,
 * so each subsystem implements a counter which is typically initialized
 * to 1/2 nswbuf.  getpbuf() decrements this counter on allocation and
 * increments it on release, and blocks if the counter hits zero.  A
 * subsystem may initialize the counter to -1 to disable the feature,
 * but it must still be sure to match up all uses of getpbuf() with
 * relpbuf() using the same variable.
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 *
 * No requirements.
 */
struct buf *
getpbuf(int *pfreecnt)
{
	struct buf *bp;

	spin_lock_wr(&bswspin);

	for (;;) {
		if (pfreecnt) {
			while (*pfreecnt == 0)
				ssleep(pfreecnt, &bswspin, 0, "wswbuf0", 0);
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist)) != NULL)
			break;
		bswneeded = 1;
		ssleep(&bswneeded, &bswspin, 0, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	if (pfreecnt)
		--*pfreecnt;

	spin_unlock_wr(&bswspin);

	initpbuf(bp);
	KKASSERT(dsched_is_clear_buf_priv(bp));
	return bp;
}
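/*
 * Illustrative sketch (not built): the counter protocol described above,
 * as a hypothetical subsystem would use it.  "mysubsys_pbuf_freecnt" and
 * "mysubsys_io" are invented names for illustration; the nswbuf / 2
 * initialization mirrors what vm_pager_bufferinit() does for the
 * clustering code.
 */
#if 0
static int mysubsys_pbuf_freecnt = -1;	/* set to nswbuf / 2 at init time */

static void
mysubsys_io(void)
{
	struct buf *bp;

	/* may block until both a pbuf and a counter slot are available */
	bp = getpbuf(&mysubsys_pbuf_freecnt);

	/* ... fill in b_data / b_bcount and issue the I/O ... */

	/* must pair with getpbuf() using the same counter variable */
	relpbuf(bp, &mysubsys_pbuf_freecnt);
}
#endif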
/*
 * Allocate a physical buffer, if one is available.
 *
 * Note that there is no NULL hack here - all subsystems using this
 * call understand how to use pfreecnt.
 *
 * No requirements.
 */
struct buf *
trypbuf(int *pfreecnt)
{
	struct buf *bp;

	spin_lock_wr(&bswspin);

	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist)) == NULL) {
		spin_unlock_wr(&bswspin);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist, bp, b_freelist);
	--*pfreecnt;

	spin_unlock_wr(&bswspin);

	initpbuf(bp);

	return bp;
}

/*
 * Release a physical buffer.
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 *
 * No requirements.
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{
	int wake_bsw = 0;
	int wake_freecnt = 0;

	KKASSERT(bp->b_flags & B_PAGING);
	dsched_exit_buf(bp);

	spin_lock_wr(&bswspin);

	BUF_UNLOCK(bp);
	TAILQ_INSERT_HEAD(&bswlist, bp, b_freelist);
	if (bswneeded) {
		bswneeded = 0;
		wake_bsw = 1;
	}
	if (pfreecnt) {
		if (++*pfreecnt == 1)
			wake_freecnt = 1;
	}

	spin_unlock_wr(&bswspin);

	if (wake_bsw)
		wakeup(&bswneeded);
	if (wake_freecnt)
		wakeup(pfreecnt);
}
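/*
 * Illustrative sketch (not built): non-blocking allocation with trypbuf().
 * Unlike getpbuf(), trypbuf() never sleeps, so callers must be prepared
 * for a NULL return and retry or fall back.  "mysubsys_pbuf_freecnt" and
 * "mysubsys_try_io" are invented names for illustration.
 */
#if 0
static int
mysubsys_try_io(void)
{
	struct buf *bp;

	if ((bp = trypbuf(&mysubsys_pbuf_freecnt)) == NULL)
		return EWOULDBLOCK;	/* no pbuf available right now */

	/* ... issue the I/O ... */

	relpbuf(bp, &mysubsys_pbuf_freecnt);
	return 0;
}
#endif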