/*
 * (MPSAFE)
 *
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: @(#)vm_pager.c	8.6 (Berkeley) 1/12/94
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Authors: Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 *
 * $FreeBSD: src/sys/vm/vm_pager.c,v 1.54.2.2 2001/11/18 07:11:00 dillon Exp $
 */

/*
 * Paging space routine stubs.  Emulates a matchmaker-like interface
 * for builtin pagers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/buf.h>
#include <sys/ucred.h>
#include <sys/malloc.h>
#include <sys/dsched.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/thread2.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_extern.h>

#include <sys/buf2.h>

MALLOC_DEFINE(M_VMPGDATA, "VM pgdata", "XXX: VM pager private data");

extern struct pagerops defaultpagerops;
extern struct pagerops swappagerops;
extern struct pagerops vnodepagerops;
extern struct pagerops devicepagerops;
extern struct pagerops physpagerops;

int cluster_pbuf_freecnt = -1;	/* unlimited to begin with */

static int dead_pager_getpage (vm_object_t, vm_page_t *, int);
static void dead_pager_putpages (vm_object_t, vm_page_t *, int, int, int *);
static boolean_t dead_pager_haspage (vm_object_t, vm_pindex_t);
static void dead_pager_dealloc (vm_object_t);

/*
 * No requirements.
 */
static int
dead_pager_getpage(vm_object_t obj, vm_page_t *mpp, int seqaccess)
{
	return VM_PAGER_FAIL;
}

/*
 * No requirements.
 */
static void
dead_pager_putpages(vm_object_t object, vm_page_t *m, int count, int flags,
		    int *rtvals)
{
	int i;

	for (i = 0; i < count; i++) {
		rtvals[i] = VM_PAGER_AGAIN;
	}
}

/*
 * No requirements.
 */
static boolean_t
dead_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	return FALSE;
}

/*
 * No requirements.
 */
static void
dead_pager_dealloc(vm_object_t object)
{
	KKASSERT(object->swblock_count == 0);
	return;
}

static struct pagerops deadpagerops = {
	dead_pager_dealloc,
	dead_pager_getpage,
	dead_pager_putpages,
	dead_pager_haspage
};

struct pagerops *pagertab[] = {
	&defaultpagerops,	/* OBJT_DEFAULT */
	&swappagerops,		/* OBJT_SWAP */
	&vnodepagerops,		/* OBJT_VNODE */
	&devicepagerops,	/* OBJT_DEVICE */
	&physpagerops,		/* OBJT_PHYS */
	&deadpagerops		/* OBJT_DEAD */
};

int npagers = NELEM(pagertab);

/*
 * Kernel address space for mapping pages.
 * Used by pagers where KVAs are needed for IO.
 *
 * XXX needs to be large enough to support the number of pending async
 * cleaning requests (NPENDINGIO == 64) * the maximum swap cluster size
 * (MAXPHYS == 64k) if you want to get the most efficiency.
 */
#define PAGER_MAP_SIZE	(8 * 1024 * 1024)

TAILQ_HEAD(swqueue, buf);

int pager_map_size = PAGER_MAP_SIZE;
struct vm_map pager_map;

static int bswneeded_raw;
static int bswneeded_kva;
static long nswbuf_raw;
static struct buf *swbuf_raw;
static vm_offset_t swapbkva;		/* swap buffers kva */
static struct swqueue bswlist_raw;	/* without kva */
static struct swqueue bswlist_kva;	/* with kva */
static struct spinlock bswspin = SPINLOCK_INITIALIZER(&bswspin);
static int pbuf_raw_count;
static int pbuf_kva_count;

SYSCTL_INT(_vfs, OID_AUTO, pbuf_raw_count, CTLFLAG_RD, &pbuf_raw_count, 0,
    "Kernel raw address space reservations");
SYSCTL_INT(_vfs, OID_AUTO, pbuf_kva_count, CTLFLAG_RD, &pbuf_kva_count, 0,
    "Kernel virtual address space reservations");

/*
 * Initialize the swap buffer list.
 *
 * Called from the low level boot code only.
 */
static void
vm_pager_init(void *arg __unused)
{
	TAILQ_INIT(&bswlist_raw);
	TAILQ_INIT(&bswlist_kva);
}
SYSINIT(vm_mem, SI_BOOT1_VM, SI_ORDER_SECOND, vm_pager_init, NULL)

/*
 * Called from the low level boot code only.
 */
void
vm_pager_bufferinit(void)
{
	struct buf *bp;
	long i;

	/*
	 * Reserve KVM space for pbuf data.
	 */
	swapbkva = kmem_alloc_pageable(&pager_map, nswbuf * MAXPHYS);
	if (!swapbkva)
		panic("Not enough pager_map VM space for physical buffers");

	/*
	 * Initial pbuf setup.  These pbufs have KVA reservations.
	 */
	bp = swbuf;
	for (i = 0; i < nswbuf; ++i, ++bp) {
		bp->b_kvabase = (caddr_t)((intptr_t)i * MAXPHYS) + swapbkva;
		bp->b_kvasize = MAXPHYS;
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
		TAILQ_INSERT_HEAD(&bswlist_kva, bp, b_freelist);
		++pbuf_kva_count;
	}

	/*
	 * Initial pbuf setup.  These pbufs do not have KVA reservations,
	 * so we can have a lot more of them.  These are typically used
	 * to massage low level buf/bio requests.
	 */
	nswbuf_raw = nbuf * 2;
	swbuf_raw = (void *)kmem_alloc(&kernel_map,
				round_page(nswbuf_raw * sizeof(struct buf)));
	bp = swbuf_raw;
	for (i = 0; i < nswbuf_raw; ++i, ++bp) {
		BUF_LOCKINIT(bp);
		buf_dep_init(bp);
		TAILQ_INSERT_HEAD(&bswlist_raw, bp, b_freelist);
		++pbuf_raw_count;
	}

	/*
	 * Allow the clustering code to use half of our pbufs.
	 */
	cluster_pbuf_freecnt = nswbuf / 2;
}

/*
 * No requirements.
 */
void
vm_pager_deallocate(vm_object_t object)
{
	(*pagertab[object->type]->pgo_dealloc) (object);
}

/*
 * vm_pager_get_pages() - inline, see vm/vm_pager.h
 * vm_pager_put_pages() - inline, see vm/vm_pager.h
 * vm_pager_has_page() - inline, see vm/vm_pager.h
 * vm_pager_page_inserted() - inline, see vm/vm_pager.h
 * vm_pager_page_removed() - inline, see vm/vm_pager.h
 */

#if 0
/*
 * vm_pager_sync:
 *
 *	Called by pageout daemon before going back to sleep.
 *	Gives pagers a chance to clean up any completed async paging
 *	operations.
 */
void
vm_pager_sync(void)
{
	struct pagerops **pgops;

	for (pgops = pagertab; pgops < &pagertab[npagers]; pgops++)
		if (pgops && ((*pgops)->pgo_sync != NULL))
			(*(*pgops)->pgo_sync) ();
}

#endif

/*
 * Initialize a physical buffer.
 *
 * No requirements.
 */
static void
initpbuf(struct buf *bp)
{
	bp->b_qindex = 0;		/* BQUEUE_NONE */
	bp->b_data = bp->b_kvabase;	/* NULL if pbuf sans kva */
	bp->b_flags = B_PAGING;
	bp->b_cmd = BUF_CMD_DONE;
	bp->b_error = 0;
	bp->b_bcount = 0;
	bp->b_bufsize = MAXPHYS;
	initbufbio(bp);
	xio_init(&bp->b_xio);
	BUF_LOCK(bp, LK_EXCLUSIVE);
}

/*
 * Allocate a physical buffer
 *
 * There are a limited number (nswbuf) of physical buffers.  We need
 * to make sure that no single subsystem is able to hog all of them,
 * so each subsystem implements a counter which is typically initialized
 * to 1/2 nswbuf.  getpbuf() decrements this counter on allocation and
 * increments it on release, and blocks if the counter hits zero.  A
 * subsystem may initialize the counter to -1 to disable the feature,
 * but it must still be sure to match up all uses of getpbuf() with
 * relpbuf() using the same variable.
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 *
 * Physical buffers can be with or without KVA space reserved.  There
 * are severe limitations on the ones with KVA reserved, and fewer
 * limitations on the ones without.  getpbuf() gets one without,
 * getpbuf_kva() gets one with.
 *
 * No requirements.
 */
struct buf *
getpbuf(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	for (;;) {
		if (pfreecnt) {
			while (*pfreecnt == 0)
				ssleep(pfreecnt, &bswspin, 0, "wswbuf0", 0);
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist_raw)) != NULL)
			break;
		bswneeded_raw = 1;
		ssleep(&bswneeded_raw, &bswspin, 0, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist_raw, bp, b_freelist);
	--pbuf_raw_count;
	if (pfreecnt)
		--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);
	KKASSERT(dsched_is_clear_buf_priv(bp));

	return (bp);
}

struct buf *
getpbuf_kva(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	for (;;) {
		if (pfreecnt) {
			while (*pfreecnt == 0)
				ssleep(pfreecnt, &bswspin, 0, "wswbuf0", 0);
		}

		/* get a bp from the swap buffer header pool */
		if ((bp = TAILQ_FIRST(&bswlist_kva)) != NULL)
			break;
		bswneeded_kva = 1;
		ssleep(&bswneeded_kva, &bswspin, 0, "wswbuf1", 0);
		/* loop in case someone else grabbed one */
	}
	TAILQ_REMOVE(&bswlist_kva, bp, b_freelist);
	--pbuf_kva_count;
	if (pfreecnt)
		--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);
	KKASSERT(dsched_is_clear_buf_priv(bp));

	return (bp);
}

/*
 * Allocate a physical buffer, if one is available.
 *
 * Note that there is no NULL hack here - all subsystems using this
 * call understand how to use pfreecnt.
 *
 * No requirements.
 */
struct buf *
trypbuf(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist_raw)) == NULL) {
		spin_unlock(&bswspin);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist_raw, bp, b_freelist);
	--pbuf_raw_count;
	--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);

	return bp;
}

struct buf *
trypbuf_kva(int *pfreecnt)
{
	struct buf *bp;

	spin_lock(&bswspin);

	if (*pfreecnt == 0 || (bp = TAILQ_FIRST(&bswlist_kva)) == NULL) {
		spin_unlock(&bswspin);
		return NULL;
	}
	TAILQ_REMOVE(&bswlist_kva, bp, b_freelist);
	--pbuf_kva_count;
	--*pfreecnt;

	spin_unlock(&bswspin);

	initpbuf(bp);

	return bp;
}

/*
 * Release a physical buffer
 *
 * NOTE: pfreecnt can be NULL, but this 'feature' will be removed
 * relatively soon when the rest of the subsystems get smart about it. XXX
 *
 * No requirements.
 */
void
relpbuf(struct buf *bp, int *pfreecnt)
{
	int wake_bsw_kva = 0;
	int wake_bsw_raw = 0;
	int wake_freecnt = 0;

	KKASSERT(bp->b_flags & B_PAGING);
	dsched_exit_buf(bp);

	BUF_UNLOCK(bp);

	spin_lock(&bswspin);
	if (bp->b_kvabase) {
		TAILQ_INSERT_HEAD(&bswlist_kva, bp, b_freelist);
		++pbuf_kva_count;
	} else {
		TAILQ_INSERT_HEAD(&bswlist_raw, bp, b_freelist);
		++pbuf_raw_count;
	}
	if (bswneeded_kva) {
		bswneeded_kva = 0;
		wake_bsw_kva = 1;
	}
	if (bswneeded_raw) {
		bswneeded_raw = 0;
		wake_bsw_raw = 1;
	}
	if (pfreecnt) {
		if (++*pfreecnt == 1)
			wake_freecnt = 1;
	}
	spin_unlock(&bswspin);

	if (wake_bsw_kva)
		wakeup(&bswneeded_kva);
	if (wake_bsw_raw)
		wakeup(&bswneeded_raw);
	if (wake_freecnt)
		wakeup(pfreecnt);
}

/*
 * Adjust a subsystem's pbuf limit counter by n and wake up any waiters.
 *
 * No requirements.
 */
void
pbuf_adjcount(int *pfreecnt, int n)
{
	if (n) {
		spin_lock(&bswspin);
		*pfreecnt += n;
		spin_unlock(&bswspin);
		wakeup(pfreecnt);
	}
}
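
#if 0
/*
 * Illustrative sketch only (never compiled): the typical consumer pattern
 * for the pbuf interface described above getpbuf().  The subsystem names
 * below (example_pbuf_freecnt, example_pbuf_init, example_pbuf_io) and the
 * omitted I/O step are hypothetical placeholders; what is demonstrated is
 * the pattern of reserving a private free count, pairing getpbuf_kva()
 * with relpbuf() on that same counter, and letting the allocation path
 * block when the counter reaches zero.
 */
static int example_pbuf_freecnt;	/* hypothetical per-subsystem limit */

static void
example_pbuf_init(void)
{
	/* Counters are typically sized to a fraction of nswbuf, e.g. half. */
	pbuf_adjcount(&example_pbuf_freecnt, nswbuf / 2);
}

static void
example_pbuf_io(void)
{
	struct buf *bp;

	/*
	 * Blocks while example_pbuf_freecnt is zero, then returns an
	 * exclusively locked pbuf with a KVA reservation.
	 */
	bp = getpbuf_kva(&example_pbuf_freecnt);

	/* ... map pages into bp->b_data and issue the I/O here ... */

	/*
	 * Release must use the same counter that was passed to
	 * getpbuf_kva(); relpbuf() wakes any threads sleeping on it.
	 */
	relpbuf(bp, &example_pbuf_freecnt);
}
#endif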