/*
 * Copyright (c) 2000 Peter Wemm
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/vm/phys_pager.c,v 1.3.2.3 2000/12/17 02:05:41 alfred Exp $
 * $DragonFly: src/sys/vm/phys_pager.c,v 1.5 2006/03/27 01:54:18 dillon Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/linker_set.h>
#include <sys/conf.h>
#include <sys/mman.h>
#include <sys/sysctl.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vm_zone.h>

#include <sys/thread2.h>

vm_object_t
phys_pager_alloc(void *handle, off_t size, vm_prot_t prot, off_t foff)
{
	vm_object_t object;

	/*
	 * Offset should be page aligned.
	 */
	if (foff & PAGE_MASK)
		return (NULL);

	size = round_page64(size);

	KKASSERT(handle == NULL);
#if 0
	if (handle != NULL) {
		/*
		 * Lock to prevent object creation race condition.
		 */
		while (phys_pager_alloc_lock) {
			phys_pager_alloc_lock_want++;
			tsleep(&phys_pager_alloc_lock, 0, "ppall", 0);
			phys_pager_alloc_lock_want--;
		}
		phys_pager_alloc_lock = 1;

		/*
		 * Look up pager, creating as necessary.
		 */
		object = vm_pager_object_lookup(&phys_pager_object_list, handle);
		if (object == NULL) {
			/*
			 * Allocate object and associate it with the pager.
			 */
			object = vm_object_allocate(OBJT_PHYS,
						    OFF_TO_IDX(foff + size));
			object->handle = handle;
			TAILQ_INSERT_TAIL(&phys_pager_object_list, object,
					  pager_object_list);
		} else {
			/*
			 * Gain a reference to the object.
			 */
			vm_object_reference(object);
			if (OFF_TO_IDX(foff + size) > object->size)
				object->size = OFF_TO_IDX(foff + size);
		}
		phys_pager_alloc_lock = 0;
		if (phys_pager_alloc_lock_want)
			wakeup(&phys_pager_alloc_lock);
	} else {
		...
	}
#endif
	object = vm_object_allocate(OBJT_PHYS, OFF_TO_IDX(foff + size));

	return (object);
}

/*
 * The phys pager has nothing to tear down: its objects are anonymous and
 * never carry swap blocks, which the assertions below verify.
 */
static void
phys_pager_dealloc(vm_object_t object)
{
	KKASSERT(object->handle == NULL);
	KKASSERT(object->swblock_count == 0);
}

/*
 * Satisfy a fault by handing back a zero-filled page.  The page is taken
 * out of pv management and marked fully valid and clean.
 */
static int
phys_pager_getpage(vm_object_t object, vm_page_t *mpp, int seqaccess)
{
	vm_page_t m = *mpp;

	crit_enter();
	if ((m->flags & PG_ZERO) == 0)
		vm_page_zero_fill(m);
	vm_page_flag_set(m, PG_ZERO);
	/* Switch off pv_entries */
	vm_page_unmanage(m);
	m->valid = VM_PAGE_BITS_ALL;
	m->dirty = 0;
	crit_exit();

	return (VM_PAGER_OK);
}

/*
 * OBJT_PHYS pages are never paged out, so reaching this routine is a bug.
 */
static void
phys_pager_putpages(vm_object_t object, vm_page_t *m, int count,
		    boolean_t sync, int *rtvals)
{

	panic("phys_pager_putpage called");
}

/*
 * Implement a pretty aggressive clustered getpages strategy.  Hint that
 * everything in an entire 4MB window should be prefaulted at once.
 *
 * XXX 4MB (1024 slots per page table page) is convenient for x86,
 * but may not be for other arches.
 */
#ifndef PHYSCLUSTER
#define PHYSCLUSTER	1024
#endif

/*
 * Any offset within the object can be materialized on demand, so always
 * report that the page can be provided.
 */
static boolean_t
phys_pager_haspage(vm_object_t object, vm_pindex_t pindex)
{
	return (TRUE);
}

struct pagerops physpagerops = {
	phys_pager_dealloc,
	phys_pager_getpage,
	phys_pager_putpages,
	phys_pager_haspage
};
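/*
 * Illustrative sketch, not part of the original file: the phys pager only
 * serves anonymous, zero-fill objects, so a hypothetical caller would pass
 * a NULL handle and a page-aligned offset when creating one.  The helper
 * name below is made up for illustration and is compiled out with #if 0.
 */
#if 0
static vm_object_t
phys_example_object(off_t bytes)
{
	/* handle must be NULL (asserted in phys_pager_alloc); offset 0 is page aligned */
	return (phys_pager_alloc(NULL, bytes, VM_PROT_ALL, 0));
}
#endif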