/*	$NetBSD: uvm_pglist.c,v 1.17 2001/06/27 21:18:34 thorpej Exp $	*/

/*-
 * Copyright (c) 1997 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_pglist.c: pglist functions
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/proc.h>

#include <uvm/uvm.h>

#ifdef VM_PAGE_ALLOC_MEMORY_STATS
#define	STAT_INCR(v)	(v)++
#define	STAT_DECR(v)	do { \
		if ((v) == 0) \
			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
		else \
			(v)--; \
	} while (0)
u_long	uvm_pglistalloc_npages;
#else
#define	STAT_INCR(v)
#define	STAT_DECR(v)
#endif

/*
 * uvm_pglistalloc: allocate a list of pages
 *
 * => allocated pages are placed at the tail of rlist.  rlist is
 *    assumed to be properly initialized by the caller.
 * => returns 0 on success or an errno on failure
 * => XXX: the implementation allocates only a single segment; it also
 *    might be able to take better advantage of vm_physmem[].
 * => doesn't take into account clean non-busy pages on the inactive list
 *    that could be used(?)
 * => params:
 *	size		the size of the allocation, rounded to page size.
 *	low		the low address of the allowed allocation range.
 *	high		the high address of the allowed allocation range.
 *	alignment	memory must be aligned to this power-of-two boundary.
 *	boundary	no segment in the allocation may cross this
 *			power-of-two boundary (relative to zero).
 */
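/*
 * Illustrative sketch only, not part of the original file: a typical
 * caller (for example a bus_dma back-end) might use uvm_pglistalloc()
 * roughly as below to get physically contiguous pages below 16MB that
 * never cross a 64KB boundary.  The constraint values, "size", and
 * "waitok" are hypothetical.
 *
 *	struct pglist mlist;
 *	int error;
 *
 *	TAILQ_INIT(&mlist);
 *	error = uvm_pglistalloc(size, 0, 0xffffff, PAGE_SIZE, 0x10000,
 *	    &mlist, 1, waitok);
 *	if (error)
 *		return (error);
 *	... map and use the pages now linked on mlist ...
 */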
int
uvm_pglistalloc(size, low, high, alignment, boundary, rlist, nsegs, waitok)
	psize_t size;
	paddr_t low, high, alignment, boundary;
	struct pglist *rlist;
	int nsegs, waitok;
{
	paddr_t try, idxpa, lastidxpa;
	int psi;
	struct vm_page *pgs;
	int s, tryidx, idx, pgflidx, end, error, free_list, color;
	struct vm_page *m;
	u_long pagemask;
#ifdef DEBUG
	struct vm_page *tp;
#endif

	KASSERT((alignment & (alignment - 1)) == 0);
	KASSERT((boundary & (boundary - 1)) == 0);

	/*
	 * Our allocations are always page granularity, so our alignment
	 * must be, too.
	 */
	if (alignment < PAGE_SIZE)
		alignment = PAGE_SIZE;

	size = round_page(size);
	try = roundup(low, alignment);

	if (boundary != 0 && boundary < size)
		return (EINVAL);

	pagemask = ~(boundary - 1);

	/* Default to "lose". */
	error = ENOMEM;

	/*
	 * Block all memory allocation and lock the free list.
	 */
	s = uvm_lock_fpageq();

	/* Are there even any free pages? */
	if (uvmexp.free <= (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
		goto out;

	for (;; try += alignment) {
		if (try + size > high) {

			/*
			 * We've run past the allowable range.
			 */

			goto out;
		}

		/*
		 * Make sure this is a managed physical page.
		 */

		if ((psi = vm_physseg_find(atop(try), &idx)) == -1)
			continue; /* managed? */
		if (vm_physseg_find(atop(try + size), NULL) != psi)
			continue; /* end must be in this segment */

		tryidx = idx;
		end = idx + (size / PAGE_SIZE);
		pgs = vm_physmem[psi].pgs;

		/*
		 * Found a suitable starting page.  See if the range is free.
		 */

		for (; idx < end; idx++) {
			if (VM_PAGE_IS_FREE(&pgs[idx]) == 0) {
				break;
			}
			idxpa = VM_PAGE_TO_PHYS(&pgs[idx]);
			if (idx > tryidx) {
				lastidxpa = VM_PAGE_TO_PHYS(&pgs[idx - 1]);
				if ((lastidxpa + PAGE_SIZE) != idxpa) {

					/*
					 * Region not contiguous.
					 */

					break;
				}
				if (boundary != 0 &&
				    ((lastidxpa ^ idxpa) & pagemask) != 0) {

					/*
					 * Region crosses boundary.
					 */

					break;
				}
			}
		}
		if (idx == end) {
			break;
		}
	}

#if PGFL_NQUEUES != 2
#error uvm_pglistalloc needs to be updated
#endif

	/*
	 * We have a chunk of memory that conforms to the requested
	 * constraints.
	 */
	idx = tryidx;
	while (idx < end) {
		m = &pgs[idx];
		free_list = uvm_page_lookup_freelist(m);
		color = VM_PGCOLOR_BUCKET(m);
		pgflidx = (m->flags & PG_ZERO) ? PGFL_ZEROS : PGFL_UNKNOWN;
#ifdef DEBUG
		for (tp = TAILQ_FIRST(&uvm.page_free[
		    free_list].pgfl_buckets[color].pgfl_queues[pgflidx]);
		     tp != NULL;
		     tp = TAILQ_NEXT(tp, pageq)) {
			if (tp == m)
				break;
		}
		if (tp == NULL)
			panic("uvm_pglistalloc: page not on freelist");
#endif
		TAILQ_REMOVE(&uvm.page_free[free_list].pgfl_buckets[
		    color].pgfl_queues[pgflidx], m, pageq);
		uvmexp.free--;
		if (m->flags & PG_ZERO)
			uvmexp.zeropages--;
		m->flags = PG_CLEAN;
		m->pqflags = 0;
		m->uobject = NULL;
		m->uanon = NULL;
		m->version++;
		TAILQ_INSERT_TAIL(rlist, m, pageq);
		idx++;
		STAT_INCR(uvm_pglistalloc_npages);
	}
	error = 0;

 out:
	/*
	 * Check to see if we need to generate some free pages by waking
	 * the pagedaemon.
	 */

	UVM_KICK_PDAEMON();

	uvm_unlock_fpageq(s);

	return (error);
}
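/*
 * Worked illustration, not in the original source, of the boundary
 * test in the loop above: with boundary = 0x10000 (64KB) and
 * PAGE_SIZE = 0x1000, pagemask = ~0xffff.  For adjacent pages at
 * physical addresses 0x2e000 and 0x2f000,
 * (0x2e000 ^ 0x2f000) & ~0xffff == 0, so both lie in the same 64KB
 * chunk and the run may continue.  For 0x2f000 and 0x30000,
 * (0x2f000 ^ 0x30000) & ~0xffff == 0x10000 != 0, so the run would
 * cross a 64KB boundary and is abandoned.
 */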
/*
 * uvm_pglistfree: free a list of pages
 *
 * => pages should already be unmapped
 */

void
uvm_pglistfree(list)
	struct pglist *list;
{
	struct vm_page *m;
	int s;

	/*
	 * Block all memory allocation and lock the free list.
	 */
	s = uvm_lock_fpageq();

	while ((m = TAILQ_FIRST(list)) != NULL) {
		KASSERT((m->pqflags & (PQ_ACTIVE|PQ_INACTIVE)) == 0);
		TAILQ_REMOVE(list, m, pageq);
		m->pqflags = PQ_FREE;
		TAILQ_INSERT_TAIL(&uvm.page_free[
		    uvm_page_lookup_freelist(m)].pgfl_buckets[
		    VM_PGCOLOR_BUCKET(m)].pgfl_queues[PGFL_UNKNOWN], m, pageq);
		uvmexp.free++;
		if (uvmexp.zeropages < UVM_PAGEZERO_TARGET)
			uvm.page_idle_zero = vm_page_zero_enable;
		STAT_DECR(uvm_pglistalloc_npages);
	}

	uvm_unlock_fpageq(s);
}
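/*
 * Illustrative sketch only, not part of the original file: a caller
 * that obtained pages with uvm_pglistalloc() hands the same pglist
 * back once it has unmapped them.  "mlist" is hypothetical and matches
 * the allocation sketch above uvm_pglistalloc().
 *
 *	... unmap the pages on mlist (caller/pmap specific) ...
 *	uvm_pglistfree(&mlist);
 */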