xref: /netbsd-src/sys/uvm/uvm_pglist.c (revision d16b7486a53dcb8072b60ec6fcb4373a2d0c27b7)
1 /*	$NetBSD: uvm_pglist.c,v 1.90 2021/12/21 08:27:49 skrll Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 2019 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, and by Andrew Doran.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * uvm_pglist.c: pglist functions
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.90 2021/12/21 08:27:49 skrll Exp $");
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/cpu.h>
43 
44 #include <uvm/uvm.h>
45 #include <uvm/uvm_pdpolicy.h>
46 #include <uvm/uvm_pgflcache.h>
47 
48 #ifdef VM_PAGE_ALLOC_MEMORY_STATS
49 #define	STAT_INCR(v)	(v)++
50 #define	STAT_DECR(v)	do { \
51 		if ((v) == 0) \
52 			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
53 		else \
54 			(v)--; \
55 	} while (/*CONSTCOND*/ 0)
56 u_long	uvm_pglistalloc_npages;
57 #else
58 #define	STAT_INCR(v)
59 #define	STAT_DECR(v)
60 #endif
61 
62 kmutex_t uvm_pglistalloc_contig_lock;
63 
64 /*
65  * uvm_pglistalloc: allocate a list of pages
66  *
67  * => allocated pages are placed onto an rlist.  rlist is
68  *    initialized by uvm_pglistalloc.
69  * => returns 0 on success or errno on failure
70  * => implementation allocates a single segment if any constraints are
71  *	imposed by call arguments.
72  * => doesn't take into account clean non-busy pages on inactive list
73  *	that could be used(?)
74  * => params:
75  *	size		the size of the allocation, rounded to page size.
76  *	low		the low address of the allowed allocation range.
77  *	high		the high address of the allowed allocation range.
78  *	alignment	memory must be aligned to this power-of-two boundary.
79  *	boundary	no segment in the allocation may cross this
80  *			power-of-two boundary (relative to zero).
81  */
82 
83 static void
84 uvm_pglist_add(struct vm_page *pg, struct pglist *rlist)
85 {
86 	struct pgfreelist *pgfl;
87 	struct pgflbucket *pgb;
88 
89 	pgfl = &uvm.page_free[uvm_page_get_freelist(pg)];
90 	pgb = pgfl->pgfl_buckets[uvm_page_get_bucket(pg)];
91 
92 #ifdef UVMDEBUG
93 	struct vm_page *tp;
94 	LIST_FOREACH(tp, &pgb->pgb_colors[VM_PGCOLOR(pg)], pageq.list) {
95 		if (tp == pg)
96 			break;
97 	}
98 	if (tp == NULL)
99 		panic("uvm_pglistalloc: page not on freelist");
100 #endif
101 	LIST_REMOVE(pg, pageq.list);
102 	pgb->pgb_nfree--;
103     	CPU_COUNT(CPU_COUNT_FREEPAGES, -1);
104 	pg->flags = PG_CLEAN;
105 	pg->uobject = NULL;
106 	pg->uanon = NULL;
107 	TAILQ_INSERT_TAIL(rlist, pg, pageq.queue);
108 	STAT_INCR(uvm_pglistalloc_npages);
109 }
110 
111 static int
112 uvm_pglistalloc_c_ps(uvm_physseg_t psi, int num, paddr_t low, paddr_t high,
113     paddr_t alignment, paddr_t boundary, struct pglist *rlist)
114 {
115 	signed int candidate, limit, candidateidx, end, idx, skip;
116 	int pagemask;
117 	bool second_pass;
118 #ifdef DEBUG
119 	paddr_t idxpa, lastidxpa;
120 	paddr_t cidx = 0;	/* XXX: GCC */
121 #endif
122 #ifdef PGALLOC_VERBOSE
123 	printf("pgalloc: contig %d pgs from psi %d\n", num, psi);
124 #endif
125 
126 	low = atop(low);
127 	high = atop(high);
128 
129 	/*
130 	 * Make sure that physseg falls within with range to be allocated from.
131 	 */
132 	if (high <= uvm_physseg_get_avail_start(psi) ||
133 	    low >= uvm_physseg_get_avail_end(psi))
134 		return -1;
135 
136 	/*
137 	 * We start our search at the just after where the last allocation
138 	 * succeeded.
139 	 */
140 	alignment = atop(alignment);
141 	candidate = roundup2(uimax(low, uvm_physseg_get_avail_start(psi) +
142 		uvm_physseg_get_start_hint(psi)), alignment);
143 	limit = uimin(high, uvm_physseg_get_avail_end(psi));
144 	pagemask = ~((boundary >> PAGE_SHIFT) - 1);
145 	skip = 0;
146 	second_pass = false;
147 
148 	for (;;) {
149 		bool ok = true;
150 		signed int cnt;
151 
152 		if (candidate + num > limit) {
153 			if (uvm_physseg_get_start_hint(psi) == 0 || second_pass) {
154 				/*
155 				 * We've run past the allowable range.
156 				 */
157 				return 0; /* FAIL = 0 pages*/
158 			}
159 			/*
160 			 * We've wrapped around the end of this segment
161 			 * so restart at the beginning but now our limit
162 			 * is were we started.
163 			 */
164 			second_pass = true;
165 			candidate = roundup2(uimax(low, uvm_physseg_get_avail_start(psi)), alignment);
166 			limit = uimin(limit, uvm_physseg_get_avail_start(psi) +
167 			    uvm_physseg_get_start_hint(psi));
168 			skip = 0;
169 			continue;
170 		}
171 		if (boundary != 0 &&
172 		    ((candidate ^ (candidate + num - 1)) & pagemask) != 0) {
173 			/*
174 			 * Region crosses boundary. Jump to the boundary
175 			 * just crossed and ensure alignment.
176 			 */
177 			candidate = (candidate + num - 1) & pagemask;
178 			candidate = roundup2(candidate, alignment);
179 			skip = 0;
180 			continue;
181 		}
182 #ifdef DEBUG
183 		/*
184 		 * Make sure this is a managed physical page.
185 		 */
186 
187 		if (uvm_physseg_find(candidate, &cidx) != psi)
188 			panic("pgalloc contig: botch1");
189 		if (cidx != candidate - uvm_physseg_get_start(psi))
190 			panic("pgalloc contig: botch2");
191 		if (uvm_physseg_find(candidate + num - 1, &cidx) != psi)
192 			panic("pgalloc contig: botch3");
193 		if (cidx != candidate - uvm_physseg_get_start(psi) + num - 1)
194 			panic("pgalloc contig: botch4");
195 #endif
196 		candidateidx = candidate - uvm_physseg_get_start(psi);
197 		end = candidateidx + num;
198 
199 		/*
200 		 * Found a suitable starting page.  See if the range is free.
201 		 */
202 #ifdef PGALLOC_VERBOSE
203 		printf("%s: psi=%d candidate=%#x end=%#x skip=%#x, align=%#"PRIxPADDR,
204 		    __func__, psi, candidateidx, end, skip, alignment);
205 #endif
206 		/*
207 		 * We start at the end and work backwards since if we find a
208 		 * non-free page, it makes no sense to continue.
209 		 *
210 		 * But on the plus size we have "vetted" some number of free
211 		 * pages.  If this iteration fails, we may be able to skip
212 		 * testing most of those pages again in the next pass.
213 		 */
214 		for (idx = end - 1; idx >= candidateidx + skip; idx--) {
215 			if (VM_PAGE_IS_FREE(uvm_physseg_get_pg(psi, idx)) == 0) {
216 				ok = false;
217 				break;
218 			}
219 
220 #ifdef DEBUG
221 			if (idx > candidateidx) {
222 				idxpa = VM_PAGE_TO_PHYS(uvm_physseg_get_pg(psi, idx));
223 				lastidxpa = VM_PAGE_TO_PHYS(uvm_physseg_get_pg(psi, idx - 1));
224 				if ((lastidxpa + PAGE_SIZE) != idxpa) {
225 					/*
226 					 * Region not contiguous.
227 					 */
228 					panic("pgalloc contig: botch5");
229 				}
230 				if (boundary != 0 &&
231 				    ((lastidxpa ^ idxpa) & ~(boundary - 1))
232 				    != 0) {
233 					/*
234 					 * Region crosses boundary.
235 					 */
236 					panic("pgalloc contig: botch6");
237 				}
238 			}
239 #endif
240 		}
241 
242 		if (ok) {
243 			while (skip-- > 0) {
244 				KDASSERT(VM_PAGE_IS_FREE(uvm_physseg_get_pg(psi, candidateidx + skip)));
245 			}
246 #ifdef PGALLOC_VERBOSE
247 			printf(": ok\n");
248 #endif
249 			break;
250 		}
251 
252 #ifdef PGALLOC_VERBOSE
253 		printf(": non-free at %#x\n", idx - candidateidx);
254 #endif
255 		/*
256 		 * count the number of pages we can advance
257 		 * since we know they aren't all free.
258 		 */
259 		cnt = idx + 1 - candidateidx;
260 		/*
261 		 * now round up that to the needed alignment.
262 		 */
263 		cnt = roundup2(cnt, alignment);
264 		/*
265 		 * The number of pages we can skip checking
266 		 * (might be 0 if cnt > num).
267 		 */
268 		skip = uimax(num - cnt, 0);
269 		candidate += cnt;
270 	}
271 
272 	/*
273 	 * we have a chunk of memory that conforms to the requested constraints.
274 	 */
275 	for (idx = candidateidx; idx < end; idx++)
276 		uvm_pglist_add(uvm_physseg_get_pg(psi, idx), rlist);
277 
278 	/*
279 	 * the next time we need to search this segment, start after this
280 	 * chunk of pages we just allocated.
281 	 */
282 	uvm_physseg_set_start_hint(psi, candidate + num -
283 	    uvm_physseg_get_avail_start(psi));
284 	KASSERTMSG(uvm_physseg_get_start_hint(psi) <=
285 	    uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi),
286 	    "%x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
287 	    candidate + num,
288 	    uvm_physseg_get_start_hint(psi), uvm_physseg_get_start_hint(psi),
289 	    uvm_physseg_get_avail_end(psi), uvm_physseg_get_avail_start(psi),
290 	    uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi));
291 
292 #ifdef PGALLOC_VERBOSE
293 	printf("got %d pgs\n", num);
294 #endif
295 	return num; /* number of pages allocated */
296 }
297 
298 static int
299 uvm_pglistalloc_contig_aggressive(int num, paddr_t low, paddr_t high,
300     paddr_t alignment, paddr_t boundary, struct pglist *rlist)
301 {
302 	struct vm_page *pg;
303 	struct pglist tmp;
304 	paddr_t pa, off, spa, amask, bmask, rlo, rhi;
305 	uvm_physseg_t upm;
306 	int error, i, run, acnt;
307 
308 	/*
309 	 * Allocate pages the normal way and for each new page, check if
310 	 * the page completes a range satisfying the request.
311 	 * The pagedaemon will evict pages as we go and we are very likely
312 	 * to get compatible pages eventually.
313 	 */
314 
315 	error = ENOMEM;
316 	TAILQ_INIT(&tmp);
317 	acnt = atop(alignment);
318 	amask = ~(alignment - 1);
319 	bmask = ~(boundary - 1);
320 	KASSERT(bmask <= amask);
321 	mutex_enter(&uvm_pglistalloc_contig_lock);
322 	while (uvm_reclaimable()) {
323 		pg = uvm_pagealloc(NULL, 0, NULL, 0);
324 		if (pg == NULL) {
325 			uvm_wait("pglac2");
326 			continue;
327 		}
328 		pg->flags |= PG_PGLCA;
329 		TAILQ_INSERT_HEAD(&tmp, pg, pageq.queue);
330 
331 		pa = VM_PAGE_TO_PHYS(pg);
332 		if (pa < low || pa >= high) {
333 			continue;
334 		}
335 
336 		upm = uvm_physseg_find(atop(pa), &off);
337 		KASSERT(uvm_physseg_valid_p(upm));
338 
339 		spa = pa & amask;
340 
341 		/*
342 		 * Look backward for at most num - 1 pages, back to
343 		 * the highest of:
344 		 *  - the first page in the physseg
345 		 *  - the specified low address
346 		 *  - num-1 pages before the one we just allocated
347 		 *  - the start of the boundary range containing pa
348 		 * all rounded up to alignment.
349 		 */
350 
351 		rlo = roundup2(ptoa(uvm_physseg_get_avail_start(upm)), alignment);
352 		rlo = MAX(rlo, roundup2(low, alignment));
353 		rlo = MAX(rlo, roundup2(pa - ptoa(num - 1), alignment));
354 		if (boundary) {
355 			rlo = MAX(rlo, spa & bmask);
356 		}
357 
358 		/*
359 		 * Look forward as far as the lowest of:
360 		 *  - the last page of the physseg
361 		 *  - the specified high address
362 		 *  - the boundary after pa
363 		 */
364 
365 		rhi = ptoa(uvm_physseg_get_avail_end(upm));
366 		rhi = MIN(rhi, high);
367 		if (boundary) {
368 			rhi = MIN(rhi, rounddown2(pa, boundary) + boundary);
369 		}
370 
371 		/*
372 		 * Make sure our range to consider is big enough.
373 		 */
374 
375 		if (rhi - rlo < ptoa(num)) {
376 			continue;
377 		}
378 
379 		run = 0;
380 		while (spa > rlo) {
381 
382 			/*
383 			 * Examine pages before spa in groups of acnt.
384 			 * If all the pages in a group are marked then add
385 			 * these pages to the run.
386 			 */
387 
388 			for (i = 0; i < acnt; i++) {
389 				pg = PHYS_TO_VM_PAGE(spa - alignment + ptoa(i));
390 				if ((pg->flags & PG_PGLCA) == 0) {
391 					break;
392 				}
393 			}
394 			if (i < acnt) {
395 				break;
396 			}
397 			spa -= alignment;
398 			run += acnt;
399 		}
400 
401 		/*
402 		 * Look forward for any remaining pages.
403 		 */
404 
405 		if (spa + ptoa(num) > rhi) {
406 			continue;
407 		}
408 		for (; run < num; run++) {
409 			pg = PHYS_TO_VM_PAGE(spa + ptoa(run));
410 			if ((pg->flags & PG_PGLCA) == 0) {
411 				break;
412 			}
413 		}
414 		if (run < num) {
415 			continue;
416 		}
417 
418 		/*
419 		 * We found a match.  Move these pages from the tmp list to
420 		 * the caller's list.
421 		 */
422 
423 		for (i = 0; i < num; i++) {
424 			pg = PHYS_TO_VM_PAGE(spa + ptoa(i));
425 			TAILQ_REMOVE(&tmp, pg, pageq.queue);
426 			pg->flags &= ~PG_PGLCA;
427 			TAILQ_INSERT_TAIL(rlist, pg, pageq.queue);
428 			STAT_INCR(uvm_pglistalloc_npages);
429 		}
430 
431 		error = 0;
432 		break;
433 	}
434 
435 	/*
436 	 * Free all the pages that we didn't need.
437 	 */
438 
439 	while (!TAILQ_EMPTY(&tmp)) {
440 		pg = TAILQ_FIRST(&tmp);
441 		TAILQ_REMOVE(&tmp, pg, pageq.queue);
442 		pg->flags &= ~PG_PGLCA;
443 		uvm_pagefree(pg);
444 	}
445 	mutex_exit(&uvm_pglistalloc_contig_lock);
446 	return error;
447 }
448 
449 static int
450 uvm_pglistalloc_contig(int num, paddr_t low, paddr_t high, paddr_t alignment,
451     paddr_t boundary, struct pglist *rlist, int waitok)
452 {
453 	int fl;
454 	int error;
455 	uvm_physseg_t psi;
456 
457 	/* Default to "lose". */
458 	error = ENOMEM;
459 	bool valid = false;
460 
461 	/*
462 	 * Block all memory allocation and lock the free list.
463 	 */
464 	uvm_pgfl_lock();
465 
466 	/* Are there even any free pages? */
467 	if (uvm_availmem(false) <=
468 	    (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
469 		goto out;
470 
471 	for (fl = 0; fl < VM_NFREELIST; fl++) {
472 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
473 		for (psi = uvm_physseg_get_last(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_prev(psi))
474 #else
475 		for (psi = uvm_physseg_get_first(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_next(psi))
476 #endif
477 		{
478 			if (uvm_physseg_get_free_list(psi) != fl)
479 				continue;
480 
481 			int done = uvm_pglistalloc_c_ps(psi, num, low, high,
482 			    alignment, boundary, rlist);
483 			if (done >= 0) {
484 				valid = true;
485 				num -= done;
486 			}
487 			if (num == 0) {
488 #ifdef PGALLOC_VERBOSE
489 				printf("pgalloc: %"PRIxMAX"-%"PRIxMAX"\n",
490 				       (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)),
491 				       (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist)));
492 #endif
493 				error = 0;
494 				goto out;
495 			}
496 		}
497 	}
498 	if (!valid) {
499 		uvm_pgfl_unlock();
500 		return EINVAL;
501 	}
502 
503 out:
504 	uvm_pgfl_unlock();
505 
506 	/*
507 	 * If that didn't work, try the more aggressive approach.
508 	 */
509 
510 	if (error) {
511 		if (waitok) {
512 			error = uvm_pglistalloc_contig_aggressive(num, low, high,
513 			    alignment, boundary, rlist);
514 		} else {
515 			uvm_pglistfree(rlist);
516 			uvm_kick_pdaemon();
517 		}
518 	}
519 	return error;
520 }
521 
522 static int
523 uvm_pglistalloc_s_ps(uvm_physseg_t psi, int num, paddr_t low, paddr_t high,
524     struct pglist *rlist)
525 {
526 	int todo, limit, candidate;
527 	struct vm_page *pg;
528 	bool second_pass;
529 #ifdef PGALLOC_VERBOSE
530 	printf("pgalloc: simple %d pgs from psi %d\n", num, psi);
531 #endif
532 
533 	KASSERT(uvm_physseg_get_start(psi) <= uvm_physseg_get_avail_start(psi));
534 	KASSERT(uvm_physseg_get_start(psi) <= uvm_physseg_get_avail_end(psi));
535 	KASSERT(uvm_physseg_get_avail_start(psi) <= uvm_physseg_get_end(psi));
536 	KASSERT(uvm_physseg_get_avail_end(psi) <= uvm_physseg_get_end(psi));
537 
538 	low = atop(low);
539 	high = atop(high);
540 
541 	/*
542 	 * Make sure that physseg falls within with range to be allocated from.
543 	 */
544 	if (high <= uvm_physseg_get_avail_start(psi) ||
545 	    low >= uvm_physseg_get_avail_end(psi))
546 		return -1;
547 
548 	todo = num;
549 	candidate = uimax(low, uvm_physseg_get_avail_start(psi) +
550 	    uvm_physseg_get_start_hint(psi));
551 	limit = uimin(high, uvm_physseg_get_avail_end(psi));
552 	pg = uvm_physseg_get_pg(psi, candidate - uvm_physseg_get_start(psi));
553 	second_pass = false;
554 
555 again:
556 	for (;; candidate++, pg++) {
557 		if (candidate >= limit) {
558 			if (uvm_physseg_get_start_hint(psi) == 0 || second_pass) {
559 				candidate = limit - 1;
560 				break;
561 			}
562 			second_pass = true;
563 			candidate = uimax(low, uvm_physseg_get_avail_start(psi));
564 			limit = uimin(limit, uvm_physseg_get_avail_start(psi) +
565 			    uvm_physseg_get_start_hint(psi));
566 			pg = uvm_physseg_get_pg(psi, candidate - uvm_physseg_get_start(psi));
567 			goto again;
568 		}
569 #if defined(DEBUG)
570 		{
571 			paddr_t cidx = 0;
572 			const uvm_physseg_t bank = uvm_physseg_find(candidate, &cidx);
573 			KDASSERTMSG(bank == psi,
574 			    "uvm_physseg_find(%#x) (%"PRIxPHYSSEG ") != psi %"PRIxPHYSSEG,
575 			     candidate, bank, psi);
576 			KDASSERTMSG(cidx == candidate - uvm_physseg_get_start(psi),
577 			    "uvm_physseg_find(%#x): %#"PRIxPADDR" != off %"PRIxPADDR,
578 			     candidate, cidx, candidate - uvm_physseg_get_start(psi));
579 		}
580 #endif
581 		if (VM_PAGE_IS_FREE(pg) == 0)
582 			continue;
583 
584 		uvm_pglist_add(pg, rlist);
585 		if (--todo == 0) {
586 			break;
587 		}
588 	}
589 
590 	/*
591 	 * The next time we need to search this segment,
592 	 * start just after the pages we just allocated.
593 	 */
594 	uvm_physseg_set_start_hint(psi, candidate + 1 - uvm_physseg_get_avail_start(psi));
595 	KASSERTMSG(uvm_physseg_get_start_hint(psi) <= uvm_physseg_get_avail_end(psi) -
596 	    uvm_physseg_get_avail_start(psi),
597 	    "%#x %u (%#x) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
598 	    candidate + 1,
599 	    uvm_physseg_get_start_hint(psi),
600 	    uvm_physseg_get_start_hint(psi),
601 	    uvm_physseg_get_avail_end(psi),
602 	    uvm_physseg_get_avail_start(psi),
603 	    uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi));
604 
605 #ifdef PGALLOC_VERBOSE
606 	printf("got %d pgs\n", num - todo);
607 #endif
608 	return (num - todo); /* number of pages allocated */
609 }
610 
611 static int
612 uvm_pglistalloc_simple(int num, paddr_t low, paddr_t high,
613     struct pglist *rlist, int waitok)
614 {
615 	int fl, error;
616 	uvm_physseg_t psi;
617 	int count = 0;
618 
619 	/* Default to "lose". */
620 	error = ENOMEM;
621 	bool valid = false;
622 
623 again:
624 	/*
625 	 * Block all memory allocation and lock the free list.
626 	 */
627 	uvm_pgfl_lock();
628 	count++;
629 
630 	/* Are there even any free pages? */
631 	if (uvm_availmem(false) <=
632 	    (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
633 		goto out;
634 
635 	for (fl = 0; fl < VM_NFREELIST; fl++) {
636 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
637 		for (psi = uvm_physseg_get_last(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_prev(psi))
638 #else
639 		for (psi = uvm_physseg_get_first(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_next(psi))
640 #endif
641 		{
642 			if (uvm_physseg_get_free_list(psi) != fl)
643 				continue;
644 
645 			int done = uvm_pglistalloc_s_ps(psi, num, low, high,
646                             rlist);
647 			if (done >= 0) {
648 				valid = true;
649 				num -= done;
650 			}
651 			if (num == 0) {
652 				error = 0;
653 				goto out;
654 			}
655 		}
656 
657 	}
658 	if (!valid) {
659 		uvm_pgfl_unlock();
660 		return EINVAL;
661 	}
662 
663 out:
664 	/*
665 	 * check to see if we need to generate some free pages waking
666 	 * the pagedaemon.
667 	 */
668 
669 	uvm_pgfl_unlock();
670 	uvm_kick_pdaemon();
671 
672 	if (error) {
673 		if (waitok) {
674 			uvm_wait("pglalloc");
675 			goto again;
676 		} else
677 			uvm_pglistfree(rlist);
678 	}
679 #ifdef PGALLOC_VERBOSE
680 	if (!error)
681 		printf("pgalloc: %"PRIxMAX"..%"PRIxMAX"\n",
682 		       (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)),
683 		       (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist)));
684 #endif
685 	return (error);
686 }
687 
688 int
689 uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
690     paddr_t boundary, struct pglist *rlist, int nsegs, int waitok)
691 {
692 	int num, res;
693 
694 	KASSERT(!cpu_intr_p());
695 	KASSERT(!cpu_softintr_p());
696 	KASSERT((alignment & (alignment - 1)) == 0);
697 	KASSERT((boundary & (boundary - 1)) == 0);
698 
699 	/*
700 	 * Our allocations are always page granularity, so our alignment
701 	 * must be, too.
702 	 */
703 	if (alignment < PAGE_SIZE)
704 		alignment = PAGE_SIZE;
705 	if (boundary != 0 && boundary < size)
706 		return (EINVAL);
707 	num = atop(round_page(size));
708 	low = roundup2(low, alignment);
709 
710 	TAILQ_INIT(rlist);
711 
712 	/*
713 	 * Turn off the caching of free pages - we need everything to be on
714 	 * the global freelists.
715 	 */
716 	uvm_pgflcache_pause();
717 
718 	if (nsegs < num || alignment != PAGE_SIZE || boundary != 0)
719 		res = uvm_pglistalloc_contig(num, low, high, alignment,
720 					     boundary, rlist, waitok);
721 	else
722 		res = uvm_pglistalloc_simple(num, low, high, rlist, waitok);
723 
724 	uvm_pgflcache_resume();
725 
726 	return (res);
727 }
728 
729 /*
730  * uvm_pglistfree: free a list of pages
731  *
732  * => pages should already be unmapped
733  */
734 
735 void
736 uvm_pglistfree(struct pglist *list)
737 {
738 	struct vm_page *pg;
739 
740 	KASSERT(!cpu_intr_p());
741 	KASSERT(!cpu_softintr_p());
742 
743 	while ((pg = TAILQ_FIRST(list)) != NULL) {
744 		TAILQ_REMOVE(list, pg, pageq.queue);
745 		uvm_pagefree(pg);
746 		STAT_DECR(uvm_pglistalloc_npages);
747 	}
748 }
749 
750 void
751 uvm_pglistalloc_init(void)
752 {
753 
754 	mutex_init(&uvm_pglistalloc_contig_lock, MUTEX_DEFAULT, IPL_NONE);
755 }
756