1 /*	$NetBSD: uvm_pglist.c,v 1.92 2024/01/14 10:38:47 tnn Exp $	*/
2 
3 /*-
4  * Copyright (c) 1997, 2019 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9  * NASA Ames Research Center, and by Andrew Doran.
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 /*
34  * uvm_pglist.c: pglist functions
35  */
36 
37 #include <sys/cdefs.h>
38 __KERNEL_RCSID(0, "$NetBSD: uvm_pglist.c,v 1.92 2024/01/14 10:38:47 tnn Exp $");
39 
40 #include <sys/param.h>
41 #include <sys/systm.h>
42 #include <sys/cpu.h>
43 
44 #include <uvm/uvm.h>
45 #include <uvm/uvm_pdpolicy.h>
46 #include <uvm/uvm_pgflcache.h>
47 
48 #ifdef VM_PAGE_ALLOC_MEMORY_STATS
49 #define	STAT_INCR(v)	(v)++
50 #define	STAT_DECR(v)	do { \
51 		if ((v) == 0) \
52 			printf("%s:%d -- Already 0!\n", __FILE__, __LINE__); \
53 		else \
54 			(v)--; \
55 	} while (/*CONSTCOND*/ 0)
56 u_long	uvm_pglistalloc_npages;
57 #else
58 #define	STAT_INCR(v)
59 #define	STAT_DECR(v)
60 #endif
61 
62 kmutex_t uvm_pglistalloc_contig_lock;
63 
64 /*
65  * uvm_pglistalloc: allocate a list of pages
66  *
67  * => allocated pages are placed onto an rlist.  rlist is
68  *    initialized by uvm_pglistalloc.
69  * => returns 0 on success or errno on failure
70  * => implementation allocates a single segment if any constraints are
71  *	imposed by call arguments.
72  * => doesn't take into account clean non-busy pages on the inactive list
73  *	that could be used(?)
74  * => params:
75  *	size		the size of the allocation, rounded to page size.
76  *	low		the low address of the allowed allocation range.
77  *	high		the high address of the allowed allocation range.
78  *	alignment	memory must be aligned to this power-of-two boundary.
79  *	boundary	no segment in the allocation may cross this
80  *			power-of-two boundary (relative to zero).
81  */
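/*
 * An illustrative sketch of typical use: a caller that needs a physically
 * contiguous, 64 KB-aligned DMA buffer below 4 GB might call
 * uvm_pglistalloc() roughly as follows (arguments are size, low, high,
 * alignment, boundary, rlist, nsegs, waitok).  The "mlist" variable and
 * the error handling shown are hypothetical, not taken from a real caller.
 *
 *	struct pglist mlist;
 *	int error;
 *
 *	error = uvm_pglistalloc(64 * 1024, 0, 0xffffffffUL,
 *	    64 * 1024, 0, &mlist, 1, 1);
 *	if (error != 0)
 *		return error;
 *	... map the pages, use the buffer, then ...
 *	uvm_pglistfree(&mlist);
 */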
82 
83 static void
84 uvm_pglist_add(struct vm_page *pg, struct pglist *rlist)
85 {
86 	struct pgfreelist *pgfl;
87 	struct pgflbucket *pgb;
88 
89 	pgfl = &uvm.page_free[uvm_page_get_freelist(pg)];
90 	pgb = pgfl->pgfl_buckets[uvm_page_get_bucket(pg)];
91 
92 #ifdef UVMDEBUG
93 	struct vm_page *tp;
94 	LIST_FOREACH(tp, &pgb->pgb_colors[VM_PGCOLOR(pg)], pageq.list) {
95 		if (tp == pg)
96 			break;
97 	}
98 	if (tp == NULL)
99 		panic("uvm_pglistalloc: page not on freelist");
100 #endif
101 	LIST_REMOVE(pg, pageq.list);
102 	pgb->pgb_nfree--;
103 	CPU_COUNT(CPU_COUNT_FREEPAGES, -1);
104 	pg->flags = PG_CLEAN;
105 	pg->uobject = NULL;
106 	pg->uanon = NULL;
107 	TAILQ_INSERT_TAIL(rlist, pg, pageq.queue);
108 	STAT_INCR(uvm_pglistalloc_npages);
109 }
110 
111 static int
112 uvm_pglistalloc_c_ps(uvm_physseg_t psi, int num, paddr_t low, paddr_t high,
113     paddr_t alignment, paddr_t boundary, struct pglist *rlist)
114 {
115 	long candidate, limit, candidateidx, end, idx;
116 	int skip;
117 	long pagemask;
118 	bool second_pass;
119 #ifdef DEBUG
120 	paddr_t idxpa, lastidxpa;
121 	paddr_t cidx = 0;	/* XXX: GCC */
122 #endif
123 #ifdef PGALLOC_VERBOSE
124 	printf("pgalloc: contig %d pgs from psi %d\n", num, psi);
125 #endif
126 
127 	low = atop(low);
128 	high = atop(high);
129 
130 	/*
131 	 * Make sure that the physseg falls within the range to be allocated from.
132 	 */
133 	if (high <= uvm_physseg_get_avail_start(psi) ||
134 	    low >= uvm_physseg_get_avail_end(psi))
135 		return -1;
136 
137 	/*
138 	 * We start our search just after where the last allocation
139 	 * succeeded.
140 	 */
141 	alignment = atop(alignment);
142 	candidate = roundup2(ulmax(low, uvm_physseg_get_avail_start(psi) +
143 		uvm_physseg_get_start_hint(psi)), alignment);
144 	limit = ulmin(high, uvm_physseg_get_avail_end(psi));
145 	pagemask = ~((boundary >> PAGE_SHIFT) - 1);
146 	skip = 0;
147 	second_pass = false;
148 
149 	for (;;) {
150 		bool ok = true;
151 		signed int cnt;
152 
153 		if (candidate + num > limit) {
154 			if (uvm_physseg_get_start_hint(psi) == 0 || second_pass) {
155 				/*
156 				 * We've run past the allowable range.
157 				 */
158 				return 0; /* FAIL = 0 pages */
159 			}
160 			/*
161 			 * We've wrapped around the end of this segment,
162 			 * so restart at the beginning, but now our limit
163 			 * is where we started.
164 			 */
165 			second_pass = true;
166 			candidate = roundup2(ulmax(low, uvm_physseg_get_avail_start(psi)), alignment);
167 			limit = ulmin(limit, uvm_physseg_get_avail_start(psi) +
168 			    uvm_physseg_get_start_hint(psi));
169 			skip = 0;
170 			continue;
171 		}
172 		if (boundary != 0 &&
173 		    ((candidate ^ (candidate + num - 1)) & pagemask) != 0) {
174 			/*
175 			 * Region crosses boundary. Jump to the boundary
176 			 * just crossed and ensure alignment.
177 			 */
178 			candidate = (candidate + num - 1) & pagemask;
179 			candidate = roundup2(candidate, alignment);
180 			skip = 0;
181 			continue;
182 		}
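		/*
		 * Worked example of the boundary check above, with
		 * illustrative numbers: for boundary = 64 KB and 4 KB pages,
		 * pagemask = ~15.  A candidate run of num = 4 starting at
		 * page frame 14 spans frames 14..17; (14 ^ 17) & ~15 != 0,
		 * so the run crosses a 64 KB boundary and candidate is
		 * advanced to 17 & ~15 = 16, then rounded up to the
		 * requested alignment.
		 */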
183 #ifdef DEBUG
184 		/*
185 		 * Make sure this is a managed physical page.
186 		 */
187 
188 		if (uvm_physseg_find(candidate, &cidx) != psi)
189 			panic("pgalloc contig: botch1");
190 		if (cidx != candidate - uvm_physseg_get_start(psi))
191 			panic("pgalloc contig: botch2");
192 		if (uvm_physseg_find(candidate + num - 1, &cidx) != psi)
193 			panic("pgalloc contig: botch3");
194 		if (cidx != candidate - uvm_physseg_get_start(psi) + num - 1)
195 			panic("pgalloc contig: botch4");
196 #endif
197 		candidateidx = candidate - uvm_physseg_get_start(psi);
198 		end = candidateidx + num;
199 
200 		/*
201 		 * Found a suitable starting page.  See if the range is free.
202 		 */
203 #ifdef PGALLOC_VERBOSE
204 		printf("%s: psi=%d candidate=%#lx end=%#lx skip=%#x, align=%#"PRIxPADDR,
205 		    __func__, psi, candidateidx, end, skip, alignment);
206 #endif
207 		/*
208 		 * We start at the end and work backwards since if we find a
209 		 * non-free page, it makes no sense to continue.
210 		 *
211 		 * But on the plus side we have "vetted" some number of free
212 		 * pages.  If this iteration fails, we may be able to skip
213 		 * testing most of those pages again in the next pass.
214 		 */
215 		for (idx = end - 1; idx >= candidateidx + skip; idx--) {
216 			if (VM_PAGE_IS_FREE(uvm_physseg_get_pg(psi, idx)) == 0) {
217 				ok = false;
218 				break;
219 			}
220 
221 #ifdef DEBUG
222 			if (idx > candidateidx) {
223 				idxpa = VM_PAGE_TO_PHYS(uvm_physseg_get_pg(psi, idx));
224 				lastidxpa = VM_PAGE_TO_PHYS(uvm_physseg_get_pg(psi, idx - 1));
225 				if ((lastidxpa + PAGE_SIZE) != idxpa) {
226 					/*
227 					 * Region not contiguous.
228 					 */
229 					panic("pgalloc contig: botch5");
230 				}
231 				if (boundary != 0 &&
232 				    ((lastidxpa ^ idxpa) & ~(boundary - 1))
233 				    != 0) {
234 					/*
235 					 * Region crosses boundary.
236 					 */
237 					panic("pgalloc contig: botch6");
238 				}
239 			}
240 #endif
241 		}
242 
243 		if (ok) {
244 			while (skip-- > 0) {
245 				KDASSERT(VM_PAGE_IS_FREE(uvm_physseg_get_pg(psi, candidateidx + skip)));
246 			}
247 #ifdef PGALLOC_VERBOSE
248 			printf(": ok\n");
249 #endif
250 			break;
251 		}
252 
253 #ifdef PGALLOC_VERBOSE
254 		printf(": non-free at %#x\n", idx - candidateidx);
255 #endif
256 		/*
257 		 * count the number of pages we can advance
258 		 * since we know they aren't all free.
259 		 */
260 		cnt = idx + 1 - candidateidx;
261 		/*
262 		 * now round up that to the needed alignment.
263 		 */
264 		cnt = roundup2(cnt, alignment);
265 		/*
266 		 * The number of pages we can skip checking
267 		 * (might be 0 if cnt > num).
268 		 */
269 		skip = uimax(num - cnt, 0);
270 		candidate += cnt;
271 	}
272 
273 	/*
274 	 * we have a chunk of memory that conforms to the requested constraints.
275 	 */
276 	for (idx = candidateidx; idx < end; idx++)
277 		uvm_pglist_add(uvm_physseg_get_pg(psi, idx), rlist);
278 
279 	/*
280 	 * the next time we need to search this segment, start after this
281 	 * chunk of pages we just allocated.
282 	 */
283 	uvm_physseg_set_start_hint(psi, candidate + num -
284 	    uvm_physseg_get_avail_start(psi));
285 	KASSERTMSG(uvm_physseg_get_start_hint(psi) <=
286 	    uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi),
287 	    "%lx %lu (%#lx) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
288 	    candidate + num,
289 	    uvm_physseg_get_start_hint(psi), uvm_physseg_get_start_hint(psi),
290 	    uvm_physseg_get_avail_end(psi), uvm_physseg_get_avail_start(psi),
291 	    uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi));
292 
293 #ifdef PGALLOC_VERBOSE
294 	printf("got %d pgs\n", num);
295 #endif
296 	return num; /* number of pages allocated */
297 }
298 
299 static int
300 uvm_pglistalloc_contig_aggressive(int num, paddr_t low, paddr_t high,
301     paddr_t alignment, paddr_t boundary, struct pglist *rlist)
302 {
303 	struct vm_page *pg;
304 	struct pglist tmp;
305 	paddr_t pa, off, spa, amask, bmask, rlo, rhi;
306 	uvm_physseg_t upm;
307 	int error, i, run, acnt;
308 
309 	/*
310 	 * Allocate pages the normal way and for each new page, check if
311 	 * the page completes a range satisfying the request.
312 	 * The pagedaemon will evict pages as we go and we are very likely
313 	 * to get compatible pages eventually.
314 	 */
315 
316 	error = ENOMEM;
317 	TAILQ_INIT(&tmp);
318 	acnt = atop(alignment);
319 	amask = ~(alignment - 1);
320 	bmask = ~(boundary - 1);
321 	KASSERT(bmask <= amask);
322 	mutex_enter(&uvm_pglistalloc_contig_lock);
323 	while (uvm_reclaimable()) {
324 		pg = uvm_pagealloc(NULL, 0, NULL, 0);
325 		if (pg == NULL) {
326 			uvm_wait("pglac2");
327 			continue;
328 		}
329 		pg->flags |= PG_PGLCA;
330 		TAILQ_INSERT_HEAD(&tmp, pg, pageq.queue);
331 
332 		pa = VM_PAGE_TO_PHYS(pg);
333 		if (pa < low || pa >= high) {
334 			continue;
335 		}
336 
337 		upm = uvm_physseg_find(atop(pa), &off);
338 		KASSERT(uvm_physseg_valid_p(upm));
339 
340 		spa = pa & amask;
341 
342 		/*
343 		 * Look backward for at most num - 1 pages, back to
344 		 * the highest of:
345 		 *  - the first page in the physseg
346 		 *  - the specified low address
347 		 *  - num-1 pages before the one we just allocated
348 		 *  - the start of the boundary range containing pa
349 		 * all rounded up to alignment.
350 		 */
351 
352 		rlo = roundup2(ptoa(uvm_physseg_get_avail_start(upm)), alignment);
353 		rlo = MAX(rlo, roundup2(low, alignment));
354 		rlo = MAX(rlo, roundup2(pa - ptoa(num - 1), alignment));
355 		if (boundary) {
356 			rlo = MAX(rlo, spa & bmask);
357 		}
358 
359 		/*
360 		 * Look forward as far as the lowest of:
361 		 *  - the last page of the physseg
362 		 *  - the specified high address
363 		 *  - the boundary after pa
364 		 */
365 
366 		rhi = ptoa(uvm_physseg_get_avail_end(upm));
367 		rhi = MIN(rhi, high);
368 		if (boundary) {
369 			rhi = MIN(rhi, rounddown2(pa, boundary) + boundary);
370 		}
371 
372 		/*
373 		 * Make sure our range to consider is big enough.
374 		 */
375 
376 		if (rhi - rlo < ptoa(num)) {
377 			continue;
378 		}
379 
380 		run = 0;
381 		while (spa > rlo) {
382 
383 			/*
384 			 * Examine pages before spa in groups of acnt.
385 			 * If all the pages in a group are marked then add
386 			 * these pages to the run.
387 			 */
388 
389 			for (i = 0; i < acnt; i++) {
390 				pg = PHYS_TO_VM_PAGE(spa - alignment + ptoa(i));
391 				if ((pg->flags & PG_PGLCA) == 0) {
392 					break;
393 				}
394 			}
395 			if (i < acnt) {
396 				break;
397 			}
398 			spa -= alignment;
399 			run += acnt;
400 		}
401 
402 		/*
403 		 * Look forward for any remaining pages.
404 		 */
405 
406 		if (spa + ptoa(num) > rhi) {
407 			continue;
408 		}
409 		for (; run < num; run++) {
410 			pg = PHYS_TO_VM_PAGE(spa + ptoa(run));
411 			if ((pg->flags & PG_PGLCA) == 0) {
412 				break;
413 			}
414 		}
415 		if (run < num) {
416 			continue;
417 		}
418 
419 		/*
420 		 * We found a match.  Move these pages from the tmp list to
421 		 * the caller's list.
422 		 */
423 
424 		for (i = 0; i < num; i++) {
425 			pg = PHYS_TO_VM_PAGE(spa + ptoa(i));
426 			TAILQ_REMOVE(&tmp, pg, pageq.queue);
427 			pg->flags &= ~PG_PGLCA;
428 			TAILQ_INSERT_TAIL(rlist, pg, pageq.queue);
429 			STAT_INCR(uvm_pglistalloc_npages);
430 		}
431 
432 		error = 0;
433 		break;
434 	}
435 
436 	/*
437 	 * Free all the pages that we didn't need.
438 	 */
439 
440 	while (!TAILQ_EMPTY(&tmp)) {
441 		pg = TAILQ_FIRST(&tmp);
442 		TAILQ_REMOVE(&tmp, pg, pageq.queue);
443 		pg->flags &= ~PG_PGLCA;
444 		uvm_pagefree(pg);
445 	}
446 	mutex_exit(&uvm_pglistalloc_contig_lock);
447 	return error;
448 }
449 
450 static int
451 uvm_pglistalloc_contig(int num, paddr_t low, paddr_t high, paddr_t alignment,
452     paddr_t boundary, struct pglist *rlist, int waitok)
453 {
454 	int fl;
455 	int error;
456 	uvm_physseg_t psi;
457 
458 	/* Default to "lose". */
459 	error = ENOMEM;
460 	bool valid = false;
461 
462 	/*
463 	 * Block all memory allocation and lock the free list.
464 	 */
465 	uvm_pgfl_lock();
466 
467 	/* Are there even any free pages? */
468 	if (uvm_availmem(false) <=
469 	    (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
470 		goto out;
471 
472 	for (fl = 0; fl < VM_NFREELIST; fl++) {
473 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
474 		for (psi = uvm_physseg_get_last(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_prev(psi))
475 #else
476 		for (psi = uvm_physseg_get_first(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_next(psi))
477 #endif
478 		{
479 			if (uvm_physseg_get_free_list(psi) != fl)
480 				continue;
481 
482 			int done = uvm_pglistalloc_c_ps(psi, num, low, high,
483 			    alignment, boundary, rlist);
484 			if (done >= 0) {
485 				valid = true;
486 				num -= done;
487 			}
488 			if (num == 0) {
489 #ifdef PGALLOC_VERBOSE
490 				printf("pgalloc: %"PRIxMAX"-%"PRIxMAX"\n",
491 				       (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)),
492 				       (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist)));
493 #endif
494 				error = 0;
495 				goto out;
496 			}
497 		}
498 	}
499 	if (!valid) {
500 		uvm_pgfl_unlock();
501 		return EINVAL;
502 	}
503 
504 out:
505 	uvm_pgfl_unlock();
506 
507 	/*
508 	 * If that didn't work, try the more aggressive approach.
509 	 */
510 
511 	if (error) {
512 		if (waitok) {
513 			error = uvm_pglistalloc_contig_aggressive(num, low, high,
514 			    alignment, boundary, rlist);
515 		} else {
516 			uvm_pglistfree(rlist);
517 			uvm_kick_pdaemon();
518 		}
519 	}
520 	return error;
521 }
522 
523 static int
524 uvm_pglistalloc_s_ps(uvm_physseg_t psi, int num, paddr_t low, paddr_t high,
525     struct pglist *rlist)
526 {
527 	int todo;
528 	long limit, candidate;
529 	struct vm_page *pg;
530 	bool second_pass;
531 #ifdef PGALLOC_VERBOSE
532 	printf("pgalloc: simple %d pgs from psi %d\n", num, psi);
533 #endif
534 
535 	KASSERT(uvm_physseg_get_start(psi) <= uvm_physseg_get_avail_start(psi));
536 	KASSERT(uvm_physseg_get_start(psi) <= uvm_physseg_get_avail_end(psi));
537 	KASSERT(uvm_physseg_get_avail_start(psi) <= uvm_physseg_get_end(psi));
538 	KASSERT(uvm_physseg_get_avail_end(psi) <= uvm_physseg_get_end(psi));
539 
540 	low = atop(low);
541 	high = atop(high);
542 
543 	/*
544 	 * Make sure that the physseg falls within the range to be allocated from.
545 	 */
546 	if (high <= uvm_physseg_get_avail_start(psi) ||
547 	    low >= uvm_physseg_get_avail_end(psi))
548 		return -1;
549 
550 	todo = num;
551 	candidate = ulmax(low, uvm_physseg_get_avail_start(psi) +
552 	    uvm_physseg_get_start_hint(psi));
553 	limit = ulmin(high, uvm_physseg_get_avail_end(psi));
554 	pg = uvm_physseg_get_pg(psi, candidate - uvm_physseg_get_start(psi));
555 	second_pass = false;
556 
557 again:
558 	for (;; candidate++, pg++) {
559 		if (candidate >= limit) {
560 			if (uvm_physseg_get_start_hint(psi) == 0 || second_pass) {
561 				candidate = limit - 1;
562 				break;
563 			}
564 			second_pass = true;
565 			candidate = ulmax(low, uvm_physseg_get_avail_start(psi));
566 			limit = ulmin(limit, uvm_physseg_get_avail_start(psi) +
567 			    uvm_physseg_get_start_hint(psi));
568 			pg = uvm_physseg_get_pg(psi, candidate - uvm_physseg_get_start(psi));
569 			goto again;
570 		}
571 #if defined(DEBUG)
572 		{
573 			paddr_t cidx = 0;
574 			const uvm_physseg_t bank = uvm_physseg_find(candidate, &cidx);
575 			KDASSERTMSG(bank == psi,
576 			    "uvm_physseg_find(%#lx) (%"PRIxPHYSSEG ") != psi %"PRIxPHYSSEG,
577 			     candidate, bank, psi);
578 			KDASSERTMSG(cidx == candidate - uvm_physseg_get_start(psi),
579 			    "uvm_physseg_find(%#lx): %#"PRIxPADDR" != off %"PRIxPADDR,
580 			     candidate, cidx, (paddr_t)candidate - uvm_physseg_get_start(psi));
581 		}
582 #endif
583 		if (VM_PAGE_IS_FREE(pg) == 0)
584 			continue;
585 
586 		uvm_pglist_add(pg, rlist);
587 		if (--todo == 0) {
588 			break;
589 		}
590 	}
591 
592 	/*
593 	 * The next time we need to search this segment,
594 	 * start just after the pages we just allocated.
595 	 */
596 	uvm_physseg_set_start_hint(psi, candidate + 1 - uvm_physseg_get_avail_start(psi));
597 	KASSERTMSG(uvm_physseg_get_start_hint(psi) <= uvm_physseg_get_avail_end(psi) -
598 	    uvm_physseg_get_avail_start(psi),
599 	    "%#lx %lu (%#lx) <= %#"PRIxPADDR" - %#"PRIxPADDR" (%#"PRIxPADDR")",
600 	    candidate + 1,
601 	    uvm_physseg_get_start_hint(psi),
602 	    uvm_physseg_get_start_hint(psi),
603 	    uvm_physseg_get_avail_end(psi),
604 	    uvm_physseg_get_avail_start(psi),
605 	    uvm_physseg_get_avail_end(psi) - uvm_physseg_get_avail_start(psi));
606 
607 #ifdef PGALLOC_VERBOSE
608 	printf("got %d pgs\n", num - todo);
609 #endif
610 	return (num - todo); /* number of pages allocated */
611 }
612 
613 static int
614 uvm_pglistalloc_simple(int num, paddr_t low, paddr_t high,
615     struct pglist *rlist, int waitok)
616 {
617 	int fl, error;
618 	uvm_physseg_t psi;
619 	int count = 0;
620 
621 	/* Default to "lose". */
622 	error = ENOMEM;
623 	bool valid = false;
624 
625 again:
626 	/*
627 	 * Block all memory allocation and lock the free list.
628 	 */
629 	uvm_pgfl_lock();
630 	count++;
631 
632 	/* Are there even any free pages? */
633 	if (uvm_availmem(false) <=
634 	    (uvmexp.reserve_pagedaemon + uvmexp.reserve_kernel))
635 		goto out;
636 
637 	for (fl = 0; fl < VM_NFREELIST; fl++) {
638 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
639 		for (psi = uvm_physseg_get_last(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_prev(psi))
640 #else
641 		for (psi = uvm_physseg_get_first(); uvm_physseg_valid_p(psi); psi = uvm_physseg_get_next(psi))
642 #endif
643 		{
644 			if (uvm_physseg_get_free_list(psi) != fl)
645 				continue;
646 
647 			int done = uvm_pglistalloc_s_ps(psi, num, low, high,
648 			    rlist);
649 			if (done >= 0) {
650 				valid = true;
651 				num -= done;
652 			}
653 			if (num == 0) {
654 				error = 0;
655 				goto out;
656 			}
657 		}
658 
659 	}
660 	if (!valid) {
661 		uvm_pgfl_unlock();
662 		return EINVAL;
663 	}
664 
665 out:
666 	/*
667 	 * check to see if we need to generate some free pages by waking
668 	 * the pagedaemon.
669 	 */
670 
671 	uvm_pgfl_unlock();
672 	uvm_kick_pdaemon();
673 
674 	if (error) {
675 		if (waitok) {
676 			uvm_wait("pglalloc");
677 			goto again;
678 		} else
679 			uvm_pglistfree(rlist);
680 	}
681 #ifdef PGALLOC_VERBOSE
682 	if (!error)
683 		printf("pgalloc: %"PRIxMAX"..%"PRIxMAX"\n",
684 		       (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_FIRST(rlist)),
685 		       (uintmax_t) VM_PAGE_TO_PHYS(TAILQ_LAST(rlist, pglist)));
686 #endif
687 	return (error);
688 }
689 
690 int
691 uvm_pglistalloc(psize_t size, paddr_t low, paddr_t high, paddr_t alignment,
692     paddr_t boundary, struct pglist *rlist, int nsegs, int waitok)
693 {
694 	int num, res;
695 
696 	KASSERT(!cpu_intr_p());
697 	KASSERT(!cpu_softintr_p());
698 	KASSERT((alignment & (alignment - 1)) == 0);
699 	KASSERT((boundary & (boundary - 1)) == 0);
700 
701 	/*
702 	 * Our allocations are always page granularity, so our alignment
703 	 * must be, too.
704 	 */
705 	if (alignment < PAGE_SIZE)
706 		alignment = PAGE_SIZE;
707 	if (boundary != 0 && boundary < size)
708 		return (EINVAL);
709 	num = atop(round_page(size));
710 	low = roundup2(low, alignment);
711 
712 	TAILQ_INIT(rlist);
713 
714 	/*
715 	 * Turn off the caching of free pages - we need everything to be on
716 	 * the global freelists.
717 	 */
718 	uvm_pgflcache_pause();
719 
720 	if (nsegs < num || alignment != PAGE_SIZE || boundary != 0)
721 		res = uvm_pglistalloc_contig(num, low, high, alignment,
722 					     boundary, rlist, waitok);
723 	else
724 		res = uvm_pglistalloc_simple(num, low, high, rlist, waitok);
725 
726 	uvm_pgflcache_resume();
727 
728 	return (res);
729 }
730 
731 /*
732  * uvm_pglistfree: free a list of pages
733  *
734  * => pages should already be unmapped
735  */
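/*
 * An illustrative sketch of freeing pages obtained from uvm_pglistalloc():
 * the caller typically walks the list to find each page's physical address
 * and tear down any mappings, then frees the whole list.  The "mlist"
 * variable is hypothetical, as in the allocation sketch above.
 *
 *	struct vm_page *pg;
 *	paddr_t pa;
 *
 *	TAILQ_FOREACH(pg, &mlist, pageq.queue) {
 *		pa = VM_PAGE_TO_PHYS(pg);
 *		... remove any mappings of pa ...
 *	}
 *	uvm_pglistfree(&mlist);
 */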
736 
737 void
738 uvm_pglistfree(struct pglist *list)
739 {
740 	struct vm_page *pg;
741 
742 	KASSERT(!cpu_intr_p());
743 	KASSERT(!cpu_softintr_p());
744 
745 	while ((pg = TAILQ_FIRST(list)) != NULL) {
746 		TAILQ_REMOVE(list, pg, pageq.queue);
747 		uvm_pagefree(pg);
748 		STAT_DECR(uvm_pglistalloc_npages);
749 	}
750 }
751 
752 void
753 uvm_pglistalloc_init(void)
754 {
755 
756 	mutex_init(&uvm_pglistalloc_contig_lock, MUTEX_DEFAULT, IPL_NONE);
757 }
758