1 /* $NetBSD: uvm_physseg.c,v 1.17 2020/07/15 15:08:26 rin Exp $ */
2 
3 /*
4  * Copyright (c) 1997 Charles D. Cranor and Washington University.
5  * Copyright (c) 1991, 1993, The Regents of the University of California.
6  *
7  * All rights reserved.
8  *
9  * This code is derived from software contributed to Berkeley by
10  * The Mach Operating System project at Carnegie-Mellon University.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)vm_page.h   7.3 (Berkeley) 4/21/91
37  * from: Id: uvm_page.h,v 1.1.2.6 1998/02/04 02:31:42 chuck Exp
38  *
39  *
40  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
41  * All rights reserved.
42  *
43  * Permission to use, copy, modify and distribute this software and
44  * its documentation is hereby granted, provided that both the copyright
45  * notice and this permission notice appear in all copies of the
46  * software, derivative works or modified versions, and any portions
47  * thereof, and that both notices appear in supporting documentation.
48  *
49  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
50  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
51  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
52  *
53  * Carnegie Mellon requests users of this software to return to
54  *
55  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
56  *  School of Computer Science
57  *  Carnegie Mellon University
58  *  Pittsburgh PA 15213-3890
59  *
60  * any improvements or extensions that they make and grant Carnegie the
61  * rights to redistribute these changes.
62  */
63 
64 /*
65  * Consolidated API from uvm_page.c and others.
66  * Consolidated and designed by Cherry G. Mathew <cherry@zyx.in>
67  * rbtree(3) backing implementation by:
68  * Santhosh N. Raju <santhosh.raju@gmail.com>
69  */
70 
71 #ifdef _KERNEL_OPT
72 #include "opt_uvm.h"
73 #endif
74 
75 #include <sys/param.h>
76 #include <sys/types.h>
77 #include <sys/extent.h>
78 #include <sys/kmem.h>
79 
80 #include <uvm/uvm.h>
81 #include <uvm/uvm_page.h>
82 #include <uvm/uvm_param.h>
83 #include <uvm/uvm_pdpolicy.h>
84 #include <uvm/uvm_physseg.h>
85 
86 /*
87  * uvm_physseg: describes one segment of physical memory
88  */
89 struct uvm_physseg {
90 	/* used during RB tree lookup for PHYS_TO_VM_PAGE(). */
91 	struct  rb_node rb_node;	/* tree information */
92 	paddr_t	start;			/* PF# of first page in segment */
93 	paddr_t	end;			/* (PF# of last page in segment) + 1 */
94 	struct	vm_page *pgs;		/* vm_page structures (from start) */
95 
96 	/* less performance sensitive fields. */
97 	paddr_t	avail_start;		/* PF# of first free page in segment */
98 	paddr_t	avail_end;		/* (PF# of last free page in segment) +1  */
99 	struct  extent *ext;		/* extent(9) structure to manage pgs[] */
100 	int	free_list;		/* which free list they belong on */
101 	u_int	start_hint;		/* start looking for free pages here */
102 #ifdef __HAVE_PMAP_PHYSSEG
103 	struct	pmap_physseg pmseg;	/* pmap specific (MD) data */
104 #endif
105 };
106 
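/*
 * Illustrative sketch (not part of the API; the helper name is
 * hypothetical): roughly how a PHYS_TO_VM_PAGE() style lookup resolves a
 * physical address against the structure above, using only accessors
 * defined later in this file.
 *
 *	struct vm_page *
 *	example_pa_to_page(paddr_t pa)
 *	{
 *		psize_t off;
 *		uvm_physseg_t upm = uvm_physseg_find(atop(pa), &off);
 *
 *		if (!uvm_physseg_valid_p(upm))
 *			return NULL;
 *		return uvm_physseg_get_pg(upm, off);
 *	}
 */
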
107 /*
108  * These functions are reserved for uvm(9) internal use and are not
109  * exported in the header file uvm_physseg.h
110  *
111  * Thus they are redefined here.
112  */
113 void uvm_physseg_init_seg(uvm_physseg_t, struct vm_page *);
114 void uvm_physseg_seg_chomp_slab(uvm_physseg_t, struct vm_page *, size_t);
115 
116 /* returns a pgs array */
117 struct vm_page *uvm_physseg_seg_alloc_from_slab(uvm_physseg_t, size_t);
118 
119 #if defined(UVM_HOTPLUG) /* rbtree implementation */
120 
121 #define		HANDLE_TO_PHYSSEG_NODE(h)	((struct uvm_physseg *)(h))
122 #define		PHYSSEG_NODE_TO_HANDLE(u)	((uvm_physseg_t)(u))
123 
124 struct uvm_physseg_graph {
125 	struct rb_tree rb_tree;		/* Tree for entries */
126 	int            nentries;	/* Number of entries */
127 } __aligned(COHERENCY_UNIT);
128 
129 static struct uvm_physseg_graph uvm_physseg_graph __read_mostly;
130 
131 /*
132  * Note on kmem(9) allocator usage:
133  * We take the conservative approach that plug/unplug are allowed to
134  * fail in high memory stress situations.
135  *
136  * We want to avoid re-entrant situations in which one plug/unplug
137  * operation is waiting on a previous one to complete, since this
138  * makes the design more complicated than necessary.
139  *
140  * We may review this and change its behaviour, once the use cases
141  * become more obvious.
142  */
143 
144 /*
145  * Special alloc()/free() functions for boot time support:
146  * We assume that alloc() at boot time is only for new 'vm_physseg's.
147  * This allows us to use a static array for memory allocation at boot
148  * time. Thus we avoid using kmem(9) which is not ready at this point
149  * in boot.
150  *
151  * After kmem(9) is ready, we use it. We currently discard any free()s
152  * to this static array, since the size is small enough to be a
153  * trivial waste on all architectures we run on.
154  */
155 
156 static size_t nseg = 0;
157 static struct uvm_physseg uvm_physseg[VM_PHYSSEG_MAX];
158 
159 static void *
160 uvm_physseg_alloc(size_t sz)
161 {
162 	/*
163 	 * During boot time, we only support allocating vm_physseg
164 	 * entries from the static array.
165 	 * We need to assert for this.
166 	 */
167 
168 	if (__predict_false(uvm.page_init_done == false)) {
169 		if (sz % sizeof(struct uvm_physseg))
170 			panic("%s: tried to alloc size other than multiple"
171 			    " of struct uvm_physseg at boot\n", __func__);
172 
173 		size_t n = sz / sizeof(struct uvm_physseg);
174 		nseg += n;
175 
176 		KASSERT(nseg > 0 && nseg <= VM_PHYSSEG_MAX);
177 
178 		return &uvm_physseg[nseg - n];
179 	}
180 
181 	return kmem_zalloc(sz, KM_NOSLEEP);
182 }
183 
184 static void
185 uvm_physseg_free(void *p, size_t sz)
186 {
187 	/*
188 	 * This is a bit tricky. We do allow simulation of free()
189 	 * during boot (e.g. when MD code "steal"s memory and the
190 	 * segment has been exhausted, and thus needs to be
191 	 * free()-ed).
192 	 * free() also complicates things because we leak the
193 	 * free()-ed memory. Therefore calling code can't assume that
194 	 * free()-ed memory is available for alloc() again at boot time.
195 	 *
196 	 * Thus we can't explicitly disallow free()s during
197 	 * boot time. However, the same restriction for alloc()
198 	 * applies to free(). We only allow uvm_physseg related free()s
199 	 * via this function during boot time.
200 	 */
201 
202 	if (__predict_false(uvm.page_init_done == false)) {
203 		if (sz % sizeof(struct uvm_physseg))
204 			panic("%s: tried to free size other than multiple"
205 			    " of struct uvm_physseg at boot\n", __func__);
206 
207 	}
208 
209 	/*
210 	 * Could have been in a single if(){} block - split for
211 	 * clarity
212 	 */
213 
214 	if ((struct uvm_physseg *)p >= uvm_physseg &&
215 	    (struct uvm_physseg *)p < (uvm_physseg + VM_PHYSSEG_MAX)) {
216 		if (sz % sizeof(struct uvm_physseg))
217 			panic("%s: tried to free() other than struct uvm_physseg"
218 			    " from static array\n", __func__);
219 
220 		if ((sz / sizeof(struct uvm_physseg)) >= VM_PHYSSEG_MAX)
221 			panic("%s: tried to free() the entire static array!", __func__);
222 		return; /* Nothing to free */
223 	}
224 
225 	kmem_free(p, sz);
226 }
227 
228 /* XXX: Multi page size */
229 bool
230 uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
231 {
232 	int preload;
233 	size_t slabpages;
234 	struct uvm_physseg *ps, *current_ps = NULL;
235 	struct vm_page *slab = NULL, *pgs = NULL;
236 
237 #ifdef DEBUG
238 	paddr_t off;
239 	uvm_physseg_t upm;
240 	upm = uvm_physseg_find(pfn, &off);
241 
242 	ps = HANDLE_TO_PHYSSEG_NODE(upm);
243 
244 	if (ps != NULL) /* XXX; do we allow "update" plugs ? */
245 		return false;
246 #endif
247 
248 	/*
249 	 * do we have room?
250 	 */
251 
252 	ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
253 	if (ps == NULL) {
254 		printf("uvm_page_physload: unable to load physical memory "
255 		    "segment\n");
256 		printf("\t%d segments allocated, ignoring 0x%"PRIxPADDR" -> 0x%"PRIxPADDR"\n",
257 		    VM_PHYSSEG_MAX, pfn, pfn + pages);
258 		printf("\tincrease VM_PHYSSEG_MAX\n");
259 		return false;
260 	}
261 
262 	/* span init */
263 	ps->start = pfn;
264 	ps->end = pfn + pages;
265 
266 	/*
267 	 * XXX: Ugly hack - uvmexp.npages is accounted only for pages
268 	 * in the avail range, so make that range span the whole segment
269 	 * below. This is legacy behaviour and should be removed.
270 	 */
271 
272 	ps->avail_start = ps->start;
273 	ps->avail_end = ps->end;
274 
275 	/*
276 	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
277 	 * called yet, so kmem is not available).
278 	 */
279 
280 	preload = 1; /* We are going to assume it is a preload */
281 
282 	RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
283 		/* If there are non-NULL pages then we are not in a preload */
284 		if (current_ps->pgs != NULL) {
285 			preload = 0;
286 			/* Try to scavenge from earlier unplug()s. */
287 			pgs = uvm_physseg_seg_alloc_from_slab(current_ps, pages);
288 
289 			if (pgs != NULL) {
290 				break;
291 			}
292 		}
293 	}
294 
295 
296 	/*
297 	 * if VM is already running, attempt to kmem_alloc vm_page structures
298 	 */
299 
300 	if (!preload) {
301 		if (pgs == NULL) { /* Brand new */
302 			/* Iteratively try alloc down from uvmexp.npages */
303 			for (slabpages = (size_t) uvmexp.npages; slabpages >= pages; slabpages--) {
304 				slab = kmem_zalloc(sizeof *pgs * (long unsigned int)slabpages, KM_NOSLEEP);
305 				if (slab != NULL)
306 					break;
307 			}
308 
309 			if (slab == NULL) {
310 				uvm_physseg_free(ps, sizeof(struct uvm_physseg));
311 				return false;
312 			}
313 
314 			uvm_physseg_seg_chomp_slab(ps, slab, (size_t) slabpages);
315 			/* We allocate enough for this plug */
316 			pgs = uvm_physseg_seg_alloc_from_slab(ps, pages);
317 
318 			if (pgs == NULL) {
319 				printf("unable to uvm_physseg_seg_alloc_from_slab() from backend\n");
320 				return false;
321 			}
322 		} else {
323 			/* Reuse scavenged extent */
324 			ps->ext = current_ps->ext;
325 		}
326 
327 		physmem += pages;
328 		uvmpdpol_reinit();
329 	} else { /* Boot time - see uvm_page.c:uvm_page_init() */
330 		pgs = NULL;
331 		ps->pgs = pgs;
332 	}
333 
334 	/*
335 	 * now insert us in the proper place in uvm_physseg_graph.rb_tree
336 	 */
337 
338 	current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
339 	if (current_ps != ps) {
340 		panic("uvm_page_physload: Duplicate address range detected!");
341 	}
342 	uvm_physseg_graph.nentries++;
343 
344 	/*
345 	 * uvm_pagefree() requires PHYS_TO_VM_PAGE() on the newly
346 	 * allocated pgs[] to return the correct value. This is
347 	 * a bit of a chicken and egg problem, since it needs
348 	 * uvm_physseg_find() to succeed. For this, the node needs to
349 	 * be inserted *before* uvm_physseg_init_seg() happens.
350 	 *
351 	 * During boot, this happens anyway, since
352 	 * uvm_physseg_init_seg() is called later on and separately
353 	 * from uvm_page.c:uvm_page_init().
354 	 * In the case of hotplug we need to ensure this.
355 	 */
356 
357 	if (__predict_true(!preload))
358 		uvm_physseg_init_seg(ps, pgs);
359 
360 	if (psp != NULL)
361 		*psp = ps;
362 
363 	return true;
364 }
365 
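/*
 * Usage sketch (illustrative only; 'pfn', 'npages' and the error returns
 * are hypothetical choices of a memory hotplug caller, not taken from any
 * existing driver):
 *
 *	uvm_physseg_t upm;
 *
 *	if (uvm_physseg_plug(pfn, npages, &upm) == false)
 *		return ENOMEM;		(memory too tight to plug)
 *
 *	... and later, to take the same range away again:
 *
 *	if (uvm_physseg_unplug(pfn, npages) == false)
 *		return EBUSY;		(range can't be unplugged right now)
 */
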
366 static int
367 uvm_physseg_compare_nodes(void *ctx, const void *nnode1, const void *nnode2)
368 {
369 	const struct uvm_physseg *enode1 = nnode1;
370 	const struct uvm_physseg *enode2 = nnode2;
371 
372 	KASSERT(enode1->start < enode2->start || enode1->start >= enode2->end);
373 	KASSERT(enode2->start < enode1->start || enode2->start >= enode1->end);
374 
375 	if (enode1->start < enode2->start)
376 		return -1;
377 	if (enode1->start >= enode2->end)
378 		return 1;
379 	return 0;
380 }
381 
382 static int
383 uvm_physseg_compare_key(void *ctx, const void *nnode, const void *pkey)
384 {
385 	const struct uvm_physseg *enode = nnode;
386 	const paddr_t pa = *(const paddr_t *) pkey;
387 
388 	if (enode->start <= pa && pa < enode->end)
389 		return 0;
390 	if (enode->start < pa)
391 		return -1;
392 	if (enode->end > pa)
393 		return 1;
394 
395 	return 0;
396 }
397 
398 static const rb_tree_ops_t uvm_physseg_tree_ops = {
399 	.rbto_compare_nodes = uvm_physseg_compare_nodes,
400 	.rbto_compare_key = uvm_physseg_compare_key,
401 	.rbto_node_offset = offsetof(struct uvm_physseg, rb_node),
402 	.rbto_context = NULL
403 };
404 
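/*
 * Note: with the ops above, rb_tree_find_node(&uvm_physseg_graph.rb_tree,
 * &pfn) returns the segment whose [start, end) range contains the page
 * frame 'pfn', or NULL if no segment does; uvm_physseg_find() below is a
 * thin wrapper around exactly that lookup.
 */
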
405 /*
406  * uvm_physseg_init: init the physmem
407  *
408  * => physmem unit should not be in use at this point
409  */
410 
411 void
412 uvm_physseg_init(void)
413 {
414 	rb_tree_init(&(uvm_physseg_graph.rb_tree), &uvm_physseg_tree_ops);
415 	uvm_physseg_graph.nentries = 0;
416 }
417 
418 uvm_physseg_t
419 uvm_physseg_get_next(uvm_physseg_t upm)
420 {
421 	/* next of invalid is invalid, not fatal */
422 	if (uvm_physseg_valid_p(upm) == false)
423 		return UVM_PHYSSEG_TYPE_INVALID;
424 
425 	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
426 	    RB_DIR_RIGHT);
427 }
428 
429 uvm_physseg_t
430 uvm_physseg_get_prev(uvm_physseg_t upm)
431 {
432 	/* prev of invalid is invalid, not fatal */
433 	if (uvm_physseg_valid_p(upm) == false)
434 		return UVM_PHYSSEG_TYPE_INVALID;
435 
436 	return (uvm_physseg_t) rb_tree_iterate(&(uvm_physseg_graph.rb_tree), upm,
437 	    RB_DIR_LEFT);
438 }
439 
440 uvm_physseg_t
441 uvm_physseg_get_last(void)
442 {
443 	return (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));
444 }
445 
446 uvm_physseg_t
447 uvm_physseg_get_first(void)
448 {
449 	return (uvm_physseg_t) RB_TREE_MIN(&(uvm_physseg_graph.rb_tree));
450 }
451 
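/*
 * Illustrative iteration idiom for walking every segment with the
 * accessors above:
 *
 *	uvm_physseg_t upm;
 *
 *	for (upm = uvm_physseg_get_first();
 *	     uvm_physseg_valid_p(upm);
 *	     upm = uvm_physseg_get_next(upm)) {
 *		...
 *	}
 */
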
452 paddr_t
453 uvm_physseg_get_highest_frame(void)
454 {
455 	struct uvm_physseg *ps =
456 	    (uvm_physseg_t) RB_TREE_MAX(&(uvm_physseg_graph.rb_tree));
457 
458 	return ps->end - 1;
459 }
460 
461 /*
462  * uvm_page_physunload: unload physical memory and return it to
463  * caller.
464  */
465 bool
466 uvm_page_physunload(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
467 {
468 	struct uvm_physseg *seg;
469 
470 	if (__predict_true(uvm.page_init_done == true))
471 		panic("%s: unload attempted after uvm_page_init()\n", __func__);
472 
473 	seg = HANDLE_TO_PHYSSEG_NODE(upm);
474 
475 	if (seg->free_list != freelist) {
476 		return false;
477 	}
478 
479 	/*
480 	 * During cold boot, what we're about to unplug hasn't been
481 	 * put on the uvm freelist, nor has uvmexp.npages been
482 	 * updated. (This happens in uvm_page.c:uvm_page_init())
483 	 *
484 	 * For hotplug, we assume that the pages being unloaded
485 	 * here are completely out of sight of uvm (i.e. not on any uvm
486 	 * lists), and that uvmexp.npages has been suitably
487 	 * decremented before we're called.
488 	 *
489 	 * XXX: will avail_end == start if avail_start < avail_end?
490 	 */
491 
492 	/* try from front */
493 	if (seg->avail_start == seg->start &&
494 	    seg->avail_start < seg->avail_end) {
495 		*paddrp = ctob(seg->avail_start);
496 		return uvm_physseg_unplug(seg->avail_start, 1);
497 	}
498 
499 	/* try from rear */
500 	if (seg->avail_end == seg->end &&
501 	    seg->avail_start < seg->avail_end) {
502 		*paddrp = ctob(seg->avail_end - 1);
503 		return uvm_physseg_unplug(seg->avail_end - 1, 1);
504 	}
505 
506 	return false;
507 }
508 
509 bool
510 uvm_page_physunload_force(uvm_physseg_t upm, int freelist, paddr_t *paddrp)
511 {
512 	struct uvm_physseg *seg;
513 
514 	seg = HANDLE_TO_PHYSSEG_NODE(upm);
515 
516 	if (__predict_true(uvm.page_init_done == true))
517 		panic("%s: unload attempted after uvm_page_init()\n", __func__);
518 	/* any room in this bank? */
519 	if (seg->avail_start >= seg->avail_end) {
520 		return false; /* nope */
521 	}
522 
523 	*paddrp = ctob(seg->avail_start);
524 
525 	/* Always unplug from front */
526 	return uvm_physseg_unplug(seg->avail_start, 1);
527 }
528 
529 
530 /*
531  * uvm_physseg_find: find the uvm_physseg structure that a PA belongs to
532  */
533 uvm_physseg_t
534 uvm_physseg_find(paddr_t pframe, psize_t *offp)
535 {
536 	struct uvm_physseg * ps = NULL;
537 
538 	ps = rb_tree_find_node(&(uvm_physseg_graph.rb_tree), &pframe);
539 
540 	if (ps != NULL && offp != NULL)
541 		*offp = pframe - ps->start;
542 
543 	return ps;
544 }
545 
546 #else  /* UVM_HOTPLUG */
547 
548 /*
549  * physical memory config is stored in vm_physmem.
550  */
551 
552 #define	VM_PHYSMEM_PTR(i)	(&vm_physmem[i])
553 #if VM_PHYSSEG_MAX == 1
554 #define VM_PHYSMEM_PTR_SWAP(i, j) /* impossible */
555 #else
556 #define VM_PHYSMEM_PTR_SWAP(i, j)					      \
557 	do { vm_physmem[(i)] = vm_physmem[(j)]; } while (0)
558 #endif
559 
560 #define		HANDLE_TO_PHYSSEG_NODE(h)	(VM_PHYSMEM_PTR((int)h))
561 #define		PHYSSEG_NODE_TO_HANDLE(u)	((int)((vsize_t) (u - vm_physmem) / sizeof(struct uvm_physseg)))
562 
563 static struct uvm_physseg vm_physmem[VM_PHYSSEG_MAX];	/* XXXCDC: uvm.physmem */
564 static int vm_nphysseg = 0;				/* XXXCDC: uvm.nphysseg */
565 #define	vm_nphysmem	vm_nphysseg
566 
567 void
568 uvm_physseg_init(void)
569 {
570 	/* XXX: Provisioning for rb_tree related init(s) */
571 	return;
572 }
573 
574 int
575 uvm_physseg_get_next(uvm_physseg_t lcv)
576 {
577 	/* next of invalid is invalid, not fatal */
578 	if (uvm_physseg_valid_p(lcv) == false)
579 		return UVM_PHYSSEG_TYPE_INVALID;
580 
581 	return (lcv + 1);
582 }
583 
584 int
585 uvm_physseg_get_prev(uvm_physseg_t lcv)
586 {
587 	/* prev of invalid is invalid, not fatal */
588 	if (uvm_physseg_valid_p(lcv) == false)
589 		return UVM_PHYSSEG_TYPE_INVALID;
590 
591 	return (lcv - 1);
592 }
593 
594 int
595 uvm_physseg_get_last(void)
596 {
597 	return (vm_nphysseg - 1);
598 }
599 
600 int
601 uvm_physseg_get_first(void)
602 {
603 	return 0;
604 }
605 
606 paddr_t
607 uvm_physseg_get_highest_frame(void)
608 {
609 	int lcv;
610 	paddr_t last = 0;
611 	struct uvm_physseg *ps;
612 
613 	for (lcv = 0; lcv < vm_nphysseg; lcv++) {
614 		ps = VM_PHYSMEM_PTR(lcv);
615 		if (last < ps->end)
616 			last = ps->end;
617 	}
618 
619 	return last;
620 }
621 
622 
623 static struct vm_page *
624 uvm_post_preload_check(void)
625 {
626 	int preload, lcv;
627 
628 	/*
629 	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
630 	 * called yet, so kmem is not available).
631 	 */
632 
633 	for (lcv = 0 ; lcv < vm_nphysmem ; lcv++) {
634 		if (VM_PHYSMEM_PTR(lcv)->pgs)
635 			break;
636 	}
637 	preload = (lcv == vm_nphysmem);
638 
639 	/*
640 	 * if VM is already running, attempt to kmem_alloc vm_page structures
641 	 */
642 
643 	if (!preload) {
644 		panic("Tried to add RAM after uvm_page_init");
645 	}
646 
647 	return NULL;
648 }
649 
650 /*
651  * uvm_page_physunload: unload physical memory and return it to
652  * caller.
653  */
654 bool
655 uvm_page_physunload(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
656 {
657 	int x;
658 	struct uvm_physseg *seg;
659 
660 	uvm_post_preload_check();
661 
662 	seg = VM_PHYSMEM_PTR(psi);
663 
664 	if (seg->free_list != freelist) {
665 		return false;
666 	}
667 
668 	/* try from front */
669 	if (seg->avail_start == seg->start &&
670 	    seg->avail_start < seg->avail_end) {
671 		*paddrp = ctob(seg->avail_start);
672 		seg->avail_start++;
673 		seg->start++;
674 		/* nothing left?   nuke it */
675 		if (seg->avail_start == seg->end) {
676 			if (vm_nphysmem == 1)
677 				panic("uvm_page_physget: out of memory!");
678 			vm_nphysmem--;
679 			for (x = psi ; x < vm_nphysmem ; x++)
680 				/* structure copy */
681 				VM_PHYSMEM_PTR_SWAP(x, x + 1);
682 		}
683 		return (true);
684 	}
685 
686 	/* try from rear */
687 	if (seg->avail_end == seg->end &&
688 	    seg->avail_start < seg->avail_end) {
689 		*paddrp = ctob(seg->avail_end - 1);
690 		seg->avail_end--;
691 		seg->end--;
692 		/* nothing left?   nuke it */
693 		if (seg->avail_end == seg->start) {
694 			if (vm_nphysmem == 1)
695 				panic("uvm_page_physget: out of memory!");
696 			vm_nphysmem--;
697 			for (x = psi ; x < vm_nphysmem ; x++)
698 				/* structure copy */
699 				VM_PHYSMEM_PTR_SWAP(x, x + 1);
700 		}
701 		return (true);
702 	}
703 
704 	return false;
705 }
706 
707 bool
708 uvm_page_physunload_force(uvm_physseg_t psi, int freelist, paddr_t *paddrp)
709 {
710 	int x;
711 	struct uvm_physseg *seg;
712 
713 	uvm_post_preload_check();
714 
715 	seg = VM_PHYSMEM_PTR(psi);
716 
717 	/* any room in this bank? */
718 	if (seg->avail_start >= seg->avail_end) {
719 		return false; /* nope */
720 	}
721 
722 	*paddrp = ctob(seg->avail_start);
723 	seg->avail_start++;
724 	/* truncate! */
725 	seg->start = seg->avail_start;
726 
727 	/* nothing left?   nuke it */
728 	if (seg->avail_start == seg->end) {
729 		if (vm_nphysmem == 1)
730 			panic("uvm_page_physget: out of memory!");
731 		vm_nphysmem--;
732 		for (x = psi ; x < vm_nphysmem ; x++)
733 			/* structure copy */
734 			VM_PHYSMEM_PTR_SWAP(x, x + 1);
735 	}
736 	return (true);
737 }
738 
739 bool
740 uvm_physseg_plug(paddr_t pfn, size_t pages, uvm_physseg_t *psp)
741 {
742 	int lcv;
743 	struct vm_page *pgs;
744 	struct uvm_physseg *ps;
745 
746 #ifdef DEBUG
747 	paddr_t off;
748 	uvm_physseg_t upm;
749 	upm = uvm_physseg_find(pfn, &off);
750 
751 	if (uvm_physseg_valid_p(upm)) /* XXX; do we allow "update" plugs ? */
752 		return false;
753 #endif
754 
755 	paddr_t start = pfn;
756 	paddr_t end = pfn + pages;
757 	paddr_t avail_start = start;
758 	paddr_t avail_end = end;
759 
760 	if (uvmexp.pagesize == 0)
761 		panic("uvm_page_physload: page size not set!");
762 
763 	/*
764 	 * do we have room?
765 	 */
766 
767 	if (vm_nphysmem == VM_PHYSSEG_MAX) {
768 		printf("uvm_page_physload: unable to load physical memory "
769 		    "segment\n");
770 		printf("\t%d segments allocated, ignoring 0x%llx -> 0x%llx\n",
771 		    VM_PHYSSEG_MAX, (long long)start, (long long)end);
772 		printf("\tincrease VM_PHYSSEG_MAX\n");
773 		if (psp != NULL)
774 			*psp = UVM_PHYSSEG_TYPE_INVALID_OVERFLOW;
775 		return false;
776 	}
777 
778 	/*
779 	 * check to see if this is a "preload" (i.e. uvm_page_init hasn't been
780 	 * called yet, so kmem is not available).
781 	 */
782 	pgs = uvm_post_preload_check();
783 
784 	/*
785 	 * now insert us in the proper place in vm_physmem[]
786 	 */
787 
788 #if (VM_PHYSSEG_STRAT == VM_PSTRAT_RANDOM)
789 	/* random: put it at the end (easy!) */
790 	ps = VM_PHYSMEM_PTR(vm_nphysmem);
791 	lcv = vm_nphysmem;
792 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
793 	{
794 		int x;
795 		/* sort by address for binary search */
796 		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
797 			if (start < VM_PHYSMEM_PTR(lcv)->start)
798 				break;
799 		ps = VM_PHYSMEM_PTR(lcv);
800 		/* move back other entries, if necessary ... */
801 		for (x = vm_nphysmem ; x > lcv ; x--)
802 			/* structure copy */
803 			VM_PHYSMEM_PTR_SWAP(x, x - 1);
804 	}
805 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BIGFIRST)
806 	{
807 		int x;
808 		/* sort by largest segment first */
809 		for (lcv = 0 ; lcv < vm_nphysmem ; lcv++)
810 			if ((end - start) >
811 			    (VM_PHYSMEM_PTR(lcv)->end - VM_PHYSMEM_PTR(lcv)->start))
812 				break;
813 		ps = VM_PHYSMEM_PTR(lcv);
814 		/* move back other entries, if necessary ... */
815 		for (x = vm_nphysmem ; x > lcv ; x--)
816 			/* structure copy */
817 			VM_PHYSMEM_PTR_SWAP(x, x - 1);
818 	}
819 #else
820 	panic("uvm_page_physload: unknown physseg strategy selected!");
821 #endif
822 
823 	ps->start = start;
824 	ps->end = end;
825 	ps->avail_start = avail_start;
826 	ps->avail_end = avail_end;
827 
828 	ps->pgs = pgs;
829 
830 	vm_nphysmem++;
831 
832 	if (psp != NULL)
833 		*psp = lcv;
834 
835 	return true;
836 }
837 
838 /*
839  * when VM_PHYSSEG_MAX is 1, we can simplify these functions
840  */
841 
842 #if VM_PHYSSEG_MAX == 1
843 static inline int vm_physseg_find_contig(struct uvm_physseg *, int, paddr_t, psize_t *);
844 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
845 static inline int vm_physseg_find_bsearch(struct uvm_physseg *, int, paddr_t, psize_t *);
846 #else
847 static inline int vm_physseg_find_linear(struct uvm_physseg *, int, paddr_t, psize_t *);
848 #endif
849 
850 /*
851  * uvm_physseg_find: find the uvm_physseg structure that a PA belongs to
852  */
853 int
854 uvm_physseg_find(paddr_t pframe, psize_t *offp)
855 {
856 
857 #if VM_PHYSSEG_MAX == 1
858 	return vm_physseg_find_contig(vm_physmem, vm_nphysseg, pframe, offp);
859 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
860 	return vm_physseg_find_bsearch(vm_physmem, vm_nphysseg, pframe, offp);
861 #else
862 	return vm_physseg_find_linear(vm_physmem, vm_nphysseg, pframe, offp);
863 #endif
864 }
865 
866 #if VM_PHYSSEG_MAX == 1
867 static inline int
868 vm_physseg_find_contig(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
869 {
870 
871 	/* 'contig' case */
872 	if (pframe >= segs[0].start && pframe < segs[0].end) {
873 		if (offp)
874 			*offp = pframe - segs[0].start;
875 		return(0);
876 	}
877 	return(-1);
878 }
879 
880 #elif (VM_PHYSSEG_STRAT == VM_PSTRAT_BSEARCH)
881 
882 static inline int
883 vm_physseg_find_bsearch(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
884 {
885 	/* binary search for it */
886 	int	start, len, guess;
887 
888 	/*
889 	 * if try is too large (thus target is less than try) we reduce
890 	 * the length to trunc(len/2) [i.e. everything smaller than "try"]
891 	 *
892 	 * if the try is too small (thus target is greater than try) then
893 	 * we set the new start to be (try + 1).   this means we need to
894 	 * reduce the length to (round(len/2) - 1).
895 	 *
896 	 * note "adjust" below which takes advantage of the fact that
897 	 *  (round(len/2) - 1) == trunc((len - 1) / 2)
898 	 * for any value of len we may have
899 	 */
900 
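	/*
	 * Worked example (illustrative): nsegs = 5, target page in segs[4].
	 * Pass 1: start = 0, len = 5, guess = 2; the target lies above
	 * segs[2], so start becomes 3, len is "adjusted" to 4 and then
	 * halved to 2.
	 * Pass 2: start = 3, len = 2, guess = 4; segs[4] contains the
	 * target, so 4 is returned.
	 */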
901 	for (start = 0, len = nsegs ; len != 0 ; len = len / 2) {
902 		guess = start + (len / 2);	/* try in the middle */
903 
904 		/* start past our try? */
905 		if (pframe >= segs[guess].start) {
906 			/* was try correct? */
907 			if (pframe < segs[guess].end) {
908 				if (offp)
909 					*offp = pframe - segs[guess].start;
910 				return guess;            /* got it */
911 			}
912 			start = guess + 1;	/* next time, start here */
913 			len--;			/* "adjust" */
914 		} else {
915 			/*
916 			 * pframe before try, just reduce length of
917 			 * region, done in "for" loop
918 			 */
919 		}
920 	}
921 	return(-1);
922 }
923 
924 #else
925 
926 static inline int
927 vm_physseg_find_linear(struct uvm_physseg *segs, int nsegs, paddr_t pframe, psize_t *offp)
928 {
929 	/* linear search for it */
930 	int	lcv;
931 
932 	for (lcv = 0; lcv < nsegs; lcv++) {
933 		if (pframe >= segs[lcv].start &&
934 		    pframe < segs[lcv].end) {
935 			if (offp)
936 				*offp = pframe - segs[lcv].start;
937 			return(lcv);		   /* got it */
938 		}
939 	}
940 	return(-1);
941 }
942 #endif
943 #endif /* UVM_HOTPLUG */
944 
945 bool
946 uvm_physseg_valid_p(uvm_physseg_t upm)
947 {
948 	struct uvm_physseg *ps;
949 
950 	if (upm == UVM_PHYSSEG_TYPE_INVALID ||
951 	    upm == UVM_PHYSSEG_TYPE_INVALID_EMPTY ||
952 	    upm == UVM_PHYSSEG_TYPE_INVALID_OVERFLOW)
953 		return false;
954 
955 	/*
956 	 * This is part of the delicate boot time init dance -
957 	 * partially initialised segments must still be treated as valid.
958 	 */
959 	if (uvm.page_init_done != true)
960 		return true;
961 
962 	ps = HANDLE_TO_PHYSSEG_NODE(upm);
963 
964 	/* Extra checks needed only post uvm_page_init() */
965 	if (ps->pgs == NULL)
966 		return false;
967 
968 	/* XXX: etc. */
969 
970 	return true;
971 
972 }
973 
974 /*
975  * Boot protocol dictates that these must be able to return partially
976  * initialised segments.
977  */
978 paddr_t
979 uvm_physseg_get_start(uvm_physseg_t upm)
980 {
981 	if (uvm_physseg_valid_p(upm) == false)
982 		return (paddr_t) -1;
983 
984 	return HANDLE_TO_PHYSSEG_NODE(upm)->start;
985 }
986 
987 paddr_t
988 uvm_physseg_get_end(uvm_physseg_t upm)
989 {
990 	if (uvm_physseg_valid_p(upm) == false)
991 		return (paddr_t) -1;
992 
993 	return HANDLE_TO_PHYSSEG_NODE(upm)->end;
994 }
995 
996 paddr_t
997 uvm_physseg_get_avail_start(uvm_physseg_t upm)
998 {
999 	if (uvm_physseg_valid_p(upm) == false)
1000 		return (paddr_t) -1;
1001 
1002 	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_start;
1003 }
1004 
1005 #if defined(UVM_PHYSSEG_LEGACY)
1006 void
1007 uvm_physseg_set_avail_start(uvm_physseg_t upm, paddr_t avail_start)
1008 {
1009 	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);
1010 
1011 #if defined(DIAGNOSTIC)
1012 	paddr_t avail_end;
1013 	avail_end = uvm_physseg_get_avail_end(upm);
1014 	KASSERT(uvm_physseg_valid_p(upm));
1015 	KASSERT(avail_start < avail_end && avail_start >= ps->start);
1016 #endif
1017 
1018 	ps->avail_start = avail_start;
1019 }
1020 
1021 void
1022 uvm_physseg_set_avail_end(uvm_physseg_t upm, paddr_t avail_end)
1023 {
1024 	struct uvm_physseg *ps = HANDLE_TO_PHYSSEG_NODE(upm);
1025 
1026 #if defined(DIAGNOSTIC)
1027 	paddr_t avail_start;
1028 	avail_start = uvm_physseg_get_avail_start(upm);
1029 	KASSERT(uvm_physseg_valid_p(upm));
1030 	KASSERT(avail_end > avail_start && avail_end <= ps->end);
1031 #endif
1032 
1033 	ps->avail_end = avail_end;
1034 }
1035 
1036 #endif /* UVM_PHYSSEG_LEGACY */
1037 
1038 paddr_t
1039 uvm_physseg_get_avail_end(uvm_physseg_t upm)
1040 {
1041 	if (uvm_physseg_valid_p(upm) == false)
1042 		return (paddr_t) -1;
1043 
1044 	return HANDLE_TO_PHYSSEG_NODE(upm)->avail_end;
1045 }
1046 
1047 struct vm_page *
1048 uvm_physseg_get_pg(uvm_physseg_t upm, paddr_t idx)
1049 {
1050 	KASSERT(uvm_physseg_valid_p(upm));
1051 	return &HANDLE_TO_PHYSSEG_NODE(upm)->pgs[idx];
1052 }
1053 
1054 #ifdef __HAVE_PMAP_PHYSSEG
1055 struct pmap_physseg *
1056 uvm_physseg_get_pmseg(uvm_physseg_t upm)
1057 {
1058 	KASSERT(uvm_physseg_valid_p(upm));
1059 	return &(HANDLE_TO_PHYSSEG_NODE(upm)->pmseg);
1060 }
1061 #endif
1062 
1063 int
1064 uvm_physseg_get_free_list(uvm_physseg_t upm)
1065 {
1066 	KASSERT(uvm_physseg_valid_p(upm));
1067 	return HANDLE_TO_PHYSSEG_NODE(upm)->free_list;
1068 }
1069 
1070 u_int
1071 uvm_physseg_get_start_hint(uvm_physseg_t upm)
1072 {
1073 	KASSERT(uvm_physseg_valid_p(upm));
1074 	return HANDLE_TO_PHYSSEG_NODE(upm)->start_hint;
1075 }
1076 
1077 bool
1078 uvm_physseg_set_start_hint(uvm_physseg_t upm, u_int start_hint)
1079 {
1080 	if (uvm_physseg_valid_p(upm) == false)
1081 		return false;
1082 
1083 	HANDLE_TO_PHYSSEG_NODE(upm)->start_hint = start_hint;
1084 	return true;
1085 }
1086 
1087 void
1088 uvm_physseg_init_seg(uvm_physseg_t upm, struct vm_page *pgs)
1089 {
1090 	psize_t i;
1091 	psize_t n;
1092 	paddr_t paddr;
1093 	struct uvm_physseg *seg;
1094 	struct vm_page *pg;
1095 
1096 	KASSERT(upm != UVM_PHYSSEG_TYPE_INVALID && pgs != NULL);
1097 
1098 	seg = HANDLE_TO_PHYSSEG_NODE(upm);
1099 	KASSERT(seg != NULL);
1100 	KASSERT(seg->pgs == NULL);
1101 
1102 	n = seg->end - seg->start;
1103 	seg->pgs = pgs;
1104 
1105 	/* init and free vm_pages (we've already zeroed them) */
1106 	paddr = ctob(seg->start);
1107 	for (i = 0 ; i < n ; i++, paddr += PAGE_SIZE) {
1108 		pg = &seg->pgs[i];
1109 		pg->phys_addr = paddr;
1110 #ifdef __HAVE_VM_PAGE_MD
1111 		VM_MDPAGE_INIT(pg);
1112 #endif
1113 		if (atop(paddr) >= seg->avail_start &&
1114 		    atop(paddr) < seg->avail_end) {
1115 			uvmexp.npages++;
1116 			/* add page to free pool */
1117 			uvm_page_set_freelist(pg,
1118 			    uvm_page_lookup_freelist(pg));
1119 			/* Disable LOCKDEBUG: too many and too early. */
1120 			mutex_init(&pg->interlock, MUTEX_NODEBUG, IPL_NONE);
1121 			uvm_pagefree(pg);
1122 		}
1123 	}
1124 }
1125 
1126 void
1127 uvm_physseg_seg_chomp_slab(uvm_physseg_t upm, struct vm_page *pgs, size_t n)
1128 {
1129 	struct uvm_physseg *seg = HANDLE_TO_PHYSSEG_NODE(upm);
1130 
1131 	/* max number of pre-boot unplug()s allowed */
1132 #define UVM_PHYSSEG_BOOT_UNPLUG_MAX VM_PHYSSEG_MAX
1133 
1134 	static char btslab_ex_storage[EXTENT_FIXED_STORAGE_SIZE(UVM_PHYSSEG_BOOT_UNPLUG_MAX)];
1135 
1136 	if (__predict_false(uvm.page_init_done == false)) {
1137 		seg->ext = extent_create("Boot time slab", (u_long) pgs, (u_long) (pgs + n),
1138 		    (void *)btslab_ex_storage, sizeof(btslab_ex_storage), 0);
1139 	} else {
1140 		seg->ext = extent_create("Hotplug slab", (u_long) pgs, (u_long) (pgs + n), NULL, 0, 0);
1141 	}
1142 
1143 	KASSERT(seg->ext != NULL);
1144 
1145 }
1146 
1147 struct vm_page *
1148 uvm_physseg_seg_alloc_from_slab(uvm_physseg_t upm, size_t pages)
1149 {
1150 	int err;
1151 	struct uvm_physseg *seg;
1152 	struct vm_page *pgs = NULL;
1153 
1154 	KASSERT(pages > 0);
1155 
1156 	seg = HANDLE_TO_PHYSSEG_NODE(upm);
1157 
1158 	if (__predict_false(seg->ext == NULL)) {
1159 		/*
1160 		 * This is a situation unique to boot time.
1161 		 * It shouldn't happen at any point other than from
1162 		 * the first uvm_page.c:uvm_page_init() call.
1163 		 * Since that call walks the segments in order, the
1164 		 * previous segment's extent already exists; reuse it below.
1165 		 */
1166 		KASSERT(uvm.page_init_done != true);
1167 
1168 		uvm_physseg_t upmp = uvm_physseg_get_prev(upm);
1169 		KASSERT(upmp != UVM_PHYSSEG_TYPE_INVALID);
1170 
1171 		seg->ext = HANDLE_TO_PHYSSEG_NODE(upmp)->ext;
1172 
1173 		KASSERT(seg->ext != NULL);
1174 	}
1175 
1176 	/* We allocate enough for this segment */
1177 	err = extent_alloc(seg->ext, sizeof(*pgs) * pages, 1, 0, EX_BOUNDZERO, (u_long *)&pgs);
1178 
1179 	if (err != 0) {
1180 #ifdef DEBUG
1181 		printf("%s: extent_alloc failed with error: %d \n",
1182 		    __func__, err);
1183 #endif
1184 	}
1185 
1186 	return pgs;
1187 }
1188 
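/*
 * Note on the slab/extent dance above: uvm_physseg_seg_chomp_slab()
 * registers a freshly allocated pgs[] slab with extent(9), and
 * uvm_physseg_seg_alloc_from_slab() then carves per-segment vm_page
 * ranges out of that slab; a plug of 'pages' pages consumes
 * sizeof(struct vm_page) * pages bytes of it.
 */
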
1189 /*
1190  * uvm_page_physload: load physical memory into VM system
1191  *
1192  * => all args are PFs
1193  * => all pages in start/end get vm_page structures
1194  * => areas marked by avail_start/avail_end get added to the free page pool
1195  * => we are limited to VM_PHYSSEG_MAX physical memory segments
1196  */
1197 
1198 uvm_physseg_t
1199 uvm_page_physload(paddr_t start, paddr_t end, paddr_t avail_start,
1200     paddr_t avail_end, int free_list)
1201 {
1202 	struct uvm_physseg *ps;
1203 	uvm_physseg_t upm;
1204 
1205 	if (__predict_true(uvm.page_init_done == true))
1206 		panic("%s: load attempted after uvm_page_init()\n", __func__);
1207 	if (uvmexp.pagesize == 0)
1208 		panic("uvm_page_physload: page size not set!");
1209 	if (free_list >= VM_NFREELIST || free_list < VM_FREELIST_DEFAULT)
1210 		panic("uvm_page_physload: bad free list %d", free_list);
1211 	if (start >= end)
1212 		panic("uvm_page_physload: start[%" PRIxPADDR "] >= end[%"
1213 		    PRIxPADDR "]", start, end);
1214 
1215 	if (uvm_physseg_plug(start, end - start, &upm) == false) {
1216 		panic("uvm_physseg_plug() failed at boot.");
1217 		/* NOTREACHED */
1218 		return UVM_PHYSSEG_TYPE_INVALID; /* XXX: correct type */
1219 	}
1220 
1221 	ps = HANDLE_TO_PHYSSEG_NODE(upm);
1222 
1223 	/* Legacy */
1224 	ps->avail_start = avail_start;
1225 	ps->avail_end = avail_end;
1226 
1227 	ps->free_list = free_list; /* XXX: */
1228 
1229 
1230 	return upm;
1231 }
1232 
1233 bool
1234 uvm_physseg_unplug(paddr_t pfn, size_t pages)
1235 {
1236 	uvm_physseg_t upm;
1237 	paddr_t off = 0, start __diagused, end;
1238 	struct uvm_physseg *seg;
1239 
1240 	upm = uvm_physseg_find(pfn, &off);
1241 
1242 	if (!uvm_physseg_valid_p(upm)) {
1243 		printf("%s: Tried to unplug from unknown offset\n", __func__);
1244 		return false;
1245 	}
1246 
1247 	seg = HANDLE_TO_PHYSSEG_NODE(upm);
1248 
1249 	start = uvm_physseg_get_start(upm);
1250 	end = uvm_physseg_get_end(upm);
1251 
1252 	if (end < (pfn + pages)) {
1253 		printf("%s: Tried to unplug oversized span \n", __func__);
1254 		return false;
1255 	}
1256 
1257 	KASSERT(pfn == start + off); /* sanity */
1258 
1259 	if (__predict_true(uvm.page_init_done == true)) {
1260 		/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1261 		if (extent_free(seg->ext, (u_long)(seg->pgs + off), sizeof(struct vm_page) * pages, EX_MALLOCOK | EX_NOWAIT) != 0)
1262 			return false;
1263 	}
1264 
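	/*
	 * Four cases follow: the span covers the whole segment (remove
	 * the segment), lies in the middle (split into two segments,
	 * UVM_HOTPLUG only), is a front chunk (truncate from the start),
	 * or is a back chunk (truncate from the end).
	 */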
1265 	if (off == 0 && (pfn + pages) == end) {
1266 #if defined(UVM_HOTPLUG) /* rbtree implementation */
1267 		int segcount = 0;
1268 		struct uvm_physseg *current_ps;
1269 		/* Complete segment */
1270 		if (uvm_physseg_graph.nentries == 1)
1271 			panic("%s: out of memory!", __func__);
1272 
1273 		if (__predict_true(uvm.page_init_done == true)) {
1274 			RB_TREE_FOREACH(current_ps, &(uvm_physseg_graph.rb_tree)) {
1275 				if (seg->ext == current_ps->ext)
1276 					segcount++;
1277 			}
1278 			KASSERT(segcount > 0);
1279 
1280 			if (segcount == 1) {
1281 				extent_destroy(seg->ext);
1282 			}
1283 
1284 			/*
1285 			 * We assume that the unplug will succeed from
1286 			 *  this point onwards
1287 			 */
1288 			uvmexp.npages -= (int) pages;
1289 		}
1290 
1291 		rb_tree_remove_node(&(uvm_physseg_graph.rb_tree), upm);
1292 		memset(seg, 0, sizeof(struct uvm_physseg));
1293 		uvm_physseg_free(seg, sizeof(struct uvm_physseg));
1294 		uvm_physseg_graph.nentries--;
1295 #else /* UVM_HOTPLUG */
1296 		int x;
1297 		if (vm_nphysmem == 1)
1298 			panic("uvm_page_physget: out of memory!");
1299 		vm_nphysmem--;
1300 		for (x = upm ; x < vm_nphysmem ; x++)
1301 			/* structure copy */
1302 			VM_PHYSMEM_PTR_SWAP(x, x + 1);
1303 #endif /* UVM_HOTPLUG */
1304 		/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1305 		return true;
1306 	}
1307 
1308 	if (off > 0 &&
1309 	    (pfn + pages) < end) {
1310 #if defined(UVM_HOTPLUG) /* rbtree implementation */
1311 		/* middle chunk - need a new segment */
1312 		struct uvm_physseg *ps, *current_ps;
1313 		ps = uvm_physseg_alloc(sizeof (struct uvm_physseg));
1314 		if (ps == NULL) {
1315 			printf("%s: Unable to allocate new fragment vm_physseg\n",
1316 			    __func__);
1317 			return false;
1318 		}
1319 
1320 		/* Remove middle chunk */
1321 		if (__predict_true(uvm.page_init_done == true)) {
1322 			KASSERT(seg->ext != NULL);
1323 			ps->ext = seg->ext;
1324 
1325 			/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1326 			/*
1327 			 * We assume that the unplug will succeed from
1328 			 *  this point onwards
1329 			 */
1330 			uvmexp.npages -= (int) pages;
1331 		}
1332 
1333 		ps->start = pfn + pages;
1334 		ps->avail_start = ps->start; /* XXX: Legacy */
1335 
1336 		ps->end = seg->end;
1337 		ps->avail_end = ps->end; /* XXX: Legacy */
1338 
1339 		seg->end = pfn;
1340 		seg->avail_end = seg->end; /* XXX: Legacy */
1341 
1342 
1343 		/*
1344 		 * The new pgs array points to the beginning of the
1345 		 * tail fragment.
1346 		 */
1347 		if (__predict_true(uvm.page_init_done == true))
1348 			ps->pgs = seg->pgs + off + pages;
1349 
1350 		current_ps = rb_tree_insert_node(&(uvm_physseg_graph.rb_tree), ps);
1351 		if (current_ps != ps) {
1352 			panic("uvm_page_physload: Duplicate address range detected!");
1353 		}
1354 		uvm_physseg_graph.nentries++;
1355 #else /* UVM_HOTPLUG */
1356 		panic("%s: can't unplug() from the middle of a segment without"
1357 		    " UVM_HOTPLUG\n",  __func__);
1358 		/* NOTREACHED */
1359 #endif /* UVM_HOTPLUG */
1360 		return true;
1361 	}
1362 
1363 	if (off == 0 && (pfn + pages) < end) {
1364 		/* Remove front chunk */
1365 		if (__predict_true(uvm.page_init_done == true)) {
1366 			/* XXX: KASSERT() that seg->pgs[] are not on any uvm lists */
1367 			/*
1368 			 * We assume that the unplug will succeed from
1369 			 *  this point onwards
1370 			 */
1371 			uvmexp.npages -= (int) pages;
1372 		}
1373 
1374 		/* Truncate */
1375 		seg->start = pfn + pages;
1376 		seg->avail_start = seg->start; /* XXX: Legacy */
1377 
1378 		/*
1379 		 * Move the pgs array start to the beginning of the
1380 		 * tail end.
1381 		 */
1382 		if (__predict_true(uvm.page_init_done == true))
1383 			seg->pgs += pages;
1384 
1385 		return true;
1386 	}
1387 
1388 	if (off > 0 && (pfn + pages) == end) {
1389 		/* back chunk */
1390 
1391 
1392 		/* Truncate! */
1393 		seg->end = pfn;
1394 		seg->avail_end = seg->end; /* XXX: Legacy */
1395 
1396 		uvmexp.npages -= (int) pages;
1397 
1398 		return true;
1399 	}
1400 
1401 	printf("%s: Tried to unplug unknown range \n", __func__);
1402 
1403 	return false;
1404 }
1405