xref: /netbsd-src/tests/sys/uvm/t_uvm_physseg_load.c (revision 07acf3c0966b983460b8c661d724f75f41e8ca5e)
1 /* $NetBSD: t_uvm_physseg_load.c,v 1.1 2016/12/19 12:21:29 cherry Exp $ */
2 
3 /*-
4  * Copyright (c) 2015, 2016 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Santhosh N. Raju <santhosh.raju@gmail.com> and
9  * by Cherry G. Mathew
10  *
11  * Redistribution and use in source and binary forms, with or without
12  * modification, are permitted provided that the following conditions
13  * are met:
14  * 1. Redistributions of source code must retain the above copyright
15  *    notice, this list of conditions and the following disclaimer.
16  * 2. Redistributions in binary form must reproduce the above copyright
17  *    notice, this list of conditions and the following disclaimer in the
18  *    documentation and/or other materials provided with the distribution.
19  *
20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30  * POSSIBILITY OF SUCH DAMAGE.
31  */
32 
33 #include <sys/cdefs.h>
34 __RCSID("$NetBSD: t_uvm_physseg_load.c,v 1.1 2016/12/19 12:21:29 cherry Exp $");
35 
36 /* Testing API - assumes userland */
37 /* Provide Kernel API equivalents */
38 #include <assert.h>
39 #include <stdbool.h>
40 #include <string.h> /* memset(3) et. al */
41 #include <stdio.h> /* printf(3) */
42 #include <stdlib.h> /* malloc(3) */
43 #include <stdarg.h>
44 #include <stddef.h>
45 #include <time.h>
46 
47 #define	PRIxPADDR	"lx"
48 #define	PRIxPSIZE	"lx"
49 #define	PRIuPSIZE	"lu"
50 #define	PRIxVADDR	"lx"
51 #define	PRIxVSIZE	"lx"
52 #define	PRIuVSIZE	"lu"
53 
54 #define UVM_HOTPLUG /* Enable hotplug with rbtree. */
55 #define PMAP_STEAL_MEMORY
56 #define DEBUG /* Enable debug functionality. */
57 
58 typedef unsigned long vaddr_t;
59 typedef unsigned long paddr_t;
60 typedef unsigned long psize_t;
61 typedef unsigned long vsize_t;
62 
63 #include <uvm/uvm_page.h>
64 
65 /*
 * If this line is commented out, tests related to uvm_physseg_get_pmseg()
 * won't run.
68  *
69  * Have a look at machine/uvm_physseg.h for more details.
70  */
71 #define __HAVE_PMAP_PHYSSEG
72 
73 #include <uvm/uvm_physseg.h>
74 
75 /*
76  * This is a dummy struct used for testing purposes
77  *
78  * In reality this struct would exist in the MD part of the code residing in
79  * machines/vmparam.h
80  */
81 
82 #ifdef __HAVE_PMAP_PHYSSEG
83 struct pmap_physseg {
84 	bool dummy_variable;		/* Dummy variable use for testing */
85 };
86 #endif
87 
#ifndef DIAGNOSTIC
#define	KASSERTMSG(e, msg, ...)	/* NOTHING */
#define	KASSERT(e)		/* NOTHING */
#else
#define	KASSERT(a)		assert(a)
/*
 * Wrapped in do { } while (0) so the macro is a single statement.
 * The original expanded to "printf(...); assert(...)", which silently
 * breaks when used unbraced under an if/else.
 */
#define	KASSERTMSG(exp, ...)	do {		\
	printf(__VA_ARGS__);			\
	assert((exp));				\
} while (/*CONSTCOND*/0)
#endif
95 
96 #define VM_PHYSSEG_STRAT VM_PSTRAT_BSEARCH
97 
98 #define VM_NFREELIST            4
99 #define VM_FREELIST_DEFAULT     0
100 #define VM_FREELIST_FIRST16     3
101 #define VM_FREELIST_FIRST1G     2
102 #define VM_FREELIST_FIRST4G     1
103 
104 /*
105  * Used in tests when Array implementation is tested
106  */
107 #if !defined(VM_PHYSSEG_MAX)
108 #define VM_PHYSSEG_MAX          32
109 #endif
110 
111 #define PAGE_SIZE               4096
112 #define PAGE_SHIFT              12
113 #define atop(x)         (((paddr_t)(x)) >> PAGE_SHIFT)
114 
115 #define	mutex_enter(l)
116 #define	mutex_exit(l)
117 
118 #define	_SYS_KMEM_H_ /* Disallow the real kmem API (see below) */
119 /* free(p) XXX: pgs management need more thought */
120 #define kmem_alloc(size, flags) malloc(size)
121 #define kmem_zalloc(size, flags) malloc(size)
122 #define kmem_free(p, size) free(p)
123 
psize_t physmem;	/* Total physical pages; read by the included uvm_physseg.c. */

struct uvmexp uvmexp;	/* decl: only .pagesize and .npages are used by these tests */
127 
128 /*
129  * uvm structure borrowed from uvm.h
130  *
131  * Remember this is a dummy structure used within the ATF Tests and
132  * uses only necessary fields from the original uvm struct.
133  * See uvm/uvm.h for the full struct.
134  */
135 
struct uvm {
	/* vm_page related parameters */

	/* set false in setup(), true by uvm_page_init_fake() */
	bool page_init_done;		/* TRUE if uvm_page_init() finished */
} uvm;
141 
142 static void
143 panic(const char *fmt, ...)
144 {
145 	va_list ap;
146 
147 	va_start(ap, fmt);
148 	vprintf(fmt, ap);
149 	printf("\n");
150 	va_end(ap);
151 	KASSERT(false);
152 
153 	/*NOTREACHED*/
154 }
155 
/* Stub for the kernel's uvm_pagefree(): the tests do not track freed pages. */
static void
uvm_pagefree(struct vm_page *pg)
{
	(void)pg;
}
161 
#if defined(UVM_HOTPLUG)
/* Stub for the pageout-policy re-init hook; policy is irrelevant here. */
static void
uvmpdpol_reinit(void)
{
}
#endif /* UVM_HOTPLUG */
169 
170 /* end - Provide Kernel API equivalents */
171 
172 #include "uvm/uvm_physseg.c"
173 
174 #include <atf-c.h>
175 
/* Parenthesized so the macro expands safely inside larger expressions. */
#define ONE_MEGABYTE (1024 * 1024)

/* Sample Page Frame Numbers */
#define VALID_START_PFN_1 atop(0)
#define VALID_END_PFN_1 atop(ONE_MEGABYTE)
#define VALID_AVAIL_START_PFN_1 atop(0)
#define VALID_AVAIL_END_PFN_1 atop(ONE_MEGABYTE)

#define VALID_START_PFN_2 atop(ONE_MEGABYTE + 1)
#define VALID_END_PFN_2 atop(ONE_MEGABYTE * 2)
#define VALID_AVAIL_START_PFN_2 atop(ONE_MEGABYTE + 1)
#define VALID_AVAIL_END_PFN_2 atop(ONE_MEGABYTE * 2)

#define VALID_START_PFN_3 atop((ONE_MEGABYTE * 2) + 1)
#define VALID_END_PFN_3 atop(ONE_MEGABYTE * 3)
#define VALID_AVAIL_START_PFN_3 atop((ONE_MEGABYTE * 2) + 1)
#define VALID_AVAIL_END_PFN_3 atop(ONE_MEGABYTE * 3)

#define VALID_START_PFN_4 atop(ONE_MEGABYTE + 1)
#define VALID_END_PFN_4 atop(ONE_MEGABYTE * 128)
#define VALID_AVAIL_START_PFN_4 atop(ONE_MEGABYTE + 1)
#define VALID_AVAIL_END_PFN_4 atop(ONE_MEGABYTE * 128)

#define VALID_START_PFN_5 atop(ONE_MEGABYTE + 1)
#define VALID_END_PFN_5 atop(ONE_MEGABYTE * 256)
#define VALID_AVAIL_START_PFN_5 atop(ONE_MEGABYTE + 1)
#define VALID_AVAIL_END_PFN_5 atop(ONE_MEGABYTE * 256)

/*
 * Total number of pages (of 4K size each) should be 256 for 1MB of memory.
 */
#define PAGE_COUNT_1M      256

/*
 * The number of Page Frames to allot per segment
 */
#define PF_STEP 8
213 
214 /*
 * A debug function to print the contents of upm.
216  */
217 	static inline void
218 	uvm_physseg_dump_seg(uvm_physseg_t upm)
219 	{
220 #if defined(DEBUG)
221 		printf("%s: seg->start == %ld\n", __func__,
222 		    uvm_physseg_get_start(upm));
223 		printf("%s: seg->end == %ld\n", __func__,
224 		    uvm_physseg_get_end(upm));
225 		printf("%s: seg->avail_start == %ld\n", __func__,
226 		    uvm_physseg_get_avail_start(upm));
227 		printf("%s: seg->avail_end == %ld\n", __func__,
228 		    uvm_physseg_get_avail_end(upm));
229 
230 		printf("====\n\n");
231 #else
232 		return;
233 #endif /* DEBUG */
234 	}
235 
236 /*
237  * Private accessor that gets the value of vm_physmem.nentries
238  */
239 static int
240 uvm_physseg_get_entries(void)
241 {
242 #if defined(UVM_HOTPLUG)
243 	return uvm_physseg_graph.nentries;
244 #else
245 	return vm_nphysmem;
246 #endif /* UVM_HOTPLUG */
247 }
248 
249 /*
250  * Note: This function replicates verbatim what happens in
251  * uvm_page.c:uvm_page_init().
252  *
253  * Please track any changes that happen there.
254  */
255 static void
256 uvm_page_init_fake(struct vm_page *pagearray, psize_t pagecount)
257 {
258 	uvm_physseg_t bank;
259 	size_t n;
260 
261 	for (bank = uvm_physseg_get_first(),
262 		 uvm_physseg_seg_chomp_slab(bank, pagearray, pagecount);
263 	     uvm_physseg_valid(bank);
264 	     bank = uvm_physseg_get_next(bank)) {
265 
266 		n = uvm_physseg_get_end(bank) - uvm_physseg_get_start(bank);
267 		uvm_physseg_seg_alloc_from_slab(bank, n);
268 		uvm_physseg_init_seg(bank, pagearray);
269 
270 		/* set up page array pointers */
271 		pagearray += n;
272 		pagecount -= n;
273 	}
274 
275 	uvm.page_init_done = true;
276 }
277 
278 /*
279  * PHYS_TO_VM_PAGE: find vm_page for a PA.   used by MI code to get vm_pages
280  * back from an I/O mapping (ugh!).   used in some MD code as well.
281  */
282 static struct vm_page *
283 uvm_phys_to_vm_page(paddr_t pa)
284 {
285 	paddr_t pf = atop(pa);
286 	paddr_t off;
287 	uvm_physseg_t psi;
288 
289 	psi = uvm_physseg_find(pf, &off);
290 	if (psi != UVM_PHYSSEG_TYPE_INVALID)
291 		return uvm_physseg_get_pg(psi, off);
292 	return(NULL);
293 }
294 
//static paddr_t
//uvm_vm_page_to_phys(const struct vm_page *pg)
//{
//
//	return pg->phys_addr;
//}

/*
 * XXX: To do, write control test cases for uvm_vm_page_to_phys().
 */

/* #define VM_PAGE_TO_PHYS(entry)  uvm_vm_page_to_phys(entry) */

/* MI-style alias used by all the load tests below. */
#define PHYS_TO_VM_PAGE(pa)     uvm_phys_to_vm_page(pa)
309 
310 /*
311  * Test Fixture SetUp().
312  */
313 static void
314 setup(void)
315 {
316 	/* Prerequisites for running certain calls in uvm_physseg */
317 	uvmexp.pagesize = PAGE_SIZE;
318 	uvmexp.npages = 0;
319 	uvm.page_init_done = false;
320 	uvm_physseg_init();
321 }
322 
323 ATF_TC(uvm_physseg_100);
324 ATF_TC_HEAD(uvm_physseg_100, tc)
325 {
326 	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
327 	    100 calls, VM_PHYSSEG_MAX is 32.");
328 }
329 ATF_TC_BODY(uvm_physseg_100, tc)
330 {
331 	paddr_t pa;
332 
333 	setup();
334 
335 	for(paddr_t i = VALID_START_PFN_1;
336 	    i < VALID_END_PFN_1; i += PF_STEP) {
337 		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
338 		    VM_FREELIST_DEFAULT);
339 	}
340 
341 	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
342 
343 	srandom((unsigned)time(NULL));
344 	for(int i = 0; i < 100; i++) {
345 		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
346 		PHYS_TO_VM_PAGE(pa);
347 	}
348 
349 	ATF_CHECK_EQ(true, true);
350 }
351 
352 ATF_TC(uvm_physseg_1K);
353 ATF_TC_HEAD(uvm_physseg_1K, tc)
354 {
355 	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
356 	    1000 calls, VM_PHYSSEG_MAX is 32.");
357 }
358 ATF_TC_BODY(uvm_physseg_1K, tc)
359 {
360 	paddr_t pa;
361 
362 	setup();
363 
364 	for(paddr_t i = VALID_START_PFN_1;
365 	    i < VALID_END_PFN_1; i += PF_STEP) {
366 		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
367 		    VM_FREELIST_DEFAULT);
368 	}
369 
370 	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
371 
372 	srandom((unsigned)time(NULL));
373 	for(int i = 0; i < 1000; i++) {
374 		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
375 		PHYS_TO_VM_PAGE(pa);
376 	}
377 
378 	ATF_CHECK_EQ(true, true);
379 }
380 
381 ATF_TC(uvm_physseg_10K);
382 ATF_TC_HEAD(uvm_physseg_10K, tc)
383 {
384 	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
385 	    10,000 calls, VM_PHYSSEG_MAX is 32.");
386 }
387 ATF_TC_BODY(uvm_physseg_10K, tc)
388 {
389 	paddr_t pa;
390 
391 	setup();
392 
393 	for(paddr_t i = VALID_START_PFN_1;
394 	    i < VALID_END_PFN_1; i += PF_STEP) {
395 		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
396 		    VM_FREELIST_DEFAULT);
397 	}
398 
399 	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
400 
401 	srandom((unsigned)time(NULL));
402 	for(int i = 0; i < 10000; i++) {
403 		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
404 		PHYS_TO_VM_PAGE(pa);
405 	}
406 
407 	ATF_CHECK_EQ(true, true);
408 }
409 
410 ATF_TC(uvm_physseg_100K);
411 ATF_TC_HEAD(uvm_physseg_100K, tc)
412 {
413 	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
414 	    100,000 calls, VM_PHYSSEG_MAX is 32.");
415 }
416 ATF_TC_BODY(uvm_physseg_100K, tc)
417 {
418 	paddr_t pa;
419 
420 	setup();
421 
422 	for(paddr_t i = VALID_START_PFN_1;
423 	    i < VALID_END_PFN_1; i += PF_STEP) {
424 		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
425 		    VM_FREELIST_DEFAULT);
426 	}
427 
428 	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
429 
430 	srandom((unsigned)time(NULL));
431 	for(int i = 0; i < 100000; i++) {
432 		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
433 		PHYS_TO_VM_PAGE(pa);
434 	}
435 
436 	ATF_CHECK_EQ(true, true);
437 }
438 
439 ATF_TC(uvm_physseg_1M);
440 ATF_TC_HEAD(uvm_physseg_1M, tc)
441 {
442 	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
443 	    1,000,000 calls, VM_PHYSSEG_MAX is 32.");
444 }
445 ATF_TC_BODY(uvm_physseg_1M, tc)
446 {
447 	paddr_t pa;
448 
449 	setup();
450 
451 	for(paddr_t i = VALID_START_PFN_1;
452 	    i < VALID_END_PFN_1; i += PF_STEP) {
453 		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
454 		    VM_FREELIST_DEFAULT);
455 	}
456 
457 	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
458 
459 	srandom((unsigned)time(NULL));
460 	for(int i = 0; i < 1000000; i++) {
461 		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
462 		PHYS_TO_VM_PAGE(pa);
463 	}
464 
465 	ATF_CHECK_EQ(true, true);
466 }
467 
468 ATF_TC(uvm_physseg_10M);
469 ATF_TC_HEAD(uvm_physseg_10M, tc)
470 {
471 	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
472 	    10,000,000 calls, VM_PHYSSEG_MAX is 32.");
473 }
474 ATF_TC_BODY(uvm_physseg_10M, tc)
475 {
476 	paddr_t pa;
477 
478 	setup();
479 
480 	for(paddr_t i = VALID_START_PFN_1;
481 	    i < VALID_END_PFN_1; i += PF_STEP) {
482 		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
483 		    VM_FREELIST_DEFAULT);
484 	}
485 
486 	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
487 
488 	srandom((unsigned)time(NULL));
489 	for(int i = 0; i < 10000000; i++) {
490 		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
491 		PHYS_TO_VM_PAGE(pa);
492 	}
493 
494 	ATF_CHECK_EQ(true, true);
495 }
496 
497 ATF_TC(uvm_physseg_100M);
498 ATF_TC_HEAD(uvm_physseg_100M, tc)
499 {
500 	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
501 	    100,000,000 calls, VM_PHYSSEG_MAX is 32.");
502 }
503 ATF_TC_BODY(uvm_physseg_100M, tc)
504 {
505 	paddr_t pa;
506 
507 	setup();
508 
509 	for(paddr_t i = VALID_START_PFN_1;
510 	    i < VALID_END_PFN_1; i += PF_STEP) {
511 		uvm_page_physload(i, i + PF_STEP, i, i + PF_STEP,
512 		    VM_FREELIST_DEFAULT);
513 	}
514 
515 	ATF_REQUIRE_EQ(VM_PHYSSEG_MAX, uvm_physseg_get_entries());
516 
517 	srandom((unsigned)time(NULL));
518 	for(int i = 0; i < 100000000; i++) {
519 		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_1);
520 		PHYS_TO_VM_PAGE(pa);
521 	}
522 
523 	ATF_CHECK_EQ(true, true);
524 }
525 
526 ATF_TC(uvm_physseg_1MB);
527 ATF_TC_HEAD(uvm_physseg_1MB, tc)
528 {
529 	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
530 	    10,000,000 calls, VM_PHYSSEG_MAX is 32 on 1 MB Segment.");
531 }
532 ATF_TC_BODY(uvm_physseg_1MB, t)
533 {
534 	paddr_t pa = 0;
535 
536 	paddr_t pf = 0;
537 
538 	psize_t pf_chunk_size = 0;
539 
540 	psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
541 
542 	psize_t npages2 = (VALID_END_PFN_2 - VALID_START_PFN_2);
543 
544 	struct vm_page *slab = malloc(sizeof(struct vm_page) *
545 	    (npages1 + npages2));
546 
547 	setup();
548 
549 	/* We start with zero segments */
550 	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
551 	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
552 
553 	/* Post boot: Fake all segments and pages accounted for. */
554 	uvm_page_init_fake(slab, npages1 + npages2);
555 
556 	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_2, npages2, NULL));
557 	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
558 
559 	srandom((unsigned)time(NULL));
560 	for(pf = VALID_START_PFN_2; pf < VALID_END_PFN_2; pf += PF_STEP) {
561 		pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
562 		uvm_physseg_unplug(pf, pf_chunk_size);
563 	}
564 
565 	for(int i = 0; i < 10000000; i++) {
566 		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_2);
567 		if(pa < ctob(VALID_START_PFN_2))
568 			pa += ctob(VALID_START_PFN_2);
569 		PHYS_TO_VM_PAGE(pa);
570 	}
571 
572 	ATF_CHECK_EQ(true, true);
573 }
574 
575 ATF_TC(uvm_physseg_64MB);
576 ATF_TC_HEAD(uvm_physseg_64MB, tc)
577 {
578 	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
579 	    10,000,000 calls, VM_PHYSSEG_MAX is 32 on 64 MB Segment.");
580 }
581 ATF_TC_BODY(uvm_physseg_64MB, t)
582 {
583 	paddr_t pa = 0;
584 
585 	paddr_t pf = 0;
586 
587 	psize_t pf_chunk_size = 0;
588 
589 	psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
590 
591 	psize_t npages2 = (VALID_END_PFN_3 - VALID_START_PFN_3);
592 
593 	struct vm_page *slab = malloc(sizeof(struct vm_page)  *
594 	    (npages1 + npages2));
595 
596 	setup();
597 
598 	/* We start with zero segments */
599 	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
600 	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
601 
602 	/* Post boot: Fake all segments and pages accounted for. */
603 	uvm_page_init_fake(slab, npages1 + npages2);
604 
605 	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_3, npages2, NULL));
606 	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
607 
608 	srandom((unsigned)time(NULL));
609 	for(pf = VALID_START_PFN_3; pf < VALID_END_PFN_3; pf += PF_STEP) {
610 		pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
611 		uvm_physseg_unplug(pf, pf_chunk_size);
612 	}
613 
614 	for(int i = 0; i < 10000000; i++) {
615 		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_3);
616 		if(pa < ctob(VALID_START_PFN_3))
617 			pa += ctob(VALID_START_PFN_3);
618 		PHYS_TO_VM_PAGE(pa);
619 	}
620 
621 	ATF_CHECK_EQ(true, true);
622 }
623 
624 ATF_TC(uvm_physseg_128MB);
625 ATF_TC_HEAD(uvm_physseg_128MB, tc)
626 {
627 	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
628 	    10,000,000 calls, VM_PHYSSEG_MAX is 32 on 128 MB Segment.");
629 }
630 ATF_TC_BODY(uvm_physseg_128MB, t)
631 {
632 	paddr_t pa = 0;
633 
634 	paddr_t pf = 0;
635 
636 	psize_t pf_chunk_size = 0;
637 
638 	psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
639 
640 	psize_t npages2 = (VALID_END_PFN_4 - VALID_START_PFN_4);
641 
642 	struct vm_page *slab = malloc(sizeof(struct vm_page)
643 	    * (npages1 + npages2));
644 
645 	setup();
646 
647 	/* We start with zero segments */
648 	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
649 	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
650 
651 	/* Post boot: Fake all segments and pages accounted for. */
652 	uvm_page_init_fake(slab, npages1 + npages2);
653 
654 	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_2, npages2, NULL));
655 	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
656 
657 	srandom((unsigned)time(NULL));
658 	for(pf = VALID_START_PFN_4; pf < VALID_END_PFN_4; pf += PF_STEP) {
659 		pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
660 		uvm_physseg_unplug(pf, pf_chunk_size);
661 	}
662 
663 	for(int i = 0; i < 10000000; i++) {
664 		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_4);
665 		if(pa < ctob(VALID_START_PFN_4))
666 			pa += ctob(VALID_START_PFN_4);
667 		PHYS_TO_VM_PAGE(pa);
668 	}
669 
670 	ATF_CHECK_EQ(true, true);
671 }
672 
673 ATF_TC(uvm_physseg_256MB);
674 ATF_TC_HEAD(uvm_physseg_256MB, tc)
675 {
676 	atf_tc_set_md_var(tc, "descr", "Load test uvm_phys_to_vm_page() with \
677 	    10,000,000 calls, VM_PHYSSEG_MAX is 32 on 256 MB Segment.");
678 }
679 ATF_TC_BODY(uvm_physseg_256MB, t)
680 {
681 	paddr_t pa = 0;
682 
683 	paddr_t pf = 0;
684 
685 	psize_t pf_chunk_size = 0;
686 
687 	psize_t npages1 = (VALID_END_PFN_1 - VALID_START_PFN_1);
688 
689 	psize_t npages2 = (VALID_END_PFN_5 - VALID_START_PFN_5);
690 
691 	struct vm_page *slab = malloc(sizeof(struct vm_page)  * (npages1 + npages2));
692 
693 	setup();
694 
695 	/* We start with zero segments */
696 	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_1, npages1, NULL));
697 	ATF_REQUIRE_EQ(1, uvm_physseg_get_entries());
698 
699 	/* Post boot: Fake all segments and pages accounted for. */
700 	uvm_page_init_fake(slab, npages1 + npages2);
701 
702 	ATF_REQUIRE_EQ(true, uvm_physseg_plug(VALID_START_PFN_2, npages2, NULL));
703 	ATF_REQUIRE_EQ(2, uvm_physseg_get_entries());
704 
705 	srandom((unsigned)time(NULL));
706 	for(pf = VALID_START_PFN_5; pf < VALID_END_PFN_5; pf += PF_STEP) {
707 		pf_chunk_size = (psize_t) random() % (psize_t) (PF_STEP - 1) + 1;
708 		uvm_physseg_unplug(pf, pf_chunk_size);
709 	}
710 
711 	for(int i = 0; i < 10000000; i++) {
712 		pa = (paddr_t) random() % (paddr_t) ctob(VALID_END_PFN_5);
713 		if(pa < ctob(VALID_END_PFN_5))
714 			pa += ctob(VALID_START_PFN_5);
715 		PHYS_TO_VM_PAGE(pa);
716 	}
717 
718 	ATF_CHECK_EQ(true, true);
719 }
720 
ATF_TP_ADD_TCS(tp)
{
	/* Fixed memory size tests. */
	ATF_TP_ADD_TC(tp, uvm_physseg_100);
	ATF_TP_ADD_TC(tp, uvm_physseg_1K);
	ATF_TP_ADD_TC(tp, uvm_physseg_10K);
	ATF_TP_ADD_TC(tp, uvm_physseg_100K);
	ATF_TP_ADD_TC(tp, uvm_physseg_1M);
	ATF_TP_ADD_TC(tp, uvm_physseg_10M);
	ATF_TP_ADD_TC(tp, uvm_physseg_100M);

#if defined(UVM_HOTPLUG)
	/* Variable memory size tests; these use uvm_physseg_plug/unplug. */
	ATF_TP_ADD_TC(tp, uvm_physseg_1MB);
	ATF_TP_ADD_TC(tp, uvm_physseg_64MB);
	ATF_TP_ADD_TC(tp, uvm_physseg_128MB);
	ATF_TP_ADD_TC(tp, uvm_physseg_256MB);
#endif /* UVM_HOTPLUG */

	return atf_no_error();
}
742