/*	$NetBSD: x86_xpmap.c,v 1.36 2011/11/06 15:18:19 cherry Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 *
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.36 2011/11/06 15:18:19 cherry Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/simplelock.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef	XENDEBUG
/* #define XENDEBUG_SYNC */
/* #define	XENDEBUG_LOW */

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#define	XENPRINTK(x) printk x
#define	XENPRINTK2(x) /* printk x */

static char XBUF[256];
#else
#define	XENPRINTF(x)
#define	XENPRINTK(x)
#define	XENPRINTK2(x)
#endif
#define	PRINTF(x) printf x
#define	PRINTK(x) printk x

/* on x86_64 the kernel runs in ring 3 */
#ifdef __x86_64__
#define PG_k PG_u
#else
#define PG_k 0
#endif

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;

void xen_failsafe_handler(void);

#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}

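/*
 * Remap the pages backing the LDT read-only, then tell Xen where the
 * table lives.  (A PV guest may not keep writable mappings of its
 * descriptor tables.)
 */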
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue_array[MAXCPUS][XPQUEUE_SIZE];
static int xpq_idx_array[MAXCPUS];

extern struct cpu_info * (*xpq_cpu)(void);

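/*
 * Submit this CPU's pending MMU updates to the hypervisor in a single
 * HYPERVISOR_mmu_update batch.  On a partial success, retry the
 * remaining entries; panic if the hypercall still fails.
 */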
171 xpq_flush_queue(void)
172 {
173 	int i, ok = 0, ret;
174 
175 	mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
176 	int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];
177 
178 	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
179 	for (i = 0; i < xpq_idx; i++)
180 		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
181 		    xpq_queue[i].ptr, xpq_queue[i].val));
182 
183 retry:
184 	ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);
185 
186 	if (xpq_idx != 0 && ret < 0) {
187 		printf("xpq_flush_queue: %d entries (%d successful)\n",
188 		    xpq_idx, ok);
189 
190 		if (ok != 0) {
191 			xpq_queue += ok;
192 			xpq_idx -= ok;
193 			ok = 0;
194 			goto retry;
195 		}
196 
197 		for (i = 0; i < xpq_idx; i++)
198 			printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
199 			   xpq_queue[i].ptr, xpq_queue[i].val);
200 		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
201 	}
202 	xpq_idx_array[xpq_cpu()->ci_cpuid] = 0;
203 }
204 
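/*
 * Advance this CPU's queue index, flushing the queue automatically
 * once it fills up.
 */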
static inline void
xpq_increment_idx(void)
{

	if (__predict_false(++xpq_idx_array[xpq_cpu()->ci_cpuid] == XPQUEUE_SIZE))
		xpq_flush_queue();
}

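/*
 * Queue an update of the machine-to-physical (M2P) table entry for
 * machine address "ma", pointing it back at physical address "pa".
 */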
void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{

	mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];

	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));

	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

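/*
 * Queue a PTE update: write "val" into the page table entry at machine
 * address "ptr".  Callers typically queue a batch of updates and then
 * submit them in one hypercall via xpq_flush_queue(), either directly
 * or through one of the flush operations below.
 */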
void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];

	KASSERT((ptr & 3) == 0);
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

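/*
 * Switch to the page table rooted at machine address "pa" (loads a
 * new base pointer, the PV equivalent of writing %cr3).
 */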
void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}

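/*
 * Pin the page at machine address "pa" as a page-table page;
 * "lvl" is the MMUEXT_PIN_L<n>_TABLE command for the desired level.
 */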
void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
	    lvl + 1, pa));

	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = lvl;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table");
}

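/*
 * Unpin the previously pinned page-table page at machine address "pa".
 */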
void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}

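/*
 * Point the hypervisor at a new LDT: "va" is the page-aligned linear
 * address of the table and "entries" its number of descriptors.
 */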
void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}

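/*
 * Flush the TLB of the local vCPU.
 */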
void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}

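/*
 * Write back and flush the CPU caches via the hypervisor.
 */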
void
xpq_flush_cache(void)
{
	struct mmuext_op op;
	int s = splvm(), err;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_flush_cache\n"));
	op.cmd = MMUEXT_FLUSH_CACHE;
	if ((err = HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) < 0) {
		panic("xpq_flush_cache, err %d", err);
	}
	splx(s); /* XXX: removeme */
}

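/*
 * Invalidate the TLB entry for "va" on the local vCPU.
 */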
void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}

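/*
 * Invalidate the TLB entry for "va" on the set of vCPUs named by
 * "cpumask".
 */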
void
xen_mcast_invlpg(vaddr_t va, uint32_t cpumask)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_MULTI;
	op.arg1.linear_addr = va;
	op.arg2.vcpumask = &cpumask;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_invlpg");
	}
}

void
xen_bcast_invlpg(vaddr_t va)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_ALL;
	op.arg1.linear_addr = va;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_invlpg");
	}
}

/* This is a synchronous call. */
void
xen_mcast_tlbflush(uint32_t cpumask)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	op.arg2.vcpumask = &cpumask;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_tlbflush");
	}
}

/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_ALL;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_tlbflush");
	}
}

/* This is a synchronous call. */
void
xen_vcpu_mcast_invlpg(vaddr_t sva, vaddr_t eva, uint32_t cpumask)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Round the range down to page boundaries */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_mcast_invlpg(sva, cpumask);
	}
}

/* This is a synchronous call. */
void
xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr_t eva)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Round the range down to page boundaries */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_bcast_invlpg(sva);
	}
}

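/*
 * Perform a single, synchronous page-table update on behalf of another
 * domain ("dom").  Returns 0 on success, EFAULT if the hypercall fails.
 */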
int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;

	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return 0;
}

#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
	int i;

	mmu_update_t *xpq_queue = xpq_queue_array[xpq_cpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[xpq_cpu()->ci_cpuid];

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
		    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
	}
}
#endif

extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly(vaddr_t);
static void xen_bootstrap_tables(vaddr_t, vaddr_t, int, int, int);

/* How many PDEs? */
#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

/*
 * Construct and switch to new pagetables.
 * first_avail is the first vaddr we can use after
 * we get rid of the Xen pagetables.
 */

vaddr_t xen_pmap_bootstrap(void);

/*
 * Function to get rid of the Xen bootstrap tables.
 */

/* How many PDPs do we need? */
#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages,
 * all of them mapped by the L3 page.  We also need a shadow page
 * for L3[3].
 */
static const int l2_4_count = 6;
#elif defined(__x86_64__)
static const int l2_4_count = PTP_LEVELS;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif

vaddr_t
xen_pmap_bootstrap(void)
{
	int count, oldcount;
	long mapsize;
	vaddr_t bootstrap_tables, init_tables;

	memset(xpq_idx_array, 0, sizeof xpq_idx_array);

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;
	init_tables = xen_start_info.pt_base;
	__PRINTK(("xen_arch_pmap_bootstrap init_tables=0x%lx\n", init_tables));

	/* Space after the Xen bootstrap tables should be free */
	bootstrap_tables = xen_start_info.pt_base +
		(xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need:
	 * first, everything mapped before the Xen bootstrap tables
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * NBPG;
#ifdef __x86_64__
	mapsize += NBPG;
#endif
	mapsize += NBPG;

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif
	/* at this point mapsize doesn't include the table size */

#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif /* __x86_64__ */

	/* now compute how many L2 pages we need exactly */
	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
	    ((long)count << L2_SHIFT) + KERNBASE) {
		count++;
	}
#ifndef __x86_64__
	/*
	 * one more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here.  It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install the bootstrap pages.  We may need more L2 pages here than
	 * the final table will have, as the bootstrap table is installed
	 * after the final one.
	 */
	oldcount = count;

bootstrap_again:
	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables; move the bootstrap tables if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
					((count + l2_4_count) * PAGE_SIZE);
	/* make sure we have enough to map the bootstrap_tables */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
		xen_start_info.nr_pt_frames, oldcount, 0);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, 1);

	/* zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * NBPG);

	/* Finally, flush the TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}

/*
 * Build a new table and switch to it.
 * old_count is the number of old tables (including PGD, PDTPE and PDE);
 * new_count is the number of new tables (PTE only).
 * We assume the areas don't overlap.
 */
static void
xen_bootstrap_tables(vaddr_t old_pgd, vaddr_t new_pgd,
	int old_count, int new_count, int final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *cur_pgd, *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, text_end, map_end;
	int i;
	extern char __data_start;

	__PRINTK(("xen_bootstrap_tables(%#" PRIxVADDR ", %#" PRIxVADDR ","
	    " %d, %d)\n",
	    old_pgd, new_pgd, old_count, new_count));
	text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
	/*
	 * size of R/W area after kernel text:
	 *  xencons_interface (if present)
	 *  xenstore_interface (if present)
	 *  table pages (new_count + l2_4_count entries)
	 * extra mappings (only when final is true):
	 *  UAREA
	 *  dummy user PGD (x86_64 only)/gdt page (i386 only)
	 *  HYPERVISOR_shared_info
	 *  ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
	if (final) {
		map_end += (UPAGES + 1) * NBPG;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += NBPG;
	}
	/*
	 * we always set atdevbase in the final tables, as it's used by
	 * init386 to find the first available VA.  map_end is updated
	 * only if we are dom0, so atdevbase -> atdevbase + IOM_SIZE
	 * will be mapped only in this case.
	 */
	if (final)
		atdevbase = map_end;
#ifdef DOM0OPS
	if (final && xendomain_is_dom0()) {
		/* ISA I/O mem */
		map_end += IOM_SIZE;
	}
#endif /* DOM0OPS */

	__PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
	    text_end, map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables.  What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	cur_pgd = (pd_entry_t *) old_pgd;
	bt_pgd = (pd_entry_t *) new_pgd;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;
#if PTP_LEVELS > 3
	/* per-cpu L4 PD */
	pd_entry_t *bt_cpu_pgd = bt_pgd;
	/* pmap_kernel() "shadow" L4 PD */
	bt_pgd = (pd_entry_t *) avail;
	memset(bt_pgd, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	/* Install level 3 */
	pdtpe = (pd_entry_t *) avail;
	memset(pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] = bt_cpu_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

	__PRINTK(("L3 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L4[%#x]\n",
	    pdtpe, addr, bt_pgd[pl4_pi(KERNTEXTOFF)], pl4_pi(KERNTEXTOFF)));
#else
	pdtpe = bt_pgd;
#endif /* PTP_LEVELS > 3 */

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    pde, addr, pdtpe[pl3_pi(KERNTEXTOFF)], pl3_pi(KERNTEXTOFF)));
#elif defined(PAE)
	/* our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;
	addr = ((u_long) pde) - KERNBASE;
	/*
	 * enter L2 pages in the L3.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		/*
		 * Xen doesn't want R/W mappings in L3 entries, it'll add
		 * them itself.
		 */
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L3[%#x]\n",
		    (vaddr_t)pde + PAGE_SIZE * i, addr, pdtpe[i], i));
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    (vaddr_t)pde + PAGE_SIZE * 4, addr, pdtpe[3], 3));

#else /* PAE */
	pde = bt_pgd;
#endif /* PTP_LEVELS > 2 */

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *) avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
				__PRINTK(("HYPERVISOR_shared_info "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    HYPERVISOR_shared_info, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xencons_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xencons_interface, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xenstore_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xenstore_interface, pte[pl1_pi(page)]));
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
			}
#endif
			pte[pl1_pi(page)] |= PG_k | PG_V;
			if (page < text_end) {
				/* map kernel text RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= old_pgd
			    && page < old_pgd + (old_count * PAGE_SIZE)) {
				/* map old page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* map new page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else {
				/* map page RW */
				pte[pl1_pi(page)] |= PG_RW;
			}

			if ((page >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE))
			    || page >= new_pgd) {
				__PRINTK(("va %#lx pa %#lx "
				    "entry 0x%" PRIxPADDR " -> L1[%#x]\n",
				    page, page - KERNBASE,
				    pte[pl1_pi(page)], pl1_pi(page)));
			}
			page += PAGE_SIZE;
		}

		addr = ((u_long) pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
		__PRINTK(("L1 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L2[%#x]\n",
		    pte, addr, pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t) pte);
	}

	/* Install recursive page tables mapping */
#ifdef PAE
	/*
	 * we need a shadow page for the kernel's L2 page.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	cpu_info_primary.ci_kpm_pdir = &pde[L2_SLOT_KERN + NPDPG];
	cpu_info_primary.ci_kpm_pdirpa =
	    (vaddr_t) cpu_info_primary.ci_kpm_pdir - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD.  Instead,
	 * we enter the first 4 L2 pages, which includes the kernel's L2
	 * shadow.  But we have to enter the shadow after switching
	 * %cr3, or Xen will refcount some PTE with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("pde[%d] va %#" PRIxVADDR " pa %#" PRIxPADDR
		    " entry %#" PRIxPADDR "\n",
		    (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i,
		    addr, pde[PDIR_SLOT_PTE + i]));
	}
#if 0
	addr += PAGE_SIZE; /* point to shadow L2 */
	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
	    (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
	    (int64_t)pde[PDIR_SLOT_PTE + 3]));
#endif
	/* Mark tables RO, and pin the kernel's shadow as L2 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
		if (i == 2 || i == 3)
			continue;
#if 0
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		__PRINTK(("pin L2 %d addr %#" PRIxPADDR "\n", 2, addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#if 0
	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
	__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
	xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */
	/* recursive entry in higher-level per-cpu PD and pmap_kernel() */
	bt_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked((paddr_t)bt_pgd - KERNBASE) | PG_k | PG_V;
#ifdef __x86_64__
	bt_cpu_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked((paddr_t)bt_cpu_pgd - KERNBASE) | PG_k | PG_V;
#endif /* __x86_64__ */
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] va %#" PRIxVADDR " pa %#" PRIxPADDR
	    " entry %#" PRIxPADDR "\n", new_pgd, (paddr_t)new_pgd - KERNBASE,
	    bt_pgd[PDIR_SLOT_PTE]));
	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t) pde);
#endif
#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t) pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif
	/* Pin the PGD */
	__PRINTK(("pin PGD: %" PRIxVADDR "\n", new_pgd - KERNBASE));
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif defined(PAE)
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

	/* Save the phys. addr of PDP, for libkvm. */
#ifdef PAE
	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
	PDPpaddr = (u_long)bt_pgd - KERNBASE;
#endif

	/* Switch to new tables */
	__PRINTK(("switch to PGD\n"));
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry %#" PRIxPADDR "\n",
	    bt_pgd[PDIR_SLOT_PTE]));

#ifdef PAE
	if (final) {
		/* save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* now enter the kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_k | PG_V);
		xpq_flush_queue();
	}
#elif defined(__x86_64__)
	if (final) {
		/* save the address of the real per-cpu L4 pgd page */
		cpu_info_primary.ci_kpm_pdir = bt_cpu_pgd;
		cpu_info_primary.ci_kpm_pdirpa = ((paddr_t) bt_cpu_pgd - KERNBASE);
	}
#endif

	/* Now we can safely reclaim the space taken by the old tables */

	__PRINTK(("unpin old PGD\n"));
	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
	/* Mark old tables RW */
	page = old_pgd;
	addr = (paddr_t) pde[pl2_pi(page)] & PG_FRAME;
	addr = xpmap_mtop(addr);
	pte = (pd_entry_t *) ((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	__PRINTK(("*pde %#" PRIxPADDR " addr %#" PRIxPADDR " pte %#lx\n",
	    pde[pl2_pi(page)], addr, (long)pte));
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long) pte) - KERNBASE);
		XENPRINTK(("addr %#" PRIxPADDR " pte %#lx "
		   "*pte %#" PRIxPADDR "\n",
		   addr, (long)pte, *pte));
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our PTEs are contiguous, so it's safe to just "++" here.
		 */
		pte++;
	}
	xpq_flush_queue();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page read-only.
 * XXX: assumes vaddr = paddr + KERNBASE.
 */

static void
xen_bt_set_readonly(vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_k | PG_V;

	HYPERVISOR_update_va_mapping(page, entry, UVMF_INVLPG);
}

#ifdef __x86_64__
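/*
 * Install a new user-mode page directory: amd64 PV guests keep
 * separate kernel and user base pointers, and this loads the user one.
 */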
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = pfn_to_mfn(page >> PAGE_SHIFT);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
			" directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */
1064