/*	$NetBSD: x86_xpmap.c,v 1.28 2011/06/15 20:50:02 rmind Exp $	*/

/*
 * Copyright (c) 2006 Mathieu Ropert <mro@adviseo.fr>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

/*
 * Copyright (c) 2006, 2007 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2004 Christian Limpach.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */


#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: x86_xpmap.c,v 1.28 2011/06/15 20:50:02 rmind Exp $");

#include "opt_xen.h"
#include "opt_ddb.h"
#include "ksyms.h"

#include <sys/param.h>
#include <sys/systm.h>

#include <uvm/uvm.h>

#include <machine/pmap.h>
#include <machine/gdt.h>
#include <xen/xenfunc.h>

#include <dev/isa/isareg.h>
#include <machine/isa_machdep.h>

#undef	XENDEBUG
/* #define XENDEBUG_SYNC */
/* #define	XENDEBUG_LOW */

#ifdef XENDEBUG
#define	XENPRINTF(x) printf x
#define	XENPRINTK(x) printk x
#define	XENPRINTK2(x) /* printk x */

static char XBUF[256];
#else
#define	XENPRINTF(x)
#define	XENPRINTK(x)
#define	XENPRINTK2(x)
#endif
#define	PRINTF(x) printf x
#define	PRINTK(x) printk x

/* On x86_64 the kernel runs in ring 3. */
#ifdef __x86_64__
#define PG_k PG_u
#else
#define PG_k 0
#endif

volatile shared_info_t *HYPERVISOR_shared_info;
/* Xen requires the start_info struct to be page aligned */
union start_info_union start_info_union __aligned(PAGE_SIZE);
unsigned long *xpmap_phys_to_machine_mapping;

void xen_failsafe_handler(void);

#define HYPERVISOR_mmu_update_self(req, count, success_count) \
	HYPERVISOR_mmu_update((req), (count), (success_count), DOMID_SELF)

void
xen_failsafe_handler(void)
{

	panic("xen_failsafe_handler called!\n");
}

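/*
 * xen_set_ldt: make the pages backing an LDT read-only (Xen refuses to
 * use a descriptor table that the guest can still write), then ask the
 * hypervisor to switch to the new LDT.
 */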
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(s);
}

#ifdef XENDEBUG
void xpq_debug_dump(void);
#endif

#define XPQUEUE_SIZE 2048
static mmu_update_t xpq_queue[XPQUEUE_SIZE];
static int xpq_idx = 0;

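/*
 * MMU update queue. In a PV guest, page-table writes must go through
 * the hypervisor; rather than paying one hypercall per write, pending
 * mmu_update_t requests are batched in xpq_queue[] and submitted with
 * a single HYPERVISOR_mmu_update() call, either explicitly through
 * xpq_flush_queue(), implicitly when the queue fills, or before any
 * operation (TLB flush, pin, pt switch, ...) that must observe the
 * queued updates.
 *
 * Illustrative use (a sketch only; ptep_ma and npte are hypothetical
 * names for a PTE's machine address and its new value):
 *
 *	int s = splvm();
 *	xpq_queue_pte_update(ptep_ma, npte);	queued, no hypercall yet
 *	xpq_queue_invlpg(va);		flushes the queue, then invlpg
 *	splx(s);
 */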
void
xpq_flush_queue(void)
{
	int i, ok, ret;

	XENPRINTK2(("flush queue %p entries %d\n", xpq_queue, xpq_idx));
	for (i = 0; i < xpq_idx; i++)
		XENPRINTK2(("%d: 0x%08" PRIx64 " 0x%08" PRIx64 "\n", i,
		    xpq_queue[i].ptr, xpq_queue[i].val));

	ret = HYPERVISOR_mmu_update_self(xpq_queue, xpq_idx, &ok);

	if (xpq_idx != 0 && ret < 0) {
		printf("xpq_flush_queue: %d entries (%d successful)\n",
		    xpq_idx, ok);
		for (i = 0; i < xpq_idx; i++)
			printf("0x%016" PRIx64 ": 0x%016" PRIx64 "\n",
			   xpq_queue[i].ptr, xpq_queue[i].val);
		panic("HYPERVISOR_mmu_update failed, ret: %d\n", ret);
	}
	xpq_idx = 0;
}

static inline void
xpq_increment_idx(void)
{

	xpq_idx++;
	if (__predict_false(xpq_idx == XPQUEUE_SIZE))
		xpq_flush_queue();
}

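/*
 * Queue an update of the machine->pseudo-physical (M2P) translation
 * for machine address "ma": the hypervisor-maintained M2P entry for
 * that frame is set to the page frame number derived from "pa" (note
 * the XPMAP_OFFSET adjustment below).
 */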
void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{
	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));
	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = (pa - XPMAP_OFFSET) >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

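/*
 * Queue a page-table entry update: "ptr" is the machine address of the
 * PTE to write and "val" the new entry. The low two bits of ptr carry
 * the request type (MMU_NORMAL_PT_UPDATE), hence the alignment KASSERT.
 */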
void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	KASSERT((ptr & 3) == 0);
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}

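/*
 * Switch to the page tables whose top level lives at machine address
 * "pa" (MMUEXT_NEW_BASEPTR, the PV equivalent of loading %cr3). The
 * queue is flushed first so all pending updates are applied before
 * the switch.
 */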
void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}

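/*
 * Pin the page-table page at machine address "pa" as a level "lvl"
 * table. "lvl" is the raw MMUEXT_PIN_L*_TABLE command value; callers
 * normally go through the xpq_queue_pin_l[234]_table() wrappers, which
 * are assumed to pass the command matching their level.
 */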
void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
	    lvl + 1, pa));

	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = lvl;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table");
}

void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}

void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}

void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}

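/*
 * Write back and flush the CPU caches via MMUEXT_FLUSH_CACHE. Runs at
 * splvm() so no new updates can be queued while the flush is issued.
 */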
void
xpq_flush_cache(void)
{
	struct mmuext_op op;
	int s = splvm();
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_flush_cache\n"));
	op.cmd = MMUEXT_FLUSH_CACHE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_flush_cache");
	splx(s);
}

void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}

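/*
 * Update a single PTE on behalf of another domain "dom" (typically for
 * grant-mapped pages). Unlike the queued variants, this performs the
 * hypercall immediately and returns EFAULT on failure instead of
 * panicking.
 */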
int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;
	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return (0);
}

#ifdef XENDEBUG
void
xpq_debug_dump(void)
{
	int i;

	XENPRINTK2(("idx: %d\n", xpq_idx));
	for (i = 0; i < xpq_idx; i++) {
		snprintf(XBUF, sizeof(XBUF), "%" PRIx64 " %08" PRIx64,
		    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		if (++i < xpq_idx)
			snprintf(XBUF + strlen(XBUF),
			    sizeof(XBUF) - strlen(XBUF),
			    "%" PRIx64 " %08" PRIx64,
			    xpq_queue[i].ptr, xpq_queue[i].val);
		XENPRINTK2(("%d: %s\n", xpq_idx, XBUF));
	}
}
#endif


extern volatile struct xencons_interface *xencons_interface; /* XXX */
extern struct xenstore_domain_interface *xenstore_interface; /* XXX */

static void xen_bt_set_readonly (vaddr_t);
static void xen_bootstrap_tables (vaddr_t, vaddr_t, int, int, int);

/* How many PDEs? */
#if L2_SLOT_KERNBASE > 0
#define TABLE_L2_ENTRIES (2 * (NKL2_KIMG_ENTRIES + 1))
#else
#define TABLE_L2_ENTRIES (NKL2_KIMG_ENTRIES + 1)
#endif

/*
 * Construct and switch to new page tables. Returns first_avail, the
 * first vaddr we can use after we get rid of the Xen bootstrap tables.
 */

vaddr_t xen_pmap_bootstrap (void);

/*
 * Function to get rid of Xen bootstrap tables
 */

/* How many PDPs do we need: */
#ifdef PAE
/*
 * For PAE, we consider a single contiguous L2 "superpage" of 4 pages,
 * all of them mapped by the L3 page. We also need a shadow page
 * for L3[3], so: 4 L2 pages + 1 shadow L2 + the L3 page itself = 6.
 */
static const int l2_4_count = 6;
#else
static const int l2_4_count = PTP_LEVELS - 1;
#endif

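/*
 * Bootstrapping makes two passes over xen_bootstrap_tables(): we cannot
 * rewrite the page tables we are currently running on, so we first
 * build a throwaway set in the free space above the Xen-provided tables
 * and switch to it, then rebuild the final set back at pt_base and
 * switch again, reclaiming the space used by the temporary tables.
 */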
vaddr_t
xen_pmap_bootstrap(void)
{
	int count, oldcount;
	long mapsize;
	vaddr_t bootstrap_tables, init_tables;

	xpmap_phys_to_machine_mapping =
	    (unsigned long *)xen_start_info.mfn_list;
	init_tables = xen_start_info.pt_base;
	__PRINTK(("xen_arch_pmap_bootstrap init_tables=0x%lx\n", init_tables));

	/* Space after the Xen bootstrap tables should be free */
	bootstrap_tables = xen_start_info.pt_base +
		(xen_start_info.nr_pt_frames * PAGE_SIZE);

	/*
	 * Calculate how much space we need:
	 * first, everything mapped before the Xen bootstrap tables
	 */
	mapsize = init_tables - KERNTEXTOFF;
	/* after the tables we'll have:
	 *  - UAREA
	 *  - dummy user PGD (x86_64)
	 *  - HYPERVISOR_shared_info
	 *  - ISA I/O mem (if needed)
	 */
	mapsize += UPAGES * NBPG;
#ifdef __x86_64__
	mapsize += NBPG;
#endif
	mapsize += NBPG;

#ifdef DOM0OPS
	if (xendomain_is_dom0()) {
		/* space for ISA I/O mem */
		mapsize += IOM_SIZE;
	}
#endif
	/* at this point mapsize doesn't include the table size */

#ifdef __x86_64__
	count = TABLE_L2_ENTRIES;
#else
	count = (mapsize + (NBPD_L2 - 1)) >> L2_SHIFT;
#endif /* __x86_64__ */

	/* now compute how many L2 pages we need exactly */
	XENPRINTK(("bootstrap_final mapsize 0x%lx count %d\n", mapsize, count));
	while (mapsize + (count + l2_4_count) * PAGE_SIZE + KERNTEXTOFF >
	    ((long)count << L2_SHIFT) + KERNBASE) {
		count++;
	}
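	/*
	 * The loop above iterates because the tables themselves consume
	 * (count + l2_4_count) pages of the mapped range: growing count
	 * grows the space to map, so we keep adding L2 pages until
	 * everything from KERNTEXTOFF up to and including the new tables
	 * fits in the window [KERNBASE, KERNBASE + count * NBPD_L2).
	 */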
#ifndef __x86_64__
	/*
	 * one more L2 page: we'll allocate several pages after kva_start
	 * in pmap_bootstrap() before pmap_growkernel(), which have not been
	 * counted here. It's not a big issue to allocate one more L2 as
	 * pmap_growkernel() will be called anyway.
	 */
	count++;
	nkptp[1] = count;
#endif

	/*
	 * Install the bootstrap page tables. They may need more L2 pages
	 * than the final tables will, as they are installed after (above)
	 * the final tables.
	 */
	oldcount = count;

bootstrap_again:
	XENPRINTK(("bootstrap_again oldcount %d\n", oldcount));
	/*
	 * The Xen space we'll reclaim may not be enough for our new page
	 * tables, so move the bootstrap tables up if necessary.
	 */
	if (bootstrap_tables < init_tables + ((count + l2_4_count) * PAGE_SIZE))
		bootstrap_tables = init_tables +
					((count + l2_4_count) * PAGE_SIZE);
	/* make sure we have enough to map the bootstrap_tables */
	if (bootstrap_tables + ((oldcount + l2_4_count) * PAGE_SIZE) >
	    ((long)oldcount << L2_SHIFT) + KERNBASE) {
		oldcount++;
		goto bootstrap_again;
	}

	/* Create temporary tables */
	xen_bootstrap_tables(xen_start_info.pt_base, bootstrap_tables,
		xen_start_info.nr_pt_frames, oldcount, 0);

	/* Create final tables */
	xen_bootstrap_tables(bootstrap_tables, init_tables,
	    oldcount + l2_4_count, count, 1);

	/* zero out free space after tables */
	memset((void *)(init_tables + ((count + l2_4_count) * PAGE_SIZE)), 0,
	    (UPAGES + 1) * NBPG);

	/* Finally, flush TLB. */
	xpq_queue_tlb_flush();

	return (init_tables + ((count + l2_4_count) * PAGE_SIZE));
}


/*
 * Build a new set of tables and switch to them.
 * old_count is the # of old tables (including PGD, PDTPE and PDE),
 * new_count is the # of new tables (PTE only).
 * We assume the areas don't overlap.
 */
static void
xen_bootstrap_tables (vaddr_t old_pgd, vaddr_t new_pgd,
	int old_count, int new_count, int final)
{
	pd_entry_t *pdtpe, *pde, *pte;
	pd_entry_t *cur_pgd, *bt_pgd;
	paddr_t addr;
	vaddr_t page, avail, text_end, map_end;
	int i;
	extern char __data_start;

	__PRINTK(("xen_bootstrap_tables(%#" PRIxVADDR ", %#" PRIxVADDR ","
	    " %d, %d)\n",
	    old_pgd, new_pgd, old_count, new_count));
	text_end = ((vaddr_t)&__data_start) & ~PAGE_MASK;
	/*
	 * size of R/W area after kernel text:
	 *  xencons_interface (if present)
	 *  xenstore_interface (if present)
	 *  table pages (new_count + l2_4_count entries)
	 * extra mappings (only when final is true):
	 *  UAREA
	 *  dummy user PGD (x86_64 only)/gdt page (i386 only)
	 *  HYPERVISOR_shared_info
	 *  ISA I/O mem (if needed)
	 */
	map_end = new_pgd + ((new_count + l2_4_count) * NBPG);
	if (final) {
		map_end += (UPAGES + 1) * NBPG;
		HYPERVISOR_shared_info = (shared_info_t *)map_end;
		map_end += NBPG;
	}
	/*
	 * We always set atdevbase (whether or not we are dom0), as it's
	 * used by init386 to find the first available VA. map_end is
	 * advanced only if we are dom0, so atdevbase -> atdevbase +
	 * IOM_SIZE will be mapped only in this case.
	 */
	if (final)
		atdevbase = map_end;
#ifdef DOM0OPS
	if (final && xendomain_is_dom0()) {
		/* ISA I/O mem */
		map_end += IOM_SIZE;
	}
#endif /* DOM0OPS */

	__PRINTK(("xen_bootstrap_tables text_end 0x%lx map_end 0x%lx\n",
	    text_end, map_end));
	__PRINTK(("console %#lx ", xen_start_info.console_mfn));
	__PRINTK(("xenstore %#" PRIx32 "\n", xen_start_info.store_mfn));

	/*
	 * Create bootstrap page tables. What we need:
	 * - a PGD (level 4)
	 * - a PDTPE (level 3)
	 * - a PDE (level 2)
	 * - some PTEs (level 1)
	 */

	cur_pgd = (pd_entry_t *) old_pgd;
	bt_pgd = (pd_entry_t *) new_pgd;
	memset (bt_pgd, 0, PAGE_SIZE);
	avail = new_pgd + PAGE_SIZE;
#if PTP_LEVELS > 3
	/* Install level 3 */
	pdtpe = (pd_entry_t *) avail;
	memset (pdtpe, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pdtpe) - KERNBASE;
	bt_pgd[pl4_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;

	__PRINTK(("L3 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L4[%#x]\n",
	    pdtpe, addr, bt_pgd[pl4_pi(KERNTEXTOFF)], pl4_pi(KERNTEXTOFF)));
#else
	pdtpe = bt_pgd;
#endif /* PTP_LEVELS > 3 */

#if PTP_LEVELS > 2
	/* Level 2 */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE);
	avail += PAGE_SIZE;

	addr = ((u_long) pde) - KERNBASE;
	pdtpe[pl3_pi(KERNTEXTOFF)] =
	    xpmap_ptom_masked(addr) | PG_k | PG_V | PG_RW;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    pde, addr, pdtpe[pl3_pi(KERNTEXTOFF)], pl3_pi(KERNTEXTOFF)));
#elif defined(PAE)
	/* our PAE-style level 2: 5 contiguous pages (4 L2 + 1 shadow) */
	pde = (pd_entry_t *) avail;
	memset(pde, 0, PAGE_SIZE * 5);
	avail += PAGE_SIZE * 5;
	addr = ((u_long) pde) - KERNBASE;
	/*
	 * enter the L2 pages in the L3.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		/*
		 * Xen doesn't want R/W mappings in L3 entries, it'll add
		 * them itself.
		 */
		pdtpe[i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L3[%#x]\n",
		    (vaddr_t)pde + PAGE_SIZE * i, addr, pdtpe[i], i));
	}
	addr += PAGE_SIZE;
	pdtpe[3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("L2 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
	    " -> L3[%#x]\n",
	    (vaddr_t)pde + PAGE_SIZE * 4, addr, pdtpe[3], 3));

#else /* PAE */
	pde = bt_pgd;
#endif /* PTP_LEVELS > 2 */

	/* Level 1 */
	page = KERNTEXTOFF;
	for (i = 0; i < new_count; i++) {
		vaddr_t cur_page = page;

		pte = (pd_entry_t *) avail;
		avail += PAGE_SIZE;

		memset(pte, 0, PAGE_SIZE);
		while (pl2_pi(page) == pl2_pi(cur_page)) {
			if (page >= map_end) {
				/* not mapped at all */
				pte[pl1_pi(page)] = 0;
				page += PAGE_SIZE;
				continue;
			}
			pte[pl1_pi(page)] = xpmap_ptom_masked(page - KERNBASE);
			if (page == (vaddr_t)HYPERVISOR_shared_info) {
				pte[pl1_pi(page)] = xen_start_info.shared_info;
				__PRINTK(("HYPERVISOR_shared_info "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    HYPERVISOR_shared_info, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.console.domU.mfn) {
				xencons_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.console_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xencons_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xencons_interface, pte[pl1_pi(page)]));
			}
			if ((xpmap_ptom_masked(page - KERNBASE) >> PAGE_SHIFT)
			    == xen_start_info.store_mfn) {
				xenstore_interface = (void *)page;
				pte[pl1_pi(page)] = xen_start_info.store_mfn;
				pte[pl1_pi(page)] <<= PAGE_SHIFT;
				__PRINTK(("xenstore_interface "
				    "va %#lx pte %#" PRIxPADDR "\n",
				    xenstore_interface, pte[pl1_pi(page)]));
			}
#ifdef DOM0OPS
			if (page >= (vaddr_t)atdevbase &&
			    page < (vaddr_t)atdevbase + IOM_SIZE) {
				pte[pl1_pi(page)] =
				    IOM_BEGIN + (page - (vaddr_t)atdevbase);
			}
#endif
			pte[pl1_pi(page)] |= PG_k | PG_V;
			if (page < text_end) {
				/* map kernel text RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= old_pgd
			    && page < old_pgd + (old_count * PAGE_SIZE)) {
				/* map old page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else if (page >= new_pgd &&
			    page < new_pgd + ((new_count + l2_4_count) * PAGE_SIZE)) {
				/* map new page tables RO */
				pte[pl1_pi(page)] |= 0;
			} else {
				/* map page RW */
				pte[pl1_pi(page)] |= PG_RW;
			}

			if ((page >= old_pgd && page < old_pgd + (old_count * PAGE_SIZE))
			    || page >= new_pgd) {
				__PRINTK(("va %#lx pa %#lx "
				    "entry 0x%" PRIxPADDR " -> L1[%#x]\n",
				    page, page - KERNBASE,
				    pte[pl1_pi(page)], pl1_pi(page)));
			}
			page += PAGE_SIZE;
		}

		addr = ((u_long) pte) - KERNBASE;
		pde[pl2_pi(cur_page)] =
		    xpmap_ptom_masked(addr) | PG_k | PG_RW | PG_V;
		__PRINTK(("L1 va %#lx pa %#" PRIxPADDR " entry %#" PRIxPADDR
		    " -> L2[%#x]\n",
		    pte, addr, pde[pl2_pi(cur_page)], pl2_pi(cur_page)));
		/* Mark readonly */
		xen_bt_set_readonly((vaddr_t) pte);
	}

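	/*
	 * A note on the recursive slot installed below: pointing a PD
	 * entry back at the PD itself makes the page-table pages appear
	 * in a fixed window of VA space (the PDIR_SLOT_PTE region), so
	 * pmap can reach any PTE with ordinary loads and stores.
	 */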
	/* Install recursive page tables mapping */
#ifdef PAE
	/*
	 * We need a shadow page for the kernel's L2 page.
	 * The real L2 kernel PD will be the last one (so that
	 * pde[L2_SLOT_KERN] always points to the shadow).
	 */
	memcpy(&pde[L2_SLOT_KERN + NPDPG], &pde[L2_SLOT_KERN], PAGE_SIZE);
	pmap_kl2pd = &pde[L2_SLOT_KERN + NPDPG];
	pmap_kl2paddr = (u_long)pmap_kl2pd - KERNBASE;

	/*
	 * We don't enter a recursive entry from the L3 PD. Instead,
	 * we enter the first 4 L2 pages, which includes the kernel's L2
	 * shadow. But we have to enter the shadow after switching
	 * %cr3, or Xen will refcount some PTE with the wrong type.
	 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 3; i++, addr += PAGE_SIZE) {
		pde[PDIR_SLOT_PTE + i] = xpmap_ptom_masked(addr) | PG_k | PG_V;
		__PRINTK(("pde[%d] va %#" PRIxVADDR " pa %#" PRIxPADDR
		    " entry %#" PRIxPADDR "\n",
		    (int)(PDIR_SLOT_PTE + i), pde + PAGE_SIZE * i,
		    addr, pde[PDIR_SLOT_PTE + i]));
	}
#if 0
	addr += PAGE_SIZE; /* point to shadow L2 */
	pde[PDIR_SLOT_PTE + 3] = xpmap_ptom_masked(addr) | PG_k | PG_V;
	__PRINTK(("pde[%d] va 0x%lx pa 0x%lx entry 0x%" PRIx64 "\n",
	    (int)(PDIR_SLOT_PTE + 3), pde + PAGE_SIZE * 4, (long)addr,
	    (int64_t)pde[PDIR_SLOT_PTE + 3]));
#endif
	/* Mark tables RO, and pin the kernel's shadow as L2 */
	addr = (u_long)pde - KERNBASE;
	for (i = 0; i < 5; i++, addr += PAGE_SIZE) {
		xen_bt_set_readonly(((vaddr_t)pde) + PAGE_SIZE * i);
		if (i == 2 || i == 3)
			continue;
#if 0
		__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", i, (int64_t)addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
	}
	if (final) {
		addr = (u_long)pde - KERNBASE + 3 * PAGE_SIZE;
		__PRINTK(("pin L2 %d addr %#" PRIxPADDR "\n", 2, addr));
		xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
	}
#if 0
	addr = (u_long)pde - KERNBASE + 2 * PAGE_SIZE;
	__PRINTK(("pin L2 %d addr 0x%" PRIx64 "\n", 2, (int64_t)addr));
	xpq_queue_pin_l2_table(xpmap_ptom_masked(addr));
#endif
#else /* PAE */
	/* recursive entry in higher-level PD */
	bt_pgd[PDIR_SLOT_PTE] =
	    xpmap_ptom_masked(new_pgd - KERNBASE) | PG_k | PG_V;
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] va %#" PRIxVADDR " pa %#" PRIxPADDR
	    " entry %#" PRIxPADDR "\n", new_pgd, (paddr_t)new_pgd - KERNBASE,
	    bt_pgd[PDIR_SLOT_PTE]));
	/* Mark tables RO */
	xen_bt_set_readonly((vaddr_t) pde);
#endif
#if PTP_LEVELS > 2 || defined(PAE)
	xen_bt_set_readonly((vaddr_t) pdtpe);
#endif
#if PTP_LEVELS > 3
	xen_bt_set_readonly(new_pgd);
#endif
	/* Pin the PGD */
	__PRINTK(("pin PGD: %"PRIxVADDR"\n", new_pgd - KERNBASE));
#ifdef __x86_64__
	xpq_queue_pin_l4_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#elif PAE
	xpq_queue_pin_l3_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#else
	xpq_queue_pin_l2_table(xpmap_ptom_masked(new_pgd - KERNBASE));
#endif

	/* Save phys. addr of PDP, for libkvm. */
#ifdef PAE
	PDPpaddr = (u_long)pde - KERNBASE; /* PDP is the L2 with PAE */
#else
	PDPpaddr = (u_long)new_pgd - KERNBASE;
#endif

	/* Switch to new tables */
	__PRINTK(("switch to PGD\n"));
	xpq_queue_pt_switch(xpmap_ptom_masked(new_pgd - KERNBASE));
	__PRINTK(("bt_pgd[PDIR_SLOT_PTE] now entry %#" PRIxPADDR "\n",
	    bt_pgd[PDIR_SLOT_PTE]));

#ifdef PAE
	if (final) {
		/* save the address of the L3 page */
		cpu_info_primary.ci_pae_l3_pdir = pdtpe;
		cpu_info_primary.ci_pae_l3_pdirpa = (new_pgd - KERNBASE);

		/* now enter kernel's PTE mappings */
		addr = (u_long)pde - KERNBASE + PAGE_SIZE * 3;
		xpq_queue_pte_update(
		    xpmap_ptom(((vaddr_t)&pde[PDIR_SLOT_PTE + 3]) - KERNBASE),
		    xpmap_ptom_masked(addr) | PG_k | PG_V);
		xpq_flush_queue();
	}
#endif

	/* Now we can safely reclaim the space taken by the old tables */

	__PRINTK(("unpin old PGD\n"));
	/* Unpin old PGD */
	xpq_queue_unpin_table(xpmap_ptom_masked(old_pgd - KERNBASE));
	/* Mark old tables RW */
	page = old_pgd;
	addr = (paddr_t) pde[pl2_pi(page)] & PG_FRAME;
	addr = xpmap_mtop(addr);
	pte = (pd_entry_t *) ((u_long)addr + KERNBASE);
	pte += pl1_pi(page);
	__PRINTK(("*pde %#" PRIxPADDR " addr %#" PRIxPADDR " pte %#lx\n",
	    pde[pl2_pi(page)], addr, (long)pte));
	while (page < old_pgd + (old_count * PAGE_SIZE) && page < map_end) {
		addr = xpmap_ptom(((u_long) pte) - KERNBASE);
		XENPRINTK(("addr %#" PRIxPADDR " pte %#lx "
		   "*pte %#" PRIxPADDR "\n",
		   addr, (long)pte, *pte));
		xpq_queue_pte_update(addr, *pte | PG_RW);
		page += PAGE_SIZE;
		/*
		 * Our PTEs are contiguous, so it's safe to just "++" here.
		 */
		pte++;
	}
	xpq_flush_queue();
}


/*
 * Bootstrap helper functions
 */

/*
 * Mark a page readonly
 * XXX: assuming vaddr = paddr + KERNBASE
 */

static void
xen_bt_set_readonly (vaddr_t page)
{
	pt_entry_t entry;

	entry = xpmap_ptom_masked(page - KERNBASE);
	entry |= PG_k | PG_V;

	HYPERVISOR_update_va_mapping (page, entry, UVMF_INVLPG);
}

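/*
 * Install a new user-mode page directory. On x86_64 PV the kernel runs
 * in ring 3 alongside user space, and Xen keeps separate kernel and
 * user base pointers; MMUEXT_NEW_USER_BASEPTR replaces the user one.
 * "page" is a physical address, translated to an MFN here.
 */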
#ifdef __x86_64__
void
xen_set_user_pgd(paddr_t page)
{
	struct mmuext_op op;
	int s = splvm();

	xpq_flush_queue();
	op.cmd = MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = xpmap_phys_to_machine_mapping[page >> PAGE_SHIFT];
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xen_set_user_pgd: failed to install new user page"
			" directory %#" PRIxPADDR, page);
	splx(s);
}
#endif /* __x86_64__ */
898