/*	$NetBSD: pmap.h,v 1.8 2005/12/24 20:06:47 perry Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef	_AMD64_PMAP_H_
#define	_AMD64_PMAP_H_

#ifndef _LOCORE
#if defined(_KERNEL_OPT)
#include "opt_largepages.h"
#endif

#include <machine/cpufunc.h>
#include <machine/pte.h>
#include <machine/segments.h>
#include <uvm/uvm_object.h>
#endif

/*
 * The x86_64 pmap module closely resembles the i386 one. It uses
 * the same recursive entry scheme, and the same alternate area
 * trick for accessing non-current pmaps. See the i386 pmap.h
 * for a description. The obvious difference is that 3 extra
 * levels of page table need to be dealt with. The level 1 page
 * table pages are at:
 *
 * l1: 0x00007f8000000000 - 0x00007fffffffffff     (39 bits, needs PML4 entry)
 *
 * The alternate space is at:
 *
 * l1: 0xffffff8000000000 - 0xffffffffffffffff     (39 bits, needs PML4 entry)
 *
 * The rest is kept as physical pages in 3 UVM objects, and is
 * temporarily mapped for virtual access when needed.
 *
 * Note that the address space is signed, so the layout for 48 bits is:
 *
 *  +---------------------------------+ 0xffffffffffffffff
 *  |                                 |
 *  |    alt.L1 table (PTE pages)     |
 *  |                                 |
 *  +---------------------------------+ 0xffffff8000000000
 *  ~                                 ~
 *  |                                 |
 *  |         Kernel Space            |
 *  |                                 |
 *  |                                 |
 *  +---------------------------------+ 0xffff800000000000 = 0x0000800000000000
 *  |                                 |
 *  |    L1 table (PTE pages)         |
 *  |                                 |
 *  +---------------------------------+ 0x00007f8000000000
 *  ~                                 ~
 *  |                                 |
 *  |         User Space              |
 *  |                                 |
 *  |                                 |
 *  +---------------------------------+ 0x0000000000000000
 *
 * In other words, there is a 'VA hole' at 0x0000800000000000 -
 * 0xffff800000000000 which will trap, just as it does on, for
 * example, sparcv9.
 *
 * The unused space can be used if needed, but it adds a little more
 * complexity to the calculations.
 */
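/*
 * Worked example: each PML4 slot maps NBPD_L4 = 2^39 bytes, and the
 * PTE space is reached through recursive slot 255 (L4_SLOT_PTE), so
 *
 *	PTE_BASE = 255 * 2^39 = 0x00007f8000000000
 *
 * which is exactly the start of the l1 range given above.
 */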

/*
 * The first generation of Hammer processors supports 48 bits of
 * virtual address space and 40 bits of physical address space. Later
 * generations will support more. These defines could be replaced by
 * variables holding the number of bits, extracted with an extended
 * cpuid instruction (variables are harder to use during bootstrap,
 * though).
 */
#define VIRT_BITS	48
#define PHYS_BITS	40
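
/*
 * A sketch of how the widths could be read at run time, as suggested
 * above: extended cpuid leaf 0x80000008 returns the physical address
 * bits in EAX[7:0] and the virtual address bits in EAX[15:8].  The
 * x86_cpuid() wrapper below is an assumption for illustration; the
 * bootstrap code would need something equivalent.
 */
#if 0
static void
pmap_addr_bits(int *virt_bits, int *phys_bits)
{
	u_int32_t regs[4];

	x86_cpuid(0x80000008, regs);		/* hypothetical wrapper */
	*phys_bits = regs[0] & 0xff;		/* EAX[7:0] */
	*virt_bits = (regs[0] >> 8) & 0xff;	/* EAX[15:8] */
}
#endif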

/*
 * Mask to get rid of the sign-extended part of addresses.
 */
#define VA_SIGN_MASK		0xffff000000000000
#define VA_SIGN_NEG(va)		((va) | VA_SIGN_MASK)
/*
 * XXXfvdl this one's not right.
 */
#define VA_SIGN_POS(va)		((va) & ~VA_SIGN_MASK)
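
/*
 * Example: VA_SIGN_NEG(510 * NBPD_L4) = 0x0000ff0000000000 | VA_SIGN_MASK
 * = 0xffffff0000000000, which is how APTE_BASE below gets its canonical
 * (sign-extended) form.
 */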

#define L4_SLOT_PTE		255
#define L4_SLOT_KERN		256
#define L4_SLOT_KERNBASE	511
#define L4_SLOT_APTE		510

#define PDIR_SLOT_KERN	L4_SLOT_KERN
#define PDIR_SLOT_PTE	L4_SLOT_PTE
#define PDIR_SLOT_APTE	L4_SLOT_APTE

/*
 * the following defines give the virtual addresses of various MMU
 * data structures:
 * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
 * PDP_BASE and APDP_BASE: the base VA of the recursive mapping of the PDP
 * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
 *
 */

#define PTE_BASE  ((pt_entry_t *) (L4_SLOT_PTE * NBPD_L4))
#define APTE_BASE ((pt_entry_t *) (VA_SIGN_NEG((L4_SLOT_APTE * NBPD_L4))))

#define L1_BASE		PTE_BASE
#define AL1_BASE	APTE_BASE

#define L2_BASE ((pd_entry_t *)((char *)L1_BASE + L4_SLOT_PTE * NBPD_L3))
#define L3_BASE ((pd_entry_t *)((char *)L2_BASE + L4_SLOT_PTE * NBPD_L2))
#define L4_BASE ((pd_entry_t *)((char *)L3_BASE + L4_SLOT_PTE * NBPD_L1))

#define AL2_BASE ((pd_entry_t *)((char *)AL1_BASE + L4_SLOT_PTE * NBPD_L3))
#define AL3_BASE ((pd_entry_t *)((char *)AL2_BASE + L4_SLOT_PTE * NBPD_L2))
#define AL4_BASE ((pd_entry_t *)((char *)AL3_BASE + L4_SLOT_PTE * NBPD_L1))

#define PDP_PDE		(L4_BASE + PDIR_SLOT_PTE)
#define APDP_PDE	(L4_BASE + PDIR_SLOT_APTE)

#define PDP_BASE	L4_BASE
#define APDP_BASE	AL4_BASE
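
/*
 * The stride shrinks by a factor of 512 per level because each base
 * goes through recursive slot 255 once more, e.g.
 *
 *	L2_BASE = PTE_BASE + L4_SLOT_PTE * NBPD_L3
 *
 * so the L2 entry covering a VA lives at L2_BASE + pl2_i(va).
 */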

#define NKL4_MAX_ENTRIES	(unsigned long)1
#define NKL3_MAX_ENTRIES	(unsigned long)(NKL4_MAX_ENTRIES * 512)
#define NKL2_MAX_ENTRIES	(unsigned long)(NKL3_MAX_ENTRIES * 512)
#define NKL1_MAX_ENTRIES	(unsigned long)(NKL2_MAX_ENTRIES * 512)

#define NKL4_KIMG_ENTRIES	1
#define NKL3_KIMG_ENTRIES	1
#define NKL2_KIMG_ENTRIES	8

/*
 * Since kva space is below the kernel in its entirety, we start off
 * with zero entries on each level.
 */
#define NKL4_START_ENTRIES	0
#define NKL3_START_ENTRIES	0
#define NKL2_START_ENTRIES	0
#define NKL1_START_ENTRIES	0	/* XXX */

#define NTOPLEVEL_PDES		(PAGE_SIZE / (sizeof (pd_entry_t)))

#define KERNSPACE		(NKL4_ENTRIES * NBPD_L4)

#define NPDPG			(PAGE_SIZE / sizeof (pd_entry_t))

#define ptei(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
        (((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])
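
/*
 * Worked example, for the base of the kernel image mapped by L4 slot
 * 511 (va = 0xffffffff80000000):
 *
 *	VA_SIGN_POS(va)			= 0x0000ffff80000000
 *	(... & L4_FRAME) >> L4_SHIFT	= 0x1ff = 511 = L4_SLOT_KERNBASE
 *
 * Note that bit 47 survives VA_SIGN_POS, which is what makes kernel
 * VAs index the upper half of the tables.
 */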

#define PTP_MASK_INITIALIZER	{ L1_FRAME, L2_FRAME, L3_FRAME, L4_FRAME }
#define PTP_SHIFT_INITIALIZER	{ L1_SHIFT, L2_SHIFT, L3_SHIFT, L4_SHIFT }
#define NKPTP_INITIALIZER	{ NKL1_START_ENTRIES, NKL2_START_ENTRIES, \
				  NKL3_START_ENTRIES, NKL4_START_ENTRIES }
#define NKPTPMAX_INITIALIZER	{ NKL1_MAX_ENTRIES, NKL2_MAX_ENTRIES, \
				  NKL3_MAX_ENTRIES, NKL4_MAX_ENTRIES }
#define NBPD_INITIALIZER	{ NBPD_L1, NBPD_L2, NBPD_L3, NBPD_L4 }
#define PDES_INITIALIZER	{ L2_BASE, L3_BASE, L4_BASE }
#define APDES_INITIALIZER	{ AL2_BASE, AL3_BASE, AL4_BASE }

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 *
 * note that PAGE_SIZE == number of bytes in a PTP (4096 bytes == 512 entries)
 *           NBPD_L1 == number of bytes a level 1 PTP can map (2MB)
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
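
/*
 * For example, ptp_va2o(va, 1) = pl2_i(va) * PAGE_SIZE: the byte
 * offset of the L1 PTP that maps va, used as the page's offset in
 * the corresponding uvm_object.
 */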

#define PTP_LEVELS	4

/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_W		PG_AVAIL1	/* "wired" mapping */
#define PG_PVLIST	PG_AVAIL2	/* mapping has entry on pvlist */
/* PG_AVAIL3 not used */

/*
 * Number of PTEs per cache line (8-byte PTEs, 64-byte cache line).
 * Used to avoid false sharing of cache lines.
 */
#define NPTECL		8


#if defined(_KERNEL) && !defined(_LOCORE)
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the spinlock for vm object 0. Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1 */
#define	pm_lock	pm_obj[0].vmobjlock
#define pm_obj_l1 pm_obj[0]
#define pm_obj_l2 pm_obj[1]
#define pm_obj_l3 pm_obj[2]
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
	u_int32_t pm_cpus;		/* mask of CPUs using pmap */
};

/* pm_flags */
#define	PMF_USER_LDT	0x01	/* pmap has user-set LDT */

/*
 * for each managed physical page we maintain a list of <PMAP,VA>'s
 * which it is mapped at.  the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry {                       /* locked by its list's pvh_lock */
        SPLAY_ENTRY(pv_entry) pv_node;  /* splay-tree node */
        struct pmap *pv_pmap;           /* the pmap */
        vaddr_t pv_va;                  /* the virtual address */
        struct vm_page *pv_ptp;         /* the vm_page of the PTP */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where PAGE_SIZE isn't a constant)
 */

#define PVE_PER_PVPAGE ((PAGE_SIZE - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))
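
/*
 * Illustrative arithmetic (exact sizes are up to the compiler): on
 * LP64, sizeof(struct pv_page_info) is 32 and sizeof(struct pv_entry)
 * is 40, so a 4096-byte page yields (4096 - 32) / 40 = 101 pv_entrys.
 */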

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

/*
 * pmap_remove_record: a record of VAs that have been unmapped, used to
 * flush TLB.  if we have more than PMAP_RR_MAX then we stop recording.
 */

#define PMAP_RR_MAX	16	/* max of 16 pages (64K) */

struct pmap_remove_record {
	int prr_npages;
	vaddr_t prr_vas[PMAP_RR_MAX];
};

/*
 * global kernel variables
 */

/* PTDpaddr: the physical address of the kernel's PDP */
extern u_long PTDpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int pmap_pg_g;			/* do we support PG_G? */

extern paddr_t ptp_masks[];
extern int ptp_shifts[];
extern long nkptp[], nbpd[], nkptpmax[];

/*
 * macros
 */

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define	pmap_update(pmap)		/* nothing (yet) */

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		ptob(ppn)
#define pmap_valid_entry(E) 		((E) & PG_V) /* is PDE or PTE valid? */


/*
 * prototypes
 */

void		pmap_activate __P((struct lwp *));
void		pmap_bootstrap __P((vaddr_t));
boolean_t	pmap_clear_attrs __P((struct vm_page *, unsigned));
void		pmap_deactivate __P((struct lwp *));
static void	pmap_page_protect __P((struct vm_page *, vm_prot_t));
void		pmap_page_remove  __P((struct vm_page *));
static void	pmap_protect __P((struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t));
void		pmap_remove __P((struct pmap *, vaddr_t, vaddr_t));
boolean_t	pmap_test_attrs __P((struct vm_page *, unsigned));
static void	pmap_update_pg __P((vaddr_t));
static void	pmap_update_2pg __P((vaddr_t,vaddr_t));
void		pmap_write_protect __P((struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t));
void		pmap_changeprot_local(vaddr_t, vm_prot_t);

vaddr_t reserve_dumppages __P((vaddr_t)); /* XXX: not a pmap fn */

void	pmap_tlb_shootdown __P((pmap_t, vaddr_t, pt_entry_t, int32_t *));
void	pmap_tlb_shootnow __P((int32_t));
void	pmap_do_tlb_shootdown __P((struct cpu_info *));
void	pmap_prealloc_lowmem_ptps __P((void));

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
boolean_t	pmap_pageidlezero __P((paddr_t));
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

static inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 *	if hardware doesn't support one-page flushing)
 */

inline static void
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

inline static void
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
	invlpg(va);
	invlpg(vb);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

inline static void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

inline static void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static inline pt_entry_t *
vtopte(vaddr_t va)
{

	KASSERT(va < (L4_SLOT_KERN * NBPD_L4));

	return (PTE_BASE + pl1_i(va));
}

static inline pt_entry_t *
kvtopte(vaddr_t va)
{

	KASSERT(va >= (L4_SLOT_KERN * NBPD_L4));

#ifdef LARGEPAGES
	{
		pd_entry_t *pde;

		pde = L2_BASE + pl2_i(va);
		if (*pde & PG_PS)
			return ((pt_entry_t *)pde);
	}
#endif

	return (PTE_BASE + pl1_i(va));
}
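
/*
 * Example use (a sketch, not part of the original interface): look up
 * the PTE mapping a kernel VA and test it for validity.
 */
#if 0
static inline boolean_t
kva_is_mapped(vaddr_t va)
{
	pt_entry_t *pte = kvtopte(va);

	/* With LARGEPAGES, pte may really point at a PG_PS L2 entry. */
	return (pmap_valid_entry(*pte));
}
#endif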

#define pmap_pte_set(p, n)		x86_atomic_testset_u64(p, n)
#define pmap_pte_clearbits(p, b)	x86_atomic_clearbits_u64(p, b)
#define pmap_cpu_has_pg_n()		(1)
#define pmap_cpu_has_invlpg		(1)

paddr_t vtophys __P((vaddr_t));
vaddr_t	pmap_map __P((vaddr_t, paddr_t, paddr_t, vm_prot_t));

#if 0   /* XXXfvdl was USER_LDT, need to check if that can be supported */
void	pmap_ldt_cleanup __P((struct lwp *));
#define	PMAP_FORK
#endif /* USER_LDT */

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#endif /* _KERNEL && !_LOCORE */
#endif	/* _AMD64_PMAP_H_ */