/*
 * Copyright (c) 1991 Regents of the University of California.
 * Copyright (c) 2003 Peter Wemm.
 * Copyright (c) 2008 The DragonFly Project.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department and William Jolitz of UUNET Technologies Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Derived from hp300 version by Mike Hibler, this version by William
 * Jolitz uses a recursive map [a pde points to the page directory] to
 * map the page tables using the pagetables themselves. This is done to
 * reduce the impact on kernel virtual memory for lots of sparse address
 * space, and to reduce the cost of memory to each process.
 *
 * from: hp300: @(#)pmap.h	7.2 (Berkeley) 12/16/90
 * from: @(#)pmap.h	7.4 (Berkeley) 5/12/91
 * $FreeBSD: src/sys/i386/include/pmap.h,v 1.65.2.3 2001/10/03 07:15:37 peter Exp $
 */

#ifndef _MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#include <cpu/pmap.h>

/*
 * Pte related macros.  This is complicated by having to deal with
 * the sign extension of the 48th bit.
 */
#define KVADDR(l4, l3, l2, l1) ( \
	((unsigned long)-1 << 47) | \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))

#define UVADDR(l4, l3, l2, l1) ( \
	((unsigned long)(l4) << PML4SHIFT) | \
	((unsigned long)(l3) << PDPSHIFT) | \
	((unsigned long)(l2) << PDRSHIFT) | \
	((unsigned long)(l1) << PAGE_SHIFT))
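
/*
 * Illustrative expansion (not part of the original header): KVADDR()
 * unconditionally ORs in the bit-47 sign extension, so it is only
 * meaningful for kernel (upper-half) addresses; UVADDR() omits it for
 * user addresses.  For example, with PML4SHIFT == 39 and the recursive
 * slot PML4PML4I (defined below) == NPML4EPG/2 == 256:
 *
 *	KVADDR(256, 0, 0, 0)
 *	    == ((unsigned long)-1 << 47) | (256UL << 39)
 *	    == 0xffff800000000000
 *
 * which is the lowest canonical kernel virtual address.
 */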

/*
 * NKPML4E is the number of PML4E slots used for KVM.  Each slot represents
 * 512GB of KVM.  A number between 1 and 128 may be specified.  To support
 * the maximum machine configuration of 64TB we recommend around
 * 16 slots (8TB of KVM).
 *
 * NOTE: We no longer hardwire NKPT, it is calculated in create_pagetables()
 */
#define NKPML4E		16
/* NKPDPE defined in vmparam.h */

/*
 * NUPDPs	512 (256 user)		number of PDPs in user page table
 * NUPDs	512 * 512		number of PDs in user page table
 * NUPTs	512 * 512 * 512		number of PTs in user page table
 * NUPTEs	512 * 512 * 512 * 512	number of PTEs in user page table
 *
 * NUPDP_USER	number of PDPs reserved for userland
 * NUPTE_USER	number of PTEs reserved for userland (big number)
 */
#define NUPDP_USER	(NPML4EPG/2)
#define NUPDP_TOTAL	(NPML4EPG)
#define NUPD_TOTAL	(NPDPEPG * NUPDP_TOTAL)
#define NUPT_TOTAL	(NPDEPG * NUPD_TOTAL)
#define NUPTE_TOTAL	((vm_pindex_t)NPTEPG * NUPT_TOTAL)
#define NUPTE_USER	((vm_pindex_t)NPTEPG * NPDEPG * NPDPEPG * NUPDP_USER)
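
/*
 * Worked numbers (illustrative, derived from the defines above with
 * 512 entries per page table page and 4KB pages):
 *
 *	NUPTE_TOTAL == 512^4 == 2^36 PTEs -> 2^36 * 4KB == 256TB
 *					     (the full 48-bit VA space)
 *	NUPTE_USER  == 512^3 * 256 == 2^35 -> 128TB (the user lower half)
 */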

/*
 * Number of 512G DMAP PML4 slots.  There are 512 slots of which 256 are
 * used by the kernel.  Of those 256 we allow up to 128 to be used by the
 * DMAP (for 64TB of RAM), leaving 128 for the kernel and other incidentals.
 */
#define	NDMPML4E	128

/*
 * The *PML4I values control the layout of virtual memory.  Each PML4
 * entry represents 512G.
 */
#define	PML4PML4I	(NPML4EPG/2)	/* Index of recursive PML4 mapping */

#define	KPML4I		(NPML4EPG-NKPML4E) /* Start of KVM */
#define	DMPML4I		(KPML4I-NDMPML4E) /* Next N*512GB down for DMAP */
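
/*
 * With the defaults above and NPML4EPG == 512 these work out to
 * (illustrative only):
 *
 *	PML4PML4I == 256	recursive self-map slot
 *	KPML4I    == 496	KVM slots 496-511 (16 x 512GB == 8TB)
 *	DMPML4I   == 368	DMAP slots 368-495 (128 x 512GB == 64TB)
 */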

/*
 * Make sure the kernel map and DMAP don't overflow the 256 kernel PML4
 * entries we have available.  Minus one for the PML4PML4I.
 */
#if NKPML4E + NDMPML4E >= 255
#error "NKPML4E or NDMPML4E is too large"
#endif

/*
 * The location of KERNBASE in the last PD of the kernel's KVM (KPML4I)
 * space.  Each PD represents 1GB.  The kernel must be placed here
 * for the compile/link options to work properly so absolute 32-bit
 * addressing can be used to access stuff.
 */
#define	KPDPI		(NPDPEPG-2)	/* kernbase at -2GB */
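
/*
 * Illustrative arithmetic: with NPDPEPG == 512, KPDPI == 510, i.e. the
 * second-to-last 1GB PD slot.  The -2GB noted above is
 * 0xffffffff80000000, which decodes to PML4 index 511 and PDP index 510
 * (KPDPI), matching the 32-bit absolute addressing requirement of the
 * kernel code model.  KERNBASE itself is defined in vmparam.h.
 */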

/*
 * The per-CPU data assumes ~64KB x SMP_MAXCPU, say up to 256 cpus
 * in the future, i.e. 16MB of space.  Each PD entry represents 2MB, so
 * use NPDEPG-8 to place the per-CPU data.
 */
#define	MPPML4I		(KPML4I + NKPML4E - 1)
#define	MPPDPI		KPDPI
#define	MPPTDI		(NPDEPG-8)
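
/*
 * Worked out (illustrative): MPPTDI == NPDEPG - 8 == 504, reserving the
 * top 8 PD entries of that 1GB region, 8 x 2MB == 16MB, for per-CPU data.
 */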

/*
 * XXX doesn't really belong here I guess...
 */
#define ISA_HOLE_START	0xa0000
#define ISA_HOLE_LENGTH	(0x100000-ISA_HOLE_START)

#ifndef LOCORE

#ifndef _SYS_TYPES_H_
#include <sys/types.h>
#endif
#ifndef _SYS_CPUMASK_H_
#include <sys/cpumask.h>
#endif
#ifndef _SYS_QUEUE_H_
#include <sys/queue.h>
#endif
#ifndef _SYS_TREE_H_
#include <sys/tree.h>
#endif
#ifndef _SYS_SPINLOCK_H_
#include <sys/spinlock.h>
#endif
#ifndef _SYS_THREAD_H_
#include <sys/thread.h>
#endif
#ifndef _MACHINE_TYPES_H_
#include <machine/types.h>
#endif
#ifndef _MACHINE_PARAM_H_
#include <machine/param.h>
#endif

/*
 * Address of current and alternate address space page table maps
 * and directories.
 */
#ifdef _KERNEL
#define	addr_PTmap	(KVADDR(PML4PML4I, 0, 0, 0))
#define	addr_PDmap	(KVADDR(PML4PML4I, PML4PML4I, 0, 0))
#define	addr_PDPmap	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, 0))
#define	addr_PML4map	(KVADDR(PML4PML4I, PML4PML4I, PML4PML4I, PML4PML4I))
#define	addr_PML4pml4e	(addr_PML4map + (PML4PML4I * sizeof(pml4_entry_t)))
#define	PTmap		((pt_entry_t *)(addr_PTmap))
#define	PDmap		((pd_entry_t *)(addr_PDmap))
#define	PDPmap		((pd_entry_t *)(addr_PDPmap))
#define	PML4map		((pd_entry_t *)(addr_PML4map))
#define	PML4pml4e	((pd_entry_t *)(addr_PML4pml4e))
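
/*
 * Illustrative sketch of how the recursive mapping can be used (the
 * classic construction, not necessarily what pmap.c does today): because
 * the PML4PML4I slot points back at the PML4 page itself, substituting it
 * at successive levels exposes the page table pages as ordinary memory,
 * so the PTE for a virtual address va would be located as
 *
 *	pte = &PTmap[(va >> PAGE_SHIFT) & ((1UL << 36) - 1)];
 *
 * where the 36-bit mask keeps the four 9-bit table indexes of va.
 */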

extern uint64_t KPDPphys;	/* phys addr of kernel level 3 */
extern uint64_t KPML4phys;	/* physical address of kernel level 4 */
#endif

/*
 * Pmap stuff
 */
struct pmap;
struct pv_entry;
struct vm_page;
struct vm_object;
struct vmspace;

/*
 * vm_page structure extension for pmap.  Track the number of pmap mappings
 * for a managed page.  Unmanaged pages do not use this field.
 */
struct md_page {
	long interlock_count;
	long writeable_count_unused;
};

#define MD_PAGE_FREEABLE(m)	\
	(((m)->flags & (PG_MAPPED | PG_WRITEABLE)) == 0)
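
/*
 * Hypothetical usage sketch (not taken from pmap.c): a page can be
 * reclaimed without further pmap-level cleanup only once no mapped or
 * writeable tracking remains on it:
 *
 *	if (MD_PAGE_FREEABLE(m))
 *		vm_page_free(m);
 */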

/*
 * vm_object's representing large mappings can contain embedded pmaps
 * to organize sharing at higher page table levels for PROT_READ and
 * PROT_READ|PROT_WRITE maps.
 */
struct md_object {
	void *dummy_unused;
};

/*
 * Each machine dependent implementation is expected to
 * keep certain statistics.  They may do this any way they
 * so choose, but are expected to return the statistics
 * in the following structure.
 *
 * NOTE: We try to match the size of the pc32 pmap with the vkernel pmap
 * so the same utilities (like 'ps') can be used on both.
 */
struct pmap_statistics {
	long resident_count;    /* # of pages mapped (total) */
	long wired_count;       /* # of pages wired */
};
typedef struct pmap_statistics *pmap_statistics_t;

struct pv_entry_rb_tree;
RB_PROTOTYPE2(pv_entry_rb_tree, pv_entry, pv_entry,
	      pv_entry_compare, vm_pindex_t);

/* Types of pmap */
#define	REGULAR_PMAP		0	/* Regular x86 */
#define	EPT_PMAP		1	/* Intel EPT */
#define	NPT_PMAP		2	/* AMD NPT/RVI */

/* Bit indexes into pmap_bits[] */
enum {
	TYPE_IDX = 0,		/* Pmap type */
	PG_V_IDX,		/* Valid */
	PG_RW_IDX,		/* Read/Write */
	PG_U_IDX,		/* User/Supervisor */
	PG_A_IDX,		/* Accessed */
	PG_M_IDX,		/* Modified/Dirty */
	PG_PS_IDX,		/* Page size */
	PG_G_IDX,		/* Global */
	PG_W_IDX,		/* Wired */
	PG_MANAGED_IDX,		/* Managed */
	PG_N_IDX,		/* Non-cacheable */
	PG_NX_IDX,		/* Non-execute */
	PG_BITS_SIZE,
};
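
/*
 * Example (illustrative): PTE bits are not tested against hard-coded x86
 * constants; they are looked up through the per-pmap translation table so
 * the same code can drive regular, EPT, and NPT page table formats, e.g.
 * a validity test becomes:
 *
 *	if (*pte & pmap->pmap_bits[PG_V_IDX])
 *		...
 */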

#define PROTECTION_CODES_SIZE	8
#define PAT_INDEX_SIZE		8

#define PM_PLACEMARKS		64		/* 16 @ 4 zones */
#define PM_NOPLACEMARK		((vm_pindex_t)-1)
#define PM_PLACEMARK_WAKEUP	((vm_pindex_t)0x8000000000000000LLU)

struct pmap {
	pml4_entry_t		*pm_pml4;	/* KVA of level 4 page table */
	pml4_entry_t		*pm_pml4_iso;	/* (isolated version) */
	struct pv_entry		*pm_pmlpv;	/* PV entry for pml4 */
	struct pv_entry		*pm_pmlpv_iso;	/* (isolated version) */
	TAILQ_ENTRY(pmap)	pm_pmnode;	/* list of pmaps */
	RB_HEAD(pv_entry_rb_tree, pv_entry) pm_pvroot;
	int			pm_count;	/* reference count */
	cpulock_t		pm_active_lock; /* interlock */
	cpumask_t		pm_active;	/* active on cpus */
	int			pm_flags;
	uint32_t		pm_softhold;
	struct pmap_statistics	pm_stats;	/* pmap statistics */
	struct spinlock		pm_spin;
	struct pv_entry		*pm_pvhint_pt;	/* pv_entry lookup hint */
	struct pv_entry		*pm_pvhint_unused;
	vm_pindex_t		pm_placemarks[PM_PLACEMARKS];
	uint64_t		pm_invgen;	/* pmap generation id */
	uint64_t		pmap_bits[PG_BITS_SIZE];
	uint64_t		protection_codes[PROTECTION_CODES_SIZE];
	pt_entry_t		pmap_cache_bits_pte[PAT_INDEX_SIZE];
	pt_entry_t		pmap_cache_bits_pde[PAT_INDEX_SIZE];
	pt_entry_t		pmap_cache_mask_pte;
	pt_entry_t		pmap_cache_mask_pde;
	int (*copyinstr)(const void *, void *, size_t, size_t *);
	int (*copyin)(const void *, void *, size_t);
	int (*copyout)(const void *, void *, size_t);
	int (*fubyte)(const uint8_t *);		/* returns int for -1 err */
	int (*subyte)(uint8_t *, uint8_t);
	int32_t (*fuword32)(const uint32_t *);
	int64_t (*fuword64)(const uint64_t *);
	int (*suword64)(uint64_t *, uint64_t);
	int (*suword32)(uint32_t *, int);
	uint32_t (*swapu32)(volatile uint32_t *, uint32_t v);
	uint64_t (*swapu64)(volatile uint64_t *, uint64_t v);
	uint32_t (*fuwordadd32)(volatile uint32_t *, uint32_t v);
	uint64_t (*fuwordadd64)(volatile uint64_t *, uint64_t v);
};

#define PMAP_FLAG_SIMPLE	0x00000001
#define PMAP_EMULATE_AD_BITS	0x00000002	/* emulate A/D bits for EPT */
#define PMAP_HVM		0x00000004	/* hardware virtual machine */
#define PMAP_SEGSHARED		0x00000008	/* segment shared opt */
#define PMAP_MULTI		0x00000010	/* multi-threaded use */

#define pmap_resident_count(pmap)	\
	((pmap)->pm_stats.resident_count)
#define pmap_resident_tlnw_count(pmap)	\
	((pmap)->pm_stats.resident_count - (pmap)->pm_stats.wired_count)
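
/*
 * Usage note (illustrative): pmap_resident_tlnw_count() is simply the
 * resident count with wired pages subtracted, i.e. the mappings that
 * remain eligible for teardown or pageout.
 */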

typedef struct pmap	*pmap_t;

#ifdef _KERNEL
extern struct pmap	*kernel_pmap;
#endif

/*
 * The pv_entry structure is used to track higher levels of the page table.
 * The leaf PTE is no longer tracked with this structure.
 */
typedef struct pv_entry {
	pmap_t		pv_pmap;	/* pmap where mapping lies */
	vm_pindex_t	pv_pindex;	/* PTE, PT, PD, PDP, or PML4 */
	RB_ENTRY(pv_entry) pv_entry;
	struct vm_page	*pv_m;		/* page being mapped */
	u_int		pv_hold;	/* interlock action */
	u_int		pv_flags;
#ifdef PMAP_DEBUG
	const char	*pv_func;
	int		pv_line;
	const char	*pv_func_lastfree;
	int		pv_line_lastfree;
#endif
} *pv_entry_t;

#define PV_HOLD_LOCKED		0x80000000U
#define PV_HOLD_WAITING		0x40000000U
#define PV_HOLD_UNUSED2000	0x20000000U
#define PV_HOLD_MASK		0x1FFFFFFFU

#define PV_FLAG_UNUSED01	0x00000001U
#define PV_FLAG_UNUSED02	0x00000002U

#ifdef	_KERNEL

extern caddr_t CADDR1;
extern pt_entry_t *CMAP1;
extern vm_paddr_t avail_end;
extern vm_paddr_t avail_start;
extern vm_offset_t clean_eva;
extern vm_offset_t clean_sva;
extern char *ptvmmap;		/* poor name! */

#ifndef __VM_PAGE_T_DEFINED__
#define __VM_PAGE_T_DEFINED__
typedef struct vm_page *vm_page_t;
#endif
#ifndef __VM_MEMATTR_T_DEFINED__
#define __VM_MEMATTR_T_DEFINED__
typedef char vm_memattr_t;
#endif

void	pmap_release(struct pmap *pmap);
void	pmap_interlock_wait (struct vmspace *);
void	pmap_bootstrap (vm_paddr_t *);
void	*pmap_mapbios(vm_paddr_t, vm_size_t);
void	*pmap_mapdev (vm_paddr_t, vm_size_t);
void	*pmap_mapdev_attr(vm_paddr_t, vm_size_t, int);
void	*pmap_mapdev_uncacheable(vm_paddr_t, vm_size_t);
void	pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma);
void	pmap_unmapdev (vm_offset_t, vm_size_t);
struct vm_page *pmap_use_pt (pmap_t, vm_offset_t);
void	pmap_set_opt (void);
void	pmap_init_pat(void);
void	pmap_invalidate_cache_pages(vm_page_t *pages, int count);
void	pmap_invalidate_cache_range(vm_offset_t sva, vm_offset_t eva);

static __inline int
pmap_emulate_ad_bits(pmap_t pmap) {
	return pmap->pm_flags & PMAP_EMULATE_AD_BITS;
}

#endif /* _KERNEL */

#endif /* !LOCORE */

#endif /* !_MACHINE_PMAP_H_ */