/*	$OpenBSD: pmap.h,v 1.82 2016/03/15 03:17:51 guenther Exp $	*/
/*	$NetBSD: pmap.h,v 1.44 2000/04/24 17:18:18 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef	_MACHINE_PMAP_H_
#define	_MACHINE_PMAP_H_

#ifdef _KERNEL
#include <machine/cpufunc.h>
#include <machine/segments.h>
#endif
#include <sys/mutex.h>
#include <uvm/uvm_object.h>
#include <machine/pte.h>

#define	PDSLOT_PTE	((KERNBASE/NBPD)-2) /* 830: for recursive PDP map */
#define	PDSLOT_KERN	(KERNBASE/NBPD) /* 832: start of kernel space */
#define	PDSLOT_APTE	((unsigned)1022) /* 1022: alternative recursive slot */

/*
 * The following define determines how many PTPs should be set up for the
 * kernel by locore.s at boot time.  This should be large enough to
 * get the VM system running.  Once the VM system is running, the
 * pmap module can add more PTPs to the kernel area on demand.
 */

#ifndef	NKPTP
#define	NKPTP		8	/* 16/32MB to start */
#endif
#define	NKPTP_MIN	4	/* smallest value we allow */
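
/*
 * Worked example (illustrative): a kernel PTP maps one page table's
 * worth of VA -- 4MB in non-PAE mode (1024 4-byte PTEs per page) or
 * 2MB in PAE mode (512 8-byte PTEs per page).  NKPTP == 8 therefore
 * gives the kernel 8 * 2MB = 16MB (PAE) or 8 * 4MB = 32MB (non-PAE)
 * to start, which is the "16/32MB" figure above.
 */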

/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_W		PG_AVAIL1	/* "wired" mapping */
#define PG_PVLIST	PG_AVAIL2	/* mapping has entry on pvlist */
#define	PG_X		PG_AVAIL3	/* executable mapping */
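
/*
 * Illustrative sketch (assumption, not in this header): the software
 * bits are tested and set on raw PTE values just like the hardware
 * bits, e.g. through the pte accessor hooks defined later in this
 * file (assuming the second and third pmap_pte_setbits() arguments
 * are the set and clear masks):
 *
 *	if (pmap_pte_bits(va) & PG_W)
 *		... the mapping at va is wired ...
 *	pmap_pte_setbits(va, PG_W, 0);
 */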

#define PTP0_PA             (PAGE_SIZE * 3)

#ifdef _KERNEL
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * We maintain a list of all non-kernel pmaps.
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * The pmap structure
 *
 * Note that the pm_obj contains the reference count,
 * page list, and number of PTPs within the pmap.
 */

struct pmap {
	uint64_t pm_pdidx[4];		/* PDPT entries for PAE mode */

	struct mutex pm_mtx;
	struct mutex pm_apte_mtx;

	paddr_t pm_pdirpa;		/* PA of PD (read-only after create) */
	vaddr_t pm_pdir;		/* VA of PD (lck by object lock) */
	int	pm_pdirsize;		/* PD size (4k vs 16k on PAE) */
	struct uvm_object pm_obj;	/* object (lck by object lock) */
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	struct vm_page *pm_ptphint;	/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

	vaddr_t pm_hiexec;		/* highest executable mapping */
	int pm_flags;			/* see below */

	struct segment_descriptor pm_codeseg;	/* cs descriptor for process */
};
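
/*
 * Illustrative sketch (assumption, see pmap_switch() in pmap.c for the
 * real code): in non-PAE mode a pmap is activated by pointing %cr3 at
 * its page directory, e.g. on a context switch to process p:
 *
 *	struct pmap *pmap = p->p_vmspace->vm_map.pmap;
 *
 *	lcr3(pmap->pm_pdirpa);
 */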

/*
 * For each managed physical page we maintain a list of <PMAP,VA>s
 * which it is mapped at.  The list is headed by a pv_head structure.
 * There is one pv_head per managed phys page (allocated at boot time).
 * The pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry *pv_next;	/* next entry */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};
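
/*
 * Illustrative sketch (assumption, not part of this header): finding
 * every mapping of a managed page is a walk of its pv list, with the
 * page's pv mutex held:
 *
 *	struct pv_entry *pve;
 *
 *	mtx_enter(&pg->mdpage.pv_mtx);
 *	for (pve = pg->mdpage.pv_list; pve != NULL; pve = pve->pv_next)
 *		... operate on the <pve->pv_pmap, pve->pv_va> mapping ...
 *	mtx_leave(&pg->mdpage.pv_mtx);
 */
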
/*
 * MD flags to pmap_enter:
 */

/* to get just the pa from params to pmap_enter */
#define PMAP_PA_MASK	~((paddr_t)PAGE_MASK)
#define	PMAP_NOCACHE	0x1		/* map uncached */
#define	PMAP_WC		0x2		/* map write combining. */

/*
 * We keep mod/ref flags in struct vm_page->pg_flags.
 */
#define	PG_PMAP_MOD	PG_PMAP0
#define	PG_PMAP_REF	PG_PMAP1
#define	PG_PMAP_WC	PG_PMAP2

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * We keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  There is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entries in a pv_page
 */

#define PVE_PER_PVPAGE ((PAGE_SIZE - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))
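
/*
 * Worked example (illustrative, assuming ILP32 i386 sizes): with
 * PAGE_SIZE == 4096, sizeof(struct pv_page_info) == 16 (two TAILQ
 * pointers, one pointer, one int) and sizeof(struct pv_entry) == 16
 * (four pointer-sized fields), this comes to (4096 - 16) / 16 == 255
 * pv_entrys per pv_page.
 */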

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

extern char PTD[];
extern struct pmap kernel_pmap_store; /* kernel pmap */
extern int nkptp_max;

#define PMAP_REMOVE_ALL 0
#define PMAP_REMOVE_SKIPWIRED 1

extern struct pool pmap_pv_pool;

/*
 * Macros
 */

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_update(pm)			/* nada */

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */

#define pmap_proc_iflush(p,va,len)	/* nothing */
#define pmap_unuse_final(p)		/* nothing */
#define	pmap_remove_holes(vm)		do { /* nothing */ } while (0)

/*
 * Prototypes
 */

vaddr_t pmap_tmpmap_pa(paddr_t);
void pmap_tmpunmap_pa(void);

void pmap_bootstrap(vaddr_t);
void pmap_bootstrap_pae(void);
void pmap_virtual_space(vaddr_t *, vaddr_t *);
void pmap_init(void);
struct pmap *pmap_create(void);
void pmap_destroy(struct pmap *);
void pmap_reference(struct pmap *);
void pmap_remove(struct pmap *, vaddr_t, vaddr_t);
void pmap_collect(struct pmap *);
void pmap_activate(struct proc *);
void pmap_deactivate(struct proc *);
void pmap_kenter_pa(vaddr_t, paddr_t, vm_prot_t);
void pmap_kremove(vaddr_t, vsize_t);
void pmap_zero_page(struct vm_page *);
void pmap_copy_page(struct vm_page *, struct vm_page *);
void pmap_enter_pv(struct vm_page *, struct pv_entry *,
    struct pmap *, vaddr_t, struct vm_page *);
boolean_t pmap_clear_attrs(struct vm_page *, int);
static void pmap_page_protect(struct vm_page *, vm_prot_t);
void pmap_page_remove(struct vm_page *);
static void pmap_protect(struct pmap *, vaddr_t,
    vaddr_t, vm_prot_t);
boolean_t pmap_test_attrs(struct vm_page *, int);
void pmap_write_protect(struct pmap *, vaddr_t,
    vaddr_t, vm_prot_t);
int pmap_exec_fixup(struct vm_map *, struct trapframe *,
    struct pcb *);
void pmap_exec_account(struct pmap *, vaddr_t, u_int32_t,
    u_int32_t);
struct pv_entry *pmap_remove_pv(struct vm_page *, struct pmap *, vaddr_t);
void pmap_apte_flush(void);
void pmap_switch(struct proc *, struct proc *);
vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */
paddr_t vtophys(vaddr_t va);
paddr_t vtophys_pae(vaddr_t va);

extern u_int32_t (*pmap_pte_set_p)(vaddr_t, paddr_t, u_int32_t);
extern u_int32_t (*pmap_pte_setbits_p)(vaddr_t, u_int32_t, u_int32_t);
extern u_int32_t (*pmap_pte_bits_p)(vaddr_t);
extern paddr_t (*pmap_pte_paddr_p)(vaddr_t);
extern boolean_t (*pmap_clear_attrs_p)(struct vm_page *, int);
extern int (*pmap_enter_p)(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
extern boolean_t (*pmap_extract_p)(pmap_t, vaddr_t, paddr_t *);
extern vaddr_t (*pmap_growkernel_p)(vaddr_t);
extern void (*pmap_page_remove_p)(struct vm_page *);
extern void (*pmap_do_remove_p)(struct pmap *, vaddr_t, vaddr_t, int);
extern boolean_t (*pmap_test_attrs_p)(struct vm_page *, int);
extern void (*pmap_unwire_p)(struct pmap *, vaddr_t);
extern void (*pmap_write_protect_p)(struct pmap*, vaddr_t, vaddr_t, vm_prot_t);
extern void (*pmap_pinit_pd_p)(pmap_t);
extern void (*pmap_zero_phys_p)(paddr_t);
extern boolean_t (*pmap_zero_page_uncached_p)(paddr_t);
extern void (*pmap_copy_page_p)(struct vm_page *, struct vm_page *);

u_int32_t pmap_pte_set_pae(vaddr_t, paddr_t, u_int32_t);
u_int32_t pmap_pte_setbits_pae(vaddr_t, u_int32_t, u_int32_t);
u_int32_t pmap_pte_bits_pae(vaddr_t);
paddr_t pmap_pte_paddr_pae(vaddr_t);
boolean_t pmap_clear_attrs_pae(struct vm_page *, int);
int pmap_enter_pae(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
boolean_t pmap_extract_pae(pmap_t, vaddr_t, paddr_t *);
vaddr_t pmap_growkernel_pae(vaddr_t);
void pmap_page_remove_pae(struct vm_page *);
void pmap_do_remove_pae(struct pmap *, vaddr_t, vaddr_t, int);
boolean_t pmap_test_attrs_pae(struct vm_page *, int);
void pmap_unwire_pae(struct pmap *, vaddr_t);
void pmap_write_protect_pae(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void pmap_pinit_pd_pae(pmap_t);
void pmap_zero_phys_pae(paddr_t);
boolean_t pmap_zero_page_uncached_pae(paddr_t);
void pmap_copy_page_pae(struct vm_page *, struct vm_page *);
void pae_copy_phys(paddr_t, paddr_t, int, int);

#define	pmap_pte_set		(*pmap_pte_set_p)
#define	pmap_pte_setbits	(*pmap_pte_setbits_p)
#define	pmap_pte_bits		(*pmap_pte_bits_p)
#define	pmap_pte_paddr		(*pmap_pte_paddr_p)
#define	pmap_clear_attrs	(*pmap_clear_attrs_p)
#define	pmap_page_remove	(*pmap_page_remove_p)
#define	pmap_do_remove		(*pmap_do_remove_p)
#define	pmap_test_attrs		(*pmap_test_attrs_p)
#define	pmap_unwire		(*pmap_unwire_p)
#define	pmap_write_protect	(*pmap_write_protect_p)
#define	pmap_pinit_pd		(*pmap_pinit_pd_p)
#define	pmap_zero_phys		(*pmap_zero_phys_p)
#define	pmap_zero_page_uncached	(*pmap_zero_page_uncached_p)
#define	pmap_copy_page		(*pmap_copy_page_p)

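/*
 * Illustrative sketch (assumption, see pmap_bootstrap_pae() in pmap.c
 * for the real code): the *_p function pointers start out pointing at
 * the non-PAE *_86 implementations below and are repointed wholesale
 * when PAE mode is enabled, e.g.:
 *
 *	pmap_pte_set_p = pmap_pte_set_pae;
 *	pmap_enter_p = pmap_enter_pae;
 *	pmap_extract_p = pmap_extract_pae;
 *	...
 *
 * so callers of the pmap_* aliases above never need to know which
 * page table format is live.
 */
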
301 
302 u_int32_t pmap_pte_set_86(vaddr_t, paddr_t, u_int32_t);
303 u_int32_t pmap_pte_setbits_86(vaddr_t, u_int32_t, u_int32_t);
304 u_int32_t pmap_pte_bits_86(vaddr_t);
305 paddr_t pmap_pte_paddr_86(vaddr_t);
306 boolean_t pmap_clear_attrs_86(struct vm_page *, int);
307 int pmap_enter_86(pmap_t, vaddr_t, paddr_t, vm_prot_t, int);
308 boolean_t pmap_extract_86(pmap_t, vaddr_t, paddr_t *);
309 vaddr_t pmap_growkernel_86(vaddr_t);
310 void pmap_page_remove_86(struct vm_page *);
311 void pmap_do_remove_86(struct pmap *, vaddr_t, vaddr_t, int);
312 boolean_t pmap_test_attrs_86(struct vm_page *, int);
313 void pmap_unwire_86(struct pmap *, vaddr_t);
314 void pmap_write_protect_86(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
315 void pmap_pinit_pd_86(pmap_t);
316 void pmap_zero_phys_86(paddr_t);
317 boolean_t pmap_zero_page_uncached_86(paddr_t);
318 void pmap_copy_page_86(struct vm_page *, struct vm_page *);
319 void pmap_tlb_shootpage(struct pmap *, vaddr_t);
320 void pmap_tlb_shootrange(struct pmap *, vaddr_t, vaddr_t);
321 void pmap_tlb_shoottlb(void);
322 #ifdef MULTIPROCESSOR
323 void pmap_tlb_droppmap(struct pmap *);
324 void pmap_tlb_shootwait(void);
325 #else
326 #define pmap_tlb_shootwait()
327 #endif
328 
329 void pmap_prealloc_lowmem_ptp(void);
330 void pmap_prealloc_lowmem_ptp_pae(void);
vaddr_t pmap_tmpmap_pa_pae(paddr_t);
void pmap_tmpunmap_pa_pae(void);

/*
 * Functions for flushing the cache for vaddrs and pages.
 * These functions are not part of the MI pmap interface and thus
 * should not be used as such.
 */
void pmap_flush_cache(vaddr_t, vsize_t);
void pmap_flush_page(paddr_t);
void pmap_flush_page_pae(paddr_t);

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
#define	PMAP_PAGEIDLEZERO(pg)	pmap_zero_page_uncached(VM_PAGE_TO_PHYS(pg))

/*
 * Inline functions
 */

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 *	if hardware doesn't support one-page flushing)
 */

#define pmap_update_pg(va)	invlpg((u_int)(va))

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

#define pmap_update_2pg(va, vb)	do {				\
	invlpg((u_int)(va));						\
	invlpg((u_int)(vb));						\
} while (0)
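
/*
 * Illustrative usage (assumption): after modifying a live PTE the
 * stale translation must be flushed by hand, e.g.:
 *
 *	pmap_pte_setbits(va, 0, PG_RW);		(revoke write permission)
 *	pmap_update_pg(va);			(discard stale TLB entry)
 */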

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => This function is a front end for pmap_page_remove/pmap_clear_attrs
 * => We only have to worry about making the page more protected.
 *	Unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & PROT_WRITE) == 0) {
		if (prot & (PROT_READ | PROT_EXEC)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}
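
/*
 * Illustrative usage (assumption): the two interesting cases are
 * downgrading to read-only and removing all mappings:
 *
 *	pmap_page_protect(pg, PROT_READ);	(clears PG_RW everywhere)
 *	pmap_page_protect(pg, PROT_NONE);	(calls pmap_page_remove())
 */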

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => This function is a front end for pmap_remove/pmap_write_protect.
 * => We only have to worry about making the page more protected.
 *	Unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & PROT_WRITE) == 0) {
		if (prot & (PROT_READ | PROT_EXEC)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}
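
/*
 * Illustrative usage (assumption): write-protecting a VA range in the
 * kernel pmap:
 *
 *	pmap_protect(pmap_kernel(), sva, eva, PROT_READ);
 */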

/*
 * pmap_growkernel, pmap_enter, and pmap_extract get picked up in various
 * modules from both uvm_pmap.h and pmap.h. Since uvm_pmap.h declares these
 * as functions, inline them here to suppress linker warnings.
 */
__inline static vaddr_t
pmap_growkernel(vaddr_t maxkvaddr)
{
	return (*pmap_growkernel_p)(maxkvaddr);
}

__inline static int
pmap_enter(struct pmap *pmap, vaddr_t va, paddr_t pa, vm_prot_t prot, int flags)
{
	return (*pmap_enter_p)(pmap, va, pa, prot, flags);
}

__inline static boolean_t
pmap_extract(struct pmap *pmap, vaddr_t va, paddr_t *pa)
{
	return (*pmap_extract_p)(pmap, va, pa);
}
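
/*
 * Illustrative usage (assumption): translating a kernel VA to a PA
 * through whichever page table format is live:
 *
 *	paddr_t pa;
 *
 *	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
 *		... va is not mapped ...
 */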

/*
 * p m a p   i n l i n e   h e l p e r   f u n c t i o n s
 */

/*
 * pmap_is_active: is this pmap loaded into the specified processor's %cr3?
 */

static __inline boolean_t
pmap_is_active(struct pmap *pmap, struct cpu_info *ci)
{
	return (pmap == pmap_kernel() || ci->ci_curpmap == pmap);
}

static __inline boolean_t
pmap_is_curpmap(struct pmap *pmap)
{
	return (pmap_is_active(pmap, curcpu()));
}

#endif /* _KERNEL */

struct pv_entry;
struct vm_page_md {
	struct mutex pv_mtx;
	struct pv_entry *pv_list;
};

#define VM_MDPAGE_INIT(pg) do {			\
	mtx_init(&(pg)->mdpage.pv_mtx, IPL_VM); \
	(pg)->mdpage.pv_list = NULL;	\
} while (0)
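
/*
 * VM_MDPAGE_INIT is presumably invoked by the MI VM system as each
 * vm_page is set up, so every managed page starts with an empty pv
 * list and its own pv mutex (blocking interrupts up to IPL_VM).
 */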

#endif	/* _MACHINE_PMAP_H_ */