/*	$OpenBSD: pmap.h,v 1.51 2009/02/05 01:13:21 oga Exp $	*/
/*	$NetBSD: pmap.h,v 1.44 2000/04/24 17:18:18 thorpej Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef	_I386_PMAP_H_
#define	_I386_PMAP_H_

#include <machine/cpufunc.h>
#include <machine/pte.h>
#include <machine/segments.h>
#include <uvm/uvm_pglist.h>
#include <uvm/uvm_object.h>

/*
 * See pte.h for a description of i386 MMU terminology and hardware
 * interface.
 *
 * A pmap describes a process' 4GB virtual address space.  This
 * virtual address space can be broken up into 1024 4MB regions which
 * are described by PDEs in the PDP.  The PDEs are defined as follows:
 *
 * Ranges are inclusive -> exclusive, just like vm_map_entry start/end.
 * The following assumes that KERNBASE is 0xd0000000.
 *
 * PDE#s	VA range		Usage
 * 0->831	0x0 -> 0xcfc00000	user address space; note that the
 *					max user address is 0xcfbfe000.
 *					The final two pages in the last 4MB
 *					used to be reserved for the UAREA
 *					but are no longer used.
 * 831		0xcfc00000->		recursive mapping of PDP (used for
 *			0xd0000000	linear mapping of PTPs).
 * 832->1023	0xd0000000->		kernel address space (constant
 *			0xffc00000	across all pmaps/processes).
 * 1023		0xffc00000->		"alternate" recursive PDP mapping
 *			<end>		(for other pmaps).
 *
 *
 * Note: A recursive PDP mapping provides a way to map all the PTEs for
 * a 4GB address space into a linear chunk of virtual memory.  In other
 * words, the PTE for page 0 is the first int mapped into the 4MB recursive
 * area.  The PTE for page 1 is the second int.  The very last int in the
 * 4MB range is the PTE that maps VA 0xffffe000 (the last page in a 4GB
 * address).
 *
 * All pmaps' PDs must have the same values in slots 832->1023 so that
 * the kernel is always mapped in every process.  These values are loaded
 * into the PD at pmap creation time.
 *
 * At any one time only one pmap can be active on a processor.  This is
 * the pmap whose PDP is pointed to by processor register %cr3.  This pmap
 * will have all its PTEs mapped into memory at the recursive mapping
 * point (slot #831 as shown above).  When the pmap code wants to find the
 * PTE for a virtual address, all it has to do is the following:
 *
 * Address of PTE = (831 * 4MB) + (VA / NBPG) * sizeof(pt_entry_t)
 *                = 0xcfc00000 + (VA / 4096) * 4
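 *
 * For example (illustrative arithmetic, assuming NBPG == 4096 and
 * 4-byte PTEs), the PTE mapping VA 0xd0001000 lives at:
 *
 * Address of PTE = 0xcfc00000 + (0xd0001000 / 4096) * 4
 *                = 0xcfc00000 + 0xd0001 * 4
 *                = 0xcff40004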
 *
 * What happens if the pmap layer is asked to perform an operation
 * on a pmap that is not the one which is currently active?  In that
 * case we take the PA of the PDP of the non-active pmap and put it in
 * slot 1023 of the active pmap.  This causes the non-active pmap's
 * PTEs to get mapped in the final 4MB of the 4GB address space
 * (e.g. starting at 0xffc00000).
 *
 * The following figure shows the effects of the recursive PDP mapping:
 *
 *   PDP (%cr3)
 *   +----+
 *   |   0| -> PTP#0 that maps VA 0x0 -> 0x400000
 *   |    |
 *   |    |
 *   | 831| -> points back to PDP (%cr3) mapping VA 0xcfc00000 -> 0xd0000000
 *   | 832| -> first kernel PTP (maps 0xd0000000 -> 0xd0400000)
 *   |    |
 *   |1023| -> points to alternate pmap's PDP (maps 0xffc00000 -> end)
 *   +----+
 *
 * Note that the PDE#831 VA (0xcfc00000) is defined as "PTE_BASE".
 * Note that the PDE#1023 VA (0xffc00000) is defined as "APTE_BASE".
 *
 * Starting at VA 0xcfc00000 the current active PDP (%cr3) acts as a
 * PTP:
 *
 * PTP#831 == PDP(%cr3) => maps VA 0xcfc00000 -> 0xd0000000
 *   +----+
 *   |   0| -> maps the contents of PTP#0 at VA 0xcfc00000->0xcfc01000
 *   |    |
 *   |    |
 *   | 831| -> maps the contents of PTP#831 (the PDP) at VA 0xcff3f000
 *   | 832| -> maps the contents of first kernel PTP
 *   |    |
 *   |1023|
 *   +----+
 *
 * Note that the mapping of the PDP at PTP#831's VA (0xcff3f000) is
 * defined as "PDP_BASE".  Within that mapping there are two
 * defines:
 *   "PDP_PDE" (0xcff3fcfc) is the VA of the PDE in the PDP
 *      which points back to itself.
 *   "APDP_PDE" (0xcff3fffc) is the VA of the PDE in the PDP which
 *      establishes the recursive mapping of the alternate pmap.
 *      To set the alternate PDP, one just has to put the correct
 *	PA info in *APDP_PDE.
 *
 * Note that in the APTE_BASE space, the APDP appears at VA
 * "APDP_BASE" (0xfffff000).
 */

/*
 * The following defines identify the slots used as described above.
 */

#define PDSLOT_PTE	((KERNBASE/NBPD)-1) /* 831: for recursive PDP map */
#define PDSLOT_KERN	(KERNBASE/NBPD)	    /* 832: start of kernel space */
#define PDSLOT_APTE	((unsigned)1023) /* 1023: alternative recursive slot */

/*
 * The following defines give the virtual addresses of various MMU
 * data structures:
 * PTE_BASE and APTE_BASE: the base VA of the linear PTE mappings
 * PDP_BASE and APDP_BASE: the base VA of the recursive mapping of the PDP
 * PDP_PDE and APDP_PDE: the VA of the PDE that points back to the PDP/APDP
 */

#define PTE_BASE	((pt_entry_t *)  (PDSLOT_PTE * NBPD) )
#define APTE_BASE	((pt_entry_t *)  (PDSLOT_APTE * NBPD) )
#define PDP_BASE ((pd_entry_t *)(((char *)PTE_BASE) + (PDSLOT_PTE * NBPG)))
#define APDP_BASE ((pd_entry_t *)(((char *)APTE_BASE) + (PDSLOT_APTE * NBPG)))
#define PDP_PDE		(PDP_BASE + PDSLOT_PTE)
#define APDP_PDE	(PDP_BASE + PDSLOT_APTE)
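
/*
 * Illustrative values only (a sketch assuming KERNBASE == 0xd0000000,
 * NBPD == 4MB, NBPG == 4096), matching the figure above:
 *
 *	PTE_BASE  = 831 * 0x400000            = 0xcfc00000
 *	APTE_BASE = 1023 * 0x400000           = 0xffc00000
 *	PDP_BASE  = 0xcfc00000 + 831 * 0x1000 = 0xcff3f000
 *	PDP_PDE   = 0xcff3f000 + 831 * 4      = 0xcff3fcfc
 *	APDP_PDE  = 0xcff3f000 + 1023 * 4     = 0xcff3fffc
 */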

/*
 * The following define determines how many PTPs should be set up for the
 * kernel by locore.s at boot time.  This should be large enough to
 * get the VM system running.  Once the VM system is running, the
 * pmap module can add more PTPs to the kernel area on demand.
 */

#ifndef NKPTP
#define NKPTP		4	/* 16MB to start */
#endif
#define NKPTP_MIN	4	/* smallest value we allow */
#define NKPTP_MAX	(1024 - (KERNBASE/NBPD) - 1)
				/* largest value (-1 for APTP space) */
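
/*
 * For example (illustrative arithmetic, assuming KERNBASE == 0xd0000000
 * and NBPD == 4MB): NKPTP_MAX = 1024 - 832 - 1 = 191 PTPs, enough to
 * map 191 * 4MB = 764MB of kernel virtual address space.
 */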

/*
 * various address macros
 *
 *  vtopte: return a pointer to the PTE mapping a VA
 *  kvtopte: same as above (takes a KVA, but doesn't matter with this pmap)
 *  ptetov: given a pointer to a PTE, return the VA that it maps
 *  vtophys: translate a VA to the PA mapped to it
 *
 * plus alternative versions of the above
 */

#define vtopte(VA)	(PTE_BASE + atop(VA))
#define kvtopte(VA)	vtopte(VA)
#define ptetov(PT)	(ptoa(PT - PTE_BASE))
#define	vtophys(VA)	((*vtopte(VA) & PG_FRAME) | \
			 ((unsigned)(VA) & ~PG_FRAME))
#define	avtopte(VA)	(APTE_BASE + atop(VA))
#define	ptetoav(PT)	(ptoa(PT - APTE_BASE))
#define	avtophys(VA)	((*avtopte(VA) & PG_FRAME) | \
			 ((unsigned)(VA) & ~PG_FRAME))
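
/*
 * Usage sketch (illustrative only; the VA is a made-up example):
 *
 *	vaddr_t va = 0xd0001234;
 *	pt_entry_t *pte = vtopte(va);	-- == PTE_BASE + 0xd0001
 *	paddr_t pa = vtophys(va);	-- frame bits of *pte | 0x234
 *
 * vtophys() only gives a meaningful answer if the PTE is valid, i.e.
 * pmap_valid_entry(*vtopte(va)).
 */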

/*
 * pdei/ptei: generate index into PDP/PTP from a VA
 */
#define	pdei(VA)	(((VA) & PD_MASK) >> PDSHIFT)
#define	ptei(VA)	(((VA) & PT_MASK) >> PGSHIFT)
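
/*
 * For example (illustrative, assuming PDSHIFT == 22 and PGSHIFT == 12):
 * for VA 0xd0001234, pdei() is 0xd0001234 >> 22 = 832 and ptei() is
 * (0xd0001234 >> 12) & 0x3ff = 1.
 */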

/*
 * PTP macros:
 *   A PTP's index is the PD index of the PDE that points to it.
 *   A PTP's offset is the byte-offset in the PTE space that this PTP is at.
 *   A PTP's VA is the first VA mapped by that PTP.
 *
 * Note that NBPG == number of bytes in a PTP (4096 bytes == 1024 entries)
 *           NBPD == number of bytes a PTP can map (4MB)
 */

#define ptp_i2o(I)	((I) * NBPG)	/* index => offset */
#define ptp_o2i(O)	((O) / NBPG)	/* offset => index */
#define ptp_i2v(I)	((I) * NBPD)	/* index => VA */
#define ptp_v2i(V)	((V) / NBPD)	/* VA => index (same as pdei) */
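
/*
 * For example (illustrative arithmetic): the first kernel PTP has index
 * 832, so ptp_i2o(832) == 832 * 4096 == 0x340000 and ptp_i2v(832) ==
 * 832 * 4MB == 0xd0000000 (KERNBASE in the layout above).
 */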

/*
 * PG_AVAIL usage: we make use of the ignored bits of the PTE
 */

#define PG_W		PG_AVAIL1	/* "wired" mapping */
#define PG_PVLIST	PG_AVAIL2	/* mapping has entry on pvlist */
#define	PG_X		PG_AVAIL3	/* executable mapping */

/*
 * Number of PTEs per cache line.  4-byte PTE, 32-byte cache line.
 * Used to avoid false sharing of cache lines.
 */
#define NPTECL			8

#ifdef _KERNEL
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * We maintain a list of all non-kernel pmaps.
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * The pmap structure
 *
 * Note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 */

struct pmap {
	struct uvm_object pm_obj;	/* object (lck by object lock) */
#define	pm_lock	pm_obj.vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint;	/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

	vaddr_t pm_hiexec;		/* highest executable mapping */
	int pm_flags;			/* see below */

	struct	segment_descriptor pm_codeseg;	/* cs descriptor for process */
	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
	uint32_t pm_cpus;		/* mask of CPUs using map */
};

/* pm_flags */
#define	PMF_USER_LDT	0x01	/* pmap has user-set LDT */

/*
 * For each managed physical page we maintain a list of <PMAP,VA>s
 * which it is mapped at.  The list is headed by a pv_head structure.
 * There is one pv_head per managed phys page (allocated at boot time).
 * The pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry {			/* locked by its list's pvh_lock */
	struct pv_entry *pv_next;	/* next entry */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};
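
/*
 * A sketch of walking a page's pv list (illustrative only; "pvh" stands
 * for whatever list head the caller obtained, and the list must be held
 * locked as described in pmap.c):
 *
 *	struct pv_entry *pve;
 *
 *	for (pve = pvh; pve != NULL; pve = pve->pv_next)
 *		... examine pve->pv_pmap and pve->pv_va ...
 */
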
/*
 * MD flags to pmap_enter:
 */
#define	PMAP_NOCACHE	PMAP_MD0

/*
 * We keep mod/ref flags in struct vm_page->pg_flags.
 */
#define PG_PMAP_MOD	PG_PMAP0
#define	PG_PMAP_REF	PG_PMAP1

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * We keep track of how many pv_entrys are in use for each page so
 * that we can free pv_entry pages when needed.  There is one lock
 * for the entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entries in a pv_page
 * (note: won't work on systems where NBPG isn't a constant)
 */

#define PVE_PER_PVPAGE ((NBPG - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))
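
/*
 * For example (illustrative arithmetic, assuming NBPG == 4096 with
 * 4-byte pointers and ints): sizeof(struct pv_page_info) == 16 and
 * sizeof(struct pv_entry) == 16, so PVE_PER_PVPAGE == (4096 - 16) / 16
 * == 255.
 */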

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

/*
 * global kernel variables
 */

extern pd_entry_t	PTD[];

/* PTDpaddr: the physical address of the kernel's PDP */
extern u_int32_t PTDpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int nkpde;			/* current # of PDEs for kernel */
extern int pmap_pg_g;			/* do we support PG_G? */

/*
 * Macros
 */

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_update(pm)			/* nada */

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_phys_address(ppn)		ptoa(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */

#define pmap_proc_iflush(p,va,len)	/* nothing */
#define pmap_unuse_final(p)		/* nothing */
#define	pmap_remove_holes(map)		do { /* nothing */ } while (0)


/*
 * Prototypes
 */

void		pmap_bootstrap(vaddr_t);
boolean_t	pmap_clear_attrs(struct vm_page *, int);
static void	pmap_page_protect(struct vm_page *, vm_prot_t);
void		pmap_page_remove(struct vm_page *);
static void	pmap_protect(struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
boolean_t	pmap_test_attrs(struct vm_page *, int);
void		pmap_write_protect(struct pmap *, vaddr_t,
				vaddr_t, vm_prot_t);
int		pmap_exec_fixup(struct vm_map *, struct trapframe *,
		    struct pcb *);

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

void	pmap_tlb_shootpage(struct pmap *, vaddr_t);
void	pmap_tlb_shootrange(struct pmap *, vaddr_t, vaddr_t);
void	pmap_tlb_shoottlb(void);
#ifdef MULTIPROCESSOR
void	pmap_tlb_shootwait(void);
#else
#define pmap_tlb_shootwait()
#endif
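
/*
 * Typical use (a sketch, not a contract; see pmap.c for the real
 * callers): after changing or removing a mapping, queue a shootdown
 * and then wait for the other CPUs before reusing the VA:
 *
 *	pmap_tlb_shootpage(pmap, va);
 *	pmap_tlb_shootwait();
 *
 * On non-MULTIPROCESSOR kernels pmap_tlb_shootwait() compiles away.
 */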

void	pmap_prealloc_lowmem_ptp(paddr_t);

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * Do idle page zeroing uncached to avoid polluting the cache.
 */
boolean_t	pmap_zero_page_uncached(paddr_t);
#define	PMAP_PAGEIDLEZERO(pg)	pmap_zero_page_uncached(VM_PAGE_TO_PHYS(pg))

/*
 * Inline functions
 */

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 *	if hardware doesn't support one-page flushing)
 */

#define pmap_update_pg(va)	invlpg((u_int)(va))

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

#define pmap_update_2pg(va, vb) { invlpg((u_int)(va)); invlpg((u_int)(vb)); }

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => This function is a front end for pmap_page_remove/pmap_clear_attrs
 * => We only have to worry about making the page more protected.
 *	Unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => This function is a front end for pmap_remove/pmap_write_protect.
 * => We only have to worry about making the page more protected.
 *	Unprotecting a page is done on-demand at fault time.
 */

__inline static void
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

#if defined(USER_LDT)
void	pmap_ldt_cleanup(struct proc *);
#define	PMAP_FORK
#endif /* USER_LDT */

#endif /* _KERNEL */
#endif	/* _I386_PMAP_H_ */