/*	$NetBSD: pmap.h,v 1.3 2007/11/07 00:23:16 ad Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _X86_PMAP_H_
#define	_X86_PMAP_H_

#define ptei(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)
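
/*
 * e.g. under the usual amd64 layout (4KB pages, 9 index bits per
 * level, so L1_SHIFT=12, L2_SHIFT=21, L3_SHIFT=30, L4_SHIFT=39):
 *
 *	pl1_pi(0x3ff000) == 0x1ff	(last PTE slot within its PTP)
 *	pl2_pi(0x400000) == 0x002	(third PDE slot within its PD page)
 *
 * each pl*_pi() result stays in [0, 511] because the per-page L*_MASK
 * keeps only the 9 index bits for that one level.
 */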

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
        (((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define	pl_i_roundup(va, lvl)	pl_i((va) + ~ptp_masks[(lvl)-1], (lvl))
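
/*
 * worked example (same assumed amd64 layout, and assuming ptp_masks[]
 * and ptp_shifts[] hold the per-level L*_FRAME/L*_SHIFT values):
 * pl1_i() masks with L1_FRAME, which keeps every index bit above the
 * page offset rather than just the 9 bits for one PTP, so
 *
 *	pl1_pi(0x40201000) == 0x00001	(slot within one PTP)
 *	pl1_i(0x40201000)  == 0x40201	(slot in the whole PTE window)
 *
 * pl_i(va, 1) is the table-driven form of pl1_i(va), and
 * pl_i_roundup() rounds up first: pl_i_roundup(0x1001, 1) == 2.
 */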

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte offset into the PTE space at which it sits
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
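
/*
 * e.g. ptp_va2o(va, 1) scales va's index at the next level up (its
 * PDE slot, pl_i(va, 2)) by PAGE_SIZE.  Under the assumed amd64
 * layout, where one level-1 PTP maps 2MB:
 *
 *	ptp_va2o(0x400000, 1) == 2 * PAGE_SIZE == 0x2000
 */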

#if defined(_KERNEL)
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the spinlock for vm object 0.  Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called).
 *
 * XXX If we ever support processor numbers higher than 31, we'll have
 * XXX to rethink the CPU mask.
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1 */
#define	pm_lock	pm_obj[0].vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */
	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
	uint32_t pm_cpus;		/* mask of CPUs using pmap */
	uint32_t pm_kernel_cpus;	/* mask of CPUs using kernel part
					 of pmap */
};
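
/*
 * e.g. the locking rule above means a PTP page allocation at any
 * level is bracketed by vm object 0's lock; a minimal sketch with
 * this era's simple_lock interface, where i is a hypothetical index
 * picking the level's object (the real code lives in pmap.c):
 *
 *	simple_lock(&pmap->pm_lock);
 *	ptp = uvm_pagealloc(&pmap->pm_obj[i], off, NULL, flags);
 *	simple_unlock(&pmap->pm_lock);
 */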

/* pm_flags */
#define	PMF_USER_LDT	0x01	/* pmap has user-set LDT */

/*
 * for each managed physical page we maintain a list of the <pmap, VA>
 * pairs at which it is mapped.  the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry {			/* locked by its list's pvh_lock */
	SPLAY_ENTRY(pv_entry) pv_node;	/* splay-tree node */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where PAGE_SIZE isn't a constant)
 */

#define PVE_PER_PVPAGE ((PAGE_SIZE - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))
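
/*
 * e.g. on LP64 with 4KB pages (assuming no unusual padding):
 * sizeof(struct pv_page_info) == 32 (two TAILQ pointers, the free
 * list pointer and a padded int) and sizeof(struct pv_entry) == 40
 * (two SPLAY pointers plus the pmap, VA and PTP fields), giving
 * PVE_PER_PVPAGE == (4096 - 32) / 40 == 101.
 */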

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};

/*
 * global kernel variables
 */

/* PDPpaddr: the physical address of the kernel's PDP */
extern u_long PDPpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int pmap_pg_g;			/* do we support PG_G? */
extern long nkptp[PTP_LEVELS];

/*
 * macros
 */

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)		/* nothing */
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)		/* nothing */
#define pmap_phys_address(ppn)		x86_ptob(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */

/*
 * prototypes
 */

void		pmap_activate(struct lwp *);
void		pmap_bootstrap(vaddr_t);
bool		pmap_clear_attrs(struct vm_page *, unsigned);
void		pmap_deactivate(struct lwp *);
void		pmap_page_remove(struct vm_page *);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
bool		pmap_test_attrs(struct vm_page *, unsigned);
void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_load(void);

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

void	pmap_tlb_shootdown(pmap_t, vaddr_t, vaddr_t, pt_entry_t);
void	pmap_tlb_shootwait(void);

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * Do idle page zeroing uncached to avoid polluting the cache.
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

/*ARGSUSED*/
static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 *	if hardware doesn't support one-page flushing)
 */

__inline static void __attribute__((__unused__))
pmap_update_pg(vaddr_t va)
{
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		tlbflush();
	else
#endif
		invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void __attribute__((__unused__))
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
#if defined(I386_CPU)
	if (cpu_class == CPUCLASS_386)
		tlbflush();
	else
#endif
	{
		invlpg(va);
		invlpg(vb);
	}
}
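
/*
 * typical use, e.g. after changing a kernel PTE in place (sketch
 * only; real callers in pmap.c also take the proper locks and do
 * cross-CPU shootdowns via pmap_tlb_shootdown()):
 *
 *	pt_entry_t *pte = kvtopte(va);
 *	*pte &= ~PG_RW;			write-protect the page
 *	pmap_update_pg(va);		flush the stale local TLB entry
 */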

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __attribute__((__unused__))
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __attribute__((__unused__))
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}
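
/*
 * so write-protecting one page of a pmap comes down to
 *
 *	pmap_protect(pmap, va, va + PAGE_SIZE, VM_PROT_READ);
 *
 * which lands in pmap_write_protect(), while passing VM_PROT_NONE
 * would remove the mapping entirely via pmap_remove().
 */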

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA; works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __attribute__((__unused__))
vtopte(vaddr_t va)
{

	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __attribute__((__unused__))
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PG_PS)
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}
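
/*
 * e.g. translating a user VA whose mapping is known to exist (sketch;
 * the caller is assumed to hold the right pmap lock):
 *
 *	pt_entry_t pte = *vtopte(va);
 *	if (pmap_valid_entry(pte))
 *		pa = (pte & PG_FRAME) | (va & PAGE_MASK);
 *
 * note that for a large (PG_PS) kernel mapping, kvtopte() returns the
 * PDE itself cast to a PTE pointer, so the frame there covers a
 * 2MB/4MB superpage rather than a single 4KB page.
 */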

paddr_t vtophys(vaddr_t);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void	pmap_cpu_init_early(struct cpu_info *);
void	pmap_cpu_init_late(struct cpu_info *);
void	sse2_zero_page(void *);
void	sse2_copy_page(void *, void *);

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

/*
 * TLB shootdown mailbox.
 */

struct pmap_mbox {
	volatile void		*mb_pointer;
	volatile uintptr_t	mb_addr1;
	volatile uintptr_t	mb_addr2;
	volatile uintptr_t	mb_head;
	volatile uintptr_t	mb_tail;
	volatile uintptr_t	mb_global;
};

#endif /* _KERNEL */

#endif /* _X86_PMAP_H_ */