/*	$NetBSD: pmap.h,v 1.6 2007/12/09 20:27:48 jmcneill Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgment:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Frank van der Linden for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * pmap.h: see pmap.c for the history of this pmap module.
 */

#ifndef _X86_PMAP_H_
#define	_X86_PMAP_H_

#define ptei(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)

/*
 * pl*_pi: index in the ptp page for a pde mapping a VA.
 * (pl*_i below is the index in the virtual array of all pdes per level)
 */
#define pl1_pi(VA)	(((VA_SIGN_POS(VA)) & L1_MASK) >> L1_SHIFT)
#define pl2_pi(VA)	(((VA_SIGN_POS(VA)) & L2_MASK) >> L2_SHIFT)
#define pl3_pi(VA)	(((VA_SIGN_POS(VA)) & L3_MASK) >> L3_SHIFT)
#define pl4_pi(VA)	(((VA_SIGN_POS(VA)) & L4_MASK) >> L4_SHIFT)

/*
 * pl*_i: generate index into pde/pte arrays in virtual space
 */
#define pl1_i(VA)	(((VA_SIGN_POS(VA)) & L1_FRAME) >> L1_SHIFT)
#define pl2_i(VA)	(((VA_SIGN_POS(VA)) & L2_FRAME) >> L2_SHIFT)
#define pl3_i(VA)	(((VA_SIGN_POS(VA)) & L3_FRAME) >> L3_SHIFT)
#define pl4_i(VA)	(((VA_SIGN_POS(VA)) & L4_FRAME) >> L4_SHIFT)
#define pl_i(va, lvl) \
	(((VA_SIGN_POS(va)) & ptp_masks[(lvl)-1]) >> ptp_shifts[(lvl)-1])

#define	pl_i_roundup(va, lvl)	pl_i((va) + ~ptp_masks[(lvl)-1], (lvl))
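
/*
 * Worked example (illustrative only; assumes the 4-level amd64 layout,
 * where each level indexes with 9 bits and L1_SHIFT == 12,
 * L2_SHIFT == 21): for the user VA 0x40201000,
 *
 *	pl1_pi(va) == 0x001	(slot within one PT page)
 *	pl1_i(va)  == 0x40201	(slot in the linear array of all PTEs)
 *	pl2_pi(va) == 0x001	(slot within one PD page)
 *	pl2_i(va)  == 0x201	(slot in the linear array of all PDEs)
 *
 * i.e. the pl*_pi macros keep only the 9 index bits of one level, while
 * the pl*_i macros keep every index bit at and above that level.
 */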

/*
 * PTP macros:
 *   a PTP's index is the PD index of the PDE that points to it
 *   a PTP's offset is the byte-offset in the PTE space that this PTP is at
 *   a PTP's VA is the first VA mapped by that PTP
 */

#define ptp_va2o(va, lvl)	(pl_i(va, (lvl)+1) * PAGE_SIZE)
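
/*
 * Illustrative example (same amd64 assumptions as above): the level-1
 * PTP mapping VA 0x40201000 sits at PD index pl_i(va, 2) == 0x201, so
 *
 *	ptp_va2o(0x40201000, 1) == 0x201 * PAGE_SIZE == 0x201000
 *
 * is that PTP's byte offset within the PTE space.
 */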

#if defined(_KERNEL)
/*
 * pmap data structures: see pmap.c for details of locking.
 */

struct pmap;
typedef struct pmap *pmap_t;

/*
 * we maintain a list of all non-kernel pmaps
 */

LIST_HEAD(pmap_head, pmap); /* struct pmap_head: head of a pmap list */
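
/*
 * A minimal sketch (not compiled) of walking that list with the
 * <sys/queue.h> macros; "pmaps" is assumed to be the global list head
 * defined in pmap.c.
 */
#if 0
	struct pmap *pm;

	LIST_FOREACH(pm, &pmaps, pm_list)
		/* ... examine pm ... */;
#endif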

/*
 * the pmap structure
 *
 * note that the pm_obj contains the simple_lock, the reference count,
 * page list, and number of PTPs within the pmap.
 *
 * pm_lock is the same as the spinlock for vm object 0. Changes to
 * the other objects may only be made if that lock has been taken
 * (the other object locks are only used when uvm_pagealloc is called)
 *
 * XXX If we ever support processor numbers higher than 31, we'll have
 * XXX to rethink the CPU mask.
 */

struct pmap {
	struct uvm_object pm_obj[PTP_LEVELS-1]; /* objects for lvl >= 1 */
#define	pm_lock	pm_obj[0].vmobjlock
	LIST_ENTRY(pmap) pm_list;	/* list (lck by pm_list lock) */
	pd_entry_t *pm_pdir;		/* VA of PD (lck by object lock) */
	paddr_t pm_pdirpa;		/* PA of PD (read-only after create) */
	struct vm_page *pm_ptphint[PTP_LEVELS-1];
					/* pointer to a PTP in our pmap */
	struct pmap_statistics pm_stats;  /* pmap stats (lck by object lock) */

#if !defined(__x86_64__)
	vaddr_t pm_hiexec;		/* highest executable mapping */
#endif /* !defined(__x86_64__) */
	int pm_flags;			/* see below */

	union descriptor *pm_ldt;	/* user-set LDT */
	int pm_ldt_len;			/* number of LDT entries */
	int pm_ldt_sel;			/* LDT selector */
	uint32_t pm_cpus;		/* mask of CPUs using pmap */
	uint32_t pm_kernel_cpus;	/* mask of CPUs using kernel part
					 of pmap */
};
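
/*
 * A minimal sketch (not compiled) of the locking rule described above:
 * vm object 0's lock, aliased as pm_lock, is taken before any of the
 * per-level objects is touched.
 */
#if 0
	simple_lock(&pmap->pm_lock);
	/* ... operate on pmap->pm_obj[lvl] ... */
	simple_unlock(&pmap->pm_lock);
#endif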

/* pm_flags */
#define	PMF_USER_LDT	0x01	/* pmap has user-set LDT */
#define	PMF_USER_XPIN	0x02	/* pmap pdirpa is pinned (Xen) */
#define	PMF_USER_RELOAD	0x04	/* reload user pmap on PTE unmap (Xen) */


/*
 * for each managed physical page we maintain a list of <PMAP,VA> pairs
 * at which it is mapped.  the list is headed by a pv_head structure.
 * there is one pv_head per managed phys page (allocated at boot time).
 * the pv_head structure points to a list of pv_entry structures (each
 * describes one mapping).
 */

struct pv_entry {			/* locked by its list's pvh_lock */
	SPLAY_ENTRY(pv_entry) pv_node;	/* splay-tree node */
	struct pmap *pv_pmap;		/* the pmap */
	vaddr_t pv_va;			/* the virtual address */
	struct vm_page *pv_ptp;		/* the vm_page of the PTP */
};

/*
 * pv_entrys are dynamically allocated in chunks from a single page.
 * we keep track of how many pv_entrys are in use for each page and
 * we can free pv_entry pages if needed.  there is one lock for the
 * entire allocation system.
 */

struct pv_page_info {
	TAILQ_ENTRY(pv_page) pvpi_list;
	struct pv_entry *pvpi_pvfree;
	int pvpi_nfree;
};

/*
 * number of pv_entry's in a pv_page
 * (note: won't work on systems where PAGE_SIZE isn't a constant)
 */

#define PVE_PER_PVPAGE ((PAGE_SIZE - sizeof(struct pv_page_info)) / \
			sizeof(struct pv_entry))

/*
 * a pv_page: where pv_entrys are allocated from
 */

struct pv_page {
	struct pv_page_info pvinfo;
	struct pv_entry pvents[PVE_PER_PVPAGE];
};
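
/*
 * Worked example (illustrative only; assumes an LP64 port with
 * PAGE_SIZE == 4096, 8-byte pointers and no unusual padding):
 * sizeof(struct pv_page_info) == 32 (two TAILQ pointers, the free-list
 * pointer, and a padded int) and sizeof(struct pv_entry) == 40 (two
 * splay pointers plus pv_pmap, pv_va and pv_ptp), so
 *
 *	PVE_PER_PVPAGE == (4096 - 32) / 40 == 101
 *
 * pv_entrys fit in each pv_page, with 24 bytes left over.
 */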

/*
 * global kernel variables
 */

/* PDPpaddr: the physical address of the kernel's PDP */
extern u_long PDPpaddr;

extern struct pmap kernel_pmap_store;	/* kernel pmap */
extern int pmap_pg_g;			/* do we support PG_G? */
extern long nkptp[PTP_LEVELS];

/*
 * macros
 */

#define	pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define pmap_clear_modify(pg)		pmap_clear_attrs(pg, PG_M)
#define pmap_clear_reference(pg)	pmap_clear_attrs(pg, PG_U)
#define pmap_copy(DP,SP,D,L,S)
#define pmap_is_modified(pg)		pmap_test_attrs(pg, PG_M)
#define pmap_is_referenced(pg)		pmap_test_attrs(pg, PG_U)
#define pmap_move(DP,SP,D,L,S)
#define pmap_phys_address(ppn)		x86_ptob(ppn)
#define pmap_valid_entry(E)		((E) & PG_V) /* is PDE or PTE valid? */
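
/*
 * A minimal usage sketch (not compiled): the modified/referenced
 * macros simply test or clear the PG_M/PG_U attribute bits recorded
 * for a managed page.
 */
#if 0
	if (pmap_is_modified(pg))	/* has the page been written to? */
		pmap_clear_modify(pg);	/* reset PG_M for the next scan */
#endif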


/*
 * prototypes
 */

void		pmap_activate(struct lwp *);
void		pmap_bootstrap(vaddr_t);
bool		pmap_clear_attrs(struct vm_page *, unsigned);
void		pmap_deactivate(struct lwp *);
void		pmap_page_remove(struct vm_page *);
void		pmap_remove(struct pmap *, vaddr_t, vaddr_t);
bool		pmap_test_attrs(struct vm_page *, unsigned);
void		pmap_write_protect(struct pmap *, vaddr_t, vaddr_t, vm_prot_t);
void		pmap_load(void);
paddr_t		pmap_init_tmp_pgtbl(paddr_t);

vaddr_t reserve_dumppages(vaddr_t); /* XXX: not a pmap fn */

void	pmap_tlb_shootdown(pmap_t, vaddr_t, vaddr_t, pt_entry_t);
void	pmap_tlb_shootwait(void);

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/*
 * Do idle page zero'ing uncached to avoid polluting the cache.
 */
bool	pmap_pageidlezero(paddr_t);
#define	PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * inline functions
 */

/*ARGSUSED*/
static __inline void
pmap_remove_all(struct pmap *pmap)
{
	/* Nothing. */
}

/*
 * pmap_update_pg: flush one page from the TLB (or flush the whole thing
 *	if hardware doesn't support one-page flushing)
 */

__inline static void __attribute__((__unused__))
pmap_update_pg(vaddr_t va)
{
	invlpg(va);
}

/*
 * pmap_update_2pg: flush two pages from the TLB
 */

__inline static void __attribute__((__unused__))
pmap_update_2pg(vaddr_t va, vaddr_t vb)
{
	invlpg(va);
	invlpg(vb);
}

/*
 * pmap_page_protect: change the protection of all recorded mappings
 *	of a managed page
 *
 * => this function is a frontend for pmap_page_remove/pmap_clear_attrs
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __attribute__((__unused__))
pmap_page_protect(struct vm_page *pg, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			(void) pmap_clear_attrs(pg, PG_RW);
		} else {
			pmap_page_remove(pg);
		}
	}
}

/*
 * pmap_protect: change the protection of pages in a pmap
 *
 * => this function is a frontend for pmap_remove/pmap_write_protect
 * => we only have to worry about making the page more protected.
 *	unprotecting a page is done on-demand at fault time.
 */

__inline static void __attribute__((__unused__))
pmap_protect(struct pmap *pmap, vaddr_t sva, vaddr_t eva, vm_prot_t prot)
{
	if ((prot & VM_PROT_WRITE) == 0) {
		if (prot & (VM_PROT_READ|VM_PROT_EXECUTE)) {
			pmap_write_protect(pmap, sva, eva, prot);
		} else {
			pmap_remove(pmap, sva, eva);
		}
	}
}

/*
 * various address inlines
 *
 *  vtopte: return a pointer to the PTE mapping a VA, works only for
 *  user and PT addresses
 *
 *  kvtopte: return a pointer to the PTE mapping a kernel VA
 */

#include <lib/libkern/libkern.h>

static __inline pt_entry_t * __attribute__((__unused__))
vtopte(vaddr_t va)
{

	KASSERT(va < VM_MIN_KERNEL_ADDRESS);

	return (PTE_BASE + pl1_i(va));
}

static __inline pt_entry_t * __attribute__((__unused__))
kvtopte(vaddr_t va)
{
	pd_entry_t *pde;

	KASSERT(va >= VM_MIN_KERNEL_ADDRESS);

	pde = L2_BASE + pl2_i(va);
	if (*pde & PG_PS)
		return ((pt_entry_t *)pde);

	return (PTE_BASE + pl1_i(va));
}

paddr_t vtophys(vaddr_t);
vaddr_t	pmap_map(vaddr_t, paddr_t, paddr_t, vm_prot_t);
void	pmap_cpu_init_early(struct cpu_info *);
void	pmap_cpu_init_late(struct cpu_info *);
void	sse2_zero_page(void *);
void	sse2_copy_page(void *, void *);


#ifdef XEN

#define XPTE_MASK	L1_FRAME
#define XPTE_SHIFT	9

/* PTE access inline functions */

/*
 * Get the machine address of the PTE pointed to.
 * We use the hardware MMU to get the value, so this works only for
 * levels 1-3.
 */

static __inline paddr_t
xpmap_ptetomach(pt_entry_t *pte)
{
	pt_entry_t *up_pte;
	vaddr_t va = (vaddr_t) pte;

	va = ((va & XPTE_MASK) >> XPTE_SHIFT) | (vaddr_t) PTE_BASE;
	up_pte = (pt_entry_t *) va;

	return (paddr_t) (((*up_pte) & PG_FRAME) +
	    (((vaddr_t) pte) & (~PG_FRAME & ~VA_SIGN_MASK)));
}

/*
 * xpmap_update()
 * Update an active pt entry with Xen
 * Equivalent to *pte = npte
 */

static __inline void
xpmap_update(pt_entry_t *pte, pt_entry_t npte)
{
	int s = splvm();

	xpq_queue_pte_update((pt_entry_t *) xpmap_ptetomach(pte), npte);
	xpq_flush_queue();
	splx(s);
}
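
/*
 * A minimal usage sketch (not compiled): under Xen a live PTE is
 * changed through the hypervisor's update queue rather than by a
 * direct store, so given some mapped kernel VA "va" and a new entry
 * "npte" (both assumed in scope), "*pte = npte" becomes:
 */
#if 0
	pt_entry_t *pte = kvtopte(va);

	xpmap_update(pte, npte);	/* instead of "*pte = npte" */
#endif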


/* Xen helpers to change bits of a pte */
#define XPMAP_UPDATE_DIRECT	1	/* Update direct map entry flags too */

/* pmap functions with machine addresses */
void	pmap_kenter_ma(vaddr_t, paddr_t, vm_prot_t);
int	pmap_enter_ma(struct pmap *, vaddr_t, paddr_t, paddr_t,
	    vm_prot_t, int, int);
bool	pmap_extract_ma(pmap_t, vaddr_t, paddr_t *);
paddr_t	vtomach(vaddr_t);

#endif	/* XEN */

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

/*
 * TLB shootdown mailbox.
 */

struct pmap_mbox {
	volatile void		*mb_pointer;
	volatile uintptr_t	mb_addr1;
	volatile uintptr_t	mb_addr2;
	volatile uintptr_t	mb_head;
	volatile uintptr_t	mb_tail;
	volatile uintptr_t	mb_global;
};

#endif /* _KERNEL */

#endif /* _X86_PMAP_H_ */