/*	$NetBSD: pmap.h,v 1.97 2010/11/14 13:33:21 uebayasi Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/arm32/pte.h>
#ifndef _LOCORE
#if defined(_KERNEL_OPT)
#include "opt_arm32_pmap.h"
#endif
#include <arm/cpufunc.h>
#include <uvm/uvm_object.h>
#endif

/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */
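
/*
 * A minimal sketch (illustrative only, not compiled as part of this
 * header): each 1MB region is selected by the top 12 bits of a virtual
 * address, so the L1 slot covering a VA is found as below.  L1_S_SHIFT
 * comes from <arm/arm32/pte.h>; the function name is hypothetical.
 *
 *	pd_entry_t *
 *	example_l1_slot(pd_entry_t *l1pt, vaddr_t va)
 *	{
 *		return &l1pt[va >> L1_S_SHIFT];	// one of 4096 slots
 *	}
 */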

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
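
/*
 * Worked example (assuming the usual 1MB sections, L1_S_SHIFT == 20):
 * L2_LOG2 = (32 - 20) - 4 = 8, so L2_SIZE = 256.  That is, at most 256
 * l2_dtable structures, each spanning 16MB, cover the full 4GB space.
 */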

/*
 * Tell MI code that the cache is virtually-indexed.
 * ARMv6 and later caches are virtually-indexed but physically-tagged
 * (VIPT); all earlier ones are virtually-tagged as well (VIVT).
 */
#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define PMAP_CACHE_VIPT
#else
#define PMAP_CACHE_VIVT
#endif

#ifndef _LOCORE

struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			u_int8_t csu_cache_b[2];
			u_int16_t csu_cache;
		} cs_cache_u;

		union {
			u_int8_t csu_tlb_b[2];
			u_int16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	u_int32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};
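
/*
 * A hedged example of how board code typically builds such a table (the
 * addresses below are made up for illustration; an entry with pd_size of
 * zero terminates the table walked by pmap_devmap_bootstrap(), declared
 * later in this file):
 *
 *	static const struct pmap_devmap example_devmap[] = {
 *		{
 *			.pd_va    = 0xfd000000,	// hypothetical device VA
 *			.pd_pa    = 0x80000000,	// hypothetical device PA
 *			.pd_size  = 0x00100000,	// 1MB of registers
 *			.pd_prot  = VM_PROT_READ | VM_PROT_WRITE,
 *			.pd_cache = PTE_NOCACHE,	// defined below
 *		},
 *		{ 0, 0, 0, 0, 0 }
 *	};
 */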

/*
 * The pmap structure itself
 */
struct pmap {
	u_int8_t		pm_domain;
	bool			pm_remove_all;
	bool			pm_activated;
	struct l1_ttable	*pm_l1;
	pd_entry_t		*pm_pl1vec;
	pd_entry_t		pm_l1vec;
	union pmap_cache_state	pm_cstate;
	struct uvm_object	pm_obj;
#define	pm_lock pm_obj.vmobjlock
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
	LIST_ENTRY(pmap)	pm_list;
};

/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
	vsize_t pv_size;
} pv_addr_t;
typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;

extern pv_addrqh_t pmap_freeq;
extern pv_addr_t kernelpages;
extern pv_addr_t systempage;
extern pv_addr_t kernel_l1pt;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#ifdef PMAP_CACHE_VIVT
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
#endif
#ifdef PMAP_CACHE_VIPT
#define	PVF_NC		0x20		/* mapping is 'kernel' non-cacheable */
#define	PVF_MULTCLR	0x40		/* mapping is multi-colored */
#endif
#define	PVF_COLORED	0x80		/* page has or had a color */
#define	PVF_KENTRY	0x0100		/* page entered via pmap_kenter_pa */
#define	PVF_KMPAGE	0x0200		/* page is used for kmem */
#define	PVF_DIRTY	0x0400		/* page may have dirty cache lines */
#define	PVF_KMOD	0x0800		/* unmanaged page is modified */
#define	PVF_KWRITE	(PVF_KENTRY|PVF_WRITE)
#define	PVF_DMOD	(PVF_MOD|PVF_KMOD|PVF_KMPAGE)

/*
 * Commonly referenced structures
 */
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */

/*
 * Macros that we need to export
 */
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
#define	pmap_is_page_colored_p(md)	\
	(((md)->pvh_attrs & PVF_COLORED) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define pmap_phys_address(ppn)		(arm_ptob((ppn)))

/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
void	pmap_remove_all(pmap_t);
bool	pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
#define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */

#if (ARM_MMU_V6 + ARM_MMU_V7) > 0
#define	PMAP_PREFER(hint, vap, sz, td)	pmap_prefer((hint), (vap), (td))
void	pmap_prefer(vaddr_t, vaddr_t *, int);
#endif

void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);

/* Functions we use internally. */
#ifdef PMAP_STEAL_MEMORY
void	pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
void	pmap_boot_pageadd(pv_addr_t *);
vaddr_t	pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
#endif
void	pmap_bootstrap(vaddr_t, vaddr_t);

void	pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
bool	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
bool	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_debug(int);
void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);
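
/*
 * A rough sketch of how early board code strings the bootstrap routines
 * together (the names and layout here are hypothetical; real ports do
 * this from initarm(), before calling pmap_bootstrap()):
 *
 *	// link a preallocated L2 table under the kernel's L1 table,
 *	// then map the kernel image with the largest page sizes that fit
 *	pmap_link_l2pt(l1pt_va, kernel_va, &kernel_l2pt);
 *	pmap_map_chunk(l1pt_va, kernel_va, kernel_pa, kernel_size,
 *	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
 */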

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
bool	pmap_pageidlezero(paddr_t);
#define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * used by dumpsys to record the PA of the L1 table
 */
uint32_t pmap_kernel_L1_addr(void);
/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
		return (NULL);
	return (ptep);
}
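
/*
 * Example use (sketch): look up the kernel PTE for a VA before touching
 * it; vtopte() returns NULL when no L2 table covers the address, and
 * l2pte_valid() (defined later in this file) tests the PTE itself.
 *
 *	pt_entry_t *ptep = vtopte(va);
 *	if (ptep != NULL && l2pte_valid(*ptep))
 *		...		// 'va' is mapped; *ptep is its PTE
 */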

/*
 * Virtual address to physical address
 */
static inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		return (0);	/* XXXSCW: Panic? */

	return (pa);
}

/*
 * The new pmap ensures that page-tables are always mapping Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;
#if defined(_KERNEL_OPT)
/*
 * StrongARM SA-1 caches do not have a write-through mode, and ARMv6/v7
 * are handled the same way here, so on these we need to do PTE syncs.
 * When only such CPU classes are configured, evaluate this at compile
 * time.
 */
#if (ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7 != 0) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif /* _KERNEL_OPT */

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC)					\
		cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vaddr_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	}								\
} while (/*CONSTCOND*/0)
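
/*
 * Typical use (sketch): modify a PTE in memory, then write it back so
 * the MMU's table walker is guaranteed to see the change; callers
 * normally follow with a TLB shootdown for the affected VA.
 *
 *	*ptep = 0;		// invalidate the mapping
 *	PTE_SYNC(ptep);
 *	cpu_tlb_flushD_SE(va);
 */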

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))
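
/*
 * Example (sketch): reconstructing a physical address from a valid
 * small-page PTE; L2_S_OFFSET is the page-offset mask from
 * <arm/arm32/pte.h>.
 *
 *	if (l2pte_valid(pte))
 *		pa = l2pte_pa(pte) | (va & L2_S_OFFSET);
 */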

/* L1 and L2 page table macros */
#define pmap_pde_v(pde)		l1pte_valid(*(pde))
#define pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/* Size of the kernel part of the L1 page table */
#define KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
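
/*
 * Worked example, assuming the common KERNEL_BASE of 0xc0000000 and the
 * 16KB (L1_TABLE_SIZE) L1 table: 0xc0000000 >> 20 = 3072 user slots,
 * i.e. 12KB of entries, leaving KERNEL_PD_SIZE = 4KB for the kernel.
 */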

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#if defined(CPU_ARM10)
void	pmap_pte_init_arm10(void);
#endif /* CPU_ARM10 */
#if defined(CPU_ARM11)
void	pmap_pte_init_arm11(void);
#endif /* CPU_ARM11 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);

#define	PMAP_UAREA(va)		pmap_uarea(va)
void	pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

#if ARM_MMU_V7 == 1
void	pmap_pte_init_armv7(void);
#endif /* ARM_MMU_V7 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l1_s_prot_u;
extern pt_entry_t		pte_l1_s_prot_w;
extern pt_entry_t		pte_l1_s_prot_ro;
extern pt_entry_t		pte_l1_s_prot_mask;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_ro;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l2_l_prot_u;
extern pt_entry_t		pte_l2_l_prot_w;
extern pt_entry_t		pte_l2_l_prot_ro;
extern pt_entry_t		pte_l2_l_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

#endif /* !_LOCORE */

/*****************************************************************************/

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U_generic	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_generic	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_generic	(0)
#define	L1_S_PROT_MASK_generic	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_xscale	(L1_S_AP(AP_U))
#define	L1_S_PROT_W_xscale	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_xscale	(0)
#define	L1_S_PROT_MASK_xscale	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_PROT_U_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
#define	L1_S_PROT_W_armv7	(L1_S_AP(AP_W))
#define	L1_S_PROT_RO_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
#define	L1_S_PROT_MASK_armv7	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X))
#define	L1_S_CACHE_MASK_armv7	(L1_S_B|L1_S_C)

#define	L2_L_PROT_U_generic	(L2_AP(AP_U))
#define	L2_L_PROT_W_generic	(L2_AP(AP_W))
#define	L2_L_PROT_RO_generic	(0)
#define	L2_L_PROT_MASK_generic	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_xscale	(L2_AP(AP_U))
#define	L2_L_PROT_W_xscale	(L2_AP(AP_W))
#define	L2_L_PROT_RO_xscale	(0)
#define	L2_L_PROT_MASK_xscale	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_L_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_L_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_L_PROT_MASK_armv7	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X))
#define	L2_L_CACHE_MASK_armv7	(L2_B|L2_C)

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_RO_generic	(0)
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_xscale	(0)
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
#define	L2_S_PROT_W_armv7	(L2_AP0(AP_W))
#define	L2_S_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
#define	L2_S_PROT_MASK_armv7	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X))
#define	L2_S_CACHE_MASK_armv7	(L2_B|L2_C)

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)
#define	L1_S_PROTO_armv7	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)
#define	L1_C_PROTO_armv7	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XS)
#define	L2_S_PROTO_armv7	(L2_TYPE_S)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L1_S_PROT_U		pte_l1_s_prot_u
#define	L1_S_PROT_W		pte_l1_s_prot_w
#define	L1_S_PROT_RO		pte_l1_s_prot_ro
#define	L1_S_PROT_MASK		pte_l1_s_prot_mask

#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_RO		pte_l2_s_prot_ro
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L2_L_PROT_U		pte_l2_l_prot_u
#define	L2_L_PROT_W		pte_l2_l_prot_w
#define	L2_L_PROT_RO		pte_l2_l_prot_ro
#define	L2_L_PROT_MASK		pte_l2_l_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6) != 0
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_RO		L2_S_PROT_RO_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L1_S_PROT_U		L1_S_PROT_U_generic
#define	L1_S_PROT_W		L1_S_PROT_W_generic
#define	L1_S_PROT_RO		L1_S_PROT_RO_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_RO		L2_S_PROT_RO_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L2_L_PROT_U		L2_L_PROT_U_generic
#define	L2_L_PROT_W		L2_L_PROT_W_generic
#define	L2_L_PROT_RO		L2_L_PROT_RO_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#elif ARM_MMU_V7 == 1
#define	L1_S_PROT_U		L1_S_PROT_U_armv7
#define	L1_S_PROT_W		L1_S_PROT_W_armv7
#define	L1_S_PROT_RO		L1_S_PROT_RO_armv7
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv7

#define	L2_S_PROT_U		L2_S_PROT_U_armv7
#define	L2_S_PROT_W		L2_S_PROT_W_armv7
#define	L2_S_PROT_RO		L2_S_PROT_RO_armv7
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv7

#define	L2_L_PROT_U		L2_L_PROT_U_armv7
#define	L2_L_PROT_W		L2_L_PROT_W_armv7
#define	L2_L_PROT_RO		L2_L_PROT_RO_armv7
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv7

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv7
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv7
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv7

/* The PROTO values encode only the descriptor type; access permissions
 * are applied separately via the L1_S_PROT/L2_*_PROT macros above. */
#define	L1_S_PROTO		L1_S_PROTO_armv7
#define	L1_C_PROTO		L1_C_PROTO_armv7
#define	L2_S_PROTO		L2_S_PROTO_armv7

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#endif /* ARM_NMMUS > 1 */

/*
 * Macros to set and query the write permission on page descriptors.
 */
#define l1pte_set_writable(pte)	(((pte) & ~L1_S_PROT_RO) | L1_S_PROT_W)
#define l1pte_set_readonly(pte)	(((pte) & ~L1_S_PROT_W) | L1_S_PROT_RO)
#define l2pte_set_writable(pte)	(((pte) & ~L2_S_PROT_RO) | L2_S_PROT_W)
#define l2pte_set_readonly(pte)	(((pte) & ~L2_S_PROT_W) | L2_S_PROT_RO)

#define l2pte_writable_p(pte)	(((pte) & L2_S_PROT_W) == L2_S_PROT_W && \
				 (L2_S_PROT_RO == 0 || \
				  ((pte) & L2_S_PROT_RO) != L2_S_PROT_RO))

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : L1_S_PROT_RO))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : L2_L_PROT_RO))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : L2_S_PROT_RO))
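
/*
 * Illustrative sketch: assembling a user, read-only, cacheable small
 * page PTE from the macros above (pte_l2_s_cache_mode is chosen at boot
 * by the pmap_pte_init_*() routines):
 *
 *	pt_entry_t npte = L2_S_PROTO | pa |
 *	    L2_S_PROT(PTE_USER, VM_PROT_READ) | pte_l2_s_cache_mode;
 */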

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
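
/*
 * Sketch of the choice pmap_map_chunk() makes with these tests
 * (illustrative control flow only):
 *
 *	if (L1_S_MAPPABLE_P(va, pa, resid))
 *		...			// use a 1MB section mapping
 *	else if (L2_L_MAPPABLE_P(va, pa, resid))
 *		...			// use a 64KB large-page mapping
 *	else
 *		...			// fall back to 4KB small pages
 */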

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#ifndef _LOCORE

/*
 * pmap-specific data store in the vm_page structure.
 */
#define	__HAVE_VM_PAGE_MD
struct vm_page_md {
	SLIST_HEAD(,pv_entry) pvh_list;		/* pv_entry list */
	struct simplelock pvh_slock;		/* lock on this head */
	int pvh_attrs;				/* page attributes */
	u_int uro_mappings;
	u_int urw_mappings;
	union {
		u_short s_mappings[2];	/* Assume kernel count <= 65535 */
		u_int i_mappings;
	} k_u;
#define	kro_mappings	k_u.s_mappings[0]
#define	krw_mappings	k_u.s_mappings[1]
#define	k_mappings	k_u.i_mappings
};

/*
 * Set the default color of each page.
 */
#if ARM_MMU_V6 > 0
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = (pg)->phys_addr & arm_cache_prefer_mask
#else
#define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
	(pg)->mdpage.pvh_attrs = 0
#endif

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	SLIST_INIT(&(pg)->mdpage.pvh_list);				\
	simple_lock_init(&(pg)->mdpage.pvh_slock);			\
	VM_MDPAGE_PVH_ATTRS_INIT(pg);					\
	(pg)->mdpage.uro_mappings = 0;					\
	(pg)->mdpage.urw_mappings = 0;					\
	(pg)->mdpage.k_mappings = 0;					\
} while (/*CONSTCOND*/0)

#endif /* !_LOCORE */

#endif /* _KERNEL */

#endif	/* _ARM32_PMAP_H_ */