xref: /netbsd-src/sys/arch/arm/include/arm32/pmap.h (revision 7788a0781fe6ff2cce37368b4578a7ade0850cb1)
1 /*	$NetBSD: pmap.h,v 1.121 2013/07/03 21:37:35 matt Exp $	*/
2 
3 /*
4  * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
5  * All rights reserved.
6  *
7  * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  * 3. All advertising materials mentioning features or use of this software
18  *    must display the following acknowledgement:
19  *	This product includes software developed for the NetBSD Project by
20  *	Wasabi Systems, Inc.
21  * 4. The name of Wasabi Systems, Inc. may not be used to endorse
22  *    or promote products derived from this software without specific prior
23  *    written permission.
24  *
25  * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
26  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
27  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
28  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
29  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35  * POSSIBILITY OF SUCH DAMAGE.
36  */
37 
38 /*
39  * Copyright (c) 1994,1995 Mark Brinicombe.
40  * All rights reserved.
41  *
42  * Redistribution and use in source and binary forms, with or without
43  * modification, are permitted provided that the following conditions
44  * are met:
45  * 1. Redistributions of source code must retain the above copyright
46  *    notice, this list of conditions and the following disclaimer.
47  * 2. Redistributions in binary form must reproduce the above copyright
48  *    notice, this list of conditions and the following disclaimer in the
49  *    documentation and/or other materials provided with the distribution.
50  * 3. All advertising materials mentioning features or use of this software
51  *    must display the following acknowledgement:
52  *	This product includes software developed by Mark Brinicombe
53  * 4. The name of the author may not be used to endorse or promote products
54  *    derived from this software without specific prior written permission.
55  *
56  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
57  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
58  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
59  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
60  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
61  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
62  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
63  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
64  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
65  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
66  */
67 
68 #ifndef	_ARM32_PMAP_H_
69 #define	_ARM32_PMAP_H_
70 
71 #ifdef _KERNEL
72 
73 #include <arm/cpuconf.h>
74 #include <arm/arm32/pte.h>
75 #ifndef _LOCORE
76 #if defined(_KERNEL_OPT)
77 #include "opt_arm32_pmap.h"
78 #endif
79 #include <arm/cpufunc.h>
80 #include <uvm/uvm_object.h>
81 #endif
82 
83 /*
84  * a pmap describes a process's 4GB virtual address space.  this
85  * virtual address space can be broken up into 4096 1MB regions which
86  * are described by L1 PTEs in the L1 table.
87  *
88  * There is a line drawn at KERNEL_BASE.  Everything below that line
89  * changes when the VM context is switched.  Everything above that line
90  * is the same no matter which VM context is running.  This is achieved
91  * by making the L1 PTEs for those slots above KERNEL_BASE reference
92  * kernel L2 tables.
93  *
94  * The basic layout of the virtual address space thus looks like this:
95  *
96  *	0xffffffff
97  *	.
98  *	.
99  *	.
100  *	KERNEL_BASE
101  *	--------------------
102  *	.
103  *	.
104  *	.
105  *	0x00000000
106  */
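
/*
 * Illustrative arithmetic (a sketch, assuming the usual 1MB L1 sections
 * and 4-byte L1 descriptors): 4GB / 1MB = 4096 L1 slots, so a full L1
 * translation table is 4096 * 4 = 16KB.  The slots at and above
 * (KERNEL_BASE >> L1_S_SHIFT) reference the shared kernel L2 tables in
 * every pmap.
 */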
107 
108 /*
109  * The number of L2 descriptor tables which can be tracked by an l2_dtable.
110  * A bucket size of 16 provides for 16MB of contiguous virtual address
111  * space per l2_dtable. Most processes will, therefore, require only two or
112  * three of these to map their whole working set.
113  */
114 #define	L2_BUCKET_LOG2	4
115 #define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)
116 
117 /*
118  * Given the above "L2-descriptors-per-l2_dtable" constant, the number
119  * of l2_dtable structures required to track all possible page descriptors
120  * mappable by an L1 translation table is given by the following constants:
121  */
122 #define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
123 #define	L2_SIZE		(1 << L2_LOG2)
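
/*
 * Worked example (assuming L1_S_SHIFT == 20, i.e. 1MB L1 sections):
 * L2_LOG2 = (32 - 20) - 4 = 8, so L2_SIZE = 256.  Each l2_dtable then
 * covers 16 x 1MB = 16MB, and 256 * 16MB spans the full 4GB address
 * space.
 */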
124 
125 /*
126  * tell MI code how the cache is indexed and tagged.  ARMv6 and ARMv7
127  * caches are VIPT (physically tagged); all earlier caches are VIVT.
128  */
129 #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
130 #define PMAP_CACHE_VIPT
131 #else
132 #define PMAP_CACHE_VIVT
133 #endif
134 
135 #ifndef _LOCORE
136 
137 struct l1_ttable;
138 struct l2_dtable;
139 
140 /*
141  * Track cache/tlb occupancy using the following structure
142  */
143 union pmap_cache_state {
144 	struct {
145 		union {
146 			uint8_t csu_cache_b[2];
147 			uint16_t csu_cache;
148 		} cs_cache_u;
149 
150 		union {
151 			uint8_t csu_tlb_b[2];
152 			uint16_t csu_tlb;
153 		} cs_tlb_u;
154 	} cs_s;
155 	uint32_t cs_all;
156 };
157 #define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
158 #define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
159 #define	cs_cache	cs_s.cs_cache_u.csu_cache
160 #define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
161 #define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
162 #define	cs_tlb		cs_s.cs_tlb_u.csu_tlb
163 
164 /*
165  * Assigned to cs_all to force cacheops to work for a particular pmap
166  */
167 #define	PMAP_CACHE_STATE_ALL	0xffffffffu
168 
169 /*
170  * This structure is used by machine-dependent code to describe
171  * static mappings of devices, created at bootstrap time.
172  */
173 struct pmap_devmap {
174 	vaddr_t		pd_va;		/* virtual address */
175 	paddr_t		pd_pa;		/* physical address */
176 	psize_t		pd_size;	/* size of region */
177 	vm_prot_t	pd_prot;	/* protection code */
178 	int		pd_cache;	/* cache attributes */
179 };
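
/*
 * Illustrative use only (the symbols and addresses below are made up):
 * board code typically provides a statically initialized table,
 * terminated by an entry whose pd_size is zero, and hands it to
 * pmap_devmap_bootstrap() or pmap_devmap_register(), declared further
 * down in this header.
 *
 *	static const struct pmap_devmap myboard_devmap[] = {
 *		{ MYBOARD_UART_VBASE, MYBOARD_UART_PBASE, L1_S_SIZE,
 *		  VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE },
 *		{ 0, 0, 0, 0, 0 }
 *	};
 */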
180 
181 /*
182  * The pmap structure itself
183  */
184 struct pmap {
185 	uint8_t			pm_domain;
186 	bool			pm_remove_all;
187 	bool			pm_activated;
188 	struct l1_ttable	*pm_l1;
189 #ifndef ARM_HAS_VBAR
190 	pd_entry_t		*pm_pl1vec;
191 #endif
192 	pd_entry_t		pm_l1vec;
193 	union pmap_cache_state	pm_cstate;
194 	struct uvm_object	pm_obj;
195 	kmutex_t		pm_obj_lock;
196 #define	pm_lock pm_obj.vmobjlock
197 	struct l2_dtable	*pm_l2[L2_SIZE];
198 	struct pmap_statistics	pm_stats;
199 	LIST_ENTRY(pmap)	pm_list;
200 };
201 
202 /*
203  * Physical / virtual address structure. In a number of places (particularly
204  * during bootstrapping) we need to keep track of the physical and virtual
205  * addresses of various pages
206  */
207 typedef struct pv_addr {
208 	SLIST_ENTRY(pv_addr) pv_list;
209 	paddr_t pv_pa;
210 	vaddr_t pv_va;
211 	vsize_t pv_size;
212 	uint8_t pv_cache;
213 	uint8_t pv_prot;
214 } pv_addr_t;
215 typedef SLIST_HEAD(, pv_addr) pv_addrqh_t;
216 
217 extern pv_addrqh_t pmap_freeq;
218 extern pv_addr_t kernelstack;
219 extern pv_addr_t abtstack;
220 extern pv_addr_t fiqstack;
221 extern pv_addr_t irqstack;
222 extern pv_addr_t undstack;
223 extern pv_addr_t idlestack;
224 extern pv_addr_t systempage;
225 extern pv_addr_t kernel_l1pt;
226 
227 /*
228  * Determine various modes for PTEs (user vs. kernel, cacheable
229  * vs. non-cacheable).
230  */
231 #define	PTE_KERNEL	0
232 #define	PTE_USER	1
233 #define	PTE_NOCACHE	0
234 #define	PTE_CACHE	1
235 #define	PTE_PAGETABLE	2
236 
237 /*
238  * Flags that indicate attributes of pages or mappings of pages.
239  *
240  * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
241  * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
242  * pv_entry's for each page.  They live in the same "namespace" so
243  * that we can clear multiple attributes at a time.
244  *
245  * Note the "non-cacheable" flag generally means the page has
246  * multiple mappings in a given address space.
247  */
248 #define	PVF_MOD		0x01		/* page is modified */
249 #define	PVF_REF		0x02		/* page is referenced */
250 #define	PVF_WIRED	0x04		/* mapping is wired */
251 #define	PVF_WRITE	0x08		/* mapping is writable */
252 #define	PVF_EXEC	0x10		/* mapping is executable */
253 #ifdef PMAP_CACHE_VIVT
254 #define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
255 #define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
256 #define	PVF_NC		(PVF_UNC|PVF_KNC)
257 #endif
258 #ifdef PMAP_CACHE_VIPT
259 #define	PVF_NC		0x20		/* mapping is 'kernel' non-cacheable */
260 #define	PVF_MULTCLR	0x40		/* mapping is multi-colored */
261 #endif
262 #define	PVF_COLORED	0x80		/* page has or had a color */
263 #define	PVF_KENTRY	0x0100		/* page entered via pmap_kenter_pa */
264 #define	PVF_KMPAGE	0x0200		/* page is used for kmem */
265 #define	PVF_DIRTY	0x0400		/* page may have dirty cache lines */
266 #define	PVF_KMOD	0x0800		/* unmanaged page is modified  */
267 #define	PVF_KWRITE	(PVF_KENTRY|PVF_WRITE)
268 #define	PVF_DMOD	(PVF_MOD|PVF_KMOD|PVF_KMPAGE)
269 
270 /*
271  * Commonly referenced structures
272  */
273 extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */
274 extern int		arm_poolpage_vmfreelist;
275 
276 /*
277  * Macros that we need to export
278  */
279 #define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
280 #define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)
281 
282 #define	pmap_is_modified(pg)	\
283 	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
284 #define	pmap_is_referenced(pg)	\
285 	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)
286 #define	pmap_is_page_colored_p(md)	\
287 	(((md)->pvh_attrs & PVF_COLORED) != 0)
288 
289 #define	pmap_copy(dp, sp, da, l, sa)	/* nothing */
290 
291 #define pmap_phys_address(ppn)		(arm_ptob((ppn)))
292 u_int arm32_mmap_flags(paddr_t);
293 #define ARM32_MMAP_WRITECOMBINE	0x40000000
294 #define ARM32_MMAP_CACHEABLE		0x20000000
295 #define pmap_mmap_flags(ppn)			arm32_mmap_flags(ppn)
296 
297 /*
298  * Functions that we need to export
299  */
300 void	pmap_procwr(struct proc *, vaddr_t, int);
301 void	pmap_remove_all(pmap_t);
302 bool	pmap_extract(pmap_t, vaddr_t, paddr_t *);
303 
304 #define	PMAP_NEED_PROCWR
305 #define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */
306 #define	PMAP_ENABLE_PMAP_KMPAGE	/* enable the PMAP_KMPAGE flag */
307 
308 #if (ARM_MMU_V6 + ARM_MMU_V7) > 0
309 #define	PMAP_PREFER(hint, vap, sz, td)	pmap_prefer((hint), (vap), (td))
310 void	pmap_prefer(vaddr_t, vaddr_t *, int);
311 #endif
312 
313 void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);
314 
315 /* Functions we use internally. */
316 #ifdef PMAP_STEAL_MEMORY
317 void	pmap_boot_pagealloc(psize_t, psize_t, psize_t, pv_addr_t *);
318 void	pmap_boot_pageadd(pv_addr_t *);
319 vaddr_t	pmap_steal_memory(vsize_t, vaddr_t *, vaddr_t *);
320 #endif
321 void	pmap_bootstrap(vaddr_t, vaddr_t);
322 
323 void	pmap_do_remove(pmap_t, vaddr_t, vaddr_t, int);
324 int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
325 bool	pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
326 bool	pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
327 void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);
328 
329 void	pmap_debug(int);
330 void	pmap_postinit(void);
331 
332 void	vector_page_setprot(int);
333 
334 const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
335 const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);
336 
337 /* Bootstrapping routines. */
338 void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
339 void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
340 vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
341 void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
342 void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
343 void	pmap_devmap_register(const struct pmap_devmap *);
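
/*
 * Bootstrap-time sketch (the va/pa/size values are hypothetical; only
 * the functions and kernel_l1pt above are taken from this header):
 * map a physically contiguous chunk into the kernel L1 table, letting
 * pmap_map_chunk() choose sections or pages as alignment permits:
 *
 *	pmap_map_chunk(kernel_l1pt.pv_va, va, pa, size,
 *	    VM_PROT_READ | VM_PROT_WRITE, PTE_CACHE);
 */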
344 
345 /*
346  * Special page zero routine for use by the idle loop (no cache cleans).
347  */
348 bool	pmap_pageidlezero(paddr_t);
349 #define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))
350 
351 /*
352  * used by dumpsys to record the PA of the L1 table
353  */
354 uint32_t pmap_kernel_L1_addr(void);
355 /*
356  * The current top of kernel VM
357  */
358 extern vaddr_t	pmap_curmaxkvaddr;
359 
360 /*
361  * Useful macros and constants
362  */
363 
364 /* Virtual address to page table entry */
365 static inline pt_entry_t *
366 vtopte(vaddr_t va)
367 {
368 	pd_entry_t *pdep;
369 	pt_entry_t *ptep;
370 
371 	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == false)
372 		return (NULL);
373 	return (ptep);
374 }
375 
376 /*
377  * Virtual address to physical address
378  */
379 static inline paddr_t
380 vtophys(vaddr_t va)
381 {
382 	paddr_t pa;
383 
384 	if (pmap_extract(pmap_kernel(), va, &pa) == false)
385 		return (0);	/* XXXSCW: Panic? */
386 
387 	return (pa);
388 }
389 
390 /*
391  * The new pmap ensures that page tables are always mapped Write-Thru.
392  * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
393  * on every change.
394  *
395  * Unfortunately, not all CPUs have a write-through cache mode.  So we
396  * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
397  * and if there is the chance for PTE syncs to be needed, we define
398  * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
399  * the code.
400  */
401 extern int pmap_needs_pte_sync;
402 #if defined(_KERNEL_OPT)
403 /*
404  * StrongARM SA-1 caches do not have a write-through mode.  So, on these,
405  * we need to do PTE syncs.  If only SA-1 is configured, then evaluate
406  * this at compile time.
407  */
408 #if (ARM_MMU_SA1 + ARM_MMU_V6 != 0) && (ARM_NMMUS == 1)
409 #define	PMAP_INCLUDE_PTE_SYNC
410 #if (ARM_MMU_V6 > 0)
411 #define	PMAP_NEEDS_PTE_SYNC	1
412 #elif (ARM_MMU_SA1 == 0)
413 #define	PMAP_NEEDS_PTE_SYNC	0
414 #endif
415 #endif
416 #endif /* _KERNEL_OPT */
417 
418 /*
419  * Provide a fallback in case we were not able to determine it at
420  * compile-time.
421  */
422 #ifndef PMAP_NEEDS_PTE_SYNC
423 #define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
424 #define	PMAP_INCLUDE_PTE_SYNC
425 #endif
426 
427 static inline void
428 pmap_ptesync(pt_entry_t *ptep, size_t cnt)
429 {
430 	if (PMAP_NEEDS_PTE_SYNC)
431 		cpu_dcache_wb_range((vaddr_t)ptep, cnt * sizeof(pt_entry_t));
432 #if ARM_MMU_V7 > 0
433 	__asm("dsb");	/* ensure the PTE store has completed */
434 #endif
435 }
436 
437 #define	PTE_SYNC(ptep)			pmap_ptesync((ptep), 1)
438 #define	PTE_SYNC_RANGE(ptep, cnt)	pmap_ptesync((ptep), (cnt))
439 
440 #define	l1pte_valid(pde)	((pde) != 0)
441 #define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
442 #define	l1pte_supersection_p(pde) (l1pte_section_p(pde)	\
443 				&& ((pde) & L1_S_V6_SUPER) != 0)
444 #define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
445 #define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)
446 
447 #define l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
448 #define	l2pte_valid(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
449 #define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
450 #define l2pte_minidata(pte)	(((pte) & \
451 				 (L2_B | L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))\
452 				 == (L2_C | L2_XS_T_TEX(TEX_XSCALE_X)))
453 
454 static inline void
455 l2pte_set(pt_entry_t *ptep, pt_entry_t pte, pt_entry_t opte)
456 {
457 	KASSERT(*ptep == opte);
458 	*ptep = pte;
459 	for (vsize_t k = 1; k < PAGE_SIZE / L2_S_SIZE; k++) {
460 		KASSERT(ptep[k] == (opte ? opte + k * L2_S_SIZE : 0));
461 		pte += L2_S_SIZE;
462 		ptep[k] = pte;
463 	}
464 }
465 
466 static inline void
467 l2pte_reset(pt_entry_t *ptep)
468 {
469 	*ptep = 0;
470 	for (vsize_t k = 1; k < PAGE_SIZE / L2_S_SIZE; k++) {
471 		ptep[k] = 0;
472 	}
473 }
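
/*
 * Typical update sequence (a sketch; the caller is assumed to have
 * looked up "ptep" and computed "npte"/"opte" itself):
 *
 *	l2pte_set(ptep, npte, opte);	write the entry and its siblings
 *	PTE_SYNC(ptep);			clean/sync the PTE if required
 */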
474 
475 /* L1 and L2 page table macros */
476 #define pmap_pde_v(pde)		l1pte_valid(*(pde))
477 #define pmap_pde_section(pde)	l1pte_section_p(*(pde))
478 #define pmap_pde_supersection(pde)	l1pte_supersection_p(*(pde))
479 #define pmap_pde_page(pde)	l1pte_page_p(*(pde))
480 #define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))
481 
482 #define	pmap_pte_v(pte)		l2pte_valid(*(pte))
483 #define	pmap_pte_pa(pte)	l2pte_pa(*(pte))
484 
485 /* Size of the kernel part of the L1 page table */
486 #define KERNEL_PD_SIZE	\
487 	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
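
/*
 * Worked example (assuming KERNEL_BASE == 0xc0000000, a 16KB L1 table
 * and L1_S_SHIFT == 20): the kernel part starts at slot 0xc00 (3072),
 * so KERNEL_PD_SIZE = 16384 - 3072 * 4 = 4096 bytes, i.e. 1024
 * descriptors covering the top 1GB of the address space.
 */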
488 
489 void	bzero_page(vaddr_t);
490 void	bcopy_page(vaddr_t, vaddr_t);
491 
492 #ifdef FPU_VFP
493 void	bzero_page_vfp(vaddr_t);
494 void	bcopy_page_vfp(vaddr_t, vaddr_t);
495 #endif
496 
497 /************************* ARM MMU configuration *****************************/
498 
499 #if (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0
500 void	pmap_copy_page_generic(paddr_t, paddr_t);
501 void	pmap_zero_page_generic(paddr_t);
502 
503 void	pmap_pte_init_generic(void);
504 #if defined(CPU_ARM8)
505 void	pmap_pte_init_arm8(void);
506 #endif
507 #if defined(CPU_ARM9)
508 void	pmap_pte_init_arm9(void);
509 #endif /* CPU_ARM9 */
510 #if defined(CPU_ARM10)
511 void	pmap_pte_init_arm10(void);
512 #endif /* CPU_ARM10 */
513 #if defined(CPU_ARM11)	/* ARM_MMU_V6 */
514 void	pmap_pte_init_arm11(void);
515 #endif /* CPU_ARM11 */
516 #if defined(CPU_ARM11MPCORE)	/* ARM_MMU_V6 */
517 void	pmap_pte_init_arm11mpcore(void);
518 #endif
519 #if ARM_MMU_V7 == 1
520 void	pmap_pte_init_armv7(void);
521 #endif /* ARM_MMU_V7 */
522 #endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1 + ARM_MMU_V6 + ARM_MMU_V7) != 0 */
523 
524 #if ARM_MMU_SA1 == 1
525 void	pmap_pte_init_sa1(void);
526 #endif /* ARM_MMU_SA1 == 1 */
527 
528 #if ARM_MMU_XSCALE == 1
529 void	pmap_copy_page_xscale(paddr_t, paddr_t);
530 void	pmap_zero_page_xscale(paddr_t);
531 
532 void	pmap_pte_init_xscale(void);
533 
534 void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);
535 
536 #define	PMAP_UAREA(va)		pmap_uarea(va)
537 void	pmap_uarea(vaddr_t);
538 #endif /* ARM_MMU_XSCALE == 1 */
539 
540 extern pt_entry_t		pte_l1_s_cache_mode;
541 extern pt_entry_t		pte_l1_s_cache_mask;
542 
543 extern pt_entry_t		pte_l2_l_cache_mode;
544 extern pt_entry_t		pte_l2_l_cache_mask;
545 
546 extern pt_entry_t		pte_l2_s_cache_mode;
547 extern pt_entry_t		pte_l2_s_cache_mask;
548 
549 extern pt_entry_t		pte_l1_s_cache_mode_pt;
550 extern pt_entry_t		pte_l2_l_cache_mode_pt;
551 extern pt_entry_t		pte_l2_s_cache_mode_pt;
552 
553 extern pt_entry_t		pte_l1_s_wc_mode;
554 extern pt_entry_t		pte_l2_l_wc_mode;
555 extern pt_entry_t		pte_l2_s_wc_mode;
556 
557 extern pt_entry_t		pte_l1_s_prot_u;
558 extern pt_entry_t		pte_l1_s_prot_w;
559 extern pt_entry_t		pte_l1_s_prot_ro;
560 extern pt_entry_t		pte_l1_s_prot_mask;
561 
562 extern pt_entry_t		pte_l2_s_prot_u;
563 extern pt_entry_t		pte_l2_s_prot_w;
564 extern pt_entry_t		pte_l2_s_prot_ro;
565 extern pt_entry_t		pte_l2_s_prot_mask;
566 
567 extern pt_entry_t		pte_l2_l_prot_u;
568 extern pt_entry_t		pte_l2_l_prot_w;
569 extern pt_entry_t		pte_l2_l_prot_ro;
570 extern pt_entry_t		pte_l2_l_prot_mask;
571 
572 extern pt_entry_t		pte_l1_ss_proto;
573 extern pt_entry_t		pte_l1_s_proto;
574 extern pt_entry_t		pte_l1_c_proto;
575 extern pt_entry_t		pte_l2_s_proto;
576 
577 extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
578 extern void (*pmap_zero_page_func)(paddr_t);
579 
580 #endif /* !_LOCORE */
581 
582 /*****************************************************************************/
583 
584 /*
585  * Definitions for MMU domains
586  */
587 #define	PMAP_DOMAINS		15	/* 15 'user' domains (1-15) */
588 #define	PMAP_DOMAIN_KERNEL	0	/* The kernel uses domain #0 */
589 
590 /*
591  * These macros define the various bit masks in the PTE.
592  *
593  * We use these macros since we use different bits on different processor
594  * models.
595  */
596 #define	L1_S_PROT_U_generic	(L1_S_AP(AP_U))
597 #define	L1_S_PROT_W_generic	(L1_S_AP(AP_W))
598 #define	L1_S_PROT_RO_generic	(0)
599 #define	L1_S_PROT_MASK_generic	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
600 
601 #define	L1_S_PROT_U_xscale	(L1_S_AP(AP_U))
602 #define	L1_S_PROT_W_xscale	(L1_S_AP(AP_W))
603 #define	L1_S_PROT_RO_xscale	(0)
604 #define	L1_S_PROT_MASK_xscale	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
605 
606 #define	L1_S_PROT_U_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
607 #define	L1_S_PROT_W_armv6	(L1_S_AP(AP_W))
608 #define	L1_S_PROT_RO_armv6	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
609 #define	L1_S_PROT_MASK_armv6	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
610 
611 #define	L1_S_PROT_U_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_U))
612 #define	L1_S_PROT_W_armv7	(L1_S_AP(AP_W))
613 #define	L1_S_PROT_RO_armv7	(L1_S_AP(AP_R) | L1_S_AP(AP_RO))
614 #define	L1_S_PROT_MASK_armv7	(L1_S_PROT_U|L1_S_PROT_W|L1_S_PROT_RO)
615 
616 #define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
617 #define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_XSCALE_X))
618 #define	L1_S_CACHE_MASK_armv6	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX))
619 #define	L1_S_CACHE_MASK_armv7	(L1_S_B|L1_S_C|L1_S_XS_TEX(TEX_ARMV6_TEX)|L1_S_V6_S)
620 
621 #define	L2_L_PROT_U_generic	(L2_AP(AP_U))
622 #define	L2_L_PROT_W_generic	(L2_AP(AP_W))
623 #define	L2_L_PROT_RO_generic	(0)
624 #define	L2_L_PROT_MASK_generic	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
625 
626 #define	L2_L_PROT_U_xscale	(L2_AP(AP_U))
627 #define	L2_L_PROT_W_xscale	(L2_AP(AP_W))
628 #define	L2_L_PROT_RO_xscale	(0)
629 #define	L2_L_PROT_MASK_xscale	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
630 
631 #define	L2_L_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
632 #define	L2_L_PROT_W_armv6n	(L2_AP0(AP_W))
633 #define	L2_L_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
634 #define	L2_L_PROT_MASK_armv6n	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
635 
636 #define	L2_L_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
637 #define	L2_L_PROT_W_armv7	(L2_AP0(AP_W))
638 #define	L2_L_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
639 #define	L2_L_PROT_MASK_armv7	(L2_L_PROT_U|L2_L_PROT_W|L2_L_PROT_RO)
640 
641 #define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
642 #define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_L_TEX(TEX_XSCALE_X))
643 #define	L2_L_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX))
644 #define	L2_L_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_L_TEX(TEX_ARMV6_TEX)|L2_XS_S)
645 
646 #define	L2_S_PROT_U_generic	(L2_AP(AP_U))
647 #define	L2_S_PROT_W_generic	(L2_AP(AP_W))
648 #define	L2_S_PROT_RO_generic	(0)
649 #define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
650 
651 #define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
652 #define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
653 #define	L2_S_PROT_RO_xscale	(0)
654 #define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
655 
656 #define	L2_S_PROT_U_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_U))
657 #define	L2_S_PROT_W_armv6n	(L2_AP0(AP_W))
658 #define	L2_S_PROT_RO_armv6n	(L2_AP0(AP_R) | L2_AP0(AP_RO))
659 #define	L2_S_PROT_MASK_armv6n	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
660 
661 #define	L2_S_PROT_U_armv7	(L2_AP0(AP_R) | L2_AP0(AP_U))
662 #define	L2_S_PROT_W_armv7	(L2_AP0(AP_W))
663 #define	L2_S_PROT_RO_armv7	(L2_AP0(AP_R) | L2_AP0(AP_RO))
664 #define	L2_S_PROT_MASK_armv7	(L2_S_PROT_U|L2_S_PROT_W|L2_S_PROT_RO)
665 
666 #define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
667 #define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XS_T_TEX(TEX_XSCALE_X))
668 #define	L2_XS_CACHE_MASK_armv6	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX))
669 #define	L2_S_CACHE_MASK_armv6n	L2_XS_CACHE_MASK_armv6
670 #ifdef	ARMV6_EXTENDED_SMALL_PAGE
671 #define	L2_S_CACHE_MASK_armv6c	L2_XS_CACHE_MASK_armv6
672 #else
673 #define	L2_S_CACHE_MASK_armv6c	L2_S_CACHE_MASK_generic
674 #endif
675 #define	L2_S_CACHE_MASK_armv7	(L2_B|L2_C|L2_V6_XS_TEX(TEX_ARMV6_TEX)|L2_XS_S)
676 
677 
678 #define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
679 #define	L1_S_PROTO_xscale	(L1_TYPE_S)
680 #define	L1_S_PROTO_armv6	(L1_TYPE_S)
681 #define	L1_S_PROTO_armv7	(L1_TYPE_S)
682 
683 #define	L1_SS_PROTO_generic	0
684 #define	L1_SS_PROTO_xscale	0
685 #define	L1_SS_PROTO_armv6	(L1_TYPE_S | L1_S_V6_SS)
686 #define	L1_SS_PROTO_armv7	(L1_TYPE_S | L1_S_V6_SS)
687 
688 #define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
689 #define	L1_C_PROTO_xscale	(L1_TYPE_C)
690 #define	L1_C_PROTO_armv6	(L1_TYPE_C)
691 #define	L1_C_PROTO_armv7	(L1_TYPE_C)
692 
693 #define	L2_L_PROTO		(L2_TYPE_L)
694 
695 #define	L2_S_PROTO_generic	(L2_TYPE_S)
696 #define	L2_S_PROTO_xscale	(L2_TYPE_XS)
697 #ifdef	ARMV6_EXTENDED_SMALL_PAGE
698 #define	L2_S_PROTO_armv6c	(L2_TYPE_XS)    /* XP=0, extended small page */
699 #else
700 #define	L2_S_PROTO_armv6c	(L2_TYPE_S)	/* XP=0, subpage APs */
701 #endif
702 #define	L2_S_PROTO_armv6n	(L2_TYPE_S)	/* with XP=1 */
703 #define	L2_S_PROTO_armv7	(L2_TYPE_S)
704 
705 /*
706  * User-visible names for the ones that vary with MMU class.
707  */
708 
709 #if ARM_NMMUS > 1
710 /* More than one MMU class configured; use variables. */
711 #define	L1_S_PROT_U		pte_l1_s_prot_u
712 #define	L1_S_PROT_W		pte_l1_s_prot_w
713 #define	L1_S_PROT_RO		pte_l1_s_prot_ro
714 #define	L1_S_PROT_MASK		pte_l1_s_prot_mask
715 
716 #define	L2_S_PROT_U		pte_l2_s_prot_u
717 #define	L2_S_PROT_W		pte_l2_s_prot_w
718 #define	L2_S_PROT_RO		pte_l2_s_prot_ro
719 #define	L2_S_PROT_MASK		pte_l2_s_prot_mask
720 
721 #define	L2_L_PROT_U		pte_l2_l_prot_u
722 #define	L2_L_PROT_W		pte_l2_l_prot_w
723 #define	L2_L_PROT_RO		pte_l2_l_prot_ro
724 #define	L2_L_PROT_MASK		pte_l2_l_prot_mask
725 
726 #define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
727 #define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
728 #define	L2_S_CACHE_MASK		pte_l2_s_cache_mask
729 
730 #define	L1_SS_PROTO		pte_l1_ss_proto
731 #define	L1_S_PROTO		pte_l1_s_proto
732 #define	L1_C_PROTO		pte_l1_c_proto
733 #define	L2_S_PROTO		pte_l2_s_proto
734 
735 #define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
736 #define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
737 #elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
738 #define	L1_S_PROT_U		L1_S_PROT_U_generic
739 #define	L1_S_PROT_W		L1_S_PROT_W_generic
740 #define	L1_S_PROT_RO		L1_S_PROT_RO_generic
741 #define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic
742 
743 #define	L2_S_PROT_U		L2_S_PROT_U_generic
744 #define	L2_S_PROT_W		L2_S_PROT_W_generic
745 #define	L2_S_PROT_RO		L2_S_PROT_RO_generic
746 #define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic
747 
748 #define	L2_L_PROT_U		L2_L_PROT_U_generic
749 #define	L2_L_PROT_W		L2_L_PROT_W_generic
750 #define	L2_L_PROT_RO		L2_L_PROT_RO_generic
751 #define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic
752 
753 #define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
754 #define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
755 #define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic
756 
757 #define	L1_SS_PROTO		L1_SS_PROTO_generic
758 #define	L1_S_PROTO		L1_S_PROTO_generic
759 #define	L1_C_PROTO		L1_C_PROTO_generic
760 #define	L2_S_PROTO		L2_S_PROTO_generic
761 
762 #define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
763 #define	pmap_zero_page(d)	pmap_zero_page_generic((d))
764 #elif ARM_MMU_V6N != 0
765 #define	L1_S_PROT_U		L1_S_PROT_U_armv6
766 #define	L1_S_PROT_W		L1_S_PROT_W_armv6
767 #define	L1_S_PROT_RO		L1_S_PROT_RO_armv6
768 #define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv6
769 
770 #define	L2_S_PROT_U		L2_S_PROT_U_armv6n
771 #define	L2_S_PROT_W		L2_S_PROT_W_armv6n
772 #define	L2_S_PROT_RO		L2_S_PROT_RO_armv6n
773 #define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv6n
774 
775 #define	L2_L_PROT_U		L2_L_PROT_U_armv6n
776 #define	L2_L_PROT_W		L2_L_PROT_W_armv6n
777 #define	L2_L_PROT_RO		L2_L_PROT_RO_armv6n
778 #define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv6n
779 
780 #define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv6
781 #define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv6
782 #define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv6n
783 
784 /* These prototypes make writeable mappings, while the other MMU types
785  * make read-only mappings. */
786 #define	L1_SS_PROTO		L1_SS_PROTO_armv6
787 #define	L1_S_PROTO		L1_S_PROTO_armv6
788 #define	L1_C_PROTO		L1_C_PROTO_armv6
789 #define	L2_S_PROTO		L2_S_PROTO_armv6n
790 
791 #define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
792 #define	pmap_zero_page(d)	pmap_zero_page_generic((d))
793 #elif ARM_MMU_V6C != 0
794 #define	L1_S_PROT_U		L1_S_PROT_U_generic
795 #define	L1_S_PROT_W		L1_S_PROT_W_generic
796 #define	L1_S_PROT_RO		L1_S_PROT_RO_generic
797 #define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic
798 
799 #define	L2_S_PROT_U		L2_S_PROT_U_generic
800 #define	L2_S_PROT_W		L2_S_PROT_W_generic
801 #define	L2_S_PROT_RO		L2_S_PROT_RO_generic
802 #define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic
803 
804 #define	L2_L_PROT_U		L2_L_PROT_U_generic
805 #define	L2_L_PROT_W		L2_L_PROT_W_generic
806 #define	L2_L_PROT_RO		L2_L_PROT_RO_generic
807 #define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic
808 
809 #define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
810 #define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
811 #define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic
812 
813 #define	L1_SS_PROTO		L1_SS_PROTO_generic
814 #define	L1_S_PROTO		L1_S_PROTO_generic
815 #define	L1_C_PROTO		L1_C_PROTO_generic
816 #define	L2_S_PROTO		L2_S_PROTO_generic
817 
818 #define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
819 #define	pmap_zero_page(d)	pmap_zero_page_generic((d))
820 #elif ARM_MMU_XSCALE == 1
821 #define	L1_S_PROT_U		L1_S_PROT_U_generic
822 #define	L1_S_PROT_W		L1_S_PROT_W_generic
823 #define	L1_S_PROT_RO		L1_S_PROT_RO_generic
824 #define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic
825 
826 #define	L2_S_PROT_U		L2_S_PROT_U_xscale
827 #define	L2_S_PROT_W		L2_S_PROT_W_xscale
828 #define	L2_S_PROT_RO		L2_S_PROT_RO_xscale
829 #define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale
830 
831 #define	L2_L_PROT_U		L2_L_PROT_U_generic
832 #define	L2_L_PROT_W		L2_L_PROT_W_generic
833 #define	L2_L_PROT_RO		L2_L_PROT_RO_generic
834 #define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic
835 
836 #define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
837 #define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
838 #define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale
839 
840 #define	L1_SS_PROTO		L1_SS_PROTO_xscale
841 #define	L1_S_PROTO		L1_S_PROTO_xscale
842 #define	L1_C_PROTO		L1_C_PROTO_xscale
843 #define	L2_S_PROTO		L2_S_PROTO_xscale
844 
845 #define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
846 #define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
847 #elif ARM_MMU_V7 == 1
848 #define	L1_S_PROT_U		L1_S_PROT_U_armv7
849 #define	L1_S_PROT_W		L1_S_PROT_W_armv7
850 #define	L1_S_PROT_RO		L1_S_PROT_RO_armv7
851 #define	L1_S_PROT_MASK		L1_S_PROT_MASK_armv7
852 
853 #define	L2_S_PROT_U		L2_S_PROT_U_armv7
854 #define	L2_S_PROT_W		L2_S_PROT_W_armv7
855 #define	L2_S_PROT_RO		L2_S_PROT_RO_armv7
856 #define	L2_S_PROT_MASK		L2_S_PROT_MASK_armv7
857 
858 #define	L2_L_PROT_U		L2_L_PROT_U_armv7
859 #define	L2_L_PROT_W		L2_L_PROT_W_armv7
860 #define	L2_L_PROT_RO		L2_L_PROT_RO_armv7
861 #define	L2_L_PROT_MASK		L2_L_PROT_MASK_armv7
862 
863 #define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_armv7
864 #define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_armv7
865 #define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_armv7
866 
867 /* These prototypes make writeable mappings, while the other MMU types
868  * make read-only mappings. */
869 #define	L1_SS_PROTO		L1_SS_PROTO_armv7
870 #define	L1_S_PROTO		L1_S_PROTO_armv7
871 #define	L1_C_PROTO		L1_C_PROTO_armv7
872 #define	L2_S_PROTO		L2_S_PROTO_armv7
873 
874 #define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
875 #define	pmap_zero_page(d)	pmap_zero_page_generic((d))
876 #endif /* ARM_NMMUS > 1 */
877 
878 /*
879  * Macros to set and query the write permission on page descriptors.
880  */
881 #define l1pte_set_writable(pte)	(((pte) & ~L1_S_PROT_RO) | L1_S_PROT_W)
882 #define l1pte_set_readonly(pte)	(((pte) & ~L1_S_PROT_W) | L1_S_PROT_RO)
883 #define l2pte_set_writable(pte)	(((pte) & ~L2_S_PROT_RO) | L2_S_PROT_W)
884 #define l2pte_set_readonly(pte)	(((pte) & ~L2_S_PROT_W) | L2_S_PROT_RO)
885 
886 #define l2pte_writable_p(pte)	(((pte) & L2_S_PROT_W) == L2_S_PROT_W && \
887 				 (L2_S_PROT_RO == 0 || \
888 				  ((pte) & L2_S_PROT_RO) != L2_S_PROT_RO))
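
/*
 * Example (a sketch only; "ptep" is assumed to have been looked up by
 * the caller): downgrading an existing small-page mapping to read-only.
 *
 *	const pt_entry_t opte = *ptep;
 *	if (l2pte_writable_p(opte)) {
 *		l2pte_set(ptep, l2pte_set_readonly(opte), opte);
 *		PTE_SYNC(ptep);
 *	}
 */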
889 
890 /*
891  * These macros return various bits based on kernel/user and protection.
892  * Note that the compiler will usually fold these at compile time.
893  */
894 #define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
895 				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : L1_S_PROT_RO))
896 
897 #define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
898 				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : L2_L_PROT_RO))
899 
900 #define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
901 				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : L2_S_PROT_RO))
902 
903 /*
904  * Macros to test whether a given virtual/physical address pair and size
905  * can be mapped with an L1 SuperSection, an L1 Section, or an L2 Large Page.
906  */
907 #define	L1_SS_MAPPABLE_P(va, pa, size)					\
908 	((((va) | (pa)) & L1_SS_OFFSET) == 0 && (size) >= L1_SS_SIZE)
909 
910 #define	L1_S_MAPPABLE_P(va, pa, size)					\
911 	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)
912 
913 #define	L2_L_MAPPABLE_P(va, pa, size)					\
914 	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
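
/*
 * Sketch of how a bootstrap mapper such as pmap_map_chunk() can apply
 * these tests, largest mapping first (simplified; supersections are
 * only usable where the MMU supports them):
 *
 *	if (L1_SS_MAPPABLE_P(va, pa, resid))		16MB supersection
 *	else if (L1_S_MAPPABLE_P(va, pa, resid))	1MB section
 *	else if (L2_L_MAPPABLE_P(va, pa, resid))	64KB large page
 *	else						4KB small page
 */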
915 
916 #ifndef _LOCORE
917 /*
918  * Hooks for the pool allocator.
919  */
920 #define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))
921 extern paddr_t physical_start, physical_end;
922 #ifdef PMAP_NEED_ALLOC_POOLPAGE
923 struct vm_page *arm_pmap_alloc_poolpage(int);
924 #define	PMAP_ALLOC_POOLPAGE	arm_pmap_alloc_poolpage
925 #endif
926 #if defined(PMAP_NEED_ALLOC_POOLPAGE) || defined(__HAVE_MM_MD_DIRECT_MAPPED_PHYS)
927 #define	PMAP_MAP_POOLPAGE(pa) \
928         ((vaddr_t)((paddr_t)(pa) - physical_start + KERNEL_BASE))
929 #define PMAP_UNMAP_POOLPAGE(va) \
930         ((paddr_t)((vaddr_t)(va) - KERNEL_BASE + physical_start))
931 #endif
932 
933 /*
934  * pmap-specific data store in the vm_page structure.
935  */
936 #define	__HAVE_VM_PAGE_MD
937 struct vm_page_md {
938 	SLIST_HEAD(,pv_entry) pvh_list;		/* pv_entry list */
939 	int pvh_attrs;				/* page attributes */
940 	u_int uro_mappings;
941 	u_int urw_mappings;
942 	union {
943 		u_short s_mappings[2];	/* Assume kernel count <= 65535 */
944 		u_int i_mappings;
945 	} k_u;
946 #define	kro_mappings	k_u.s_mappings[0]
947 #define	krw_mappings	k_u.s_mappings[1]
948 #define	k_mappings	k_u.i_mappings
949 };
950 
951 /*
952  * Set the default color of each page.
953  */
954 #if ARM_MMU_V6 > 0
955 #define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
956 	(pg)->mdpage.pvh_attrs = (pg)->phys_addr & arm_cache_prefer_mask
957 #else
958 #define	VM_MDPAGE_PVH_ATTRS_INIT(pg) \
959 	(pg)->mdpage.pvh_attrs = 0
960 #endif
961 
962 #define	VM_MDPAGE_INIT(pg)						\
963 do {									\
964 	SLIST_INIT(&(pg)->mdpage.pvh_list);				\
965 	VM_MDPAGE_PVH_ATTRS_INIT(pg);					\
966 	(pg)->mdpage.uro_mappings = 0;					\
967 	(pg)->mdpage.urw_mappings = 0;					\
968 	(pg)->mdpage.k_mappings = 0;					\
969 } while (/*CONSTCOND*/0)
970 
971 #endif /* !_LOCORE */
972 
973 #endif /* _KERNEL */
974 
975 #endif	/* _ARM32_PMAP_H_ */
976