/*	$NetBSD: pmap.h,v 1.75 2003/06/18 02:58:09 bsh Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM32_PMAP_H_
#define	_ARM32_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/arm32/pte.h>
#ifndef _LOCORE
#include <arm/cpufunc.h>
#include <uvm/uvm_object.h>
#endif

/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
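
/*
 * Worked example (illustrative): with L1_S_SHIFT == 20, L2_LOG2 is
 * (32 - 20) - 4 == 8, so L2_SIZE == 256.  Each of the 256 l2_dtable
 * slots spans L2_BUCKET_SIZE (16) 1MB L1 slots, i.e. 16MB of VA.  A
 * hypothetical lookup (not part of the pmap API) of the l2_dtable
 * slot and bucket for a given va might look like:
 *
 *	size_t slot   = (va >> L1_S_SHIFT) >> L2_BUCKET_LOG2;
 *	size_t bucket = (va >> L1_S_SHIFT) & (L2_BUCKET_SIZE - 1);
 */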

#ifndef _LOCORE

struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			u_int8_t csu_cache_b[2];
			u_int16_t csu_cache;
		} cs_cache_u;

		union {
			u_int8_t csu_tlb_b[2];
			u_int16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	u_int32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cache operations to be performed for a
 * particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu
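
/*
 * For example (sketch), a pmap's cache/TLB state can be marked fully
 * live, so that subsequent cache operations on it are never skipped:
 *
 *	pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
 */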

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};
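
/*
 * A port typically provides a const array of these, handed to
 * pmap_devmap_bootstrap() below.  A sketch (the MYBOARD_* names are
 * placeholders, not real constants):
 *
 *	static const struct pmap_devmap myboard_devmap[] = {
 *		{ MYBOARD_UART_VBASE, MYBOARD_UART_PBASE, L1_S_SIZE,
 *		  VM_PROT_READ | VM_PROT_WRITE, PTE_NOCACHE },
 *		{ 0, 0, 0, 0, 0 }
 *	};
 *
 * where, by convention, the entry with a pd_size of 0 terminates the
 * table.
 */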

/*
 * The pmap structure itself
 */
struct pmap {
	u_int8_t		pm_domain;
	boolean_t		pm_remove_all;
	struct l1_ttable	*pm_l1;
	union pmap_cache_state	pm_cstate;
	struct uvm_object	pm_obj;
#define	pm_lock pm_obj.vmobjlock
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
	LIST_ENTRY(pmap)	pm_list;
};

typedef struct pmap *pmap_t;

/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
} pv_addr_t;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
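
/*
 * Because the flags above share a single namespace, several of them
 * can be tested or cleared with one mask operation, e.g. (sketch):
 *
 *	pg->mdpage.pvh_attrs &= ~(PVF_MOD | PVF_REF);
 */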

/*
 * Commonly referenced structures
 */
extern struct pmap	kernel_pmap_store;
extern int		pmap_debug_level; /* Only exists if PMAP_DEBUG */

/*
 * Macros that we need to export
 */
#define pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)

#define	pmap_copy(dp, sp, da, l, sa)	/* nothing */

#define pmap_phys_address(ppn)		(arm_ptob((ppn)))

/*
 * Functions that we need to export
 */
void	pmap_procwr(struct proc *, vaddr_t, int);
void	pmap_remove_all(pmap_t);
boolean_t pmap_extract(pmap_t, vaddr_t, paddr_t *);

#define	PMAP_NEED_PROCWR
#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/* Functions we use internally. */
void	pmap_bootstrap(pd_entry_t *, vaddr_t, vaddr_t);

int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
boolean_t pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
boolean_t pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_debug(int);
void	pmap_postinit(void);

void	vector_page_setprot(int);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

/*
 * Special page zero routine for use by the idle loop (no cache cleans).
 */
boolean_t	pmap_pageidlezero(paddr_t);
#define PMAP_PAGEIDLEZERO(pa)	pmap_pageidlezero((pa))

/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}
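
/*
 * Example use (sketch): look up the kernel PTE for a va and inspect
 * it with the l2pte_* macros defined later in this file:
 *
 *	pt_entry_t *ptep = vtopte(va);
 *	if (ptep != NULL && l2pte_valid(*ptep))
 *		pa = l2pte_pa(*ptep) | (va & L2_S_OFFSET);
 */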

/*
 * Virtual address to physical address
 */
static __inline paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		return (0);	/* XXXSCW: Panic? */

	return (pa);
}

/*
 * The new pmap ensures that page tables are always mapped Write-Thru.
 * Thus, on some platforms we can run fast and loose and avoid syncing PTEs
 * on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is the chance for PTE syncs to be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the code.
 */
extern int pmap_needs_pte_sync;
#if defined(_KERNEL_OPT)
/*
 * StrongARM SA-1 caches do not have a write-through mode.  So, on these,
 * we need to do PTE syncs.  If only SA-1 is configured, then evaluate
 * this at compile time.
 */
#if (ARM_MMU_SA1 == 1) && (ARM_NMMUS == 1)
#define	PMAP_NEEDS_PTE_SYNC	1
#define	PMAP_INCLUDE_PTE_SYNC
#elif (ARM_MMU_SA1 == 0)
#define	PMAP_NEEDS_PTE_SYNC	0
#endif
#endif /* _KERNEL_OPT */

/*
 * Provide a fallback in case we were not able to determine it at
 * compile-time.
 */
#ifndef PMAP_NEEDS_PTE_SYNC
#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC
#endif

#define	PTE_SYNC(pte)							\
do {									\
	if (PMAP_NEEDS_PTE_SYNC)					\
		cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		cpu_dcache_wb_range((vaddr_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
	}								\
} while (/*CONSTCOND*/0)
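
/*
 * Typical usage (sketch): after storing a new PTE value, write the
 * cache line back so the table walker sees it even when the page
 * tables are not mapped write-through:
 *
 *	*ptep = L2_S_PROTO | pa |
 *	    L2_S_PROT(PTE_KERNEL, prot) | pte_l2_s_cache_mode;
 *	PTE_SYNC(ptep);
 */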

#define	l1pte_valid(pde)	((pde) != 0)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	((pte) != 0)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)

/* L1 and L2 page table macros */
#define pmap_pde_v(pde)		l1pte_valid(*(pde))
#define pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

#define	pmap_pte_v(pte)		l2pte_valid(*(pte))
#define	pmap_pte_pa(pte)	l2pte_pa(*(pte))

/* Size of the kernel part of the L1 page table */
#define KERNEL_PD_SIZE	\
	(L1_TABLE_SIZE - (KERNEL_BASE >> L1_S_SHIFT) * sizeof(pd_entry_t))
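
/*
 * Worked example (illustrative, assuming KERNEL_BASE == 0xc0000000
 * and L1_S_SHIFT == 20): the kernel owns the top 0x400 of the 0x1000
 * L1 slots, so KERNEL_PD_SIZE == 0x400 * sizeof(pd_entry_t) == 4KB
 * of the 16KB L1 table.
 */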

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
void	pmap_copy_page_generic(paddr_t, paddr_t);
void	pmap_zero_page_generic(paddr_t);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARM8)
void	pmap_pte_init_arm8(void);
#endif
#if defined(CPU_ARM9)
void	pmap_pte_init_arm9(void);
#endif /* CPU_ARM9 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0 */

#if ARM_MMU_SA1 == 1
void	pmap_pte_init_sa1(void);
#endif /* ARM_MMU_SA1 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(paddr_t, paddr_t);
void	pmap_zero_page_xscale(paddr_t);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l2_s_prot_u;
extern pt_entry_t		pte_l2_s_prot_w;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(paddr_t, paddr_t);
extern void (*pmap_zero_page_func)(paddr_t);

#endif /* !_LOCORE */

/*****************************************************************************/

/*
 * Tell MI code that the cache is virtually-indexed *and* virtually-tagged.
 */
#define PMAP_CACHE_VIVT

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */
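
/*
 * For example (sketch, using the L1_S_DOM() encoding macro from
 * <arm/arm32/pte.h>), a section mapping carries its pmap's domain
 * number in the L1 descriptor:
 *
 *	pde = L1_S_PROTO | pa | L1_S_DOM(pm->pm_domain) | ...;
 */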

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_U		(L1_S_AP(AP_U))
#define	L1_S_PROT_W		(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK		(L1_S_PROT_U|L1_S_PROT_W)

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))

#define	L2_L_PROT_U		(L2_AP(AP_U))
#define	L2_L_PROT_W		(L2_AP(AP_W))
#define	L2_L_PROT_MASK		(L2_L_PROT_U|L2_L_PROT_W)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))

#define	L2_S_PROT_U_generic	(L2_AP(AP_U))
#define	L2_S_PROT_W_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_PROT_U_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_W_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_S_PROT_U|L2_S_PROT_W)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L2_S_PROT_U		pte_l2_s_prot_u
#define	L2_S_PROT_W		pte_l2_s_prot_w
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif (ARM_MMU_GENERIC + ARM_MMU_SA1) != 0
#define	L2_S_PROT_U		L2_S_PROT_U_generic
#define	L2_S_PROT_W		L2_S_PROT_W_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L2_S_PROT_U		L2_S_PROT_U_xscale
#define	L2_S_PROT_W		L2_S_PROT_W_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#endif /* ARM_NMMUS > 1 */

/*
 * These macros return various bits based on kernel/user and protection.
 * Note that the compiler will usually fold these at compile time.
 */
#define	L1_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0))

#define	L2_L_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_L_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0))

#define	L2_S_PROT(ku, pr)	((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \
				 (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0))
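
/*
 * For instance, L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE)
 * folds to the constant L2_S_PROT_W at compile time when a single MMU
 * class is configured, since both arguments are constants.
 */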

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
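
/*
 * pmap_map_chunk() uses these to pick the largest mapping that fits,
 * roughly (sketch):
 *
 *	if (L1_S_MAPPABLE_P(va, pa, resid))
 *		...map a 1MB section...
 *	else if (L2_L_MAPPABLE_P(va, pa, resid))
 *		...map a 64KB large page...
 *	else
 *		...map 4KB small pages...
 */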

/*
 * Hooks for the pool allocator.
 */
#define	POOL_VTOPHYS(va)	vtophys((vaddr_t) (va))

#endif /* _KERNEL */

#endif	/* _ARM32_PMAP_H_ */