/*	$OpenBSD: pmap.h,v 1.46 2016/08/26 11:59:04 kettenis Exp $	*/
/*	$NetBSD: pmap.h,v 1.76 2003/09/06 09:10:46 rearnsha Exp $	*/

/*
 * Copyright (c) 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe & Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed for the NetBSD Project by
 *	Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1994,1995 Mark Brinicombe.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Mark Brinicombe
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef	_ARM_PMAP_H_
#define	_ARM_PMAP_H_

#ifdef _KERNEL

#include <arm/cpuconf.h>
#include <arm/pte.h>
#ifndef _LOCORE
#include <arm/cpufunc.h>
#endif
/*
 * A pmap describes a process's 4GB virtual address space.  This
 * virtual address space can be broken up into 4096 1MB regions which
 * are described by L1 PTEs in the L1 table.
 *
 * There is a line drawn at KERNEL_BASE.  Everything below that line
 * changes when the VM context is switched.  Everything above that line
 * is the same no matter which VM context is running.  This is achieved
 * by making the L1 PTEs for those slots above KERNEL_BASE reference
 * kernel L2 tables.
 *
 * The basic layout of the virtual address space thus looks like this:
 *
 *	0xffffffff
 *	.
 *	.
 *	.
 *	KERNEL_BASE
 *	--------------------
 *	.
 *	.
 *	.
 *	0x00000000
 */
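
/*
 * Editor's note (illustrative sketch, not part of the original header):
 * with the usual 1MB sections (L1_S_SHIFT == 20), the L1 slot covering
 * a virtual address is simply
 *
 *	l1slot = va >> L1_S_SHIFT;	(yielding 0..4095)
 *
 * so with a typical KERNEL_BASE of 0xc0000000, slots 0xc00..0xfff are
 * the ones made to reference the shared kernel L2 tables.
 */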

/*
 * The number of L2 descriptor tables which can be tracked by an l2_dtable.
 * A bucket size of 16 provides for 16MB of contiguous virtual address
 * space per l2_dtable. Most processes will, therefore, require only two or
 * three of these to map their whole working set.
 */
#define	L2_BUCKET_LOG2	4
#define	L2_BUCKET_SIZE	(1 << L2_BUCKET_LOG2)

/*
 * Given the above "L2-descriptors-per-l2_dtable" constant, the number
 * of l2_dtable structures required to track all possible page descriptors
 * mappable by an L1 translation table is given by the following constants:
 */
#define	L2_LOG2		((32 - L1_S_SHIFT) - L2_BUCKET_LOG2)
#define	L2_SIZE		(1 << L2_LOG2)
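
/*
 * Worked example (editor's note, assuming the usual L1_S_SHIFT == 20
 * for 1MB sections): L2_LOG2 = (32 - 20) - 4 = 8, so a pmap carries at
 * most L2_SIZE == 256 l2_dtable pointers, each covering a 16MB slice
 * of the address space (256 * 16MB == 4GB).
 */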

#ifndef _LOCORE

struct l1_ttable;
struct l2_dtable;

/*
 * Track cache/tlb occupancy using the following structure
 */
union pmap_cache_state {
	struct {
		union {
			u_int8_t csu_cache_b[2];
			u_int16_t csu_cache;
		} cs_cache_u;

		union {
			u_int8_t csu_tlb_b[2];
			u_int16_t csu_tlb;
		} cs_tlb_u;
	} cs_s;
	u_int32_t cs_all;
};
#define	cs_cache_id	cs_s.cs_cache_u.csu_cache_b[0]
#define	cs_cache_d	cs_s.cs_cache_u.csu_cache_b[1]
#define	cs_cache	cs_s.cs_cache_u.csu_cache
#define	cs_tlb_id	cs_s.cs_tlb_u.csu_tlb_b[0]
#define	cs_tlb_d	cs_s.cs_tlb_u.csu_tlb_b[1]
#define	cs_tlb		cs_s.cs_tlb_u.csu_tlb

/*
 * Assigned to cs_all to force cacheops to work for a particular pmap
 */
#define	PMAP_CACHE_STATE_ALL	0xffffffffu
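
/*
 * Usage sketch (editor's addition; "pm" is a hypothetical struct
 * pmap *): marking a pmap so that subsequent cache/TLB operations are
 * performed unconditionally, regardless of the tracked occupancy:
 *
 *	pm->pm_cstate.cs_all = PMAP_CACHE_STATE_ALL;
 */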

/*
 * This structure is used by machine-dependent code to describe
 * static mappings of devices, created at bootstrap time.
 */
struct pmap_devmap {
	vaddr_t		pd_va;		/* virtual address */
	paddr_t		pd_pa;		/* physical address */
	psize_t		pd_size;	/* size of region */
	vm_prot_t	pd_prot;	/* protection code */
	int		pd_cache;	/* cache attributes */
};
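
/*
 * Example (editor's sketch): a board would describe its static device
 * mappings in a table ending with the conventional all-zero terminator
 * entry, then hand it to pmap_devmap_bootstrap() (declared below).
 * The FOO_* constants and l1pt_va are hypothetical:
 *
 *	static const struct pmap_devmap foo_devmap[] = {
 *		{ FOO_UART_VBASE, FOO_UART_PBASE, FOO_UART_SIZE,
 *		  PROT_READ | PROT_WRITE, PTE_NOCACHE },
 *		{ 0, 0, 0, 0, 0 }
 *	};
 *
 *	pmap_devmap_bootstrap(l1pt_va, foo_devmap);
 */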

/*
 * The pmap structure itself
 */
struct pmap {
	u_int8_t		pm_domain;
	boolean_t		pm_remove_all;
	struct l1_ttable	*pm_l1;
	union pmap_cache_state	pm_cstate;
	u_int			pm_refs;
	struct l2_dtable	*pm_l2[L2_SIZE];
	struct pmap_statistics	pm_stats;
};

typedef struct pmap *pmap_t;

/*
 * MD flags that we use for pmap_enter (in the pa):
 */
#define PMAP_PA_MASK	~((paddr_t)PAGE_MASK) /* to remove the flags */
#define PMAP_NOCACHE	0x1 /* non-cacheable memory. */
#define PMAP_DEVICE	0x2 /* device memory. */

/*
 * Physical / virtual address structure. In a number of places (particularly
 * during bootstrapping) we need to keep track of the physical and virtual
 * addresses of various pages.
 */
typedef struct pv_addr {
	SLIST_ENTRY(pv_addr) pv_list;
	paddr_t pv_pa;
	vaddr_t pv_va;
} pv_addr_t;

/*
 * Determine various modes for PTEs (user vs. kernel, cacheable
 * vs. non-cacheable).
 */
#define	PTE_KERNEL	0
#define	PTE_USER	1
#define	PTE_NOCACHE	0
#define	PTE_CACHE	1
#define	PTE_PAGETABLE	2

/*
 * Flags that indicate attributes of pages or mappings of pages.
 *
 * The PVF_MOD and PVF_REF flags are stored in the mdpage for each
 * page.  PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual
 * pv_entry's for each page.  They live in the same "namespace" so
 * that we can clear multiple attributes at a time.
 *
 * Note the "non-cacheable" flag generally means the page has
 * multiple mappings in a given address space.
 */
#define	PVF_MOD		0x01		/* page is modified */
#define	PVF_REF		0x02		/* page is referenced */
#define	PVF_WIRED	0x04		/* mapping is wired */
#define	PVF_WRITE	0x08		/* mapping is writable */
#define	PVF_EXEC	0x10		/* mapping is executable */
#define	PVF_UNC		0x20		/* mapping is 'user' non-cacheable */
#define	PVF_KNC		0x40		/* mapping is 'kernel' non-cacheable */
#define	PVF_NC		(PVF_UNC|PVF_KNC)
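
/*
 * Editor's note: because these flags share one namespace, the internal
 * attribute-clearing helper in pmap.c can drop several of them in a
 * single pass, conceptually e.g. clearing PVF_MOD | PVF_REF when a
 * page is recycled.
 */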

/*
 * Commonly referenced structures
 */
extern struct pmap	kernel_pmap_store;

/*
 * Macros that we need to export
 */
#define pmap_kernel()			(&kernel_pmap_store)
#define	pmap_resident_count(pmap)	((pmap)->pm_stats.resident_count)
#define	pmap_wired_count(pmap)		((pmap)->pm_stats.wired_count)

#define	pmap_is_modified(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_MOD) != 0)
#define	pmap_is_referenced(pg)	\
	(((pg)->mdpage.pvh_attrs & PVF_REF) != 0)

#define	pmap_deactivate(p)		do { /* nothing */ } while (0)
#define	pmap_copy(dp, sp, da, l, sa)	do { /* nothing */ } while (0)

#define pmap_unuse_final(p)		do { /* nothing */ } while (0)
#define	pmap_remove_holes(vm)		do { /* nothing */ } while (0)

/*
 * Functions that we need to export
 */
void	pmap_remove_all(pmap_t);
void	pmap_uncache_page(paddr_t, vaddr_t);

#define PMAP_GROWKERNEL		/* turn on pmap_growkernel interface */

/* Functions we use internally. */
void	pmap_bootstrap(pd_entry_t *, vaddr_t, vaddr_t);

int	pmap_fault_fixup(pmap_t, vaddr_t, vm_prot_t, int);
boolean_t pmap_get_pde_pte(pmap_t, vaddr_t, pd_entry_t **, pt_entry_t **);
boolean_t pmap_get_pde(pmap_t, vaddr_t, pd_entry_t **);
void	pmap_set_pcb_pagedir(pmap_t, struct pcb *);

void	pmap_postinit(void);

void	vector_page_setprot(int);

/* XXX */
void	pmap_kenter_cache(vaddr_t va, paddr_t pa, vm_prot_t prot, int cacheable);

const struct pmap_devmap *pmap_devmap_find_pa(paddr_t, psize_t);
const struct pmap_devmap *pmap_devmap_find_va(vaddr_t, vsize_t);

/* Bootstrapping routines. */
void	pmap_map_section(vaddr_t, vaddr_t, paddr_t, int, int);
void	pmap_map_entry(vaddr_t, vaddr_t, paddr_t, int, int);
vsize_t	pmap_map_chunk(vaddr_t, vaddr_t, paddr_t, vsize_t, int, int);
void	pmap_link_l2pt(vaddr_t, vaddr_t, pv_addr_t *);
void	pmap_devmap_bootstrap(vaddr_t, const struct pmap_devmap *);
void	pmap_devmap_register(const struct pmap_devmap *);

/*
 * The current top of kernel VM
 */
extern vaddr_t	pmap_curmaxkvaddr;

/*
 * Useful macros and constants
 */

/* Virtual address to page table entry */
static __inline pt_entry_t *
vtopte(vaddr_t va)
{
	pd_entry_t *pdep;
	pt_entry_t *ptep;

	if (pmap_get_pde_pte(pmap_kernel(), va, &pdep, &ptep) == FALSE)
		return (NULL);
	return (ptep);
}

/*
 * The new pmap ensures that page tables are always mapped write-through.
 * Thus, on some platforms we can play fast and loose and avoid syncing
 * PTEs on every change.
 *
 * Unfortunately, not all CPUs have a write-through cache mode.  So we
 * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs,
 * and if there is any chance that PTE syncs will be needed, we define
 * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run)
 * the sync code.
 */
extern int pmap_needs_pte_sync;

#define	PMAP_NEEDS_PTE_SYNC	pmap_needs_pte_sync
#define	PMAP_INCLUDE_PTE_SYNC

#define	PTE_SYNC(pte)							\
do {									\
	cpu_drain_writebuf();						\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		paddr_t pa;						\
		cpu_dcache_wb_range((vaddr_t)(pte), sizeof(pt_entry_t));\
		if (cpu_sdcache_enabled()) {				\
			(void)pmap_extract(pmap_kernel(),		\
			    (vaddr_t)(pte), &pa);			\
			cpu_sdcache_wb_range((vaddr_t)(pte), (paddr_t)(pa),\
			    sizeof(pt_entry_t));			\
		}							\
		cpu_drain_writebuf();					\
	}								\
} while (/*CONSTCOND*/0)

#define	PTE_SYNC_RANGE(pte, cnt)					\
do {									\
	cpu_drain_writebuf();						\
	if (PMAP_NEEDS_PTE_SYNC) {					\
		paddr_t pa;						\
		cpu_dcache_wb_range((vaddr_t)(pte),			\
		    (cnt) << 2); /* * sizeof(pt_entry_t) */		\
		if (cpu_sdcache_enabled()) {				\
			(void)pmap_extract(pmap_kernel(),		\
			    (vaddr_t)(pte), &pa);			\
			cpu_sdcache_wb_range((vaddr_t)(pte), (paddr_t)(pa),\
			    (cnt) << 2); /* * sizeof(pt_entry_t) */	\
		}							\
		cpu_drain_writebuf();					\
	}								\
} while (/*CONSTCOND*/0)
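
/*
 * Usage sketch (editor's addition; ptep, pa and prot are hypothetical
 * locals of the caller): a store to a live page table entry is always
 * paired with a sync so the MMU's table walker sees it:
 *
 *	*ptep = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) |
 *	    pte_l2_s_cache_mode;
 *	PTE_SYNC(ptep);
 *
 * (L2_S_PROTO, L2_S_PROT() and pte_l2_s_cache_mode appear further
 * below.)  PTE_SYNC_RANGE() covers cnt consecutive entries instead.
 */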

#define	l1pte_valid(pde)	(((pde) & L1_TYPE_MASK) != L1_TYPE_INV)
#define	l1pte_section_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_S)
#define	l1pte_page_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_C)
#define	l1pte_fpage_p(pde)	(((pde) & L1_TYPE_MASK) == L1_TYPE_F)

#define l2pte_index(v)		(((v) & L2_ADDR_BITS) >> L2_S_SHIFT)
#define	l2pte_valid(pte)	(((pte) & L2_TYPE_MASK) != L2_TYPE_INV)
#define	l2pte_pa(pte)		((pte) & L2_S_FRAME)
#define l2pte_minidata(pte)	(((pte) & \
				 (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\
				 == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))
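
/*
 * Editor's sketch (illustrative only; pmap_extract() is the real
 * interface): how vtopte() and the l2pte_* helpers above compose to
 * translate a kernel virtual address backed by a small-page mapping.
 */
static __inline boolean_t
pmap_l2pte_lookup_sketch(vaddr_t va, paddr_t *pap)
{
	pt_entry_t *ptep;

	if ((ptep = vtopte(va)) == NULL || !l2pte_valid(*ptep))
		return (FALSE);
	/* l2pte_pa() assumes a small (4KB) page descriptor. */
	*pap = l2pte_pa(*ptep) | (va & PAGE_MASK);
	return (TRUE);
}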

/* L1 and L2 page table macros */
#define pmap_pde_v(pde)		l1pte_valid(*(pde))
#define pmap_pde_section(pde)	l1pte_section_p(*(pde))
#define pmap_pde_page(pde)	l1pte_page_p(*(pde))
#define pmap_pde_fpage(pde)	l1pte_fpage_p(*(pde))

/************************* ARM MMU configuration *****************************/

#if (ARM_MMU_GENERIC + ARM_MMU_V7) != 0
void	pmap_copy_page_generic(struct vm_page *, struct vm_page *);
void	pmap_zero_page_generic(struct vm_page *);

void	pmap_pte_init_generic(void);
#if defined(CPU_ARMv7)
void	pmap_pte_init_armv7(void);
#endif /* CPU_ARMv7 */
#endif /* (ARM_MMU_GENERIC + ARM_MMU_V7) != 0 */

#if ARM_MMU_V7 == 1
void	pmap_pte_init_v7(void);
#endif /* ARM_MMU_V7 == 1 */

#if ARM_MMU_XSCALE == 1
void	pmap_copy_page_xscale(struct vm_page *, struct vm_page *);
void	pmap_zero_page_xscale(struct vm_page *);

void	pmap_pte_init_xscale(void);

void	xscale_setup_minidata(vaddr_t, vaddr_t, paddr_t);

#define	PMAP_UAREA(va)		pmap_uarea(va)
void	pmap_uarea(vaddr_t);
#endif /* ARM_MMU_XSCALE == 1 */

extern pt_entry_t		pte_l1_s_cache_mode;
extern pt_entry_t		pte_l1_s_cache_mask;

extern pt_entry_t		pte_l2_l_cache_mode;
extern pt_entry_t		pte_l2_l_cache_mask;

extern pt_entry_t		pte_l2_s_cache_mode;
extern pt_entry_t		pte_l2_s_cache_mask;

extern pt_entry_t		pte_l1_s_cache_mode_pt;
extern pt_entry_t		pte_l2_l_cache_mode_pt;
extern pt_entry_t		pte_l2_s_cache_mode_pt;

extern pt_entry_t		pte_l1_s_coherent;
extern pt_entry_t		pte_l2_l_coherent;
extern pt_entry_t		pte_l2_s_coherent;

extern pt_entry_t		pte_l1_s_prot_ur;
extern pt_entry_t		pte_l1_s_prot_uw;
extern pt_entry_t		pte_l1_s_prot_kr;
extern pt_entry_t		pte_l1_s_prot_kw;
extern pt_entry_t		pte_l1_s_prot_mask;

extern pt_entry_t		pte_l2_l_prot_ur;
extern pt_entry_t		pte_l2_l_prot_uw;
extern pt_entry_t		pte_l2_l_prot_kr;
extern pt_entry_t		pte_l2_l_prot_kw;
extern pt_entry_t		pte_l2_l_prot_mask;

extern pt_entry_t		pte_l2_s_prot_ur;
extern pt_entry_t		pte_l2_s_prot_uw;
extern pt_entry_t		pte_l2_s_prot_kr;
extern pt_entry_t		pte_l2_s_prot_kw;
extern pt_entry_t		pte_l2_s_prot_mask;

extern pt_entry_t		pte_l1_s_proto;
extern pt_entry_t		pte_l1_c_proto;
extern pt_entry_t		pte_l2_s_proto;

extern void (*pmap_copy_page_func)(struct vm_page *, struct vm_page *);
extern void (*pmap_zero_page_func)(struct vm_page *);

#endif /* !_LOCORE */

/*****************************************************************************/

/*
 * Definitions for MMU domains
 */
#define	PMAP_DOMAINS		15	/* 15 'user' domains (0-14) */
#define	PMAP_DOMAIN_KERNEL	15	/* The kernel uses domain #15 */

/*
 * These macros define the various bit masks in the PTE.
 *
 * We use these macros since we use different bits on different processor
 * models.
 */
#define	L1_S_PROT_UR_generic	(L1_S_AP(AP_U))
#define	L1_S_PROT_UW_generic	(L1_S_AP(AP_U|AP_W))
#define	L1_S_PROT_KR_generic	(L1_S_AP(0))
#define	L1_S_PROT_KW_generic	(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK_generic	(L1_S_AP(0x03))

#define	L1_S_PROT_UR_xscale	(L1_S_AP(AP_U))
#define	L1_S_PROT_UW_xscale	(L1_S_AP(AP_U|AP_W))
#define	L1_S_PROT_KR_xscale	(L1_S_AP(0))
#define	L1_S_PROT_KW_xscale	(L1_S_AP(AP_W))
#define	L1_S_PROT_MASK_xscale	(L1_S_AP(0x03))

#define	L1_S_PROT_UR_v7		(L1_S_V7_AP(AP_V7_KRUR))
#define	L1_S_PROT_UW_v7		(L1_S_V7_AP(AP_KRWURW))
#define	L1_S_PROT_KR_v7		(L1_S_V7_AP(AP_V7_KR))
#define	L1_S_PROT_KW_v7		(L1_S_V7_AP(AP_KRW))
#define	L1_S_PROT_MASK_v7	(L1_S_V7_AP(0x07))

#define	L1_S_CACHE_MASK_generic	(L1_S_B|L1_S_C)
#define	L1_S_CACHE_MASK_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))
#define	L1_S_CACHE_MASK_v7	(L1_S_B|L1_S_C|L1_S_V7_TEX_MASK)

#define	L1_S_COHERENT_generic	(L1_S_B|L1_S_C)
#define	L1_S_COHERENT_xscale	(L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X))
#define	L1_S_COHERENT_v7	(L1_S_C)

#define	L2_L_PROT_KR_generic	(L2_AP(0))
#define	L2_L_PROT_UR_generic	(L2_AP(AP_U))
#define	L2_L_PROT_KW_generic	(L2_AP(AP_W))
#define	L2_L_PROT_UW_generic	(L2_AP(AP_U|AP_W))
#define	L2_L_PROT_MASK_generic	(L2_AP(AP_U|AP_W))

#define	L2_L_PROT_KR_xscale	(L2_AP(0))
#define	L2_L_PROT_UR_xscale	(L2_AP(AP_U))
#define	L2_L_PROT_KW_xscale	(L2_AP(AP_W))
#define	L2_L_PROT_UW_xscale	(L2_AP(AP_U|AP_W))
#define	L2_L_PROT_MASK_xscale	(L2_AP(AP_U|AP_W))

#define	L2_L_PROT_UR_v7		(L2_V7_AP(AP_V7_KRUR))
#define	L2_L_PROT_UW_v7		(L2_V7_AP(AP_KRWURW))
#define	L2_L_PROT_KR_v7		(L2_V7_AP(AP_V7_KR))
#define	L2_L_PROT_KW_v7		(L2_V7_AP(AP_KRW))
#define	L2_L_PROT_MASK_v7	(L2_V7_AP(0x07) | L2_V7_L_XN)

#define	L2_L_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_L_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))
#define	L2_L_CACHE_MASK_v7	(L2_B|L2_C|L2_V7_L_TEX_MASK)

#define	L2_L_COHERENT_generic	(L2_B|L2_C)
#define	L2_L_COHERENT_xscale	(L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X))
#define	L2_L_COHERENT_v7	(L2_C)

#define	L2_S_PROT_UR_generic	(L2_AP(AP_U))
#define	L2_S_PROT_UW_generic	(L2_AP(AP_U|AP_W))
#define	L2_S_PROT_KR_generic	(L2_AP(0))
#define	L2_S_PROT_KW_generic	(L2_AP(AP_W))
#define	L2_S_PROT_MASK_generic	(L2_AP(AP_U|AP_W))

#define	L2_S_PROT_UR_xscale	(L2_AP0(AP_U))
#define	L2_S_PROT_UW_xscale	(L2_AP0(AP_U|AP_W))
#define	L2_S_PROT_KR_xscale	(L2_AP0(0))
#define	L2_S_PROT_KW_xscale	(L2_AP0(AP_W))
#define	L2_S_PROT_MASK_xscale	(L2_AP0(AP_U|AP_W))

#define	L2_S_PROT_UR_v7		(L2_V7_AP(AP_V7_KRUR))
#define	L2_S_PROT_UW_v7		(L2_V7_AP(AP_KRWURW))
#define	L2_S_PROT_KR_v7		(L2_V7_AP(AP_V7_KR))
#define	L2_S_PROT_KW_v7		(L2_V7_AP(AP_KRW))
#define	L2_S_PROT_MASK_v7	(L2_V7_AP(0x07) | L2_V7_S_XN)

#define	L2_S_CACHE_MASK_generic	(L2_B|L2_C)
#define	L2_S_CACHE_MASK_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))
#define	L2_S_CACHE_MASK_v7	(L2_B|L2_C|L2_V7_S_TEX_MASK)

#define	L2_S_COHERENT_generic	(L2_B|L2_C)
#define	L2_S_COHERENT_xscale	(L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X))
#define	L2_S_COHERENT_v7	(L2_C)

#define	L1_S_PROTO_generic	(L1_TYPE_S | L1_S_IMP)
#define	L1_S_PROTO_xscale	(L1_TYPE_S)
#define	L1_S_PROTO_v7		(L1_TYPE_S)

#define	L1_C_PROTO_generic	(L1_TYPE_C | L1_C_IMP2)
#define	L1_C_PROTO_xscale	(L1_TYPE_C)
#define	L1_C_PROTO_v7		(L1_TYPE_C)

#define	L2_L_PROTO		(L2_TYPE_L)

#define	L2_S_PROTO_generic	(L2_TYPE_S)
#define	L2_S_PROTO_xscale	(L2_TYPE_XSCALE_XS)
#define	L2_S_PROTO_v7		(L2_TYPE_S)

/*
 * User-visible names for the ones that vary with MMU class.
 */

#if ARM_NMMUS > 1
/* More than one MMU class configured; use variables. */
#define	L1_S_PROT_UR		pte_l1_s_prot_ur
#define	L1_S_PROT_UW		pte_l1_s_prot_uw
#define	L1_S_PROT_KR		pte_l1_s_prot_kr
#define	L1_S_PROT_KW		pte_l1_s_prot_kw
#define	L1_S_PROT_MASK		pte_l1_s_prot_mask

#define	L2_L_PROT_UR		pte_l2_l_prot_ur
#define	L2_L_PROT_UW		pte_l2_l_prot_uw
#define	L2_L_PROT_KR		pte_l2_l_prot_kr
#define	L2_L_PROT_KW		pte_l2_l_prot_kw
#define	L2_L_PROT_MASK		pte_l2_l_prot_mask

#define	L2_S_PROT_UR		pte_l2_s_prot_ur
#define	L2_S_PROT_UW		pte_l2_s_prot_uw
#define	L2_S_PROT_KR		pte_l2_s_prot_kr
#define	L2_S_PROT_KW		pte_l2_s_prot_kw
#define	L2_S_PROT_MASK		pte_l2_s_prot_mask

#define	L1_S_CACHE_MASK		pte_l1_s_cache_mask
#define	L2_L_CACHE_MASK		pte_l2_l_cache_mask
#define	L2_S_CACHE_MASK		pte_l2_s_cache_mask

#define	L1_S_COHERENT		pte_l1_s_coherent
#define	L2_L_COHERENT		pte_l2_l_coherent
#define	L2_S_COHERENT		pte_l2_s_coherent

#define	L1_S_PROTO		pte_l1_s_proto
#define	L1_C_PROTO		pte_l1_c_proto
#define	L2_S_PROTO		pte_l2_s_proto

#define	pmap_copy_page(s, d)	(*pmap_copy_page_func)((s), (d))
#define	pmap_zero_page(d)	(*pmap_zero_page_func)((d))
#elif ARM_MMU_GENERIC == 1
#define	L1_S_PROT_UR		L1_S_PROT_UR_generic
#define	L1_S_PROT_UW		L1_S_PROT_UW_generic
#define	L1_S_PROT_KR		L1_S_PROT_KR_generic
#define	L1_S_PROT_KW		L1_S_PROT_KW_generic
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_generic

#define	L2_L_PROT_UR		L2_L_PROT_UR_generic
#define	L2_L_PROT_UW		L2_L_PROT_UW_generic
#define	L2_L_PROT_KR		L2_L_PROT_KR_generic
#define	L2_L_PROT_KW		L2_L_PROT_KW_generic
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_generic

#define	L2_S_PROT_UR		L2_S_PROT_UR_generic
#define	L2_S_PROT_UW		L2_S_PROT_UW_generic
#define	L2_S_PROT_KR		L2_S_PROT_KR_generic
#define	L2_S_PROT_KW		L2_S_PROT_KW_generic
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_generic

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_generic
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_generic
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_generic

#define	L1_S_COHERENT		L1_S_COHERENT_generic
#define	L2_L_COHERENT		L2_L_COHERENT_generic
#define	L2_S_COHERENT		L2_S_COHERENT_generic

#define	L1_S_PROTO		L1_S_PROTO_generic
#define	L1_C_PROTO		L1_C_PROTO_generic
#define	L2_S_PROTO		L2_S_PROTO_generic

#define	pmap_copy_page(s, d)	pmap_copy_page_generic((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_generic((d))
#elif ARM_MMU_XSCALE == 1
#define	L1_S_PROT_UR		L1_S_PROT_UR_xscale
#define	L1_S_PROT_UW		L1_S_PROT_UW_xscale
#define	L1_S_PROT_KR		L1_S_PROT_KR_xscale
#define	L1_S_PROT_KW		L1_S_PROT_KW_xscale
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_xscale

#define	L2_L_PROT_UR		L2_L_PROT_UR_xscale
#define	L2_L_PROT_UW		L2_L_PROT_UW_xscale
#define	L2_L_PROT_KR		L2_L_PROT_KR_xscale
#define	L2_L_PROT_KW		L2_L_PROT_KW_xscale
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_xscale

#define	L2_S_PROT_UR		L2_S_PROT_UR_xscale
#define	L2_S_PROT_UW		L2_S_PROT_UW_xscale
#define	L2_S_PROT_KR		L2_S_PROT_KR_xscale
#define	L2_S_PROT_KW		L2_S_PROT_KW_xscale
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_xscale

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_xscale
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_xscale
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_xscale

#define	L1_S_COHERENT		L1_S_COHERENT_xscale
#define	L2_L_COHERENT		L2_L_COHERENT_xscale
#define	L2_S_COHERENT		L2_S_COHERENT_xscale

#define	L1_S_PROTO		L1_S_PROTO_xscale
#define	L1_C_PROTO		L1_C_PROTO_xscale
#define	L2_S_PROTO		L2_S_PROTO_xscale

#define	pmap_copy_page(s, d)	pmap_copy_page_xscale((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_xscale((d))
#elif ARM_MMU_V7 == 1
#define	L1_S_PROT_UR		L1_S_PROT_UR_v7
#define	L1_S_PROT_UW		L1_S_PROT_UW_v7
#define	L1_S_PROT_KR		L1_S_PROT_KR_v7
#define	L1_S_PROT_KW		L1_S_PROT_KW_v7
#define	L1_S_PROT_MASK		L1_S_PROT_MASK_v7

#define	L2_L_PROT_UR		L2_L_PROT_UR_v7
#define	L2_L_PROT_UW		L2_L_PROT_UW_v7
#define	L2_L_PROT_KR		L2_L_PROT_KR_v7
#define	L2_L_PROT_KW		L2_L_PROT_KW_v7
#define	L2_L_PROT_MASK		L2_L_PROT_MASK_v7

#define	L2_S_PROT_UR		L2_S_PROT_UR_v7
#define	L2_S_PROT_UW		L2_S_PROT_UW_v7
#define	L2_S_PROT_KR		L2_S_PROT_KR_v7
#define	L2_S_PROT_KW		L2_S_PROT_KW_v7
#define	L2_S_PROT_MASK		L2_S_PROT_MASK_v7

#define	L1_S_CACHE_MASK		L1_S_CACHE_MASK_v7
#define	L2_L_CACHE_MASK		L2_L_CACHE_MASK_v7
#define	L2_S_CACHE_MASK		L2_S_CACHE_MASK_v7

#define	L1_S_COHERENT		L1_S_COHERENT_v7
#define	L2_L_COHERENT		L2_L_COHERENT_v7
#define	L2_S_COHERENT		L2_S_COHERENT_v7

#define	L1_S_PROTO		L1_S_PROTO_v7
#define	L1_C_PROTO		L1_C_PROTO_v7
#define	L2_S_PROTO		L2_S_PROTO_v7

#define	pmap_copy_page(s, d)	pmap_copy_page_v7((s), (d))
#define	pmap_zero_page(d)	pmap_zero_page_v7((d))
#endif /* ARM_NMMUS > 1 */

/*
 * These inline helpers return various bits based on kernel/user and
 * protection.  Note that the compiler will usually fold these at
 * compile time.
 */
#ifndef _LOCORE
static __inline pt_entry_t
L1_S_PROT(int ku, vm_prot_t pr)
{
	pt_entry_t pte;

	if (ku == PTE_USER)
		pte = (pr & PROT_WRITE) ? L1_S_PROT_UW : L1_S_PROT_UR;
	else
		pte = (pr & PROT_WRITE) ? L1_S_PROT_KW : L1_S_PROT_KR;

#ifdef CPU_ARMv7
	if ((pr & PROT_EXEC) == 0)
		pte |= L1_S_V7_XN;
#endif

	return (pte);
}

static __inline pt_entry_t
L2_L_PROT(int ku, vm_prot_t pr)
{
	pt_entry_t pte;

	if (ku == PTE_USER)
		pte = (pr & PROT_WRITE) ? L2_L_PROT_UW : L2_L_PROT_UR;
	else
		pte = (pr & PROT_WRITE) ? L2_L_PROT_KW : L2_L_PROT_KR;

#ifdef CPU_ARMv7
	if ((pr & PROT_EXEC) == 0)
		pte |= L2_V7_L_XN;
#endif

	return (pte);
}

static __inline pt_entry_t
L2_S_PROT(int ku, vm_prot_t pr)
{
	pt_entry_t pte;

	if (ku == PTE_USER)
		pte = (pr & PROT_WRITE) ? L2_S_PROT_UW : L2_S_PROT_UR;
	else
		pte = (pr & PROT_WRITE) ? L2_S_PROT_KW : L2_S_PROT_KR;

#ifdef CPU_ARMv7
	if ((pr & PROT_EXEC) == 0)
		pte |= L2_V7_S_XN;
#endif

	return (pte);
}

static __inline boolean_t
l2pte_is_writeable(pt_entry_t pte, struct pmap *pm)
{
	/* On ARMv7, a set AP[2] bit (L2_V7_AP(0x4)) makes the page read-only. */
	return (pte & L2_V7_AP(0x4)) == 0;
}
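
/*
 * Example (editor's sketch): composing a new small-page PTE roughly
 * the way pmap.c does for a cached kernel read/write mapping:
 *
 *	pt_entry_t npte;
 *
 *	npte = L2_S_PROTO | (pa & PMAP_PA_MASK) |
 *	    L2_S_PROT(PTE_KERNEL, PROT_READ | PROT_WRITE) |
 *	    pte_l2_s_cache_mode;
 */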
#endif

/*
 * Macros to test if a mapping is mappable with an L1 Section mapping
 * or an L2 Large Page mapping.
 */
#define	L1_S_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE)

#define	L2_L_MAPPABLE_P(va, pa, size)					\
	((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE)
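
/*
 * Editor's note: pmap_map_chunk() (declared above) is the typical
 * consumer, picking the largest translation that fits: a chunk
 * satisfying L1_S_MAPPABLE_P() gets a 1MB section, one satisfying
 * L2_L_MAPPABLE_P() a 64KB large page, and anything else small pages.
 */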

#ifndef _LOCORE
/* pmap_prefer bits for VIPT ARMv7 */
#define PMAP_PREFER(fo, ap)	pmap_prefer((fo), (ap))
vaddr_t	pmap_prefer(vaddr_t, vaddr_t);

extern uint32_t pmap_alias_dist;
extern uint32_t pmap_alias_bits;

/* pmap prefer alias alignment. */
#define PMAP_PREFER_ALIGN()	(pmap_alias_dist)
/* pmap prefer offset within alignment. */
#define PMAP_PREFER_OFFSET(of)						\
    (PMAP_PREFER_ALIGN() == 0 ? 0 : ((of) & (PMAP_PREFER_ALIGN() - 1)))
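
/*
 * Worked example (editor's sketch): if pmap_alias_dist were 0x8000,
 * PMAP_PREFER_ALIGN() is 0x8000 and an offset of 0xd432 yields
 * PMAP_PREFER_OFFSET(0xd432) == 0x5432, steering mappings toward
 * virtual addresses with (va & 0x7fff) == 0x5432 so that VIPT cache
 * aliases of the same object land on the same cache lines.
 */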

#endif /* _LOCORE */

#endif /* _KERNEL */

#ifndef _LOCORE
/*
 * pmap-specific data store in the vm_page structure.
 */
struct vm_page_md {
	struct pv_entry *pvh_list;		/* pv_entry list */
	int pvh_attrs;				/* page attributes */
	u_int uro_mappings;
	u_int urw_mappings;
	union {
		u_short s_mappings[2];	/* Assume kernel count <= 65535 */
		u_int i_mappings;
	} k_u;
#define	kro_mappings	k_u.s_mappings[0]
#define	krw_mappings	k_u.s_mappings[1]
#define	k_mappings	k_u.i_mappings
};

#define	VM_MDPAGE_INIT(pg)						\
do {									\
	(pg)->mdpage.pvh_list = NULL;					\
	(pg)->mdpage.pvh_attrs = 0;					\
	(pg)->mdpage.uro_mappings = 0;					\
	(pg)->mdpage.urw_mappings = 0;					\
	(pg)->mdpage.k_mappings = 0;					\
} while (/*CONSTCOND*/0)
#endif /* _LOCORE */

#endif	/* _ARM_PMAP_H_ */