/* $NetBSD: pte_coldfire.h,v 1.2 2014/03/18 18:20:41 riastradh Exp $ */
/*-
 * Copyright (c) 2013 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matt Thomas of 3am Software Foundry.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _M68K_PTE_COLDFIRE_H_
#define _M68K_PTE_COLDFIRE_H_

#ifdef __ASSEMBLY__
#error use assym.h instead
#endif

#ifndef __BSD_PT_ENTRY_T
#define __BSD_PT_ENTRY_T	__uint32_t
typedef __BSD_PT_ENTRY_T	pt_entry_t;
#endif

#define	MMUTR_VA	__BITS(31,10)	// Virtual Address
#define	MMUTR_ID	__BITS(9,2)	// ASID
#define	MMUTR_SG	__BIT(1)	// Shared Global
#define	MMUTR_V		__BIT(0)	// Valid

#define	MMUDR_PA	__BITS(31,10)	// Physical Address
#define	MMUDR_SZ	__BITS(9,8)	// Entry Size
#define	MMUDR_SZ_1MB	0
#define	MMUDR_SZ_4KB	1
#define	MMUDR_SZ_8KB	2
#define	MMUDR_SZ_16MB	3
#define	MMUDR_CM	__BITS(7,6)	// Cache Mode
#define	MMUDR_CM_WT	0		// Write-Through
#define	MMUDR_CM_WB	1		// Write-Back (Copy-Back)
#define	MMUDR_CM_NC	2		// Non-cacheable
#define	MMUDR_CM_NCP	2		// Non-cacheable Precise
#define	MMUDR_CM_NCI	3		// Non-cacheable Imprecise
#define	MMUDR_SP	__BIT(5)	// Supervisor Protect
#define	MMUDR_R		__BIT(4)	// Read Access
#define	MMUDR_W		__BIT(3)	// Write Access
#define	MMUDR_X		__BIT(2)	// Execute Access
#define	MMUDR_LK	__BIT(1)	// Lock Entry
#define	MMUDR_MBZ0	__BIT(0)	// Must be zero

/*
 * The PTE is basically the contents of MMUDR[31:2] | MMUAR[0].
 * We overload the meaning of MMUDR_LK to indicate a wired mapping;
 * it will be cleared before the entry is written to the TLB.
 */
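
/*
 * Illustrative sketch only (not part of this header's interface): a
 * TLB-miss handler might split such a PTE back into the hardware
 * register images roughly as follows, where mmu_write_tr() and
 * mmu_write_dr() are hypothetical stand-ins for the port's actual
 * MMU register accessors:
 *
 *	mmu_write_tr((va & MMUTR_VA) | __SHIFTIN(asid, MMUTR_ID) | MMUTR_V);
 *	mmu_write_dr(pt_entry & ~(MMUDR_LK | MMUDR_MBZ0));
 */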

#ifdef _KERNEL

static inline bool
pte_cached_p(pt_entry_t pt_entry)
{
	/* CM values 2 and 3 (the MMUDR_CM_NC* modes) are non-cacheable. */
	return __SHIFTOUT(pt_entry, MMUDR_CM) < MMUDR_CM_NC;
}

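/*
 * MMUDR_W is only granted once a managed page has been marked modified
 * (see pte_prot_bits() below), so its presence doubles as the modified
 * (dirty) indicator for the pmap.
 */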
static inline bool
pte_modified_p(pt_entry_t pt_entry)
{
	return (pt_entry & MMUDR_W) == MMUDR_W;
}

static inline bool
pte_valid_p(pt_entry_t pt_entry)
{
	return (pt_entry & MMUAR_V) == MMUAR_V;
}

static inline bool
pte_exec_p(pt_entry_t pt_entry)
{
	return (pt_entry & MMUDR_X) == MMUDR_X;
}

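/*
 * Execute permission is granted lazily: a mapping without MMUDR_X set
 * takes an execute fault first, giving the pmap a chance to mark the
 * page as an exec page (see the VM_PAGEMD_EXECPAGE_P check below).
 */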
static inline bool
pte_deferred_exec_p(pt_entry_t pt_entry)
{
	return !pte_exec_p(pt_entry);
}

static inline bool
pte_wired_p(pt_entry_t pt_entry)
{
	return (pt_entry & MMUDR_LK) == MMUDR_LK;
}

static inline pt_entry_t
pte_nv_entry(bool kernel)
{
	return 0;
}

static inline paddr_t
pte_to_paddr(pt_entry_t pt_entry)
{
	return (paddr_t)(pt_entry & MMUDR_PA);
}

static inline pt_entry_t
pte_ionocached_bits(void)
{
	return __SHIFTIN(MMUDR_CM_NCP, MMUDR_CM);
}

static inline pt_entry_t
pte_iocached_bits(void)
{
	return __SHIFTIN(MMUDR_CM_NCP, MMUDR_CM);
}

static inline pt_entry_t
pte_nocached_bits(void)
{
	return __SHIFTIN(MMUDR_CM_NCP, MMUDR_CM);
}

static inline pt_entry_t
pte_cached_bits(void)
{
	return __SHIFTIN(MMUDR_CM_WB, MMUDR_CM);
}

static inline pt_entry_t
pte_cached_change(pt_entry_t pt_entry, bool cached)
{
	return (pt_entry & ~MMUDR_CM)
	    | __SHIFTIN(cached ? MMUDR_CM_WB : MMUDR_CM_NCP, MMUDR_CM);
}

static inline pt_entry_t
pte_wire_entry(pt_entry_t pt_entry)
{
	return pt_entry | MMUDR_LK;
}

static inline pt_entry_t
pte_unwire_entry(pt_entry_t pt_entry)
{
	return pt_entry & ~MMUDR_LK;
}

static inline pt_entry_t
pte_prot_nowrite(pt_entry_t pt_entry)
{
	return pt_entry & ~MMUDR_W;
}

static inline pt_entry_t
pte_prot_downgrade(pt_entry_t pt_entry, vm_prot_t newprot)
{
	pt_entry &= ~MMUDR_W;
	if ((newprot & VM_PROT_EXECUTE) == 0)
		pt_entry &= ~MMUDR_X;
	return pt_entry;
}

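/*
 * Write and execute permission are applied lazily: MMUDR_W is only set
 * once the page is already marked modified, and MMUDR_X only once it is
 * marked as an exec page, so the first write or execute access faults
 * and lets the pmap update the page attributes first.
 */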
static inline pt_entry_t
pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot)
{
	KASSERT(prot & VM_PROT_READ);
	pt_entry_t pt_entry = MMUDR_R;
	if (prot & VM_PROT_EXECUTE) {
		/* Only allow exec for managed pages */
		if (mdpg != NULL && VM_PAGEMD_EXECPAGE_P(mdpg))
			pt_entry |= MMUDR_X;
	}
	if (prot & VM_PROT_WRITE) {
		if (mdpg == NULL || VM_PAGEMD_MODIFIED_P(mdpg))
			pt_entry |= MMUDR_W;
	}
	return pt_entry;
}

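/*
 * Managed (struct vm_page backed) mappings default to write-back
 * caching; unmanaged (e.g. device) mappings use the non-cacheable
 * precise mode.  PMAP_NOCACHE forces either kind to be non-cacheable.
 */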
static inline pt_entry_t
pte_flag_bits(struct vm_page_md *mdpg, int flags)
{
	if (__predict_false(flags & PMAP_NOCACHE)) {
		if (__predict_true(mdpg != NULL)) {
			return pte_nocached_bits();
		} else {
			return pte_ionocached_bits();
		}
	} else {
		if (__predict_false(mdpg != NULL)) {
			return pte_cached_bits();
		} else {
			return pte_iocached_bits();
		}
	}
}

static inline pt_entry_t
pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
	int flags, bool kernel)
{
	pt_entry_t pt_entry = (pt_entry_t) pa & MMUDR_PA;

	pt_entry |= pte_flag_bits(mdpg, flags);
	pt_entry |= pte_prot_bits(mdpg, prot);

	return pt_entry;
}

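/*
 * pmap_kenter_pa() mappings are wired (MMUDR_LK) and deliberately
 * treated as unmanaged, so they get their full protection up front
 * instead of the lazy modified/exec handling above.
 */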
static inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
	int flags)
{
	pt_entry_t pt_entry = (pt_entry_t) pa & MMUDR_PA;

	pt_entry |= MMUDR_LK;
	pt_entry |= pte_flag_bits(mdpg, flags);
	pt_entry |= pte_prot_bits(NULL, prot); /* pretend unmanaged */

	return pt_entry;
}
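
/*
 * Hypothetical usage sketch (for illustration only): a pmap_kenter_pa()
 * implementation built on these helpers might do something like
 *
 *	pt_entry_t pte = pte_make_kenter_pa(pa, NULL, prot, flags);
 *	*ptep = pte;
 *
 * where ptep is the page-table slot for the virtual address being mapped.
 */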
#endif /* _KERNEL */

#endif /* _M68K_PTE_COLDFIRE_H_ */