/*	$NetBSD: pte.h,v 1.11 2020/08/22 15:34:51 skrll Exp $	*/
/*-
 * Copyright (c) 2010, 2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Raytheon BBN Technologies Corp and Defense Advanced Research Projects
 * Agency and which was developed by Matt Thomas of 3am Software Foundry.
 *
 * This material is based upon work supported by the Defense Advanced Research
 * Projects Agency and Space and Naval Warfare Systems Center, Pacific, under
 * Contract No. N66001-09-C-2073.
 * Approved for Public Release, Distribution Unlimited
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _POWERPC_BOOKE_PTE_H_
#define _POWERPC_BOOKE_PTE_H_

#ifndef _LOCORE
#ifndef __BSD_PT_ENTRY_T
#define __BSD_PT_ENTRY_T	__uint32_t
typedef __BSD_PT_ENTRY_T	pt_entry_t;
#define PRIxPTE			PRIx32
#endif
#endif

#include <powerpc/booke/spr.h>

/*
 * The PTE format is software-defined and must be translated into the
 * various MAS registers.  The X, W, and R bits are separated by single
 * bits so that they can map to the MAS3 bits UX/UW/UR or SX/SW/SR with
 * a mask and a shift.
 */
#define	PTE_IO		(PTE_I|PTE_G|PTE_xW|PTE_xR)
#define	PTE_DEFAULT	(PTE_M|PTE_xX|PTE_xW|PTE_xR)
#define	PTE_MAS3_MASK	(MAS3_RPN|MAS3_U2|MAS3_U0)
#define	PTE_MAS2_MASK	(MAS2_WIMGE)
#define	PTE_RPN_MASK	MAS3_RPN		/* MAS3[RPN] */
#define	PTE_RWX_MASK	(PTE_xX|PTE_xW|PTE_xR)
#define	PTE_WIRED	(MAS3_U0 << 2)		/* page is wired (PTE only) */
#define	PTE_xX		(MAS3_U0 << 1)		/* MAS3[UX] | MAS3[SX] */
#define	PTE_UNSYNCED	MAS3_U0			/* page needs isync */
#define	PTE_xW		MAS3_U1			/* MAS3[UW] | MAS3[SW] */
#define	PTE_UNMODIFIED	MAS3_U2			/* page is unmodified */
#define	PTE_xR		MAS3_U3			/* MAS3[UR] | MAS3[SR] */
#define	PTE_RWX_SHIFT	6
#define	PTE_UNUSED	0x00000020
#define	PTE_WIMGE_MASK	MAS2_WIMGE
#define	PTE_WIG		(PTE_W|PTE_I|PTE_G)
#define	PTE_W		MAS2_W			/* Write-through */
#define	PTE_I		MAS2_I			/* cache-Inhibited */
#define	PTE_M		MAS2_M			/* Memory coherence */
#define	PTE_G		MAS2_G			/* Guarded */
#define	PTE_E		MAS2_E			/* [Little] Endian */
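
/*
 * Illustrative sketch only, not the kernel's actual TLB-fill code:
 * assuming the e500-style MAS3 layout, in which UX/SX/UW/SW/UR/SR
 * occupy the low six bits, the software permission bits above fold
 * into MAS3 with a mask and a shift:
 *
 *	mas3 = (pte & PTE_RPN_MASK)
 *	     | ((pte & PTE_RWX_MASK) >> PTE_RWX_SHIFT);	  sets SX|SW|SR
 *
 * A user mapping would additionally shift by (PTE_RWX_SHIFT - 1) to
 * set UX|UW|UR.  The TLB miss handlers are the authority here.
 */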

#ifndef _LOCORE
#ifdef _KERNEL

static __inline uint32_t
pte_value(pt_entry_t pt_entry)
{
	return pt_entry;
}

static __inline bool
pte_cached_p(pt_entry_t pt_entry)
{
	return (pt_entry & PTE_I) == 0;
}

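/*
 * A page is considered modified once it is writable and the UNMODIFIED
 * software bit has been cleared, i.e. a write has actually happened.
 */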
static __inline bool
pte_modified_p(pt_entry_t pt_entry)
{
	return (pt_entry & (PTE_UNMODIFIED|PTE_xW)) == PTE_xW;
}

static __inline bool
pte_valid_p(pt_entry_t pt_entry)
{
	return pt_entry != 0;
}

static __inline bool
pte_exec_p(pt_entry_t pt_entry)
{
	return (pt_entry & PTE_xX) != 0;
}

static __inline bool
pte_readonly_p(pt_entry_t pt_entry)
{
	return (pt_entry & PTE_xW) == 0;
}

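/*
 * UNSYNCED marks a mapping whose execute permission is withheld until
 * the instruction cache has been synchronized with the page's contents.
 */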
static __inline bool
pte_deferred_exec_p(pt_entry_t pt_entry)
{
	//return (pt_entry & (PTE_xX|PTE_UNSYNCED)) == (PTE_xX|PTE_UNSYNCED);
	return (pt_entry & PTE_UNSYNCED) == PTE_UNSYNCED;
}

static __inline bool
pte_wired_p(pt_entry_t pt_entry)
{
	return (pt_entry & PTE_WIRED) != 0;
}

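/* A non-valid PTE is simply 0, for both kernel and user pmaps. */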
static __inline pt_entry_t
pte_nv_entry(bool kernel)
{
	return 0;
}

static __inline paddr_t
pte_to_paddr(pt_entry_t pt_entry)
{
	return (paddr_t)(pt_entry & PTE_RPN_MASK);
}

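/*
 * WIMGE attribute templates: managed memory is mapped coherent (M),
 * device (I/O) space is mapped guarded (G), and the uncached variants
 * of both add cache-inhibited (I).
 */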
static __inline pt_entry_t
pte_ionocached_bits(void)
{
	return PTE_I|PTE_G;
}

static __inline pt_entry_t
pte_iocached_bits(void)
{
	return PTE_G;
}

static __inline pt_entry_t
pte_nocached_bits(void)
{
	return PTE_M|PTE_I;
}

static __inline pt_entry_t
pte_cached_bits(void)
{
	return PTE_M;
}

static __inline pt_entry_t
pte_cached_change(pt_entry_t pt_entry, bool cached)
{
	return (pt_entry & ~PTE_I) | (cached ? 0 : PTE_I);
}

static __inline pt_entry_t
pte_wire_entry(pt_entry_t pt_entry)
{
	return pt_entry | PTE_WIRED;
}

static __inline pt_entry_t
pte_unwire_entry(pt_entry_t pt_entry)
{
	return pt_entry & ~PTE_WIRED;
}

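/*
 * Revoke write permission; UNMODIFIED is cleared as well since it is
 * only meaningful on writable mappings.
 */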
static __inline pt_entry_t
pte_prot_nowrite(pt_entry_t pt_entry)
{
	return pt_entry & ~(PTE_xW|PTE_UNMODIFIED);
}

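/*
 * Downgrading always revokes write permission; execute permission (and
 * its deferred-sync state) is revoked only if the new protection no
 * longer includes VM_PROT_EXECUTE.
 */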
static __inline pt_entry_t
pte_prot_downgrade(pt_entry_t pt_entry, vm_prot_t newprot)
{
	pt_entry &= ~(PTE_xW|PTE_UNMODIFIED);
	if ((newprot & VM_PROT_EXECUTE) == 0)
		pt_entry &= ~(PTE_xX|PTE_UNSYNCED);
	return pt_entry;
}

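/*
 * Compute permission bits for a mapping.  In the active variant below,
 * execute permission is granted immediately only if the page is already
 * known to be icache-synced (EXECPAGE); otherwise UNSYNCED defers it.
 * Similarly, UNMODIFIED marks writable mappings of pages not yet marked
 * modified so the first write can be detected.  The #if 0/#else blocks
 * preserve alternative strategies.
 */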
static __inline pt_entry_t
pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot)
{
	KASSERT(prot & VM_PROT_READ);
	pt_entry_t pt_entry = PTE_xR;
	if (prot & VM_PROT_EXECUTE) {
#if 0
		pt_entry |= PTE_xX;
		if (mdpg != NULL && !VM_PAGEMD_EXECPAGE_P(mdpg))
			pt_entry |= PTE_UNSYNCED;
#elif 1
		if (mdpg != NULL && !VM_PAGEMD_EXECPAGE_P(mdpg))
			pt_entry |= PTE_UNSYNCED;
		else
			pt_entry |= PTE_xX;
#else
		pt_entry |= PTE_UNSYNCED;
#endif
	}
	if (prot & VM_PROT_WRITE) {
		pt_entry |= PTE_xW;
		if (mdpg != NULL && !VM_PAGEMD_MODIFIED_P(mdpg))
			pt_entry |= PTE_UNMODIFIED;
	}
	return pt_entry;
}

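/*
 * Select WIMGE bits from the mapping flags: a non-NULL mdpg identifies
 * managed memory, a NULL mdpg an unmanaged (device) mapping.
 */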
static __inline pt_entry_t
pte_flag_bits(struct vm_page_md *mdpg, int flags)
{
	if (__predict_false(flags & PMAP_NOCACHE)) {
		if (__predict_true(mdpg != NULL)) {
			return pte_nocached_bits();
		} else {
			return pte_ionocached_bits();
		}
	} else {
		if (__predict_false(mdpg != NULL)) {
			return pte_cached_bits();
		} else {
			return pte_iocached_bits();
		}
	}
}

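/*
 * Construct a PTE for pmap_enter(): the physical page number plus WIMGE
 * attributes and permissions derived from the page's metadata.
 */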
static __inline pt_entry_t
pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
	int flags, bool kernel)
{
	pt_entry_t pt_entry = (pt_entry_t) pa & PTE_RPN_MASK;

	pt_entry |= pte_flag_bits(mdpg, flags);
	pt_entry |= pte_prot_bits(mdpg, prot);

	return pt_entry;
}

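/*
 * Construct a PTE for pmap_kenter_pa(): like pte_make_enter() but the
 * mapping is wired and permissions are computed as if the page were
 * unmanaged, bypassing the deferred-exec and modified tracking.
 */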
static __inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
	int flags)
{
	pt_entry_t pt_entry = (pt_entry_t) pa & PTE_RPN_MASK;

	pt_entry |= PTE_WIRED;
	pt_entry |= pte_flag_bits(mdpg, flags);
	pt_entry |= pte_prot_bits(NULL, prot); /* pretend unmanaged */

	return pt_entry;
}

static __inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{
	*ptep = pte;
}

#endif /* _KERNEL */
#endif /* !_LOCORE */

#endif /* !_POWERPC_BOOKE_PTE_H_ */