xref: /netbsd-src/sys/arch/riscv/include/pte.h (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
1 /* $NetBSD: pte.h,v 1.1 2014/09/19 17:36:26 matt Exp $ */
2 /*-
3  * Copyright (c) 2014 The NetBSD Foundation, Inc.
4  * All rights reserved.
5  *
6  * This code is derived from software contributed to The NetBSD Foundation
7  * by Matt Thomas of 3am Software Foundry.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
19  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
20  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
22  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28  * POSSIBILITY OF SUCH DAMAGE.
29  */
30 
31 #ifndef _RISCV_PTE_H_
32 #define _RISCV_PTE_H_
33 
34 //
35 // RV32 page table entry (4 GB VA space)
36 //   [31..22] = PPN[1]
37 //   [21..12] = PPN[0]
38 //   [11.. 9] = software
39 //
40 // RV64 page table entry (4 TB VA space)
41 //   [63..43] = 0
42 //   [42..33] = PPN[2]
43 //   [32..23] = PPN[1]
44 //   [22..13] = PPN[0]
45 //   [12.. 9] = software
46 //
47 // Common to both:
48 //   [8] = SX
49 //   [7] = SW
50 //   [6] = SR
51 //   [5] = UX
52 //   [4] = UW
53 //   [3] = UR
54 //   [2] = G
55 //   [1] = T
56 //   [0] = V
57 //
58 
59 #define NPTEPG		(1 + __BITS(9, 0))	// PTEs per Page
60 #define NSEGPG		NPTEPG
61 #define NPDEPG		NPTEPG
62 #ifdef _LP64
63 #define PTE_PPN		__BITS(63, 13)	// Physical Page Number
64 #define	PTE_PPN0	__BITS(42, 33)	// 1K 8-byte SDEs / PAGE
65 #define	PTE_PPN1	__BITS(32, 23)	// 1K 8-byte PDEs / PAGE
66 #define	PTE_PPN2	__BITS(22, 13)	// 1K 8-byte PTEs / PAGE
67 typedef __uint64_t pt_entry_t;
68 typedef __uint64_t pd_entry_t;
69 #define atomic_cas_pte	atomic_cas_64
70 #define atomic_cas_pde	atomic_cas_64
71 #else
72 #define PTE_PPN		__BITS(31, 12)	// Physical Page Number
73 #define	PTE_PPN0	__BITS(31, 22)	// 1K 4-byte PDEs / PAGE
74 #define	PTE_PPN1	__BITS(21, 12)	// 1K 4-byte PTEs / PAGE
75 typedef __uint32_t pt_entry_t;
76 typedef __uint32_t pd_entry_t;
77 #define atomic_cas_pte	atomic_cas_32
78 #define atomic_cas_pde	atomic_cas_32
79 #endif
80 
81 // These only mean something to NetBSD
82 #define	PTE_NX		__BIT(11)	// Unexecuted
83 #define	PTE_NW		__BIT(10)	// Unmodified
84 #define	PTE_WIRED	__BIT(9)	// Do Not Delete
85 
86 // These are hardware defined bits
87 #define	PTE_SX		__BIT(8)	// Supervisor eXecute
88 #define	PTE_SW		__BIT(7)	// Supervisor Write
89 #define	PTE_SR		__BIT(6)	// Supervisor Read
90 #define	PTE_UX		__BIT(5)	// User eXecute
91 #define	PTE_UW		__BIT(4)	// User Write
92 #define	PTE_UR		__BIT(3)	// User Read
93 #define	PTE_G		__BIT(2)	// Global
94 #define	PTE_T		__BIT(1)	// "Transit" (non-leaf)
95 #define	PTE_V		__BIT(0)	// Valid
96 
97 static inline bool
98 pte_valid_p(pt_entry_t pte)
99 {
100 	return (pte & PTE_V) != 0;
101 }
102 
103 static inline bool
104 pte_wired_p(pt_entry_t pte)
105 {
106 	return (pte & PTE_WIRED) != 0;
107 }
108 
109 static inline bool
110 pte_modified_p(pt_entry_t pte)
111 {
112 	return (pte & PTE_NW) == 0 && (pte & (PTE_UW|PTE_SW)) != 0;
113 }
114 
/*
 * Report whether the mapping is cached.  This PTE format defines no
 * cacheability bits, so every mapping is reported as cached; the
 * argument is unused.
 */
static inline bool
pte_cached_p(pt_entry_t pte)
{
	return true;
}
120 
121 static inline bool
122 pte_deferred_exec_p(pt_entry_t pte)
123 {
124 	return (pte & PTE_NX) != 0;
125 }
126 
127 static inline pt_entry_t
128 pte_wire_entry(pt_entry_t pte)
129 {
130 	return pte | PTE_WIRED;
131 }
132 
133 static inline pt_entry_t
134 pte_unwire_entry(pt_entry_t pte)
135 {
136 	return pte & ~PTE_WIRED;
137 }
138 
139 static inline paddr_t
140 pte_to_paddr(pt_entry_t pte)
141 {
142 	return pte & ~PAGE_MASK;
143 }
144 
145 static inline pt_entry_t
146 pte_nv_entry(bool kernel_p)
147 {
148 	return kernel_p ? PTE_G : 0;
149 }
150 
151 static inline pt_entry_t
152 pte_prot_nowrite(pt_entry_t pte)
153 {
154 	return pte & ~(PTE_NW|PTE_SW|PTE_UW);
155 }
156 
157 static inline pt_entry_t
158 pte_prot_downgrade(pt_entry_t pte, vm_prot_t newprot)
159 {
160 	pte &= ~(PTE_NW|PTE_SW|PTE_UW);
161 	if ((newprot & VM_PROT_EXECUTE) == 0)
162 		pte &= ~(PTE_NX|PTE_SX|PTE_UX);
163 	return pte;
164 }
165 
166 static inline pt_entry_t
167 pte_prot_bits(struct vm_page_md *mdpg, vm_prot_t prot, bool kernel_p)
168 {
169 	KASSERT(prot & VM_PROT_READ);
170 	pt_entry_t pt_entry = PTE_SR | (kernel_p ? 0 : PTE_UR);
171 	if (prot & VM_PROT_EXECUTE) {
172 		if (mdpg != NULL && !VM_PAGEMD_EXECPAGE_P(mdpg))
173 			pt_entry |= PTE_NX;
174 		else
175 			pt_entry |= kernel_p ? PTE_SX : PTE_UX;
176 	}
177 	if (prot & VM_PROT_WRITE) {
178 		if (mdpg != NULL && !VM_PAGEMD_MODIFIED_P(mdpg))
179 			pt_entry |= PTE_NW;
180 		else
181 			pt_entry |= PTE_SW | (kernel_p ? 0 : PTE_UW);
182 	}
183 	return pt_entry;
184 }
185 
/*
 * Compute attribute/flag bits for a new PTE.
 *
 * This PTE format carries no cache-attribute bits, so this currently
 * always returns 0.  The #if 0 block preserves the intended shape
 * (PMAP_NOCACHE handling, managed vs. I/O memory) for when the
 * pte_*cached_bits() helpers exist; until then mdpg, flags and
 * kernel_p are unused.
 */
static inline pt_entry_t
pte_flag_bits(struct vm_page_md *mdpg, int flags, bool kernel_p)
{
#if 0
	if (__predict_false(flags & PMAP_NOCACHE)) {
		if (__predict_true(mdpg != NULL)) {
			return pte_nocached_bits();
		} else {
			return pte_ionocached_bits();
		}
	} else {
		if (__predict_false(mdpg != NULL)) {
			return pte_cached_bits();
		} else {
			return pte_iocached_bits();
		}
	}
#else
	return 0;
#endif
}
207 
208 static inline pt_entry_t
209 pte_make_enter(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
210 	int flags, bool kernel_p)
211 {
212 	pt_entry_t pte = (pt_entry_t) pa & ~PAGE_MASK;
213 
214 	pte |= pte_flag_bits(mdpg, flags, kernel_p);
215 	pte |= pte_prot_bits(mdpg, prot, kernel_p);
216 
217 	if (mdpg == NULL && VM_PAGEMD_REFERENCED_P(mdpg))
218 		pte |= PTE_V;
219 
220 	return pte;
221 }
222 
223 static inline pt_entry_t
224 pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
225 	int flags)
226 {
227 	pt_entry_t pte = (pt_entry_t) pa & ~PAGE_MASK;
228 
229 	pte |= PTE_WIRED | PTE_V;
230 	pte |= pte_flag_bits(NULL, flags, true);
231 	pte |= pte_prot_bits(NULL, prot, true); /* pretend unmanaged */
232 
233 	return pte;
234 }
235 
/*
 * Store a PTE into the page table.  Plain assignment: no memory
 * barrier and no TLB invalidation here — callers must flush the
 * TLB themselves as needed.
 */
static inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{
	*ptep = pte;
}
241 
/*
 * The canonical invalid page directory entry: all zeroes (V clear).
 */
static inline pd_entry_t
pte_invalid_pde(void)
{
	return 0;
}
247 
248 static inline pd_entry_t
249 pte_pde_pdetab(paddr_t pa)
250 {
251 	return PTE_V | PTE_G | PTE_T | pa;
252 }
253 
254 static inline pd_entry_t
255 pte_pde_ptpage(paddr_t pa)
256 {
257 	return PTE_V | PTE_G | PTE_T | pa;
258 }
259 
260 static inline bool
261 pte_pde_valid_p(pd_entry_t pde)
262 {
263 	return (pde & (PTE_V|PTE_T)) == (PTE_V|PTE_T);
264 }
265 
266 static inline paddr_t
267 pte_pde_to_paddr(pd_entry_t pde)
268 {
269 	return pde & ~PAGE_MASK;
270 }
271 
/*
 * Compare-and-swap a page directory entry: replace *pdep with npde
 * if it currently contains opde.  On MULTIPROCESSOR the hardware CAS
 * returns the value observed in *pdep, so the store happened iff the
 * return value equals opde.
 */
static inline pd_entry_t
pte_pde_cas(pd_entry_t *pdep, pd_entry_t opde, pt_entry_t npde)
{
#ifdef MULTIPROCESSOR
#ifdef _LP64
	return atomic_cas_64(pdep, opde, npde);
#else
	return atomic_cas_32(pdep, opde, npde);
#endif
#else
	/*
	 * Uniprocessor: the plain store always succeeds.  NOTE(review):
	 * this returns 0, not the old value, which matches opde only when
	 * callers CAS against pte_invalid_pde() (0) — confirm no caller
	 * passes a non-zero opde on UP.
	 */
	*pdep = npde;
	return 0;
#endif
}
286 #endif /* _RISCV_PTE_H_ */
287