/*	$NetBSD: pmap_machdep.h,v 1.8 2023/07/26 07:00:31 skrll Exp $	*/

/*-
 * Copyright (c) 2022 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _AARCH64_PMAP_MACHDEP_H_
#define _AARCH64_PMAP_MACHDEP_H_

#include <arm/cpufunc.h>

#define PMAP_HWPAGEWALKER	1

#define PMAP_PDETABSIZE		(PAGE_SIZE / sizeof(pd_entry_t))
#define PMAP_SEGTABSIZE		NSEGPG

#define PMAP_INVALID_PDETAB_ADDRESS	((pmap_pdetab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))
#define PMAP_INVALID_SEGTAB_ADDRESS	((pmap_segtab_t *)(VM_MIN_KERNEL_ADDRESS - PAGE_SIZE))

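/*
 * Sentinel values used for pdetab/segtab pointers that have no backing
 * page (consumed by the common uvm/pmap code); placed just below the
 * kernel VA range so they can never be mistaken for a real table address.
 */
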
#define NPTEPG		(PAGE_SIZE / sizeof(pt_entry_t))
#define NPDEPG		(PAGE_SIZE / sizeof(pd_entry_t))

#define PTPSHIFT	3
#define PTPLENGTH	(PGSHIFT - PTPSHIFT)
#define SEGSHIFT	(PGSHIFT + PTPLENGTH)	/* LOG2(NBSEG) */

#define NBSEG		(1 << SEGSHIFT)		/* bytes/segment */
#define SEGOFSET	(NBSEG - 1)		/* byte offset into segment */

#define SEGLENGTH	(PGSHIFT - 3)

#define XSEGSHIFT	(SEGSHIFT + SEGLENGTH + SEGLENGTH)	/* LOG2(NBXSEG) */

#define NBXSEG		(1UL << XSEGSHIFT)	/* bytes/xsegment */
#define XSEGOFSET	(NBXSEG - 1)		/* byte offset into xsegment */
#define XSEGLENGTH	(PGSHIFT - 3)
#define NXSEGPG		(1 << XSEGLENGTH)
#define NSEGPG		(1 << SEGLENGTH)

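/*
 * With the usual 4 KiB page size (PGSHIFT == 12) and 8-byte PTEs
 * (PTPSHIFT == 3) the above works out to:
 *
 *	NPTEPG/NPDEPG	512	entries per page-table page
 *	PTPLENGTH	9	VA bits resolved per table level
 *	SEGSHIFT	21	NBSEG  = 2 MiB  (VA covered by one page of PTEs)
 *	XSEGSHIFT	39	NBXSEG = 512 GiB (a segment plus two more levels)
 */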

#ifndef __BSD_PTENTRY_T__
#define __BSD_PTENTRY_T__
#define PRIxPTE		PRIx64
#endif /* __BSD_PTENTRY_T__ */

#define KERNEL_PID	0

#define __HAVE_PMAP_PV_TRACK
#define __HAVE_PMAP_MD

/* XXX temporary */
#define __HAVE_UNLOCKED_PMAP

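/*
 * Initialise the MD state of a pmap_page: an empty first PV entry, no
 * attributes, and a freshly initialised PV-list lock.
 */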
#define PMAP_PAGE_INIT(pp)						\
do {									\
	(pp)->pp_md.mdpg_first.pv_next = NULL;				\
	(pp)->pp_md.mdpg_first.pv_pmap = NULL;				\
	(pp)->pp_md.mdpg_first.pv_va = 0;				\
	(pp)->pp_md.mdpg_attrs = 0;					\
	VM_PAGEMD_PVLIST_LOCK_INIT(&(pp)->pp_md);			\
} while (/* CONSTCOND */ 0)

struct pmap_md {
	paddr_t	pmd_l0_pa;
};

#define pm_l0_pa	pm_md.pmd_l0_pa

void	pmap_md_pdetab_init(struct pmap *);
void	pmap_md_pdetab_fini(struct pmap *);

vaddr_t	pmap_md_map_poolpage(paddr_t, size_t);
paddr_t	pmap_md_unmap_poolpage(vaddr_t, size_t);

struct vm_page *
	pmap_md_alloc_poolpage(int);

bool	pmap_md_direct_mapped_vaddr_p(vaddr_t);
paddr_t	pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t);
vaddr_t	pmap_md_direct_map_paddr(paddr_t);
bool	pmap_md_io_vaddr_p(vaddr_t);

void	pmap_md_activate_efirt(void);
void	pmap_md_deactivate_efirt(void);

void	pmap_icache_sync_range(pmap_t, vaddr_t, vaddr_t);

vsize_t	pmap_kenter_range(vaddr_t, paddr_t, vsize_t, vm_prot_t, u_int flags);

#include <uvm/pmap/vmpagemd.h>
#include <uvm/pmap/pmap.h>
#include <uvm/pmap/pmap_pvt.h>
#include <uvm/pmap/pmap_tlb.h>
#include <uvm/pmap/pmap_synci.h>
#include <uvm/pmap/tlb.h>

#include <uvm/uvm_page.h>

#define POOL_VTOPHYS(va)	vtophys((vaddr_t)(va))

struct pmap_page {
	struct vm_page_md	pp_md;
};

#define PMAP_PAGE_TO_MD(ppage)	(&((ppage)->pp_md))

#define PVLIST_EMPTY_P(pg)	VM_PAGEMD_PVLIST_EMPTY_P(VM_PAGE_TO_MD(pg))

#define LX_BLKPAG_OS_MODIFIED	LX_BLKPAG_OS_0

#define PMAP_PTE_OS0	"modified"
#define PMAP_PTE_OS1	"(unk)"

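/*
 * Return the physical address of the pmap's level 0 (top-level)
 * translation table, as loaded into the TTBR when the pmap is activated.
 */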
static inline paddr_t
pmap_l0pa(struct pmap *pm)
{
	return pm->pm_l0_pa;
}

#if defined(__PMAP_PRIVATE)

#include <uvm/uvm_physseg.h>
struct vm_page_md;

void	pmap_md_icache_sync_all(void);
void	pmap_md_icache_sync_range_index(vaddr_t, vsize_t);
void	pmap_md_page_syncicache(struct vm_page_md *, const kcpuset_t *);
bool	pmap_md_vca_add(struct vm_page_md *, vaddr_t, pt_entry_t *);
void	pmap_md_vca_clean(struct vm_page_md *, int);
void	pmap_md_vca_remove(struct vm_page_md *, vaddr_t, bool, bool);
bool	pmap_md_ok_to_steal_p(const uvm_physseg_t, size_t);

void	pmap_md_xtab_activate(pmap_t, struct lwp *);
void	pmap_md_xtab_deactivate(pmap_t);

vaddr_t	pmap_md_direct_map_paddr(paddr_t);


#ifdef MULTIPROCESSOR
#define PMAP_NO_PV_UNCACHED
#endif

static inline void
pmap_md_init(void)
{
	// nothing
}


static inline bool
pmap_md_tlb_check_entry(void *ctx, vaddr_t va, tlb_asid_t asid, pt_entry_t pte)
{
	// TLB not walked and so not called.
	return false;
}


static inline bool
pmap_md_virtual_cache_aliasing_p(void)
{
	return false;
}


static inline vsize_t
pmap_md_cache_prefer_mask(void)
{
	return 0;
}


static inline pt_entry_t *
pmap_md_nptep(pt_entry_t *ptep)
{

	return ptep + 1;
}


static __inline paddr_t
pte_to_paddr(pt_entry_t pte)
{

	return l3pte_pa(pte);
}


static inline bool
pte_valid_p(pt_entry_t pte)
{

	return l3pte_valid(pte);
}


static inline void
pmap_md_clean_page(struct vm_page_md *md, bool is_src)
{
}


static inline bool
pte_modified_p(pt_entry_t pte)
{

	return (pte & LX_BLKPAG_OS_MODIFIED) != 0;
}


static inline bool
pte_wired_p(pt_entry_t pte)
{

	return (pte & LX_BLKPAG_OS_WIRED) != 0;
}


static inline pt_entry_t
pte_wire_entry(pt_entry_t pte)
{

	return pte | LX_BLKPAG_OS_WIRED;
}


static inline pt_entry_t
pte_unwire_entry(pt_entry_t pte)
{

	return pte & ~LX_BLKPAG_OS_WIRED;
}


static inline uint64_t
pte_value(pt_entry_t pte)
{

	return pte;
}

static inline bool
pte_cached_p(pt_entry_t pte)
{

	return ((pte & LX_BLKPAG_ATTR_MASK) == LX_BLKPAG_ATTR_NORMAL_WB);
}

static inline bool
pte_deferred_exec_p(pt_entry_t pte)
{

	return false;
}

static inline pt_entry_t
pte_nv_entry(bool kernel_p)
{

	/* A not-valid entry is all zeroes, for both kernel and user. */
	return kernel_p ? 0 : 0;
}

static inline pt_entry_t
pte_prot_downgrade(pt_entry_t pte, vm_prot_t prot)
{

	return (pte & ~LX_BLKPAG_AP)
	    | (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ?
		LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW);
}

static inline pt_entry_t
pte_prot_nowrite(pt_entry_t pte)
{

	return pte & ~LX_BLKPAG_AF;
}

static inline pt_entry_t
pte_cached_change(pt_entry_t pte, bool cached)
{
	pte &= ~LX_BLKPAG_ATTR_MASK;
	pte |= (cached ? LX_BLKPAG_ATTR_NORMAL_WB : LX_BLKPAG_ATTR_NORMAL_NC);

	return pte;
}

static inline void
pte_set(pt_entry_t *ptep, pt_entry_t pte)
{

	*ptep = pte;
	dsb(ishst);
	/*
	 * If this mapping is going to be used by userland then the ERET
	 * *can* act as the ISB, but it might not (e.g. Apple M1).
	 *
	 * If this mapping is for the kernel then the ISB is always needed
	 * (for some micro-architectures).
	 */
	isb();
}

static inline pd_entry_t
pte_invalid_pde(void)
{

	return 0;
}


static inline pd_entry_t
pte_pde_pdetab(paddr_t pa, bool kernel_p)
{

	return LX_VALID | LX_TYPE_TBL | (kernel_p ? 0 : LX_BLKPAG_NG) | pa;
}


static inline pd_entry_t
pte_pde_ptpage(paddr_t pa, bool kernel_p)
{

	return LX_VALID | LX_TYPE_TBL | (kernel_p ? 0 : LX_BLKPAG_NG) | pa;
}


static inline bool
pte_pde_valid_p(pd_entry_t pde)
{

	return lxpde_valid(pde);
}


static inline paddr_t
pte_pde_to_paddr(pd_entry_t pde)
{

	return lxpde_pa(pde);
}


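/*
 * Update a page-directory entry, returning the previous (expected) value.
 * On MULTIPROCESSOR kernels this is a genuine compare-and-swap followed by
 * a barrier so the table walker sees the update; on uniprocessor kernels a
 * plain store is enough.
 */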
static inline pd_entry_t
pte_pde_cas(pd_entry_t *pdep, pd_entry_t opde, pt_entry_t npde)
{
#ifdef MULTIPROCESSOR
	opde = atomic_cas_64(pdep, opde, npde);
	dsb(ishst);
#else
	*pdep = npde;
#endif
	return opde;
}


static inline void
pte_pde_set(pd_entry_t *pdep, pd_entry_t npde)
{

	*pdep = npde;
}


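/*
 * Translate the MI PMAP_DEV / PMAP_CACHE flags into the memory attribute
 * field of a block/page descriptor (i.e. the MAIR index bits): device
 * nGnRnE, device nGnRE, normal non-cacheable, or normal write-back.
 */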
static inline pt_entry_t
pte_memattr(u_int flags)
{

	switch (flags & (PMAP_DEV_MASK | PMAP_CACHE_MASK)) {
	case PMAP_DEV_NP ... PMAP_DEV_NP | PMAP_CACHE_MASK:
		/* Device-nGnRnE */
		return LX_BLKPAG_ATTR_DEVICE_MEM_NP;
	case PMAP_DEV ... PMAP_DEV | PMAP_CACHE_MASK:
		/* Device-nGnRE */
		return LX_BLKPAG_ATTR_DEVICE_MEM;
	case PMAP_NOCACHE:
	case PMAP_NOCACHE_OVR:
	case PMAP_WRITE_COMBINE:
		/* only no-cache */
		return LX_BLKPAG_ATTR_NORMAL_NC;
	case PMAP_WRITE_BACK:
	case 0:
	default:
		return LX_BLKPAG_ATTR_NORMAL_WB;
	}
}


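/*
 * Construct the PTE for an unmanaged, wired kernel mapping
 * (pmap_kenter_pa()): valid, access flag set, never executable from EL0,
 * and only executable at EL1 when VM_PROT_EXECUTE is requested.
 */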
static inline pt_entry_t
pte_make_kenter_pa(paddr_t pa, struct vm_page_md *mdpg, vm_prot_t prot,
    u_int flags)
{
	KASSERTMSG((pa & ~L3_PAG_OA) == 0, "pa %" PRIxPADDR, pa);

	pt_entry_t pte = pa
	    | LX_VALID
#ifdef MULTIPROCESSOR
	    | LX_BLKPAG_SH_IS
#endif
	    | L3_TYPE_PAG
	    | LX_BLKPAG_AF
	    | LX_BLKPAG_UXN | LX_BLKPAG_PXN
	    | (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ?
		LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW)
	    | LX_BLKPAG_OS_WIRED;

	if (prot & VM_PROT_EXECUTE)
		pte &= ~LX_BLKPAG_PXN;

	pte &= ~LX_BLKPAG_ATTR_MASK;
	pte |= pte_memattr(flags);

	return pte;
}


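/*
 * Construct a PTE for the EFI runtime services mapping: non-global like a
 * user mapping, but currently without EL0 access; executable at EL1 only
 * when VM_PROT_EXECUTE is requested.
 */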
static inline pt_entry_t
pte_make_enter_efirt(paddr_t pa, vm_prot_t prot, u_int flags)
{
	KASSERTMSG((pa & ~L3_PAG_OA) == 0, "pa %" PRIxPADDR, pa);

	pt_entry_t npte = pa
	    | LX_VALID
#ifdef MULTIPROCESSOR
	    | LX_BLKPAG_SH_IS
#endif
	    | L3_TYPE_PAG
	    | LX_BLKPAG_AF
	    | LX_BLKPAG_NG /* | LX_BLKPAG_APUSER */
	    | LX_BLKPAG_UXN | LX_BLKPAG_PXN
	    | (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ?
		LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW);

	if (prot & VM_PROT_EXECUTE)
		npte &= ~LX_BLKPAG_PXN;

	npte &= ~LX_BLKPAG_ATTR_MASK;
	npte |= pte_memattr(flags);

	return npte;
}


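/*
 * Construct the PTE for a managed mapping (pmap_enter()).  The access flag
 * is only set up front when the faulting access type or the page's recorded
 * referenced/modified state shows that no referenced/modified emulation is
 * needed; otherwise the first access faults and the attributes are updated
 * then.
 */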
static inline pt_entry_t
pte_make_enter(paddr_t pa, const struct vm_page_md *mdpg, vm_prot_t prot,
    u_int flags, bool is_kernel_pmap_p)
{
	KASSERTMSG((pa & ~L3_PAG_OA) == 0, "pa %" PRIxPADDR, pa);

	pt_entry_t npte = pa
	    | LX_VALID
#ifdef MULTIPROCESSOR
	    | LX_BLKPAG_SH_IS
#endif
	    | L3_TYPE_PAG
	    | LX_BLKPAG_UXN | LX_BLKPAG_PXN
	    | (((prot) & (VM_PROT_READ | VM_PROT_WRITE)) == VM_PROT_READ ?
		LX_BLKPAG_AP_RO : LX_BLKPAG_AP_RW);

	if ((prot & VM_PROT_WRITE) != 0 &&
	    ((flags & VM_PROT_WRITE) != 0 || VM_PAGEMD_MODIFIED_P(mdpg))) {
		/*
		 * This is a writable mapping, and either the access that
		 * brought us here is a write or the page has already been
		 * marked modified.  No need for modified emulation.
		 */
		npte |= LX_BLKPAG_AF;
	} else if ((flags & VM_PROT_ALL) || VM_PAGEMD_REFERENCED_P(mdpg)) {
		/*
		 * - The access type indicates that we don't need to do
		 *   referenced emulation.
		 * OR
		 * - The physical page has already been referenced so no need
		 *   to re-do referenced emulation here.
		 */
		npte |= LX_BLKPAG_AF;
	}

	if (prot & VM_PROT_EXECUTE)
		npte &= (is_kernel_pmap_p ? ~LX_BLKPAG_PXN : ~LX_BLKPAG_UXN);

	npte &= ~LX_BLKPAG_ATTR_MASK;
	npte |= pte_memattr(flags);

	/*
	 * Make sure userland mappings are non-global and accessible from EL0.
	 */
	if (!is_kernel_pmap_p) {
		npte |= LX_BLKPAG_NG | LX_BLKPAG_APUSER;
	}

	return npte;
}
#endif /* __PMAP_PRIVATE */

#endif /* _AARCH64_PMAP_MACHDEP_H_ */