/*	$NetBSD: pmap_machdep.c,v 1.6 2023/04/20 08:28:02 skrll Exp $	*/

/*-
 * Copyright (c) 2022 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Nick Hudson
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "opt_arm_debug.h"
#include "opt_efi.h"
#include "opt_multiprocessor.h"
#include "opt_uvmhist.h"

#define __PMAP_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: pmap_machdep.c,v 1.6 2023/04/20 08:28:02 skrll Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/buf.h>
#include <sys/cpu.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_page.h>
#include <uvm/pmap/pmap_pvt.h>

#include <aarch64/cpufunc.h>

#include <arm/locore.h>

#ifdef VERBOSE_INIT_ARM
#define VPRINTF(...)	printf(__VA_ARGS__)
#else
#define VPRINTF(...)	__nothing
#endif

/* Set to LX_BLKPAG_GP if supported. */
uint64_t pmap_attr_gp = 0;

/*
 * Misc variables
 */
vaddr_t virtual_avail;
vaddr_t virtual_end;
paddr_t
vtophys(vaddr_t va)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == false)
		return 0;
	return pa;
}

bool
pmap_extract_coherency(pmap_t pm, vaddr_t va, paddr_t *pap, bool *coherentp)
{
	paddr_t pa;
	bool coherency = false;

	if (pm == pmap_kernel()) {
		if (pmap_md_direct_mapped_vaddr_p(va)) {
			pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
			goto done;
		}
		if (pmap_md_io_vaddr_p(va))
			panic("pmap_extract: io address %#"PRIxVADDR"", va);

		if (va >= pmap_limits.virtual_end)
			panic("%s: illegal kernel mapped address %#"PRIxVADDR,
			    __func__, va);
	}

	kpreempt_disable();
	const pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
	pt_entry_t pte;

	if (ptep == NULL || !pte_valid_p(pte = *ptep)) {
		kpreempt_enable();
		return false;
	}
	kpreempt_enable();

	pa = pte_to_paddr(pte) | (va & PGOFSET);

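	/*
	 * Non-cacheable normal memory and device memory mappings bypass
	 * the data cache, so they need no cache maintenance and are
	 * reported as coherent to the caller.
	 */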
	switch (pte & LX_BLKPAG_ATTR_MASK) {
	case LX_BLKPAG_ATTR_NORMAL_NC:
	case LX_BLKPAG_ATTR_DEVICE_MEM:
	case LX_BLKPAG_ATTR_DEVICE_MEM_NP:
		coherency = true;
		break;
	}

 done:
	if (pap != NULL) {
		*pap = pa;
	}
	if (coherentp != NULL) {
		*coherentp = coherency;
	}
	return true;
}


bool
pmap_fault_fixup(pmap_t pm, vaddr_t va, vm_prot_t ftype, bool user)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);

	KASSERT(!user || (pm != pmap_kernel()));

	kpreempt_disable();

	UVMHIST_LOG(pmaphist, " pm=%#jx, va=%#jx, ftype=%#jx, user=%jd",
	    (uintptr_t)pm, va, ftype, user);
	UVMHIST_LOG(pmaphist, " ti=%#jx pai=%#jx asid=%#jx",
	    (uintptr_t)cpu_tlb_info(curcpu()),
	    (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu())),
	    (uintptr_t)PMAP_PAI(pm, cpu_tlb_info(curcpu()))->pai_asid, 0);
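
	/*
	 * Referenced/modified emulation.  A write fault on a mapping that
	 * is already writeable, or a read fault on one that is already
	 * readable, indicates the Access Flag is clear; record the page as
	 * modified and/or referenced and set the Access Flag so the access
	 * can proceed.
	 */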
	bool fixed = false;
	pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
	if (ptep == NULL) {
		UVMHIST_LOG(pmaphist, "... no ptep", 0, 0, 0, 0);
		goto done;
	}

	const pt_entry_t opte = *ptep;
	if (!l3pte_valid(opte)) {
		UVMHIST_LOG(pmaphist, "invalid pte: %016llx: va=%016lx",
		    opte, va, 0, 0);
		goto done;
	}

	const paddr_t pa = l3pte_pa(opte);
	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
	if (pg == NULL) {
		UVMHIST_LOG(pmaphist, "pg not found: va=%016lx", va, 0, 0, 0);
		goto done;
	}

	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
	UVMHIST_LOG(pmaphist, " pg=%#jx, opte=%#jx, ptep=%#jx", (uintptr_t)pg,
	    opte, (uintptr_t)ptep, 0);

	if ((ftype & VM_PROT_WRITE) && (opte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RW) {
		/*
		 * This looks like a good candidate for "page modified"
		 * emulation...
		 */
		pmap_page_set_attributes(mdpg, VM_PAGEMD_MODIFIED | VM_PAGEMD_REFERENCED);

		/*
		 * The mapping is already writeable (AP_RW), so this was an
		 * Access Flag fault; set the Access Flag to make the page
		 * accessible again.
		 */
		// XXXNH LX_BLKPAG_OS_0?
		const pt_entry_t npte = opte | LX_BLKPAG_AF | LX_BLKPAG_OS_0;
		atomic_swap_64(ptep, npte);
		dsb(ishst);
		fixed = true;

		UVMHIST_LOG(pmaphist, " <-- done (mod emul: changed pte "
		    "from %#jx to %#jx)", opte, npte, 0, 0);
	} else if ((ftype & VM_PROT_READ) && (opte & LX_BLKPAG_AP) == LX_BLKPAG_AP_RO) {
		/*
		 * This looks like a good candidate for "page referenced"
		 * emulation.
		 */

		pmap_page_set_attributes(mdpg, VM_PAGEMD_REFERENCED);

		/*
		 * Make the page accessible again by setting the Access Flag;
		 * the mapping itself stays read-only.
		 */
		const pt_entry_t npte = opte | LX_BLKPAG_AF;
		atomic_swap_64(ptep, npte);
		dsb(ishst);
		fixed = true;

		UVMHIST_LOG(pmaphist, " <-- done (ref emul: changed pte "
		    "from %#jx to %#jx)", opte, npte, 0, 0);
	}

done:
	kpreempt_enable();

	return fixed;
}


void
pmap_icache_sync_range(pmap_t pm, vaddr_t sva, vaddr_t eva)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, "pm %#jx sva %#jx eva %#jx",
	   (uintptr_t)pm, sva, eva, 0);

	KASSERT((sva & PAGE_MASK) == 0);
	KASSERT((eva & PAGE_MASK) == 0);

	pmap_lock(pm);

	for (vaddr_t va = sva; va < eva; va += PAGE_SIZE) {
		pt_entry_t * const ptep = pmap_pte_lookup(pm, va);
		if (ptep == NULL)
			continue;

		pt_entry_t opte = *ptep;
		if (!l3pte_valid(opte)) {
			UVMHIST_LOG(pmaphist, "invalid pte: %016llx: va=%016lx",
			    opte, va, 0, 0);
			goto done;
		}

		if (l3pte_readable(opte)) {
			cpu_icache_sync_range(va, PAGE_SIZE);
		} else {
			/*
			 * change to accessible temporarily
			 * to do cpu_icache_sync_range()
			 */
			struct pmap_asid_info * const pai = PMAP_PAI(pm,
			    cpu_tlb_info(curcpu()));
			atomic_swap_64(ptep, opte | LX_BLKPAG_AF);
			// tlb_invalidate_addr does the dsb(ishst);
			tlb_invalidate_addr(pai->pai_asid, va);
			cpu_icache_sync_range(va, PAGE_SIZE);
			atomic_swap_64(ptep, opte);
			tlb_invalidate_addr(pai->pai_asid, va);
		}
	}
done:
	pmap_unlock(pm);
}


struct vm_page *
pmap_md_alloc_poolpage(int flags)
{

	/*
	 * Any managed page works for us.
	 */
	return uvm_pagealloc(NULL, 0, NULL, flags);
}


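/*
 * Pool pages are accessed through the direct map, so no kernel virtual
 * address needs to be allocated and no TLB entry is installed; the chosen
 * VA is recorded in the page's pv entry for the colour bookkeeping checks.
 */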
vaddr_t
pmap_md_map_poolpage(paddr_t pa, size_t len)
{
	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
	const vaddr_t va = pmap_md_direct_map_paddr(pa);
	KASSERT(cold || pg != NULL);

	if (pg != NULL) {
		struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);
		const pv_entry_t pv = &mdpg->mdpg_first;
		const vaddr_t last_va = trunc_page(pv->pv_va);

		KASSERT(len == PAGE_SIZE || last_va == pa);
		KASSERT(pv->pv_pmap == NULL);
		KASSERT(pv->pv_next == NULL);
		KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));

		pv->pv_va = va;
	}

	return va;
}


paddr_t
pmap_md_unmap_poolpage(vaddr_t va, size_t len)
{
	KASSERT(len == PAGE_SIZE);
	KASSERT(pmap_md_direct_mapped_vaddr_p(va));

	const paddr_t pa = pmap_md_direct_mapped_vaddr_to_paddr(va);
	struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);

	KASSERT(pg);
	struct vm_page_md * const mdpg = VM_PAGE_TO_MD(pg);

	KASSERT(!VM_PAGEMD_EXECPAGE_P(mdpg));

	const pv_entry_t pv = &mdpg->mdpg_first;

	/* Note last mapped address for future color check */
	pv->pv_va = va;

	KASSERT(pv->pv_pmap == NULL);
	KASSERT(pv->pv_next == NULL);

	return pa;
}


bool
pmap_md_direct_mapped_vaddr_p(vaddr_t va)
{

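	/*
	 * A VA is direct mapped if it lies within the kernel direct-map
	 * segment and translates to a physical address inside managed
	 * memory (physical_start .. physical_end).
	 */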
	if (!AARCH64_KVA_P(va))
		return false;

	paddr_t pa = AARCH64_KVA_TO_PA(va);
	if (physical_start <= pa && pa < physical_end)
		return true;

	return false;
}


paddr_t
pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t va)
{

	return AARCH64_KVA_TO_PA(va);
}


vaddr_t
pmap_md_direct_map_paddr(paddr_t pa)
{

	return AARCH64_PA_TO_KVA(pa);
}


bool
pmap_md_io_vaddr_p(vaddr_t va)
{

	if (pmap_devmap_find_va(va, PAGE_SIZE)) {
		return true;
	}
	return false;
}


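/*
 * Pre-allocate the page-table pages needed to cover the kernel VA range
 * starting at va, descending one translation level per recursion and
 * allocating any missing directory pages from the bootstrap allocator,
 * until *remaining bytes of VA have been covered.
 */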
static void
pmap_md_grow(pmap_pdetab_t *ptb, vaddr_t va, vsize_t vshift,
    vsize_t *remaining)
{
	KASSERT((va & (NBSEG - 1)) == 0);
	const vaddr_t pdetab_mask = PMAP_PDETABSIZE - 1;
	const vsize_t vinc = 1UL << vshift;

	for (size_t i = (va >> vshift) & pdetab_mask;
	    i < PMAP_PDETABSIZE; i++, va += vinc) {
		pd_entry_t * const pde_p =
		    &ptb->pde_pde[(va >> vshift) & pdetab_mask];

		vaddr_t pdeva;
		if (pte_pde_valid_p(*pde_p)) {
			const paddr_t pa = pte_pde_to_paddr(*pde_p);
			pdeva = pmap_md_direct_map_paddr(pa);
		} else {
			/*
			 * uvm_pageboot_alloc() returns a direct mapped address
			 */
			pdeva = uvm_pageboot_alloc(Ln_TABLE_SIZE);
			paddr_t pdepa = AARCH64_KVA_TO_PA(pdeva);
			*pde_p = pte_pde_pdetab(pdepa, true);
			memset((void *)pdeva, 0, PAGE_SIZE);
		}

		if (vshift > SEGSHIFT) {
			pmap_md_grow((pmap_pdetab_t *)pdeva, va,
			    vshift - SEGLENGTH, remaining);
		} else {
			if (*remaining > vinc)
				*remaining -= vinc;
			else
				*remaining = 0;
		}
		if (*remaining == 0)
			return;
	}
}


void
pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
{
	pmap_t pm = pmap_kernel();

	/*
	 * Initialise the kernel pmap object
	 */
	curcpu()->ci_pmap_cur = pm;

	virtual_avail = vstart;
	virtual_end = vend;

	aarch64_tlbi_all();

	pm->pm_l0_pa = __SHIFTOUT(reg_ttbr1_el1_read(), TTBR_BADDR);
	pm->pm_pdetab = (pmap_pdetab_t *)AARCH64_PA_TO_KVA(pm->pm_l0_pa);

	VPRINTF("common ");
	pmap_bootstrap_common();

	VPRINTF("tlb0 ");
	pmap_tlb_info_init(&pmap_tlb0_info);

#ifdef MULTIPROCESSOR
	VPRINTF("kcpusets ");

	kcpuset_create(&pm->pm_onproc, true);
	kcpuset_create(&pm->pm_active, true);
	KASSERT(pm->pm_onproc != NULL);
	KASSERT(pm->pm_active != NULL);
	kcpuset_set(pm->pm_onproc, cpu_number());
	kcpuset_set(pm->pm_active, cpu_number());
#endif

	VPRINTF("nkmempages ");
	/*
	 * Compute the number of pages kmem_arena will have.  This will also
	 * be called by uvm_km_bootstrap later, but that doesn't matter
	 */
	kmeminit_nkmempages();

	/* Get size of buffer cache and set an upper limit */
	buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
	vsize_t bufsz = buf_memcalc();
	buf_setvalimit(bufsz);

	vsize_t kvmsize = (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) +
	    bufsz + 16 * NCARGS + pager_map_size) +
	    /*(maxproc * UPAGES) + */nkmempages * NBPG;

#ifdef SYSVSHM
	kvmsize += shminfo.shmall;
#endif

	/* Calculate VA address space and roundup to NBSEG tables */
	kvmsize = roundup(kvmsize, NBSEG);

	/*
	 * Initialize `FYI' variables.	Note we're relying on
	 * the fact that BSEARCH sorts the vm_physmem[] array
	 * for us.  Must do this before uvm_pageboot_alloc()
	 * can be called.
	 */
	pmap_limits.avail_start = ptoa(uvm_physseg_get_start(uvm_physseg_get_first()));
	pmap_limits.avail_end = ptoa(uvm_physseg_get_end(uvm_physseg_get_last()));

	/*
	 * Update the naive settings in pmap_limits to the actual KVA range.
	 */
	pmap_limits.virtual_start = vstart;
	pmap_limits.virtual_end = vend;

	VPRINTF("\nlimits: %" PRIxVADDR " - %" PRIxVADDR "\n", vstart, vend);

	const vaddr_t kvmstart = vstart;
	pmap_curmaxkvaddr = vstart + kvmsize;

	VPRINTF("kva   : %" PRIxVADDR " - %" PRIxVADDR "\n", kvmstart,
	    pmap_curmaxkvaddr);

	pmap_md_grow(pmap_kernel()->pm_pdetab, kvmstart, XSEGSHIFT, &kvmsize);

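	/*
	 * Give the EFI runtime pmap its own L0 translation table so it can
	 * be installed in TTBR0 when EFI runtime services are called.
	 */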
#if defined(EFI_RUNTIME)
	vaddr_t efi_l0va = uvm_pageboot_alloc(Ln_TABLE_SIZE);
	KASSERT((efi_l0va & PAGE_MASK) == 0);

	pmap_t efipm = pmap_efirt();
	efipm->pm_l0_pa = AARCH64_KVA_TO_PA(efi_l0va);
	efipm->pm_pdetab = (pmap_pdetab_t *)efi_l0va;

#endif

	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);

	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
#ifdef KASAN
	    NULL,
#else
	    &pmap_pv_page_allocator,
#endif
	    IPL_NONE);

	// arm_dcache_align
	pmap_pvlist_lock_init(CACHE_LINE_SIZE);

	VPRINTF("done\n");
}


void
pmap_md_xtab_activate(pmap_t pm, struct lwp *l)
{
	UVMHIST_FUNC(__func__);
	UVMHIST_CALLARGS(pmaphist, " (pm=%#jx l=%#jx)", (uintptr_t)pm, (uintptr_t)l, 0, 0);

	KASSERT(kpreempt_disabled());

	/*
	 * Assume that TTBR1 has only global mappings and TTBR0 only
	 * has non-global mappings.  To prevent speculation from doing
	 * evil things we disable translation table walks using TTBR0
	 * before setting the CONTEXTIDR (ASID) or new TTBR0 value.
	 * Once both are set, table walks are reenabled.
	 */

	const uint64_t old_tcrel1 = reg_tcr_el1_read();
	reg_tcr_el1_write(old_tcrel1 | TCR_EPD0);
	isb();

	struct cpu_info * const ci = curcpu();
	struct pmap_asid_info * const pai = PMAP_PAI(pm, cpu_tlb_info(ci));

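	/* New TTBR0 value: this pmap's ASID and the PA of its L0 table. */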
	const uint64_t ttbr =
	    __SHIFTIN(pai->pai_asid, TTBR_ASID) |
	    __SHIFTIN(pm->pm_l0_pa, TTBR_BADDR);

	cpu_set_ttbr0(ttbr);

	if (pm != pmap_kernel()) {
		reg_tcr_el1_write(old_tcrel1 & ~TCR_EPD0);
	}

	UVMHIST_LOG(maphist, " pm %#jx pm->pm_l0 %016jx pm->pm_l0_pa %016jx asid %ju... done",
	    (uintptr_t)pm, (uintptr_t)pm->pm_pdetab, (uintptr_t)pm->pm_l0_pa,
	    (uintptr_t)pai->pai_asid);

	KASSERTMSG(ci->ci_pmap_asid_cur == pai->pai_asid, "%u vs %u",
	    ci->ci_pmap_asid_cur, pai->pai_asid);
	ci->ci_pmap_cur = pm;
}


void
pmap_md_xtab_deactivate(pmap_t pm)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);

	KASSERT(kpreempt_disabled());

	struct cpu_info * const ci = curcpu();
	/*
	 * Disable translation table walks from TTBR0 while no pmap has been
	 * activated.
	 */
	const uint64_t old_tcrel1 = reg_tcr_el1_read();
	reg_tcr_el1_write(old_tcrel1 | TCR_EPD0);
	isb();

	cpu_set_ttbr0(0);

	ci->ci_pmap_cur = pmap_kernel();
	KASSERTMSG(ci->ci_pmap_asid_cur == KERNEL_PID, "ci_pmap_asid_cur %u",
	    ci->ci_pmap_asid_cur);
}


#if defined(EFI_RUNTIME)
void
pmap_md_activate_efirt(void)
{
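	/*
	 * Preemption stays disabled while the EFI runtime table is
	 * installed in TTBR0; the matching kpreempt_enable() is in
	 * pmap_md_deactivate_efirt().
	 */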
	kpreempt_disable();

	pmap_md_xtab_activate(pmap_efirt(), NULL);
}
void
pmap_md_deactivate_efirt(void)
{
	pmap_md_xtab_deactivate(pmap_efirt());

	kpreempt_enable();
}
#endif


void
pmap_md_pdetab_init(struct pmap *pm)
{

	KASSERT(pm != NULL);

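	/*
	 * Record the physical address of the top-level table so it can be
	 * loaded into TTBR0 when this pmap is activated.
	 */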
	pmap_extract(pmap_kernel(), (vaddr_t)pm->pm_pdetab, &pm->pm_l0_pa);
}

void
pmap_md_pdetab_fini(struct pmap *pm)
{

	KASSERT(pm != NULL);
}


void
pmap_md_page_syncicache(struct vm_page_md *mdpg, const kcpuset_t *onproc)
{
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pmaphist);

	//XXXNH
}


bool
pmap_md_ok_to_steal_p(const uvm_physseg_t bank, size_t npgs)
{

	return true;
}


pd_entry_t *
pmap_l0table(struct pmap *pm)
{

	return pm->pm_pdetab->pde_pde;
}


#define	L1_BLK_MAPPABLE_P(va, pa, size)					\
    ((((va) | (pa)) & L1_OFFSET) == 0 && (size) >= L1_SIZE)

#define	L2_BLK_MAPPABLE_P(va, pa, size)					\
    ((((va) | (pa)) & L2_OFFSET) == 0 && (size) >= L2_SIZE)


vsize_t
pmap_kenter_range(vaddr_t va, paddr_t pa, vsize_t size,
    vm_prot_t prot, u_int flags)
{
	pt_entry_t attr;
	psize_t blocksize;

	vsize_t resid = round_page(size);
	vsize_t mapped = 0;

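	/*
	 * Map the range using the largest block size that the VA/PA
	 * alignment and the remaining length allow, falling back to L3
	 * pages otherwise.
	 */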
	while (resid > 0) {
		if (L1_BLK_MAPPABLE_P(va, pa, resid)) {
			blocksize = L1_SIZE;
			attr = L1_BLOCK;
		} else if (L2_BLK_MAPPABLE_P(va, pa, resid)) {
			blocksize = L2_SIZE;
			attr = L2_BLOCK;
		} else {
			blocksize = L3_SIZE;
			attr = L3_PAGE;
		}

		pt_entry_t pte = pte_make_kenter_pa(pa, NULL, prot, flags);
		pte &= ~LX_TYPE;
		attr |= pte;

		pmapboot_enter(va, pa, blocksize, blocksize, attr, NULL);

		va += blocksize;
		pa += blocksize;
		resid -= blocksize;
		mapped += blocksize;
	}

	return mapped;
}

#ifdef MULTIPROCESSOR
void
pmap_md_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
{
	/* nothing */
}
#endif /* MULTIPROCESSOR */
699