1*686b1ff1Sskrll /* $NetBSD: pmap_machdep.c,v 1.20 2024/01/01 17:18:02 skrll Exp $ */
24297c647Smaxv
34297c647Smaxv /*
4e0a18223Sskrll * Copyright (c) 2014, 2019, 2021 The NetBSD Foundation, Inc.
59687a165Smatt * All rights reserved.
69687a165Smatt *
79687a165Smatt * This code is derived from software contributed to The NetBSD Foundation
8e0a18223Sskrll * by Matt Thomas (of 3am Software Foundry), Maxime Villard, and
9e0a18223Sskrll * Nick Hudson.
109687a165Smatt *
119687a165Smatt * Redistribution and use in source and binary forms, with or without
129687a165Smatt * modification, are permitted provided that the following conditions
139687a165Smatt * are met:
149687a165Smatt * 1. Redistributions of source code must retain the above copyright
159687a165Smatt * notice, this list of conditions and the following disclaimer.
169687a165Smatt * 2. Redistributions in binary form must reproduce the above copyright
179687a165Smatt * notice, this list of conditions and the following disclaimer in the
189687a165Smatt * documentation and/or other materials provided with the distribution.
199687a165Smatt *
209687a165Smatt * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
219687a165Smatt * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
229687a165Smatt * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
239687a165Smatt * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
249687a165Smatt * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
259687a165Smatt * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
269687a165Smatt * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
279687a165Smatt * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
289687a165Smatt * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
299687a165Smatt * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
309687a165Smatt * POSSIBILITY OF SUCH DAMAGE.
319687a165Smatt */
329687a165Smatt
3317949b8dSskrll #include "opt_riscv_debug.h"
3408c3a075Sskrll #include "opt_multiprocessor.h"
3517949b8dSskrll
369687a165Smatt #define __PMAP_PRIVATE
379687a165Smatt
389687a165Smatt #include <sys/cdefs.h>
39*686b1ff1Sskrll __RCSID("$NetBSD: pmap_machdep.c,v 1.20 2024/01/01 17:18:02 skrll Exp $");
409687a165Smatt
419687a165Smatt #include <sys/param.h>
4217949b8dSskrll #include <sys/buf.h>
436345bad4Sskrll #include <sys/cpu.h>
449687a165Smatt
459687a165Smatt #include <uvm/uvm.h>
469687a165Smatt
4717949b8dSskrll #include <riscv/machdep.h>
4817949b8dSskrll #include <riscv/sysreg.h>
4917949b8dSskrll
5017949b8dSskrll #ifdef VERBOSE_INIT_RISCV
5117949b8dSskrll #define VPRINTF(...) printf(__VA_ARGS__)
5217949b8dSskrll #else
5317949b8dSskrll #define VPRINTF(...) __nothing
5417949b8dSskrll #endif
559687a165Smatt
564297c647Smaxv vaddr_t pmap_direct_base __read_mostly;
574297c647Smaxv vaddr_t pmap_direct_end __read_mostly;
584297c647Smaxv
/*
 * pmap_zero_page: zero the physical page at 'pa'.
 *
 * Reaches the page through the kernel's direct map instead of
 * creating a temporary KVA mapping.  Only implemented for _LP64
 * kernels with PMAP_DIRECT_MAP; a 32-bit kernel asserts (no direct
 * map is available there).
 */
void
pmap_zero_page(paddr_t pa)
{
#ifdef _LP64
#ifdef PMAP_DIRECT_MAP
	/* Direct-mapped, so a plain memset suffices. */
	memset((void *)PMAP_DIRECT_MAP(pa), 0, PAGE_SIZE);
#else
#error "no direct map"
#endif
#else
	/* Not supported without a direct map. */
	KASSERT(false);
#endif
}
729687a165Smatt
/*
 * pmap_copy_page: copy the physical page at 'src' to the one at 'dst'.
 *
 * Uses the direct map for both pages, so no temporary mappings are
 * needed.  Only implemented for _LP64 kernels with PMAP_DIRECT_MAP;
 * a 32-bit kernel asserts.
 */
void
pmap_copy_page(paddr_t src, paddr_t dst)
{
#ifdef _LP64
#ifdef PMAP_DIRECT_MAP
	memcpy((void *)PMAP_DIRECT_MAP(dst), (const void *)PMAP_DIRECT_MAP(src),
	    PAGE_SIZE);
#else
#error "no direct map"
#endif
#else
	/* Not supported without a direct map. */
	KASSERT(false);
#endif
}
879687a165Smatt
/*
 * pmap_md_alloc_poolpage: allocate a physical page to back a pool.
 *
 * RISC-V places no machine-dependent constraints on which page is
 * used, so any page from uvm_pagealloc() will do.
 */
struct vm_page *
pmap_md_alloc_poolpage(int flags)
{

	return uvm_pagealloc(NULL, 0, NULL, flags);
}
949687a165Smatt
/*
 * pmap_md_map_poolpage: return a KVA through which the pool page at
 * 'pa' can be accessed.
 *
 * On _LP64 this is simply the direct-map address ('len' is unused);
 * 32-bit kernels have no direct map and panic.
 */
vaddr_t
pmap_md_map_poolpage(paddr_t pa, vsize_t len)
{
#ifdef _LP64
	return PMAP_DIRECT_MAP(pa);
#else
	panic("not supported");
#endif
}
1049687a165Smatt
/*
 * pmap_md_unmap_poolpage: release the mapping obtained from
 * pmap_md_map_poolpage.
 *
 * The mapping is part of the permanent direct map, so there is
 * nothing to tear down.
 */
void
pmap_md_unmap_poolpage(vaddr_t pa, vsize_t len)
{
	/* nothing to do */
}
1109687a165Smatt
11175b842b8Sskrll
/*
 * pmap_md_direct_mapped_vaddr_p: true iff 'va' lies inside the
 * direct-mapped region.
 *
 * 32-bit kernels have no direct map, so the answer is always false
 * there.
 */
bool
pmap_md_direct_mapped_vaddr_p(vaddr_t va)
{
#ifdef _LP64
	return RISCV_DIRECTMAP_P(va);
#else
	return false;
#endif
}
1219687a165Smatt
/*
 * pmap_md_io_vaddr_p: true iff 'va' is a machine-dependent I/O
 * address.
 *
 * RISC-V has no special I/O address window, so this is always false.
 */
bool
pmap_md_io_vaddr_p(vaddr_t va)
{
	return false;
}
1279687a165Smatt
/*
 * pmap_md_direct_mapped_vaddr_to_paddr: convert a direct-map virtual
 * address back to its physical address.
 *
 * The caller must pass an address inside the direct map (see
 * pmap_md_direct_mapped_vaddr_p).  Configurations without a direct
 * map assert and return 0.
 */
paddr_t
pmap_md_direct_mapped_vaddr_to_paddr(vaddr_t va)
{
#ifdef _LP64
#ifdef PMAP_DIRECT_MAP
	return PMAP_DIRECT_UNMAP(va);
#else
	KASSERT(false);
	return 0;
#endif
#else
	KASSERT(false);
	return 0;
#endif
}
1439687a165Smatt
/*
 * pmap_md_direct_map_paddr: return the direct-map virtual address for
 * the physical address 'pa'.
 *
 * Inverse of pmap_md_direct_mapped_vaddr_to_paddr.  Only available on
 * _LP64; 32-bit kernels panic.
 */
vaddr_t
pmap_md_direct_map_paddr(paddr_t pa)
{
#ifdef _LP64
	return PMAP_DIRECT_MAP(pa);
#else
	panic("not supported");
#endif
}
1539687a165Smatt
/*
 * pmap_md_init: machine-dependent pmap initialisation run after the
 * common pmap layer is up.
 *
 * Only attaches the event counters for the primary TLB info.
 */
void
pmap_md_init(void)
{
	pmap_tlb_info_evcnt_attach(&pmap_tlb0_info);
}
1599687a165Smatt
/*
 * pmap_md_ok_to_steal_p: may 'npgs' pages be stolen from physseg
 * 'bank' during bootstrap?
 *
 * No machine-dependent restrictions on RISC-V; any segment is fine.
 */
bool
pmap_md_ok_to_steal_p(const uvm_physseg_t bank, size_t npgs)
{
	return true;
}
1655bd7eba2Smaxv
16608c3a075Sskrll #ifdef MULTIPROCESSOR
/*
 * pmap_md_tlb_info_attach: machine-dependent hook called when a CPU
 * is attached to a TLB info structure.
 *
 * Nothing to do on RISC-V.
 */
void
pmap_md_tlb_info_attach(struct pmap_tlb_info *ti, struct cpu_info *ci)
{
}
17108c3a075Sskrll #endif
17208c3a075Sskrll
1739687a165Smatt
1749687a165Smatt void
pmap_md_xtab_activate(struct pmap * pmap,struct lwp * l)175e0a18223Sskrll pmap_md_xtab_activate(struct pmap *pmap, struct lwp *l)
1769687a165Smatt {
17775b842b8Sskrll // UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
17875b842b8Sskrll
1796345bad4Sskrll // struct cpu_info * const ci = curcpu();
180*686b1ff1Sskrll struct pmap_tlb_info * const ti = cpu_tlb_info(ci);
181*686b1ff1Sskrll struct pmap_asid_info * const pai = PMAP_PAI(pmap, ti);
18217949b8dSskrll
18317949b8dSskrll uint64_t satp =
18417949b8dSskrll #ifdef _LP64
18517949b8dSskrll __SHIFTIN(SATP_MODE_SV39, SATP_MODE) |
18617949b8dSskrll #else
18717949b8dSskrll __SHIFTIN(SATP_MODE_SV32, SATP_MODE) |
18817949b8dSskrll #endif
18917949b8dSskrll __SHIFTIN(pai->pai_asid, SATP_ASID) |
19017949b8dSskrll __SHIFTIN(pmap->pm_md.md_ppn, SATP_PPN);
19117949b8dSskrll
192b2c96440Ssimonb csr_satp_write(satp);
193*686b1ff1Sskrll
194*686b1ff1Sskrll if (l && !tlbinfo_asids_p(ti)) {
195*686b1ff1Sskrll tlb_invalidate_all();
196*686b1ff1Sskrll }
1979687a165Smatt }
1989687a165Smatt
/*
 * pmap_md_xtab_deactivate: stop running on 'pmap'.
 *
 * There is no "no address space" state on RISC-V, so deactivation is
 * done by switching to the kernel pmap.  The NULL lwp argument tells
 * pmap_md_xtab_activate that no user-level TLB flush policy applies.
 */
void
pmap_md_xtab_deactivate(struct pmap *pmap)
{

	/* switch to kernel pmap */
	pmap_md_xtab_activate(pmap_kernel(), NULL);
}
206e0a18223Sskrll
207e0a18223Sskrll void
pmap_md_pdetab_init(struct pmap * pmap)2089687a165Smatt pmap_md_pdetab_init(struct pmap *pmap)
2099687a165Smatt {
21017949b8dSskrll KASSERT(pmap != NULL);
21117949b8dSskrll
21275b842b8Sskrll const vaddr_t pdetabva = (vaddr_t)pmap->pm_pdetab;
21317949b8dSskrll const paddr_t pdetabpa = pmap_md_direct_mapped_vaddr_to_paddr(pdetabva);
21417949b8dSskrll pmap->pm_md.md_ppn = pdetabpa >> PAGE_SHIFT;
21575b842b8Sskrll
21675b842b8Sskrll /* XXXSB can we "pre-optimise" this by keeping a list of pdes to copy? */
21775b842b8Sskrll /* XXXSB for relatively normal size memory (8gb) we only need 10-20ish ptes? */
21875b842b8Sskrll /* XXXSB most (all?) of these ptes are in two consecutive ranges. */
21975b842b8Sskrll for (size_t i = NPDEPG / 2; i < NPDEPG; ++i) {
22075b842b8Sskrll /*
22175b842b8Sskrll * XXXSB where/when do new entries in pmap_kernel()->pm_pdetab
22275b842b8Sskrll * XXXSB get added to existing pmaps?
22375b842b8Sskrll *
22475b842b8Sskrll * pmap_growkernal doesn't have support for fixing up exiting
22575b842b8Sskrll * pmaps. (yet)
22675b842b8Sskrll *
22775b842b8Sskrll * Various options:
22875b842b8Sskrll *
22975b842b8Sskrll * - do the x86 thing. maintain a list of pmaps and update them
23075b842b8Sskrll * all in pmap_growkernel.
23175b842b8Sskrll * - make sure the top level entries are populated and them simply
23275b842b8Sskrll * copy "them all" here. If pmap_growkernel runs the new entries
23375b842b8Sskrll * will become visible to all pmaps.
23475b842b8Sskrll * - ...
23575b842b8Sskrll */
23675b842b8Sskrll
23775b842b8Sskrll /* XXXSB is this any faster than blindly copying all "high" entries? */
23875b842b8Sskrll pd_entry_t pde = pmap_kernel()->pm_pdetab->pde_pde[i];
23975b842b8Sskrll
24075b842b8Sskrll /* we might have leaf entries (direct map) as well as non-leaf */
24175b842b8Sskrll if (pde) {
24275b842b8Sskrll pmap->pm_pdetab->pde_pde[i] = pde;
24375b842b8Sskrll }
24475b842b8Sskrll }
24517949b8dSskrll }
24617949b8dSskrll
24717949b8dSskrll void
pmap_md_pdetab_fini(struct pmap * pmap)248d98156b4Sskrll pmap_md_pdetab_fini(struct pmap *pmap)
249d98156b4Sskrll {
25075b842b8Sskrll
25175b842b8Sskrll if (pmap == pmap_kernel())
25275b842b8Sskrll return;
25375b842b8Sskrll for (size_t i = NPDEPG / 2; i < NPDEPG; ++i) {
25475b842b8Sskrll KASSERT(pte_invalid_pde() == 0);
25575b842b8Sskrll pmap->pm_pdetab->pde_pde[i] = 0;
256d98156b4Sskrll }
25775b842b8Sskrll }
25875b842b8Sskrll
/*
 * pmap_md_grow: ensure that page-directory pages exist to cover
 * [va, va + *remaining) at the level addressed by 'vshift',
 * allocating intermediate directory pages with uvm_pageboot_alloc()
 * as required and recursing one level down until SEGSHIFT is reached.
 * *remaining is decremented as leaf-level coverage is accounted for;
 * the walk stops early once it hits zero.  Effective on _LP64 only.
 */
static void
pmap_md_grow(pmap_pdetab_t *ptb, vaddr_t va, vsize_t vshift,
    vsize_t *remaining)
{
	KASSERT((va & (NBSEG - 1)) == 0);
#ifdef _LP64
	const vaddr_t pdetab_mask = PMAP_PDETABSIZE - 1;
	const vsize_t vinc = 1UL << vshift;

	for (size_t i = (va >> vshift) & pdetab_mask;
	    i < PMAP_PDETABSIZE; i++, va += vinc) {
		pd_entry_t * const pde_p =
		    &ptb->pde_pde[(va >> vshift) & pdetab_mask];

		vaddr_t pdeva;
		if (pte_pde_valid_p(*pde_p)) {
			/* Existing directory page: recover its KVA. */
			const paddr_t pa = pte_pde_to_paddr(*pde_p);
			pdeva = pmap_md_direct_map_paddr(pa);
		} else {
			/*
			 * uvm_pageboot_alloc() returns a direct mapped address
			 */
			pdeva = uvm_pageboot_alloc(PAGE_SIZE);
			paddr_t pdepa = RISCV_KVA_TO_PA(pdeva);
			*pde_p = pte_pde_pdetab(pdepa, true);
			memset((void *)pdeva, 0, PAGE_SIZE);
		}

		if (vshift > SEGSHIFT) {
			/* Not at leaf directory level yet: descend. */
			pmap_md_grow((pmap_pdetab_t *)pdeva, va,
			    vshift - SEGLENGTH, remaining);
		} else {
			/* Leaf level: account for the VA just covered. */
			if (*remaining > vinc)
				*remaining -= vinc;
			else
				*remaining = 0;
		}
		if (*remaining == 0)
			return;
	}
#endif
}
30175b842b8Sskrll
302d98156b4Sskrll
/*
 * pmap_bootstrap: machine-dependent pmap bootstrap, called from
 * init_riscv() once the early page tables are in place.
 *
 * Adopts the bootstrap page-directory built in init_riscv() as the
 * kernel pmap's table, initialises the TLB info, sizes the kernel
 * virtual address space, records physical/virtual limits, grows the
 * kernel page tables to cover the computed KVA range, and sets up the
 * pmap/pv-entry pools.
 *
 * vstart/vend delimit the kernel virtual address range to manage.
 */
void
pmap_bootstrap(vaddr_t vstart, vaddr_t vend)
{
	extern pmap_pdetab_t bootstrap_pde[PAGE_SIZE / sizeof(pd_entry_t)];

//	pmap_pdetab_t * const kptb = &pmap_kern_pdetab;
	pmap_t pm = pmap_kernel();

	VPRINTF("common ");
	pmap_bootstrap_common();

#ifdef MULTIPROCESSOR
	VPRINTF("cpusets ");
	struct cpu_info * const ci = curcpu();
	kcpuset_create(&ci->ci_shootdowncpus, true);
#endif

	VPRINTF("bs_pde %p ", bootstrap_pde);

//	kend = (kend + 0x200000 - 1) & -0x200000;

	/* Use the tables we already built in init_riscv() */
	pm->pm_pdetab = bootstrap_pde;

	/* Get the PPN for our page table root */
	pm->pm_md.md_ppn = atop(KERN_VTOPHYS((vaddr_t)bootstrap_pde));

	/* Setup basic info like pagesize=PAGE_SIZE */
//	uvm_md_init();

	/* init the lock */
	// XXXNH per cpu?
	pmap_tlb_info_init(&pmap_tlb0_info);

	VPRINTF("ASID max %x ", pmap_tlb0_info.ti_asid_max);

#ifdef MULTIPROCESSOR
	VPRINTF("kcpusets ");

	/* Track which CPUs have the kernel pmap active/on-processor. */
	kcpuset_create(&pm->pm_onproc, true);
	kcpuset_create(&pm->pm_active, true);
	KASSERT(pm->pm_onproc != NULL);
	KASSERT(pm->pm_active != NULL);

	kcpuset_set(pm->pm_onproc, cpu_index(ci));
	kcpuset_set(pm->pm_active, cpu_index(ci));
#endif

	VPRINTF("nkmempages ");
	/*
	 * Compute the number of pages kmem_arena will have. This will also
	 * be called by uvm_km_bootstrap later, but that doesn't matter
	 */
	kmeminit_nkmempages();

	/* Get size of buffer cache and set an upper limit */
	buf_setvalimit((VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS) / 8);
	vsize_t bufsz = buf_memcalc();
	buf_setvalimit(bufsz);

	/* Estimate how much KVA the kernel submaps will need. */
	vsize_t kvmsize = (VM_PHYS_SIZE + (ubc_nwins << ubc_winshift) +
	    bufsz + 16 * NCARGS + pager_map_size) +
	    /*(maxproc * UPAGES) + */nkmempages * NBPG;

#ifdef SYSVSHM
	kvmsize += shminfo.shmall;
#endif

	/* Calculate VA address space and roundup to NBSEG tables */
	kvmsize = roundup(kvmsize, NBSEG);

	/*
	 * Initialize `FYI' variables.	Note we're relying on
	 * the fact that BSEARCH sorts the vm_physmem[] array
	 * for us.  Must do this before uvm_pageboot_alloc()
	 * can be called.
	 */
	pmap_limits.avail_start = ptoa(uvm_physseg_get_start(uvm_physseg_get_first()));
	pmap_limits.avail_end = ptoa(uvm_physseg_get_end(uvm_physseg_get_last()));

	/*
	 * Update the naive settings in pmap_limits to the actual KVA range.
	 */
	pmap_limits.virtual_start = vstart;
	pmap_limits.virtual_end = vend;

	VPRINTF("limits: %" PRIxVADDR " - %" PRIxVADDR "\n", vstart, vend);

	const vaddr_t kvmstart = vstart;
	pmap_curmaxkvaddr = vstart + kvmsize;

	VPRINTF("kva : %" PRIxVADDR " - %" PRIxVADDR "\n", kvmstart,
	    pmap_curmaxkvaddr);

	/* Pre-allocate directory pages covering the planned KVA range. */
	pmap_md_grow(pmap_kernel()->pm_pdetab, kvmstart, XSEGSHIFT, &kvmsize);

	/*
	 * Initialize the pools.
	 */

	pool_init(&pmap_pmap_pool, PMAP_SIZE, 0, 0, 0, "pmappl",
	    &pool_allocator_nointr, IPL_NONE);

	/* KASAN cannot use the custom pv-page allocator. */
	pool_init(&pmap_pv_pool, sizeof(struct pv_entry), 0, 0, 0, "pvpl",
#ifdef KASAN
	    NULL,
#else
	    &pmap_pv_page_allocator,
#endif
	    IPL_NONE);

	// riscv_dcache_align
	pmap_pvlist_lock_init(CACHE_LINE_SIZE);
}
4189687a165Smatt
41975b842b8Sskrll
42075b842b8Sskrll vsize_t
pmap_kenter_range(vaddr_t va,paddr_t pa,vsize_t size,vm_prot_t prot,u_int flags)42175b842b8Sskrll pmap_kenter_range(vaddr_t va, paddr_t pa, vsize_t size,
42275b842b8Sskrll vm_prot_t prot, u_int flags)
42375b842b8Sskrll {
42475b842b8Sskrll extern pd_entry_t l1_pte[PAGE_SIZE / sizeof(pd_entry_t)];
42575b842b8Sskrll
42675b842b8Sskrll vaddr_t sva = MEGAPAGE_TRUNC(va);
42775b842b8Sskrll paddr_t spa = MEGAPAGE_TRUNC(pa);
42875b842b8Sskrll const vaddr_t eva = MEGAPAGE_ROUND(va + size);
42975b842b8Sskrll const vaddr_t pdetab_mask = PMAP_PDETABSIZE - 1;
43075b842b8Sskrll const vsize_t vshift = SEGSHIFT;
43175b842b8Sskrll
43275b842b8Sskrll while (sva < eva) {
43375b842b8Sskrll const size_t sidx = (sva >> vshift) & pdetab_mask;
43475b842b8Sskrll
43575b842b8Sskrll l1_pte[sidx] = PA_TO_PTE(spa) | PTE_KERN | PTE_HARDWIRED | PTE_RW;
43675b842b8Sskrll spa += NBSEG;
43775b842b8Sskrll sva += NBSEG;
43875b842b8Sskrll }
43975b842b8Sskrll
44075b842b8Sskrll return 0;
44175b842b8Sskrll }
442