/*	$NetBSD: loadfile_machdep.c,v 1.17 2022/04/29 20:24:02 rin Exp $	*/

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This work is based on the code contributed by Robert Drehmel to the
 * FreeBSD project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>

#include <lib/libsa/stand.h>
#include <lib/libkern/libkern.h>

#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>
#include <machine/vmparam.h>
#include <machine/promlib.h>
#include <machine/hypervisor.h>

#include "boot.h"
#include "openfirm.h"


#define MAXSEGNUM	50
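/*
 * hi()/lo() split a 64-bit TLB data word into the two 32-bit halves
 * expected by the itlb_enter()/dtlb_enter()/dtlb_replace() helpers
 * declared below.
 */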
#define hi(val)		((uint32_t)(((val) >> 32) & (uint32_t)-1))
#define lo(val)		((uint32_t)((val) & (uint32_t)-1))


typedef int phandle_t;

extern void	itlb_enter(vaddr_t, uint32_t, uint32_t);
extern void	dtlb_enter(vaddr_t, uint32_t, uint32_t);
extern void	dtlb_replace(vaddr_t, uint32_t, uint32_t);
extern vaddr_t	itlb_va_to_pa(vaddr_t);
extern vaddr_t	dtlb_va_to_pa(vaddr_t);

static void	tlb_init(void);
static void	tlb_init_sun4u(void);
#ifdef SUN4V
static void	tlb_init_sun4v(void);
#endif
void	sparc64_finalize_tlb_sun4u(u_long);
#ifdef SUN4V
void	sparc64_finalize_tlb_sun4v(u_long);
#endif
static int	mmu_mapin(vaddr_t, vsize_t);
static int	mmu_mapin_sun4u(vaddr_t, vsize_t);
#ifdef SUN4V
static int	mmu_mapin_sun4v(vaddr_t, vsize_t);
#endif
static ssize_t	mmu_read(int, void *, size_t);
static void*	mmu_memcpy(void *, const void *, size_t);
static void*	mmu_memset(void *, int, size_t);
static void	mmu_freeall(void);

static int	ofw_mapin(vaddr_t, vsize_t);
static ssize_t	ofw_read(int, void *, size_t);
static void*	ofw_memcpy(void *, const void *, size_t);
static void*	ofw_memset(void *, int, size_t);
static void	ofw_freeall(void);

#if 0
static int	nop_mapin(vaddr_t, vsize_t);
#endif
static ssize_t	nop_read(int, void *, size_t);
static void*	nop_memcpy(void *, const void *, size_t);
static void*	nop_memset(void *, int, size_t);
static void	nop_freeall(void);


struct tlb_entry *dtlb_store = 0;
struct tlb_entry *itlb_store = 0;

int dtlb_slot;
int itlb_slot;
int dtlb_slot_max;
int itlb_slot_max;

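/*
 * kvamap[] records the kernel virtual regions the loader has already
 * claimed, so kvamap_extract() can skip over them and the freeall()
 * hooks can release everything when the allocator is switched.
 */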
static struct kvamap {
	uint64_t start;
	uint64_t end;
} kvamap[MAXSEGNUM];

static struct memsw {
	ssize_t	(* read)(int f, void *addr, size_t size);
	void*	(* memcpy)(void *dst, const void *src, size_t size);
	void*	(* memset)(void *dst, int c, size_t size);
	void	(* freeall)(void);
} memswa[] = {
	{ nop_read, nop_memcpy, nop_memset, nop_freeall },
	{ ofw_read, ofw_memcpy, ofw_memset, ofw_freeall },
	{ mmu_read, mmu_memcpy, mmu_memset, mmu_freeall }
};

static struct memsw *memsw = &memswa[0];
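/*
 * The entries above are selected by index through loadfile_set_allocator()
 * (see the end of this file): NOP, then OFW, then MMU.  The default is the
 * NOP allocator, which is only good for reading the kernel headers.
 */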

#ifdef SUN4V
static int sun4v = 0;
#endif

/*
 * Check if a memory region is already mapped.  Return length and virtual
 * address of unmapped sub-region, if any.
 */
static uint64_t
kvamap_extract(vaddr_t va, vsize_t len, vaddr_t *new_va)
{
	int i;

	*new_va = va;
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0)
			break;
		if ((kvamap[i].start <= va) && (va < kvamap[i].end)) {
			uint64_t va_len = kvamap[i].end - va;
			len = (va_len < len) ? len - va_len : 0;
			*new_va = kvamap[i].end;
		}
	}

	return len;
}

/*
 * Record new kernel mapping.
 */
static void
kvamap_enter(uint64_t va, uint64_t len)
{
	int i;

	DPRINTF(("kvamap_enter: %d@%p\n", (int)len, (void*)(u_long)va));
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0) {
			kvamap[i].start = va;
			kvamap[i].end = va + len;
			break;
		}
	}

	if (i == MAXSEGNUM) {
		panic("Too many allocations requested.");
	}
}

/*
 * Initialize TLB as required by MMU mapping functions.
 */
static void
tlb_init(void)
{
	phandle_t root;
#ifdef SUN4V
	char buf[128];
#endif

	if (dtlb_store != NULL) {
		return;
	}

	if ( (root = prom_findroot()) == -1) {
		panic("tlb_init: prom_findroot()");
	}
#ifdef SUN4V
	if (_prom_getprop(root, "compatible", buf, sizeof(buf)) > 0 &&
	    strcmp(buf, "sun4v") == 0) {
		tlb_init_sun4v();
		sun4v = 1;
	}
	else {
#endif
		tlb_init_sun4u();
#ifdef SUN4V
	}
#endif

	dtlb_store = alloc(dtlb_slot_max * sizeof(*dtlb_store));
	itlb_store = alloc(itlb_slot_max * sizeof(*itlb_store));
	if (dtlb_store == NULL || itlb_store == NULL) {
		panic("tlb_init: malloc");
	}

	dtlb_slot = itlb_slot = 0;
}

/*
 * Initialize TLB as required by MMU mapping functions - sun4u.
 */
static void
tlb_init_sun4u(void)
{
	phandle_t child;
	phandle_t root;
	char buf[128];
	bool foundcpu = false;
	u_int bootcpu;
	u_int cpu;

	bootcpu = get_cpuid();

	if ( (root = prom_findroot()) == -1) {
		panic("tlb_init: prom_findroot()");
	}

	for (child = prom_firstchild(root); child != 0;
	    child = prom_nextsibling(child)) {
		if (child == -1) {
			panic("tlb_init: OF_child");
		}
		if (_prom_getprop(child, "device_type", buf, sizeof(buf)) > 0 &&
		    strcmp(buf, "cpu") == 0) {
			if (_prom_getprop(child, "upa-portid", &cpu,
			    sizeof(cpu)) == -1 && _prom_getprop(child, "portid",
			    &cpu, sizeof(cpu)) == -1)
				panic("tlb_init: prom_getprop");
			foundcpu = true;
			if (cpu == bootcpu)
				break;
		}
	}
	if (!foundcpu)
		panic("tlb_init: no cpu found!");
	if (cpu != bootcpu)
		panic("tlb_init: no node for bootcpu?!?!");
	if (_prom_getprop(child, "#dtlb-entries", &dtlb_slot_max,
	    sizeof(dtlb_slot_max)) == -1 ||
	    _prom_getprop(child, "#itlb-entries", &itlb_slot_max,
	    sizeof(itlb_slot_max)) == -1)
		panic("tlb_init: prom_getprop");
}

#ifdef SUN4V
/*
 * Initialize TLB as required by MMU mapping functions - sun4v.
 */
static void
tlb_init_sun4v(void)
{
	psize_t len;
	paddr_t pa;
	int64_t hv_rc;

	hv_mach_desc((paddr_t)NULL, &len); /* Trick to get actual length */
	if ( !len ) {
		panic("init_tlb: hv_mach_desc() failed");
	}
	pa = OF_alloc_phys(len, 16);
	if ( pa == -1 ) {
		panic("OF_alloc_phys() failed");
	}
	hv_rc = hv_mach_desc(pa, &len);
	if (hv_rc != H_EOK) {
		panic("hv_mach_desc() failed");
	}
	/* XXX dig out TLB node info - 64 is ok for loading the kernel */
	dtlb_slot_max = itlb_slot_max = 64;
}
#endif

/*
 * Map requested memory region with permanent 4MB pages.
 */
static int
mmu_mapin(vaddr_t rva, vsize_t len)
{
	len = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;
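	/*
	 * Example of the rounding above: a request for 0x5000 bytes at
	 * rva 0x017fe000 crosses a 4MB boundary, so it becomes a request
	 * for 0x800000 bytes (two 4MB pages) starting at rva 0x01400000.
	 */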

	tlb_init();

#if SUN4V
	if ( sun4v )
		return mmu_mapin_sun4v(rva, len);
	else
#endif
		return mmu_mapin_sun4u(rva, len);
}

/*
 * Map requested memory region with permanent 4MB pages - sun4u.
 */
static int
mmu_mapin_sun4u(vaddr_t rva, vsize_t len)
{
	uint64_t data;
	paddr_t pa;
	vaddr_t va, mva;

	for (pa = (paddr_t)-1; len > 0; rva = va) {
		if ( (len = kvamap_extract(rva, len, &va)) == 0) {
			/* The rest is already mapped */
			break;
		}

		if (dtlb_va_to_pa(va) == (u_long)-1 ||
		    itlb_va_to_pa(va) == (u_long)-1) {
			/* Allocate a physical page, claim the virtual area */
			if (pa == (paddr_t)-1) {
				pa = OF_alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
				if (pa == (paddr_t)-1)
					panic("out of memory");
				mva = OF_claim_virt(va, PAGE_SIZE_4M);
				if (mva != va) {
					panic("can't claim virtual page "
					    "(wanted %#lx, got %#lx)",
					    va, mva);
				}
				/* The mappings may have changed, be paranoid. */
				continue;
			}

			/*
			 * Actually, we can only allocate two pages less at
			 * most (depending on the kernel TSB size).
			 */
			if (dtlb_slot >= dtlb_slot_max)
				panic("mmu_mapin: out of dtlb_slots");
			if (itlb_slot >= itlb_slot_max)
				panic("mmu_mapin: out of itlb_slots");

			DPRINTF(("mmu_mapin: 0x%lx:0x%x.0x%x\n", va,
			    hi(pa), lo(pa)));

			data = SUN4U_TSB_DATA(0,	/* global */
			    PGSZ_4M,			/* 4mb page */
			    pa,				/* phys.address */
			    1,				/* privileged */
			    1,				/* write */
			    1,				/* cache */
			    1,				/* alias */
			    1,				/* valid */
			    0,				/* endianness */
			    0				/* wc */
			    );
			data |= SUN4U_TLB_L | SUN4U_TLB_CV; /* locked, virt.cache */

			dtlb_store[dtlb_slot].te_pa = pa;
			dtlb_store[dtlb_slot].te_va = va;
			dtlb_slot++;
			dtlb_enter(va, hi(data), lo(data));
			pa = (paddr_t)-1;
		}

		kvamap_enter(va, PAGE_SIZE_4M);

		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}

	if (pa != (paddr_t)-1) {
		OF_free_phys(pa, PAGE_SIZE_4M);
	}

	return (0);
}
38097aa1417Scdi
381b11afb2fSpalle #ifdef SUN4V
382b11afb2fSpalle /*
383b11afb2fSpalle * Map requested memory region with permanent 4MB pages - sun4v.
384b11afb2fSpalle */
385b11afb2fSpalle static int
mmu_mapin_sun4v(vaddr_t rva,vsize_t len)386b11afb2fSpalle mmu_mapin_sun4v(vaddr_t rva, vsize_t len)
387b11afb2fSpalle {
388b11afb2fSpalle uint64_t data;
389b11afb2fSpalle paddr_t pa;
390b11afb2fSpalle vaddr_t va, mva;
391b11afb2fSpalle int64_t hv_rc;
392b11afb2fSpalle
393b11afb2fSpalle for (pa = (paddr_t)-1; len > 0; rva = va) {
394b11afb2fSpalle if ( (len = kvamap_extract(rva, len, &va)) == 0) {
395b11afb2fSpalle /* The rest is already mapped */
396b11afb2fSpalle break;
397b11afb2fSpalle }
398b11afb2fSpalle
399b11afb2fSpalle /* Allocate a physical page, claim the virtual area */
400b11afb2fSpalle if (pa == (paddr_t)-1) {
401b11afb2fSpalle pa = OF_alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
402b11afb2fSpalle if (pa == (paddr_t)-1)
403b11afb2fSpalle panic("out of memory");
404b11afb2fSpalle mva = OF_claim_virt(va, PAGE_SIZE_4M);
405b11afb2fSpalle if (mva != va) {
406b11afb2fSpalle panic("can't claim virtual page "
407b11afb2fSpalle "(wanted %#lx, got %#lx)",
408b11afb2fSpalle va, mva);
409b11afb2fSpalle }
410b11afb2fSpalle }
411b11afb2fSpalle
412b11afb2fSpalle /*
413b11afb2fSpalle * Actually, we can only allocate two pages less at
414b11afb2fSpalle * most (depending on the kernel TSB size).
415b11afb2fSpalle */
416b11afb2fSpalle if (dtlb_slot >= dtlb_slot_max)
417b11afb2fSpalle panic("mmu_mapin: out of dtlb_slots");
418b11afb2fSpalle if (itlb_slot >= itlb_slot_max)
419b11afb2fSpalle panic("mmu_mapin: out of itlb_slots");
420b11afb2fSpalle
421b11afb2fSpalle DPRINTF(("mmu_mapin: 0x%lx:0x%x.0x%x\n", va,
422b11afb2fSpalle hi(pa), lo(pa)));
423b11afb2fSpalle
424b11afb2fSpalle data = SUN4V_TSB_DATA(
425b11afb2fSpalle 0, /* global */
426b11afb2fSpalle PGSZ_4M, /* 4mb page */
427b11afb2fSpalle pa, /* phys.address */
428b11afb2fSpalle 1, /* privileged */
429b11afb2fSpalle 1, /* write */
430b11afb2fSpalle 1, /* cache */
431b11afb2fSpalle 1, /* alias */
432b11afb2fSpalle 1, /* valid */
4332ee28d15Smacallan 0, /* endianness */
4342ee28d15Smacallan 0 /* wc */
435b11afb2fSpalle );
436b11afb2fSpalle data |= SUN4V_TLB_CV; /* virt.cache */
437b11afb2fSpalle
438b11afb2fSpalle dtlb_store[dtlb_slot].te_pa = pa;
439b11afb2fSpalle dtlb_store[dtlb_slot].te_va = va;
440b11afb2fSpalle dtlb_slot++;
441b11afb2fSpalle hv_rc = hv_mmu_map_perm_addr(va, data, MAP_DTLB);
442b11afb2fSpalle if ( hv_rc != H_EOK ) {
443b11afb2fSpalle panic("hv_mmu_map_perm_addr() failed - rc = %ld", hv_rc);
444b11afb2fSpalle }
445b11afb2fSpalle
446b11afb2fSpalle kvamap_enter(va, PAGE_SIZE_4M);
447b11afb2fSpalle
448b11afb2fSpalle pa = (paddr_t)-1;
449b11afb2fSpalle
450b11afb2fSpalle len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
451b11afb2fSpalle va += PAGE_SIZE_4M;
452b11afb2fSpalle }
453b11afb2fSpalle
454b11afb2fSpalle if (pa != (paddr_t)-1) {
455b11afb2fSpalle OF_free_phys(pa, PAGE_SIZE_4M);
456b11afb2fSpalle }
457b11afb2fSpalle
458b11afb2fSpalle return (0);
459b11afb2fSpalle }
460b11afb2fSpalle #endif

static ssize_t
mmu_read(int f, void *addr, size_t size)
{
	mmu_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
mmu_memcpy(void *dst, const void *src, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
mmu_memset(void *dst, int c, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
mmu_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		/* XXX return all mappings to PROM and unmap the pages! */
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * Claim requested memory region in OpenFirmware allocation pool.
 */
static int
ofw_mapin(vaddr_t rva, vsize_t len)
{
	vaddr_t va;

	len = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;

	if ( (len = kvamap_extract(rva, len, &va)) != 0) {
		if (OF_claim((void *)(long)va, len, PAGE_SIZE_4M) == (void*)-1){
			panic("ofw_mapin: Cannot claim memory.");
		}
		kvamap_enter(va, len);
	}

	return (0);
}

static ssize_t
ofw_read(int f, void *addr, size_t size)
{
	ofw_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
ofw_memcpy(void *dst, const void *src, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
ofw_memset(void *dst, int c, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
ofw_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		OF_release((void*)(u_long)kvamap[i].start,
		    (u_int)(kvamap[i].end - kvamap[i].start));
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * The NOP implementation exists solely for loading the kernel headers.
 * Here we rely on the alloc() interface to allocate memory and avoid
 * touching the MMU or OpenFirmware mappings.
 */
static ssize_t
nop_read(int f, void *addr, size_t size)
{
	return read(f, addr, size);
}

static void*
nop_memcpy(void *dst, const void *src, size_t size)
{
	/*
	 * Real NOP to make LOAD_HDR work: loadfile_elfXX copies ELF headers
	 * right after the highest kernel address which will not be mapped with
	 * nop_XXX operations.
	 */
	return (dst);
}

static void*
nop_memset(void *dst, int c, size_t size)
{
	return memset(dst, c, size);
}

static void
nop_freeall(void)
{ }

/*
 * loadfile() hooks.
 */
ssize_t
sparc64_read(int f, void *addr, size_t size)
{
	return (*memsw->read)(f, addr, size);
}

void*
sparc64_memcpy(void *dst, const void *src, size_t size)
{
	return (*memsw->memcpy)(dst, src, size);
}

void*
sparc64_memset(void *dst, int c, size_t size)
{
	return (*memsw->memset)(dst, c, size);
}

/*
 * Remove write permissions from text mappings in the dTLB.
 * Add entries in the iTLB.
 */
void
sparc64_finalize_tlb(u_long data_va)
{
#ifdef SUN4V
	if ( sun4v )
		sparc64_finalize_tlb_sun4v(data_va);
	else
#endif
		sparc64_finalize_tlb_sun4u(data_va);
}

/*
 * Remove write permissions from text mappings in the dTLB - sun4u.
 * Add entries in the iTLB.
 */
void
sparc64_finalize_tlb_sun4u(u_long data_va)
{
	int i;
	int64_t data;
	bool writable_text = false;

	for (i = 0; i < dtlb_slot; i++) {
		if (dtlb_store[i].te_va >= data_va) {
			/*
			 * If (for whatever reason) the start of the
			 * writable section is right at the start of
			 * the kernel, we need to map it into the ITLB
			 * nevertheless (and don't make it readonly).
			 */
			if (i == 0 && dtlb_store[i].te_va == data_va)
				writable_text = true;
			else
				continue;
		}

		data = SUN4U_TSB_DATA(0,	/* global */
		    PGSZ_4M,			/* 4mb page */
		    dtlb_store[i].te_pa,	/* phys.address */
		    1,				/* privileged */
		    0,				/* write */
		    1,				/* cache */
		    1,				/* alias */
		    1,				/* valid */
		    0,				/* endianness */
		    0				/* wc */
		    );
		data |= SUN4U_TLB_L | SUN4U_TLB_CV; /* locked, virt.cache */
		if (!writable_text)
			dtlb_replace(dtlb_store[i].te_va, hi(data), lo(data));
		itlb_store[itlb_slot] = dtlb_store[i];
		itlb_slot++;
		itlb_enter(dtlb_store[i].te_va, hi(data), lo(data));
	}
	if (writable_text)
		printf("WARNING: kernel text mapped writable!\n");

}

#ifdef SUN4V
/*
 * Remove write permissions from text mappings in the dTLB - sun4v.
 * Add entries in the iTLB.
 */
void
sparc64_finalize_tlb_sun4v(u_long data_va)
{
	int i;
	int64_t data;
	bool writable_text = false;
	int64_t hv_rc;

	for (i = 0; i < dtlb_slot; i++) {
		if (dtlb_store[i].te_va >= data_va) {
			/*
			 * If (for whatever reason) the start of the
			 * writable section is right at the start of
			 * the kernel, we need to map it into the ITLB
			 * nevertheless (and don't make it readonly).
			 */
			if (i == 0 && dtlb_store[i].te_va == data_va)
				writable_text = true;
			else
				continue;
		}

		data = SUN4V_TSB_DATA(
		    0,			/* global */
		    PGSZ_4M,		/* 4mb page */
		    dtlb_store[i].te_pa,	/* phys.address */
		    1,			/* privileged */
		    0,			/* write */
		    1,			/* cache */
		    1,			/* alias */
		    1,			/* valid */
		    0,			/* endianness */
		    0			/* wc */
		    );
		data |= SUN4V_TLB_CV|SUN4V_TLB_X; /* virt.cache, executable */
		if (!writable_text) {
			hv_rc = hv_mmu_unmap_perm_addr(dtlb_store[i].te_va,
			    MAP_DTLB);
			if ( hv_rc != H_EOK ) {
				panic("hv_mmu_unmap_perm_addr() failed - "
				    "rc = %ld", hv_rc);
			}
			hv_rc = hv_mmu_map_perm_addr(dtlb_store[i].te_va, data,
			    MAP_DTLB);
			if ( hv_rc != H_EOK ) {
				panic("hv_mmu_map_perm_addr() failed - "
				    "rc = %ld", hv_rc);
			}
		}

		itlb_store[itlb_slot] = dtlb_store[i];
		itlb_slot++;
		hv_rc = hv_mmu_map_perm_addr(dtlb_store[i].te_va, data,
		    MAP_ITLB);
		if ( hv_rc != H_EOK ) {
			panic("hv_mmu_map_perm_addr() failed - rc = %ld", hv_rc);
		}
	}
	if (writable_text)
		printf("WARNING: kernel text mapped writable!\n");
}
#endif

/*
 * Record kernel mappings in bootinfo structure.
 */
void
sparc64_bi_add(void)
{
	int i;
	int itlb_size, dtlb_size;
	struct btinfo_count bi_count;
	struct btinfo_tlb *bi_itlb, *bi_dtlb;

	bi_count.count = itlb_slot;
	bi_add(&bi_count, BTINFO_ITLB_SLOTS, sizeof(bi_count));
	bi_count.count = dtlb_slot;
	bi_add(&bi_count, BTINFO_DTLB_SLOTS, sizeof(bi_count));

	itlb_size = sizeof(*bi_itlb) + sizeof(struct tlb_entry) * itlb_slot;
	dtlb_size = sizeof(*bi_dtlb) + sizeof(struct tlb_entry) * dtlb_slot;

	bi_itlb = alloc(itlb_size);
	bi_dtlb = alloc(dtlb_size);

	if ((bi_itlb == NULL) || (bi_dtlb == NULL)) {
		panic("Out of memory in sparc64_bi_add.\n");
	}

	for (i = 0; i < itlb_slot; i++) {
		bi_itlb->tlb[i].te_va = itlb_store[i].te_va;
		bi_itlb->tlb[i].te_pa = itlb_store[i].te_pa;
	}
	bi_add(bi_itlb, BTINFO_ITLB, itlb_size);

	for (i = 0; i < dtlb_slot; i++) {
		bi_dtlb->tlb[i].te_va = dtlb_store[i].te_va;
		bi_dtlb->tlb[i].te_pa = dtlb_store[i].te_pa;
	}
	bi_add(bi_dtlb, BTINFO_DTLB, dtlb_size);
}

/*
 * Choose kernel image mapping strategy:
 *
 *	LOADFILE_NOP_ALLOCATOR	To load kernel image headers
 *	LOADFILE_OFW_ALLOCATOR	To map the kernel by OpenFirmware means
 *	LOADFILE_MMU_ALLOCATOR	To use permanent 4MB mappings
 */
void
loadfile_set_allocator(int type)
{
	if (type >= (sizeof(memswa) / sizeof(struct memsw))) {
		panic("Bad allocator request.\n");
	}

	/*
	 * Release all memory claimed by previous allocator and schedule
	 * another allocator for succeeding memory allocation calls.
	 */
	(*memsw->freeall)();
	memsw = &memswa[type];
}
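
/*
 * Typical use by the boot program (a sketch only; the actual call sites
 * live in the ofwboot/loadfile code):
 *
 *	loadfile_set_allocator(LOADFILE_NOP_ALLOCATOR);
 *	... read and parse the kernel ELF headers via sparc64_read() ...
 *	loadfile_set_allocator(LOADFILE_MMU_ALLOCATOR);
 *	... load the kernel segments; the resulting TLB entries are
 *	    later handed to the kernel by sparc64_bi_add() ...
 */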