/*	$NetBSD: loadfile_machdep.c,v 1.16 2016/11/04 20:04:11 macallan Exp $	*/

/*-
 * Copyright (c) 2005 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This work is based on the code contributed by Robert Drehmel to the
 * FreeBSD project.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <lib/libsa/stand.h>
#include <lib/libkern/libkern.h>

#include <machine/pte.h>
#include <machine/cpu.h>
#include <machine/ctlreg.h>
#include <machine/vmparam.h>
#include <machine/promlib.h>
#include <machine/hypervisor.h>

#include "boot.h"
#include "openfirm.h"


#define MAXSEGNUM	50
#define hi(val)		((uint32_t)(((val) >> 32) & (uint32_t)-1))
#define lo(val)		((uint32_t)((val) & (uint32_t)-1))

#define roundup2(x, y)	(((x)+((y)-1))&(~((y)-1)))


typedef int phandle_t;

extern void	itlb_enter(vaddr_t, uint32_t, uint32_t);
extern void	dtlb_enter(vaddr_t, uint32_t, uint32_t);
extern void	dtlb_replace(vaddr_t, uint32_t, uint32_t);
extern vaddr_t	itlb_va_to_pa(vaddr_t);
extern vaddr_t	dtlb_va_to_pa(vaddr_t);

static void	tlb_init(void);
static void	tlb_init_sun4u(void);
#ifdef SUN4V
static void	tlb_init_sun4v(void);
#endif
void	sparc64_finalize_tlb_sun4u(u_long);
#ifdef SUN4V
void	sparc64_finalize_tlb_sun4v(u_long);
#endif
static int	mmu_mapin(vaddr_t, vsize_t);
static int	mmu_mapin_sun4u(vaddr_t, vsize_t);
#ifdef SUN4V
static int	mmu_mapin_sun4v(vaddr_t, vsize_t);
#endif
static ssize_t	mmu_read(int, void *, size_t);
static void*	mmu_memcpy(void *, const void *, size_t);
static void*	mmu_memset(void *, int, size_t);
static void	mmu_freeall(void);

static int	ofw_mapin(vaddr_t, vsize_t);
static ssize_t	ofw_read(int, void *, size_t);
static void*	ofw_memcpy(void *, const void *, size_t);
static void*	ofw_memset(void *, int, size_t);
static void	ofw_freeall(void);

#if 0
static int	nop_mapin(vaddr_t, vsize_t);
#endif
static ssize_t	nop_read(int, void *, size_t);
static void*	nop_memcpy(void *, const void *, size_t);
static void*	nop_memset(void *, int, size_t);
static void	nop_freeall(void);

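/*
 * Book-keeping for the locked TLB entries set up below: dtlb_store and
 * itlb_store remember every mapping created, so that it can later be
 * handed to the kernel via bootinfo (see sparc64_bi_add()), while the
 * *_slot counters track how many of the *_slot_max available locked
 * slots are in use.
 */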
struct tlb_entry *dtlb_store = 0;
struct tlb_entry *itlb_store = 0;

int dtlb_slot;
int itlb_slot;
int dtlb_slot_max;
int itlb_slot_max;

static struct kvamap {
	uint64_t start;
	uint64_t end;
} kvamap[MAXSEGNUM];

static struct memsw {
	ssize_t	(* read)(int f, void *addr, size_t size);
	void*	(* memcpy)(void *dst, const void *src, size_t size);
	void*	(* memset)(void *dst, int c, size_t size);
	void	(* freeall)(void);
} memswa[] = {
	{ nop_read, nop_memcpy, nop_memset, nop_freeall },
	{ ofw_read, ofw_memcpy, ofw_memset, ofw_freeall },
	{ mmu_read, mmu_memcpy, mmu_memset, mmu_freeall }
};

static struct memsw *memsw = &memswa[0];

#ifdef SUN4V
static int sun4v = 0;
#endif

/*
 * Check whether a memory region is already mapped.  Return the length of
 * the unmapped sub-region, if any, and its starting virtual address in
 * *new_va.
 */
static uint64_t
kvamap_extract(vaddr_t va, vsize_t len, vaddr_t *new_va)
{
	int i;

	*new_va = va;
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0)
			break;
		if ((kvamap[i].start <= va) && (va < kvamap[i].end)) {
			uint64_t va_len = kvamap[i].end - va;
			len = (va_len < len) ? len - va_len : 0;
			*new_va = kvamap[i].end;
		}
	}

	return len;
}
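/*
 * Worked example (illustrative only): with a single recorded mapping
 * covering [0x400000, 0x800000) and a request for 0x400000 bytes at
 * va 0x600000, kvamap_extract() returns 0x200000 and sets *new_va to
 * 0x800000: the first 0x200000 bytes are already mapped, and only the
 * remainder starting at 0x800000 still needs to be mapped.
 */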
/*
 * Record new kernel mapping.
 */
static void
kvamap_enter(uint64_t va, uint64_t len)
{
	int i;

	DPRINTF(("kvamap_enter: %d@%p\n", (int)len, (void*)(u_long)va));
	for (i = 0; (len > 0) && (i < MAXSEGNUM); i++) {
		if (kvamap[i].start == 0) {
			kvamap[i].start = va;
			kvamap[i].end = va + len;
			break;
		}
	}

	if (i == MAXSEGNUM) {
		panic("Too many allocations requested.");
	}
}

/*
 * Initialize TLB as required by MMU mapping functions.
 */
static void
tlb_init(void)
{
	phandle_t root;
#ifdef SUN4V
	char buf[128];
#endif

	if (dtlb_store != NULL) {
		return;
	}

	if ( (root = prom_findroot()) == -1) {
		panic("tlb_init: prom_findroot()");
	}
#ifdef SUN4V
	if (_prom_getprop(root, "compatible", buf, sizeof(buf)) > 0 &&
	    strcmp(buf, "sun4v") == 0) {
		tlb_init_sun4v();
		sun4v = 1;
	}
	else {
#endif
		tlb_init_sun4u();
#ifdef SUN4V
	}
#endif

	dtlb_store = alloc(dtlb_slot_max * sizeof(*dtlb_store));
	itlb_store = alloc(itlb_slot_max * sizeof(*itlb_store));
	if (dtlb_store == NULL || itlb_store == NULL) {
		panic("tlb_init: malloc");
	}

	dtlb_slot = itlb_slot = 0;
}

/*
 * Initialize TLB as required by MMU mapping functions - sun4u.
 */
static void
tlb_init_sun4u(void)
{
	phandle_t child;
	phandle_t root;
	char buf[128];
	bool foundcpu = false;
	u_int bootcpu;
	u_int cpu;

	bootcpu = get_cpuid();

	if ( (root = prom_findroot()) == -1) {
		panic("tlb_init: prom_findroot()");
	}

	for (child = prom_firstchild(root); child != 0;
	    child = prom_nextsibling(child)) {
		if (child == -1) {
			panic("tlb_init: OF_child");
		}
		if (_prom_getprop(child, "device_type", buf, sizeof(buf)) > 0 &&
		    strcmp(buf, "cpu") == 0) {
			if (_prom_getprop(child, "upa-portid", &cpu,
			    sizeof(cpu)) == -1 && _prom_getprop(child, "portid",
			    &cpu, sizeof(cpu)) == -1)
				panic("tlb_init: prom_getprop");
			foundcpu = true;
			if (cpu == bootcpu)
				break;
		}
	}
	if (!foundcpu)
		panic("tlb_init: no cpu found!");
	if (cpu != bootcpu)
		panic("tlb_init: no node for bootcpu?!?!");
	if (_prom_getprop(child, "#dtlb-entries", &dtlb_slot_max,
	    sizeof(dtlb_slot_max)) == -1 ||
	    _prom_getprop(child, "#itlb-entries", &itlb_slot_max,
	    sizeof(itlb_slot_max)) == -1)
		panic("tlb_init: prom_getprop");
}

#ifdef SUN4V
/*
 * Initialize TLB as required by MMU mapping functions - sun4v.
 */
static void
tlb_init_sun4v(void)
{
	psize_t len;
	paddr_t pa;
	int64_t hv_rc;

	hv_mach_desc((paddr_t)NULL, &len); /* Trick to get actual length */
	if ( !len ) {
		panic("tlb_init_sun4v: hv_mach_desc() failed");
	}
	pa = OF_alloc_phys(len, 16);
	if ( pa == -1 ) {
		panic("OF_alloc_phys() failed");
	}
	hv_rc = hv_mach_desc(pa, &len);
	if (hv_rc != H_EOK) {
		panic("hv_mach_desc() failed");
	}
	/* XXX dig out TLB node info - 64 is ok for loading the kernel */
	dtlb_slot_max = itlb_slot_max = 64;
}
#endif

/*
 * Map requested memory region with permanent 4MB pages.
 */
static int
mmu_mapin(vaddr_t rva, vsize_t len)
{
	len = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;

	tlb_init();

#ifdef SUN4V
	if ( sun4v )
		return mmu_mapin_sun4v(rva, len);
	else
#endif
	return mmu_mapin_sun4u(rva, len);
}
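/*
 * Example of the 4MB normalization above (illustrative only): a request
 * for 0x10000 bytes at va 0x1402000 becomes a request for one 4MB page
 * at va 0x1400000, since roundup2(0x10000 + 0x2000, 0x400000) is
 * 0x400000.  ofw_mapin() below performs the same normalization.
 */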
/*
 * Map requested memory region with permanent 4MB pages - sun4u.
 */
static int
mmu_mapin_sun4u(vaddr_t rva, vsize_t len)
{
	uint64_t data;
	paddr_t pa;
	vaddr_t va, mva;

	for (pa = (paddr_t)-1; len > 0; rva = va) {
		if ( (len = kvamap_extract(rva, len, &va)) == 0) {
			/* The rest is already mapped */
			break;
		}

		if (dtlb_va_to_pa(va) == (u_long)-1 ||
		    itlb_va_to_pa(va) == (u_long)-1) {
			/* Allocate a physical page, claim the virtual area */
			if (pa == (paddr_t)-1) {
				pa = OF_alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
				if (pa == (paddr_t)-1)
					panic("out of memory");
				mva = OF_claim_virt(va, PAGE_SIZE_4M);
				if (mva != va) {
					panic("can't claim virtual page "
					    "(wanted %#lx, got %#lx)",
					    va, mva);
				}
				/* The mappings may have changed, be paranoid. */
				continue;
			}

			/*
			 * In practice up to two fewer slots than this may
			 * be available, depending on the kernel TSB size.
			 */
			if (dtlb_slot >= dtlb_slot_max)
				panic("mmu_mapin: out of dtlb_slots");
			if (itlb_slot >= itlb_slot_max)
				panic("mmu_mapin: out of itlb_slots");

			DPRINTF(("mmu_mapin: 0x%lx:0x%x.0x%x\n", va,
			    hi(pa), lo(pa)));

			data = SUN4U_TSB_DATA(0,	/* global */
			    PGSZ_4M,			/* 4mb page */
			    pa,				/* phys.address */
			    1,				/* privileged */
			    1,				/* write */
			    1,				/* cache */
			    1,				/* alias */
			    1,				/* valid */
			    0,				/* endianness */
			    0				/* wc */
			    );
			data |= SUN4U_TLB_L | SUN4U_TLB_CV; /* locked, virt.cache */

			dtlb_store[dtlb_slot].te_pa = pa;
			dtlb_store[dtlb_slot].te_va = va;
			dtlb_slot++;
			dtlb_enter(va, hi(data), lo(data));
			pa = (paddr_t)-1;
		}

		kvamap_enter(va, PAGE_SIZE_4M);

		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}

	if (pa != (paddr_t)-1) {
		OF_free_phys(pa, PAGE_SIZE_4M);
	}

	return (0);
}

#ifdef SUN4V
/*
 * Map requested memory region with permanent 4MB pages - sun4v.
 */
static int
mmu_mapin_sun4v(vaddr_t rva, vsize_t len)
{
	uint64_t data;
	paddr_t pa;
	vaddr_t va, mva;
	int64_t hv_rc;

	for (pa = (paddr_t)-1; len > 0; rva = va) {
		if ( (len = kvamap_extract(rva, len, &va)) == 0) {
			/* The rest is already mapped */
			break;
		}

		/* Allocate a physical page, claim the virtual area */
		if (pa == (paddr_t)-1) {
			pa = OF_alloc_phys(PAGE_SIZE_4M, PAGE_SIZE_4M);
			if (pa == (paddr_t)-1)
				panic("out of memory");
			mva = OF_claim_virt(va, PAGE_SIZE_4M);
			if (mva != va) {
				panic("can't claim virtual page "
				    "(wanted %#lx, got %#lx)",
				    va, mva);
			}
		}

		/*
		 * In practice up to two fewer slots than this may be
		 * available, depending on the kernel TSB size.
		 */
		if (dtlb_slot >= dtlb_slot_max)
			panic("mmu_mapin: out of dtlb_slots");
		if (itlb_slot >= itlb_slot_max)
			panic("mmu_mapin: out of itlb_slots");

		DPRINTF(("mmu_mapin: 0x%lx:0x%x.0x%x\n", va,
		    hi(pa), lo(pa)));

		data = SUN4V_TSB_DATA(
			0,		/* global */
			PGSZ_4M,	/* 4mb page */
			pa,		/* phys.address */
			1,		/* privileged */
			1,		/* write */
			1,		/* cache */
			1,		/* alias */
			1,		/* valid */
			0,		/* endianness */
			0		/* wc */
			);
		data |= SUN4V_TLB_CV; /* virt.cache */

		dtlb_store[dtlb_slot].te_pa = pa;
		dtlb_store[dtlb_slot].te_va = va;
		dtlb_slot++;
		hv_rc = hv_mmu_map_perm_addr(va, data, MAP_DTLB);
		if ( hv_rc != H_EOK ) {
			panic("hv_mmu_map_perm_addr() failed - rc = %ld", hv_rc);
		}

		kvamap_enter(va, PAGE_SIZE_4M);

		pa = (paddr_t)-1;

		len -= len > PAGE_SIZE_4M ? PAGE_SIZE_4M : len;
		va += PAGE_SIZE_4M;
	}

	if (pa != (paddr_t)-1) {
		OF_free_phys(pa, PAGE_SIZE_4M);
	}

	return (0);
}
#endif
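/*
 * The mmu_* wrappers below first make sure the target region is covered
 * by locked 4MB mappings (mmu_mapin()) and then fall through to the
 * generic read/copy/set primitives.
 */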
static ssize_t
mmu_read(int f, void *addr, size_t size)
{
	mmu_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
mmu_memcpy(void *dst, const void *src, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
mmu_memset(void *dst, int c, size_t size)
{
	mmu_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
mmu_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		/* XXX return all mappings to PROM and unmap the pages! */
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * Claim requested memory region in OpenFirmware allocation pool.
 */
static int
ofw_mapin(vaddr_t rva, vsize_t len)
{
	vaddr_t va;

	len = roundup2(len + (rva & PAGE_MASK_4M), PAGE_SIZE_4M);
	rva &= ~PAGE_MASK_4M;

	if ( (len = kvamap_extract(rva, len, &va)) != 0) {
		if (OF_claim((void *)(long)va, len, PAGE_SIZE_4M) == (void*)-1){
			panic("ofw_mapin: Cannot claim memory.");
		}
		kvamap_enter(va, len);
	}

	return (0);
}

static ssize_t
ofw_read(int f, void *addr, size_t size)
{
	ofw_mapin((vaddr_t)addr, size);
	return read(f, addr, size);
}

static void*
ofw_memcpy(void *dst, const void *src, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memcpy(dst, src, size);
}

static void*
ofw_memset(void *dst, int c, size_t size)
{
	ofw_mapin((vaddr_t)dst, size);
	return memset(dst, c, size);
}

static void
ofw_freeall(void)
{
	int i;

	dtlb_slot = itlb_slot = 0;
	for (i = 0; i < MAXSEGNUM; i++) {
		OF_release((void*)(u_long)kvamap[i].start,
		    (u_int)(kvamap[i].end - kvamap[i].start));
		kvamap[i].start = kvamap[i].end = 0;
	}
}

/*
 * The NOP implementation exists solely for the sake of loading the kernel
 * headers.  Memory for them comes from the alloc() interface, so no mapping
 * work (and nothing dangerous) needs to be done here.
 */
static ssize_t
nop_read(int f, void *addr, size_t size)
{
	return read(f, addr, size);
}

static void*
nop_memcpy(void *dst, const void *src, size_t size)
{
	/*
	 * Real NOP to make LOAD_HDR work: loadfile_elfXX copies ELF headers
	 * right after the highest kernel address which will not be mapped with
	 * nop_XXX operations.
	 */
	return (dst);
}

static void*
nop_memset(void *dst, int c, size_t size)
{
	return memset(dst, c, size);
}

static void
nop_freeall(void)
{ }

/*
 * loadfile() hooks.
 */
ssize_t
sparc64_read(int f, void *addr, size_t size)
{
	return (*memsw->read)(f, addr, size);
}

void*
sparc64_memcpy(void *dst, const void *src, size_t size)
{
	return (*memsw->memcpy)(dst, src, size);
}

void*
sparc64_memset(void *dst, int c, size_t size)
{
	return (*memsw->memset)(dst, c, size);
}

/*
 * Remove write permissions from text mappings in the dTLB.
 * Add entries in the iTLB.
 */
void
sparc64_finalize_tlb(u_long data_va)
{
#ifdef SUN4V
	if ( sun4v )
		sparc64_finalize_tlb_sun4v(data_va);
	else
#endif
	sparc64_finalize_tlb_sun4u(data_va);
}
/*
 * Remove write permissions from text mappings in the dTLB - sun4u.
 * Add entries in the iTLB.
 */
void
sparc64_finalize_tlb_sun4u(u_long data_va)
{
	int i;
	int64_t data;
	bool writable_text = false;

	for (i = 0; i < dtlb_slot; i++) {
		if (dtlb_store[i].te_va >= data_va) {
			/*
			 * If (for whatever reason) the start of the
			 * writable section is right at the start of
			 * the kernel, we need to map it into the ITLB
			 * nevertheless (and don't make it readonly).
			 */
			if (i == 0 && dtlb_store[i].te_va == data_va)
				writable_text = true;
			else
				continue;
		}

		data = SUN4U_TSB_DATA(0,	/* global */
		    PGSZ_4M,			/* 4mb page */
		    dtlb_store[i].te_pa,	/* phys.address */
		    1,				/* privileged */
		    0,				/* write */
		    1,				/* cache */
		    1,				/* alias */
		    1,				/* valid */
		    0,				/* endianness */
		    0				/* wc */
		    );
		data |= SUN4U_TLB_L | SUN4U_TLB_CV; /* locked, virt.cache */
		if (!writable_text)
			dtlb_replace(dtlb_store[i].te_va, hi(data), lo(data));
		itlb_store[itlb_slot] = dtlb_store[i];
		itlb_slot++;
		itlb_enter(dtlb_store[i].te_va, hi(data), lo(data));
	}
	if (writable_text)
		printf("WARNING: kernel text mapped writable!\n");

}

#ifdef SUN4V
/*
 * Remove write permissions from text mappings in the dTLB - sun4v.
 * Add entries in the iTLB.
 */
void
sparc64_finalize_tlb_sun4v(u_long data_va)
{
	int i;
	int64_t data;
	bool writable_text = false;
	int64_t hv_rc;

	for (i = 0; i < dtlb_slot; i++) {
		if (dtlb_store[i].te_va >= data_va) {
			/*
			 * If (for whatever reason) the start of the
			 * writable section is right at the start of
			 * the kernel, we need to map it into the ITLB
			 * nevertheless (and don't make it readonly).
			 */
			if (i == 0 && dtlb_store[i].te_va == data_va)
				writable_text = true;
			else
				continue;
		}

		data = SUN4V_TSB_DATA(
			0,		/* global */
			PGSZ_4M,	/* 4mb page */
			dtlb_store[i].te_pa, /* phys.address */
			1,		/* privileged */
			0,		/* write */
			1,		/* cache */
			1,		/* alias */
			1,		/* valid */
			0,		/* endianness */
			0		/* wc */
			);
		data |= SUN4V_TLB_CV|SUN4V_TLB_X; /* virt.cache, executable */
		if (!writable_text) {
			hv_rc = hv_mmu_unmap_perm_addr(dtlb_store[i].te_va,
			    MAP_DTLB);
			if ( hv_rc != H_EOK ) {
				panic("hv_mmu_unmap_perm_addr() failed - "
				    "rc = %ld", hv_rc);
			}
			hv_rc = hv_mmu_map_perm_addr(dtlb_store[i].te_va, data,
			    MAP_DTLB);
			if ( hv_rc != H_EOK ) {
				panic("hv_mmu_map_perm_addr() failed - "
				    "rc = %ld", hv_rc);
			}
		}

		itlb_store[itlb_slot] = dtlb_store[i];
		itlb_slot++;
		hv_rc = hv_mmu_map_perm_addr(dtlb_store[i].te_va, data,
		    MAP_ITLB);
		if ( hv_rc != H_EOK ) {
			panic("hv_mmu_map_perm_addr() failed - rc = %ld", hv_rc);
		}
	}
	if (writable_text)
		printf("WARNING: kernel text mapped writable!\n");
}
#endif
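/*
 * The records added below come in pairs: BTINFO_ITLB_SLOTS and
 * BTINFO_DTLB_SLOTS carry the entry counts, while BTINFO_ITLB and
 * BTINFO_DTLB carry the va/pa of every locked entry.  The latter are
 * variable-sized, hence the explicit size computation (header plus one
 * tlb_entry per slot).
 */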
/*
 * Record kernel mappings in bootinfo structure.
 */
void
sparc64_bi_add(void)
{
	int i;
	int itlb_size, dtlb_size;
	struct btinfo_count bi_count;
	struct btinfo_tlb *bi_itlb, *bi_dtlb;

	bi_count.count = itlb_slot;
	bi_add(&bi_count, BTINFO_ITLB_SLOTS, sizeof(bi_count));
	bi_count.count = dtlb_slot;
	bi_add(&bi_count, BTINFO_DTLB_SLOTS, sizeof(bi_count));

	itlb_size = sizeof(*bi_itlb) + sizeof(struct tlb_entry) * itlb_slot;
	dtlb_size = sizeof(*bi_dtlb) + sizeof(struct tlb_entry) * dtlb_slot;

	bi_itlb = alloc(itlb_size);
	bi_dtlb = alloc(dtlb_size);

	if ((bi_itlb == NULL) || (bi_dtlb == NULL)) {
		panic("Out of memory in sparc64_bi_add.\n");
	}

	for (i = 0; i < itlb_slot; i++) {
		bi_itlb->tlb[i].te_va = itlb_store[i].te_va;
		bi_itlb->tlb[i].te_pa = itlb_store[i].te_pa;
	}
	bi_add(bi_itlb, BTINFO_ITLB, itlb_size);

	for (i = 0; i < dtlb_slot; i++) {
		bi_dtlb->tlb[i].te_va = dtlb_store[i].te_va;
		bi_dtlb->tlb[i].te_pa = dtlb_store[i].te_pa;
	}
	bi_add(bi_dtlb, BTINFO_DTLB, dtlb_size);
}

/*
 * Choose kernel image mapping strategy:
 *
 *	LOADFILE_NOP_ALLOCATOR	To load kernel image headers
 *	LOADFILE_OFW_ALLOCATOR	To map the kernel by OpenFirmware means
 *	LOADFILE_MMU_ALLOCATOR	To use permanent 4MB mappings
 */
void
loadfile_set_allocator(int type)
{
	if (type >= (sizeof(memswa) / sizeof(struct memsw))) {
		panic("Bad allocator request.\n");
	}

	/*
	 * Release all memory claimed by the previous allocator and switch
	 * to the requested one for subsequent memory operations.
	 */
	(*memsw->freeall)();
	memsw = &memswa[type];
}
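/*
 * A plausible calling sequence, sketched from the comments above
 * (illustrative only; the actual caller lives elsewhere in the boot
 * program):
 *
 *	loadfile_set_allocator(LOADFILE_NOP_ALLOCATOR);
 *	(read the kernel image headers)
 *	loadfile_set_allocator(LOADFILE_MMU_ALLOCATOR);
 *	(load the segments through sparc64_read(), sparc64_memcpy() and
 *	 sparc64_memset())
 *	sparc64_finalize_tlb(data_va);
 *	sparc64_bi_add();
 */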