1 /* $NetBSD: sun2.c,v 1.8 2007/03/04 06:00:55 christos Exp $ */ 2 3 /*- 4 * Copyright (c) 1998 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Gordon W. Ross and Matthew Fredette. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 /* 40 * Standalone functions specific to the Sun2. 41 */ 42 43 /* Need to avoid conflicts on these: */ 44 #define get_pte sun2_get_pte 45 #define set_pte sun2_set_pte 46 #define get_segmap sun2_get_segmap 47 #define set_segmap sun2_set_segmap 48 49 /* 50 * We need to get the sun2 NBSG definition, even if we're 51 * building this with a different sun68k target. 52 */ 53 #include <arch/sun2/include/param.h> 54 55 #include <sys/param.h> 56 #include <machine/idprom.h> 57 #include <machine/mon.h> 58 59 #include <arch/sun2/include/pte.h> 60 #include <arch/sun2/sun2/control.h> 61 #ifdef notyet 62 #include <arch/sun3/sun3/vme.h> 63 #else 64 #define VME16_BASE MBIO_BASE 65 #define VME16_MASK MBIO_MASK 66 #endif 67 #include <arch/sun2/sun2/mbmem.h> 68 #include <arch/sun2/sun2/mbio.h> 69 70 #include <stand.h> 71 72 #include "libsa.h" 73 #include "dvma.h" 74 #include "saio.h" /* enum MAPTYPES */ 75 76 #define OBIO_MASK 0xFFFFFF 77 78 u_int get_pte(vaddr_t); 79 void set_pte(vaddr_t, u_int); 80 char * dvma2_alloc(int); 81 void dvma2_free(char *, int); 82 char * dvma2_mapin(char *, int); 83 void dvma2_mapout(char *, int); 84 char * dev2_mapin(int, u_long, int); 85 86 struct mapinfo { 87 int maptype; 88 int pgtype; 89 u_int base; 90 u_int mask; 91 }; 92 93 #ifdef notyet 94 struct mapinfo 95 sun2_mapinfo[MAP__NTYPES] = { 96 /* On-board memory, I/O */ 97 { MAP_MAINMEM, PGT_OBMEM, 0, ~0 }, 98 { MAP_OBIO, PGT_OBIO, 0, OBIO_MASK }, 99 /* Multibus memory, I/O */ 100 { MAP_MBMEM, PGT_MBMEM, MBMEM_BASE, MBMEM_MASK }, 101 { MAP_MBIO, PGT_MBIO, MBIO_BASE, MBIO_MASK }, 102 /* VME A16 */ 103 { MAP_VME16A16D, PGT_VME_D16, VME16_BASE, VME16_MASK }, 104 { MAP_VME16A32D, 0, 0, 0 }, 105 /* VME A24 */ 106 { MAP_VME24A16D, 0, 0, 0 }, 107 { MAP_VME24A32D, 0, 0, 0 }, 108 /* VME A32 */ 109 { MAP_VME32A16D, 0, 0, 0 }, 110 { MAP_VME32A32D, 0, 0, 0 }, 111 }; 112 #endif 113 114 /* The virtual address we will use for PROM device mappings. */ 115 int sun2_devmap = SUN3_MONSHORTSEG; 116 117 char * 118 dev2_mapin(int maptype, u_long physaddr, int length) 119 { 120 #ifdef notyet 121 u_int i, pa, pte, pgva, va; 122 123 if ((sun2_devmap + length) > SUN3_MONSHORTPAGE) 124 panic("dev2_mapin: length=%d", length); 125 126 for (i = 0; i < MAP__NTYPES; i++) 127 if (sun2_mapinfo[i].maptype == maptype) 128 goto found; 129 panic("dev2_mapin: bad maptype"); 130 found: 131 132 if (physaddr & ~(sun2_mapinfo[i].mask)) 133 panic("dev2_mapin: bad address"); 134 pa = sun2_mapinfo[i].base += physaddr; 135 136 pte = PA_PGNUM(pa) | PG_PERM | 137 sun2_mapinfo[i].pgtype; 138 139 va = pgva = sun2_devmap; 140 do { 141 set_pte(pgva, pte); 142 pgva += NBPG; 143 pte += 1; 144 length -= NBPG; 145 } while (length > 0); 146 sun2_devmap = pgva; 147 va += (physaddr & PGOFSET); 148 149 #ifdef DEBUG_PROM 150 if (debug) 151 printf("dev2_mapin: va=0x%x pte=0x%x\n", 152 va, get_pte(va)); 153 #endif 154 return ((char*)va); 155 #else 156 panic("dev2_mapin"); 157 return(NULL); 158 #endif 159 } 160 161 /***************************************************************** 162 * DVMA support 163 */ 164 165 /* 166 * The easiest way to deal with the need for DVMA mappings is to 167 * create a DVMA alias mapping of the entire address range used by 168 * the boot program. That way, dvma_mapin can just compute the 169 * DVMA alias address, and dvma_mapout does nothing. 170 * 171 * Note that this assumes that standalone programs will do I/O 172 * operations only within range (SA_MIN_VA .. SA_MAX_VA) checked. 173 */ 174 175 #define DVMA_BASE 0x00f00000 176 #define DVMA_MAPLEN 0x38000 /* 256K - 32K (save MONSHORTSEG) */ 177 178 #define SA_MIN_VA 0x220000 179 #define SA_MAX_VA (SA_MIN_VA + DVMA_MAPLEN) 180 181 /* This points to the end of the free DVMA space. */ 182 u_int dvma2_end = DVMA_BASE + DVMA_MAPLEN; 183 184 void 185 dvma2_init(void) 186 { 187 int segva, dmava, sme; 188 189 segva = SA_MIN_VA; 190 dmava = DVMA_BASE; 191 192 while (segva < SA_MAX_VA) { 193 sme = get_segmap(segva); 194 set_segmap(dmava, sme); 195 segva += NBSG; 196 dmava += NBSG; 197 } 198 } 199 200 /* Convert a local address to a DVMA address. */ 201 char * 202 dvma2_mapin(char *addr, int len) 203 { 204 int va = (int)addr; 205 206 /* Make sure the address is in the DVMA map. */ 207 if ((va < SA_MIN_VA) || (va >= SA_MAX_VA)) 208 panic("dvma2_mapin: 0x%x outside 0x%x..0x%x", 209 va, SA_MIN_VA, SA_MAX_VA); 210 211 va -= SA_MIN_VA; 212 va += DVMA_BASE; 213 214 return ((char *) va); 215 } 216 217 /* Destroy a DVMA address alias. */ 218 void 219 dvma2_mapout(char *addr, int len) 220 { 221 int va = (int)addr; 222 223 /* Make sure the address is in the DVMA map. */ 224 if ((va < DVMA_BASE) || (va >= (DVMA_BASE + DVMA_MAPLEN))) 225 panic("dvma2_mapout"); 226 } 227 228 char * 229 dvma2_alloc(int len) 230 { 231 len = m68k_round_page(len); 232 dvma2_end -= len; 233 return((char*)dvma2_end); 234 } 235 236 void 237 dvma2_free(char *dvma, int len) 238 { 239 /* not worth the trouble */ 240 } 241 242 /***************************************************************** 243 * Control space stuff... 244 */ 245 246 u_int 247 get_pte(vaddr_t va) 248 { 249 u_int pte; 250 251 pte = get_control_word(CONTROL_ADDR_BUILD(PGMAP_BASE, va)); 252 if (pte & PG_VALID) { 253 /* 254 * This clears bit 30 (the kernel readable bit, which 255 * should always be set), bit 28 (which should always 256 * be set) and bit 26 (the user writable bit, which we 257 * always have tracking the kernel writable bit). In 258 * the protection, this leaves bit 29 (the kernel 259 * writable bit) and bit 27 (the user readable bit). 260 * See pte2.h for more about this hack. 261 */ 262 pte &= ~(0x54000000); 263 /* 264 * Flip bit 27 (the user readable bit) to become bit 265 * 27 (the PG_SYSTEM bit). 266 */ 267 pte ^= (PG_SYSTEM); 268 } 269 return (pte); 270 } 271 272 void 273 set_pte(vaddr_t va, u_int pte) 274 { 275 if (pte & PG_VALID) { 276 /* Clear bit 26 (the user writable bit). */ 277 pte &= (~0x04000000); 278 /* 279 * Flip bit 27 (the PG_SYSTEM bit) to become bit 27 280 * (the user readable bit). 281 */ 282 pte ^= (PG_SYSTEM); 283 /* 284 * Always set bits 30 (the kernel readable bit) and 285 * bit 28, and set bit 26 (the user writable bit) iff 286 * bit 29 (the kernel writable bit) is set *and* bit 287 * 27 (the user readable bit) is set. This latter bit 288 * of logic is expressed in the bizarre second term 289 * below, chosen because it needs no branches. 290 */ 291 #if (PG_WRITE >> 2) != PG_SYSTEM 292 #error "PG_WRITE and PG_SYSTEM definitions don't match!" 293 #endif 294 pte |= 0x50000000 295 | ((((pte & PG_WRITE) >> 2) & pte) >> 1); 296 } 297 set_control_word(CONTROL_ADDR_BUILD(PGMAP_BASE, va), pte); 298 } 299 300 int 301 get_segmap(vaddr_t va) 302 { 303 va = CONTROL_ADDR_BUILD(SEGMAP_BASE, va); 304 return (get_control_byte(va)); 305 } 306 307 void 308 set_segmap(vaddr_t va, int sme) 309 { 310 va = CONTROL_ADDR_BUILD(SEGMAP_BASE, va); 311 set_control_byte(va, sme); 312 } 313 314 /* 315 * Copy the IDPROM contents into the passed buffer. 316 * The caller (idprom.c) will do the checksum. 317 */ 318 void 319 sun2_getidprom(u_char *dst) 320 { 321 vaddr_t src; /* control space address */ 322 int len, x; 323 324 src = IDPROM_BASE; 325 len = sizeof(struct idprom); 326 do { 327 x = get_control_byte(src); 328 src += NBPG; 329 *dst++ = x; 330 } while (--len > 0); 331 } 332 333 /***************************************************************** 334 * Init our function pointers, etc. 335 */ 336 337 /* 338 * For booting, the PROM in fredette's Sun 2/120 doesn't map 339 * much main memory, and what is mapped is mapped strangely. 340 * Low virtual memory is mapped like: 341 * 342 * 0x000000 - 0x0bffff virtual -> 0x000000 - 0x0bffff physical 343 * 0x0c0000 - 0x0fffff virtual -> invalid 344 * 0x100000 - 0x13ffff virtual -> 0x0c0000 - 0x0fffff physical 345 * 0x200800 - 0x3fffff virtual -> 0x200800 - 0x3fffff physical 346 * 347 * I think the SunOS authors wanted to load kernels starting at 348 * physical zero, and assumed that kernels would be less 349 * than 768K (0x0c0000) long. Also, the PROM maps physical 350 * 0x0c0000 - 0x0fffff into DVMA space, so we can't take the 351 * easy road and just add more mappings to use that physical 352 * memory while loading (the PROM might do DMA there). 353 * 354 * What we do, then, is assume a 4MB machine (you'll really 355 * need that to run NetBSD at all anyways), and we map two 356 * chunks of physical and virtual space: 357 * 358 * 0x400000 - 0x4bffff virtual -> 0x000000 - 0x0bffff physical 359 * 0x4c0000 - 0x600000 virtual -> 0x2c0000 - 0x3fffff physical 360 * 361 * And then we load starting at virtual 0x400000. We will do 362 * all of this mapping just by copying PMEGs. 363 * 364 * After the load is done, but before we enter the kernel, we're 365 * done with the PROM, so we copy the part of the kernel that 366 * got loaded at physical 0x2c0000 down to physical 0x0c0000. 367 * This can't just be a PMEG copy; we've actually got to move 368 * bytes in physical memory. 369 * 370 * These two chunks of physical and virtual space are defined 371 * in macros below. Some of the macros are only for completeness: 372 */ 373 #define MEM_CHUNK0_SIZE (0x0c0000) 374 #define MEM_CHUNK0_LOAD_PHYS (0x000000) 375 #define MEM_CHUNK0_LOAD_VIRT (0x400000) 376 #define MEM_CHUNK0_LOAD_VIRT_PROM MEM_CHUNK0_LOAD_PHYS 377 #define MEM_CHUNK0_COPY_PHYS MEM_CHUNK0_LOAD_PHYS 378 #define MEM_CHUNK0_COPY_VIRT MEM_CHUNK0_COPY_PHYS 379 380 #define MEM_CHUNK1_SIZE (0x140000) 381 #define MEM_CHUNK1_LOAD_PHYS (0x2c0000) 382 #define MEM_CHUNK1_LOAD_VIRT (MEM_CHUNK0_LOAD_VIRT + MEM_CHUNK0_SIZE) 383 #define MEM_CHUNK1_LOAD_VIRT_PROM MEM_CHUNK1_LOAD_PHYS 384 #define MEM_CHUNK1_COPY_PHYS (MEM_CHUNK0_LOAD_PHYS + MEM_CHUNK0_SIZE) 385 #define MEM_CHUNK1_COPY_VIRT MEM_CHUNK1_COPY_PHYS 386 387 /* Maps memory for loading. */ 388 u_long 389 sun2_map_mem_load(void) 390 { 391 vaddr_t off; 392 393 /* Map chunk zero for loading. */ 394 for(off = 0; off < MEM_CHUNK0_SIZE; off += NBSG) 395 set_segmap(MEM_CHUNK0_LOAD_VIRT + off, 396 get_segmap(MEM_CHUNK0_LOAD_VIRT_PROM + off)); 397 398 /* Map chunk one for loading. */ 399 for(off = 0; off < MEM_CHUNK1_SIZE; off += NBSG) 400 set_segmap(MEM_CHUNK1_LOAD_VIRT + off, 401 get_segmap(MEM_CHUNK1_LOAD_VIRT_PROM + off)); 402 403 /* Tell our caller where in virtual space to load. */ 404 return MEM_CHUNK0_LOAD_VIRT; 405 } 406 407 /* Remaps memory for running. */ 408 void * 409 sun2_map_mem_run(void *entry) 410 { 411 vaddr_t off, off_end; 412 int sme; 413 u_int pte; 414 415 /* Chunk zero is already mapped and copied. */ 416 417 /* Chunk one needs to be mapped and copied. */ 418 pte = (get_pte(0) & ~PG_FRAME); 419 for(off = 0; off < MEM_CHUNK1_SIZE; ) { 420 421 /* 422 * We use the PMEG immediately before the 423 * segment we're copying in the PROM virtual 424 * mapping of the chunk. If this is the first 425 * segment, this is the PMEG the PROM used to 426 * map 0x2b8000 virtual to 0x2b8000 physical, 427 * which I'll assume is unused. For the second 428 * and subsequent segments, this will be the 429 * PMEG used to map the previous segment, which 430 * is now (since we already copied it) unused. 431 */ 432 sme = get_segmap((MEM_CHUNK1_LOAD_VIRT_PROM + off) - NBSG); 433 set_segmap(MEM_CHUNK1_COPY_VIRT + off, sme); 434 435 /* Set the PTEs in this new PMEG. */ 436 for(off_end = off + NBSG; off < off_end; off += NBPG) 437 set_pte(MEM_CHUNK1_COPY_VIRT + off, 438 pte | PA_PGNUM(MEM_CHUNK1_COPY_PHYS + off)); 439 440 /* Copy this segment. */ 441 memcpy((void *)(MEM_CHUNK1_COPY_VIRT + (off - NBSG)), 442 (void *)(MEM_CHUNK1_LOAD_VIRT + (off - NBSG)), 443 NBSG); 444 } 445 446 /* Tell our caller where in virtual space to enter. */ 447 return ((void *)entry) - MEM_CHUNK0_LOAD_VIRT; 448 } 449 450 void 451 sun2_init(void) 452 { 453 /* Set the function pointers. */ 454 dev_mapin_p = dev2_mapin; 455 dvma_alloc_p = dvma2_alloc; 456 dvma_free_p = dvma2_free; 457 dvma_mapin_p = dvma2_mapin; 458 dvma_mapout_p = dvma2_mapout; 459 460 /* Prepare DVMA segment. */ 461 dvma2_init(); 462 } 463