1 /* $NetBSD: amiga_init.c,v 1.133 2024/01/09 07:28:25 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1994 Michael L. Hitch
5 * Copyright (c) 1993 Markus Wild
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. All advertising materials mentioning features or use of this software
17 * must display the following acknowledgement:
18 * This product includes software developed by Markus Wild.
19 * 4. The name of the author may not be used to endorse or promote products
20 * derived from this software without specific prior written permission
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 */
33
34 #include "opt_amigaccgrf.h"
35 #include "opt_p5ppc68kboard.h"
36 #include "opt_devreload.h"
37 #include "opt_m68k_arch.h"
38 #include "z3rambd.h"
39 #include "ser.h"
40
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: amiga_init.c,v 1.133 2024/01/09 07:28:25 thorpej Exp $");
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/ioctl.h>
47 #include <sys/select.h>
48 #include <sys/tty.h>
49 #include <sys/buf.h>
50 #include <sys/msgbuf.h>
51 #include <sys/mbuf.h>
52 #include <sys/protosw.h>
53 #include <sys/domain.h>
54 #include <sys/dkbad.h>
55 #include <sys/reboot.h>
56 #include <sys/exec.h>
57
58 #include <dev/mm.h>
59 #include <uvm/uvm_extern.h>
60
61 #include <machine/pte.h>
62 #include <machine/cpu.h>
63 #include <amiga/amiga/cc.h>
64 #include <amiga/amiga/cia.h>
65 #include <amiga/amiga/custom.h>
66 #include <amiga/amiga/cfdev.h>
67 #include <amiga/amiga/drcustom.h>
68 #include <amiga/amiga/gayle.h>
69 #include <amiga/amiga/memlist.h>
70 #include <amiga/dev/zbusvar.h>
71 #include <amiga/dev/z3rambdvar.h>
72
/*
 * RELOC(v, t): access global `v' (as type t) while the kernel may still
 * be running at its load address rather than its link address.  start_c()
 * receives the offset between the two as `loadbase' and adds it to the
 * link-time address of the variable, so assignments land in the copy of
 * the kernel that will actually be used.  Only valid inside a function
 * that has `loadbase' in scope.
 */
#define RELOC(v, t)	*((t*)((u_int)&(v) + loadbase))

extern u_int lowram;
extern u_int Umap;
extern u_long boot_partition;
extern vaddr_t m68k_uptbase;

#ifdef P5PPC68KBOARD
extern int p5ppc;
#endif

/* end of kernel symbol table, as passed in by the boot loader (or NULL) */
extern char *esym;

#ifdef GRF_AGA
extern u_long aga_enable;
#endif

#if NSER > 0
extern int serconsole;
#endif

extern u_long noncontig_enable;

/*
 * some addresses used in locore
 */
vaddr_t INTREQRaddr;
vaddr_t INTREQWaddr;

/*
 * these are used by the extended spl?() macros.
 */
volatile unsigned short *amiga_intena_read, *amiga_intena_write;

/* virtual base of chip memory, and the current chipmem allocation bounds */
vaddr_t CHIPMEMADDR;
vaddr_t chipmem_start;
vaddr_t chipmem_end;

/* Zorro II (16-bit) memory carved out for DMA bounce buffers; see
 * alloc_z2mem() below. */
vaddr_t z2mem_start;		/* XXX */
static vaddr_t z2mem_end;	/* XXX */
int use_z2_mem = 1;		/* XXX */

/* physical memory layout as reported by the boot loader */
u_long boot_fphystart, boot_fphysize, boot_cphysize;

/* arguments saved by start_c() for the pmap_bootstrap() call made later
 * (with the MMU on) in start_c_finish() */
static u_int start_c_fphystart;
static u_int start_c_pstart;

static u_long boot_flags;

/* loader-provided memory segment list, appended after the kernel image */
struct boot_memlist *memlist;

/* loader-provided AutoConfig (ConfigDev) board list and its length */
struct cfdev *cfdev;
int ncfdev;

u_long scsi_nosync;
int shift_nosync;

void start_c(int, u_int, u_int, u_int, char *, u_int, u_long, u_long, u_int);
void rollcolor(int);
#ifdef DEVRELOAD
static int kernel_image_magic_size(void);
static void kernel_image_magic_copy(u_char *);
int kernel_reload_write(struct uio *);
extern void kernel_reload(char *, u_long, u_long, u_long, u_long,
	u_long, u_long, u_long, u_long, u_long, u_long);
#endif
extern void etext(void);
void start_c_finish(void);
140
141 void *
chipmem_steal(long amount)142 chipmem_steal(long amount)
143 {
144 /*
145 * steal from top of chipmem, so we don't collide with
146 * the kernel loaded into chipmem in the not-yet-mapped state.
147 */
148 vaddr_t p = chipmem_end - amount;
149 if (p & 1)
150 p = p - 1;
151 chipmem_end = p;
152 if(chipmem_start > chipmem_end)
153 panic("not enough chip memory");
154 return((void *)p);
155 }
156
157 /*
158 * XXX
159 * used by certain drivers currently to allocate zorro II memory
160 * for bounce buffers, if use_z2_mem is NULL, chipmem will be
161 * returned instead.
162 * XXX
163 */
164 void *
alloc_z2mem(long amount)165 alloc_z2mem(long amount)
166 {
167 if (use_z2_mem && z2mem_end && (z2mem_end - amount) >= z2mem_start) {
168 z2mem_end -= amount;
169 return ((void *)z2mem_end);
170 }
171 return (alloc_chipmem(amount));
172 }
173
174
175 /*
176 * this is the C-level entry function, it's called from locore.s.
177 * Preconditions:
178 * Interrupts are disabled
179 * PA may not be == VA, so we may have to relocate addresses
180 * before enabling the MMU
181 * Exec is no longer available (because we're loaded all over
182 * low memory, no ExecBase is available anymore)
183 *
184 * It's purpose is:
185 * Do the things that are done in locore.s in the hp300 version,
186 * this includes allocation of kernel maps and enabling the MMU.
187 *
188 * Some of the code in here is `stolen' from Amiga MACH, and was
189 * written by Bryan Ford and Niklas Hallqvist.
190 *
191 * Very crude 68040 support by Michael L. Hitch.
192 *
193 */
194
/*
 * If nonzero, kernel text/data is mapped copyback-cacheable (PG_CCB)
 * on 68040/68060; consulted below when the kernel PTEs are built.
 */
int kernel_copyback = 1;

/*
 * start_c: C-level bootstrap entry, called from locore.s with the MMU
 * off and interrupts disabled.  May be executing at a physical address
 * that differs from the link address by `loadbase', so until the kernel
 * is copied to fastmem near the end of this function, every global must
 * be accessed through RELOC().
 *
 * id         - machine id word (top byte 0x7D identifies a DraCo)
 * fphystart  - physical start of fast memory the kernel will run in
 * fphysize   - size of that fast memory segment
 * cphysize   - size of chip memory
 * esym_addr  - end of the loaded symbol table, or NULL if none
 * flags      - boot flags from the loader (AGA, noncontig, serial console)
 * inh_sync   - SCSI sync-inhibit mask
 * boot_part  - boot partition from the loader
 * loadbase   - offset of load address from link address (0 if loaded low)
 *
 * Builds the kernel segment table and page tables, maps the hardware
 * (chipmem, CIAs, Zorro II ROM/memory, Zorro bus space), copies the
 * kernel to fastmem if needed, and loads the MMU root pointer.
 * pmap_bootstrap() is deliberately deferred to start_c_finish(),
 * which runs after the MMU is on.
 */
__attribute__ ((no_instrument_function))
void
start_c(int id, u_int fphystart, u_int fphysize, u_int cphysize,
    char *esym_addr, u_int flags, u_long inh_sync, u_long boot_part,
    u_int loadbase)
{
	extern char end[];
	struct cfdev *cd;
	paddr_t pstart, pend;
	vaddr_t vstart, vend;
	psize_t avail;
	paddr_t ptpa;
	psize_t ptsize;
	u_int ptextra, kstsize;
	paddr_t Sysptmap_pa;
	register st_entry_t sg_proto, *sg;
#if defined(M68040) || defined(M68060)
	register st_entry_t *esg;
#endif
	register pt_entry_t pg_proto, *pg, *epg;
	vaddr_t end_loaded;
	u_int ncd;
#if defined(M68040) || defined(M68060)
	u_int i, nl1desc, nl2desc;
#endif
	vaddr_t kva;
	struct boot_memlist *ml;

#ifdef DEBUG_KERNEL_START
	/* XXX this only is valid if Altais is in slot 0 */
	volatile u_int8_t *altaiscolpt = (u_int8_t *)0x200003c8;
	volatile u_int8_t *altaiscol = (u_int8_t *)0x200003c9;
#endif

#ifdef DEBUG_KERNEL_START
	/* progress indication: DraCo via Altais palette, Amiga via color0 */
	if ((id>>24)==0x7D) {
		*altaiscolpt = 0;
		*altaiscol = 40;
		*altaiscol = 0;
		*altaiscol = 0;
	} else
		((volatile struct Custom *)0xdff000)->color[0] = 0xa00;	/* RED */
#endif

#ifdef LIMITMEM
	/* optionally clamp usable fastmem for testing */
	if (fphysize > LIMITMEM*1024*1024)
		fphysize = LIMITMEM*1024*1024;
#endif

	RELOC(boot_fphystart, u_long) = fphystart;
	RELOC(boot_fphysize, u_long) = fphysize;
	RELOC(boot_cphysize, u_long) = cphysize;

	RELOC(machineid, int) = id;
	RELOC(chipmem_end, vaddr_t) = cphysize;
	RELOC(esym, char *) = esym_addr;
	RELOC(boot_flags, u_long) = flags;
	RELOC(boot_partition, u_long) = boot_part;
#ifdef GRF_AGA
	/* boot flag bit 0: enable AGA display modes */
	if (flags & 1)
		RELOC(aga_enable, u_long) |= 1;
#endif
	/* boot flag bits 1-2: noncontiguous memory handling */
	if (flags & (3 << 1))
		RELOC(noncontig_enable, u_long) = (flags >> 1) & 3;
#if NSER > 0
	/* boot flag bit 3: disable serial console */
	if (flags & (1 << 3))
		RELOC(serconsole, int) = 0;
#endif

	RELOC(scsi_nosync, u_long) = inh_sync;

	/*
	 * the kernel ends at end(), plus the cfdev and memlist structures
	 * we placed there in the loader.  Correct for this now.  Also,
	 * account for kernel symbols if they are present.
	 */
	if (esym_addr == NULL)
		end_loaded = (vaddr_t)&end;
	else
		end_loaded = (vaddr_t)esym_addr;
	/* layout after the image: count word, cfdev[], then memlist */
	RELOC(ncfdev, int) = *(int *)(&RELOC(*(u_int *)end_loaded, u_int));
	RELOC(cfdev, struct cfdev *) = (struct cfdev *) ((int)end_loaded + 4);
	end_loaded += 4 + RELOC(ncfdev, int) * sizeof(struct cfdev);

	RELOC(memlist, struct boot_memlist *) =
	    (struct boot_memlist *)end_loaded;
	ml = &RELOC(*(struct boot_memlist *)end_loaded, struct boot_memlist);
	end_loaded = (vaddr_t)&((RELOC(memlist, struct boot_memlist *))->
	    m_seg[ml->m_nseg]);

	/*
	 * Get ZorroII (16-bit) memory if there is any and it's not where the
	 * kernel is loaded.  Reserve up to use_z2_mem * 7 * MAXPHYS bytes
	 * at the top of the segment for bounce buffers.
	 */
	if (ml->m_nseg > 0 && ml->m_nseg < 16 && RELOC(use_z2_mem, int)) {
		struct boot_memseg *sp, *esp;

		sp = ml->m_seg;
		esp = sp + ml->m_nseg;
		for (; sp < esp; sp++) {
			/* only FAST memory that is 24-bit DMA capable */
			if ((sp->ms_attrib & (MEMF_FAST | MEMF_24BITDMA))
			    != (MEMF_FAST|MEMF_24BITDMA))
				continue;
			if (sp->ms_start == fphystart)
				continue;
			RELOC(z2mem_end, paddr_t) =
			    sp->ms_start + sp->ms_size;
			RELOC(z2mem_start, paddr_t) =
			    RELOC(z2mem_end, paddr_t) - MAXPHYS *
			    RELOC(use_z2_mem, int) * 7;
			RELOC(NZTWOMEMPG, u_int) =
			    (RELOC(z2mem_end, paddr_t) -
			    RELOC(z2mem_start, paddr_t)) / PAGE_SIZE;
			/* don't reserve more than the whole segment */
			if ((RELOC(z2mem_end, paddr_t) -
			    RELOC(z2mem_start, paddr_t)) > sp->ms_size) {
				RELOC(NZTWOMEMPG, u_int) = sp->ms_size /
				    PAGE_SIZE;
				RELOC(z2mem_start, paddr_t) =
				    RELOC(z2mem_end, paddr_t) - sp->ms_size;
			}
			break;
		}
	}

	/*
	 * Scan ConfigDev list and get size of Zorro I/O boards that are
	 * outside the Zorro II I/O area; ZBUSAVAIL accumulates the KVA
	 * needed to map them all.
	 */
	for (RELOC(ZBUSAVAIL, u_int) = 0, cd =
	    &RELOC(*RELOC(cfdev, struct cfdev *),struct cfdev),
	    ncd = RELOC(ncfdev, int); ncd > 0; ncd--, cd++) {
		int bd_type = cd->rom.type & (ERT_TYPEMASK | ERTF_MEMLIST);

		/*
		 * Hack to support p5bus and p5pb on CyberStorm Mk-III / PPC
		 * and Blizzard PPC. XXX: this hack should only be active if
		 * non-autoconfiguring CyberVision PPC or BlizzardVision PPC
		 * was found.
		 */
		if (cd->rom.manid == 8512 &&
		    (cd->rom.prodid == 100 || cd->rom.prodid == 110))
			RELOC(ZBUSAVAIL, u_int) += m68k_round_page(0x1400000);
#if NZ3RAMBD > 0
		if (z3rambd_match_id(cd->rom.manid, cd->rom.prodid) > 0)
		{
			/* XXX: remove board from memlist */
		} else
#endif
		if (bd_type != ERT_ZORROIII &&
		    (bd_type != ERT_ZORROII || isztwopa(cd->addr)))
			continue;	/* It's not Z2 or Z3 I/O board */
		/*
		 * Hack to adjust board size for Zorro III boards that
		 * do not specify an extended size or subsize.  This is
		 * specifically for the GVP Spectrum and hopefully won't
		 * break with other boards that configure like this.
		 */
		if (bd_type == ERT_ZORROIII &&
		    !(cd->rom.flags & ERFF_EXTENDED) &&
		    (cd->rom.flags & ERT_Z3_SSMASK) == 0)
			cd->size = 0x10000 <<
			    ((cd->rom.type - 1) & ERT_MEMMASK);

		RELOC(ZBUSAVAIL, u_int) += m68k_round_page(cd->size);
	}

	/*
	 * assume KVA_MIN == 0.  We subtract the kernel code (and
	 * the configdev's and memlists) from the virtual and
	 * physical starts and ends.
	 */
	vend = fphysize;
	avail = vend;
	vstart = end_loaded;
	vstart = m68k_round_page(vstart);
	pstart = (paddr_t)vstart + fphystart;
	pend = vend + fphystart;
	avail -= vstart;

	/*
	 * save KVA of lwp0 u-area and allocate it.
	 */
	RELOC(lwp0uarea, vaddr_t) = vstart;
	pstart += USPACE;
	vstart += USPACE;
	avail -= USPACE;

	/* 68040 MMU needs a multi-page segment table; others need one page */
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;

	/*
	 * allocate the kernel segment table
	 */
	RELOC(Sysseg_pa, u_int) = pstart;
	RELOC(Sysseg, u_int) = vstart;
	vstart += PAGE_SIZE * kstsize;
	pstart += PAGE_SIZE * kstsize;
	avail -= PAGE_SIZE * kstsize;

	/*
	 * allocate kernel page table map
	 */
	RELOC(Sysptmap, u_int) = vstart;
	Sysptmap_pa = pstart;
	vstart += PAGE_SIZE;
	pstart += PAGE_SIZE;
	avail -= PAGE_SIZE;

	/*
	 * allocate initial page table pages; ptextra counts the extra PTEs
	 * needed for the device mappings set up further below.
	 */
	ptpa = pstart;
#ifdef DRACO
	if ((id>>24)==0x7D) {
		ptextra = NDRCCPG
		    + RELOC(NZTWOMEMPG, u_int)
		    + btoc(RELOC(ZBUSAVAIL, u_int));
	} else
#endif
	ptextra = NCHIPMEMPG + NCIAPG + NZTWOROMPG + RELOC(NZTWOMEMPG, u_int) +
	    btoc(RELOC(ZBUSAVAIL, u_int)) + NPCMCIAPG;

	ptsize = (RELOC(Sysptsize, u_int) +
	    howmany(ptextra, NPTEPG)) << PGSHIFT;

	vstart += ptsize;
	pstart += ptsize;
	avail -= ptsize;

	/*
	 * Sysmap is now placed at the end of Supervisor virtual address space.
	 */
	RELOC(Sysmap, u_int *) = (u_int *)SYSMAP_VA;

	/*
	 * initialize segment table and page table map
	 */
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040) {
		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" values).
		 */
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		esg = &sg[kstsize * NPTEPG];
		while (sg < esg)
			*sg++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		nl2desc = (ptsize >> PGSHIFT) * (NPTEPG / SG4_LEV3SIZE);
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		sg = &sg[SG4_LEV1SIZE];
		esg = &sg[nl2desc];
		sg_proto = ptpa | SG_U | SG_RW | SG_V;
		while (sg < esg) {
			*sg++ = sg_proto;
			sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t));
		}

		/*
		 * Initialize level 1 descriptors.  We need:
		 *	howmany(nl2desc, SG4_LEV2SIZE)
		 * level 1 descriptors to map the 'nl2desc' level 2's.
		 */
		nl1desc = howmany(nl2desc, SG4_LEV2SIZE);
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		esg = &sg[nl1desc];
		sg_proto = (paddr_t)&sg[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (sg < esg) {
			*sg++ = sg_proto;
			sg_proto += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}

		/* Sysmap is last entry in level 1 */
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		sg = &sg[SG4_LEV1SIZE - 1];
		*sg = sg_proto;

		/*
		 * Kernel segment table at end of next level 2 table
		 */
		i = SG4_LEV1SIZE + (nl1desc * SG4_LEV2SIZE);
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		sg = &sg[i + SG4_LEV2SIZE - (NPTEPG / SG4_LEV3SIZE)];
		esg = &sg[NPTEPG / SG4_LEV3SIZE];
		sg_proto = Sysptmap_pa | SG_U | SG_RW | SG_V;
		while (sg < esg) {
			*sg++ = sg_proto;
			sg_proto += (SG4_LEV3SIZE * sizeof (st_entry_t));
		}

		/* Include additional level 2 table for Sysmap in protostfree */
		RELOC(protostfree, u_int) =
		    (~0 << (1 + nl1desc + 1)) /* & ~(~0 << MAXKL2SIZE) */;

		/*
		 * Initialize Sysptmap
		 */
		pg = (pt_entry_t *)Sysptmap_pa;
		epg = &pg[ptsize >> PGSHIFT];
		pg_proto = ptpa | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
		}
		/*
		 * Invalidate rest of Sysptmap page
		 */
		epg = (pt_entry_t *)(Sysptmap_pa + PAGE_SIZE - sizeof(st_entry_t));
		while (pg < epg)
			*pg++ = SG_NV;
		pg = (pt_entry_t *)Sysptmap_pa;
		pg = &pg[SYSMAP_VA >> SEGSHIFT];
		*pg = Sysptmap_pa | PG_RW | PG_CI | PG_V;
	} else
#endif /* M68040 */
	{
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.
		 */
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		pg = (pt_entry_t *)Sysptmap_pa;
		epg = &pg[ptsize >> PGSHIFT];
		sg_proto = ptpa | SG_RW | SG_V;
		pg_proto = ptpa | PG_RW | PG_CI | PG_V;
		while (pg < epg) {
			*sg++ = sg_proto;
			*pg++ = pg_proto;
			sg_proto += PAGE_SIZE;
			pg_proto += PAGE_SIZE;
		}
		/*
		 * invalidate the remainder of each table
		 */
		epg = (pt_entry_t *)Sysptmap_pa;
		epg = &epg[TIA_SIZE];
		while (pg < epg) {
			*sg++ = SG_NV;
			*pg++ = PG_NV;
		}
		sg = (st_entry_t *)RELOC(Sysseg_pa, u_int);
		sg = &sg[SYSMAP_VA >> SEGSHIFT];
		pg = (pt_entry_t *)Sysptmap_pa;
		pg = &pg[SYSMAP_VA >> SEGSHIFT];
		*sg = Sysptmap_pa | SG_RW | SG_V;
		*pg = Sysptmap_pa | PG_RW | PG_CI | PG_V;
		/* XXX zero out rest of page? */
	}

	/*
	 * initialize kernel page table page(s) (assume load at VA 0)
	 */
	pg_proto = fphystart | PG_RO | PG_V;	/* text pages are RO */
	pg = (pt_entry_t *)ptpa;
	*pg++ = PG_NV;				/* Make page 0 invalid */
	pg_proto += PAGE_SIZE;
	for (kva = PAGE_SIZE; kva < (vaddr_t)etext;
	    kva += PAGE_SIZE, pg_proto += PAGE_SIZE)
		*pg++ = pg_proto;

	/*
	 * data, bss and dynamic tables are read/write
	 */
	pg_proto = (pg_proto & PG_FRAME) | PG_RW | PG_V;

#if defined(M68040) || defined(M68060)
	/*
	 * Map the kernel segment table cache invalidated for 68040/68060.
	 * (for the 68040 not strictly necessary, but recommended by Motorola;
	 *  for the 68060 mandatory)
	 */
	if (RELOC(mmutype, int) == MMU_68040) {

		if (RELOC(kernel_copyback, int))
			pg_proto |= PG_CCB;

		/*
		 * ASSUME: segment table and statically allocated page tables
		 * of the kernel are contiguously allocated, start at
		 * Sysseg and end at the current value of vstart.
		 */
		for (; kva < RELOC(Sysseg, u_int);
		    kva += PAGE_SIZE, pg_proto += PAGE_SIZE)
			*pg++ = pg_proto;

		/* the MMU tables themselves: cache-inhibited */
		pg_proto = (pg_proto & ~PG_CCB) | PG_CI;
		for (; kva < vstart; kva += PAGE_SIZE, pg_proto += PAGE_SIZE)
			*pg++ = pg_proto;

		pg_proto = (pg_proto & ~PG_CI);
		if (RELOC(kernel_copyback, int))
			pg_proto |= PG_CCB;
	}
#endif
	/*
	 * go till end of data allocated so far
	 * plus lwp0 u-area (to be allocated)
	 */
	for (; kva < vstart; kva += PAGE_SIZE, pg_proto += PAGE_SIZE)
		*pg++ = pg_proto;
	/*
	 * invalidate remainder of kernel PT
	 */
	while (pg < (pt_entry_t *) (ptpa + ptsize))
		*pg++ = PG_NV;

	/*
	 * validate internal IO PTEs following current vstart
	 */
	pg = &((u_int *)ptpa)[vstart >> PGSHIFT];
#ifdef DRACO
	if ((id >> 24) == 0x7D) {
		RELOC(DRCCADDR, u_int) = vstart;
		RELOC(CIAADDR, vaddr_t) =
		    RELOC(DRCCADDR, u_int) + DRCIAPG * PAGE_SIZE;
		if (RELOC(z2mem_end, vaddr_t) == 0)
			RELOC(ZBUSADDR, vaddr_t) =
			    RELOC(DRCCADDR, u_int) + NDRCCPG * PAGE_SIZE;
		pg_proto = DRCCBASE | PG_RW | PG_CI | PG_V;
		while (pg_proto < DRZ2BASE) {
			*pg++ = pg_proto;
			pg_proto += DRCCSTRIDE;
			vstart += PAGE_SIZE;
		}

		/* NCR 53C710 chip */
		*pg++ = DRSCSIBASE | PG_RW | PG_CI | PG_V;
		vstart += PAGE_SIZE;

#ifdef DEBUG_KERNEL_START
		/*
		 * early rollcolor Altais mapping
		 * XXX (only works if in slot 0)
		 */
		*pg++ = 0x20000000 | PG_RW | PG_CI | PG_V;
		vstart += PAGE_SIZE;
#endif
	} else
#endif
	{
		RELOC(CHIPMEMADDR, vaddr_t) = vstart;
		pg_proto = CHIPMEMBASE | PG_RW | PG_CI | PG_V;
						/* CI needed here?? */
		while (pg_proto < CHIPMEMTOP) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
			vstart += PAGE_SIZE;
		}
	}
	if (RELOC(z2mem_end, paddr_t)) {		/* XXX */
		RELOC(ZTWOMEMADDR, vaddr_t) = vstart;
		RELOC(ZBUSADDR, vaddr_t) = RELOC(ZTWOMEMADDR, vaddr_t) +
		    RELOC(NZTWOMEMPG, u_int) * PAGE_SIZE;
		pg_proto = RELOC(z2mem_start, paddr_t) |	/* XXX */
		    PG_RW | PG_V;			/* XXX */
		while (pg_proto < RELOC(z2mem_end, paddr_t)) {	/* XXX */
			*pg++ = pg_proto;		/* XXX */
			pg_proto += PAGE_SIZE;		/* XXX */
			vstart += PAGE_SIZE;
		}					/* XXX */
	}						/* XXX */
#ifdef DRACO
	if ((id >> 24) != 0x7D)
#endif
	{
		RELOC(CIAADDR, vaddr_t) = vstart;
		pg_proto = CIABASE | PG_RW | PG_CI | PG_V;
		while (pg_proto < CIATOP) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
			vstart += PAGE_SIZE;
		}
		RELOC(ZTWOROMADDR, vaddr_t) = vstart;
		pg_proto = ZTWOROMBASE | PG_RW | PG_CI | PG_V;
		while (pg_proto < ZTWOROMTOP) {
			*pg++ = pg_proto;
			pg_proto += PAGE_SIZE;
			vstart += PAGE_SIZE;
		}
		RELOC(ZBUSADDR, vaddr_t) = vstart;
		/* not on 8k boundary :-( */
		RELOC(CIAADDR, vaddr_t) += PAGE_SIZE/2;
		RELOC(CUSTOMADDR, vaddr_t) =
		    RELOC(ZTWOROMADDR, vaddr_t) - ZTWOROMBASE + CUSTOMBASE;
	}

	/*
	 *[ following page tables MAY be allocated to ZORRO3 space,
	 * but they're then later mapped in autoconf.c ]
	 */
	vstart += RELOC(ZBUSAVAIL, u_int);

	/*
	 * init mem sizes
	 */
	RELOC(maxmem, u_int) = pend >> PGSHIFT;
	RELOC(lowram, u_int) = fphystart;
	RELOC(physmem, u_int) = fphysize >> PGSHIFT;

	RELOC(virtual_avail, u_int) = vstart;

	/*
	 * Put user page tables starting at next 16MB boundary, to make kernel
	 * dumps more readable, with guaranteed 16MB of.
	 * XXX 16 MB instead of 256 MB should be enough, but...
	 * we need to fix the fastmem loading first. (see comment at line 375)
	 */
	RELOC(m68k_uptbase, vaddr_t) =
	    roundup(vstart + 0x10000000, 0x10000000);

	/*
	 * set this before copying the kernel, so the variable is updated in
	 * the `real' place too. protorp[0] is already preset to the
	 * CRP setting.
	 */
	RELOC(protorp[1], u_int) = RELOC(Sysseg_pa, u_int);

	/* stash pmap_bootstrap() arguments for start_c_finish() */
	RELOC(start_c_fphystart, u_int) = fphystart;
	RELOC(start_c_pstart, u_int) = pstart;

	/*
	 * copy over the kernel (and all now initialized variables)
	 * to fastram.  DONT use bcopy(), this beast is much larger
	 * than 128k !
	 */
	if (loadbase == 0) {
		register paddr_t *lp, *le, *fp;

		lp = (paddr_t *)0;
		le = (paddr_t *)end_loaded;
		fp = (paddr_t *)fphystart;
		while (lp < le)
			*fp++ = *lp++;
	}

#ifdef DEBUG_KERNEL_START
	if ((id>>24)==0x7D) {
		*altaiscolpt = 0;
		*altaiscol = 40;
		*altaiscol = 40;
		*altaiscol = 0;
	} else
		((volatile struct Custom *)0xdff000)->color[0] = 0xAA0;	/* YELLOW */
#endif
	/*
	 * prepare to enable the MMU
	 */
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040) {
		if (id & AMIGA_68060) {
			/* do i need to clear the branch cache? */
			__asm volatile ( ".word 0x4e7a,0x0002;"
				"orl #0x400000,%%d0;"
				".word 0x4e7b,0x0002" : : : "d0");
		}

		/*
		 * movel Sysseg_pa,%a0;
		 * movec %a0,%srp;
		 */

		__asm volatile ("movel %0,%%a0; .word 0x4e7b,0x8807"
		    : : "a" (RELOC(Sysseg_pa, u_int)) : "a0");

#ifdef DEBUG_KERNEL_START
		if ((id>>24)==0x7D) {
			*altaiscolpt = 0;
			*altaiscol = 40;
			*altaiscol = 33;
			*altaiscol = 0;
		} else
			((volatile struct Custom *)0xdff000)->color[0] = 0xA70;	/* ORANGE */
#endif
	} else
#endif
	{
		/*
		 * setup and load SRP (see pmap.h)
		 */
		__asm volatile ("pmove %0@,%%srp":: "a" (&RELOC(protorp, u_int)));
	}
}
790
/*
 * start_c_finish: second half of the C-level bootstrap, called (from
 * locore.s, after start_c() has returned and the MMU is on) to finish
 * initialization using the virtual mappings established by start_c():
 * bootstrap the pmap, record convenience addresses for locore, fix up
 * the chipmem/z2mem allocators, quiesce interrupt sources, apply the
 * A3000 SuperKick ROM workaround, detect P5 PPC/68K boards, and set a
 * preliminary delay() calibration value.
 */
void
start_c_finish(void)
{
	extern u_int32_t delaydivisor;
#ifdef P5PPC68KBOARD
	struct cfdev *cdp, *ecdp;
#endif

#ifdef DEBUG_KERNEL_START
#ifdef DRACO
	if ((id >> 24) == 0x7D) { /* mapping on, is_draco() is valid */
		int i;
		/* XXX experimental Altais register mapping only */
		altaiscolpt = (volatile u_int8_t *)(DRCCADDR+PAGE_SIZE*9+0x3c8);
		altaiscol = altaiscolpt + 1;
		for (i=0; i<140000; i++) {
			*altaiscolpt = 0;
			*altaiscol = 0;
			*altaiscol = 40;
			*altaiscol = 0;
		}
	} else
#endif
		((volatile struct Custom *)CUSTOMADDR)->color[0] = 0x0a0; /* GREEN */
#endif

	/* finish VM bootstrap with the values start_c() saved for us */
	pmap_bootstrap(start_c_pstart, start_c_fphystart);
	pmap_bootstrap_finalize();

	/*
	 * to make life easier in locore.s, set these addresses explicitly
	 */
	CIAAbase = CIAADDR + 0x1001;	/* CIA-A at odd addresses ! */
	CIABbase = CIAADDR;
	CUSTOMbase = CUSTOMADDR;
#ifdef DRACO
	if (is_draco()) {
		draco_intena = (volatile u_int8_t *)DRCCADDR+1;
		draco_intpen = draco_intena + PAGE_SIZE;
		draco_intfrc = draco_intpen + PAGE_SIZE;
		draco_misc = draco_intfrc + PAGE_SIZE;
		draco_ioct = (struct drioct *)(DRCCADDR + DRIOCTLPG*PAGE_SIZE);
	} else
#endif
	{
		INTREQRaddr = (vaddr_t)&custom.intreqr;
		INTREQWaddr = (vaddr_t)&custom.intreq;
	}
	/*
	 * Get our chip memory allocation system working: the bounds were
	 * physical offsets until now; rebase them to the chipmem mapping.
	 */
	chipmem_start += CHIPMEMADDR;
	chipmem_end += CHIPMEMADDR;

	/* XXX is: this MUST NOT BE DONE before the pmap_bootstrap() call */
	if (z2mem_end) {
		z2mem_end = ZTWOMEMADDR + NZTWOMEMPG * PAGE_SIZE;
		z2mem_start = ZTWOMEMADDR;
	}

	/*
	 * disable all interrupts but allow them to be enabled
	 * by specific driver code (global int enable bit)
	 */
#ifdef DRACO
	if (is_draco()) {
		/* XXX to be done. For now, just: */
		*draco_intena = 0;
		*draco_intpen = 0;
		*draco_intfrc = 0;
		ciaa.icr = 0x7f;	/* and keyboard */
		ciab.icr = 0x7f;	/* and again */

		draco_ioct->io_control &=
		    ~(DRCNTRL_KBDINTENA|DRCNTRL_FDCINTENA); /* and another */

		draco_ioct->io_status2 &=
		    ~(DRSTAT2_PARIRQENA|DRSTAT2_TMRINTENA); /* some more */

		*(volatile u_int8_t *)(DRCCADDR + 1 +
		    DRSUPIOPG*PAGE_SIZE + 4*(0x3F8 + 1)) = 0; /* and com0 */

		*(volatile u_int8_t *)(DRCCADDR + 1 +
		    DRSUPIOPG*PAGE_SIZE + 4*(0x2F8 + 1)) = 0; /* and com1 */

		draco_ioct->io_control |= DRCNTRL_WDOGDIS; /* stop Fido */
		*draco_misc &= ~1/*DRMISC_FASTZ2*/;

	} else
#endif
	{
		custom.intena = 0x7fff;	/* disable ints */
		custom.intena = INTF_SETCLR | INTF_INTEN;
					/* but allow them */
		custom.intreq = 0x7fff;	/* clear any current */
		ciaa.icr = 0x7f;	/* and keyboard */
		ciab.icr = 0x7f;	/* and again */

		/*
		 * remember address of read and write intena register for use
		 * by extended spl?() macros.
		 */
		amiga_intena_read = &custom.intenar;
		amiga_intena_write = &custom.intena;
	}

	/*
	 * This is needed for 3000's with superkick ROM's. Bit 7 of
	 * 0xde0002 enables the ROM if set. If this isn't set the machine
	 * has to be powercycled in order for it to boot again. ICKA! RFH
	 */
	if (is_a3000()) {
		volatile unsigned char *a3000_magic_reset;

		a3000_magic_reset = (volatile unsigned char *)ztwomap(0xde0002);

		/* Turn SuperKick ROM (V36) back on */
		*a3000_magic_reset |= 0x80;
	}

#ifdef P5PPC68KBOARD
	/*
	 * Are we an P5 PPC/68K board? install different reset
	 * routine.  (manid 8512 == Phase 5; prodid 100/110 per the
	 * matching hack in start_c() above)
	 */

	for (cdp = cfdev, ecdp = &cfdev[ncfdev]; cdp < ecdp; cdp++) {
		if (cdp->rom.manid == 8512 &&
		    (cdp->rom.prodid == 100 || cdp->rom.prodid == 110)) {
			p5ppc = 1;
			break;
		}
	}
#endif
	/*
	 * preliminary delay divisor value, refined later by clock
	 * calibration; picked from the CPU type bits in machineid
	 */

	if (machineid & AMIGA_68060)
		delaydivisor = (1024 * 1) / 80;	/* 80 MHz 68060 w. BTC */

	else if (machineid & AMIGA_68040)
		delaydivisor = (1024 * 3) / 40;	/* 40 MHz 68040 */

	else if (machineid & AMIGA_68030)
		delaydivisor = (1024 * 8) / 50;	/* 50 MHz 68030 */

	else
		delaydivisor = (1024 * 8) / 33;	/* 33 MHz 68020 */
}
941
942 void
rollcolor(int color)943 rollcolor(int color)
944 {
945 int s, i;
946
947 s = splhigh();
948 /*
949 * need to adjust count -
950 * too slow when cache off, too fast when cache on
951 */
952 for (i = 0; i < 400000; i++)
953 ((volatile struct Custom *)CUSTOMbase)->color[0] = color;
954 splx(s);
955 }
956
#ifdef DEVRELOAD
/*
 * Kernel reloading code
 */

/* a.out header of the incoming kernel image */
static struct exec kernel_exec;
/* malloc'd staging buffer for the image; NULL until the header arrives */
static u_char *kernel_image;
/* rounded text size, and current write offset into kernel_image */
static u_long kernel_text_size, kernel_load_ofs;
/* load state machine position (see kernel_reload_write()) */
static u_long kernel_load_phase;
/* offset at which the current segment (text/data/symbols) ends */
static u_long kernel_load_endseg;
/* estimated symbol-area size, and resulting esym for the new kernel */
static u_long kernel_symbol_size, kernel_symbol_esym;

/* This supports the /dev/reload device, major 2, minor 20,
   hooked into mem.c.  Author: Bryan Ford. */
976 static int
kernel_image_magic_size(void)977 kernel_image_magic_size(void)
978 {
979 int sz;
980
981 /* 4 + cfdev's + Mem_Seg's + 4 */
982 sz = 8 + ncfdev * sizeof(struct cfdev)
983 + memlist->m_nseg * sizeof(struct boot_memseg);
984 return(sz);
985 }
986
/* This actually copies the magic information. */
static void
kernel_image_magic_copy(u_char *dest)
{
	/* leading word: number of ConfigDev entries */
	*((int*)dest) = ncfdev;
	dest += 4;
	/*
	 * Single copy of cfdev[] plus the memlist plus a trailing word.
	 * NOTE(review): this relies on the memlist living immediately
	 * after the cfdev array in memory — the layout start_c() reads
	 * from the loader — since only `cfdev' is passed as the source.
	 */
	memcpy(dest, cfdev, ncfdev * sizeof(struct cfdev)
	    + memlist->m_nseg * sizeof(struct boot_memseg) + 4);
}
996
#undef AOUT_LDPGSZ
#define AOUT_LDPGSZ 8192 /* XXX ??? */

/*
 * Write handler for /dev/reload: accepts an a.out kernel image over a
 * sequence of write(2) calls and finally boots it via kernel_reload().
 *
 * State machine (kernel_load_phase):
 *   first write      - must contain the full exec header; allocates
 *                      the staging buffer and starts phase 0
 *   phase 0          - loading text; then seek to data
 *   phase 1          - loading data; zero bss, then either symbols
 *                      (-> phase 3) or fall through to phase 2
 *   phase 3          - loading symbol + string tables (size guessed)
 *   phase 2          - image complete: append magic info and reboot
 * Returns 0 while more data is expected; ENODEV if kernel_reload()
 * declined to reboot; other errno on failure.
 */
int
kernel_reload_write(struct uio *uio)
{
	extern int eclockfreq;
	struct iovec *iov;
	int error, c;

	iov = uio->uio_iov;

	if (kernel_image == 0) {
		/*
		 * We have to get at least the whole exec header
		 * in the first write.
		 */
		if (iov->iov_len < sizeof(kernel_exec))
			return ENOEXEC;		/* XXX */

		/*
		 * Pull in the exec header and check it.
		 */
		if ((error = uiomove((void *)&kernel_exec, sizeof(kernel_exec),
		    uio)) != 0)
			return(error);
		printf("loading kernel %ld+%ld+%ld+%ld\n", kernel_exec.a_text,
		    kernel_exec.a_data, kernel_exec.a_bss,
		    esym == NULL ? 0 : kernel_exec.a_syms);
		/*
		 * Looks good - allocate memory for a kernel image.
		 * Text size is rounded up to an a.out load-page boundary.
		 */
		kernel_text_size = (kernel_exec.a_text
		    + AOUT_LDPGSZ - 1) & (-AOUT_LDPGSZ);
		/*
		 * Estimate space needed for symbol names, since we don't
		 * know how big it really is.
		 */
		if (esym != NULL) {
			kernel_symbol_size = kernel_exec.a_syms;
			kernel_symbol_size += 16 * (kernel_symbol_size / 12);
		}
		/*
		 * XXX - should check that image will fit in CHIP memory
		 * XXX return an error if it doesn't
		 */
		if ((kernel_text_size + kernel_exec.a_data +
		    kernel_exec.a_bss + kernel_symbol_size +
		    kernel_image_magic_size()) > boot_cphysize)
			return (EFBIG);
		kernel_image = malloc(kernel_text_size + kernel_exec.a_data
		    + kernel_exec.a_bss
		    + kernel_symbol_size
		    + kernel_image_magic_size(),
		    M_TEMP, M_WAITOK);
		kernel_load_ofs = 0;
		kernel_load_phase = 0;
		kernel_load_endseg = kernel_exec.a_text;
		return(0);
	}
	/*
	 * Continue loading in the kernel image.
	 */
	c = uimin(iov->iov_len, kernel_load_endseg - kernel_load_ofs);
	c = uimin(c, MAXPHYS);
	if ((error = uiomove(kernel_image + kernel_load_ofs, (int)c, uio)) != 0)
		return(error);
	kernel_load_ofs += c;

	/*
	 * Fun and games to handle loading symbols - the length of the
	 * string table isn't know until after the symbol table has
	 * been loaded.  We have to load the kernel text, data, and
	 * the symbol table, then get the size of the strings.  A
	 * new kernel image is then allocated and the data currently
	 * loaded moved to the new image.  Then continue reading the
	 * string table.  This has problems if there isn't enough
	 * room to allocate space for the two copies of the kernel
	 * image.  So the approach I took is to guess at the size
	 * of the symbol strings.  If the guess is wrong, the symbol
	 * table is ignored.
	 */

	/* keep reading until the current segment is complete */
	if (kernel_load_ofs != kernel_load_endseg)
		return(0);

	switch (kernel_load_phase) {
	case 0:		/* done loading kernel text */
		kernel_load_ofs = kernel_text_size;
		kernel_load_endseg = kernel_load_ofs + kernel_exec.a_data;
		kernel_load_phase = 1;
		break;
	case 1:		/* done loading kernel data */
		/* zero-fill bss by hand */
		for(c = 0; c < kernel_exec.a_bss; c++)
			kernel_image[kernel_load_ofs + c] = 0;
		kernel_load_ofs += kernel_exec.a_bss;
		if (esym) {
			/* store symbol size word, then read symbols */
			kernel_load_endseg = kernel_load_ofs
			    + kernel_exec.a_syms + 8;
			*((u_long *)(kernel_image + kernel_load_ofs)) =
			    kernel_exec.a_syms;
			kernel_load_ofs += 4;
			kernel_load_phase = 3;
			break;
		}
		/*FALLTHROUGH*/
	case 2:		/* done loading kernel */

		/*
		 * Put the finishing touches on the kernel image.
		 */
		kernel_image_magic_copy(kernel_image + kernel_load_ofs);
		/*
		 * Start the new kernel with code in locore.s.
		 */
		kernel_reload(kernel_image,
		    kernel_load_ofs + kernel_image_magic_size(),
		    kernel_exec.a_entry, boot_fphystart, boot_fphysize,
		    boot_cphysize, kernel_symbol_esym, eclockfreq,
		    boot_flags, scsi_nosync, boot_partition);
		/*
		 * kernel_reload() now checks to see if the reload_code
		 * is at the same location in the new kernel.
		 * If it isn't, it will return and we will return
		 * an error.
		 */
		free(kernel_image, M_TEMP);
		kernel_image = NULL;
		return (ENODEV);	/* Say operation not supported */
	case 3:		/* done loading kernel symbol table */
		/* clamp string-table size to the earlier estimate */
		c = *((u_long *)(kernel_image + kernel_load_ofs - 4));
		if (c > 16 * (kernel_exec.a_syms / 12))
			c = 16 * (kernel_exec.a_syms / 12);
		kernel_load_endseg += c - 4;
		kernel_symbol_esym = kernel_load_endseg;
#ifdef notyet
		kernel_image_copy = kernel_image;
		kernel_image = malloc(kernel_load_ofs + c
		    + kernel_image_magic_size(), M_TEMP, M_WAITOK);
		if (kernel_image == NULL)
			panic("kernel_reload failed second malloc");
		for (c = 0; c < kernel_load_ofs; c += MAXPHYS)
			memcpy(kernel_image + c, kernel_image_copy + c,
			    (kernel_load_ofs - c) > MAXPHYS ? MAXPHYS :
			    kernel_load_ofs - c);
#endif
		kernel_load_phase = 2;
	}
	return(0);
}
1147 #endif
1148
1149 int
mm_md_readwrite(dev_t dev,struct uio * uio)1150 mm_md_readwrite(dev_t dev, struct uio *uio)
1151 {
1152
1153 switch (minor(dev)) {
1154 #ifdef DEVRELOAD
1155 case DEV_RELOAD:
1156 if (uio->uio_rw == UIO_READ)
1157 return 0;
1158 return kernel_reload_write(uio);
1159 #endif
1160 default:
1161 return ENXIO;
1162 }
1163 }
1164