/*	$NetBSD: locore.s,v 1.28 1997/10/13 00:19:37 thorpej Exp $	*/

/*
 * Copyright (c) 1988 University of Utah.
 * Copyright (c) 1980, 1990, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * from: Utah $Hdr: locore.s 1.66 92/12/22$
 *
 *	@(#)locore.s	8.6 (Berkeley) 5/27/94
 */

#include "assym.h"
#include <machine/asm.h>
#include <machine/trap.h>

/*
 * Temporary stack for a variety of purposes.
 * Try and make this the first thing in the data segment so it
 * is page aligned.  Note that if we overflow here, we run into
 * our text segment.
 */
	.data
	.space	NBPG
tmpstk:

#define RELOC(var, ar) \
	lea	var,ar

#define CALLBUG(func) \
	trap #15; .short func

/*
 * Initialization
 *
 * The bootstrap loader loads us in starting at 0, and VBR is non-zero.
 * On entry, args on stack are boot device, boot filename, console unit,
 * boot flags (howto), boot device name, filesystem type name.
 */
	.comm	_lowram,4
	.comm	_esym,4

	.text
	.globl	_edata
	.globl	_etext,_end
	.globl	start,_kernel_text
| This is for kvm_mkdb, and should be the address of the beginning
| of the kernel text segment (not necessarily the same as kernbase).
_kernel_text:
start:					| start of kernel and .text!
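	/*
	 * Entry from the boot loader.  The arguments listed above are
	 * still on the stack; the first job below is to mask interrupts,
	 * pull those arguments into d2-d7 and store them in their C
	 * variables.  RELOC() is just a "lea" -- usable this early only
	 * because the kernel is loaded at physical 0 with VA == PA --
	 * and CALLBUG() is the Bug/PROM call convention: a trap #15
	 * followed by a 16-bit function-code word.
	 */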
	movw	#PSL_HIGHIPL,sr		| no interrupts
	movl	#0,a5			| RAM starts at 0 (a5)
	movl	sp@(4), d7		| get boothowto
	movl	sp@(8), d6		| get bootaddr
	movl	sp@(12),d5		| get bootctrllun
	movl	sp@(16),d4		| get bootdevlun
	movl	sp@(20),d3		| get bootpart
	movl	sp@(24),d2		| get esyms

	RELOC(_bootpart,a0)
	movl	d3, a0@			| save bootpart
	RELOC(_bootdevlun,a0)
	movl	d4, a0@			| save bootdevlun
	RELOC(_bootctrllun,a0)
	movl	d5, a0@			| save bootctrllun
	RELOC(_bootaddr,a0)
	movl	d6, a0@			| save bootaddr
	RELOC(_boothowto,a0)
	movl	d7, a0@			| save boothowto
	/* note: d3-d7 free, d2 still in use */

	RELOC(tmpstk, a0)
	movl	a0,sp			| give ourselves a temporary stack

	RELOC(_edata,a0)		| clear out BSS
	movl	#_end-4,d0		| (must be <= 256 kB)
	subl	#_edata,d0
	lsrl	#2,d0
1:	clrl	a0@+
	dbra	d0,1b

	RELOC(_esym, a0)
	movl	d2,a0@			| store end of symbol table
	/* d2 now free */
	RELOC(_lowram, a0)
	movl	a5,a0@			| store start of physical memory

	movl	#CACHE_OFF,d0
	movc	d0,cacr			| clear and disable on-chip cache(s)

	/* ask the Bug what we are... */
	clrl	sp@-
	CALLBUG(MVMEPROM_GETBRDID)
	movl	sp@+,a1

	/* copy to a struct mvmeprom_brdid */
	movl	#MVMEPROM_BRDID_SIZE,d0
	RELOC(_boardid,a0)
1:	movb	a1@+,a0@+
	subql	#1,d0
	bne	1b

	/*
	 * Grab the model number from _boardid and use the value
	 * to setup machineid, cputype, and mmutype.
	 */
	clrl	d0
	RELOC(_boardid,a1)
	movw	a1@(MVMEPROM_BRDID_MODEL_OFFSET),d0
	RELOC(_machineid,a0)
	movl	d0,a0@

#ifdef MVME147
	/* MVME-147 - 68030 CPU/MMU, 68882 FPU */
	cmpw	#MVME_147,d0
	jne	Lnot147
	RELOC(_mmutype,a0)
	movl	#MMU_68030,a0@
	RELOC(_cputype,a0)
	movl	#CPU_68030,a0@
	RELOC(_fputype,a0)
	movl	#FPU_68882,a0@

	/* XXXCDC SHUTUP 147 CALL */
	movb	#0, 0xfffe1026		| serial interrupt off
	movb	#0, 0xfffe1018		| timer 1 off
	movb	#0, 0xfffe1028		| ethernet off
	/* XXXCDC SHUTUP 147 CALL */

	/* Save our ethernet address */
	RELOC(_myea, a0)
	movl	0xfffe0778,a0@		| XXXCDC -- HARDWIRED HEX

	/* initialize memory sizes (for pmap_bootstrap) */
#ifndef MACHINE_NONCONTIG
	movl	0xfffe0774,d1		| XXXCDC -- hardwired HEX
	moveq	#PGSHIFT,d2
	lsrl	d2,d1			| convert to page (click) number
	RELOC(_maxmem, a0)
	movl	d1,a0@			| save as maxmem
	movl	a5,d0			| lowram value from ROM via boot
	lsrl	d2,d0			| convert to page number
	subl	d0,d1			| compute amount of RAM present
	RELOC(_physmem, a0)
	movl	d1,a0@			| and physmem
#else
	/* initialise list of physical memory segments */
	RELOC(_phys_seg_list, a0)
	movl	a5,a0@			| phys_seg_list[0].ps_start
	movl	0xfffe0774,d1		| End + 1 of onboard memory
	movl	d1,a0@(4)		| phys_seg_list[0].ps_end
	clrl	a0@(8)			| phys_seg_list[0].ps_startpage
	movl	0xfffe0764,a0@(0x0c)	| Start of offboard segment
	beq	Lsavmaxmem		| Jump if none defined
	movl	0xfffe0768,d1		| End of offboard segment
	addql	#1,d1			| +1
	movl	d1,a0@(0x10)		| phys_seg_list[1].ps_end
	clrl	a0@(0x14)		| phys_seg_list[1].ps_startpage
Lsavmaxmem:
	moveq	#PGSHIFT,d2
	lsrl	d2,d1			| convert to page (click) number
	RELOC(_maxmem, a0)
	movl	d1,a0@			| save as maxmem
#endif

	jra	Lstart1
Lnot147:
#endif

#ifdef MVME162
	/* MVME-162 - 68040 CPU/MMU/FPU */
	cmpw	#MVME_162,d0
	jne	Lnot162
	RELOC(_mmutype,a0)
	movl	#MMU_68040,a0@
	RELOC(_cputype,a0)
	movl	#CPU_68040,a0@
	RELOC(_fputype,a0)
	movl	#FPU_68040,a0@
#if 1	/* XXX */
	jra	Lnotyet
#else
	/* XXX more XXX */
	jra	Lstart1
#endif
Lnot162:
#endif

#ifdef MVME167
	/* MVME-167 (also 166) - 68040 CPU/MMU/FPU */
	cmpw	#MVME_166,d0
	jeq	Lis167
	cmpw	#MVME_167,d0
	jne	Lnot167
Lis167:
	RELOC(_mmutype,a0)
	movl	#MMU_68040,a0@
	RELOC(_cputype,a0)
	movl	#CPU_68040,a0@
	RELOC(_fputype,a0)
	movl	#FPU_68040,a0@
#if 1	/* XXX */
	jra	Lnotyet
#else
	/* XXX more XXX */
	jra	Lstart1
#endif
Lnot167:
#endif

#ifdef MVME177
	/* MVME-177 (what about 172??) - 68060 CPU/MMU/FPU */
	cmpw	#MVME_177,d0
	jne	Lnot177
	RELOC(_mmutype,a0)
	movl	#MMU_68060,a0@
	RELOC(_cputype,a0)
	movl	#CPU_68060,a0@
	RELOC(_fputype,a0)
	movl	#FPU_68060,a0@
#if 1
	jra	Lnotyet
#else
	/* XXX more XXX */
	jra	Lstart1
#endif
Lnot177:
#endif

	/*
	 * If we fall to here, the board is not supported.
	 * Print a warning, then drop out to the Bug.
	 */
	.data
Lnotconf:
	.ascii	"Sorry, the kernel isn't configured for this model."
Lenotconf:

	.even
	.text
	movl	#Lenotconf,sp@-
	movl	#Lnotconf,sp@-
	CALLBUG(MVMEPROM_OUTSTRCRLF)
	addql	#8,sp			| clean up stack after call

	CALLBUG(MVMEPROM_EXIT)
	/* NOTREACHED */

Lnotyet:
	/*
	 * If we get here, it means a particular model
	 * doesn't have the necessary support code in the
	 * kernel.  Print a warning, then drop out to the Bug.
	 */
	.data
Lnotsupp:
	.ascii	"Sorry, NetBSD doesn't support this model yet."
Lenotsupp:

	.even
	.text
	movl	#Lenotsupp,sp@-
	movl	#Lnotsupp,sp@-
	CALLBUG(MVMEPROM_OUTSTRCRLF)
	addql	#8,sp			| clean up stack after call

	CALLBUG(MVMEPROM_EXIT)
	/* NOTREACHED */

Lstart1:
/* initialize source/destination control registers for movs */
	moveq	#FC_USERD,d0		| user space
	movc	d0,sfc			|   as source
	movc	d0,dfc			|   and destination of transfers
/* configure kernel and proc0 VA space so we can get going */
	.globl	_Sysseg, _pmap_bootstrap, _avail_start
#ifdef DDB
	RELOC(_esym,a0)			| end of static kernel text/data/syms
	movl	a0@,d2
	jne	Lstart2
#endif
	movl	#_end,d2		| end of static kernel text/data
Lstart2:
	addl	#NBPG-1,d2
	andl	#PG_FRAME,d2		| round to a page
	movl	d2,a4
	addl	a5,a4			| convert to PA
	movl	#0, sp@-		| firstpa
	pea	a4@			| nextpa
	RELOC(_pmap_bootstrap,a0)
	jbsr	a0@			| pmap_bootstrap(firstpa, nextpa)
	addql	#8,sp

/*
 * Enable the MMU.
 * Since the kernel is mapped logical == physical, we just turn it on.
 */
	RELOC(_Sysseg, a0)		| system segment table addr
	movl	a0@,d1			| read value (a KVA)
	addl	a5,d1			| convert to PA
	RELOC(_mmutype, a0)
	cmpl	#MMU_68040,a0@		| 68040?
	jne	Lmotommu1		| no, skip
	.long	0x4e7b1807		| movc d1,srp
	jra	Lstploaddone
Lmotommu1:
	RELOC(_protorp, a0)
	movl	#0x80000202,a0@		| nolimit + share global + 4 byte PTEs
	movl	d1,a0@(4)		| + segtable address
	pmove	a0@,srp			| load the supervisor root pointer
	movl	#0x80000002,a0@		| reinit upper half for CRP loads
Lstploaddone:
	RELOC(_mmutype, a0)
	cmpl	#MMU_68040,a0@		| 68040?
	jne	Lmotommu2		| no, skip
	moveq	#0,d0			| ensure TT regs are disabled
	.long	0x4e7b0004		| movc d0,itt0
	.long	0x4e7b0005		| movc d0,itt1
	.long	0x4e7b0006		| movc d0,dtt0
	.long	0x4e7b0007		| movc d0,dtt1
	.word	0xf4d8			| cinva bc
	.word	0xf518			| pflusha
	movl	#0x8000,d0
	.long	0x4e7b0003		| movc d0,tc
	movl	#0x80008000,d0
	movc	d0,cacr			| turn on both caches
	jmp	Lenab1
Lmotommu2:
	RELOC(_prototc, a2)
	movl	#0x82c0aa00,a2@		| value to load TC with
	pmove	a2@,tc			| load it

/*
 * Should be running mapped from this point on
 */
Lenab1:
/* select the software page size now */
	lea	tmpstk,sp		| temporary stack
	jbsr	_vm_set_page_size	| select software page size
/* set kernel stack, user SP, and initial pcb */
	movl	_proc0paddr,a1		| get proc0 pcb addr
	lea	a1@(USPACE-4),sp	| set kernel stack to end of area
	lea	_proc0,a2		| initialize proc0.p_addr so that
	movl	a1,a2@(P_ADDR)		|   we don't deref NULL in trap()
	movl	#USRSTACK-4,a2
	movl	a2,usp			| init user SP
	movl	a1,_curpcb		| proc0 is running

	tstl	_fputype		| Have an FPU?
	jeq	Lenab2			| No, skip.
	clrl	a1@(PCB_FPCTX)		| ensure null FP context
	movl	a1,sp@-
	jbsr	_m68881_restore		| restore it (does not kill a1)
	addql	#4,sp
Lenab2:
/* flush TLB and turn on caches */
	jbsr	_TBIA			| invalidate TLB
	cmpl	#MMU_68040,_mmutype	| 68040?
	jeq	Lnocache0		| yes, cache already on
	movl	#CACHE_ON,d0
	movc	d0,cacr			| clear cache(s)
	jra	Lnocache0
Lnocache0:
/* final setup for C code */
	movl	#_vectab,d0		| set VBR
	movc	d0,vbr
	jbsr	_mvme68k_init		| additional pre-main initialization
	movw	#PSL_LOWIPL,sr		| lower SPL

/*
 * Create a fake exception frame so that cpu_fork() can copy it.
 * main() never returns; we exit to user mode from a forked process
 * later on.
 */
	clrw	sp@-			| vector offset/frame type
	clrl	sp@-			| PC - filled in by "execve"
	movw	#PSL_USER,sp@-		| in user mode
	clrl	sp@-			| stack adjust count and padding
	lea	sp@(-64),sp		| construct space for D0-D7/A0-A7
	lea	_proc0,a0		| save pointer to frame
	movl	sp,a0@(P_MD_REGS)	|   in proc0.p_md.md_regs

	jra	_main			| main()

	.globl	_proc_trampoline
_proc_trampoline:
	movl	a3@(P_MD_REGS),sp	| process' frame pointer in sp
	movl	a3,sp@-
	jbsr	a2@
	addql	#4,sp
	movl	sp@(FR_SP),a0		| grab and load
	movl	a0,usp			|   user SP
	moveml	sp@+,#0x7FFF		| restore most user regs
	addql	#8,sp			| toss SP and stack adjust
	jra	rei			| and return

/*
 * Use common m68k sigcode.
 */
#include <m68k/m68k/sigcode.s>

/*
 * Trap/interrupt vector routines
 */
#include <m68k/m68k/trap_subr.s>

	.text
	.globl	_trap, _nofault, _longjmp
_buserr:
	tstl	_nofault		| device probe?
	jeq	Lberr			| no, handle as usual
	movl	_nofault,sp@-		| yes,
	jbsr	_longjmp		|  longjmp(nofault)
Lberr:
#if defined(M68040)
	cmpl	#MMU_68040,_mmutype	| 68040?
	jne	_addrerr		| no, skip
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save user registers
	movl	usp,a0			| save the user SP
	movl	a0,sp@(FR_SP)		|   in the savearea
	lea	sp@(FR_HW),a1		| grab base of HW berr frame
	moveq	#0,d0
	movw	a1@(12),d0		| grab SSW
	movl	a1@(20),d1		| and fault VA
	btst	#11,d0			| check for mis-aligned access
	jeq	Lberr2			| no, skip
	addl	#3,d1			| yes, get into next page
	andl	#PG_FRAME,d1		|  and truncate
Lberr2:
	movl	d1,sp@-			| push fault VA
	movl	d0,sp@-			| and padded SSW
	btst	#10,d0			| ATC bit set?
	jeq	Lisberr			| no, must be a real bus error
	movc	dfc,d1			| yes, get MMU fault
	movc	d0,dfc			| store faulting function code
	movl	sp@(4),a0		| get faulting address
	.word	0xf568			| ptestr a0@
	movc	d1,dfc
	.long	0x4e7a0805		| movc mmusr,d0
	movw	d0,sp@			| save (ONLY LOW 16 BITS!)
	jra	Lismerr
#endif
_addrerr:
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save user registers
	movl	usp,a0			| save the user SP
	movl	a0,sp@(FR_SP)		|   in the savearea
	lea	sp@(FR_HW),a1		| grab base of HW berr frame
#if defined(M68040)
	cmpl	#MMU_68040,_mmutype	| 68040?
	jne	Lbenot040		| no, skip
	movl	a1@(8),sp@-		| yes, push fault address
	clrl	sp@-			| no SSW for address fault
	jra	Lisaerr			| go deal with it
Lbenot040:
#endif
	moveq	#0,d0
	movw	a1@(10),d0		| grab SSW for fault processing
	btst	#12,d0			| RB set?
	jeq	LbeX0			| no, test RC
	bset	#14,d0			| yes, must set FB
	movw	d0,a1@(10)		|  for hardware too
LbeX0:
	btst	#13,d0			| RC set?
	jeq	LbeX1			| no, skip
	bset	#15,d0			| yes, must set FC
	movw	d0,a1@(10)		|  for hardware too
LbeX1:
	btst	#8,d0			| data fault?
	jeq	Lbe0			| no, check for hard cases
	movl	a1@(16),d1		| fault address is as given in frame
	jra	Lbe10			| that's it
Lbe0:
	btst	#4,a1@(6)		| long (type B) stack frame?
	jne	Lbe4			| yes, go handle
	movl	a1@(2),d1		| no, can use save PC
	btst	#14,d0			| FB set?
	jeq	Lbe3			| no, try FC
	addql	#4,d1			| yes, adjust address
	jra	Lbe10			| done
Lbe3:
	btst	#15,d0			| FC set?
	jeq	Lbe10			| no, done
	addql	#2,d1			| yes, adjust address
	jra	Lbe10			| done
Lbe4:
	movl	a1@(36),d1		| long format, use stage B address
	btst	#15,d0			| FC set?
	jeq	Lbe10			| no, all done
	subql	#2,d1			| yes, adjust address
Lbe10:
	movl	d1,sp@-			| push fault VA
	movl	d0,sp@-			| and padded SSW
	movw	a1@(6),d0		| get frame format/vector offset
	andw	#0x0FFF,d0		| clear out frame format
	cmpw	#12,d0			| address error vector?
	jeq	Lisaerr			| yes, go to it
	movl	d1,a0			| fault address
	movl	sp@,d0			| function code from ssw
	btst	#8,d0			| data fault?
	jne	Lbe10a
	movql	#1,d0			| user program access FC
					| (we don't separate data/program)
	btst	#5,a1@			| supervisor mode?
	jeq	Lbe10a			| if no, done
	movql	#5,d0			| else supervisor program access
Lbe10a:
	ptestr	d0,a0@,#7		| do a table search
	pmove	psr,sp@			| save result
	movb	sp@,d1
	btst	#2,d1			| invalid (incl. limit viol. and berr)?
	jeq	Lmightnotbemerr		| no -> wp check
	btst	#7,d1			| is it MMU table berr?
	jeq	Lismerr			| no, must be fast
	jra	Lisberr1		| real bus err needs not be fast.
Lmightnotbemerr:
	btst	#3,d1			| write protect bit set?
	jeq	Lisberr1		| no: must be bus error
	movl	sp@,d0			| ssw into low word of d0
	andw	#0xc0,d0		| Write protect is set on page:
	cmpw	#0x40,d0		| was it read cycle?
	jeq	Lisberr1		| yes, was not WPE, must be bus err
Lismerr:
	movl	#T_MMUFLT,sp@-		| show that we are an MMU fault
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisaerr:
	movl	#T_ADDRERR,sp@-		| mark address error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it
Lisberr1:
	clrw	sp@			| re-clear pad word
Lisberr:
	movl	#T_BUSERR,sp@-		| mark bus error
	jra	_ASM_LABEL(faultstkadj)	| and deal with it

/*
 * FP exceptions.
 */
_fpfline:
#if defined(M68040)
	cmpw	#0x202c,sp@(6)		| format type 2?
	jne	_illinst		| no, not an FP emulation
#ifdef FPSP
	.globl	fpsp_unimp
	jmp	fpsp_unimp		| yes, go handle it
#else
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save registers
	moveq	#T_FPEMULI,d0		| denote as FP emulation trap
	jra	fault			| do it
#endif
#else
	jra	_illinst
#endif

_fpunsupp:
#if defined(M68040)
	cmpl	#MMU_68040,_mmutype	| 68040?
	jne	_illinst		| no, treat as illinst
#ifdef FPSP
	.globl	fpsp_unsupp
	jmp	fpsp_unsupp		| yes, go handle it
#else
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save registers
	moveq	#T_FPEMULD,d0		| denote as FP emulation trap
	jra	fault			| do it
#endif
#else
	jra	_illinst
#endif

/*
 * Handles all other FP coprocessor exceptions.
 * Note that since some FP exceptions generate mid-instruction frames
 * and may cause signal delivery, we need to test for stack adjustment
 * after the trap call.
 */
	.globl	_fpfault
_fpfault:
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save user registers
	movl	usp,a0			| and save
	movl	a0,sp@(FR_SP)		|   the user stack pointer
	clrl	sp@-			| no VA arg
	movl	_curpcb,a0		| current pcb
	lea	a0@(PCB_FPCTX),a0	| address of FP savearea
	fsave	a0@			| save state
#if defined(M68040) || defined(M68060)
	/* always null state frame on 68040, 68060 */
	cmpl	#FPU_68040,_fputype
	jle	Lfptnull
#endif
	tstb	a0@			| null state frame?
	jeq	Lfptnull		| yes, safe
	clrw	d0			| no, need to tweak BIU
	movb	a0@(1),d0		| get frame size
	bset	#3,a0@(0,d0:w)		| set exc_pend bit of BIU
Lfptnull:
	fmovem	fpsr,sp@-		| push fpsr as code argument
	frestore a0@			| restore state
	movl	#T_FPERR,sp@-		| push type arg
	jra	_ASM_LABEL(faultstkadj)	| call trap and deal with stack cleanup

/*
 * Other exceptions only cause four and six word stack frames and require
 * no post-trap stack adjustment.
 */

	.globl	_straytrap
_badtrap:
	moveml	#0xC0C0,sp@-		| save scratch regs
	movw	sp@(22),sp@-		| push exception vector info
	clrw	sp@-
	movl	sp@(22),sp@-		| and PC
	jbsr	_straytrap		| report
	addql	#8,sp			| pop args
	moveml	sp@+,#0x0303		| restore regs
	jra	rei			| all done

	.globl	_syscall
_trap0:
	clrl	sp@-			| stack adjust count
	moveml	#0xFFFF,sp@-		| save user registers
	movl	usp,a0			| save the user SP
	movl	a0,sp@(FR_SP)		|   in the savearea
	movl	d0,sp@-			| push syscall number
	jbsr	_syscall		| handle it
	addql	#4,sp			| pop syscall arg
	tstl	_astpending
	jne	Lrei2
	tstb	_ssir
	jeq	Ltrap1
	movw	#SPL1,sr
	tstb	_ssir
	jne	Lsir1
Ltrap1:
	movl	sp@(FR_SP),a0		| grab and restore
	movl	a0,usp			|   user SP
	moveml	sp@+,#0x7FFF		| restore most registers
	addql	#8,sp			| pop SP and stack adjust
	rte

/*
 * Routines for traps 1 and 2.  The meaning of the two traps depends
 * on whether we are an HPUX compatible process or a native 4.3 process.
 * Our native 4.3 implementation uses trap 1 as sigreturn() and trap 2
 * as a breakpoint trap.  HPUX uses trap 1 for a breakpoint, so we have
 * to make adjustments so that trap 2 is used for sigreturn.
 */
_trap1:
#ifdef COMPAT_HPUX
	btst	#MDP_TRCB,mdpflag	| being traced by an HPUX process?
	jeq	sigreturn		| no, trap1 is sigreturn
	jra	_trace			| yes, trap1 is breakpoint
#else
	jra	sigreturn		| no, trap1 is sigreturn
#endif

_trap2:
#ifdef COMPAT_HPUX
	btst	#MDP_TRCB,mdpflag	| being traced by an HPUX process?
	jeq	_trace			| no, trap2 is breakpoint
	jra	sigreturn		| yes, trap2 is sigreturn
#else
	jra	_trace			| no, trap2 is breakpoint
#endif

/*
 * Trap 12 is the entry point for the cachectl "syscall" (both HPUX & BSD)
 *	cachectl(command, addr, length)
 * command in d0, addr in a1, length in d1
 */
	.globl	_cachectl
_trap12:
	movl	d1,sp@-			| push length
	movl	a1,sp@-			| push addr
	movl	d0,sp@-			| push command
	jbsr	_cachectl		| do it
	lea	sp@(12),sp		| pop args
	jra	rei			| all done

/*
 * Trap 15 is used for:
 *	- KGDB traps
 *	- trace traps for SUN binaries (not fully supported yet)
 * We just pass it on and let trap() sort it all out
 */
_trap15:
	clrl	sp@-
	moveml	#0xFFFF,sp@-
#ifdef KGDB
	moveq	#T_TRAP15,d0
	movw	sp@(FR_HW),d1		| get PSW
	andw	#PSL_S,d1		| from user mode?
	jeq	fault			| yes, just a regular fault
	movl	d0,sp@-
	.globl	_kgdb_trap_glue
	jbsr	_kgdb_trap_glue		| returns if no debugger
	addl	#4,sp
#endif
	moveq	#T_TRAP15,d0
	jra	fault

/*
 * Hit a breakpoint (trap 1 or 2) instruction.
 * Push the code and treat as a normal fault.
 */
_trace:
	clrl	sp@-
	moveml	#0xFFFF,sp@-
#ifdef KGDB
	moveq	#T_TRACE,d0
	movw	sp@(FR_HW),d1		| get SSW
	andw	#PSL_S,d1		| from user mode?
	jeq	fault			| no, regular fault
	movl	d0,sp@-
	jbsr	_kgdb_trap_glue		| returns if no debugger
	addl	#4,sp
#endif
	moveq	#T_TRACE,d0
	jra	fault

/*
 * The sigreturn() syscall comes here.  It requires special handling
 * because we must open a hole in the stack to fill in the (possibly much
 * larger) original stack frame.
 */
sigreturn:
	lea	sp@(-84),sp		| leave enough space for largest frame
	movl	sp@(84),sp@		| move up current 8 byte frame
	movl	sp@(88),sp@(4)
	movl	#84,sp@-		| default: adjust by 84 bytes
	moveml	#0xFFFF,sp@-		| save user registers
	movl	usp,a0			| save the user SP
	movl	a0,sp@(FR_SP)		|   in the savearea
	movl	#SYS_sigreturn,sp@-	| push syscall number
	jbsr	_syscall		| handle it
	addql	#4,sp			| pop syscall#
	movl	sp@(FR_SP),a0		| grab and restore
	movl	a0,usp			|   user SP
	lea	sp@(FR_HW),a1		| pointer to HW frame
	movw	sp@(FR_ADJ),d0		| do we need to adjust the stack?
	jeq	Lsigr1			| no, just continue
	moveq	#92,d1			| total size
	subw	d0,d1			|  - hole size = frame size
	lea	a1@(92),a0		| destination
	addw	d1,a1			| source
	lsrw	#1,d1			| convert to word count
	subqw	#1,d1			|   minus 1 for dbf
Lsigrlp:
	movw	a1@-,a0@-		| copy a word
	dbf	d1,Lsigrlp		|   continue
	movl	a0,a1			| new HW frame base
Lsigr1:
	movl	a1,sp@(FR_SP)		| new SP value
	moveml	sp@+,#0x7FFF		| restore user registers
	movl	sp@,sp			| and our SP
	jra	rei			| all done

/*
 * Interrupt handlers.
 *
 * For auto-vectored interrupts, the CPU provides the
 * vector 0x18+level.  Note we count spurious interrupts,
 * but don't do anything else with them.
 *
 * _intrhand_autovec is the entry point for auto-vectored
 * interrupts.
 *
 * For vectored interrupts, we pull the pc, evec, and exception frame
 * and pass them to the vectored interrupt dispatcher.  The vectored
 * interrupt dispatcher will deal with strays.
 *
 * _intrhand_vectored is the entry point for vectored interrupts.
 */

#define INTERRUPT_SAVEREG	moveml	#0xC0C0,sp@-
#define INTERRUPT_RESTOREREG	moveml	sp@+,#0x0303

	.globl	_isrdispatch_autovec,_nmintr
	.globl	_isrdispatch_vectored

_spurintr:	/* Level 0 */
	addql	#1,_intrcnt+0
	addql	#1,_cnt+V_INTR
	jra	rei

_intrhand_autovec:	/* Levels 1 through 6 */
	INTERRUPT_SAVEREG
	movw	sp@(22),sp@-		| push exception vector
	clrw	sp@-
	jbsr	_isrdispatch_autovec	| call dispatcher
	addql	#4,sp
	INTERRUPT_RESTOREREG
	jra	rei			| all done

_lev7intr:	/* Level 7: NMI */
	addql	#1,_intrcnt+32
	clrl	sp@-
	moveml	#0xFFFF,sp@-		| save registers
	movl	usp,a0			| and save
	movl	a0,sp@(FR_SP)		|   the user stack pointer
	jbsr	_nmintr			| call handler: XXX wrapper
	movl	sp@(FR_SP),a0		| restore
	movl	a0,usp			|   user SP
	moveml	sp@+,#0x7FFF		|   and remaining registers
	addql	#8,sp			| pop SP and stack adjust
	jra	rei			| all done

	.globl	_intrhand_vectored
_intrhand_vectored:
	INTERRUPT_SAVEREG
	lea	sp@(16),a1		| get pointer to frame
	movl	a1,sp@-
	movw	sp@(26),d0
	movl	d0,sp@-			| push exception vector info
	movl	sp@(26),sp@-		| and PC
	jbsr	_isrdispatch_vectored	| call dispatcher
	lea	sp@(12),sp		| pop value args
	INTERRUPT_RESTOREREG
	jra	rei			| all done

#undef INTERRUPT_SAVEREG
#undef INTERRUPT_RESTOREREG

/*
 * Emulation of VAX REI instruction.
 *
 * This code deals with checking for and servicing ASTs
 * (profiling, scheduling) and software interrupts (network, softclock).
 * We check for ASTs first, just like the VAX.  To avoid excess overhead
 * the T_ASTFLT handling code will also check for software interrupts so we
 * do not have to do it here.  After identifying that we need an AST we
 * drop the IPL to allow device interrupts.
 *
 * This code is complicated by the fact that sendsig may have been called
 * necessitating a stack cleanup.
 */
	.comm	_ssir,1
	.globl	_astpending
	.globl	rei
rei:
	tstl	_astpending		| AST pending?
	jeq	Lchksir			| no, go check for SIR
Lrei1:
	btst	#5,sp@			| yes, are we returning to user mode?
	jne	Lchksir			| no, go check for SIR
	movw	#PSL_LOWIPL,sr		| lower SPL
	clrl	sp@-			| stack adjust
	moveml	#0xFFFF,sp@-		| save all registers
	movl	usp,a1			| including
	movl	a1,sp@(FR_SP)		|    the users SP
Lrei2:
	clrl	sp@-			| VA == none
	clrl	sp@-			| code == none
	movl	#T_ASTFLT,sp@-		| type == async system trap
	jbsr	_trap			| go handle it
	lea	sp@(12),sp		| pop value args
	movl	sp@(FR_SP),a0		| restore user SP
	movl	a0,usp			|   from save area
	movw	sp@(FR_ADJ),d0		| need to adjust stack?
	jne	Laststkadj		| yes, go to it
	moveml	sp@+,#0x7FFF		| no, restore most user regs
	addql	#8,sp			| toss SP and stack adjust
	rte				| and do real RTE
Laststkadj:
	lea	sp@(FR_HW),a1		| pointer to HW frame
	addql	#8,a1			| source pointer
	movl	a1,a0			| source
	addw	d0,a0			|  + hole size = dest pointer
	movl	a1@-,a0@-		| copy
	movl	a1@-,a0@-		|  8 bytes
	movl	a0,sp@(FR_SP)		| new SSP
	moveml	sp@+,#0x7FFF		| restore user registers
	movl	sp@,sp			| and our SP
	rte				| and do real RTE
Lchksir:
	tstb	_ssir			| SIR pending?
	jeq	Ldorte			| no, all done
	movl	d0,sp@-			| need a scratch register
	movw	sp@(4),d0		| get SR
	andw	#PSL_IPL7,d0		| mask all but IPL
	jne	Lnosir			| came from interrupt, no can do
	movl	sp@+,d0			| restore scratch register
Lgotsir:
	movw	#SPL1,sr		| prevent others from servicing int
	tstb	_ssir			| too late?
	jeq	Ldorte			| yes, oh well...
	clrl	sp@-			| stack adjust
	moveml	#0xFFFF,sp@-		| save all registers
	movl	usp,a1			| including
	movl	a1,sp@(FR_SP)		|    the users SP
Lsir1:
	clrl	sp@-			| VA == none
	clrl	sp@-			| code == none
	movl	#T_SSIR,sp@-		| type == software interrupt
	jbsr	_trap			| go handle it
	lea	sp@(12),sp		| pop value args
	movl	sp@(FR_SP),a0		| restore
	movl	a0,usp			|   user SP
	moveml	sp@+,#0x7FFF		| and all remaining registers
	addql	#8,sp			| pop SP and stack adjust
	rte
Lnosir:
	movl	sp@+,d0			| restore scratch register
Ldorte:
	rte				| real return

/*
 * Primitives
 */

/*
 * Use common m68k support routines.
 */
#include <m68k/m68k/support.s>

	.globl	_whichqs,_qs,_cnt,_panic
	.globl	_curproc,_want_resched

/*
 * Use common m68k process manipulation routines.
 */
#include <m68k/m68k/proc_subr.s>

Lsw0:
	.asciz	"switch"
	.even

	.globl	_curpcb
	.globl	_masterpaddr		| XXX compatibility (debuggers)
	.data
_masterpaddr:				| XXX compatibility (debuggers)
_curpcb:
	.long	0
mdpflag:
	.byte	0			| copy of proc md_flags low byte
	.align	2
	.comm	nullpcb,SIZEOF_PCB
	.text

/*
 * At exit of a process, do a switch for the last time.
 * Switch to a safe stack and PCB, and deallocate the process's resources.
 */
ENTRY(switch_exit)
	movl	sp@(4),a0
	movl	#nullpcb,_curpcb	| save state into garbage pcb
	lea	tmpstk,sp		| goto a tmp stack

	/* Free old process's resources. */
	movl	#USPACE,sp@-		| size of u-area
	movl	a0@(P_ADDR),sp@-	| address of process's u-area
	movl	_kernel_map,sp@-	| map it was allocated in
	jbsr	_kmem_free		| deallocate it
	lea	sp@(12),sp		| pop args

	jra	_cpu_switch

/*
 * When no processes are on the runq, Swtch branches to Idle
 * to wait for something to come ready.
 */
	.globl	Idle
Idle:
	stop	#PSL_LOWIPL
	movw	#PSL_HIGHIPL,sr
	movl	_whichqs,d0
	jeq	Idle
	jra	Lsw1

Lbadsw:
	movl	#Lsw0,sp@-
	jbsr	_panic
	/*NOTREACHED*/

/*
 * cpu_switch()
 *
 * NOTE: On the mc68851 (318/319/330) we attempt to avoid flushing the
 * entire ATC.  The effort involved in selective flushing may not be
 * worth it, maybe we should just flush the whole thing?
 *
 * NOTE 2: With the new VM layout we now no longer know if an inactive
 * user's PTEs have been changed (formerly denoted by the SPTECHG p_flag
 * bit).  For now, we just always flush the full ATC.
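 * (That full flush is done below on every switch: a "pflusha", or its
 * 68040 equivalent, just before the new user root pointer is loaded.)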
 */
ENTRY(cpu_switch)
	movl	_curpcb,a0		| current pcb
	movw	sr,a0@(PCB_PS)		| save sr before changing ipl
#ifdef notyet
	movl	_curproc,sp@-		| remember last proc running
#endif
	clrl	_curproc

	/*
	 * Find the highest-priority queue that isn't empty,
	 * then take the first proc from that queue.
	 */
	movw	#PSL_HIGHIPL,sr		| lock out interrupts
	movl	_whichqs,d0
	jeq	Idle
Lsw1:
	movl	d0,d1
	negl	d0
	andl	d1,d0
	bfffo	d0{#0:#32},d1
	eorib	#31,d1

	movl	d1,d0
	lslb	#3,d1			| convert queue number to index
	addl	#_qs,d1			| locate queue (q)
	movl	d1,a1
	movl	a1@(P_FORW),a0		| p = q->p_forw
	cmpal	d1,a0			| anyone on queue?
	jeq	Lbadsw			| no, panic
	movl	a0@(P_FORW),a1@(P_FORW)	| q->p_forw = p->p_forw
	movl	a0@(P_FORW),a1		| n = p->p_forw
	movl	d1,a1@(P_BACK)		| n->p_back = q
	cmpal	d1,a1			| anyone left on queue?
	jne	Lsw2			| yes, skip
	movl	_whichqs,d1
	bclr	d0,d1			| no, clear bit
	movl	d1,_whichqs
Lsw2:
	movl	a0,_curproc
	clrl	_want_resched
#ifdef notyet
	movl	sp@+,a1
	cmpl	a0,a1			| switching to same proc?
	jeq	Lswdone			| yes, skip save and restore
#endif
	/*
	 * Save state of previous process in its pcb.
	 */
	movl	_curpcb,a1
	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers
	movl	usp,a2			| grab USP (a2 has been saved)
	movl	a2,a1@(PCB_USP)		| and save it

	tstl	_fputype		| Do we have an FPU?
	jeq	Lswnofpsave		| No?  Then don't attempt save.
	lea	a1@(PCB_FPCTX),a2	| pointer to FP save area
	fsave	a2@			| save FP state
#if defined(M68020) || defined(M68030) || defined(M68040)
#if defined(M68060)
	cmpl	#FPU_68060,_fputype
	jeq	Lsavfp60
#endif
	tstb	a2@			| null state frame?
	jeq	Lswnofpsave		| yes, all done
	fmovem	fp0-fp7,a2@(216)	| save FP general registers
	fmovem	fpcr/fpsr/fpi,a2@(312)	| save FP control registers
#if defined(M68060)
	jra	Lswnofpsave
Lsavfp60:
#endif
#endif
#if defined(M68060)
	tstb	a2@(2)			| null state frame?
	jeq	Lswnofpsave		| yes, all done
	fmovem	fp0-fp7,a2@(216)	| save FP general registers
	fmovem	fpcr,a2@(312)		| save FP control registers
	fmovem	fpsr,a2@(316)
	fmovem	fpi,a2@(320)
#endif
Lswnofpsave:

#ifdef DIAGNOSTIC
	tstl	a0@(P_WCHAN)
	jne	Lbadsw
	cmpb	#SRUN,a0@(P_STAT)
	jne	Lbadsw
#endif
	clrl	a0@(P_BACK)		| clear back link
	movb	a0@(P_MD_FLAGS+3),mdpflag | low byte of p_md.md_flags
	movl	a0@(P_ADDR),a1		| get p_addr
	movl	a1,_curpcb

	/* see if pmap_activate needs to be called; should remove this */
	movl	a0@(P_VMSPACE),a0	| vmspace = p->p_vmspace
#ifdef DIAGNOSTIC
	tstl	a0			| map == VM_MAP_NULL?
	jeq	Lbadsw			| panic
#endif
	movl	a0@(VM_PMAP),a0		| pmap = vmspace->vm_map.pmap
	tstl	a0@(PM_STCHG)		| pmap->st_changed?
	jeq	Lswnochg		| no, skip
	pea	a1@			| push pcb (at p_addr)
	pea	a0@			| push pmap
	jbsr	_pmap_activate		| pmap_activate(pmap, pcb)
	addql	#8,sp
	movl	_curpcb,a1		| restore p_addr
Lswnochg:

	lea	tmpstk,sp		| now goto a tmp stack for NMI
#if defined(M68040)
	cmpl	#MMU_68040,_mmutype	| 68040?
	jne	Lres1a			| no, skip
	.word	0xf518			| yes, pflusha
	movl	a1@(PCB_USTP),d0	| get USTP
	moveq	#PGSHIFT,d1
	lsll	d1,d0			| convert to addr
	.long	0x4e7b0806		| movc d0,urp
	jra	Lcxswdone
Lres1a:
#endif
	movl	#CACHE_CLR,d0
	movc	d0,cacr			| invalidate cache(s)
	pflusha				| flush entire TLB
	movl	a1@(PCB_USTP),d0	| get USTP
	moveq	#PGSHIFT,d1
	lsll	d1,d0			| convert to addr
	lea	_protorp,a0		| CRP prototype
	movl	d0,a0@(4)		| stash USTP
	pmove	a0@,crp			| load new user root pointer
Lcxswdone:
	moveml	a1@(PCB_REGS),#0xFCFC	| and registers
	movl	a1@(PCB_USP),a0
	movl	a0,usp			| and USP

	tstl	_fputype		| If we don't have an FPU,
	jeq	Lnofprest		|  don't try to restore it.
	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
#if defined(M68020) || defined(M68030) || defined(M68040)
#if defined(M68060)
	cmpl	#FPU_68060,_fputype
	jeq	Lresfp60rest1
#endif
	tstb	a0@			| null state frame?
	jeq	Lresfprest		| yes, easy
#if defined(M68040)
	cmpl	#FPU_68040,_fputype	| 68040?
	jne	Lresnot040		| no, skip
	clrl	sp@-			| yes...
	frestore sp@+			| ...magic!
Lresnot040:
#endif
	fmovem	a0@(312),fpcr/fpsr/fpi	| restore FP control registers
	fmovem	a0@(216),fp0-fp7	| restore FP general registers
#if defined(M68060)
	jra	Lresfprest
Lresfp60rest1:
#endif
#endif
#if defined(M68060)
	tstb	a0@(2)			| null state frame?
	jeq	Lresfprest		| yes, easy
	fmovem	a0@(312),fpcr		| restore FP control registers
	fmovem	a0@(316),fpsr
	fmovem	a0@(320),fpi
	fmovem	a0@(216),fp0-fp7	| restore FP general registers
#endif
Lresfprest:
	frestore a0@			| restore state
Lnofprest:
	movw	a1@(PCB_PS),sr		| no, restore PS
	moveq	#1,d0			| return 1 (for alternate returns)
	rts

/*
 * savectx(pcb)
 * Update pcb, saving current processor state.
 */
ENTRY(savectx)
	movl	sp@(4),a1
	movw	sr,a1@(PCB_PS)
	movl	usp,a0			| grab USP
	movl	a0,a1@(PCB_USP)		| and save it
	moveml	#0xFCFC,a1@(PCB_REGS)	| save non-scratch registers

	tstl	_fputype		| Do we have FPU?
	jeq	Lsvnofpsave		| No?  Then don't save state.
	lea	a1@(PCB_FPCTX),a0	| pointer to FP save area
	fsave	a0@			| save FP state
#if defined(M68020) || defined(M68030) || defined(M68040)
#if defined(M68060)
	cmpl	#FPU_68060,_fputype
	jeq	Lsvsavfp60
#endif
	tstb	a0@			| null state frame?
	jeq	Lsvnofpsave		| yes, all done
	fmovem	fp0-fp7,a0@(216)	| save FP general registers
	fmovem	fpcr/fpsr/fpi,a0@(312)	| save FP control registers
#if defined(M68060)
	jra	Lsvnofpsave
Lsvsavfp60:
#endif
#endif
#if defined(M68060)
	tstb	a0@(2)			| null state frame?
	jeq	Lsvnofpsave		| yes, all done
	fmovem	fp0-fp7,a0@(216)	| save FP general registers
	fmovem	fpcr,a0@(312)		| save FP control registers
	fmovem	fpsr,a0@(316)
	fmovem	fpi,a0@(320)
#endif
Lsvnofpsave:
	moveq	#0,d0			| return 0
	rts

#if defined(M68040)
ENTRY(suline)
	movl	sp@(4),a0		| address to write
	movl	_curpcb,a1		| current pcb
	movl	#Lslerr,a1@(PCB_ONFAULT) | where to return to on a fault
	movl	sp@(8),a1		| address of line
	movl	a1@+,d0			| get lword
	movsl	d0,a0@+			| put lword
	nop				| sync
	movl	a1@+,d0			| get lword
	movsl	d0,a0@+			| put lword
	nop				| sync
	movl	a1@+,d0			| get lword
	movsl	d0,a0@+			| put lword
	nop				| sync
	movl	a1@+,d0			| get lword
	movsl	d0,a0@+			| put lword
	nop				| sync
	moveq	#0,d0			| indicate no fault
	jra	Lsldone
Lslerr:
	moveq	#-1,d0
Lsldone:
	movl	_curpcb,a1		| current pcb
	clrl	a1@(PCB_ONFAULT)	| clear fault address
	rts
#endif

/*
 * Invalidate entire TLB.
 */
ENTRY(TBIA)
__TBIA:
#if defined(M68040)
	cmpl	#MMU_68040,_mmutype	| 68040?
	jne	Lmotommu3		| no, skip
	.word	0xf518			| yes, pflusha
	rts
Lmotommu3:
#endif
	tstl	_mmutype		| what mmu?
	jpl	Lmc68851a		| 68851 implies no d-cache
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
Lmc68851a:
	rts

/*
 * Invalidate any TLB entry for given VA (TB Invalidate Single)
 */
ENTRY(TBIS)
#ifdef DEBUG
	tstl	fulltflush		| being conservative?
	jne	__TBIA			| yes, flush entire TLB
#endif
#if defined(M68040)
	cmpl	#MMU_68040,_mmutype	| 68040?
	jne	Lmotommu4		| no, skip
	movl	sp@(4),a0
	movc	dfc,d1
	moveq	#1,d0			| user space
	movc	d0,dfc
	.word	0xf508			| pflush a0@
	moveq	#5,d0			| super space
	movc	d0,dfc
	.word	0xf508			| pflush a0@
	movc	d1,dfc
	rts
Lmotommu4:
#endif
	movl	sp@(4),a0		| get addr to flush
	tstl	_mmutype		| is 68851?
	jpl	Lmc68851b		| yes, skip
	pflush	#0,#0,a0@		| flush address from both sides
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip data cache
	rts
Lmc68851b:
	pflushs	#0,#0,a0@		| flush address from both sides
	rts

/*
 * Invalidate supervisor side of TLB
 */
ENTRY(TBIAS)
#ifdef DEBUG
	tstl	fulltflush		| being conservative?
	jne	__TBIA			| yes, flush everything
#endif
#if defined(M68040)
	cmpl	#MMU_68040,_mmutype	| 68040?
	jne	Lmotommu5		| no, skip
	.word	0xf518			| yes, pflusha (for now) XXX
	rts
Lmotommu5:
#endif
	pflush	#4,#4			| flush supervisor TLB entries
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

/*
 * Invalidate user side of TLB
 */
ENTRY(TBIAU)
#ifdef DEBUG
	tstl	fulltflush		| being conservative?
	jne	__TBIA			| yes, flush everything
#endif
#if defined(M68040)
	cmpl	#MMU_68040,_mmutype	| 68040?
	jne	Lmotommu6		| no, skip
	.word	0xf518			| yes, pflusha (for now) XXX
	rts
Lmotommu6:
#endif
	pflush	#0,#4			| flush user TLB entries
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

/*
 * Invalidate instruction cache
 */
ENTRY(ICIA)
#if defined(M68040)
ENTRY(ICPA)
	cmpl	#MMU_68040,_mmutype	| 68040
	jne	Lmotommu7		| no, skip
	.word	0xf498			| cinva ic
	rts
Lmotommu7:
#endif
	movl	#IC_CLEAR,d0
	movc	d0,cacr			| invalidate i-cache
	rts

/*
 * Invalidate data cache.
 * NOTE: we do not flush 68030 on-chip cache as there are no aliasing
 * problems with DC_WA.  The only cases we have to worry about are context
 * switch and TLB changes, both of which are handled "in-line" in resume
 * and TBI*.
 */
ENTRY(DCIA)
__DCIA:
#if defined(M68040)
	cmpl	#MMU_68040,_mmutype	| 68040
	jne	Lmotommu8		| no, skip
	/* XXX implement */
	rts
Lmotommu8:
#endif
	rts

ENTRY(DCIS)
__DCIS:
#if defined(M68040)
	cmpl	#MMU_68040,_mmutype	| 68040
	jne	Lmotommu9		| no, skip
	/* XXX implement */
	rts
Lmotommu9:
#endif
	rts

ENTRY(DCIU)
__DCIU:
#if defined(M68040)
	cmpl	#MMU_68040,_mmutype	| 68040
	jne	LmotommuA		| no, skip
	/* XXX implement */
	rts
LmotommuA:
#endif
	rts

#if defined(M68040)
ENTRY(ICPL)
	movl	sp@(4),a0		| address
	.word	0xf488			| cinvl ic,a0@
	rts
ENTRY(ICPP)
	movl	sp@(4),a0		| address
	.word	0xf490			| cinvp ic,a0@
	rts
ENTRY(DCPL)
	movl	sp@(4),a0		| address
	.word	0xf448			| cinvl dc,a0@
	rts
ENTRY(DCPP)
	movl	sp@(4),a0		| address
	.word	0xf450			| cinvp dc,a0@
	rts
ENTRY(DCPA)
	.word	0xf458			| cinva dc
	rts
ENTRY(DCFL)
	movl	sp@(4),a0		| address
	.word	0xf468			| cpushl dc,a0@
	rts
ENTRY(DCFP)
	movl	sp@(4),a0		| address
	.word	0xf470			| cpushp dc,a0@
	rts
#endif

ENTRY(PCIA)
#if defined(M68040)
ENTRY(DCFA)
	cmpl	#MMU_68040,_mmutype	| 68040
	jne	LmotommuB		| no, skip
	.word	0xf478			| cpusha dc
	rts
LmotommuB:
#endif
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts

ENTRY(ecacheon)
	rts

ENTRY(ecacheoff)
	rts

/*
 * Get caller's current SP value.
 * Note that simply taking the address of a local variable in a C function
 * doesn't work because callee saved registers may be outside the stack frame
 * defined by A6 (e.g. GCC generated code).
 */
	.globl	_getsp
_getsp:
	movl	sp,d0			| get current SP
	addql	#4,d0			| compensate for return address
	rts

	.globl	_getsfc, _getdfc
_getsfc:
	movc	sfc,d0
	rts
_getdfc:
	movc	dfc,d0
	rts

/*
 * Load a new user segment table pointer.
 */
ENTRY(loadustp)
	movl	sp@(4),d0		| new USTP
	moveq	#PGSHIFT, d1
	lsll	d1,d0			| convert to addr
#if defined(M68040)
	cmpl	#MMU_68040,_mmutype	| 68040?
	jne	LmotommuC		| no, skip
	.long	0x4e7b0806		| movc d0,urp
	rts
LmotommuC:
#endif
	lea	_protorp,a0		| CRP prototype
	movl	d0,a0@(4)		| stash USTP
	pmove	a0@,crp			| load root pointer
	movl	#DC_CLEAR,d0
	movc	d0,cacr			| invalidate on-chip d-cache
	rts				| since pmove flushes TLB

ENTRY(ploadw)
	movl	sp@(4),a0		| address to load
	ploadw	#1,a0@			| pre-load translation
	rts

/*
 * Set processor priority level calls.  Most are implemented with
 * inline asm expansions.  However, spl0 requires special handling
 * as we need to check for our emulated software interrupts.
 */

ENTRY(spl0)
	moveq	#0,d0
	movw	sr,d0			| get old SR for return
	movw	#PSL_LOWIPL,sr		| restore new SR
	tstb	_ssir			| software interrupt pending?
	jeq	Lspldone		| no, all done
	subql	#4,sp			| make room for RTE frame
	movl	sp@(4),sp@(2)		| position return address
	clrw	sp@(6)			| set frame type 0
	movw	#PSL_LOWIPL,sp@		| and new SR
	jra	Lgotsir			| go handle it
Lspldone:
	rts

ENTRY(getsr)
	moveq	#0,d0
	movw	sr,d0
	rts

/*
 * _delay(unsigned N)
 *
 * Delay for at least (N/256) microseconds.
 * This routine depends on the variable:  delay_divisor
 * which should be set based on the CPU clock rate.
 */
	.globl	__delay
__delay:
	| d0 = arg = (usecs << 8)
	movl	sp@(4),d0
	| d1 = delay_divisor
	movl	_delay_divisor,d1
L_delay:
	subl	d1,d0
	jgt	L_delay
	rts

/*
 * Save and restore 68881 state.
 */
ENTRY(m68881_save)
	movl	sp@(4),a0		| save area pointer
	fsave	a0@			| save state
#if defined(M68020) || defined(M68030) || defined(M68040)
#if defined(M68060)
	cmpl	#FPU_68060,_fputype
	jeq	Lm68060fpsave
#endif
Lm68881fpsave:
	tstb	a0@			| null state frame?
	jeq	Lm68881sdone		| yes, all done
	fmovem	fp0-fp7,a0@(216)	| save FP general registers
	fmovem	fpcr/fpsr/fpi,a0@(312)	| save FP control registers
Lm68881sdone:
	rts
#endif
#if defined(M68060)
Lm68060fpsave:
	tstb	a0@(2)			| null state frame?
	jeq	Lm68060sdone		| yes, all done
	fmovem	fp0-fp7,a0@(216)	| save FP general registers
	fmovem	fpcr,a0@(312)		| save FP control registers
	fmovem	fpsr,a0@(316)
	fmovem	fpi,a0@(320)
Lm68060sdone:
	rts
#endif

ENTRY(m68881_restore)
	movl	sp@(4),a0		| save area pointer
#if defined(M68020) || defined(M68030) || defined(M68040)
#if defined(M68060)
	cmpl	#FPU_68060,_fputype
	jeq	Lm68060fprestore
#endif
Lm68881fprestore:
	tstb	a0@			| null state frame?
	jeq	Lm68881rdone		| yes, easy
	fmovem	a0@(312),fpcr/fpsr/fpi	| restore FP control registers
	fmovem	a0@(216),fp0-fp7	| restore FP general registers
Lm68881rdone:
	frestore a0@			| restore state
	rts
#endif
#if defined(M68060)
Lm68060fprestore:
	tstb	a0@(2)			| null state frame?
	jeq	Lm68060fprdone		| yes, easy
	fmovem	a0@(312),fpcr		| restore FP control registers
	fmovem	a0@(316),fpsr
	fmovem	a0@(320),fpi
	fmovem	a0@(216),fp0-fp7	| restore FP general registers
Lm68060fprdone:
	frestore a0@			| restore state
	rts
#endif

/*
 * Handle the nitty-gritty of rebooting the machine.
 * Basically we just turn off the MMU and jump to the appropriate ROM routine.
 */
	.globl	_doboot
_doboot:
#if defined(M68040)
	cmpl	#MMU_68040,_mmutype	| 68040?
	jeq	Lnocache5		| yes, skip
#endif
	movl	#CACHE_OFF,d0
	movc	d0,cacr			| disable on-chip cache(s)
Lnocache5:
	movl	_boothowto,d0		| load howto
					| (used to load bootdev in d1 here)
	movl	sp@(4),d2		| arg
	lea	tmpstk,sp		| physical SP in case of NMI
	movl	#0,a7@-			| value for pmove to TC (turn off MMU)
	pmove	a7@,tc			| disable MMU
	movl	#0, d3
	movc	d3,vbr			| ROM VBR
	andl	#RB_SBOOT, d0		| mask off
	tstl	d0			|
	bne	Lsboot			| sboot?
	/* NOT sboot */
	cmpl	#0, d2			| autoboot?
	beq	1f			| yes!
	trap	#15			| return to bug
	.short	MVMEPROM_EXIT		| exit
1:	movl	#0xff800004,a0		| restart the BUG
	movl	a0@, a0			| get PC
	jmp	a0@			| go!

Lsboot: /* sboot */
	cmpl	#0, d2			| autoboot?
	beq	1f			| yes!
	jmp	0x4000			| back to sboot
1:	jmp	0x400a			| tell sboot to reboot us

	.data
	.globl	_machineid,_mmutype,_cputype,_fputype,_ectype,_protorp,_prototc
_machineid:
	.long	MVME_147		| default to MVME_147
_mmutype:
	.long	MMU_68030		| default to MMU_68030
_cputype:
	.long	CPU_68030		| default to CPU_68030
_fputype:
	.long	FPU_68882		| default to FPU_68882
_ectype:
	.long	EC_NONE			| external cache type, default to none
_protorp:
	.long	0,0			| prototype root pointer
_prototc:
	.long	0			| prototype translation control
	.globl	_bootpart,_bootdevlun,_bootctrllun,_bootaddr,_boothowto
_bootpart:
	.long	0
_bootdevlun:
	.long	0
_bootctrllun:
	.long	0
_bootaddr:
	.long	0
_boothowto:
	.long	0
	.globl	_cold
_cold:
	.long	1			| cold start flag
	.globl	_want_resched
_want_resched:
	.long	0
	.globl	_intiobase, _intiolimit
	.globl	_proc0paddr
_proc0paddr:
	.long	0			| KVA of proc0 u-area
_intiobase:
	.long	0			| KVA of base of internal IO space
_intiolimit:
	.long	0			| KVA of end of internal IO space
#ifdef DEBUG
	.globl	fulltflush, fullcflush
fulltflush:
	.long	0
fullcflush:
	.long	0
#endif
/* interrupt counters */
	.globl	_intrcnt,_eintrcnt,_intrnames,_eintrnames
_intrnames:
	.asciz	"spur"
	.asciz	"lev1"
	.asciz	"lev2"
	.asciz	"lev3"
	.asciz	"lev4"
	.asciz	"clock"
	.asciz	"lev6"
	.asciz	"nmi"
	.asciz	"statclock"
_eintrnames:
	.even
_intrcnt:
	.long	0,0,0,0,0,0,0,0,0,0
_eintrcnt:

#include <mvme68k/mvme68k/vectors.s>