/*	$NetBSD: locore.S,v 1.6 2021/10/28 11:11:11 christos Exp $	*/
/*	$OpenBSD: locore.S,v 1.158 2008/07/28 19:08:46 miod Exp $	*/

/*
 * Copyright (c) 1998-2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Portions of this file are derived from other sources, see
 * the copyrights and acknowledgements below.
 */
/*
 * Copyright (c) 1990,1991,1992,1994 The University of Utah and
 * the Computer Systems Laboratory (CSL).  All rights reserved.
 *
 * THE UNIVERSITY OF UTAH AND CSL PROVIDE THIS SOFTWARE IN ITS "AS IS"
 * CONDITION, AND DISCLAIM ANY LIABILITY OF ANY KIND FOR ANY DAMAGES
 * WHATSOEVER RESULTING FROM ITS USE.
 *
 * CSL requests users of this software to return to csl-dist@cs.utah.edu any
 * improvements that they make and grant CSL redistribution rights.
 *
 *	Utah $Hdr: locore.s 1.62 94/12/15$
 */
/*
 *  (c) Copyright 1988 HEWLETT-PACKARD COMPANY
 *
 *  To anyone who acknowledges that this file is provided "AS IS"
 *  without any express or implied warranty:
 *      permission to use, copy, modify, and distribute this file
 *  for any purpose is hereby granted without fee, provided that
 *  the above copyright notice and this notice appears in all
 *  copies, and that the name of Hewlett-Packard Company not be
 *  used in advertising or publicity pertaining to distribution
 *  of the software without specific, written prior permission.
 *  Hewlett-Packard Company makes no representations about the
 *  suitability of this software for any purpose.
 */

#include "opt_multiprocessor.h"
#include "opt_cputype.h"
#include "opt_ddb.h"
#include "opt_kgdb.h"

#include <sys/errno.h>
#include <machine/param.h>
#include <machine/asm.h>
#include <machine/psl.h>
#include <machine/trap.h>
#include <machine/iomod.h>
#include <machine/pdc.h>
#include <machine/reg.h>
#include <machine/cpu.h>

#include "assym.h"

/* Some aliases for the macros in assym.h. */
#define	TRAPFRAME_SIZEOF	trapframe_SIZEOF

/*
 * Very crude debugging macros that write to com1.
 *
 * _DEBUG_PUTCHAR stores a byte to the UART transmit register and then
 * spins in a crude countdown delay loop (no status polling) so the
 * character has time to drain.  Both scratch registers are clobbered.
 */

#if 1
#define	COM1_TX_REG	(0xffd00000 + 0x5000 + 0x800)
#else
#define	COM1_TX_REG	(0xf0823000 + 0x800)
#endif
#define _DEBUG_PUTCHAR(reg1, reg2) ! \
	ldil	L%COM1_TX_REG, %reg1 ! \
	stb	%reg2, R%COM1_TX_REG(%sr1, %reg1) ! \
	ldil	L%10000000, %reg1 ! \
	ldi	1, %reg2 ! \
	comb,<>,n %reg1, %r0, -8 ! \
	sub	%reg1, %reg2, %reg1
#define DEBUG_PUTCHAR(reg1, reg2, ch) ! \
	ldi	ch, %reg2 ! \
	_DEBUG_PUTCHAR(reg1,reg2)
/*
 * Print one hex digit: extract the 4-bit field ending at bit p of reg3,
 * convert to ASCII ('0'-'9' via +48, 'a'-'f' via the extra +39 which is
 * nullified for digits < 10), and emit it.
 */
#define _DEBUG_DUMPN(reg1, reg2, reg3, p) ! \
	extru	%reg3, p, 4, %reg2 ! \
	comib,>>,n 10, %reg2, 0 ! \
	addi	39, %reg2, %reg2 ! \
	addi	48, %reg2, %reg2 ! \
	_DEBUG_PUTCHAR(reg1,reg2)
/* Print ':' followed by reg3 as eight hex digits, most significant first. */
#define DEBUG_DUMP32(reg1, reg2, reg3) ! \
	DEBUG_PUTCHAR(reg1,reg2,58) ! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 3) ! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 7) ! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 11) ! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 15) ! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 19) ! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 23) ! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 27) ! \
	_DEBUG_DUMPN(reg1, reg2, reg3, 31)

/*
 * hv-specific instructions
 * (hand-encoded "diag" opcodes; fields per the implementation's diagnose
 * register encoding)
 */
#define	DR_PAGE0	diag (0x70 << 5)
#define	DR_PAGE1	diag (0x72 << 5)

#define	MTCPU_T(x,t)	diag ((t) << 21) | ((x) << 16) | (0xb0 << 5)
#define	MFCPU_T(r,x)	diag ((r) << 21) | ((x) << 16) | (0xd0 << 5)
#define	MTCPU_C(x,t)	diag ((t) << 21) | ((x) << 16) | (0x12 << 5)
#define	MFCPU_C(r,x)	diag ((r) << 21) | ((x) << 16) | (0x30 << 5)
#define	MFCPU_U(r,x)	diag ((r) << 21) | ((x)) | (0x45 << 5)
#define	MTCPU_U(x,r)	diag ((r) << 21) | ((x) << 16) | (0xc2 << 5)

	.import	$global$, data
	.import pdc, data
	.import boothowto, data
	.import bootdev, data
	.import esym, data
	.import virtual_avail, data
	.import lwp0, data
	.import panic, code
	.import fpu_csw, data
	.import hppa_interrupt_register, data

	BSS(pdc_stack, 4)	/* temp stack for PDC call */
	BSS(kernelmapped, 4)	/* set when kernel is mapped */
	BSS(hppa_vtop, 4)	/* a vtop translation table addr (pa=va) */

	.text
	.import kernel_setup, entry

/*
 * This is the starting location for the kernel
 */
ENTRY_NOPROFILE(start,0)
/*
 *	bootapiver <= 2
 *		start(pdc, boothowto, bootdev, esym, bootapiver, argv, argc)
 *
 *	bootapiver == 3
 *		start(pdc, boothowto, bootdev, esym, bootapiver, bootinfo)
 *
 *	pdc - PDC entry point
 *	boothowto - boot flags (see "reboot.h")
 *	bootdev - boot device (index into bdevsw)
 *	esym - end of symbol table (or &end if not present)
 *	bootapiver - /boot API version
 *	argv - options block passed from /boot
 *	argc - the length of the block
 *	bootinfo - pointer to a struct bootinfo.
 */

	/*
	 * save the pdc, boothowto, bootdev and esym arguments
	 */
	ldil	L%pdc,%r1
	stw	%arg0,R%pdc(%r1)
	ldil	L%boothowto,%r1
	stw	%arg1,R%boothowto(%r1)
	ldil	L%bootdev,%r1
	stw	%arg2,R%bootdev(%r1)

	/* if esym was not supplied (NULL), substitute &end */
	comb,<>	%r0, %arg3, 1f
	nop

	ldil	L%end, %arg3
	ldo	R%end(%arg3), %arg3

1:
	ldil	L%esym,%r1
	stw	%arg3,R%esym(%r1)

	/*
	 * Put page aligned %arg3 into %t3. It is the start of available
	 * memory.
	 */
	ldo	NBPG-1(%arg3), %t3
	dep	%r0, 31, PGSHIFT, %t3

	/* bootinfo struct address for hppa_init, if bootapiver is > 2 */
	ldw	HPPA_FRAME_ARG(4)(%sp), %t1
	ldw	HPPA_FRAME_ARG(5)(%sp), %r5
	comiclr,< 2, %t1, %r0
	copy	%r0, %r5

	/*
	 * Carve a region of size s out of available memory at %t3,
	 * recording its start in variable n and advancing %t3.
	 * assuming size being page-aligned
	 */
#define STACK_ALLOC(n,s) \
	ldil	L%(n), %t1 ! \
	ldil	L%(s), %t2 ! \
	stw	%t3, R%(n)(%t1) ! \
	add	%t3, %t2, %t3

	STACK_ALLOC(pdc_stack, PDC_STACKSIZE)

	/* zero fake trapframe and lwp0 u-area */
	/* XXX - we should create a real trapframe for lwp0 */
	copy	%t3, %t2
	ldi	NBPG+TRAPFRAME_SIZEOF, %t1
L$start_zero_tf:
	stws,ma	%r0, 4(%t2)
	addib,>= -8, %t1, L$start_zero_tf	/* two words per iteration */
	stws,ma	%r0, 4(%t2)	/* XXX could use ,bc here, but gas is broken */

	/*
	 * kernel stack starts a page and a trapframe above uarea address.
	 */
	ldo	NBPG+TRAPFRAME_SIZEOF(%t3), %sp
	mtctl	%t3, CR_FPPADDR

	/* initialize the pcb */
	stw	%r0, PCB_ONFAULT(%t3)
	stw	%r0, PCB_SPACE(%t3)	/* XXX HPPA_SID_KERNEL == 0 */

	/*
	 * Setup various pointers.
	 *
	 * First free memory is %t3 plus normal U space. The last page of
	 * USPACE is the redzone if DIAGNOSTIC (see param.h).
	 */
	ldil	L%USPACE, %r4
	add	%t3, %r4, %r4

	ldil	L%lwp0, %t2
	stw	%t3, R%lwp0+L_PCB(%t2)	/* XXXuvm_lwp_getuarea */
	ldo	NBPG(%t3), %t1
	stw	%t1, R%lwp0+L_MD_REGS(%t2)

	ldil	L%TFF_LAST, %t1
	stw	%t1, TF_FLAGS-TRAPFRAME_SIZEOF(%sp)
	stw	%t3, TF_CR30-TRAPFRAME_SIZEOF(%sp)

	/*
	 * disable all coprocessors
	 */
	mtctl	%r0, CR_CCR

#ifdef MULTIPROCESSOR

#define PZ_MEM_RENDEZ		0x10
#define PZ_MEM_RENDEZ_HI	0x28

	/* Setup SMP rendezvous address (in page zero). */
	ldil	L%hw_cpu_spinup_trampoline, %r1
	ldo	R%hw_cpu_spinup_trampoline(%r1), %r1
	stw	%r1, PZ_MEM_RENDEZ(%r0)
	stw	%r0, PZ_MEM_RENDEZ_HI(%r0)
#endif

	/*
	 * We need to set the Q bit so that we can take TLB misses after we
	 * turn on virtual memory.
	 */
	copy	%sp, %arg0
	ldil	L%qisnowon, %rp
	ldo	R%qisnowon(%rp), %rp

	b	kernel_setup
	ldi	PSW_Q|PSW_I, %arg1	/* new PSW, in the delay slot */

qisnowon:
	copy	%r4, %arg0		/* first free memory */
	copy	%r5, %arg1		/* bootinfo (or 0) */
	/*
	 * call C routine hppa_init() to initialize VM
	 */
	.import hppa_init, code
	CALL(hppa_init, %r1)

	/*
	 * Cannot change the queues or IPSW with the Q-bit on
	 */
	rsm	RESET_PSW, %r0
	nop ! nop ! nop ! nop ! nop ! nop ! nop

	/*
	 * We need to do an rfi to get the C bit set
	 */
	mtctl	%r0, %pcsq
	mtctl	%r0, %pcsq
	ldil	L%virtual_mode, %t1
	ldo	R%virtual_mode(%t1), %t1
	mtctl	%t1, %pcoq
	ldo	4(%t1), %t1
	mtctl	%t1, %pcoq
	GET_CURCPU(%t1)
	ldw	CI_PSW(%t1), %t2
	mtctl	%t2, %ipsw
	rfi
	nop
	nop
	nop
	nop
	nop
	nop
	nop

virtual_mode:

	/* mark the kernel mapped; the stored address is simply non-zero */
	ldil	L%kernelmapped, %t1
	stw	%t1, R%kernelmapped(%t1)

#ifdef DDB
	.import Debugger, code
	/* have to call debugger from here, from virtual mode */
	ldil	L%boothowto, %r1
	ldw	R%boothowto(%r1), %r1
	bb,>=	%r1, 25, L$noddb	/* test boothowto bit 25 */
	nop

	break	HPPA_BREAK_KERNEL, HPPA_BREAK_KGDB
	nop
L$noddb:
#endif

	.import main,code
	CALL(main, %r1)
	/* should never return... */
	bv	(%rp)
	nop
EXIT(start)


/*
 * void kernel_setup(register_t sp, register_t psw)
 *
 * Establish a known machine state (space registers, protection IDs,
 * interrupt state, IVA, %dp, initial stack frame) and rfi to the
 * return address in %rp with the PSW given in %arg1.
 */
LEAF_ENTRY_NOPROFILE(kernel_setup)

	/*
	 * disable interrupts and turn off all bits in the psw so that
	 * we start in a known state.
	 */
	rsm	RESET_PSW, %r0
	nop ! nop ! nop ! nop ! nop ! nop

	/*
	 * go to virtual mode...
	 * get things ready for the kernel to run in virtual mode
	 */
	ldi	HPPA_PID_KERNEL, %r1
	mtctl	%r1, %pidr1
	mtctl	%r1, %pidr2
#if pbably_not_worth_it
	mtctl	%r0, %pidr3
	mtctl	%r0, %pidr4
#endif
	mtsp	%r0, %sr0
	mtsp	%r0, %sr1
	mtsp	%r0, %sr2
	mtsp	%r0, %sr3
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7

	/*
	 * to keep the spl() routines consistent we need to put the correct
	 * spl level into eiem, and reset any pending interrupts
	 */
	ldi	-1, %r1
	mtctl	%r0, %eiem	/* disable interrupts */
	mtctl	%r1, %eirr

	/*
	 * load address of interrupt vector table
	 */
	ldil	L%ivaaddr, %t2
	ldo	R%ivaaddr(%t2), %t2
	mtctl	%t2, %iva

	/*
	 * set up the dp pointer so that we can do quick references off of it
	 */
	ldil	L%$global$, %dp
	ldo	R%$global$(%dp), %dp

	/*
	 * Create a stack frame for us to call C with. Clear out the previous
	 * sp marker to mark that this is the first frame on the stack.
	 */
	copy	%arg0, %sp
	ldo	0(%arg0), %r3
	stw,ma	%r0, HPPA_FRAME_SIZE(%sp)
	stw	%r0, HPPA_FRAME_CRP(%sp)
	stw	%r0, HPPA_FRAME_PSP(%sp)

	/*
	 * We need to set the Q bit so that we can take TLB misses after we
	 * turn on virtual memory.
	 */

	mtctl	%r0, %pcsq
	mtctl	%r0, %pcsq
	mtctl	%rp, %pcoq
	ldo	4(%rp), %rp
	mtctl	%rp, %pcoq
	mtctl	%arg1, %ipsw	/* rfi will load this PSW (caller's %arg1) */
	rfi
	nop
	nop
EXIT(kernel_setup)


#ifdef MULTIPROCESSOR
/*
 * Trampoline to spin up secondary processors.
 *
 * Entered from the firmware rendezvous; mirrors the kernel_setup state
 * initialization, then picks up its cpu_info from cpu_hatch_info and
 * calls cpu_hw_init()/cpu_hatch() in virtual mode.
 */
LEAF_ENTRY_NOPROFILE(hw_cpu_spinup_trampoline)

	/*
	 * disable interrupts and turn off all bits in the psw so that
	 * we start in a known state.
	 */
	rsm	RESET_PSW, %r0
	nop ! nop ! nop ! nop ! nop ! nop

	/*
	 * go to virtual mode...
	 * get things ready for the kernel to run in virtual mode
	 */
	ldi	HPPA_PID_KERNEL, %r1
	mtctl	%r1, %pidr1
	mtctl	%r1, %pidr2
#if pbably_not_worth_it
	mtctl	%r0, %pidr3
	mtctl	%r0, %pidr4
#endif
	mtsp	%r0, %sr0
	mtsp	%r0, %sr1
	mtsp	%r0, %sr2
	mtsp	%r0, %sr3
	mtsp	%r0, %sr4
	mtsp	%r0, %sr5
	mtsp	%r0, %sr6
	mtsp	%r0, %sr7

	/*
	 * disable all coprocessors
	 */
	mtctl	%r0, CR_CCR

	/*
	 * to keep the spl() routines consistent we need to put the correct
	 * spl level into eiem, and reset any pending interrupts
	 */
	ldi	-1, %r1
	mtctl	%r0, %eiem	/* disable interrupts */
	mtctl	%r1, %eirr

	/*
	 * load address of interrupt vector table
	 */
	ldil	L%ivaaddr, %t2
	ldo	R%ivaaddr(%t2), %t2
	mtctl	%t2, %iva

	/*
	 * set up the dp pointer so that we can do quick references off of it
	 */
	ldil	L%$global$, %dp
	ldo	R%$global$(%dp), %dp

	/*
	 * Store address of cpu_info in CR_CURCPU.
	 */
	ldil	L%cpu_hatch_info, %r3
	ldw	R%cpu_hatch_info(%r3), %r3
	mtctl	%r3, CR_CURCPU

	/*
	 * Setup the stack frame for us to call C with and mark this as the
	 * first frame on the stack.
	 */
	ldw	CI_STACK(%r3), %sp
	stw,ma	%r0, HPPA_FRAME_SIZE(%sp)
	stw	%r0, HPPA_FRAME_CRP(%sp)
	stw	%r0, HPPA_FRAME_PSP(%sp)

	/* Provide CPU with page tables. */
	ldil	L%hppa_vtop, %t1
	ldw	R%hppa_vtop(%t1), %t1
	mtctl	%t1, CR_VTOP

	/* Turn on the Q bit so that we can handle TLB traps. */
	ldil	L%qenabled, %t1
	ldo	R%qenabled(%t1), %t1
	mtctl	%r0, %pcsq
	mtctl	%r0, %pcsq
	mtctl	%t1, %pcoq
	ldo	4(%t1), %t1
	mtctl	%t1, %pcoq
	ldi	PSW_Q|PSW_I, %t2
	mtctl	%t2, %ipsw
	rfi
	nop

qenabled:
	/* Call C routine to setup CPU. */
	.import cpu_hw_init, code
	CALL(cpu_hw_init, %r1)

	/* Switch CPU mode: rfi into cpu_spinup_vm with this CPU's PSW. */
	ldil	L%cpu_spinup_vm, %t1
	ldo	R%cpu_spinup_vm(%t1), %t1
	mtctl	%r0, %pcsq
	mtctl	%r0, %pcsq
	mtctl	%t1, %pcoq
	ldo	4(%t1), %t1
	mtctl	%t1, %pcoq
	mfctl	CR_CURCPU, %t2
	ldw	CI_PSW(%t2), %t2
	mtctl	%t2, %ipsw
	rfi
	nop

cpu_spinup_vm:

	/*
	 * Okay, time to return to the land of C.
	 */
	b	cpu_hatch
	nop

EXIT(hw_cpu_spinup_trampoline)
#endif


/*
 * int pdc_call(iodcio_t func, int pdc_flag, ...)
 *
 * Call into PDC firmware.  If the kernel is already mapped, switch to
 * the dedicated physical-mode pdc_stack and drop PSW to PSW_Q via a
 * HPPA_BREAK_SET_PSW break before the call, restoring everything after.
 * cr24-cr31 are saved/restored around the call since PDC may use them.
 * Runs with interrupts disabled (eiem saved and restored).
 */
ENTRY(pdc_call,160)

	mfctl	%eiem, %t1
	mtctl	%r0, %eiem	/* disable interrupts */
	stw	%rp, HPPA_FRAME_CRP(%sp)
	copy	%arg0, %r31	/* %r31 = PDC entry point */
	copy	%sp, %ret1

	ldil	L%kernelmapped, %ret0
	ldw	R%kernelmapped(%ret0), %ret0
	comb,=	%r0, %ret0, pdc_call_unmapped1
	nop

	/* mapped: run the PDC call on the dedicated physical-mode stack */
	ldil	L%pdc_stack, %ret1
	ldw	R%pdc_stack(%ret1), %ret1

pdc_call_unmapped1:
	copy	%sp, %r1
	ldo	HPPA_FRAME_SIZE+24*4(%ret1), %sp

	stw	%r1, HPPA_FRAME_PSP(%sp)

	/* save kernelmapped and eiem */
	stw	%ret0, HPPA_FRAME_ARG(21)(%sp)
	stw	%t1, HPPA_FRAME_ARG(22)(%sp)

	/* copy arguments: shift varargs down by two (func/pdc_flag consumed) */
	copy	%arg2, %arg0
	copy	%arg3, %arg1
	ldw	HPPA_FRAME_ARG(4)(%r1), %arg2
	ldw	HPPA_FRAME_ARG(5)(%r1), %arg3
	ldw	HPPA_FRAME_ARG(6)(%r1), %t1
	ldw	HPPA_FRAME_ARG(7)(%r1), %t2
	ldw	HPPA_FRAME_ARG(8)(%r1), %t3
	ldw	HPPA_FRAME_ARG(9)(%r1), %t4
	stw	%t1, HPPA_FRAME_ARG(4)(%sp)	/* XXX can use ,bc */
	stw	%t2, HPPA_FRAME_ARG(5)(%sp)
	stw	%t3, HPPA_FRAME_ARG(6)(%sp)
	stw	%t4, HPPA_FRAME_ARG(7)(%sp)
	ldw	HPPA_FRAME_ARG(10)(%r1), %t1
	ldw	HPPA_FRAME_ARG(11)(%r1), %t2
	ldw	HPPA_FRAME_ARG(12)(%r1), %t3
	ldw	HPPA_FRAME_ARG(13)(%r1), %t4
	stw	%t1, HPPA_FRAME_ARG(8)(%sp)
	stw	%t2, HPPA_FRAME_ARG(9)(%sp)
	stw	%t3, HPPA_FRAME_ARG(10)(%sp)
	stw	%t4, HPPA_FRAME_ARG(11)(%sp)

	/* save temp control regs */
	mfctl	%cr24, %t1
	mfctl	%cr25, %t2
	mfctl	%cr26, %t3
	mfctl	%cr27, %t4
	stw	%t1, HPPA_FRAME_ARG(12)(%sp)	/* XXX can use ,bc */
	stw	%t2, HPPA_FRAME_ARG(13)(%sp)
	stw	%t3, HPPA_FRAME_ARG(14)(%sp)
	stw	%t4, HPPA_FRAME_ARG(15)(%sp)
	mfctl	%cr28, %t1
	mfctl	%cr29, %t2
	mfctl	%cr30, %t3
	mfctl	%cr31, %t4
	stw	%t1, HPPA_FRAME_ARG(16)(%sp)
	stw	%t2, HPPA_FRAME_ARG(17)(%sp)
	stw	%t3, HPPA_FRAME_ARG(18)(%sp)
	stw	%t4, HPPA_FRAME_ARG(19)(%sp)

	comb,=	%r0, %ret0, pdc_call_unmapped2
	nop

	/* mapped: ask the break handler to switch PSW down for firmware */
	copy	%arg0, %t4
	ldi	PSW_Q, %arg0	/* (!pdc_flag && args[0] == PDC_PIM)? PSW_M:0) */
	break	HPPA_BREAK_KERNEL, HPPA_BREAK_SET_PSW
	nop
	stw	%ret0, HPPA_FRAME_ARG(23)(%sp)	/* remember the old PSW */
	copy	%t4, %arg0

pdc_call_unmapped2:
	/* set %rp to the instruction after the bv, then enter firmware */
	.call
	blr	%r0, %rp
	bv,n	(%r31)
	nop

	/* load temp control regs */
	ldw	HPPA_FRAME_ARG(12)(%sp), %t1
	ldw	HPPA_FRAME_ARG(13)(%sp), %t2
	ldw	HPPA_FRAME_ARG(14)(%sp), %t3
	ldw	HPPA_FRAME_ARG(15)(%sp), %t4
	mtctl	%t1, %cr24
	mtctl	%t2, %cr25
	mtctl	%t3, %cr26
	mtctl	%t4, %cr27
	ldw	HPPA_FRAME_ARG(16)(%sp), %t1
	ldw	HPPA_FRAME_ARG(17)(%sp), %t2
	ldw	HPPA_FRAME_ARG(18)(%sp), %t3
	ldw	HPPA_FRAME_ARG(19)(%sp), %t4
	mtctl	%t1, %cr28
	mtctl	%t2, %cr29
	mtctl	%t3, %cr30
	mtctl	%t4, %cr31

	ldw	HPPA_FRAME_ARG(21)(%sp), %t1	/* saved kernelmapped */
	ldw	HPPA_FRAME_ARG(22)(%sp), %t2	/* saved eiem */
	comb,=	%r0, %t1, pdc_call_unmapped3
	nop

	/* mapped: restore the pre-call PSW, preserving PDC's return value */
	copy	%ret0, %t3
	ldw	HPPA_FRAME_ARG(23)(%sp), %arg0
	break	HPPA_BREAK_KERNEL, HPPA_BREAK_SET_PSW
	nop
	copy	%t3, %ret0

pdc_call_unmapped3:
	ldw	HPPA_FRAME_PSP(%sp), %sp
	ldw	HPPA_FRAME_CRP(%sp), %rp
	bv	%r0(%rp)
	mtctl	%t2, %eiem	/* enable interrupts */
EXIT(pdc_call)

/*
 * int splraise(int ncpl);
 *
 * OR the mask for level ncpl into the current cpl; returns the old cpl.
 */
LEAF_ENTRY(splraise)
	GET_CURCPU(%t1)
	sh2addl	%arg0, %t1, %arg0	/* &ci_imask[ncpl] */
	ldw	CI_IMASK(%arg0), %arg0
	ldw	CI_CPL(%t1), %ret0	/* return the previous cpl */
	or	%ret0, %arg0, %arg0
	bv	%r0(%rp)
	stw	%arg0, CI_CPL(%t1)
EXIT(splraise)

/*
 * int spllower(int ncpl);
 */
ENTRY(spllower,HPPA_FRAME_SIZE)
	GET_CURCPU(%t1)

	ldw	CI_IPENDING(%t1), %r1	; load ipending
	andcm,<> %r1, %arg0, %r1	; and with complement of new cpl
	bv	%r0(%rp)		; nullified if something is pending
	stw	%arg0, CI_CPL(%t1)	; store new cpl

	/*
	 * Dispatch interrupts.  There's a chance
	 * that we may end up not dispatching anything;
	 * in between our load of ipending and this
	 * disabling of interrupts, something else may
	 * have come in and dispatched some or all
	 * of what we previously saw in ipending.
	 */
	mfctl	%eiem, %arg1
	mtctl	%r0, %eiem		; disable interrupts

	ldw	CI_IPENDING(%t1), %r1	; load ipending
	andcm,<> %r1, %arg0, %r1	; and with complement of new cpl
	b,n	spllower_out		; branch if we got beaten

spllower_dispatch:
	/* start stack calling convention */
	stw	%rp, HPPA_FRAME_CRP(%sp)
	copy	%r3, %r1
	copy	%sp, %r3
	stw,ma	%r1, HPPA_FRAME_SIZE(%sp)

	/* save ncpl and %eiem */
	stw	%arg0, HPPA_FRAME_ARG(0)(%r3)
	stw	%arg1, HPPA_FRAME_ARG(1)(%r3)

	/* call hppa_intr_dispatch */
	ldil	L%hppa_intr_dispatch, %r1
	ldo	R%hppa_intr_dispatch(%r1), %r1
	blr	%r0, %rp
	.call
	bv	%r0(%r1)
	copy	%r0, %arg2		; call with a NULL frame

	/* restore %eiem, we don't need ncpl */
	ldw	HPPA_FRAME_ARG(1)(%r3), %arg1

	/* end stack calling convention */
	ldw	HPPA_FRAME_CRP(%r3), %rp
	ldo	HPPA_FRAME_SIZE(%r3), %sp
	ldw,mb	-HPPA_FRAME_SIZE(%sp), %r3

spllower_out:
	/*
	 * Now return, storing %eiem in the delay slot.
	 * (hppa_intr_dispatch leaves it zero).  I think
	 * doing this in the delay slot is important to
	 * prevent recursion, but I might be being too
	 * paranoid.
	 */
	bv	%r0(%rp)
	mtctl	%arg1, %eiem
EXIT(spllower)

/*
 * void hppa_intr_schedule(int mask);
 *
 * Mark the interrupts in mask pending; if any are not blocked by the
 * current cpl, tail into spllower_dispatch to run them now.
 */
ENTRY(hppa_intr_schedule,0)
	GET_CURCPU(%t2)
	mfctl	%eiem, %arg1
	mtctl	%r0, %eiem		; disable interrupts
	ldw	CI_IPENDING(%t2), %r1	; load ipending
	or	%r1, %arg0, %r1		; or in mask
	stw	%r1, CI_IPENDING(%t2)	; store ipending
	ldw	CI_CPL(%t2), %arg0	; load cpl
	andcm,=	%r1, %arg0, %r1		; and ipending with ~cpl
	b,n	spllower_dispatch	; dispatch if we can
	bv	%r0(%rp)
	mtctl	%arg1, %eiem
EXIT(hppa_intr_schedule)

/*
 * void cpu_die(void);
 *
 * Reset the machine via a broadcast CMD_RESET, then spin until the
 * bus reset takes effect.  Never returns.
 */
LEAF_ENTRY_NOPROFILE(cpu_die)
	rsm	RESET_PSW, %r0
	nop
	nop
	mtsp	%r0, %sr0
	ldil	L%LBCAST_ADDR, %r25
	ldi	CMD_RESET, %r26
	stw	%r26, R%iomod_command(%r25)
forever:				; Loop until bus reset takes effect.
	b,n	forever
	nop
	nop
EXIT(cpu_die)

/* Include the system call and trap handling. */
#include <hppa/hppa/trap.S>

/* Include the userspace copyin/copyout functions. */
#include <hppa/hppa/copy.S>

/* Include the support functions. */
#include <hppa/hppa/support.S>

/*
 * struct lwp *
 * cpu_switchto(struct lwp *oldl, struct lwp *newl, bool returning)
 *
 * Switch context from oldl to newl: save oldl's callee-saves and stack
 * pointer in its PCB, load newl's, handle RAS, and set the FPU enable
 * bits according to whose state is in the FPU.  Returns oldl.
 */
	.align	32
ENTRY(cpu_switchto,128)
	/* start stack calling convention */
	stw	%rp, HPPA_FRAME_CRP(%sp)
	copy	%r3, %r1
	copy	%sp, %r3
	stwm	%r1, HPPA_FRAME_SIZE+16*4(%sp)
	/* Frame marker and callee saves */
	stw	%r3, HPPA_FRAME_PSP(%sp)

#ifdef DIAGNOSTIC
	b,n	switch_diag

switch_error:
	copy	%t1, %arg1
	ldil	L%panic, %r1
	ldil	L%Lcspstr, %arg0
	ldo	R%panic(%r1), %r1
	ldo	R%Lcspstr(%arg0), %arg0
	.call
	blr	%r0, %rp
	bv,n	%r0(%r1)
	nop
Lcspstr:
	.asciz	"cpu_switchto: 0x%08x stack/len 0x%08x"
	.align	8

switch_diag:
	/*
	 * Either we must be switching to the same LWP, or
	 * the new LWP's kernel stack must be reasonable.
	 */
	comb,=,n %arg0, %arg1, kstack_ok

	/*
	 * cpu_lwp_fork sets the initial stack to a page above uarea address.
	 * Check that the stack is above this value for oldl.
	 */
	ldw	L_PCB(%arg1), %arg2
	ldw	PCB_KSP(%arg2), %t1	/* t1 for switch_error */
	ldo	NBPG(%arg2), %arg2
	comb,>>,n %arg2, %t1, switch_error
	nop

	/* make sure the stack hasn't grown too big (> USPACE) */
	sub	%t1, %arg2, %t1		/* t1 for switch_error */
	ldil	L%USPACE, %arg2
	ldo	R%USPACE(%arg2), %arg2
	comb,<<=,n %arg2, %t1, switch_error
	nop
kstack_ok:
#endif

	/*
	 * save old LWP context
	 *
	 * arg0: old LWP (oldl)
	 * arg1: new LWP (newl)
	 */

	ldw	L_PCB(%arg0), %t3	/* oldl pcb */
	stw	%sp, PCB_KSP(%t3)
	fdc	%r0(%t3)	/* flush oldl pcb - surely fdc PCB_KSP(%t3) */

	/*
	 * Save the callee-save registers.  We don't need to do
	 * r3 here as it was done during stack calling convention.
	 */
	stw	%r4,   1*4(%r3)
	stw	%r5,   2*4(%r3)
	stw	%r6,   3*4(%r3)
	stw	%r7,   4*4(%r3)
	stw	%r8,   5*4(%r3)
	stw	%r9,   6*4(%r3)
	stw	%r10,  7*4(%r3)
	stw	%r11,  8*4(%r3)
	stw	%r12,  9*4(%r3)
	stw	%r13, 10*4(%r3)
	stw	%r14, 11*4(%r3)
	stw	%r15, 12*4(%r3)
	stw	%r16, 13*4(%r3)
	stw	%r17, 14*4(%r3)
	stw	%r18, 15*4(%r3)

	/*
	 * restore new LWP context
	 *
	 * arg0: old LWP (oldl)
	 * arg1: new LWP (newl)
	 */
	ldw	L_MD(%arg1), %t1
	ldw	L_PCB(%arg1), %t3
	ldw	PCB_KSP(%t3), %sp	/* restore stack of newl */

	fdc	%r0(%t3)		/* Flush newl PCB - why? */

#if 0
	ldw	TF_CR9(%t1), %t3	/* pmap_activate? */
	mtctl	%t3, %pidr2		/* pmap_activate? */
#endif
	ldw	TF_CR30(%t1), %t2	/* pmap_activate? */
	mtctl	%t2, CR_FPPADDR		/* pmap_activate? */

	SET_CURLWP(%arg1, %t2)

	ldo	-(HPPA_FRAME_SIZE+16*4)(%sp), %r3

	ldw	 1*4(%r3), %r4
	ldw	 2*4(%r3), %r5
	ldw	 3*4(%r3), %r6
	ldw	 4*4(%r3), %r7
	ldw	 5*4(%r3), %r8
	ldw	 6*4(%r3), %r9
	ldw	 7*4(%r3), %r10
	ldw	 8*4(%r3), %r11
	ldw	 9*4(%r3), %r12
	ldw	10*4(%r3), %r13
	ldw	11*4(%r3), %r14
	ldw	12*4(%r3), %r15
	ldw	13*4(%r3), %r16
	ldw	14*4(%r3), %r17
	ldw	15*4(%r3), %r18

	/*
	 * Check for restartable atomic sequences (RAS)
	 */
	ldw	L_PROC(%arg1), %t1
	ldw	P_RASLIST(%t1), %t1
	comb,=,n %r0, %t1, noras

	/*
	 * Save some caller-saves we want to preserve.
	 *
	 * We save oldl (%arg0) and newl (%arg1) for the benefit of
	 * lwp_trampoline() for when it calls lwp_startup().
	 *
	 * oldl (%arg0) is saved as it's the return value
	 */
	stw	%arg0, HPPA_FRAME_ARG(0)(%r3)	/* oldl */
	stw	%arg1, HPPA_FRAME_ARG(1)(%r3)	/* newl */

	copy	%arg1, %arg0

	.import	hppa_ras, code
	CALL(hppa_ras, %r1)

	/* restore caller-saves */
	ldw	HPPA_FRAME_ARG(1)(%r3), %arg1
	ldw	HPPA_FRAME_ARG(0)(%r3), %arg0

noras:

	/*
	 * We do have a hardware FPU.  If the LWP
	 * that we just switched to has its state in the
	 * FPU, enable the FPU, else disable it, so if
	 * the LWP does try to use the coprocessor
	 * we'll get an assist emulation trap to swap
	 * states.
	 */
	GET_CURCPU(%t1)
	mfctl	CR_CCR, %r1
	mfctl	CR_FPPADDR, %t2
	ldw	CI_FPU_STATE(%t1), %t1
	depi	0, 25, 2, %r1		; disables the FPU
	comb,<>,n %t1, %t2, 0		; nullify if LWPs different
	depi	3, 25, 2, %r1		; enables the FPU
	mtctl	%r1, CR_CCR

switch_return:
	copy	%arg0, %ret0

	ldw	HPPA_FRAME_CRP(%r3), %rp
	bv	0(%rp)
	ldwm	-(HPPA_FRAME_SIZE+16*4)(%sp), %r3
EXIT(cpu_switchto)

/*
 * This is the first code run in a new LWP after
 * cpu_switchto() has switched to it for the first time.
 *
 * This happens courtesy of the setup in cpu_lwp_fork() which
 * arranges for cpu_switchto() to call us with a frame containing
 * the first kernel function to call, and its argument.
 *
 * cpu_switchto() also makes sure that %arg0 and %arg1 are (still)
 * oldl and newl respectively.
 */
ENTRY_NOPROFILE(lwp_trampoline,HPPA_FRAME_SIZE)
	/* no return point */
	stw	%r0, HPPA_FRAME_CRP(%sp)

	/* %arg0, %arg1 are still valid from cpu_switchto */
	.import	lwp_startup, code
	CALL(lwp_startup, %r1)

	/* get trampoline func (%t3) and arg (%arg0) */
	ldw	HPPA_FRAME_ARG(3)(%sp), %arg0
	ldw	HPPA_FRAME_ARG(2)(%sp), %t3

	/* call the first kernel function */
	.call
	blr	%r0, %rp
	bv,n	%r0(%t3)
	nop

	/*
	 * Since the first kernel function returned,
	 * this LWP was created by the fork()
	 * syscall, which we now return from.
	 */
	GET_CURLWP(%t2)
	.call
	b	syscall_return
	ldw	L_MD(%t2), %t3
EXIT(lwp_trampoline)

/* Include the signal code, used in compat code */
#include <hppa/hppa/sigcode.S>

	.end