/*	$NetBSD: frame.h,v 1.21 2008/11/19 06:22:15 matt Exp $	*/

/*
 * Copyright (c) 1994-1997 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
36 * 37 * RiscBSD kernel project 38 * 39 * frame.h 40 * 41 * Stack frames structures 42 * 43 * Created : 30/09/94 44 */ 45 46 #ifndef _ARM32_FRAME_H_ 47 #define _ARM32_FRAME_H_ 48 49 #include <arm/frame.h> /* Common ARM stack frames */ 50 51 #ifndef _LOCORE 52 53 /* 54 * System stack frames. 55 */ 56 57 typedef struct irqframe { 58 unsigned int if_spsr; 59 unsigned int if_r0; 60 unsigned int if_r1; 61 unsigned int if_r2; 62 unsigned int if_r3; 63 unsigned int if_r4; 64 unsigned int if_r5; 65 unsigned int if_r6; 66 unsigned int if_r7; 67 unsigned int if_r8; 68 unsigned int if_r9; 69 unsigned int if_r10; 70 unsigned int if_r11; 71 unsigned int if_r12; 72 unsigned int if_usr_sp; 73 unsigned int if_usr_lr; 74 unsigned int if_svc_sp; 75 unsigned int if_svc_lr; 76 unsigned int if_pc; 77 } irqframe_t; 78 79 struct clockframe { 80 struct irqframe cf_if; 81 }; 82 83 /* 84 * Switch frame. 85 * 86 * Should be a multiple of 8 bytes for dumpsys. 87 */ 88 89 struct switchframe { 90 u_int sf_r4; 91 u_int sf_r5; 92 u_int sf_r6; 93 u_int sf_r7; 94 u_int sf_sp; 95 u_int sf_pc; 96 }; 97 98 /* 99 * Stack frame. Used during stack traces (db_trace.c) 100 */ 101 struct frame { 102 u_int fr_fp; 103 u_int fr_sp; 104 u_int fr_lr; 105 u_int fr_pc; 106 }; 107 108 #ifdef _KERNEL 109 void validate_trapframe(trapframe_t *, int); 110 #endif /* _KERNEL */ 111 112 #else /* _LOCORE */ 113 114 #include "opt_compat_netbsd.h" 115 #include "opt_execfmt.h" 116 #include "opt_multiprocessor.h" 117 #include "opt_cpuoptions.h" 118 #include "opt_arm_debug.h" 119 120 #include <machine/cpu.h> 121 122 /* 123 * This macro is used by DO_AST_AND_RESTORE_ALIGNMENT_FAULTS to process 124 * any pending softints. 125 */ 126 #ifdef __HAVE_FAST_SOFTINTS 127 #define DO_PENDING_SOFTINTS \ 128 ldr r0, [r4, #CI_INTR_DEPTH]/* Get current intr depth */ ;\ 129 teq r0, #0 /* Test for 0. 
*/ ;\ 130 bne 10f /* skip softints if != 0 */ ;\ 131 ldr r0, [r4, #CI_CPL] /* Get current priority level */;\ 132 ldr r1, [r4, #CI_SOFTINTS] /* Get pending softint mask */ ;\ 133 movs r0, r1, lsr r0 /* shift mask by cpl */ ;\ 134 blne _C_LABEL(dosoftints) /* dosoftints(void) */ ;\ 135 10: 136 #else 137 #define DO_PENDING_SOFTINTS /* nothing */ 138 #endif 139 140 /* 141 * AST_ALIGNMENT_FAULT_LOCALS and ENABLE_ALIGNMENT_FAULTS 142 * These are used in order to support dynamic enabling/disabling of 143 * alignment faults when executing old a.out ARM binaries. 144 * 145 * Note that when ENABLE_ALIGNMENTS_FAULTS finishes r4 will contain 146 * pointer to the cpu's cpu_info. DO_AST_AND_RESTORE_ALIGNMENT_FAULTS 147 * relies on r4 being preserved. 148 */ 149 #ifdef EXEC_AOUT 150 #if defined(PROCESS_ID_IS_CURLWP) || defined(PROCESS_ID_IS_CURCPU) 151 152 #define AST_ALIGNMENT_FAULT_LOCALS \ 153 .Laflt_cpufuncs: ;\ 154 .word _C_LABEL(cpufuncs) 155 156 #elif !defined(MULTIPROCESSOR) 157 158 /* 159 * Local variables needed by the AST/Alignment Fault macroes 160 */ 161 #define AST_ALIGNMENT_FAULT_LOCALS \ 162 .Laflt_cpufuncs: ;\ 163 .word _C_LABEL(cpufuncs) ;\ 164 .Laflt_cpu_info_store: ;\ 165 .word _C_LABEL(cpu_info_store) 166 167 #define GET_CURCPU(rX) \ 168 ldr rX, .Laflt_cpu_info_store 169 170 #else /* !MULTIPROCESSOR */ 171 172 #define AST_ALIGNMENT_FAULT_LOCALS \ 173 .Laflt_cpufuncs: ;\ 174 .word _C_LABEL(cpufuncs) ;\ 175 .Laflt_cpu_info: ;\ 176 .word _C_LABEL(cpu_info) 177 178 #define GET_CURCPU(rX) \ 179 ldr rX, .Laflt_cpu_info ;\ 180 bl _C_LABEL(cpu_number) ;\ 181 ldr r0, [rX, r0, lsl #2] 182 183 #endif /* MULTIPROCESSOR */ 184 185 /* 186 * This macro must be invoked following PUSHFRAMEINSVC or PUSHFRAME at 187 * the top of interrupt/exception handlers. 188 * 189 * When invoked, r0 *must* contain the value of SPSR on the current 190 * trap/interrupt frame. This is always the case if ENABLE_ALIGNMENT_FAULTS 191 * is invoked immediately after PUSHFRAMEINSVC or PUSHFRAME. 
192 */ 193 #define ENABLE_ALIGNMENT_FAULTS \ 194 and r0, r0, #(PSR_MODE) /* Test for USR32 mode */ ;\ 195 teq r0, #(PSR_USR32_MODE) ;\ 196 GET_CURCPU(r4) /* r4 = cpuinfo */ ;\ 197 bne 1f /* Not USR mode skip AFLT */ ;\ 198 ldr r1, [r4, #CI_CURPCB] /* get curpcb from cpu_info */ ;\ 199 ldr r1, [r1, #PCB_FLAGS] /* Fetch curpcb->pcb_flags */ ;\ 200 tst r1, #PCB_NOALIGNFLT ;\ 201 beq 1f /* AFLTs already enabled */ ;\ 202 ldr r2, .Laflt_cpufuncs ;\ 203 ldr r1, [r4, #CI_CTRL] /* Fetch control register */ ;\ 204 mov r0, #-1 ;\ 205 mov lr, pc ;\ 206 ldr pc, [r2, #CF_CONTROL] /* Enable alignment faults */ ;\ 207 1: 208 209 /* 210 * This macro must be invoked just before PULLFRAMEFROMSVCANDEXIT or 211 * PULLFRAME at the end of interrupt/exception handlers. We know that 212 * r4 points to cpu_info since that is what ENABLE_ALIGNMENT_FAULTS did 213 * for use. 214 */ 215 #define DO_AST_AND_RESTORE_ALIGNMENT_FAULTS \ 216 DO_PENDING_SOFTINTS ;\ 217 ldr r0, [sp] /* Get the SPSR from stack */ ;\ 218 mrs r5, cpsr /* save CPSR */ ;\ 219 orr r1, r5, #(IF32_bits) ;\ 220 msr cpsr_c, r1 /* Disable interrupts */ ;\ 221 and r0, r0, #(PSR_MODE) /* Returning to USR mode? */ ;\ 222 teq r0, #(PSR_USR32_MODE) ;\ 223 bne 3f /* Nope, get out now */ ;\ 224 1: ldr r1, [r4, #CI_ASTPENDING] /* Pending AST? */ ;\ 225 teq r1, #0x00000000 ;\ 226 bne 2f /* Yup. 
Go deal with it */ ;\ 227 ldr r1, [r4, #CI_CURPCB] /* Get current PCB */ ;\ 228 ldr r0, [r1, #PCB_FLAGS] /* Fetch curpcb->pcb_flags */ ;\ 229 tst r0, #PCB_NOALIGNFLT ;\ 230 beq 3f /* Keep AFLTs enabled */ ;\ 231 ldr r1, [r4, #CI_CTRL] /* Fetch control register */ ;\ 232 ldr r2, .Laflt_cpufuncs ;\ 233 mov r0, #-1 ;\ 234 bic r1, r1, #CPU_CONTROL_AFLT_ENABLE /* Disable AFLTs */ ;\ 235 adr lr, 3f ;\ 236 ldr pc, [r2, #CF_CONTROL] /* Set new CTRL reg value */ ;\ 237 /* NOTREACHED */ \ 238 2: mov r1, #0x00000000 ;\ 239 str r1, [r4, #CI_ASTPENDING] /* Clear astpending */ ;\ 240 bic r5, r5, #(IF32_bits) ;\ 241 msr cpsr_c, r5 /* Restore interrupts */ ;\ 242 mov r0, sp ;\ 243 bl _C_LABEL(ast) /* ast(frame) */ ;\ 244 orr r0, r5, #(IF32_bits) /* Disable IRQs */ ;\ 245 msr cpsr_c, r0 ;\ 246 b 1b /* Back around again */ ;\ 247 3: 248 249 #else /* !EXEC_AOUT */ 250 251 #if defined(PROCESS_ID_IS_CURLWP) || defined(PROCESS_ID_IS_CURCPU) 252 #define AST_ALIGNMENT_FAULT_LOCALS 253 254 #elif !defined(MULTIPROCESSOR) 255 #define AST_ALIGNMENT_FAULT_LOCALS \ 256 .Laflt_cpu_info_store: ;\ 257 .word _C_LABEL(cpu_info_store) 258 259 #define GET_CURCPU(rX) \ 260 ldr rX, .Laflt_cpu_info_store 261 262 #else 263 #define AST_ALIGNMENT_FAULT_LOCALS \ 264 .Laflt_cpu_info: ;\ 265 .word _C_LABEL(cpu_info) 266 267 #define GET_CURCPU(rX) \ 268 bl _C_LABEL(cpu_number) ;\ 269 ldr r1, .Laflt_cpu_info ;\ 270 ldr rX, [r1, r0, lsl #2] 271 272 #endif 273 274 #define ENABLE_ALIGNMENT_FAULTS GET_CURCPU(r4) 275 276 #define DO_AST_AND_RESTORE_ALIGNMENT_FAULTS \ 277 DO_PENDING_SOFTINTS ;\ 278 ldr r0, [sp] /* Get the SPSR from stack */ ;\ 279 mrs r5, cpsr /* save CPSR */ ;\ 280 orr r1, r5, #(IF32_bits) ;\ 281 msr cpsr_c, r1 /* Disable interrupts */ ;\ 282 and r0, r0, #(PSR_MODE) /* Returning to USR mode? */ ;\ 283 teq r0, #(PSR_USR32_MODE) ;\ 284 bne 2f /* Nope, get out now */ ;\ 285 1: ldr r1, [r4, #CI_ASTPENDING] /* Pending AST? */ ;\ 286 teq r1, #0x00000000 ;\ 287 beq 2f /* Nope. 
Just bail */ ;\ 288 mov r1, #0x00000000 ;\ 289 str r1, [r4, #CI_ASTPENDING] /* Clear astpending */ ;\ 290 bic r5, r5, #(IF32_bits) ;\ 291 msr cpsr_c, r5 /* Restore interrupts */ ;\ 292 mov r0, sp ;\ 293 bl _C_LABEL(ast) /* ast(frame) */ ;\ 294 orr r0, r5, #(IF32_bits) /* Disable IRQs */ ;\ 295 msr cpsr_c, r0 ;\ 296 b 1b ;\ 297 2: 298 #endif /* EXEC_AOUT */ 299 300 #ifdef ARM_LOCK_CAS_DEBUG 301 #define LOCK_CAS_DEBUG_LOCALS \ 302 .L_lock_cas_restart: ;\ 303 .word _C_LABEL(_lock_cas_restart) 304 305 #if defined(__ARMEB__) 306 #define LOCK_CAS_DEBUG_COUNT_RESTART \ 307 ble 99f ;\ 308 ldr r0, .L_lock_cas_restart ;\ 309 ldmia r0, {r1-r2} /* load ev_count */ ;\ 310 adds r2, r2, #1 /* 64-bit incr (lo) */ ;\ 311 adc r1, r1, #0 /* 64-bit incr (hi) */ ;\ 312 stmia r0, {r1-r2} /* store ev_count */ 313 #else /* __ARMEB__ */ 314 #define LOCK_CAS_DEBUG_COUNT_RESTART \ 315 ble 99f ;\ 316 ldr r0, .L_lock_cas_restart ;\ 317 ldmia r0, {r1-r2} /* load ev_count */ ;\ 318 adds r1, r1, #1 /* 64-bit incr (lo) */ ;\ 319 adc r2, r2, #0 /* 64-bit incr (hi) */ ;\ 320 stmia r0, {r1-r2} /* store ev_count */ 321 #endif /* __ARMEB__ */ 322 #else /* ARM_LOCK_CAS_DEBUG */ 323 #define LOCK_CAS_DEBUG_LOCALS /* nothing */ 324 #define LOCK_CAS_DEBUG_COUNT_RESTART /* nothing */ 325 #endif /* ARM_LOCK_CAS_DEBUG */ 326 327 #define LOCK_CAS_CHECK_LOCALS \ 328 .L_lock_cas: ;\ 329 .word _C_LABEL(_lock_cas) ;\ 330 .L_lock_cas_end: ;\ 331 .word _C_LABEL(_lock_cas_end) ;\ 332 LOCK_CAS_DEBUG_LOCALS 333 334 #define LOCK_CAS_CHECK \ 335 ldr r0, [sp] /* get saved PSR */ ;\ 336 and r0, r0, #(PSR_MODE) /* check for SVC32 mode */ ;\ 337 teq r0, #(PSR_SVC32_MODE) ;\ 338 bne 99f /* nope, get out now */ ;\ 339 ldr r0, [sp, #(IF_PC)] ;\ 340 ldr r1, .L_lock_cas_end ;\ 341 cmp r0, r1 ;\ 342 bge 99f ;\ 343 ldr r1, .L_lock_cas ;\ 344 cmp r0, r1 ;\ 345 strgt r1, [sp, #(IF_PC)] ;\ 346 LOCK_CAS_DEBUG_COUNT_RESTART ;\ 347 99: 348 349 /* 350 * ASM macros for pushing and pulling trapframes from the stack 351 * 352 * These macros 
are used to handle the irqframe and trapframe structures 353 * defined above. 354 */ 355 356 /* 357 * PUSHFRAME - macro to push a trap frame on the stack in the current mode 358 * Since the current mode is used, the SVC lr field is not defined. 359 * 360 * NOTE: r13 and r14 are stored separately as a work around for the 361 * SA110 rev 2 STM^ bug 362 */ 363 364 #define PUSHFRAME \ 365 str lr, [sp, #-4]!; /* Push the return address */ \ 366 sub sp, sp, #(4*17); /* Adjust the stack pointer */ \ 367 stmia sp, {r0-r12}; /* Push the user mode registers */ \ 368 add r0, sp, #(4*13); /* Adjust the stack pointer */ \ 369 stmia r0, {r13-r14}^; /* Push the user mode registers */ \ 370 mov r0, r0; /* NOP for previous instruction */ \ 371 mrs r0, spsr_all; /* Put the SPSR on the stack */ \ 372 str r0, [sp, #-4]! 373 374 /* 375 * PULLFRAME - macro to pull a trap frame from the stack in the current mode 376 * Since the current mode is used, the SVC lr field is ignored. 377 */ 378 379 #define PULLFRAME \ 380 ldr r0, [sp], #0x0004; /* Get the SPSR from stack */ \ 381 msr spsr_all, r0; \ 382 ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ 383 mov r0, r0; /* NOP for previous instruction */ \ 384 add sp, sp, #(4*17); /* Adjust the stack pointer */ \ 385 ldr lr, [sp], #0x0004 /* Pull the return address */ 386 387 /* 388 * PUSHFRAMEINSVC - macro to push a trap frame on the stack in SVC32 mode 389 * This should only be used if the processor is not currently in SVC32 390 * mode. The processor mode is switched to SVC mode and the trap frame is 391 * stored. The SVC lr field is used to store the previous value of 392 * lr in SVC mode. 
393 * 394 * NOTE: r13 and r14 are stored separately as a work around for the 395 * SA110 rev 2 STM^ bug 396 */ 397 398 #define PUSHFRAMEINSVC \ 399 stmdb sp, {r0-r3}; /* Save 4 registers */ \ 400 mov r0, lr; /* Save xxx32 r14 */ \ 401 mov r1, sp; /* Save xxx32 sp */ \ 402 mrs r3, spsr; /* Save xxx32 spsr */ \ 403 mrs r2, cpsr; /* Get the CPSR */ \ 404 bic r2, r2, #(PSR_MODE); /* Fix for SVC mode */ \ 405 orr r2, r2, #(PSR_SVC32_MODE); \ 406 msr cpsr_c, r2; /* Punch into SVC mode */ \ 407 mov r2, sp; /* Save SVC sp */ \ 408 str r0, [sp, #-4]!; /* Push return address */ \ 409 str lr, [sp, #-4]!; /* Push SVC lr */ \ 410 str r2, [sp, #-4]!; /* Push SVC sp */ \ 411 msr spsr_all, r3; /* Restore correct spsr */ \ 412 ldmdb r1, {r0-r3}; /* Restore 4 regs from xxx mode */ \ 413 sub sp, sp, #(4*15); /* Adjust the stack pointer */ \ 414 stmia sp, {r0-r12}; /* Push the user mode registers */ \ 415 add r0, sp, #(4*13); /* Adjust the stack pointer */ \ 416 stmia r0, {r13-r14}^; /* Push the user mode registers */ \ 417 mov r0, r0; /* NOP for previous instruction */ \ 418 mrs r0, spsr_all; /* Put the SPSR on the stack */ \ 419 str r0, [sp, #-4]! 420 421 /* 422 * PULLFRAMEFROMSVCANDEXIT - macro to pull a trap frame from the stack 423 * in SVC32 mode and restore the saved processor mode and PC. 424 * This should be used when the SVC lr register needs to be restored on 425 * exit. 426 */ 427 428 #define PULLFRAMEFROMSVCANDEXIT \ 429 ldr r0, [sp], #0x0004; /* Get the SPSR from stack */ \ 430 msr spsr_all, r0; /* restore SPSR */ \ 431 ldmia sp, {r0-r14}^; /* Restore registers (usr mode) */ \ 432 mov r0, r0; /* NOP for previous instruction */ \ 433 add sp, sp, #(4*15); /* Adjust the stack pointer */ \ 434 ldmia sp, {sp, lr, pc}^ /* Restore lr and exit */ 435 436 #endif /* _LOCORE */ 437 438 #endif /* _ARM32_FRAME_H_ */ 439