/*	$NetBSD: cpuswitch.S,v 1.90 2015/04/08 12:07:40 matt Exp $	*/

/*
 * Copyright 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Steve C. Woodford for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1994-1998 Mark Brinicombe.
 * Copyright (c) 1994 Brini.
 * All rights reserved.
 *
 * This code is derived from software written for Brini by Mark Brinicombe
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Brini.
 * 4. The name of the company nor the name of the author may be used to
 *    endorse or promote products derived from this software without specific
 *    prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * RiscBSD kernel project
 *
 * cpuswitch.S
 *
 * cpu switching functions
 *
 * Created	: 15/10/94
 */

#include "opt_armfpe.h"
#include "opt_arm32_pmap.h"
#include "opt_multiprocessor.h"
#include "opt_cpuoptions.h"
#include "opt_lockdebug.h"

#include "assym.h"
#include <arm/asm.h>
#include <arm/locore.h>

	RCSID("$NetBSD: cpuswitch.S,v 1.90 2015/04/08 12:07:40 matt Exp $")

/* LINTSTUB: include <sys/param.h> */

#undef IRQdisable
#undef IRQenable

/*
 * New experimental definitions of IRQdisable and IRQenable.
 * These keep FIQs enabled since FIQs are special.
 */

#ifdef _ARM_ARCH_6
#define	IRQdisable	cpsid	i
#define	IRQenable	cpsie	i
#else
#define IRQdisable \
	mrs	r14, cpsr ; \
	orr	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#define IRQenable \
	mrs	r14, cpsr ; \
	bic	r14, r14, #(I32_bit) ; \
	msr	cpsr_c, r14

#endif

	.text

/*
 * struct lwp *
 * cpu_switchto(struct lwp *current, struct lwp *next)
 *
 * Switch to the specified next LWP.
 * Arguments:
 *
 *	r0	'struct lwp *' of the current LWP (or NULL if exiting)
 *	r1	'struct lwp *' of the LWP to switch to
 *	r2	'bool' returning (unused here)
 */
ENTRY(cpu_switchto)
	mov	ip, sp
	push	{r4-r7, ip, lr}

	/* move lwps into callee-saved registers (they survive calls) */
	mov	r6, r1
	mov	r4, r0

#ifdef TPIDRPRW_IS_CURCPU
	GET_CURCPU(r5)
#else
	ldr	r5, [r6, #L_CPU]	/* get cpu from new lwp */
#endif

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/*
	 * If the old lwp on entry to cpu_switchto was NULL then the
	 * process that called it was exiting. This means that we do
	 * not need to save the current context. Instead we can jump
	 * straight to restoring the context for the new process.
	 */
	teq	r4, #0
	beq	.Ldo_switch

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* Save old context */

	/* Get the user structure for the old lwp. */
	ldr	r7, [r4, #(L_PCB)]

	/* Save all the registers in the old lwp's pcb */
#if defined(_ARM_ARCH_DWORD_OK)
	strd	r8, r9, [r7, #(PCB_R8)]
	strd	r10, r11, [r7, #(PCB_R10)]
	strd	r12, r13, [r7, #(PCB_R12)]
#else
	add	r0, r7, #(PCB_R8)
	stmia	r0, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save the user read/write thread/process id register (TPIDRURW)
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r7, #(PCB_USER_PID_RW)]
#endif
	/*
	 * NOTE: We can now use r8-r13 until it is time to restore
	 * them for the new process.
	 */
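
	/*
	 * The CP15 c13 thread-ID registers used in this file (ARMv6+):
	 *	c13, c0, 2	TPIDRURW, user read/write (saved above)
	 *	c13, c0, 3	TPIDRURO, user read-only (holds l_private)
	 *	c13, c0, 4	TPIDRPRW, privileged-only (holds curlwp)
	 */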

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: interrupts are enabled */

	/* Restore saved context */

.Ldo_switch:
	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */

	IRQdisable
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r6, c13, c0, 4	/* set current lwp (TPIDRPRW) */
#endif

	/* We have a new curlwp now so make a note of it */
	str	r6, [r5, #(CI_CURLWP)]
	/* Get the new pcb */
	ldr	r7, [r6, #(L_PCB)]

	/* make sure we are using the new lwp's stack */
	ldr	sp, [r7, #(PCB_KSP)]

	/* At this point we can allow IRQs again. */
	IRQenable

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */
	/* rem: interrupts are enabled */

	/*
	 * If we are switching to a system lwp, don't bother restoring
	 * thread or vfp registers and skip the ras check.
	 */
	ldr	r0, [r6, #(L_FLAG)]
	tst	r0, #(LW_SYSTEM)
	bne	.Lswitch_do_restore

#ifdef _ARM_ARCH_6
	/*
	 * Restore user thread/process id registers
	 */
	ldr	r0, [r7, #(PCB_USER_PID_RW)]
	mcr	p15, 0, r0, c13, c0, 2	/* TPIDRURW */
	ldr	r0, [r6, #(L_PRIVATE)]
	mcr	p15, 0, r0, c13, c0, 3	/* TPIDRURO */
#endif

#ifdef FPU_VFP
	/*
	 * If we have a VFP, we need to load FPEXC.
	 */
	ldr	r0, [r5, #(CI_VFP_ID)]
	cmp	r0, #0
	ldrne	r0, [r7, #(PCB_VFP_FPEXC)]
	vmsrne	fpexc, r0
#endif

	/*
	 * Check for restartable atomic sequences (RAS).
	 */

	ldr	r0, [r6, #(L_PROC)]	/* fetch the proc for ras_lookup */
	ldr	r2, [r0, #(P_RASLIST)]
	cmp	r2, #0			/* p->p_raslist == NULL? */
	beq	.Lswitch_do_restore

	/* we can use r8 since we haven't restored saved registers yet. */
	ldr	r8, [r6, #(L_MD_TF)]	/* r8 = trapframe (used below) */
	ldr	r1, [r8, #(TF_PC)]	/* second ras_lookup() arg */
	bl	_C_LABEL(ras_lookup)
	cmn	r0, #1			/* -1 means "not in a RAS" */
	strne	r0, [r8, #(TF_PC)]

	/* rem: r4 = old lwp */
	/* rem: r5 = curcpu() */
	/* rem: r6 = new lwp */
	/* rem: r7 = new pcb */

.Lswitch_do_restore:
	/* Restore all the saved registers */
#ifdef __XSCALE__
	ldr	r8, [r7, #(PCB_R8)]
	ldr	r9, [r7, #(PCB_R9)]
	ldr	r10, [r7, #(PCB_R10)]
	ldr	r11, [r7, #(PCB_R11)]
	ldr	r12, [r7, #(PCB_R12)]
#elif defined(_ARM_ARCH_DWORD_OK)
	ldrd	r8, r9, [r7, #(PCB_R8)]
	ldrd	r10, r11, [r7, #(PCB_R10)]
	ldr	r12, [r7, #(PCB_R12)]
#else
	add	r0, r7, #PCB_R8
	ldmia	r0, {r8-r12}
#endif

	/* Record the old lwp for pmap_activate()'s benefit */
#ifndef ARM_MMU_EXTENDED
	str	r4, [r5, #CI_LASTLWP]
#endif

	/* cpu_switchto returns the old lwp */
	mov	r0, r4
	/* lwp_trampoline expects new lwp as its second argument */
	mov	r1, r6

#ifdef _ARM_ARCH_7
	clrex				/* cause any subsequent STREX* to fail */
#endif

	/*
	 * Pull the registers that got pushed when cpu_switchto() was called,
	 * and return.
	 */
	pop	{r4-r7, ip, pc}

END(cpu_switchto)
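
/*
 * For orientation, a C-level sketch of the contract implemented above.
 * Illustrative only: save_callee_saved() and restore_and_switch_stack()
 * are made-up helper names for this sketch, not kernel interfaces;
 * lwp_getpcb() and curcpu() are real.
 *
 *	lwp_t *
 *	cpu_switchto(lwp_t *oldlwp, lwp_t *newlwp, bool returning)
 *	{
 *		if (oldlwp != NULL)
 *			save_callee_saved(lwp_getpcb(oldlwp));
 *		curcpu()->ci_curlwp = newlwp;	// done with IRQs disabled
 *		restore_and_switch_stack(lwp_getpcb(newlwp));
 *		return oldlwp;			// handed back in r0
 *	}
 */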

ENTRY_NP(lwp_trampoline)
	/*
	 * cpu_switchto gives us:
	 *	arg0(r0) = old lwp
	 *	arg1(r1) = new lwp
	 * setup by cpu_lwp_fork:
	 *	r4 = func to call
	 *	r5 = arg to func
	 *	r6 = <unused>
	 *	r7 = spsr mode
	 */
	bl	_C_LABEL(lwp_startup)

	mov	fp, #0			/* top stack frame */
	mov	r0, r5
	mov	r1, sp
#ifdef _ARM_ARCH_5
	blx	r4
#else
	mov	lr, pc
	mov	pc, r4
#endif

	GET_CPSR(r0)
	CPSID_I(r0, r0)			/* Kill IRQs */

	GET_CURCPU(r4)			/* for DO_AST */
	DO_AST_AND_RESTORE_ALIGNMENT_FAULTS
	PULLFRAME

	movs	pc, lr			/* Exit */
END(lwp_trampoline)

AST_ALIGNMENT_FAULT_LOCALS

#ifdef __HAVE_FAST_SOFTINTS
/*
 * Called at IPL_HIGH
 *	r0 = new lwp
 *	r1 = ipl for softint_dispatch
 */
ENTRY_NP(softint_switch)
	push	{r4, r6, r7, lr}

	ldr	r7, [r0, #L_CPU]	/* get curcpu */
#if defined(TPIDRPRW_IS_CURLWP)
	mrc	p15, 0, r4, c13, c0, 4	/* get old lwp */
#else
	ldr	r4, [r7, #(CI_CURLWP)]	/* get old lwp */
#endif
	mrs	r6, cpsr		/* we need to save this */

	/*
	 * If the soft lwp blocks, it needs to return to softint_tramp
	 */
	mov	r2, sp			/* think ip */
	adr	r3, softint_tramp	/* think lr */
	push	{r2-r3}
	push	{r4-r7}

	mov	r5, r0			/* save new lwp */

	ldr	r2, [r4, #(L_PCB)]	/* get old lwp's pcb */

	/* Save all the registers into the old lwp's pcb */
#if defined(__XSCALE__) || defined(_ARM_ARCH_6)
	strd	r8, r9, [r2, #(PCB_R8)]
	strd	r10, r11, [r2, #(PCB_R10)]
	strd	r12, r13, [r2, #(PCB_R12)]
#else
	add	r3, r2, #(PCB_R8)
	stmia	r3, {r8-r13}
#endif

#ifdef _ARM_ARCH_6
	/*
	 * Save the user read/write thread/process id register (TPIDRURW)
	 * in case it was set in userland.
	 */
	mrc	p15, 0, r0, c13, c0, 2
	str	r0, [r2, #(PCB_USER_PID_RW)]
#endif

	/* this is an invariant so load before disabling intrs */
	ldr	r2, [r5, #(L_PCB)]	/* get new lwp's pcb */

	IRQdisable
	/*
	 * We're switching to a bound LWP so its l_cpu is already correct.
	 */
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r5, c13, c0, 4	/* save new lwp */
#endif
	str	r5, [r7, #(CI_CURLWP)]	/* save new lwp */

	/*
	 * Normally we'd restore {r8-r13}, but since this is a softint lwp
	 * its existing state doesn't matter. We start the stack just
	 * below the trapframe.
	 */
	ldr	sp, [r5, #(L_MD_TF)]	/* get new lwp's stack ptr */

	/* At this point we can allow IRQs again. */
	IRQenable

	/* r1 still has ipl */
	mov	r0, r4			/* r0 has pinned (old) lwp */
	bl	_C_LABEL(softint_dispatch)

	/*
	 * If we've returned, we need to change everything back and return.
	 */
	ldr	r2, [r4, #(L_PCB)]	/* get pinned lwp's pcb */

	/*
	 * We don't need to restore all the registers since another lwp was
	 * never executed. But we do need the SP from the formerly pinned lwp.
	 */

	IRQdisable
#if defined(TPIDRPRW_IS_CURLWP)
	mcr	p15, 0, r4, c13, c0, 4	/* restore pinned lwp */
#endif
	str	r4, [r7, #(CI_CURLWP)]	/* restore pinned lwp */
	ldr	sp, [r2, #(PCB_KSP)]	/* now running on the old stack. */

	/* At this point we can allow IRQs again. */
	msr	cpsr_c, r6

	/*
	 * Grab the registers that got pushed at the start and return.
	 */
	pop	{r4-r7, ip, lr}		/* eat switch frame */
	pop	{r4, r6, r7, pc}	/* pop stack and return */

END(softint_switch)
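
/*
 * For orientation, the switch above in C-like pseudocode. Illustrative
 * only: of the names below, softint_dispatch(), curcpu() and
 * lwp_getpcb() are real kernel interfaces; save_callee_saved() is a
 * made-up helper name.
 *
 *	void
 *	softint_switch(lwp_t *softlwp, int ipl)
 *	{
 *		lwp_t *pinned = curlwp;
 *
 *		save_callee_saved(lwp_getpcb(pinned));
 *		curcpu()->ci_curlwp = softlwp;	// done with IRQs disabled
 *		// borrow the softint lwp's stack, just below its trapframe
 *		softint_dispatch(pinned, ipl);
 *		curcpu()->ci_curlwp = pinned;	// back on the pinned stack
 *	}
 *
 * If the handler blocks, the {r2-r3} push above left the saved sp and
 * softint_tramp in the ip/pc slots of a cpu_switchto()-style switch
 * frame, so the pinned lwp resumes in softint_tramp below rather than
 * after the softint_dispatch() call.
 */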
/*
 * r0 = previous LWP (the soft lwp)
 * r4 = original LWP (the current lwp)
 * r6 = original CPSR
 * r7 = curcpu()
 */
ENTRY_NP(softint_tramp)
	ldr	r3, [r7, #(CI_MTX_COUNT)]	/* readjust after mi_switch */
	add	r3, r3, #1
	str	r3, [r7, #(CI_MTX_COUNT)]

	mov	r3, #0				/* tell softint_dispatch */
	str	r3, [r0, #(L_CTXSWTCH)]		/* the soft lwp blocked */

	msr	cpsr_c, r6			/* restore interrupts */
	pop	{r4, r6, r7, pc}		/* pop stack and return */
END(softint_tramp)
#endif /* __HAVE_FAST_SOFTINTS */