/* CMSE wrapper function used to save, clear and restore callee-saved registers
   for cmse_nonsecure_call calls.

   Copyright (C) 2016-2020 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

.syntax unified
#ifdef __ARM_PCS_VFP
# if (__ARM_FP & 0x8) || (__ARM_FEATURE_MVE & 1)
	.fpu fpv5-d16
# else
	.fpu fpv4-sp-d16
# endif
#endif

.thumb
.global __gnu_cmse_nonsecure_call
__gnu_cmse_nonsecure_call:
#if defined(__ARM_ARCH_8M_MAIN__)
push	{r5-r11, lr}
/* Clear the callee-saved registers with the value of r4, which holds the
   address of the non-secure function and is therefore not a secret.  r5 and
   r6 are used as scratch below and are cleared just before the call.  */
mov	r7, r4
mov	r8, r4
mov	r9, r4
mov	r10, r4
mov	r11, r4
mov	ip, r4

/* Save and clear the callee-saved floating-point registers only if we are
   dealing with the hard float ABI.  The unused caller-saved registers have
   already been cleared by GCC-generated code.  */
#ifdef __ARM_PCS_VFP
vpush.f64	{d8-d15}
mov	r5, #0
vmov	d8, r5, r5
#if __ARM_FP & 0x04
vmov	s18, s19, r5, r5
vmov	s20, s21, r5, r5
vmov	s22, s23, r5, r5
vmov	s24, s25, r5, r5
vmov	s26, s27, r5, r5
vmov	s28, s29, r5, r5
vmov	s30, s31, r5, r5
#elif (__ARM_FP & 0x8) || (__ARM_FEATURE_MVE & 1)
vmov.f64	d9, d8
vmov.f64	d10, d8
vmov.f64	d11, d8
vmov.f64	d12, d8
vmov.f64	d13, d8
vmov.f64	d14, d8
vmov.f64	d15, d8
#else
#error "Half precision implementation not supported."
#endif
/* Clear the cumulative exception-status bits (0-4, 7) and the
   condition code bits (28-31) of the FPSCR.  */
vmrs	r5, fpscr
movw	r6, #65376	/* Together with the movt below, r6 = 0x0FFFFF60,
			   the mask that keeps every other FPSCR bit.  */
movt	r6, #4095
ands	r5, r6
vmsr	fpscr, r5

/* We are not dealing with the hard float ABI, so we can safely use the vlstm
   and vlldm instructions without needing to preserve the registers used for
   argument passing.  */
#else
sub	sp, sp, #0x88	/* Reserve stack space to save all floating-point
			   registers, including FPSCR.  */
vlstm	sp		/* Lazy store and clearance of d0-d15 and FPSCR.  */
#endif /* __ARM_PCS_VFP */

/* Make sure to clear the 'GE' bits of the APSR register if 32-bit SIMD
   instructions are available.  */
#if defined(__ARM_FEATURE_SIMD32)
msr	APSR_nzcvqg, r4
#else
msr	APSR_nzcvq, r4
#endif

mov	r5, r4
mov	r6, r4
blxns	r4

#ifdef __ARM_PCS_VFP
vpop.f64	{d8-d15}
#else
/* VLLDM erratum mitigation sequence.  */
mrs	r5, control
tst	r5, #8		/* CONTROL_S.SFPA */
it	ne
.inst.w	0xeeb00a40	/* vmovne s0, s0 */
vlldm	sp		/* Lazy restore of d0-d15 and FPSCR.  */
add	sp, sp, #0x88	/* Free the stack space used to save the
			   floating-point registers.  */
#endif /* __ARM_PCS_VFP */

pop	{r5-r11, pc}

#elif defined (__ARM_ARCH_8M_BASE__)
/* Thumb-1 push/pop can only access r0-r7 and lr, so stage the high
   callee-saved registers r8-r11 through low registers.  */
push	{r5-r7, lr}
mov	r5, r8
mov	r6, r9
mov	r7, r10
push	{r5-r7}
mov	r5, r11
push	{r5}
/* Clear the callee-saved registers with the value of r4, which holds the
   address of the non-secure function and is therefore not a secret.  */
mov	r5, r4
mov	r6, r4
mov	r7, r4
mov	r8, r4
mov	r9, r4
mov	r10, r4
mov	r11, r4
mov	ip, r4
msr	APSR_nzcvq, r4
blxns	r4
/* Restore the high callee-saved registers.  */
pop	{r5}
mov	r11, r5
pop	{r5-r7}
mov	r10, r7
mov	r9, r6
mov	r8, r5
pop	{r5-r7, pc}

#else
#error "This should only be used for armv8-m base- and mainline."
#endif
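
/* Usage sketch (illustrative, not part of this file): when secure code built
   with -mcmse calls through a function pointer declared with the
   cmse_nonsecure_call attribute, GCC loads the target address into r4 and
   calls this wrapper, which clears register state and transfers control with
   blxns.  The type and function names below are hypothetical:

     typedef int __attribute__((cmse_nonsecure_call)) ns_func_t (int);

     int
     call_nonsecure (ns_func_t *fn, int arg)
     {
       return fn (arg);	// Compiled as a call through __gnu_cmse_nonsecure_call.
     }
*/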