/* CMSE wrapper function used to save, clear and restore callee-saved
   registers for calls through cmse_nonsecure_call function pointers.

   Copyright (C) 2016-2022 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by the
   Free Software Foundation; either version 3, or (at your option) any
   later version.

   This file is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

.syntax unified
#ifdef __ARM_PCS_VFP
# if (__ARM_FP & 0x8) || (__ARM_FEATURE_MVE & 1)
        .fpu fpv5-d16
# else
        .fpu fpv4-sp-d16
# endif
#endif

.thumb
.type __gnu_cmse_nonsecure_call, %function
.global __gnu_cmse_nonsecure_call
/* On entry r4 holds the address of the non-secure function to call, with its
   LSB already cleared by GCC-generated code so that the blxns below performs
   a transition to the Non-secure state; r0-r3 (and, under the hard float
   ABI, the floating point argument registers) carry the arguments.
   Registers are "cleared" by copying r4 into them: the call address is
   already known to the Non-secure side, so it leaks nothing.  */
__gnu_cmse_nonsecure_call:
#if defined(__ARM_ARCH_8M_MAIN__)
        push    {r5-r11, lr}
        mov     r7, r4
        mov     r8, r4
        mov     r9, r4
        mov     r10, r4
        mov     r11, r4
        mov     ip, r4

/* Save and clear the callee-saved registers only if we are dealing with the
   hard float ABI.  The unused caller-saved registers have already been
   cleared by GCC-generated code.  */
#ifdef __ARM_PCS_VFP
        vpush.f64 {d8-d15}
        mov     r5, #0
        vmov    d8, r5, r5
#if __ARM_FP & 0x04
        vmov    s18, s19, r5, r5
        vmov    s20, s21, r5, r5
        vmov    s22, s23, r5, r5
        vmov    s24, s25, r5, r5
        vmov    s26, s27, r5, r5
        vmov    s28, s29, r5, r5
        vmov    s30, s31, r5, r5
#elif (__ARM_FP & 0x8) || (__ARM_FEATURE_MVE & 1)
        vmov.f64 d9, d8
        vmov.f64 d10, d8
        vmov.f64 d11, d8
        vmov.f64 d12, d8
        vmov.f64 d13, d8
        vmov.f64 d14, d8
        vmov.f64 d15, d8
#else
#error "Half precision implementation not supported."
#endif
/* Clear the cumulative exception-status bits (0-4,7) and the
   condition code bits (28-31) of the FPSCR.  */
        vmrs    r5, fpscr
        movw    r6, #65376      /* 0xff60.  */
        movt    r6, #4095       /* r6 := 0x0fffff60, the mask that keeps
                                   every bit except 0-4, 7 and 28-31.  */
        ands    r5, r6
        vmsr    fpscr, r5

/* We are not dealing with the hard float ABI, so we can safely use the vlstm
   and vlldm instructions without needing to preserve the registers used for
   argument passing.  */
#else
        sub     sp, sp, #0x88   /* Reserve stack space to save all floating
                                   point registers, including FPSCR.  */
        vlstm   sp              /* Lazy store and clearance of d0-d15 and
                                   FPSCR.  */
#endif /* __ARM_PCS_VFP */

/* Make sure to clear the 'GE' bits of the APSR register if 32-bit SIMD
   instructions are available.  */
#if defined(__ARM_FEATURE_SIMD32)
        msr     APSR_nzcvqg, r4
#else
        msr     APSR_nzcvq, r4
#endif

        mov     r5, r4
        mov     r6, r4
        blxns   r4

#ifdef __ARM_PCS_VFP
        vpop.f64 {d8-d15}
#else
/* VLLDM erratum mitigation sequence: if the Secure floating-point context is
   active (CONTROL_S.SFPA, bit 3, is set), execute a dummy floating point
   instruction first so that any pending lazy floating-point state
   preservation is triggered before the vlldm executes.  The vmov is emitted
   as a raw encoding because no .fpu directive is in effect on this path.  */
        mrs     r5, control
        tst     r5, #8          /* CONTROL_S.SFPA.  */
        it      ne
        .inst.w 0xeeb00a40      /* vmovne s0, s0.  */
        vlldm   sp              /* Lazy restore of d0-d15 and FPSCR.  */
        add     sp, sp, #0x88   /* Free the space used to save the floating
                                   point registers.  */
#endif /* __ARM_PCS_VFP */

        pop     {r5-r11, pc}

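/* The Baseline variant below cannot mirror the single push {r5-r11, lr} /
   pop {r5-r11, pc} pair used above: armv8-m.base only provides the 16-bit
   Thumb push/pop encodings, which accept the low registers plus lr/pc, so
   the callee-saved high registers r8-r11 are staged through r5-r7 around
   the call.  Baseline also has no floating point extension, so there is no
   FP state to save, clear or restore.  */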
#elif defined (__ARM_ARCH_8M_BASE__)
        push    {r5-r7, lr}
        mov     r5, r8
        mov     r6, r9
        mov     r7, r10
        push    {r5-r7}
        mov     r5, r11
        push    {r5}
        mov     r5, r4
        mov     r6, r4
        mov     r7, r4
        mov     r8, r4
        mov     r9, r4
        mov     r10, r4
        mov     r11, r4
        mov     ip, r4
        msr     APSR_nzcvq, r4
        blxns   r4
        pop     {r5}
        mov     r11, r5
        pop     {r5-r7}
        mov     r10, r7
        mov     r9, r6
        mov     r8, r5
        pop     {r5-r7, pc}

#else
#error "This should only be used for armv8-m base- and mainline."
#endif
.size __gnu_cmse_nonsecure_call, .-__gnu_cmse_nonsecure_call
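
/* A minimal sketch of how this wrapper is reached from C, assuming a GCC
   armv8-m target compiled with -mcmse; the names ns_func_t and secure_notify
   are hypothetical and only illustrate the pattern:

        typedef void __attribute__ ((cmse_nonsecure_call)) (*ns_func_t) (int);

        void secure_notify (ns_func_t callback, int event)
        {
          callback (event);     // Expands to roughly:
                                //   mov r4, callback  (LSB cleared)
                                //   bl  __gnu_cmse_nonsecure_call
        }

   Every call through a function pointer whose type carries the
   cmse_nonsecure_call attribute is routed through this wrapper, with the
   target address placed in r4 by the caller-side code GCC generates.  */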