/* ===-- clear_cache.c - Implement __clear_cache ---------------------------===
 *
 * The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 * ===----------------------------------------------------------------------===
 */

#include "int_lib.h"
#include <stddef.h>

#if __APPLE__
#include <libkern/OSCacheControl.h>
#endif
#if (defined(__FreeBSD__) || defined(__Bitrig__)) && defined(__arm__)
#include <sys/types.h>
#include <machine/sysarch.h>
#endif

#if defined(__NetBSD__) && defined(__arm__)
#include <machine/sysarch.h>
#endif

#if defined(__NetBSD__) && defined(__ppc__)
#include <machine/cpu.h>
#endif

#if defined(__mips__)
#include <sys/cachectl.h>
#include <sys/syscall.h>
#include <unistd.h>
#if defined(__ANDROID__) && defined(__LP64__)
/*
 * clear_mips_cache - Invalidates instruction cache for Mips.
 */
static void clear_mips_cache(const void* Addr, size_t Size) {
  asm volatile (
    ".set push\n"
    ".set noreorder\n"
    ".set noat\n"
    "beq %[Size], $zero, 20f\n"         /* If size == 0, branch around. */
    "nop\n"
    "daddu %[Size], %[Addr], %[Size]\n" /* Calculate end address + 1 */
    "rdhwr $v0, $1\n"                   /* Get step size for SYNCI.
                                           $1 is $HW_SYNCI_Step */
    "beq $v0, $zero, 20f\n"             /* If no caches require
                                           synchronization, branch
                                           around. */
    "nop\n"
    "10:\n"
    "synci 0(%[Addr])\n"                /* Synchronize all caches around
                                           address. */
    "daddu %[Addr], %[Addr], $v0\n"     /* Add step size. */
    "sltu $at, %[Addr], %[Size]\n"      /* Compare current with end
                                           address. */
    "bne $at, $zero, 10b\n"             /* Branch if more to do. */
    "nop\n"
    "sync\n"                            /* Clear memory hazards. */
    "20:\n"
    "bal 30f\n"
    "nop\n"
    "30:\n"
    "daddiu $ra, $ra, 12\n"             /* $ra has a value of $pc here.
                                           Add offset of 12 to point to the
                                           instruction after the last nop. */
    "jr.hb $ra\n"                       /* Return, clearing instruction
                                           hazards. */
    "nop\n"
    ".set pop\n"
    : [Addr] "+r"(Addr), [Size] "+r"(Size)
    :: "at", "ra", "v0", "memory"
  );
}
#endif
#endif

#if defined(__ANDROID__) && defined(__arm__)
#include <asm/unistd.h>
#endif

/*
 * The compiler generates calls to __clear_cache() when creating
 * trampoline functions on the stack for use with nested functions.
 * It is expected to invalidate the instruction cache for the
 * specified range.
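 *
 * A minimal usage sketch (hypothetical, for illustration only; not
 * part of this file's interface): a caller that has just written
 * machine code into an executable buffer flushes the byte range
 * before jumping to it.  `buf`, `generated_code`, and `len` are
 * assumed names:
 *
 *   memcpy(buf, generated_code, len);
 *   __clear_cache(buf, (char *)buf + len);
 *   ((void (*)(void))buf)();  // now safe to execute the new code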
 */

void __clear_cache(void *start, void *end) {
#if __i386__ || __x86_64__
  /*
   * Intel processors have a unified instruction and data cache
   * so there is nothing to do.
   */
#elif defined(__arm__) && !defined(__APPLE__)
#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__Bitrig__)
  struct arm_sync_icache_args arg;

  arg.addr = (uintptr_t)start;
  arg.len = (uintptr_t)end - (uintptr_t)start;

  sysarch(ARM_SYNC_ICACHE, &arg);
#elif defined(__ANDROID__)
  register int start_reg __asm("r0") = (int)(intptr_t)start;
  const register int end_reg __asm("r1") = (int)(intptr_t)end;
  const register int flags __asm("r2") = 0;
  const register int syscall_nr __asm("r7") = __ARM_NR_cacheflush;
  __asm __volatile("svc 0x0"
                   : "=r"(start_reg)
                   : "r"(syscall_nr), "r"(start_reg), "r"(end_reg), "r"(flags)
                   : "r0");
  if (start_reg != 0) {
    compilerrt_abort();
  }
#else
  compilerrt_abort();
#endif
#elif defined(__mips__)
  const uintptr_t start_int = (uintptr_t)start;
  const uintptr_t end_int = (uintptr_t)end;
#if defined(__ANDROID__) && defined(__LP64__)
  /* Use the synci implementation for short address ranges. */
  const uintptr_t address_range_limit = 256;
  if ((end_int - start_int) <= address_range_limit) {
    clear_mips_cache(start, (end_int - start_int));
  } else {
    syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE);
  }
#else
  syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE);
#endif
#elif defined(__aarch64__) && !defined(__APPLE__)
  uint64_t xstart = (uint64_t)(uintptr_t)start;
  uint64_t xend = (uint64_t)(uintptr_t)end;
  uint64_t addr;

  /* Get cache type info from CTR_EL0. */
  uint64_t ctr_el0;
  __asm __volatile("mrs %0, ctr_el0" : "=r"(ctr_el0));

  /*
   * dc & ic instructions must use 64-bit registers, so we don't use
   * uintptr_t in case this runs in an ILP32 environment.
   *
   * CTR_EL0 encodes the minimum D-cache line size in bits [19:16] and
   * the minimum I-cache line size in bits [3:0], each as log2 of the
   * number of 4-byte words.
   */
  const size_t dcache_line_size = 4 << ((ctr_el0 >> 16) & 15);
  for (addr = xstart & ~(dcache_line_size - 1); addr < xend;
       addr += dcache_line_size)
    __asm __volatile("dc cvau, %0" :: "r"(addr));
  __asm __volatile("dsb ish");

  const size_t icache_line_size = 4 << ((ctr_el0 >> 0) & 15);
  for (addr = xstart & ~(icache_line_size - 1); addr < xend;
       addr += icache_line_size)
    __asm __volatile("ic ivau, %0" :: "r"(addr));
  __asm __volatile("isb sy");
#elif defined(__sparc__)
  /* Flush one 4-byte word at a time; round start down to a word
     boundary first. */
  uintptr_t xstart = (uintptr_t)start & ~(uintptr_t)3;
  uintptr_t xend = (uintptr_t)end;

  for (; xstart < xend; xstart += 4) {
    __asm __volatile("flush %0" :: "r"(xstart));
  }
#elif defined(__NetBSD__) && defined(__ppc__)
  __syncicache(start, (uintptr_t)end - (uintptr_t)start);
#else
#if __APPLE__
  /* On Darwin, sys_icache_invalidate() provides this functionality. */
  sys_icache_invalidate(start, (char *)end - (char *)start);
#else
  compilerrt_abort();
#endif
#endif
}
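
/*
 * Worked example (an illustrative sketch only; never compiled as part
 * of compiler-rt, hence the #if 0 guard): the same shift/mask
 * arithmetic the aarch64 path above applies to CTR_EL0.  The register
 * value below is a made-up sample, not read from hardware.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

int main(void) {
  /* Hypothetical CTR_EL0 sample: IminLine = 4, DminLine = 4. */
  uint64_t ctr_el0 = 0x0000000000040004ULL;
  /* Each field is log2 of the line size in 4-byte words, so a field
     value of 4 yields 4 << 4 == 64-byte cache lines. */
  size_t dcache_line_size = 4 << ((ctr_el0 >> 16) & 15);
  size_t icache_line_size = 4 << ((ctr_el0 >> 0) & 15);
  printf("dcache line: %zu bytes, icache line: %zu bytes\n",
         dcache_line_size, icache_line_size);
  return 0;
}
#endif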