/llvm-project/compiler-rt/lib/asan_abi/asan_abi_shim.cpp

  72: void __asan_report_load1(uptr addr) {
  73:   __asan_abi_report_load_n((void *)addr, 1, true);
  75: void __asan_report_load2(uptr addr) {
  76:   __asan_abi_report_load_n((void *)addr, 2, true);
  78: void __asan_report_load4(uptr addr) {
  79:   __asan_abi_report_load_n((void *)addr, 4, true);
  81: void __asan_report_load8(uptr addr) {
  82:   __asan_abi_report_load_n((void *)addr, 8, true);
  84: void __asan_report_load16(uptr addr) {
  85:   __asan_abi_report_load_n((void *)addr, 16, true);
  [all …]
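The shim forwards each fixed-size report entry point to the generic __asan_abi_report_load_n. For context, here is a minimal sketch of how instrumented code typically reaches these entry points, assuming the classic ASan shadow mapping shadow = (addr >> 3) + offset; the SHADOW_OFFSET value and the checked_load1 helper are illustrative, not part of the excerpt:

    #include <stdint.h>

    typedef uintptr_t uptr;

    void __asan_report_load1(uptr addr); /* provided by the runtime */

    #define SHADOW_OFFSET 0x7fff8000ULL /* hypothetical; platform-dependent */

    static inline void checked_load1(const uint8_t *p) {
      uptr addr = (uptr)p;
      int8_t shadow = *(int8_t *)((addr >> 3) + SHADOW_OFFSET);
      /* Shadow byte 0 means all 8 bytes are addressable; a value k in 1..7
       * means only the first k bytes are. Anything else is poisoned. */
      if (shadow != 0 && (int8_t)(addr & 7) >= shadow)
        __asan_report_load1(addr); /* does not return */
    }
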
/llvm-project/llvm/test/CodeGen/AArch64/arm64-zeroreg.ll

  10: define void @func(ptr %addr) {
  18:   %v0 = load volatile i64, ptr %addr
  19:   %v1 = load volatile i64, ptr %addr
  20:   %v2 = load volatile i64, ptr %addr
  21:   %v3 = load volatile i64, ptr %addr
  22:   %v4 = load volatile i64, ptr %addr
  23:   %v5 = load volatile i64, ptr %addr
  24:   %v6 = load volatile i64, ptr %addr
  25:   %v7 = load volatile i64, ptr %addr
  26:   %v8 = load volatile i64, ptr %addr
  [all …]
/llvm-project/llvm/test/CodeGen/AArch64/v8.4-atomic-128.ll

   4: define void @test_atomic_load(ptr %addr) {
   9:   %res.0 = load atomic i128, ptr %addr monotonic, align 16
  10:   store i128 %res.0, ptr %addr
  14:   %res.1 = load atomic i128, ptr %addr unordered, align 16
  15:   store i128 %res.1, ptr %addr
  20:   %res.2 = load atomic i128, ptr %addr acquire, align 16
  21:   store i128 %res.2, ptr %addr
  26:   %res.3 = load atomic i128, ptr %addr seq_cst, align 16
  27:   store i128 %res.3, ptr %addr
  33:   %addr8.1 = getelementptr i8, ptr %addr, i32 32
  [all …]
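This test (and its GlobalISel twin further down the list) exercises 16-byte atomic loads and stores, which Armv8.4-A (FEAT_LSE2) targets can lower to plain LDP/STP because aligned 16-byte pairs become single-copy atomic there. A hedged C-level equivalent of the IR above; whether the compiler inlines these or falls back to libatomic calls depends on target flags such as -march=armv8.4-a:

    /* C analogue of the atomic i128 loads/stores in the test. */
    unsigned __int128 load128_acquire(const unsigned __int128 *p) {
      return __atomic_load_n(p, __ATOMIC_ACQUIRE);
    }

    void store128_release(unsigned __int128 *p, unsigned __int128 v) {
      __atomic_store_n(p, v, __ATOMIC_RELEASE);
    }
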
/llvm-project/llvm/test/CodeGen/AArch64/arm64_32-atomics.ll

   4: define i8 @test_load_8(ptr %addr) {
   7:   %val = load atomic i8, ptr %addr seq_cst, align 1
  11: define i16 @test_load_16(ptr %addr) {
  14:   %val = load atomic i16, ptr %addr acquire, align 2
  18: define i32 @test_load_32(ptr %addr) {
  21:   %val = load atomic i32, ptr %addr seq_cst, align 4
  25: define i64 @test_load_64(ptr %addr) {
  28:   %val = load atomic i64, ptr %addr seq_cst, align 8
  32: define ptr @test_load_ptr(ptr %addr) {
  35:   %val = load atomic ptr, ptr %addr seq_cst, align 8
  [all …]
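The arm64_32 test pairs each access size with an ordering. The same shapes in C11, as a sketch (function names mirror the test; the _Atomic parameter types are my addition):

    #include <stdatomic.h>
    #include <stdint.h>

    /* Each 'load atomic ... <ordering>' in the IR corresponds to an
     * atomic_load_explicit with the matching memory_order. */
    uint8_t test_load_8(const _Atomic uint8_t *addr) {
      return atomic_load_explicit(addr, memory_order_seq_cst);
    }

    uint16_t test_load_16(const _Atomic uint16_t *addr) {
      return atomic_load_explicit(addr, memory_order_acquire);
    }
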
/llvm-project/llvm/test/CodeGen/AArch64/arm64_32-neon.ll

  10: define void @test_split_16B(<4 x float> %val, ptr %addr) {
  13:   store <4 x float> %val, ptr %addr, align 8
  17: define void @test_split_16B_splat(<4 x i32>, ptr %addr) {
  26:   store <4 x i32> %vec, ptr %addr, align 8
  34: define {%vec, %vec} @test_neon_load(ptr %addr) {
  37:   %res = call {%vec, %vec} @llvm.aarch64.neon.ld2r.v2f64.p0(ptr %addr)
  42: define {%vec, %vec} @test_neon_load_lane(ptr %addr, %vec %in1, %vec %in2) {
  45:   %res = call {%vec, %vec} @llvm.aarch64.neon.ld2lane.v2f64.p0(%vec %in1, %vec %in2, i64 0, ptr %addr)
  50: define void @test_neon_store(ptr %addr, %vec %in1, %vec %in2) {
  53:   call void @llvm.aarch64.neon.st2.v2f64.p0(%vec %in1, %vec %in2, ptr %addr)
  [all …]
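The ld2r and st2 intrinsic calls in this test have direct arm_neon.h counterparts. A sketch using the public intrinsics, assuming an AArch64 target; these typically lower to the same llvm.aarch64.neon.* intrinsics shown above:

    #include <arm_neon.h>

    /* ld2r: load one element pair and replicate it across both vectors. */
    float64x2x2_t neon_load_dup(const float64_t *addr) {
      return vld2q_dup_f64(addr);
    }

    /* st2: interleaved store of two vectors. */
    void neon_store_pair(float64_t *addr, float64x2x2_t v) {
      vst2q_f64(addr, v);
    }
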
/llvm-project/clang/test/Sema/aarch64-neon-ranges.c
/llvm-project/clang/test/Sema/builtins-arm64-exclusive.c

   7: int test_ldrex(char *addr) {
   9:   sum += __builtin_arm_ldrex(addr);
  10:   sum += __builtin_arm_ldrex((short *)addr);
  11:   sum += __builtin_arm_ldrex((int *)addr);
  12:   sum += __builtin_arm_ldrex((long long *)addr);
  13:   sum += __builtin_arm_ldrex((__int128 *)addr);
  14:   sum += __builtin_arm_ldrex((float *)addr);
  15:   sum += __builtin_arm_ldrex((double *)addr);
  16:   sum += *__builtin_arm_ldrex((int **)addr);
  17:   sum += __builtin_arm_ldrex((struct Simple **)addr)->a;
  [all …]
/llvm-project/clang/test/Sema/builtins-arm-exclusive.c

   7: int test_ldrex(char *addr) {
   9:   sum += __builtin_arm_ldrex(addr);
  10:   sum += __builtin_arm_ldrex((short *)addr);
  11:   sum += __builtin_arm_ldrex((int *)addr);
  12:   sum += __builtin_arm_ldrex((long long *)addr);
  13:   sum += __builtin_arm_ldrex((float *)addr);
  14:   sum += __builtin_arm_ldrex((double *)addr);
  15:   sum += *__builtin_arm_ldrex((int **)addr);
  16:   sum += __builtin_arm_ldrex((struct Simple **)addr)->a;
  17:   sum += __builtin_arm_ldrex((volatile char *)addr);
  [all …]
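Both exclusive-builtin Sema tests above only check how the builtin handles different pointee types; in real code the builtins pair up into a load-linked/store-conditional retry loop. A minimal sketch (my example, not taken from the tests); __builtin_arm_strex returns 0 when the store-exclusive succeeds:

    /* Atomically increment *addr using exclusive accesses; returns the
     * old value. Requires an ARM/AArch64 target. */
    int atomic_increment(int *addr) {
      int old;
      do {
        old = __builtin_arm_ldrex(addr);           /* load-exclusive */
      } while (__builtin_arm_strex(old + 1, addr)); /* retry on failure */
      return old;
    }
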
/llvm-project/compiler-rt/lib/tsan/rtl/tsan_interface.inc

  21: void __tsan_read1(void *addr) {
  22:   MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 1, kAccessRead);
  25: void __tsan_read2(void *addr) {
  26:   MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, kAccessRead);
  29: void __tsan_read4(void *addr) {
  30:   MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, kAccessRead);
  33: void __tsan_read8(void *addr) {
  34:   MemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, kAccessRead);
  37: void __tsan_read16(void *addr) {
  38:   MemoryAccess16(cur_thread(), CALLERPC, (uptr)addr, kAccessRead);
  [all …]
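Each entry point records an access of the matching size at the caller's PC in the race detector's shadow state. A sketch of how hand-instrumented code could use this public interface (the read_shared_counter wrapper is illustrative; normally the compiler inserts these calls):

    void __tsan_read4(void *addr); /* provided by the TSan runtime */

    int read_shared_counter(int *counter) {
      __tsan_read4(counter); /* report the 4-byte read for race detection */
      return *counter;
    }
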
/llvm-project/clang/lib/CodeGen/CGBuilder.h

  59:   llvm::Value *emitRawPointerFromAddress(Address Addr) const {
  60:     return Addr.getBasePointer();
  64:   Address createConstGEP2_32(Address Addr, unsigned Idx0, unsigned Idx1,
  70:         Addr.getElementType(), emitRawPointerFromAddress(Addr), Idx0, Idx1,
  74:         Addr.getElementType(), emitRawPointerFromAddress(Addr), Idx0, Idx1,
  77:         DL.getIndexSizeInBits(Addr.getType()->getPointerAddressSpace()), 0,
  82:         Addr.getAlignment().alignmentAtOffset(
  84:         IsInBounds ? Addr

  Further matches in the same header:
  112: CreateLoad(Address Addr, const char *Name)
  163: CreateFlagStore(bool Value, llvm::Value *Addr)
  411: CreatePreserveStructAccessIndex(Address Addr, unsigned Index, unsigned FieldIndex, llvm::MDNode *DbgInfo)
  427: CreatePreserveUnionAccessIndex(Address Addr, unsigned FieldIndex, llvm::MDNode *DbgInfo)
  435: CreateLaunderInvariantGroup(Address Addr)
  441: CreateStripInvariantGroup(Address Addr)
  [all …]
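The subtle part of createConstGEP2_32 above is recomputing the result's alignment via alignmentAtOffset: the alignment known at a constant byte offset is the largest power of two dividing both the base alignment and the offset. A standalone sketch of that computation (my code, not Clang's actual CharUnits implementation):

    #include <stdint.h>

    static uint64_t alignment_at_offset(uint64_t align, uint64_t offset) {
      if (offset == 0)
        return align;
      /* Lowest set bit of (align | offset) is the min of the two
       * power-of-two alignments, e.g. align 16 at offset 24 gives 8. */
      uint64_t bits = align | offset;
      return bits & (~bits + 1);
    }
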
/llvm-project/llvm/test/Transforms/InstCombine/atomicrmw.ll

  10: define i32 @atomic_add_zero(ptr %addr) {
  12: ; CHECK-NEXT: [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i32 0 monotonic, align 4
  15:   %res = atomicrmw add ptr %addr, i32 0 monotonic
  19: define i32 @atomic_or_zero(ptr %addr) {
  21: ; CHECK-NEXT: [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i32 0 monotonic, align 4
  24:   %res = atomicrmw or ptr %addr, i32 0 monotonic
  29: define i32 @atomic_sub_zero(ptr %addr) {
  31: ; CHECK-NEXT: [[RES:%.*]] = atomicrmw or ptr [[ADDR:%.*]], i32 0 monotonic, align 4
  34:   %res = atomicrmw sub ptr %addr, i32 0 monotonic
  39: define i32 @atomic_and_allones(ptr %addr) {
  [all …]
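These tests pin down InstCombine's canonicalization of no-op read-modify-writes: add 0, sub 0, or 0, and and -1 all become atomicrmw or ..., 0, which is kept rather than deleted because it is still an atomic read with ordering. The C-level idiom that produces such IR, as a sketch:

    #include <stdatomic.h>

    /* Semantically an atomic load: the fetch_add of 0 cannot change the
     * value, but it keeps the atomic ordering (relaxed maps to the IR's
     * monotonic). */
    int atomic_read_via_rmw(_Atomic int *addr) {
      return atomic_fetch_add_explicit(addr, 0, memory_order_relaxed);
    }
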
/llvm-project/llvm/test/ThinLTO/X86/summary-matching.ll

   93:   %a.addr = alloca i32, align 4
   94:   %b.addr = alloca i32, align 4
   96:   store i32 %a, ptr %a.addr, align 4
   97:   store i32 %b, ptr %b.addr, align 4
   98:   %0 = load i32, ptr %a.addr, align 4
   99:   %1 = load i32, ptr %b.addr, align 4
  109:   %a.addr = alloca i32, align 4
  110:   %b.addr = alloca i32, align 4
  113:   store i32 %a, ptr %a.addr, align 4
  114:   store i32 %b, ptr %b.addr, align 4
  [all …]
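The %a.addr/%b.addr alloca-store-load pattern is what clang emits at -O0: every parameter gets a stack slot, is spilled on entry, and is reloaded at each use. A plausible source shape for the excerpt (illustrative; the test's real source is not shown here):

    /* At -O0 this compiles to: alloca a.addr/b.addr, store both
     * arguments, reload them, then add. mem2reg removes the slots at
     * higher optimization levels. */
    int add(int a, int b) {
      return a + b;
    }
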
/llvm-project/polly/test/CodeGen/phi_scalar_simple_2.ll

  16: ; CHECK-DAG: %x.addr.2.s2a = alloca i32
  17: ; CHECK-DAG: %x.addr.2.phiops = alloca i32
  18: ; CHECK-DAG: %x.addr.1.s2a = alloca i32
  19: ; CHECK-DAG: %x.addr.1.phiops = alloca i32
  20: ; CHECK-DAG: %x.addr.0.s2a = alloca i32
  21: ; CHECK-DAG: %x.addr.0.phiops = alloca i32
  27: ; CHECK: %x.addr.0.merge = phi i32 [ %x.addr.0.final_reload, %polly.exiting ], [ %x.addr.0,…
  28: ; CHECK: ret i32 %x.addr.0.merge
  31: ; CHECK-NEXT: store i32 %x, ptr %x.addr.0.phiops
  35: ; CHECK: %x.addr.0.final_reload = load i32, ptr %x.addr.0.s2a
  [all …]
/llvm-project/polly/test/CodeGen/phi_scalar_simple_1.ll

  15: ; CHECK-DAG: %x.addr.1.lcssa.s2a = alloca i32
  16: ; CHECK-DAG: %x.addr.1.lcssa.phiops = alloca i32
  17: ; CHECK-DAG: %x.addr.1.s2a = alloca i32
  18: ; CHECK-DAG: %x.addr.1.phiops = alloca i32
  19: ; CHECK-DAG: %x.addr.0.s2a = alloca i32
  20: ; CHECK-DAG: %x.addr.0.phiops = alloca i32
  25: ; CHECK: %x.addr.0.merge = phi i32 [ %x.addr.0.final_reload, %polly.exiting ], [ %x.addr.0,…
  26: ; CHECK: ret i32 %x.addr.0.merge
  29: ; CHECK: store i32 %x, ptr %x.addr.0.phiops
  32: ; CHECK: %x.addr.0.final_reload = load i32, ptr %x.addr.0.s2a
  [all …]
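Both phi_scalar_simple tests check how Polly demotes loop-carried PHI nodes like %x.addr.0 into paired .phiops (incoming values) and .s2a (scalar-to-array) stack slots, then merges the final value back on loop exit. A loop of the shape that produces such PHIs, as an illustrative sketch (not the tests' actual source):

    /* The running value of x becomes the %x.addr.* PHI that the CHECK
     * lines above show being stored to .phiops and reloaded from .s2a. */
    int sum_loop(const int *A, int x, int N) {
      for (int i = 0; i < N; i++)
        x += A[i];
      return x;
    }
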
/llvm-project/compiler-rt/test/sanitizer_common/TestCases/sanitizer_coverage_trace_loads_stores.cpp

  12: void __sanitizer_cov_load1(uint8_t *addr) { printf("load1: %p\n", addr); }
  13: void __sanitizer_cov_load2(uint16_t *addr) { printf("load2: %p\n", addr); }
  14: void __sanitizer_cov_load4(uint32_t *addr) { printf("load4: %p\n", addr); }
  15: void __sanitizer_cov_load8(uint64_t *addr) { printf("load8: %p\n", addr); }
  16: void __sanitizer_cov_load16(__int128 *addr) { printf("load16: %p\n", addr); }
  18: void __sanitizer_cov_store1(uint8_t *addr) { printf("store1: %p\n", addr); }
  19: void __sanitizer_cov_store2(uint16_t *addr) { printf("store2: %p\n", addr); }
  20: void __sanitizer_cov_store4(uint32_t *addr) { printf("store4: %p\n", addr); }
  21: void __sanitizer_cov_store8(uint64_t *addr) { printf("store8: %p\n", addr); }
  22: void __sanitizer_cov_store16(__int128 *addr) { printf("store16: %p\n", addr); }
  [all …]
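These are user-supplied callbacks for SanitizerCoverage's load/store tracing: every instrumented access of a given size calls the matching hook. A minimal standalone harness in the same spirit; the build command is an assumption based on the feature these callbacks belong to (something like clang -fsanitize-coverage=trace-loads,trace-stores example.c):

    #include <stdint.h>
    #include <stdio.h>

    /* User-defined hook, as in the test above: invoked for every
     * instrumented 4-byte load. */
    void __sanitizer_cov_load4(uint32_t *addr) {
      printf("load4: %p\n", (void *)addr);
    }

    uint32_t global = 42;

    int main(void) {
      return (int)(global & 1); /* this load should trigger the hook */
    }
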
/llvm-project/llvm/test/CodeGen/Hexagon/early-if-conversion-bug1.ll

  35:   %this.addr.i66 = alloca ptr, align 4
  36:   %__s.addr.i67 = alloca ptr, align 4
  37:   %__n.addr.i68 = alloca i32, align 4
  38:   %__p.addr.i.i = alloca ptr, align 4
  39:   %this.addr.i.i.i13.i.i = alloca ptr, align 4
  40:   %this.addr.i.i14.i.i = alloca ptr, align 4
  41:   %this.addr.i15.i.i = alloca ptr, align 4
  42:   %__x.addr.i.i.i.i.i = alloca ptr, align 4
  43:   %__r.addr.i.i.i.i = alloca ptr, align 4
  44:   %this.addr [all …]
/llvm-project/llvm/test/CodeGen/Hexagon/clr_set_toggle.ll

   8:   %x.addr = alloca i32, align 4
   9:   store i32 %x, ptr %x.addr, align 4
  10:   %0 = load i32, ptr %x.addr, align 4
  19:   %x.addr = alloca i64, align 8
  20:   store i64 %x, ptr %x.addr, align 8
  21:   %0 = load i64, ptr %x.addr, align 8
  30:   %x.addr = alloca i64, align 8
  31:   store i64 %x, ptr %x.addr, align 8
  32:   %0 = load i64, ptr %x.addr, align 8
  41:   %x.addr [all …]
/llvm-project/llvm/test/CodeGen/Hexagon/intrinsics-v60-vmpy-acc-128B.ll

  10:   %a.addr = alloca <64 x i32>, align 256
  11:   %b.addr = alloca i32, align 4
  12:   store <64 x i32> %a, ptr %a.addr, align 256
  13:   store i32 %b, ptr %b.addr, align 4
  15:   %1 = load <64 x i32>, ptr %a.addr, align 256
  16:   %2 = load i32, ptr %b.addr, align 4
  26:   %a.addr = alloca <64 x i32>, align 256
  27:   %b.addr = alloca i32, align 4
  28:   store <64 x i32> %a, ptr %a.addr, align 256
  29:   store i32 %b, ptr %b.addr, align 4
  [all …]
/llvm-project/llvm/test/CodeGen/X86/pr30430.ll

  106:   %__A.addr.i = alloca float, align 4
  107:   %__B.addr.i = alloca float, align 4
  108:   %__C.addr.i = alloca float, align 4
  109:   %__D.addr.i = alloca float, align 4
  110:   %__E.addr.i = alloca float, align 4
  111:   %__F.addr.i = alloca float, align 4
  112:   %__G.addr.i = alloca float, align 4
  113:   %__H.addr.i = alloca float, align 4
  114:   %__I.addr.i = alloca float, align 4
  115:   %__J.addr.i = alloca float, align 4
  [all …]
/llvm-project/llvm/test/CodeGen/ARM/vldm-sched-a9.ll

  34:   %addr.1 = getelementptr inbounds i64, ptr %src0, i32 1
  35:   %el.1 = load i64, ptr %addr.1, align 8
  36:   %addr.2 = getelementptr inbounds i64, ptr %src0, i32 2
  37:   %el.2 = load i64, ptr %addr.2, align 8
  38:   %addr.3 = getelementptr inbounds i64, ptr %src0, i32 3
  39:   %el.3 = load i64, ptr %addr.3, align 8
  40:   %addr.4 = getelementptr inbounds i64, ptr %src0, i32 4
  41:   %el.4 = load i64, ptr %addr.4, align 8
  42:   %addr.5 = getelementptr inbounds i64, ptr %src0, i32 5
  43:   %el.5 = load i64, ptr %addr.5, align 8
  [all …]
/llvm-project/llvm/test/CodeGen/PowerPC/fp-int-conversions-direct-moves.ll

  16:   %arg.addr = alloca float, align 4
  17:   store float %arg, ptr %arg.addr, align 4
  18:   %0 = load float, ptr %arg.addr, align 4
  32:   %arg.addr = alloca i8, align 1
  33:   store i8 %arg, ptr %arg.addr, align 1
  34:   %0 = load i8, ptr %arg.addr, align 1
  48:   %arg.addr = alloca double, align 8
  49:   store double %arg, ptr %arg.addr, align 8
  50:   %0 = load double, ptr %arg.addr, align 8
  64:   %arg.addr = alloca i8, align 1
  [all …]
/llvm-project/llvm/test/Instrumentation/MemorySanitizer/vector-load-store.ll

   3: …eck-access-address=1 -S -passes=msan 2>&1 | FileCheck %s --check-prefixes=ADDR --implicit-check-no…
  19: ; ADDR-LABEL: @load.v1i32(
  20: ; ADDR-NEXT: [[TMP1:%.*]] = load i64, ptr @__msan_param_tls, align 8
  21: ; ADDR-NEXT: call void @llvm.donothing()
  22: ; ADDR-NEXT: [[_MSCMP:%.*]] = icmp ne i64 [[TMP1]], 0
  23: ; ADDR-NEXT: br i1 [[_MSCMP]], label [[TMP2:%.*]], label [[TMP3:%.*]], !prof [[PROF0:![0-9]+]]
  24: ; ADDR: 2:
  25: ; ADDR-NEXT: call void @__msan_warning_noreturn() #[[ATTR3:[0-9]+]]
  26: ; ADDR-NEXT: unreachable
  27: ; ADDR: 3:
  [all …]
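The ADDR check prefix corresponds to running MSan with address checking enabled (the truncated RUN line evidently passes -msan-check-access-address=1), which guards the pointer operand of every access, not just the loaded data, and aborts through __msan_warning_noreturn. A sketch of code whose report would take that path (deliberately buggy, for illustration only):

    /* The pointer itself is uninitialized, which is exactly what the
     * address check above catches before the load even happens. */
    int deref_uninit(void) {
      int *p;    /* never initialized */
      return *p; /* MSan: use-of-uninitialized-value as an address */
    }
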
/llvm-project/clang/test/CodeGen/inline-asm-x86-flag-output.c

   3: int test_cca(long nr, volatile long *addr) {
   5: …"cmp $2,$1", "={@cca},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %addr, i64 %nr)
   8:     : "=@cca"(x), "=m"(*(volatile long *)(addr))
  16: int test_ccae(long nr, volatile long *addr) {
  18: …cmp $2,$1", "={@ccae},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %addr, i64 %nr)
  21:     : "=@ccae"(x), "=m"(*(volatile long *)(addr))
  29: int test_ccb(long nr, volatile long *addr) {
  31: …"cmp $2,$1", "={@ccb},=*m,r,~{cc},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %addr, i64 %nr)
  34:     : "=@ccb"(x), "=m"(*(volatile long *)(addr))
  42: int test_ccbe(long nr, volatile long *addr) {
  [all …]
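Flag-output constraints let x86 inline asm hand a condition code straight back to the compiler instead of materializing it with setcc: "=@cca" is the unsigned "above" condition, "=@ccae" above-or-equal, "=@ccb" below, and so on. A self-contained sketch of the same pattern, simplified from the test so both operands live in registers:

    /* Returns 1 when a > b (unsigned), read directly from the flags. */
    static inline int is_above(unsigned long a, unsigned long b) {
      int res;
      /* AT&T order: "cmp %2, %1" computes a - b and sets the flags. */
      __asm__("cmp %2, %1" : "=@cca"(res) : "r"(a), "r"(b));
      return res;
    }
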
/llvm-project/clang/test/CodeGen/builtins-arm-exclusive.c

   8: int test_ldrex(char *addr, long long *addr64, float *addrfloat) {
  12:   sum += __builtin_arm_ldrex(addr);
  13:   // CHECK: [[INTRES:%.*]] = call i32 @llvm.arm.ldrex.p0(ptr elementtype(i8) %addr)
  16:   // CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i8) %addr)
  19:   sum += __builtin_arm_ldrex((short *)addr);
  20:   // CHECK: [[INTRES:%.*]] = call i32 @llvm.arm.ldrex.p0(ptr elementtype(i16) %addr)
  23:   // CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i16) %addr)
  26:   sum += __builtin_arm_ldrex((int *)addr);
  27:   // CHECK: call i32 @llvm.arm.ldrex.p0(ptr elementtype(i32) %addr)
  29:   // CHECK-ARM64: [[INTRES:%.*]] = call i64 @llvm.aarch64.ldxr.p0(ptr elementtype(i32) %addr)
  [all …]
/llvm-project/llvm/test/CodeGen/AArch64/GlobalISel/v8.4-atomic-128.ll

   4: define void @test_atomic_load(ptr %addr) {
  11:   %res.0 = load atomic i128, ptr %addr monotonic, align 16
  12:   store i128 %res.0, ptr %addr
  18:   %res.1 = load atomic i128, ptr %addr unordered, align 16
  19:   store i128 %res.1, ptr %addr
  26:   %res.2 = load atomic i128, ptr %addr acquire, align 16
  27:   store i128 %res.2, ptr %addr
  34:   %res.3 = load atomic i128, ptr %addr seq_cst, align 16
  35:   store i128 %res.3, ptr %addr
  42:   %addr8.1 = getelementptr i8, ptr %addr, i32 8
  [all …]