1 // RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \ 2 // RUN: -triple i686--windows -Oz -emit-llvm %s -o - \ 3 // RUN: | FileCheck %s -check-prefixes CHECK,CHECK-I386,CHECK-INTEL 4 // RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \ 5 // RUN: -triple thumbv7--windows -Oz -emit-llvm %s -o - \ 6 // RUN: | FileCheck %s --check-prefixes CHECK,CHECK-ARM,CHECK-ARM-ARM64,CHECK-ARM-X64 7 // RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \ 8 // RUN: -triple x86_64--windows -Oz -emit-llvm -target-feature +cx16 %s -o - \ 9 // RUN: | FileCheck %s --check-prefixes CHECK,CHECK-X64,CHECK-ARM-X64,CHECK-INTEL,CHECK-64 10 // RUN: %clang_cc1 -ffreestanding -fms-extensions -fms-compatibility -fms-compatibility-version=17.00 \ 11 // RUN: -triple aarch64-windows -Oz -emit-llvm %s -o - \ 12 // RUN: | FileCheck %s --check-prefixes CHECK-ARM-ARM64,CHECK-ARM-X64,CHECK-ARM64,CHECK-64 13 14 // intrin.h needs size_t, but -ffreestanding prevents us from getting it from 15 // stddef.h. Work around it with this typedef. 16 typedef __SIZE_TYPE__ size_t; 17 18 #include <intrin.h> 19 20 #if defined(__i386__) || defined(__x86_64__) 21 void test__stosb(unsigned char *Dest, unsigned char Data, size_t Count) { 22 return __stosb(Dest, Data, Count); 23 } 24 25 // CHECK-I386: define{{.*}}void @test__stosb 26 // CHECK-I386: tail call void @llvm.memset.p0.i32(ptr align 1 %Dest, i8 %Data, i32 %Count, i1 true) 27 // CHECK-I386: ret void 28 // CHECK-I386: } 29 30 // CHECK-X64: define{{.*}}void @test__stosb 31 // CHECK-X64: tail call void @llvm.memset.p0.i64(ptr align 1 %Dest, i8 %Data, i64 %Count, i1 true) 32 // CHECK-X64: ret void 33 // CHECK-X64: } 34 35 void test__movsb(unsigned char *Dest, unsigned char *Src, size_t Count) { 36 return __movsb(Dest, Src, Count); 37 } 38 // CHECK-I386-LABEL: define{{.*}} void @test__movsb 39 // CHECK-I386: tail call { ptr, ptr, i32 } asm sideeffect "xchg $(%esi, $1$|$1, esi$)\0Arep movsb\0Axchg $(%esi, $1$|$1, esi$)", "={di},=r,={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i32 %Count) 40 // CHECK-I386: ret void 41 // CHECK-I386: } 42 43 // CHECK-X64-LABEL: define{{.*}} void @test__movsb 44 // CHECK-X64: call { ptr, ptr, i64 } asm sideeffect "rep movsb", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i64 %Count) 45 // CHECK-X64: ret void 46 // CHECK-X64: } 47 48 void test__stosw(unsigned short *Dest, unsigned short Data, size_t Count) { 49 return __stosw(Dest, Data, Count); 50 } 51 // CHECK-I386-LABEL: define{{.*}} void @test__stosw 52 // CHECK-I386: call { ptr, i32 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, ptr %Dest, i32 %Count) 53 // CHECK-I386: ret void 54 // CHECK-I386: } 55 56 // CHECK-X64-LABEL: define{{.*}} void @test__stosw 57 // CHECK-X64: call { ptr, i64 } asm sideeffect "rep stosw", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i16 %Data, ptr %Dest, i64 %Count) 58 // CHECK-X64: ret void 59 // CHECK-X64: } 60 61 void test__movsw(unsigned short *Dest, unsigned short *Src, size_t Count) { 62 return __movsw(Dest, Src, Count); 63 } 64 // CHECK-I386-LABEL: define{{.*}} void @test__movsw 65 // CHECK-I386: tail call { ptr, ptr, i32 } asm sideeffect "xchg $(%esi, $1$|$1, esi$)\0Arep movsw\0Axchg $(%esi, $1$|$1, esi$)", "={di},=r,={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i32 %Count) 66 // CHECK-I386: ret void 67 // CHECK-I386: } 68 69 // CHECK-X64-LABEL: define{{.*}} void @test__movsw 70 // CHECK-X64: call { ptr, ptr, i64 } asm sideeffect "rep movsw", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i64 %Count) 71 // CHECK-X64: ret void 72 // CHECK-X64: } 73 74 void test__stosd(unsigned long *Dest, unsigned long Data, size_t Count) { 75 return __stosd(Dest, Data, Count); 76 } 77 // CHECK-I386-LABEL: define{{.*}} void @test__stosd 78 // CHECK-I386: call { ptr, i32 } asm sideeffect "rep stos$(l$|d$)", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, ptr %Dest, i32 %Count) 79 // CHECK-I386: ret void 80 // CHECK-I386: } 81 82 // CHECK-X64-LABEL: define{{.*}} void @test__stosd 83 // CHECK-X64: call { ptr, i64 } asm sideeffect "rep stos$(l$|d$)", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i32 %Data, ptr %Dest, i64 %Count) 84 // CHECK-X64: ret void 85 // CHECK-X64: } 86 87 void test__movsd(unsigned long *Dest, unsigned long *Src, size_t Count) { 88 return __movsd(Dest, Src, Count); 89 } 90 // CHECK-I386-LABEL: define{{.*}} void @test__movsd 91 // CHECK-I386: tail call { ptr, ptr, i32 } asm sideeffect "xchg $(%esi, $1$|$1, esi$)\0Arep movs$(l$|d$)\0Axchg $(%esi, $1$|$1, esi$)", "={di},=r,={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i32 %Count) 92 // CHECK-I386: ret void 93 // CHECK-I386: } 94 95 // CHECK-X64-LABEL: define{{.*}} void @test__movsd 96 // CHECK-X64: call { ptr, ptr, i64 } asm sideeffect "rep movs$(l$|d$)", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i64 %Count) 97 // CHECK-X64: ret void 98 // CHECK-X64: } 99 100 #ifdef __x86_64__ 101 void test__stosq(unsigned __int64 *Dest, unsigned __int64 Data, size_t Count) { 102 return __stosq(Dest, Data, Count); 103 } 104 // CHECK-X64-LABEL: define{{.*}} void @test__stosq 105 // CHECK-X64: call { ptr, i64 } asm sideeffect "rep stosq", "={di},={cx},{ax},0,1,~{memory},~{dirflag},~{fpsr},~{flags}"(i64 %Data, ptr %Dest, i64 %Count) 106 // CHECK-X64: ret void 107 // CHECK-X64: } 108 109 void test__movsq(unsigned __int64 *Dest, unsigned __int64 *Src, size_t Count) { 110 return __movsq(Dest, Src, Count); 111 } 112 // CHECK-X64-LABEL: define{{.*}} void @test__movsq 113 // CHECK-X64: call { ptr, ptr, i64 } asm sideeffect "rep movsq", "={di},={si},={cx},0,1,2,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr %Dest, ptr %Src, i64 %Count) 114 // CHECK-X64: ret void 115 // CHECK-X64: } 116 #endif 117 118 void test__ud2(void) { 119 __ud2(); 120 } 121 // CHECK-INTEL-LABEL: define{{.*}} void @test__ud2() 122 // CHECK-INTEL: call void @llvm.trap() 123 124 void test__int2c(void) { 125 __int2c(); 126 } 127 // CHECK-INTEL-LABEL: define{{.*}} void @test__int2c() 128 // CHECK-INTEL: call void asm sideeffect "int $$0x2c", ""() #[[NORETURN:[0-9]+]] 129 130 131 #endif 132 133 void *test_ReturnAddress(void) { 134 return _ReturnAddress(); 135 } 136 // CHECK-LABEL: define{{.*}}ptr @test_ReturnAddress() 137 // CHECK: = tail call ptr @llvm.returnaddress(i32 0) 138 // CHECK: ret ptr 139 140 #if defined(__i386__) || defined(__x86_64__) || defined (__aarch64__) 141 void *test_AddressOfReturnAddress(void) { 142 return _AddressOfReturnAddress(); 143 } 144 // CHECK-INTEL-LABEL: define dso_local ptr @test_AddressOfReturnAddress() 145 // CHECK-INTEL: = tail call ptr @llvm.addressofreturnaddress.p0() 146 // CHECK-INTEL: ret ptr 147 #endif 148 149 unsigned char test_BitScanForward(unsigned long *Index, unsigned long Mask) { 150 return _BitScanForward(++Index, Mask); 151 } 152 // CHECK: define{{.*}}i8 @test_BitScanForward(ptr {{.*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{ 153 // CHECK: [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i32 %Mask, 0 154 // CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]] 155 // CHECK: [[END_LABEL]]: 156 // CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ] 157 // CHECK: ret i8 [[RESULT]] 158 // CHECK: [[ISNOTZERO_LABEL]]: 159 // CHECK: [[IDXGEP:%[a-z0-9._]+]] = getelementptr inbounds nuw i8, ptr %Index, {{i64|i32}} 4 160 // CHECK: [[INDEX:%[0-9]+]] = tail call range(i32 0, 33) i32 @llvm.cttz.i32(i32 %Mask, i1 true) 161 // CHECK: store i32 [[INDEX]], ptr [[IDXGEP]], align 4 162 // CHECK: br label %[[END_LABEL]] 163 164 unsigned char test_BitScanReverse(unsigned long *Index, unsigned long Mask) { 165 return _BitScanReverse(++Index, Mask); 166 } 167 // CHECK: define{{.*}}i8 @test_BitScanReverse(ptr {{.*}}%Index, i32 {{[a-z_ ]*}}%Mask){{.*}}{ 168 // CHECK: [[ISNOTZERO:%[0-9]+]] = icmp eq i32 %Mask, 0 169 // CHECK: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]] 170 // CHECK: [[END_LABEL]]: 171 // CHECK: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ] 172 // CHECK: ret i8 [[RESULT]] 173 // CHECK: [[ISNOTZERO_LABEL]]: 174 // CHECK: [[IDXGEP:%[a-z0-9._]+]] = getelementptr inbounds nuw i8, ptr %Index, {{i64|i32}} 4 175 // CHECK: [[REVINDEX:%[0-9]+]] = tail call range(i32 0, 33) i32 @llvm.ctlz.i32(i32 %Mask, i1 true) 176 // CHECK: [[INDEX:%[0-9]+]] = xor i32 [[REVINDEX]], 31 177 // CHECK: store i32 [[INDEX]], ptr [[IDXGEP]], align 4 178 // CHECK: br label %[[END_LABEL]] 179 180 #if defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) 181 unsigned char test_BitScanForward64(unsigned long *Index, unsigned __int64 Mask) { 182 return _BitScanForward64(Index, Mask); 183 } 184 // CHECK-ARM-X64: define{{.*}}i8 @test_BitScanForward64(ptr {{.*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{ 185 // CHECK-ARM-X64: [[ISNOTZERO:%[a-z0-9._]+]] = icmp eq i64 %Mask, 0 186 // CHECK-ARM-X64: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]] 187 // CHECK-ARM-X64: [[END_LABEL]]: 188 // CHECK-ARM-X64: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ] 189 // CHECK-ARM-X64: ret i8 [[RESULT]] 190 // CHECK-ARM-X64: [[ISNOTZERO_LABEL]]: 191 // CHECK-ARM-X64: [[INDEX:%[0-9]+]] = tail call range(i64 0, 65) i64 @llvm.cttz.i64(i64 %Mask, i1 true) 192 // CHECK-ARM-X64: [[TRUNC_INDEX:%[0-9]+]] = trunc nuw nsw i64 [[INDEX]] to i32 193 // CHECK-ARM-X64: store i32 [[TRUNC_INDEX]], ptr %Index, align 4 194 // CHECK-ARM-X64: br label %[[END_LABEL]] 195 196 unsigned char test_BitScanReverse64(unsigned long *Index, unsigned __int64 Mask) { 197 return _BitScanReverse64(Index, Mask); 198 } 199 // CHECK-ARM-X64: define{{.*}}i8 @test_BitScanReverse64(ptr {{.*}}%Index, i64 {{[a-z_ ]*}}%Mask){{.*}}{ 200 // CHECK-ARM-X64: [[ISNOTZERO:%[0-9]+]] = icmp eq i64 %Mask, 0 201 // CHECK-ARM-X64: br i1 [[ISNOTZERO]], label %[[END_LABEL:[a-z0-9._]+]], label %[[ISNOTZERO_LABEL:[a-z0-9._]+]] 202 // CHECK-ARM-X64: [[END_LABEL]]: 203 // CHECK-ARM-X64: [[RESULT:%[a-z0-9._]+]] = phi i8 [ 0, %[[ISZERO_LABEL:[a-z0-9._]+]] ], [ 1, %[[ISNOTZERO_LABEL]] ] 204 // CHECK-ARM-X64: ret i8 [[RESULT]] 205 // CHECK-ARM-X64: [[ISNOTZERO_LABEL]]: 206 // CHECK-ARM-X64: [[REVINDEX:%[0-9]+]] = tail call range(i64 0, 65) i64 @llvm.ctlz.i64(i64 %Mask, i1 true) 207 // CHECK-ARM-X64: [[TRUNC_REVINDEX:%[0-9]+]] = trunc nuw nsw i64 [[REVINDEX]] to i32 208 // CHECK-ARM-X64: [[INDEX:%[0-9]+]] = xor i32 [[TRUNC_REVINDEX]], 63 209 // CHECK-ARM-X64: store i32 [[INDEX]], ptr %Index, align 4 210 // CHECK-ARM-X64: br label %[[END_LABEL]] 211 #endif 212 213 void *test_InterlockedExchangePointer(void * volatile *Target, void *Value) { 214 return _InterlockedExchangePointer(Target, Value); 215 } 216 217 // CHECK: define{{.*}}ptr @test_InterlockedExchangePointer(ptr {{.*}}%Target, ptr {{[a-z_ ]*}}%Value){{.*}}{ 218 // CHECK: %[[VALUE:[0-9]+]] = ptrtoint ptr %Value to [[iPTR:i[0-9]+]] 219 // CHECK: %[[EXCHANGE:[0-9]+]] = atomicrmw xchg ptr %Target, [[iPTR]] %[[VALUE]] seq_cst, align {{4|8}} 220 // CHECK: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXCHANGE]] to ptr 221 // CHECK: ret ptr %[[RESULT]] 222 // CHECK: } 223 224 #if defined(__arm__) || defined(__aarch64__) 225 void *test_InterlockedExchangePointer_acq(void * volatile *Target, void *Value) { 226 return _InterlockedExchangePointer_acq(Target, Value); 227 } 228 229 // CHECK-ARM-ARM64: define{{.*}}ptr @test_InterlockedExchangePointer_acq(ptr {{.*}}%Target, ptr {{[a-z_ ]*}}%Value){{.*}}{ 230 // CHECK-ARM-ARM64: %[[VALUE:[0-9]+]] = ptrtoint ptr %Value to [[iPTR:i[0-9]+]] 231 // CHECK-ARM-ARM64: %[[EXCHANGE:[0-9]+]] = atomicrmw xchg ptr %Target, [[iPTR]] %[[VALUE]] acquire, align {{4|8}} 232 // CHECK-ARM-ARM64: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXCHANGE]] to ptr 233 // CHECK-ARM-ARM64: ret ptr %[[RESULT]] 234 // CHECK-ARM-ARM64: } 235 236 void *test_InterlockedExchangePointer_nf(void * volatile *Target, void *Value) { 237 return _InterlockedExchangePointer_nf(Target, Value); 238 } 239 240 // CHECK-ARM-ARM64: define{{.*}}ptr @test_InterlockedExchangePointer_nf(ptr {{.*}}%Target, ptr {{[a-z_ ]*}}%Value){{.*}}{ 241 // CHECK-ARM-ARM64: %[[VALUE:[0-9]+]] = ptrtoint ptr %Value to [[iPTR]] 242 // CHECK-ARM-ARM64: %[[EXCHANGE:[0-9]+]] = atomicrmw xchg ptr %Target, [[iPTR]] %[[VALUE]] monotonic, align {{4|8}} 243 // CHECK-ARM-ARM64: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXCHANGE]] to ptr 244 // CHECK-ARM-ARM64: ret ptr %[[RESULT]] 245 // CHECK-ARM-ARM64: } 246 247 void *test_InterlockedExchangePointer_rel(void * volatile *Target, void *Value) { 248 return _InterlockedExchangePointer_rel(Target, Value); 249 } 250 251 // CHECK-ARM-ARM64: define{{.*}}ptr @test_InterlockedExchangePointer_rel(ptr {{.*}}%Target, ptr {{[a-z_ ]*}}%Value){{.*}}{ 252 // CHECK-ARM-ARM64: %[[VALUE:[0-9]+]] = ptrtoint ptr %Value to [[iPTR]] 253 // CHECK-ARM-ARM64: %[[EXCHANGE:[0-9]+]] = atomicrmw xchg ptr %Target, [[iPTR]] %[[VALUE]] release, align {{4|8}} 254 // CHECK-ARM-ARM64: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXCHANGE]] to ptr 255 // CHECK-ARM-ARM64: ret ptr %[[RESULT]] 256 // CHECK-ARM-ARM64: } 257 #endif 258 259 void *test_InterlockedCompareExchangePointer(void * volatile *Destination, 260 void *Exchange, void *Comparand) { 261 return _InterlockedCompareExchangePointer(Destination, Exchange, Comparand); 262 } 263 264 // CHECK: define{{.*}}ptr @test_InterlockedCompareExchangePointer(ptr {{.*}}%Destination, ptr {{[a-z_ ]*}}%Exchange, ptr {{[a-z_ ]*}}%Comparand){{.*}}{ 265 // CHECK: %[[EXCHANGE:[0-9]+]] = ptrtoint ptr %Exchange to [[iPTR]] 266 // CHECK: %[[COMPARAND:[0-9]+]] = ptrtoint ptr %Comparand to [[iPTR]] 267 // CHECK: %[[XCHG:[0-9]+]] = cmpxchg volatile ptr %[[DEST:.+]], [[iPTR]] %[[COMPARAND:[0-9]+]], [[iPTR]] %[[EXCHANGE:[0-9]+]] seq_cst seq_cst, align {{4|8}} 268 // CHECK: %[[EXTRACT:[0-9]+]] = extractvalue { [[iPTR]], i1 } %[[XCHG]], 0 269 // CHECK: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to ptr 270 // CHECK: ret ptr %[[RESULT:[0-9]+]] 271 // CHECK: } 272 273 void *test_InterlockedCompareExchangePointer_nf(void * volatile *Destination, 274 void *Exchange, void *Comparand) { 275 return _InterlockedCompareExchangePointer_nf(Destination, Exchange, Comparand); 276 } 277 278 // CHECK: define{{.*}}ptr @test_InterlockedCompareExchangePointer_nf(ptr {{.*}}%Destination, ptr {{[a-z_ ]*}}%Exchange, ptr {{[a-z_ ]*}}%Comparand){{.*}}{ 279 // CHECK: %[[EXCHANGE:[0-9]+]] = ptrtoint ptr %Exchange to [[iPTR]] 280 // CHECK: %[[COMPARAND:[0-9]+]] = ptrtoint ptr %Comparand to [[iPTR]] 281 // CHECK: %[[XCHG:[0-9]+]] = cmpxchg volatile ptr %[[DEST:.+]], [[iPTR]] %[[COMPARAND:[0-9]+]], [[iPTR]] %[[EXCHANGE:[0-9]+]] monotonic monotonic, align {{4|8}} 282 // CHECK: %[[EXTRACT:[0-9]+]] = extractvalue { [[iPTR]], i1 } %[[XCHG]], 0 283 // CHECK: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to ptr 284 // CHECK: ret ptr %[[RESULT:[0-9]+]] 285 // CHECK: } 286 287 #if defined(__arm__) || defined(__aarch64__) 288 void *test_InterlockedCompareExchangePointer_acq(void * volatile *Destination, 289 void *Exchange, void *Comparand) { 290 return _InterlockedCompareExchangePointer_acq(Destination, Exchange, Comparand); 291 } 292 293 // CHECK-ARM-ARM64: define{{.*}}ptr @test_InterlockedCompareExchangePointer_acq(ptr {{.*}}%Destination, ptr {{[a-z_ ]*}}%Exchange, ptr {{[a-z_ ]*}}%Comparand){{.*}}{ 294 // CHECK-ARM-ARM64: %[[EXCHANGE:[0-9]+]] = ptrtoint ptr %Exchange to [[iPTR]] 295 // CHECK-ARM-ARM64: %[[COMPARAND:[0-9]+]] = ptrtoint ptr %Comparand to [[iPTR]] 296 // CHECK-ARM-ARM64: %[[XCHG:[0-9]+]] = cmpxchg volatile ptr %[[DEST:.+]], [[iPTR]] %[[COMPARAND:[0-9]+]], [[iPTR]] %[[EXCHANGE:[0-9]+]] acquire acquire, align {{4|8}} 297 // CHECK-ARM-ARM64: %[[EXTRACT:[0-9]+]] = extractvalue { [[iPTR]], i1 } %[[XCHG]], 0 298 // CHECK-ARM-ARM64: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to ptr 299 // CHECK-ARM-ARM64: ret ptr %[[RESULT:[0-9]+]] 300 // CHECK-ARM-ARM64: } 301 302 303 void *test_InterlockedCompareExchangePointer_rel(void * volatile *Destination, 304 void *Exchange, void *Comparand) { 305 return _InterlockedCompareExchangePointer_rel(Destination, Exchange, Comparand); 306 } 307 308 // CHECK-ARM-ARM64: define{{.*}}ptr @test_InterlockedCompareExchangePointer_rel(ptr {{.*}}%Destination, ptr {{[a-z_ ]*}}%Exchange, ptr {{[a-z_ ]*}}%Comparand){{.*}}{ 309 // CHECK-ARM-ARM64: %[[EXCHANGE:[0-9]+]] = ptrtoint ptr %Exchange to [[iPTR]] 310 // CHECK-ARM-ARM64: %[[COMPARAND:[0-9]+]] = ptrtoint ptr %Comparand to [[iPTR]] 311 // CHECK-ARM-ARM64: %[[XCHG:[0-9]+]] = cmpxchg volatile ptr %[[DEST:.+]], [[iPTR]] %[[COMPARAND:[0-9]+]], [[iPTR]] %[[EXCHANGE:[0-9]+]] release monotonic, align {{4|8}} 312 // CHECK-ARM-ARM64: %[[EXTRACT:[0-9]+]] = extractvalue { [[iPTR]], i1 } %[[XCHG]], 0 313 // CHECK-ARM-ARM64: %[[RESULT:[0-9]+]] = inttoptr [[iPTR]] %[[EXTRACT]] to ptr 314 // CHECK-ARM-ARM64: ret ptr %[[RESULT:[0-9]+]] 315 // CHECK-ARM-ARM64: } 316 #endif 317 318 char test_InterlockedExchange8(char volatile *value, char mask) { 319 return _InterlockedExchange8(value, mask); 320 } 321 // CHECK: define{{.*}}i8 @test_InterlockedExchange8(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 322 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask seq_cst, align 1 323 // CHECK: ret i8 [[RESULT:%[0-9]+]] 324 // CHECK: } 325 326 short test_InterlockedExchange16(short volatile *value, short mask) { 327 return _InterlockedExchange16(value, mask); 328 } 329 // CHECK: define{{.*}}i16 @test_InterlockedExchange16(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 330 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask seq_cst, align 2 331 // CHECK: ret i16 [[RESULT:%[0-9]+]] 332 // CHECK: } 333 334 long test_InterlockedExchange(long volatile *value, long mask) { 335 return _InterlockedExchange(value, mask); 336 } 337 // CHECK: define{{.*}}i32 @test_InterlockedExchange(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 338 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask seq_cst, align 4 339 // CHECK: ret i32 [[RESULT:%[0-9]+]] 340 // CHECK: } 341 342 char test_InterlockedExchangeAdd8(char volatile *value, char mask) { 343 return _InterlockedExchangeAdd8(value, mask); 344 } 345 // CHECK: define{{.*}}i8 @test_InterlockedExchangeAdd8(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 346 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask seq_cst, align 1 347 // CHECK: ret i8 [[RESULT:%[0-9]+]] 348 // CHECK: } 349 350 short test_InterlockedExchangeAdd16(short volatile *value, short mask) { 351 return _InterlockedExchangeAdd16(value, mask); 352 } 353 // CHECK: define{{.*}}i16 @test_InterlockedExchangeAdd16(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 354 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask seq_cst, align 2 355 // CHECK: ret i16 [[RESULT:%[0-9]+]] 356 // CHECK: } 357 358 long test_InterlockedExchangeAdd(long volatile *value, long mask) { 359 return _InterlockedExchangeAdd(value, mask); 360 } 361 // CHECK: define{{.*}}i32 @test_InterlockedExchangeAdd(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 362 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask seq_cst, align 4 363 // CHECK: ret i32 [[RESULT:%[0-9]+]] 364 // CHECK: } 365 366 char test_InterlockedExchangeSub8(char volatile *value, char mask) { 367 return _InterlockedExchangeSub8(value, mask); 368 } 369 // CHECK: define{{.*}}i8 @test_InterlockedExchangeSub8(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 370 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i8 %mask seq_cst, align 1 371 // CHECK: ret i8 [[RESULT:%[0-9]+]] 372 // CHECK: } 373 374 short test_InterlockedExchangeSub16(short volatile *value, short mask) { 375 return _InterlockedExchangeSub16(value, mask); 376 } 377 // CHECK: define{{.*}}i16 @test_InterlockedExchangeSub16(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 378 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i16 %mask seq_cst, align 2 379 // CHECK: ret i16 [[RESULT:%[0-9]+]] 380 // CHECK: } 381 382 long test_InterlockedExchangeSub(long volatile *value, long mask) { 383 return _InterlockedExchangeSub(value, mask); 384 } 385 // CHECK: define{{.*}}i32 @test_InterlockedExchangeSub(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 386 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i32 %mask seq_cst, align 4 387 // CHECK: ret i32 [[RESULT:%[0-9]+]] 388 // CHECK: } 389 390 char test_InterlockedOr8(char volatile *value, char mask) { 391 return _InterlockedOr8(value, mask); 392 } 393 // CHECK: define{{.*}}i8 @test_InterlockedOr8(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 394 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask seq_cst, align 1 395 // CHECK: ret i8 [[RESULT:%[0-9]+]] 396 // CHECK: } 397 398 short test_InterlockedOr16(short volatile *value, short mask) { 399 return _InterlockedOr16(value, mask); 400 } 401 // CHECK: define{{.*}}i16 @test_InterlockedOr16(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 402 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask seq_cst, align 2 403 // CHECK: ret i16 [[RESULT:%[0-9]+]] 404 // CHECK: } 405 406 long test_InterlockedOr(long volatile *value, long mask) { 407 return _InterlockedOr(value, mask); 408 } 409 // CHECK: define{{.*}}i32 @test_InterlockedOr(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 410 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask seq_cst, align 4 411 // CHECK: ret i32 [[RESULT:%[0-9]+]] 412 // CHECK: } 413 414 char test_InterlockedXor8(char volatile *value, char mask) { 415 return _InterlockedXor8(value, mask); 416 } 417 // CHECK: define{{.*}}i8 @test_InterlockedXor8(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 418 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask seq_cst, align 1 419 // CHECK: ret i8 [[RESULT:%[0-9]+]] 420 // CHECK: } 421 422 short test_InterlockedXor16(short volatile *value, short mask) { 423 return _InterlockedXor16(value, mask); 424 } 425 // CHECK: define{{.*}}i16 @test_InterlockedXor16(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 426 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask seq_cst, align 2 427 // CHECK: ret i16 [[RESULT:%[0-9]+]] 428 // CHECK: } 429 430 long test_InterlockedXor(long volatile *value, long mask) { 431 return _InterlockedXor(value, mask); 432 } 433 // CHECK: define{{.*}}i32 @test_InterlockedXor(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 434 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask seq_cst, align 4 435 // CHECK: ret i32 [[RESULT:%[0-9]+]] 436 // CHECK: } 437 438 char test_InterlockedAnd8(char volatile *value, char mask) { 439 return _InterlockedAnd8(value, mask); 440 } 441 // CHECK: define{{.*}}i8 @test_InterlockedAnd8(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 442 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask seq_cst, align 1 443 // CHECK: ret i8 [[RESULT:%[0-9]+]] 444 // CHECK: } 445 446 short test_InterlockedAnd16(short volatile *value, short mask) { 447 return _InterlockedAnd16(value, mask); 448 } 449 // CHECK: define{{.*}}i16 @test_InterlockedAnd16(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 450 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask seq_cst, align 2 451 // CHECK: ret i16 [[RESULT:%[0-9]+]] 452 // CHECK: } 453 454 long test_InterlockedAnd(long volatile *value, long mask) { 455 return _InterlockedAnd(value, mask); 456 } 457 // CHECK: define{{.*}}i32 @test_InterlockedAnd(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 458 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask seq_cst, align 4 459 // CHECK: ret i32 [[RESULT:%[0-9]+]] 460 // CHECK: } 461 462 char test_InterlockedCompareExchange8(char volatile *Destination, char Exchange, char Comperand) { 463 return _InterlockedCompareExchange8(Destination, Exchange, Comperand); 464 } 465 // CHECK: define{{.*}}i8 @test_InterlockedCompareExchange8(ptr{{.*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{ 466 // CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i8 %Comperand, i8 %Exchange seq_cst seq_cst, align 1 467 // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0 468 // CHECK: ret i8 [[RESULT]] 469 // CHECK: } 470 471 short test_InterlockedCompareExchange16(short volatile *Destination, short Exchange, short Comperand) { 472 return _InterlockedCompareExchange16(Destination, Exchange, Comperand); 473 } 474 // CHECK: define{{.*}}i16 @test_InterlockedCompareExchange16(ptr{{.*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{ 475 // CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i16 %Comperand, i16 %Exchange seq_cst seq_cst, align 2 476 // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0 477 // CHECK: ret i16 [[RESULT]] 478 // CHECK: } 479 480 long test_InterlockedCompareExchange(long volatile *Destination, long Exchange, long Comperand) { 481 return _InterlockedCompareExchange(Destination, Exchange, Comperand); 482 } 483 // CHECK: define{{.*}}i32 @test_InterlockedCompareExchange(ptr{{.*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{ 484 // CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange seq_cst seq_cst, align 4 485 // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0 486 // CHECK: ret i32 [[RESULT]] 487 // CHECK: } 488 489 __int64 test_InterlockedCompareExchange64(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) { 490 return _InterlockedCompareExchange64(Destination, Exchange, Comperand); 491 } 492 // CHECK: define{{.*}}i64 @test_InterlockedCompareExchange64(ptr{{.*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{ 493 // CHECK: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i64 %Comperand, i64 %Exchange seq_cst seq_cst, align 8 494 // CHECK: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0 495 // CHECK: ret i64 [[RESULT]] 496 // CHECK: } 497 498 #if defined(__x86_64__) || defined(__aarch64__) 499 unsigned char test_InterlockedCompareExchange128( 500 __int64 volatile *Destination, __int64 ExchangeHigh, 501 __int64 ExchangeLow, __int64 *ComparandResult) { 502 return _InterlockedCompareExchange128(++Destination, ++ExchangeHigh, 503 ++ExchangeLow, ++ComparandResult); 504 } 505 // CHECK-64: define{{.*}}i8 @test_InterlockedCompareExchange128(ptr{{.*}}%Destination, i64{{[a-z_ ]*}}%ExchangeHigh, i64{{[a-z_ ]*}}%ExchangeLow, ptr{{.*}}%ComparandResult){{.*}}{ 506 // CHECK-64: %incdec.ptr = getelementptr inbounds nuw i8, ptr %Destination, i64 8 507 // CHECK-64: %inc = add nsw i64 %ExchangeHigh, 1 508 // CHECK-64: %inc1 = add nsw i64 %ExchangeLow, 1 509 // CHECK-64: %incdec.ptr2 = getelementptr inbounds nuw i8, ptr %ComparandResult, i64 8 510 // CHECK-64: [[EH:%[0-9]+]] = zext i64 %inc to i128 511 // CHECK-64: [[EL:%[0-9]+]] = zext i64 %inc1 to i128 512 // CHECK-64: [[EHS:%[0-9]+]] = shl nuw i128 [[EH]], 64 513 // CHECK-64: [[EXP:%[0-9]+]] = or disjoint i128 [[EHS]], [[EL]] 514 // CHECK-64: [[ORG:%[0-9]+]] = load i128, ptr %incdec.ptr2, align 8 515 // CHECK-64: [[RES:%[0-9]+]] = cmpxchg volatile ptr %incdec.ptr, i128 [[ORG]], i128 [[EXP]] seq_cst seq_cst, align 16 516 // CHECK-64: [[OLD:%[0-9]+]] = extractvalue { i128, i1 } [[RES]], 0 517 // CHECK-64: store i128 [[OLD]], ptr %incdec.ptr2, align 8 518 // CHECK-64: [[SUC1:%[0-9]+]] = extractvalue { i128, i1 } [[RES]], 1 519 // CHECK-64: [[SUC8:%[0-9]+]] = zext i1 [[SUC1]] to i8 520 // CHECK-64: ret i8 [[SUC8]] 521 // CHECK-64: } 522 #endif 523 524 #if defined(__aarch64__) 525 unsigned char test_InterlockedCompareExchange128_acq( 526 __int64 volatile *Destination, __int64 ExchangeHigh, 527 __int64 ExchangeLow, __int64 *ComparandResult) { 528 return _InterlockedCompareExchange128_acq(Destination, ExchangeHigh, 529 ExchangeLow, ComparandResult); 530 } 531 unsigned char test_InterlockedCompareExchange128_nf( 532 __int64 volatile *Destination, __int64 ExchangeHigh, 533 __int64 ExchangeLow, __int64 *ComparandResult) { 534 return _InterlockedCompareExchange128_nf(Destination, ExchangeHigh, 535 ExchangeLow, ComparandResult); 536 } 537 unsigned char test_InterlockedCompareExchange128_rel( 538 __int64 volatile *Destination, __int64 ExchangeHigh, 539 __int64 ExchangeLow, __int64 *ComparandResult) { 540 return _InterlockedCompareExchange128_rel(Destination, ExchangeHigh, 541 ExchangeLow, ComparandResult); 542 } 543 // CHECK-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange128_acq({{.*}}) 544 // CHECK-ARM64: cmpxchg volatile ptr %{{.*}}, i128 %{{.*}}, i128 %{{.*}} acquire acquire, align 16 545 // CHECK-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange128_nf({{.*}}) 546 // CHECK-ARM64: cmpxchg volatile ptr %{{.*}}, i128 %{{.*}}, i128 %{{.*}} monotonic monotonic, align 16 547 // CHECK-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange128_rel({{.*}}) 548 // CHECK-ARM64: cmpxchg volatile ptr %{{.*}}, i128 %{{.*}}, i128 %{{.*}} release monotonic, align 16 549 #endif 550 551 short test_InterlockedIncrement16(short volatile *Addend) { 552 return _InterlockedIncrement16(++Addend); 553 } 554 // CHECK: define{{.*}}i16 @test_InterlockedIncrement16(ptr{{.*}}%Addend){{.*}}{ 555 // CHECK: %incdec.ptr = getelementptr inbounds nuw i8, ptr %Addend, {{i64|i32}} 2 556 // CHECK: [[TMP:%[0-9]+]] = atomicrmw add ptr %incdec.ptr, i16 1 seq_cst, align 2 557 // CHECK: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1 558 // CHECK: ret i16 [[RESULT]] 559 // CHECK: } 560 561 long test_InterlockedIncrement(long volatile *Addend) { 562 return _InterlockedIncrement(++Addend); 563 } 564 // CHECK: define{{.*}}i32 @test_InterlockedIncrement(ptr{{.*}}%Addend){{.*}}{ 565 // CHECK: %incdec.ptr = getelementptr inbounds nuw i8, ptr %Addend, {{i64|i32}} 4 566 // CHECK: [[TMP:%[0-9]+]] = atomicrmw add ptr %incdec.ptr, i32 1 seq_cst, align 4 567 // CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1 568 // CHECK: ret i32 [[RESULT]] 569 // CHECK: } 570 571 short test_InterlockedDecrement16(short volatile *Addend) { 572 return _InterlockedDecrement16(Addend); 573 } 574 // CHECK: define{{.*}}i16 @test_InterlockedDecrement16(ptr{{.*}}%Addend){{.*}}{ 575 // CHECK: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i16 1 seq_cst, align 2 576 // CHECK: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1 577 // CHECK: ret i16 [[RESULT]] 578 // CHECK: } 579 580 long test_InterlockedDecrement(long volatile *Addend) { 581 return _InterlockedDecrement(Addend); 582 } 583 // CHECK: define{{.*}}i32 @test_InterlockedDecrement(ptr{{.*}}%Addend){{.*}}{ 584 // CHECK: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 seq_cst, align 4 585 // CHECK: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1 586 // CHECK: ret i32 [[RESULT]] 587 // CHECK: } 588 589 char test_iso_volatile_load8(char volatile *p) { return __iso_volatile_load8(p); } 590 short test_iso_volatile_load16(short volatile *p) { return __iso_volatile_load16(p); } 591 int test_iso_volatile_load32(int volatile *p) { return __iso_volatile_load32(p); } 592 __int64 test_iso_volatile_load64(__int64 volatile *p) { return __iso_volatile_load64(p); } 593 594 // CHECK: define{{.*}}i8 @test_iso_volatile_load8(ptr{{.*}}%p) 595 // CHECK: = load volatile i8, ptr %p 596 // CHECK: define{{.*}}i16 @test_iso_volatile_load16(ptr{{.*}}%p) 597 // CHECK: = load volatile i16, ptr %p 598 // CHECK: define{{.*}}i32 @test_iso_volatile_load32(ptr{{.*}}%p) 599 // CHECK: = load volatile i32, ptr %p 600 // CHECK: define{{.*}}i64 @test_iso_volatile_load64(ptr{{.*}}%p) 601 // CHECK: = load volatile i64, ptr %p 602 603 void test_iso_volatile_store8(char volatile *p, char v) { __iso_volatile_store8(p, v); } 604 void test_iso_volatile_store16(short volatile *p, short v) { __iso_volatile_store16(p, v); } 605 void test_iso_volatile_store32(int volatile *p, int v) { __iso_volatile_store32(p, v); } 606 void test_iso_volatile_store64(__int64 volatile *p, __int64 v) { __iso_volatile_store64(p, v); } 607 608 // CHECK: define{{.*}}void @test_iso_volatile_store8(ptr{{.*}}%p, i8 {{[a-z_ ]*}}%v) 609 // CHECK: store volatile i8 %v, ptr %p 610 // CHECK: define{{.*}}void @test_iso_volatile_store16(ptr{{.*}}%p, i16 {{[a-z_ ]*}}%v) 611 // CHECK: store volatile i16 %v, ptr %p 612 // CHECK: define{{.*}}void @test_iso_volatile_store32(ptr{{.*}}%p, i32 {{[a-z_ ]*}}%v) 613 // CHECK: store volatile i32 %v, ptr %p 614 // CHECK: define{{.*}}void @test_iso_volatile_store64(ptr{{.*}}%p, i64 {{[a-z_ ]*}}%v) 615 // CHECK: store volatile i64 %v, ptr %p 616 617 618 #if defined(__i386__) || defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) 619 __int64 test_InterlockedExchange64(__int64 volatile *value, __int64 mask) { 620 return _InterlockedExchange64(value, mask); 621 } 622 // CHECK: define{{.*}}i64 @test_InterlockedExchange64(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 623 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask seq_cst, align 8 624 // CHECK: ret i64 [[RESULT:%[0-9]+]] 625 // CHECK: } 626 627 __int64 test_InterlockedExchangeAdd64(__int64 volatile *value, __int64 mask) { 628 return _InterlockedExchangeAdd64(value, mask); 629 } 630 // CHECK: define{{.*}}i64 @test_InterlockedExchangeAdd64(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 631 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask seq_cst, align 8 632 // CHECK: ret i64 [[RESULT:%[0-9]+]] 633 // CHECK: } 634 635 __int64 test_InterlockedExchangeSub64(__int64 volatile *value, __int64 mask) { 636 return _InterlockedExchangeSub64(value, mask); 637 } 638 // CHECK: define{{.*}}i64 @test_InterlockedExchangeSub64(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 639 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i64 %mask seq_cst, align 8 640 // CHECK: ret i64 [[RESULT:%[0-9]+]] 641 // CHECK: } 642 643 __int64 test_InterlockedOr64(__int64 volatile *value, __int64 mask) { 644 return _InterlockedOr64(value, mask); 645 } 646 // CHECK: define{{.*}}i64 @test_InterlockedOr64(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 647 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask seq_cst, align 8 648 // CHECK: ret i64 [[RESULT:%[0-9]+]] 649 // CHECK: } 650 651 __int64 test_InterlockedXor64(__int64 volatile *value, __int64 mask) { 652 return _InterlockedXor64(value, mask); 653 } 654 // CHECK: define{{.*}}i64 @test_InterlockedXor64(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 655 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask seq_cst, align 8 656 // CHECK: ret i64 [[RESULT:%[0-9]+]] 657 // CHECK: } 658 659 __int64 test_InterlockedAnd64(__int64 volatile *value, __int64 mask) { 660 return _InterlockedAnd64(value, mask); 661 } 662 // CHECK: define{{.*}}i64 @test_InterlockedAnd64(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 663 // CHECK: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask seq_cst, align 8 664 // CHECK: ret i64 [[RESULT:%[0-9]+]] 665 // CHECK: } 666 667 __int64 test_InterlockedIncrement64(__int64 volatile *Addend) { 668 return _InterlockedIncrement64(Addend); 669 } 670 // CHECK: define{{.*}}i64 @test_InterlockedIncrement64(ptr{{.*}}%Addend){{.*}}{ 671 // CHECK: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i64 1 seq_cst, align 8 672 // CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1 673 // CHECK: ret i64 [[RESULT]] 674 // CHECK: } 675 676 __int64 test_InterlockedDecrement64(__int64 volatile *Addend) { 677 return _InterlockedDecrement64(Addend); 678 } 679 // CHECK: define{{.*}}i64 @test_InterlockedDecrement64(ptr{{.*}}%Addend){{.*}}{ 680 // CHECK: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i64 1 seq_cst, align 8 681 // CHECK: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1 682 // CHECK: ret i64 [[RESULT]] 683 // CHECK: } 684 685 #endif 686 687 #if defined(__i386__) || defined(__x86_64__) 688 long test_InterlockedExchange_HLEAcquire(long volatile *Target, long Value) { 689 // CHECK-INTEL: define{{.*}} i32 @test_InterlockedExchange_HLEAcquire(ptr{{.*}}%Target, i32{{[a-z_ ]*}}%Value) 690 // CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %Target, i32 %Value, ptr elementtype(i32) %Target) 691 return _InterlockedExchange_HLEAcquire(Target, Value); 692 } 693 long test_InterlockedExchange_HLERelease(long volatile *Target, long Value) { 694 // CHECK-INTEL: define{{.*}} i32 @test_InterlockedExchange_HLERelease(ptr{{.*}}%Target, i32{{[a-z_ ]*}}%Value) 695 // CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %Target, i32 %Value, ptr elementtype(i32) %Target) 696 return _InterlockedExchange_HLERelease(Target, Value); 697 } 698 long test_InterlockedCompareExchange_HLEAcquire(long volatile *Destination, 699 long Exchange, long Comparand) { 700 // CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLEAcquire(ptr{{.*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand) 701 // CHECK-INTEL: call i32 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %Destination, i32 %Exchange, i32 %Comparand, ptr elementtype(i32) %Destination) 702 return _InterlockedCompareExchange_HLEAcquire(Destination, Exchange, Comparand); 703 } 704 long test_InterlockedCompareExchange_HLERelease(long volatile *Destination, 705 long Exchange, long Comparand) { 706 // CHECK-INTEL: define{{.*}} i32 @test_InterlockedCompareExchange_HLERelease(ptr{{.*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comparand) 707 // CHECK-INTEL: call i32 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i32) %Destination, i32 %Exchange, i32 %Comparand, ptr elementtype(i32) %Destination) 708 return _InterlockedCompareExchange_HLERelease(Destination, Exchange, Comparand); 709 } 710 #endif 711 #if defined(__x86_64__) 712 __int64 test_InterlockedExchange64_HLEAcquire(__int64 volatile *Target, __int64 Value) { 713 // CHECK-X64: define{{.*}} i64 @test_InterlockedExchange64_HLEAcquire(ptr{{.*}}%Target, i64{{[a-z_ ]*}}%Value) 714 // CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %Target, i64 %Value, ptr elementtype(i64) %Target) 715 return _InterlockedExchange64_HLEAcquire(Target, Value); 716 } 717 __int64 test_InterlockedExchange64_HLERelease(__int64 volatile *Target, __int64 Value) { 718 // CHECK-X64: define{{.*}} i64 @test_InterlockedExchange64_HLERelease(ptr{{.*}}%Target, i64{{[a-z_ ]*}}%Value) 719 // CHECK-X64: call i64 asm sideeffect ".byte 0xf3 ; lock ; xchg $($0, $1$|$1, $0$)", "=r,=*m,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %Target, i64 %Value, ptr elementtype(i64) %Target) 720 return _InterlockedExchange64_HLERelease(Target, Value); 721 } 722 __int64 test_InterlockedCompareExchange64_HLEAcquire(__int64 volatile *Destination, 723 __int64 Exchange, __int64 Comparand) { 724 // CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLEAcquire(ptr{{.*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand) 725 // CHECK-X64: call i64 asm sideeffect ".byte 0xf2 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %Destination, i64 %Exchange, i64 %Comparand, ptr elementtype(i64) %Destination) 726 return _InterlockedCompareExchange64_HLEAcquire(Destination, Exchange, Comparand); 727 } 728 __int64 test_InterlockedCompareExchange64_HLERelease(__int64 volatile *Destination, 729 __int64 Exchange, __int64 Comparand) { 730 // CHECK-X64: define{{.*}} i64 @test_InterlockedCompareExchange64_HLERelease(ptr{{.*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comparand) 731 // CHECK-X64: call i64 asm sideeffect ".byte 0xf3 ; lock ; cmpxchg $($2, $1$|$1, $2$)", "={ax},=*m,r,0,*m,~{memory},~{dirflag},~{fpsr},~{flags}"(ptr elementtype(i64) %Destination, i64 %Exchange, i64 %Comparand, ptr elementtype(i64) %Destination) 732 return _InterlockedCompareExchange64_HLERelease(Destination, Exchange, Comparand); 733 } 734 #endif 735 736 #if defined(__arm__) || defined(__aarch64__) 737 char test_InterlockedExchangeAdd8_acq(char volatile *value, char mask) { 738 return _InterlockedExchangeAdd8_acq(value, mask); 739 } 740 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_acq(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 741 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask acquire, align 1 742 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]] 743 // CHECK-ARM-ARM64: } 744 char test_InterlockedExchangeAdd8_rel(char volatile *value, char mask) { 745 return _InterlockedExchangeAdd8_rel(value, mask); 746 } 747 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_rel(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 748 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask release, align 1 749 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]] 750 // CHECK-ARM-ARM64: } 751 char test_InterlockedExchangeAdd8_nf(char volatile *value, char mask) { 752 return _InterlockedExchangeAdd8_nf(value, mask); 753 } 754 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_nf(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 755 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask monotonic, align 1 756 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]] 757 // CHECK-ARM-ARM64: } 758 short test_InterlockedExchangeAdd16_acq(short volatile *value, short mask) { 759 return _InterlockedExchangeAdd16_acq(value, mask); 760 } 761 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_acq(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 762 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask acquire, align 2 763 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]] 764 // CHECK-ARM-ARM64: } 765 short test_InterlockedExchangeAdd16_rel(short volatile *value, short mask) { 766 return _InterlockedExchangeAdd16_rel(value, mask); 767 } 768 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_rel(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 769 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask release, align 2 770 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]] 771 // CHECK-ARM-ARM64: } 772 short test_InterlockedExchangeAdd16_nf(short volatile *value, short mask) { 773 return _InterlockedExchangeAdd16_nf(value, mask); 774 } 775 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_nf(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 776 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask monotonic, align 2 777 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]] 778 // CHECK-ARM-ARM64: } 779 long test_InterlockedExchangeAdd_acq(long volatile *value, long mask) { 780 return _InterlockedExchangeAdd_acq(value, mask); 781 } 782 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_acq(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 783 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask acquire, align 4 784 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]] 785 // CHECK-ARM-ARM64: } 786 long test_InterlockedExchangeAdd_rel(long volatile *value, long mask) { 787 return _InterlockedExchangeAdd_rel(value, mask); 788 } 789 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_rel(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 790 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask release, align 4 791 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]] 792 // CHECK-ARM-ARM64: } 793 long test_InterlockedExchangeAdd_nf(long volatile *value, long mask) { 794 return _InterlockedExchangeAdd_nf(value, mask); 795 } 796 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_nf(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 797 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask monotonic, align 4 798 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]] 799 // CHECK-ARM-ARM64: } 800 __int64 test_InterlockedExchangeAdd64_acq(__int64 volatile *value, __int64 mask) { 801 return _InterlockedExchangeAdd64_acq(value, mask); 802 } 803 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_acq(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 804 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask acquire, align 8 805 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]] 806 // CHECK-ARM-ARM64: } 807 __int64 test_InterlockedExchangeAdd64_rel(__int64 volatile *value, __int64 mask) { 808 return _InterlockedExchangeAdd64_rel(value, mask); 809 } 810 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_rel(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 811 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask release, align 8 812 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]] 813 // CHECK-ARM-ARM64: } 814 __int64 test_InterlockedExchangeAdd64_nf(__int64 volatile *value, __int64 mask) { 815 return _InterlockedExchangeAdd64_nf(value, mask); 816 } 817 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_nf(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 818 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask monotonic, align 8 819 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]] 820 // CHECK-ARM-ARM64: } 821 822 char test_InterlockedExchange8_acq(char volatile *value, char mask) { 823 return _InterlockedExchange8_acq(value, mask); 824 } 825 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_acq(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 826 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask acquire, align 1 827 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]] 828 // CHECK-ARM-ARM64: } 829 char test_InterlockedExchange8_rel(char volatile *value, char mask) { 830 return _InterlockedExchange8_rel(value, mask); 831 } 832 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_rel(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 833 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask release, align 1 834 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]] 835 // CHECK-ARM-ARM64: } 836 char test_InterlockedExchange8_nf(char volatile *value, char mask) { 837 return _InterlockedExchange8_nf(value, mask); 838 } 839 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_nf(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 840 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask monotonic, align 1 841 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]] 842 // CHECK-ARM-ARM64: } 843 short test_InterlockedExchange16_acq(short volatile *value, short mask) { 844 return _InterlockedExchange16_acq(value, mask); 845 } 846 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_acq(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 847 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask acquire, align 2 848 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]] 849 // CHECK-ARM-ARM64: } 850 short test_InterlockedExchange16_rel(short volatile *value, short mask) { 851 return _InterlockedExchange16_rel(value, mask); 852 } 853 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_rel(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 854 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask release, align 2 855 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]] 856 // CHECK-ARM-ARM64: } 857 short test_InterlockedExchange16_nf(short volatile *value, short mask) { 858 return _InterlockedExchange16_nf(value, mask); 859 } 860 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_nf(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 861 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask monotonic, align 2 862 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]] 863 // CHECK-ARM-ARM64: } 864 long test_InterlockedExchange_acq(long volatile *value, long mask) { 865 return _InterlockedExchange_acq(value, mask); 866 } 867 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_acq(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 868 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask acquire, align 4 869 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]] 870 // CHECK-ARM-ARM64: } 871 long test_InterlockedExchange_rel(long volatile *value, long mask) { 872 return _InterlockedExchange_rel(value, mask); 873 } 874 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_rel(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 875 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask release, align 4 876 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]] 877 // CHECK-ARM-ARM64: } 878 long test_InterlockedExchange_nf(long volatile *value, long mask) { 879 return _InterlockedExchange_nf(value, mask); 880 } 881 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_nf(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 882 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask monotonic, align 4 883 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]] 884 // CHECK-ARM-ARM64: } 885 __int64 test_InterlockedExchange64_acq(__int64 volatile *value, __int64 mask) { 886 return _InterlockedExchange64_acq(value, mask); 887 } 888 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_acq(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 889 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask acquire, align 8 890 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]] 891 // CHECK-ARM-ARM64: } 892 __int64 test_InterlockedExchange64_rel(__int64 volatile *value, __int64 mask) { 893 return _InterlockedExchange64_rel(value, mask); 894 } 895 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_rel(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 896 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask release, align 8 897 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]] 898 // CHECK-ARM-ARM64: } 899 __int64 test_InterlockedExchange64_nf(__int64 volatile *value, __int64 mask) { 900 return _InterlockedExchange64_nf(value, mask); 901 } 902 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_nf(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 903 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask monotonic, align 8 904 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]] 905 // CHECK-ARM-ARM64: } 906 907 char test_InterlockedCompareExchange8_acq(char volatile *Destination, char Exchange, char Comperand) { 908 return _InterlockedCompareExchange8_acq(Destination, Exchange, Comperand); 909 } 910 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_acq(ptr{{.*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{ 911 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i8 %Comperand, i8 %Exchange acquire acquire, align 1 912 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0 913 // CHECK-ARM-ARM64: ret i8 [[RESULT]] 914 // CHECK-ARM-ARM64: } 915 916 char test_InterlockedCompareExchange8_rel(char volatile *Destination, char Exchange, char Comperand) { 917 return _InterlockedCompareExchange8_rel(Destination, Exchange, Comperand); 918 } 919 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_rel(ptr{{.*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{ 920 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i8 %Comperand, i8 %Exchange release monotonic, align 1 921 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0 922 // CHECK-ARM-ARM64: ret i8 [[RESULT]] 923 // CHECK-ARM-ARM64: } 924 925 char test_InterlockedCompareExchange8_nf(char volatile *Destination, char Exchange, char Comperand) { 926 return _InterlockedCompareExchange8_nf(Destination, Exchange, Comperand); 927 } 928 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedCompareExchange8_nf(ptr{{.*}}%Destination, i8{{[a-z_ ]*}}%Exchange, i8{{[a-z_ ]*}}%Comperand){{.*}}{ 929 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i8 %Comperand, i8 %Exchange monotonic monotonic, align 1 930 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i8, i1 } [[TMP]], 0 931 // CHECK-ARM-ARM64: ret i8 [[RESULT]] 932 // CHECK-ARM-ARM64: } 933 934 short test_InterlockedCompareExchange16_acq(short volatile *Destination, short Exchange, short Comperand) { 935 return _InterlockedCompareExchange16_acq(Destination, Exchange, Comperand); 936 } 937 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_acq(ptr{{.*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{ 938 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i16 %Comperand, i16 %Exchange acquire acquire, align 2 939 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0 940 // CHECK-ARM-ARM64: ret i16 [[RESULT]] 941 // CHECK-ARM-ARM64: } 942 943 short test_InterlockedCompareExchange16_rel(short volatile *Destination, short Exchange, short Comperand) { 944 return _InterlockedCompareExchange16_rel(Destination, Exchange, Comperand); 945 } 946 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_rel(ptr{{.*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{ 947 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i16 %Comperand, i16 %Exchange release monotonic, align 2 948 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0 949 // CHECK-ARM-ARM64: ret i16 [[RESULT]] 950 // CHECK-ARM-ARM64: } 951 952 short test_InterlockedCompareExchange16_nf(short volatile *Destination, short Exchange, short Comperand) { 953 return _InterlockedCompareExchange16_nf(Destination, Exchange, Comperand); 954 } 955 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedCompareExchange16_nf(ptr{{.*}}%Destination, i16{{[a-z_ ]*}}%Exchange, i16{{[a-z_ ]*}}%Comperand){{.*}}{ 956 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i16 %Comperand, i16 %Exchange monotonic monotonic, align 2 957 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i16, i1 } [[TMP]], 0 958 // CHECK-ARM-ARM64: ret i16 [[RESULT]] 959 // CHECK-ARM-ARM64: } 960 961 long test_InterlockedCompareExchange_acq(long volatile *Destination, long Exchange, long Comperand) { 962 return _InterlockedCompareExchange_acq(Destination, Exchange, Comperand); 963 } 964 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_acq(ptr{{.*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{ 965 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange acquire acquire, align 4 966 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0 967 // CHECK-ARM-ARM64: ret i32 [[RESULT]] 968 // CHECK-ARM-ARM64: } 969 970 long test_InterlockedCompareExchange_rel(long volatile *Destination, long Exchange, long Comperand) { 971 return _InterlockedCompareExchange_rel(Destination, Exchange, Comperand); 972 } 973 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_rel(ptr{{.*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{ 974 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange release monotonic, align 4 975 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0 976 // CHECK-ARM-ARM64: ret i32 [[RESULT]] 977 // CHECK-ARM-ARM64: } 978 979 long test_InterlockedCompareExchange_nf(long volatile *Destination, long Exchange, long Comperand) { 980 return _InterlockedCompareExchange_nf(Destination, Exchange, Comperand); 981 } 982 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedCompareExchange_nf(ptr{{.*}}%Destination, i32{{[a-z_ ]*}}%Exchange, i32{{[a-z_ ]*}}%Comperand){{.*}}{ 983 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i32 %Comperand, i32 %Exchange monotonic monotonic, align 4 984 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i32, i1 } [[TMP]], 0 985 // CHECK-ARM-ARM64: ret i32 [[RESULT]] 986 // CHECK-ARM-ARM64: } 987 988 __int64 test_InterlockedCompareExchange64_acq(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) { 989 return _InterlockedCompareExchange64_acq(Destination, Exchange, Comperand); 990 } 991 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_acq(ptr{{.*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{ 992 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i64 %Comperand, i64 %Exchange acquire acquire, align 8 993 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0 994 // CHECK-ARM-ARM64: ret i64 [[RESULT]] 995 // CHECK-ARM-ARM64: } 996 997 __int64 test_InterlockedCompareExchange64_rel(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) { 998 return _InterlockedCompareExchange64_rel(Destination, Exchange, Comperand); 999 } 1000 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_rel(ptr{{.*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{ 1001 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i64 %Comperand, i64 %Exchange release monotonic, align 8 1002 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0 1003 // CHECK-ARM-ARM64: ret i64 [[RESULT]] 1004 // CHECK-ARM-ARM64: } 1005 1006 __int64 test_InterlockedCompareExchange64_nf(__int64 volatile *Destination, __int64 Exchange, __int64 Comperand) { 1007 return _InterlockedCompareExchange64_nf(Destination, Exchange, Comperand); 1008 } 1009 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedCompareExchange64_nf(ptr{{.*}}%Destination, i64{{[a-z_ ]*}}%Exchange, i64{{[a-z_ ]*}}%Comperand){{.*}}{ 1010 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = cmpxchg volatile ptr %Destination, i64 %Comperand, i64 %Exchange monotonic monotonic, align 8 1011 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = extractvalue { i64, i1 } [[TMP]], 0 1012 // CHECK-ARM-ARM64: ret i64 [[RESULT]] 1013 // CHECK-ARM-ARM64: } 1014 1015 char test_InterlockedOr8_acq(char volatile *value, char mask) { 1016 return _InterlockedOr8_acq(value, mask); 1017 } 1018 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_acq(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 1019 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask acquire, align 1 1020 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]] 1021 // CHECK-ARM-ARM64: } 1022 1023 char test_InterlockedOr8_rel(char volatile *value, char mask) { 1024 return _InterlockedOr8_rel(value, mask); 1025 } 1026 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_rel(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 1027 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask release, align 1 1028 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]] 1029 // CHECK-ARM-ARM64: } 1030 1031 char test_InterlockedOr8_nf(char volatile *value, char mask) { 1032 return _InterlockedOr8_nf(value, mask); 1033 } 1034 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_nf(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 1035 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask monotonic, align 1 1036 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]] 1037 // CHECK-ARM-ARM64: } 1038 1039 short test_InterlockedOr16_acq(short volatile *value, short mask) { 1040 return _InterlockedOr16_acq(value, mask); 1041 } 1042 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_acq(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 1043 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask acquire, align 2 1044 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]] 1045 // CHECK-ARM-ARM64: } 1046 1047 short test_InterlockedOr16_rel(short volatile *value, short mask) { 1048 return _InterlockedOr16_rel(value, mask); 1049 } 1050 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_rel(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 1051 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask release, align 2 1052 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]] 1053 // CHECK-ARM-ARM64: } 1054 1055 short test_InterlockedOr16_nf(short volatile *value, short mask) { 1056 return _InterlockedOr16_nf(value, mask); 1057 } 1058 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_nf(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 1059 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask monotonic, align 2 1060 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]] 1061 // CHECK-ARM-ARM64: } 1062 1063 long test_InterlockedOr_acq(long volatile *value, long mask) { 1064 return _InterlockedOr_acq(value, mask); 1065 } 1066 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_acq(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 1067 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask acquire, align 4 1068 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]] 1069 // CHECK-ARM-ARM64: } 1070 1071 long test_InterlockedOr_rel(long volatile *value, long mask) { 1072 return _InterlockedOr_rel(value, mask); 1073 } 1074 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_rel(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 1075 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask release, align 4 1076 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]] 1077 // CHECK-ARM-ARM64: } 1078 1079 long test_InterlockedOr_nf(long volatile *value, long mask) { 1080 return _InterlockedOr_nf(value, mask); 1081 } 1082 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_nf(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 1083 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask monotonic, align 4 1084 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]] 1085 // CHECK-ARM-ARM64: } 1086 1087 __int64 test_InterlockedOr64_acq(__int64 volatile *value, __int64 mask) { 1088 return _InterlockedOr64_acq(value, mask); 1089 } 1090 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_acq(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 1091 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask acquire, align 8 1092 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]] 1093 // CHECK-ARM-ARM64: } 1094 1095 __int64 test_InterlockedOr64_rel(__int64 volatile *value, __int64 mask) { 1096 return _InterlockedOr64_rel(value, mask); 1097 } 1098 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_rel(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 1099 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask release, align 8 1100 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]] 1101 // CHECK-ARM-ARM64: } 1102 1103 __int64 test_InterlockedOr64_nf(__int64 volatile *value, __int64 mask) { 1104 return _InterlockedOr64_nf(value, mask); 1105 } 1106 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_nf(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 1107 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask monotonic, align 8 1108 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]] 1109 // CHECK-ARM-ARM64: } 1110 1111 char test_InterlockedXor8_acq(char volatile *value, char mask) { 1112 return _InterlockedXor8_acq(value, mask); 1113 } 1114 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_acq(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 1115 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask acquire, align 1 1116 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]] 1117 // CHECK-ARM-ARM64: } 1118 1119 char test_InterlockedXor8_rel(char volatile *value, char mask) { 1120 return _InterlockedXor8_rel(value, mask); 1121 } 1122 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_rel(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 1123 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask release, align 1 1124 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]] 1125 // CHECK-ARM-ARM64: } 1126 1127 char test_InterlockedXor8_nf(char volatile *value, char mask) { 1128 return _InterlockedXor8_nf(value, mask); 1129 } 1130 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_nf(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 1131 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask monotonic, align 1 1132 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]] 1133 // CHECK-ARM-ARM64: } 1134 1135 short test_InterlockedXor16_acq(short volatile *value, short mask) { 1136 return _InterlockedXor16_acq(value, mask); 1137 } 1138 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_acq(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 1139 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask acquire, align 2 1140 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]] 1141 // CHECK-ARM-ARM64: } 1142 1143 short test_InterlockedXor16_rel(short volatile *value, short mask) { 1144 return _InterlockedXor16_rel(value, mask); 1145 } 1146 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_rel(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 1147 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask release, align 2 1148 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]] 1149 // CHECK-ARM-ARM64: } 1150 1151 short test_InterlockedXor16_nf(short volatile *value, short mask) { 1152 return _InterlockedXor16_nf(value, mask); 1153 } 1154 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_nf(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 1155 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask monotonic, align 2 1156 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]] 1157 // CHECK-ARM-ARM64: } 1158 1159 long test_InterlockedXor_acq(long volatile *value, long mask) { 1160 return _InterlockedXor_acq(value, mask); 1161 } 1162 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_acq(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 1163 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask acquire, align 4 1164 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]] 1165 // CHECK-ARM-ARM64: } 1166 1167 long test_InterlockedXor_rel(long volatile *value, long mask) { 1168 return _InterlockedXor_rel(value, mask); 1169 } 1170 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_rel(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 1171 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask release, align 4 1172 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]] 1173 // CHECK-ARM-ARM64: } 1174 1175 long test_InterlockedXor_nf(long volatile *value, long mask) { 1176 return _InterlockedXor_nf(value, mask); 1177 } 1178 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_nf(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 1179 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask monotonic, align 4 1180 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]] 1181 // CHECK-ARM-ARM64: } 1182 1183 __int64 test_InterlockedXor64_acq(__int64 volatile *value, __int64 mask) { 1184 return _InterlockedXor64_acq(value, mask); 1185 } 1186 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_acq(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 1187 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask acquire, align 8 1188 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]] 1189 // CHECK-ARM-ARM64: } 1190 1191 __int64 test_InterlockedXor64_rel(__int64 volatile *value, __int64 mask) { 1192 return _InterlockedXor64_rel(value, mask); 1193 } 1194 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_rel(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 1195 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask release, align 8 1196 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]] 1197 // CHECK-ARM-ARM64: } 1198 1199 __int64 test_InterlockedXor64_nf(__int64 volatile *value, __int64 mask) { 1200 return _InterlockedXor64_nf(value, mask); 1201 } 1202 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_nf(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 1203 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask monotonic, align 8 1204 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]] 1205 // CHECK-ARM-ARM64: } 1206 1207 char test_InterlockedAnd8_acq(char volatile *value, char mask) { 1208 return _InterlockedAnd8_acq(value, mask); 1209 } 1210 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_acq(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 1211 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask acquire, align 1 1212 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]] 1213 // CHECK-ARM-ARM64: } 1214 1215 char test_InterlockedAnd8_rel(char volatile *value, char mask) { 1216 return _InterlockedAnd8_rel(value, mask); 1217 } 1218 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_rel(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 1219 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask release, align 1 1220 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]] 1221 // CHECK-ARM-ARM64: } 1222 1223 char test_InterlockedAnd8_nf(char volatile *value, char mask) { 1224 return _InterlockedAnd8_nf(value, mask); 1225 } 1226 // CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_nf(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{ 1227 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask monotonic, align 1 1228 // CHECK-ARM-ARM64: ret i8 [[RESULT:%[0-9]+]] 1229 // CHECK-ARM-ARM64: } 1230 1231 short test_InterlockedAnd16_acq(short volatile *value, short mask) { 1232 return _InterlockedAnd16_acq(value, mask); 1233 } 1234 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_acq(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 1235 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask acquire, align 2 1236 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]] 1237 // CHECK-ARM-ARM64: } 1238 1239 short test_InterlockedAnd16_rel(short volatile *value, short mask) { 1240 return _InterlockedAnd16_rel(value, mask); 1241 } 1242 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_rel(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 1243 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask release, align 2 1244 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]] 1245 // CHECK-ARM-ARM64: } 1246 1247 short test_InterlockedAnd16_nf(short volatile *value, short mask) { 1248 return _InterlockedAnd16_nf(value, mask); 1249 } 1250 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_nf(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{ 1251 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask monotonic, align 2 1252 // CHECK-ARM-ARM64: ret i16 [[RESULT:%[0-9]+]] 1253 // CHECK-ARM-ARM64: } 1254 1255 long test_InterlockedAnd_acq(long volatile *value, long mask) { 1256 return _InterlockedAnd_acq(value, mask); 1257 } 1258 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_acq(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 1259 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask acquire, align 4 1260 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]] 1261 // CHECK-ARM-ARM64: } 1262 1263 long test_InterlockedAnd_rel(long volatile *value, long mask) { 1264 return _InterlockedAnd_rel(value, mask); 1265 } 1266 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_rel(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 1267 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask release, align 4 1268 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]] 1269 // CHECK-ARM-ARM64: } 1270 1271 long test_InterlockedAnd_nf(long volatile *value, long mask) { 1272 return _InterlockedAnd_nf(value, mask); 1273 } 1274 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_nf(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{ 1275 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask monotonic, align 4 1276 // CHECK-ARM-ARM64: ret i32 [[RESULT:%[0-9]+]] 1277 // CHECK-ARM-ARM64: } 1278 1279 __int64 test_InterlockedAnd64_acq(__int64 volatile *value, __int64 mask) { 1280 return _InterlockedAnd64_acq(value, mask); 1281 } 1282 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_acq(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 1283 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask acquire, align 8 1284 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]] 1285 // CHECK-ARM-ARM64: } 1286 1287 __int64 test_InterlockedAnd64_rel(__int64 volatile *value, __int64 mask) { 1288 return _InterlockedAnd64_rel(value, mask); 1289 } 1290 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_rel(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 1291 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask release, align 8 1292 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]] 1293 // CHECK-ARM-ARM64: } 1294 1295 __int64 test_InterlockedAnd64_nf(__int64 volatile *value, __int64 mask) { 1296 return _InterlockedAnd64_nf(value, mask); 1297 } 1298 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_nf(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{ 1299 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask monotonic, align 8 1300 // CHECK-ARM-ARM64: ret i64 [[RESULT:%[0-9]+]] 1301 // CHECK-ARM-ARM64: } 1302 1303 short test_InterlockedIncrement16_acq(short volatile *Addend) { 1304 return _InterlockedIncrement16_acq(Addend); 1305 } 1306 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_acq(ptr{{.*}}%Addend){{.*}}{ 1307 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i16 1 acquire, align 2 1308 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1 1309 // CHECK-ARM-ARM64: ret i16 [[RESULT]] 1310 // CHECK-ARM-ARM64: } 1311 1312 short test_InterlockedIncrement16_rel(short volatile *Addend) { 1313 return _InterlockedIncrement16_rel(Addend); 1314 } 1315 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_rel(ptr{{.*}}%Addend){{.*}}{ 1316 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i16 1 release, align 2 1317 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1 1318 // CHECK-ARM-ARM64: ret i16 [[RESULT]] 1319 // CHECK-ARM-ARM64: } 1320 1321 short test_InterlockedIncrement16_nf(short volatile *Addend) { 1322 return _InterlockedIncrement16_nf(Addend); 1323 } 1324 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedIncrement16_nf(ptr{{.*}}%Addend){{.*}}{ 1325 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i16 1 monotonic, align 2 1326 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], 1 1327 // CHECK-ARM-ARM64: ret i16 [[RESULT]] 1328 // CHECK-ARM-ARM64: } 1329 1330 long test_InterlockedIncrement_acq(long volatile *Addend) { 1331 return _InterlockedIncrement_acq(Addend); 1332 } 1333 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_acq(ptr{{.*}}%Addend){{.*}}{ 1334 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i32 1 acquire, align 4 1335 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1 1336 // CHECK-ARM-ARM64: ret i32 [[RESULT]] 1337 // CHECK-ARM-ARM64: } 1338 1339 long test_InterlockedIncrement_rel(long volatile *Addend) { 1340 return _InterlockedIncrement_rel(Addend); 1341 } 1342 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_rel(ptr{{.*}}%Addend){{.*}}{ 1343 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i32 1 release, align 4 1344 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1 1345 // CHECK-ARM-ARM64: ret i32 [[RESULT]] 1346 // CHECK-ARM-ARM64: } 1347 1348 long test_InterlockedIncrement_nf(long volatile *Addend) { 1349 return _InterlockedIncrement_nf(Addend); 1350 } 1351 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedIncrement_nf(ptr{{.*}}%Addend){{.*}}{ 1352 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i32 1 monotonic, align 4 1353 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], 1 1354 // CHECK-ARM-ARM64: ret i32 [[RESULT]] 1355 // CHECK-ARM-ARM64: } 1356 1357 __int64 test_InterlockedIncrement64_acq(__int64 volatile *Addend) { 1358 return _InterlockedIncrement64_acq(Addend); 1359 } 1360 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_acq(ptr{{.*}}%Addend){{.*}}{ 1361 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i64 1 acquire, align 8 1362 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1 1363 // CHECK-ARM-ARM64: ret i64 [[RESULT]] 1364 // CHECK-ARM-ARM64: } 1365 1366 __int64 test_InterlockedIncrement64_rel(__int64 volatile *Addend) { 1367 return _InterlockedIncrement64_rel(Addend); 1368 } 1369 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_rel(ptr{{.*}}%Addend){{.*}}{ 1370 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i64 1 release, align 8 1371 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1 1372 // CHECK-ARM-ARM64: ret i64 [[RESULT]] 1373 // CHECK-ARM-ARM64: } 1374 1375 __int64 test_InterlockedIncrement64_nf(__int64 volatile *Addend) { 1376 return _InterlockedIncrement64_nf(Addend); 1377 } 1378 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedIncrement64_nf(ptr{{.*}}%Addend){{.*}}{ 1379 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw add ptr %Addend, i64 1 monotonic, align 8 1380 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], 1 1381 // CHECK-ARM-ARM64: ret i64 [[RESULT]] 1382 // CHECK-ARM-ARM64: } 1383 1384 short test_InterlockedDecrement16_acq(short volatile *Addend) { 1385 return _InterlockedDecrement16_acq(Addend); 1386 } 1387 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_acq(ptr{{.*}}%Addend){{.*}}{ 1388 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i16 1 acquire, align 2 1389 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1 1390 // CHECK-ARM-ARM64: ret i16 [[RESULT]] 1391 // CHECK-ARM-ARM64: } 1392 1393 short test_InterlockedDecrement16_rel(short volatile *Addend) { 1394 return _InterlockedDecrement16_rel(Addend); 1395 } 1396 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_rel(ptr{{.*}}%Addend){{.*}}{ 1397 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i16 1 release, align 2 1398 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1 1399 // CHECK-ARM-ARM64: ret i16 [[RESULT]] 1400 // CHECK-ARM-ARM64: } 1401 1402 short test_InterlockedDecrement16_nf(short volatile *Addend) { 1403 return _InterlockedDecrement16_nf(Addend); 1404 } 1405 // CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedDecrement16_nf(ptr{{.*}}%Addend){{.*}}{ 1406 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i16 1 monotonic, align 2 1407 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i16 [[TMP]], -1 1408 // CHECK-ARM-ARM64: ret i16 [[RESULT]] 1409 // CHECK-ARM-ARM64: } 1410 1411 long test_InterlockedDecrement_acq(long volatile *Addend) { 1412 return _InterlockedDecrement_acq(Addend); 1413 } 1414 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_acq(ptr{{.*}}%Addend){{.*}}{ 1415 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 acquire, align 4 1416 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1 1417 // CHECK-ARM-ARM64: ret i32 [[RESULT]] 1418 // CHECK-ARM-ARM64: } 1419 1420 long test_InterlockedDecrement_rel(long volatile *Addend) { 1421 return _InterlockedDecrement_rel(Addend); 1422 } 1423 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_rel(ptr{{.*}}%Addend){{.*}}{ 1424 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 release, align 4 1425 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1 1426 // CHECK-ARM-ARM64: ret i32 [[RESULT]] 1427 // CHECK-ARM-ARM64: } 1428 1429 long test_InterlockedDecrement_nf(long volatile *Addend) { 1430 return _InterlockedDecrement_nf(Addend); 1431 } 1432 // CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedDecrement_nf(ptr{{.*}}%Addend){{.*}}{ 1433 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i32 1 monotonic, align 4 1434 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i32 [[TMP]], -1 1435 // CHECK-ARM-ARM64: ret i32 [[RESULT]] 1436 // CHECK-ARM-ARM64: } 1437 1438 __int64 test_InterlockedDecrement64_acq(__int64 volatile *Addend) { 1439 return _InterlockedDecrement64_acq(Addend); 1440 } 1441 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_acq(ptr{{.*}}%Addend){{.*}}{ 1442 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i64 1 acquire, align 8 1443 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1 1444 // CHECK-ARM-ARM64: ret i64 [[RESULT]] 1445 // CHECK-ARM-ARM64: } 1446 1447 __int64 test_InterlockedDecrement64_rel(__int64 volatile *Addend) { 1448 return _InterlockedDecrement64_rel(Addend); 1449 } 1450 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_rel(ptr{{.*}}%Addend){{.*}}{ 1451 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i64 1 release, align 8 1452 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1 1453 // CHECK-ARM-ARM64: ret i64 [[RESULT]] 1454 // CHECK-ARM-ARM64: } 1455 1456 __int64 test_InterlockedDecrement64_nf(__int64 volatile *Addend) { 1457 return _InterlockedDecrement64_nf(Addend); 1458 } 1459 // CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedDecrement64_nf(ptr{{.*}}%Addend){{.*}}{ 1460 // CHECK-ARM-ARM64: [[TMP:%[0-9]+]] = atomicrmw sub ptr %Addend, i64 1 monotonic, align 8 1461 // CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = add i64 [[TMP]], -1 1462 // CHECK-ARM-ARM64: ret i64 [[RESULT]] 1463 // CHECK-ARM-ARM64: } 1464 #endif 1465 1466 void test__fastfail(void) { 1467 __fastfail(42); 1468 } 1469 // CHECK-LABEL: define{{.*}} void @test__fastfail() 1470 // CHECK-ARM: call void asm sideeffect "udf #251", "{r0}"(i32 42) #[[NORETURN:[0-9]+]] 1471 // CHECK-INTEL: call void asm sideeffect "int $$0x29", "{cx}"(i32 42) #[[NORETURN]] 1472 // CHECK-ARM64: call void asm sideeffect "brk #0xF003", "{w0}"(i32 42) #[[NORETURN:[0-9]+]] 1473 1474 // Attributes come last. 1475 1476 // CHECK: attributes #[[NORETURN]] = { noreturn{{.*}} } 1477