Lines matching defs:mask
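
The functions below are an excerpt from a Clang CodeGen test for the MSVC _Interlocked* family: each wrapper calls one intrinsic with a pointer operand (value) and a data operand (mask), and the FileCheck CHECK lines assert that the call lowers to a single LLVM 'atomicrmw' instruction with the expected operation, operand width, memory ordering, and alignment. The CHECK-ARM-ARM64 lines cover the _acq/_rel/_nf variants, which MSVC provides only for ARM and ARM64 targets.
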
char test_InterlockedExchange8(char volatile *value, char mask) {
  return _InterlockedExchange8(value, mask);
}
// CHECK: define{{.*}}i8 @test_InterlockedExchange8(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask seq_cst, align 1

short test_InterlockedExchange16(short volatile *value, short mask) {
  return _InterlockedExchange16(value, mask);
}
// CHECK: define{{.*}}i16 @test_InterlockedExchange16(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask seq_cst, align 2

long test_InterlockedExchange(long volatile *value, long mask) {
  return _InterlockedExchange(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedExchange(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask seq_cst, align 4

char test_InterlockedExchangeAdd8(char volatile *value, char mask) {
  return _InterlockedExchangeAdd8(value, mask);
}
// CHECK: define{{.*}}i8 @test_InterlockedExchangeAdd8(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask seq_cst, align 1

short test_InterlockedExchangeAdd16(short volatile *value, short mask) {
  return _InterlockedExchangeAdd16(value, mask);
}
// CHECK: define{{.*}}i16 @test_InterlockedExchangeAdd16(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask seq_cst, align 2

long test_InterlockedExchangeAdd(long volatile *value, long mask) {
  return _InterlockedExchangeAdd(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedExchangeAdd(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask seq_cst, align 4

char test_InterlockedExchangeSub8(char volatile *value, char mask) {
  return _InterlockedExchangeSub8(value, mask);
}
// CHECK: define{{.*}}i8 @test_InterlockedExchangeSub8(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i8 %mask seq_cst, align 1

short test_InterlockedExchangeSub16(short volatile *value, short mask) {
  return _InterlockedExchangeSub16(value, mask);
}
// CHECK: define{{.*}}i16 @test_InterlockedExchangeSub16(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i16 %mask seq_cst, align 2

long test_InterlockedExchangeSub(long volatile *value, long mask) {
  return _InterlockedExchangeSub(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedExchangeSub(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i32 %mask seq_cst, align 4

char test_InterlockedOr8(char volatile *value, char mask) {
  return _InterlockedOr8(value, mask);
}
// CHECK: define{{.*}}i8 @test_InterlockedOr8(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask seq_cst, align 1

short test_InterlockedOr16(short volatile *value, short mask) {
  return _InterlockedOr16(value, mask);
}
// CHECK: define{{.*}}i16 @test_InterlockedOr16(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask seq_cst, align 2

long test_InterlockedOr(long volatile *value, long mask) {
  return _InterlockedOr(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedOr(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask seq_cst, align 4

char test_InterlockedXor8(char volatile *value, char mask) {
  return _InterlockedXor8(value, mask);
}
// CHECK: define{{.*}}i8 @test_InterlockedXor8(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask seq_cst, align 1

short test_InterlockedXor16(short volatile *value, short mask) {
  return _InterlockedXor16(value, mask);
}
// CHECK: define{{.*}}i16 @test_InterlockedXor16(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask seq_cst, align 2

long test_InterlockedXor(long volatile *value, long mask) {
  return _InterlockedXor(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedXor(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask seq_cst, align 4

char test_InterlockedAnd8(char volatile *value, char mask) {
  return _InterlockedAnd8(value, mask);
}
// CHECK: define{{.*}}i8 @test_InterlockedAnd8(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask seq_cst, align 1

short test_InterlockedAnd16(short volatile *value, short mask) {
  return _InterlockedAnd16(value, mask);
}
// CHECK: define{{.*}}i16 @test_InterlockedAnd16(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask seq_cst, align 2

long test_InterlockedAnd(long volatile *value, long mask) {
  return _InterlockedAnd(value, mask);
}
// CHECK: define{{.*}}i32 @test_InterlockedAnd(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask seq_cst, align 4

__int64 test_InterlockedExchange64(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchange64(value, mask);
}
// CHECK: define{{.*}}i64 @test_InterlockedExchange64(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask seq_cst, align 8

__int64 test_InterlockedExchangeAdd64(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchangeAdd64(value, mask);
}
// CHECK: define{{.*}}i64 @test_InterlockedExchangeAdd64(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask seq_cst, align 8

__int64 test_InterlockedExchangeSub64(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchangeSub64(value, mask);
}
// CHECK: define{{.*}}i64 @test_InterlockedExchangeSub64(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw sub ptr %value, i64 %mask seq_cst, align 8

__int64 test_InterlockedOr64(__int64 volatile *value, __int64 mask) {
  return _InterlockedOr64(value, mask);
}
// CHECK: define{{.*}}i64 @test_InterlockedOr64(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask seq_cst, align 8

__int64 test_InterlockedXor64(__int64 volatile *value, __int64 mask) {
  return _InterlockedXor64(value, mask);
}
// CHECK: define{{.*}}i64 @test_InterlockedXor64(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask seq_cst, align 8

__int64 test_InterlockedAnd64(__int64 volatile *value, __int64 mask) {
  return _InterlockedAnd64(value, mask);
}
// CHECK: define{{.*}}i64 @test_InterlockedAnd64(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask seq_cst, align 8

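For context, here is a minimal sketch of how the sequentially consistent forms above are typically used. The shared variables and function names are hypothetical; <intrin.h> is assumed to declare the intrinsics, as it does in MSVC-compatible environments.

#include <intrin.h>

// Hypothetical shared state updated concurrently from several threads.
static volatile long g_counter;
static volatile long g_flags;

long bump_counter(void) {
  // Atomic fetch-add: returns the value held *before* the addition,
  // matching the 'atomicrmw add ... seq_cst' lowering checked above.
  return _InterlockedExchangeAdd(&g_counter, 1);
}

void set_flag(long bit) {
  // Atomic fetch-or into the flag word ('atomicrmw or ... seq_cst').
  _InterlockedOr(&g_flags, bit);
}
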
char test_InterlockedExchangeAdd8_acq(char volatile *value, char mask) {
  return _InterlockedExchangeAdd8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_acq(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask acquire, align 1

char test_InterlockedExchangeAdd8_rel(char volatile *value, char mask) {
  return _InterlockedExchangeAdd8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_rel(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask release, align 1

char test_InterlockedExchangeAdd8_nf(char volatile *value, char mask) {
  return _InterlockedExchangeAdd8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchangeAdd8_nf(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i8 %mask monotonic, align 1

short test_InterlockedExchangeAdd16_acq(short volatile *value, short mask) {
  return _InterlockedExchangeAdd16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_acq(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask acquire, align 2

short test_InterlockedExchangeAdd16_rel(short volatile *value, short mask) {
  return _InterlockedExchangeAdd16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_rel(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask release, align 2

short test_InterlockedExchangeAdd16_nf(short volatile *value, short mask) {
  return _InterlockedExchangeAdd16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchangeAdd16_nf(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i16 %mask monotonic, align 2

long test_InterlockedExchangeAdd_acq(long volatile *value, long mask) {
  return _InterlockedExchangeAdd_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_acq(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask acquire, align 4

long test_InterlockedExchangeAdd_rel(long volatile *value, long mask) {
  return _InterlockedExchangeAdd_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_rel(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask release, align 4

long test_InterlockedExchangeAdd_nf(long volatile *value, long mask) {
  return _InterlockedExchangeAdd_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchangeAdd_nf(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i32 %mask monotonic, align 4

__int64 test_InterlockedExchangeAdd64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchangeAdd64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_acq(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask acquire, align 8

__int64 test_InterlockedExchangeAdd64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchangeAdd64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_rel(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask release, align 8

__int64 test_InterlockedExchangeAdd64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchangeAdd64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchangeAdd64_nf(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw add ptr %value, i64 %mask monotonic, align 8

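The suffix on each intrinsic selects the memory ordering asserted in the matching CHECK line: no suffix lowers to seq_cst, _acq to acquire, _rel to release, and _nf ("no fence") to monotonic. The same four-way pattern repeats for every operation that follows (xchg, or, xor, and). For comparison, a roughly equivalent C11 formulation of the acquire-ordered 16-bit fetch-add; this sketch is illustrative and not part of the test:

#include <stdatomic.h>

// C11 counterpart of _InterlockedExchangeAdd16_acq: atomic fetch-add
// with acquire ordering, returning the previous value.
short fetch_add16_acquire(_Atomic short *p, short v) {
  return atomic_fetch_add_explicit(p, v, memory_order_acquire);
}
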
char test_InterlockedExchange8_acq(char volatile *value, char mask) {
  return _InterlockedExchange8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_acq(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask acquire, align 1

char test_InterlockedExchange8_rel(char volatile *value, char mask) {
  return _InterlockedExchange8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_rel(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask release, align 1

char test_InterlockedExchange8_nf(char volatile *value, char mask) {
  return _InterlockedExchange8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedExchange8_nf(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i8 %mask monotonic, align 1

short test_InterlockedExchange16_acq(short volatile *value, short mask) {
  return _InterlockedExchange16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_acq(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask acquire, align 2

short test_InterlockedExchange16_rel(short volatile *value, short mask) {
  return _InterlockedExchange16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_rel(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask release, align 2

short test_InterlockedExchange16_nf(short volatile *value, short mask) {
  return _InterlockedExchange16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedExchange16_nf(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i16 %mask monotonic, align 2

long test_InterlockedExchange_acq(long volatile *value, long mask) {
  return _InterlockedExchange_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_acq(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask acquire, align 4

long test_InterlockedExchange_rel(long volatile *value, long mask) {
  return _InterlockedExchange_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_rel(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask release, align 4

long test_InterlockedExchange_nf(long volatile *value, long mask) {
  return _InterlockedExchange_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedExchange_nf(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i32 %mask monotonic, align 4

__int64 test_InterlockedExchange64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchange64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_acq(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask acquire, align 8

__int64 test_InterlockedExchange64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchange64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_rel(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask release, align 8

__int64 test_InterlockedExchange64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedExchange64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedExchange64_nf(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xchg ptr %value, i64 %mask monotonic, align 8

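An acquire-ordered exchange paired with a release-ordered store-back is the classic spinlock construction; a minimal sketch using the variants tested above, with a hypothetical lock type, assuming the ARM/ARM64 MSVC-style environment in which these intrinsics are declared:

#include <intrin.h>

typedef struct { long volatile state; } spinlock_t; // hypothetical

void spin_lock(spinlock_t *l) {
  // Acquire ordering on the winning exchange keeps the critical
  // section from being hoisted above the lock acquisition.
  while (_InterlockedExchange_acq(&l->state, 1) != 0) {
    // spin until the previous holder writes 0
  }
}

void spin_unlock(spinlock_t *l) {
  // Release ordering publishes the critical section's writes before
  // other threads can observe the lock as free.
  _InterlockedExchange_rel(&l->state, 0);
}
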
char test_InterlockedOr8_acq(char volatile *value, char mask) {
  return _InterlockedOr8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_acq(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask acquire, align 1

char test_InterlockedOr8_rel(char volatile *value, char mask) {
  return _InterlockedOr8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_rel(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask release, align 1

char test_InterlockedOr8_nf(char volatile *value, char mask) {
  return _InterlockedOr8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedOr8_nf(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i8 %mask monotonic, align 1

short test_InterlockedOr16_acq(short volatile *value, short mask) {
  return _InterlockedOr16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_acq(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask acquire, align 2

short test_InterlockedOr16_rel(short volatile *value, short mask) {
  return _InterlockedOr16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_rel(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask release, align 2

short test_InterlockedOr16_nf(short volatile *value, short mask) {
  return _InterlockedOr16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedOr16_nf(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i16 %mask monotonic, align 2

long test_InterlockedOr_acq(long volatile *value, long mask) {
  return _InterlockedOr_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_acq(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask acquire, align 4

long test_InterlockedOr_rel(long volatile *value, long mask) {
  return _InterlockedOr_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_rel(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask release, align 4

long test_InterlockedOr_nf(long volatile *value, long mask) {
  return _InterlockedOr_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedOr_nf(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i32 %mask monotonic, align 4

__int64 test_InterlockedOr64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedOr64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_acq(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask acquire, align 8

__int64 test_InterlockedOr64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedOr64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_rel(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask release, align 8

__int64 test_InterlockedOr64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedOr64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedOr64_nf(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw or ptr %value, i64 %mask monotonic, align 8

char test_InterlockedXor8_acq(char volatile *value, char mask) {
  return _InterlockedXor8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_acq(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask acquire, align 1

char test_InterlockedXor8_rel(char volatile *value, char mask) {
  return _InterlockedXor8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_rel(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask release, align 1

char test_InterlockedXor8_nf(char volatile *value, char mask) {
  return _InterlockedXor8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedXor8_nf(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i8 %mask monotonic, align 1

short test_InterlockedXor16_acq(short volatile *value, short mask) {
  return _InterlockedXor16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_acq(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask acquire, align 2

short test_InterlockedXor16_rel(short volatile *value, short mask) {
  return _InterlockedXor16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_rel(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask release, align 2

short test_InterlockedXor16_nf(short volatile *value, short mask) {
  return _InterlockedXor16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedXor16_nf(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i16 %mask monotonic, align 2

long test_InterlockedXor_acq(long volatile *value, long mask) {
  return _InterlockedXor_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_acq(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask acquire, align 4

long test_InterlockedXor_rel(long volatile *value, long mask) {
  return _InterlockedXor_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_rel(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask release, align 4

long test_InterlockedXor_nf(long volatile *value, long mask) {
  return _InterlockedXor_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedXor_nf(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i32 %mask monotonic, align 4

__int64 test_InterlockedXor64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedXor64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_acq(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask acquire, align 8

__int64 test_InterlockedXor64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedXor64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_rel(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask release, align 8

__int64 test_InterlockedXor64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedXor64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedXor64_nf(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw xor ptr %value, i64 %mask monotonic, align 8

char test_InterlockedAnd8_acq(char volatile *value, char mask) {
  return _InterlockedAnd8_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_acq(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask acquire, align 1

char test_InterlockedAnd8_rel(char volatile *value, char mask) {
  return _InterlockedAnd8_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_rel(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask release, align 1

char test_InterlockedAnd8_nf(char volatile *value, char mask) {
  return _InterlockedAnd8_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i8 @test_InterlockedAnd8_nf(ptr{{.*}}%value, i8{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i8 %mask monotonic, align 1

short test_InterlockedAnd16_acq(short volatile *value, short mask) {
  return _InterlockedAnd16_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_acq(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask acquire, align 2

short test_InterlockedAnd16_rel(short volatile *value, short mask) {
  return _InterlockedAnd16_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_rel(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask release, align 2

short test_InterlockedAnd16_nf(short volatile *value, short mask) {
  return _InterlockedAnd16_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i16 @test_InterlockedAnd16_nf(ptr{{.*}}%value, i16{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i16 %mask monotonic, align 2

long test_InterlockedAnd_acq(long volatile *value, long mask) {
  return _InterlockedAnd_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_acq(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask acquire, align 4

long test_InterlockedAnd_rel(long volatile *value, long mask) {
  return _InterlockedAnd_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_rel(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask release, align 4

long test_InterlockedAnd_nf(long volatile *value, long mask) {
  return _InterlockedAnd_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i32 @test_InterlockedAnd_nf(ptr{{.*}}%value, i32{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i32 %mask monotonic, align 4

__int64 test_InterlockedAnd64_acq(__int64 volatile *value, __int64 mask) {
  return _InterlockedAnd64_acq(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_acq(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask acquire, align 8

__int64 test_InterlockedAnd64_rel(__int64 volatile *value, __int64 mask) {
  return _InterlockedAnd64_rel(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_rel(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask release, align 8

__int64 test_InterlockedAnd64_nf(__int64 volatile *value, __int64 mask) {
  return _InterlockedAnd64_nf(value, mask);
}
// CHECK-ARM-ARM64: define{{.*}}i64 @test_InterlockedAnd64_nf(ptr{{.*}}%value, i64{{[a-z_ ]*}}%mask){{.*}}{
// CHECK-ARM-ARM64: [[RESULT:%[0-9]+]] = atomicrmw and ptr %value, i64 %mask monotonic, align 8
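
Finally, the monotonic lowering of the _nf ("no fence") variants provides atomicity with no ordering guarantee, which suffices for things like event counters; a minimal sketch under the same assumptions as the examples above:

#include <intrin.h>

// Hypothetical event counter: concurrent increments must not be lost,
// but no other memory traffic needs to be ordered against them.
static volatile long g_events;

void count_event(void) {
  // Lowers to 'atomicrmw add ... monotonic' (per the CHECK-ARM-ARM64
  // lines above), avoiding the fences the seq_cst form would imply.
  _InterlockedExchangeAdd_nf(&g_events, 1);
}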