// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-linux-gnu -target-feature +neon \
// RUN: -target-feature +sha3 -emit-llvm -o - %s \
// RUN: | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_neon.h>

// CHECK-LABEL: @test_vsha512h(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512h
//
void test_vsha512h(uint64x2_t hash_ed, uint64x2_t hash_gf, uint64x2_t kwh_kwh2) {
  uint64x2_t result = vsha512hq_u64(hash_ed, hash_gf, kwh_kwh2);
}

// CHECK-LABEL: @test_vsha512h2(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512h2
//
void test_vsha512h2(uint64x2_t sum_ab, uint64x2_t hash_c_, uint64x2_t hash_ab) {
  uint64x2_t result = vsha512h2q_u64(sum_ab, hash_c_, hash_ab);
}

// CHECK-LABEL: @test_vsha512su0(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512su0
//
void test_vsha512su0(uint64x2_t w0_1, uint64x2_t w2_) {
  uint64x2_t result = vsha512su0q_u64(w0_1, w2_);
}

// CHECK-LABEL: @test_vsha512su1(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.sha512su1
//
void test_vsha512su1(uint64x2_t s01_s02, uint64x2_t w14_15, uint64x2_t w9_10) {
  uint64x2_t result = vsha512su1q_u64(s01_s02, w14_15, w9_10);
}

// CHECK-LABEL: @test_vrax1(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.rax1
//
void test_vrax1(uint64x2_t a, uint64x2_t b) {
  uint64x2_t result = vrax1q_u64(a, b);
}

// CHECK-LABEL: @test_xar(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.xar
//
void test_xar(uint64x2_t a, uint64x2_t b) {
  uint64x2_t result = vxarq_u64(a, b, 10);
}

// CHECK-LABEL: @test_vbcax_u8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.bcaxu.v16i8
//
void test_vbcax_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) {
  uint8x16_t result = vbcaxq_u8(a, b, c);
}

// CHECK-LABEL: @test_vbcax_u16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.bcaxu.v8i16
//
void test_vbcax_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) {
  uint16x8_t result = vbcaxq_u16(a, b, c);
}

// CHECK-LABEL: @test_vbcax_u32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.bcaxu.v4i32
//
void test_vbcax_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) {
  uint32x4_t result = vbcaxq_u32(a, b, c);
}

// CHECK-LABEL: @test_vbcax_u64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.bcaxu.v2i64
//
void test_vbcax_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) {
  uint64x2_t result = vbcaxq_u64(a, b, c);
}

// CHECK-LABEL: @test_vbcax_s8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.bcaxs.v16i8
//
void test_vbcax_s8(int8x16_t a, int8x16_t b, int8x16_t c) {
  int8x16_t result = vbcaxq_s8(a, b, c);
}

// CHECK-LABEL: @test_vbcax_s16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.bcaxs.v8i16
//
void test_vbcax_s16(int16x8_t a, int16x8_t b, int16x8_t c) {
  int16x8_t result = vbcaxq_s16(a, b, c);
}

// CHECK-LABEL: @test_vbcax_s32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.bcaxs.v4i32
//
void test_vbcax_s32(int32x4_t a, int32x4_t b, int32x4_t c) {
  int32x4_t result = vbcaxq_s32(a, b, c);
}

// CHECK-LABEL: @test_vbcax_s64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.bcaxs.v2i64
//
void test_vbcax_s64(int64x2_t a, int64x2_t b, int64x2_t c) {
  int64x2_t result = vbcaxq_s64(a, b, c);
}

// CHECK-LABEL: @test_veor3_u8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.eor3u.v16i8
//
void test_veor3_u8(uint8x16_t a, uint8x16_t b, uint8x16_t c) {
  uint8x16_t result = veor3q_u8(a, b, c);
}

// CHECK-LABEL: @test_veor3_u16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.eor3u.v8i16
//
void test_veor3_u16(uint16x8_t a, uint16x8_t b, uint16x8_t c) {
  uint16x8_t result = veor3q_u16(a, b, c);
}

// CHECK-LABEL: @test_veor3_u32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.eor3u.v4i32
//
void test_veor3_u32(uint32x4_t a, uint32x4_t b, uint32x4_t c) {
  uint32x4_t result = veor3q_u32(a, b, c);
}

// CHECK-LABEL: @test_veor3_u64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.eor3u.v2i64
//
void test_veor3_u64(uint64x2_t a, uint64x2_t b, uint64x2_t c) {
  uint64x2_t result = veor3q_u64(a, b, c);
}

// CHECK-LABEL: @test_veor3_s8(
// CHECK: call <16 x i8> @llvm.aarch64.crypto.eor3s.v16i8
//
void test_veor3_s8(int8x16_t a, int8x16_t b, int8x16_t c) {
  int8x16_t result = veor3q_s8(a, b, c);
}

// CHECK-LABEL: @test_veor3_s16(
// CHECK: call <8 x i16> @llvm.aarch64.crypto.eor3s.v8i16
//
void test_veor3_s16(int16x8_t a, int16x8_t b, int16x8_t c) {
  int16x8_t result = veor3q_s16(a, b, c);
}

// CHECK-LABEL: @test_veor3_s32(
// CHECK: call <4 x i32> @llvm.aarch64.crypto.eor3s.v4i32
//
void test_veor3_s32(int32x4_t a, int32x4_t b, int32x4_t c) {
  int32x4_t result = veor3q_s32(a, b, c);
}

// CHECK-LABEL: @test_veor3_s64(
// CHECK: call <2 x i64> @llvm.aarch64.crypto.eor3s.v2i64
//
void test_veor3_s64(int64x2_t a, int64x2_t b, int64x2_t c) {
  int64x2_t result = veor3q_s64(a, b, c);
}