// RUN: %clang_cc1 -triple aarch64 -target-feature +neon -target-feature +bf16 \
// RUN: -disable-O0-optnone -emit-llvm -o - %s \
// RUN: | opt -S -passes=mem2reg \
// RUN: | FileCheck %s

// REQUIRES: aarch64-registered-target
8 #include <arm_neon.h>
9 
10 // CHECK-LABEL: @test_vreinterpret_bf16_s8(
11 // CHECK-NEXT:  entry:
12 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8> [[A:%.*]] to <4 x bfloat>
13 // CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
14 //
15 bfloat16x4_t test_vreinterpret_bf16_s8(int8x8_t a)      { return vreinterpret_bf16_s8(a);    }
16 // CHECK-LABEL: @test_vreinterpret_bf16_s16(
17 // CHECK-NEXT:  entry:
18 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A:%.*]] to <4 x bfloat>
19 // CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
20 //
21 bfloat16x4_t test_vreinterpret_bf16_s16(int16x4_t a)    { return vreinterpret_bf16_s16(a);   }
22 // CHECK-LABEL: @test_vreinterpret_bf16_s32(
23 // CHECK-NEXT:  entry:
24 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A:%.*]] to <4 x bfloat>
25 // CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
26 //
27 bfloat16x4_t test_vreinterpret_bf16_s32(int32x2_t a)    { return vreinterpret_bf16_s32(a);   }
28 // CHECK-LABEL: @test_vreinterpret_bf16_f32(
29 // CHECK-NEXT:  entry:
30 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x float> [[A:%.*]] to <4 x bfloat>
31 // CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
32 //
33 bfloat16x4_t test_vreinterpret_bf16_f32(float32x2_t a)  { return vreinterpret_bf16_f32(a);   }
34 // CHECK-LABEL: @test_vreinterpret_bf16_u8(
35 // CHECK-NEXT:  entry:
36 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8> [[A:%.*]] to <4 x bfloat>
37 // CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
38 //
39 bfloat16x4_t test_vreinterpret_bf16_u8(uint8x8_t a)     { return vreinterpret_bf16_u8(a);    }
40 // CHECK-LABEL: @test_vreinterpret_bf16_u16(
41 // CHECK-NEXT:  entry:
42 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A:%.*]] to <4 x bfloat>
43 // CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
44 //
45 bfloat16x4_t test_vreinterpret_bf16_u16(uint16x4_t a)   { return vreinterpret_bf16_u16(a);   }
46 // CHECK-LABEL: @test_vreinterpret_bf16_u32(
47 // CHECK-NEXT:  entry:
48 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i32> [[A:%.*]] to <4 x bfloat>
49 // CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
50 //
51 bfloat16x4_t test_vreinterpret_bf16_u32(uint32x2_t a)   { return vreinterpret_bf16_u32(a);   }
52 // CHECK-LABEL: @test_vreinterpret_bf16_p8(
53 // CHECK-NEXT:  entry:
54 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8> [[A:%.*]] to <4 x bfloat>
55 // CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
56 //
57 bfloat16x4_t test_vreinterpret_bf16_p8(poly8x8_t a)     { return vreinterpret_bf16_p8(a);    }
58 // CHECK-LABEL: @test_vreinterpret_bf16_p16(
59 // CHECK-NEXT:  entry:
60 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i16> [[A:%.*]] to <4 x bfloat>
61 // CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
62 //
63 bfloat16x4_t test_vreinterpret_bf16_p16(poly16x4_t a)   { return vreinterpret_bf16_p16(a);   }
64 // CHECK-LABEL: @test_vreinterpret_bf16_u64(
65 // CHECK-NEXT:  entry:
66 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[A:%.*]] to <4 x bfloat>
67 // CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
68 //
69 bfloat16x4_t test_vreinterpret_bf16_u64(uint64x1_t a)   { return vreinterpret_bf16_u64(a);   }
70 // CHECK-LABEL: @test_vreinterpret_bf16_s64(
71 // CHECK-NEXT:  entry:
72 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[A:%.*]] to <4 x bfloat>
73 // CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
74 //
75 bfloat16x4_t test_vreinterpret_bf16_s64(int64x1_t a)    { return vreinterpret_bf16_s64(a);   }
76 // CHECK-LABEL: @test_vreinterpretq_bf16_s8(
77 // CHECK-NEXT:  entry:
78 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i8> [[A:%.*]] to <8 x bfloat>
79 // CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
80 //
81 bfloat16x8_t test_vreinterpretq_bf16_s8(int8x16_t a)    { return vreinterpretq_bf16_s8(a);   }
82 // CHECK-LABEL: @test_vreinterpretq_bf16_s16(
83 // CHECK-NEXT:  entry:
84 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A:%.*]] to <8 x bfloat>
85 // CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
86 //
87 bfloat16x8_t test_vreinterpretq_bf16_s16(int16x8_t a)   { return vreinterpretq_bf16_s16(a);  }
88 // CHECK-LABEL: @test_vreinterpretq_bf16_s32(
89 // CHECK-NEXT:  entry:
90 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A:%.*]] to <8 x bfloat>
91 // CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
92 //
93 bfloat16x8_t test_vreinterpretq_bf16_s32(int32x4_t a)   { return vreinterpretq_bf16_s32(a);  }
94 // CHECK-LABEL: @test_vreinterpretq_bf16_f32(
95 // CHECK-NEXT:  entry:
96 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <8 x bfloat>
97 // CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
98 //
99 bfloat16x8_t test_vreinterpretq_bf16_f32(float32x4_t a) { return vreinterpretq_bf16_f32(a);  }
100 // CHECK-LABEL: @test_vreinterpretq_bf16_u8(
101 // CHECK-NEXT:  entry:
102 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i8> [[A:%.*]] to <8 x bfloat>
103 // CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
104 //
105 bfloat16x8_t test_vreinterpretq_bf16_u8(uint8x16_t a)   { return vreinterpretq_bf16_u8(a);   }
106 // CHECK-LABEL: @test_vreinterpretq_bf16_u16(
107 // CHECK-NEXT:  entry:
108 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A:%.*]] to <8 x bfloat>
109 // CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
110 //
111 bfloat16x8_t test_vreinterpretq_bf16_u16(uint16x8_t a)  { return vreinterpretq_bf16_u16(a);  }
112 // CHECK-LABEL: @test_vreinterpretq_bf16_u32(
113 // CHECK-NEXT:  entry:
114 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x i32> [[A:%.*]] to <8 x bfloat>
115 // CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
116 //
117 bfloat16x8_t test_vreinterpretq_bf16_u32(uint32x4_t a)  { return vreinterpretq_bf16_u32(a);  }
118 // CHECK-LABEL: @test_vreinterpretq_bf16_p8(
119 // CHECK-NEXT:  entry:
120 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i8> [[A:%.*]] to <8 x bfloat>
121 // CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
122 //
123 bfloat16x8_t test_vreinterpretq_bf16_p8(poly8x16_t a)   { return vreinterpretq_bf16_p8(a);   }
124 // CHECK-LABEL: @test_vreinterpretq_bf16_p16(
125 // CHECK-NEXT:  entry:
126 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i16> [[A:%.*]] to <8 x bfloat>
127 // CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
128 //
129 bfloat16x8_t test_vreinterpretq_bf16_p16(poly16x8_t a)  { return vreinterpretq_bf16_p16(a);  }
130 // CHECK-LABEL: @test_vreinterpretq_bf16_u64(
131 // CHECK-NEXT:  entry:
132 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[A:%.*]] to <8 x bfloat>
133 // CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
134 //
135 bfloat16x8_t test_vreinterpretq_bf16_u64(uint64x2_t a)  { return vreinterpretq_bf16_u64(a);  }
136 // CHECK-LABEL: @test_vreinterpretq_bf16_s64(
137 // CHECK-NEXT:  entry:
138 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[A:%.*]] to <8 x bfloat>
139 // CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
140 //
141 bfloat16x8_t test_vreinterpretq_bf16_s64(int64x2_t a)   { return vreinterpretq_bf16_s64(a);  }
142 // CHECK-LABEL: @test_vreinterpret_bf16_p64(
143 // CHECK-NEXT:  entry:
144 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x i64> [[A:%.*]] to <4 x bfloat>
145 // CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
146 //
147 bfloat16x4_t test_vreinterpret_bf16_p64(poly64x1_t a)   { return vreinterpret_bf16_p64(a);   }
148 // CHECK-LABEL: @test_vreinterpretq_bf16_p64(
149 // CHECK-NEXT:  entry:
150 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x i64> [[A:%.*]] to <8 x bfloat>
151 // CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
152 //
153 bfloat16x8_t test_vreinterpretq_bf16_p64(poly64x2_t a)  { return vreinterpretq_bf16_p64(a);  }
154 // CHECK-LABEL: @test_vreinterpretq_bf16_p128(
155 // CHECK-NEXT:  entry:
156 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast i128 [[A:%.*]] to <8 x bfloat>
157 // CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
158 //
159 bfloat16x8_t test_vreinterpretq_bf16_p128(poly128_t a)  { return vreinterpretq_bf16_p128(a); }
160 // CHECK-LABEL: @test_vreinterpret_bf16_f64(
161 // CHECK-NEXT:  entry:
162 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <1 x double> [[A:%.*]] to <4 x bfloat>
163 // CHECK-NEXT:    ret <4 x bfloat> [[TMP0]]
164 //
165 bfloat16x4_t test_vreinterpret_bf16_f64(float64x1_t a)  { return vreinterpret_bf16_f64(a);  }
166 // CHECK-LABEL: @test_vreinterpretq_bf16_f64(
167 // CHECK-NEXT:  entry:
168 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <2 x double> [[A:%.*]] to <8 x bfloat>
169 // CHECK-NEXT:    ret <8 x bfloat> [[TMP0]]
170 //
171 bfloat16x8_t test_vreinterpretq_bf16_f64(float64x2_t a) { return vreinterpretq_bf16_f64(a); }
172 // CHECK-LABEL: @test_vreinterpret_s8_bf16(
173 // CHECK-NEXT:  entry:
174 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <8 x i8>
175 // CHECK-NEXT:    ret <8 x i8> [[TMP0]]
176 //
177 int8x8_t    test_vreinterpret_s8_bf16(bfloat16x4_t a)    { return vreinterpret_s8_bf16(a);    }
178 // CHECK-LABEL: @test_vreinterpret_s16_bf16(
179 // CHECK-NEXT:  entry:
180 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <4 x i16>
181 // CHECK-NEXT:    ret <4 x i16> [[TMP0]]
182 //
183 int16x4_t   test_vreinterpret_s16_bf16(bfloat16x4_t a)   { return vreinterpret_s16_bf16(a);   }
184 // CHECK-LABEL: @test_vreinterpret_s32_bf16(
185 // CHECK-NEXT:  entry:
186 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <2 x i32>
187 // CHECK-NEXT:    ret <2 x i32> [[TMP0]]
188 //
189 int32x2_t   test_vreinterpret_s32_bf16(bfloat16x4_t a)   { return vreinterpret_s32_bf16(a);   }
190 // CHECK-LABEL: @test_vreinterpret_f32_bf16(
191 // CHECK-NEXT:  entry:
192 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <2 x float>
193 // CHECK-NEXT:    ret <2 x float> [[TMP0]]
194 //
195 float32x2_t test_vreinterpret_f32_bf16(bfloat16x4_t a)   { return vreinterpret_f32_bf16(a);   }
196 // CHECK-LABEL: @test_vreinterpret_u8_bf16(
197 // CHECK-NEXT:  entry:
198 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <8 x i8>
199 // CHECK-NEXT:    ret <8 x i8> [[TMP0]]
200 //
201 uint8x8_t   test_vreinterpret_u8_bf16(bfloat16x4_t a)    { return vreinterpret_u8_bf16(a);    }
202 // CHECK-LABEL: @test_vreinterpret_u16_bf16(
203 // CHECK-NEXT:  entry:
204 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <4 x i16>
205 // CHECK-NEXT:    ret <4 x i16> [[TMP0]]
206 //
207 uint16x4_t  test_vreinterpret_u16_bf16(bfloat16x4_t a)   { return vreinterpret_u16_bf16(a);   }
208 // CHECK-LABEL: @test_vreinterpret_u32_bf16(
209 // CHECK-NEXT:  entry:
210 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <2 x i32>
211 // CHECK-NEXT:    ret <2 x i32> [[TMP0]]
212 //
213 uint32x2_t  test_vreinterpret_u32_bf16(bfloat16x4_t a)   { return vreinterpret_u32_bf16(a);   }
214 // CHECK-LABEL: @test_vreinterpret_p8_bf16(
215 // CHECK-NEXT:  entry:
216 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <8 x i8>
217 // CHECK-NEXT:    ret <8 x i8> [[TMP0]]
218 //
219 poly8x8_t   test_vreinterpret_p8_bf16(bfloat16x4_t a)    { return vreinterpret_p8_bf16(a);    }
220 // CHECK-LABEL: @test_vreinterpret_p16_bf16(
221 // CHECK-NEXT:  entry:
222 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <4 x i16>
223 // CHECK-NEXT:    ret <4 x i16> [[TMP0]]
224 //
225 poly16x4_t  test_vreinterpret_p16_bf16(bfloat16x4_t a)   { return vreinterpret_p16_bf16(a);   }
226 // CHECK-LABEL: @test_vreinterpret_u64_bf16(
227 // CHECK-NEXT:  entry:
228 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <1 x i64>
229 // CHECK-NEXT:    ret <1 x i64> [[TMP0]]
230 //
231 uint64x1_t  test_vreinterpret_u64_bf16(bfloat16x4_t a)   { return vreinterpret_u64_bf16(a);   }
232 // CHECK-LABEL: @test_vreinterpret_s64_bf16(
233 // CHECK-NEXT:  entry:
234 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <1 x i64>
235 // CHECK-NEXT:    ret <1 x i64> [[TMP0]]
236 //
237 int64x1_t   test_vreinterpret_s64_bf16(bfloat16x4_t a)   { return vreinterpret_s64_bf16(a);   }
238 // CHECK-LABEL: @test_vreinterpret_p64_bf16(
239 // CHECK-NEXT:  entry:
240 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <1 x i64>
241 // CHECK-NEXT:    ret <1 x i64> [[TMP0]]
242 //
243 poly64x1_t  test_vreinterpret_p64_bf16(bfloat16x4_t a)   { return vreinterpret_p64_bf16(a);   }
244 // CHECK-LABEL: @test_vreinterpretq_s8_bf16(
245 // CHECK-NEXT:  entry:
246 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
247 // CHECK-NEXT:    ret <16 x i8> [[TMP0]]
248 //
249 int8x16_t   test_vreinterpretq_s8_bf16(bfloat16x8_t a)   { return vreinterpretq_s8_bf16(a);   }
250 // CHECK-LABEL: @test_vreinterpretq_s16_bf16(
251 // CHECK-NEXT:  entry:
252 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <8 x i16>
253 // CHECK-NEXT:    ret <8 x i16> [[TMP0]]
254 //
255 int16x8_t   test_vreinterpretq_s16_bf16(bfloat16x8_t a)  { return vreinterpretq_s16_bf16(a);  }
256 // CHECK-LABEL: @test_vreinterpretq_s32_bf16(
257 // CHECK-NEXT:  entry:
258 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <4 x i32>
259 // CHECK-NEXT:    ret <4 x i32> [[TMP0]]
260 //
261 int32x4_t   test_vreinterpretq_s32_bf16(bfloat16x8_t a)  { return vreinterpretq_s32_bf16(a);  }
262 // CHECK-LABEL: @test_vreinterpretq_f32_bf16(
263 // CHECK-NEXT:  entry:
264 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <4 x float>
265 // CHECK-NEXT:    ret <4 x float> [[TMP0]]
266 //
267 float32x4_t test_vreinterpretq_f32_bf16(bfloat16x8_t a)  { return vreinterpretq_f32_bf16(a);  }
268 // CHECK-LABEL: @test_vreinterpretq_u8_bf16(
269 // CHECK-NEXT:  entry:
270 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
271 // CHECK-NEXT:    ret <16 x i8> [[TMP0]]
272 //
273 uint8x16_t  test_vreinterpretq_u8_bf16(bfloat16x8_t a)   { return vreinterpretq_u8_bf16(a);   }
274 // CHECK-LABEL: @test_vreinterpretq_u16_bf16(
275 // CHECK-NEXT:  entry:
276 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <8 x i16>
277 // CHECK-NEXT:    ret <8 x i16> [[TMP0]]
278 //
279 uint16x8_t  test_vreinterpretq_u16_bf16(bfloat16x8_t a)  { return vreinterpretq_u16_bf16(a);  }
280 // CHECK-LABEL: @test_vreinterpretq_u32_bf16(
281 // CHECK-NEXT:  entry:
282 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <4 x i32>
283 // CHECK-NEXT:    ret <4 x i32> [[TMP0]]
284 //
285 uint32x4_t  test_vreinterpretq_u32_bf16(bfloat16x8_t a)  { return vreinterpretq_u32_bf16(a);  }
286 // CHECK-LABEL: @test_vreinterpretq_p8_bf16(
287 // CHECK-NEXT:  entry:
288 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <16 x i8>
289 // CHECK-NEXT:    ret <16 x i8> [[TMP0]]
290 //
291 poly8x16_t  test_vreinterpretq_p8_bf16(bfloat16x8_t a)   { return vreinterpretq_p8_bf16(a);   }
292 // CHECK-LABEL: @test_vreinterpretq_p16_bf16(
293 // CHECK-NEXT:  entry:
294 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <8 x i16>
295 // CHECK-NEXT:    ret <8 x i16> [[TMP0]]
296 //
297 poly16x8_t  test_vreinterpretq_p16_bf16(bfloat16x8_t a)  { return vreinterpretq_p16_bf16(a);  }
298 // CHECK-LABEL: @test_vreinterpretq_u64_bf16(
299 // CHECK-NEXT:  entry:
300 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <2 x i64>
301 // CHECK-NEXT:    ret <2 x i64> [[TMP0]]
302 //
303 uint64x2_t  test_vreinterpretq_u64_bf16(bfloat16x8_t a)  { return vreinterpretq_u64_bf16(a);  }
304 // CHECK-LABEL: @test_vreinterpretq_s64_bf16(
305 // CHECK-NEXT:  entry:
306 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <2 x i64>
307 // CHECK-NEXT:    ret <2 x i64> [[TMP0]]
308 //
309 int64x2_t   test_vreinterpretq_s64_bf16(bfloat16x8_t a)  { return vreinterpretq_s64_bf16(a);  }
310 // CHECK-LABEL: @test_vreinterpretq_p64_bf16(
311 // CHECK-NEXT:  entry:
312 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <2 x i64>
313 // CHECK-NEXT:    ret <2 x i64> [[TMP0]]
314 //
315 poly64x2_t  test_vreinterpretq_p64_bf16(bfloat16x8_t a)  { return vreinterpretq_p64_bf16(a);  }
316 // CHECK-LABEL: @test_vreinterpretq_p128_bf16(
317 // CHECK-NEXT:  entry:
318 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to i128
319 // CHECK-NEXT:    ret i128 [[TMP0]]
320 //
321 poly128_t   test_vreinterpretq_p128_bf16(bfloat16x8_t a) { return vreinterpretq_p128_bf16(a); }
322 // CHECK-LABEL: @test_vreinterpret_f64_bf16(
323 // CHECK-NEXT:  entry:
324 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x bfloat> [[A:%.*]] to <1 x double>
325 // CHECK-NEXT:    ret <1 x double> [[TMP0]]
326 //
327 float64x1_t test_vreinterpret_f64_bf16(bfloat16x4_t a)   { return vreinterpret_f64_bf16(a);   }
328 // CHECK-LABEL: @test_vreinterpretq_f64_bf16(
329 // CHECK-NEXT:  entry:
330 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x bfloat> [[A:%.*]] to <2 x double>
331 // CHECK-NEXT:    ret <2 x double> [[TMP0]]
332 //
333 float64x2_t test_vreinterpretq_f64_bf16(bfloat16x8_t a)  { return vreinterpretq_f64_bf16(a);  }