// xref: /llvm-project/clang/test/CodeGen/arm-v8.6a-neon-intrinsics.c (revision 7c1d9b15eee3a34678addab2bab66f3020ac0753)
// RUN: %clang_cc1 -triple armv8.6a-arm-none-eabi -target-feature +neon -target-feature +fullfp16 -target-feature +i8mm \
// RUN: -disable-O0-optnone -emit-llvm -o - %s \
// RUN: | opt -S -passes=mem2reg,sroa \
// RUN: | FileCheck %s

// REQUIRES: arm-registered-target

#include <arm_neon.h>
9 
10 // CHECK-LABEL: test_vmmlaq_s32
11 // CHECK: [[VAL:%.*]] = call <4 x i32> @llvm.arm.neon.smmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b)
12 // CHECK: ret <4 x i32> [[VAL]]
test_vmmlaq_s32(int32x4_t r,int8x16_t a,int8x16_t b)13 int32x4_t test_vmmlaq_s32(int32x4_t r, int8x16_t a, int8x16_t b) {
14   return vmmlaq_s32(r, a, b);
15 }
16 
17 // CHECK-LABEL: test_vmmlaq_u32
18 // CHECK: [[VAL:%.*]] = call <4 x i32> @llvm.arm.neon.ummla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b)
19 // CHECK: ret <4 x i32> [[VAL]]
test_vmmlaq_u32(uint32x4_t r,uint8x16_t a,uint8x16_t b)20 uint32x4_t test_vmmlaq_u32(uint32x4_t r, uint8x16_t a, uint8x16_t b) {
21   return vmmlaq_u32(r, a, b);
22 }
23 
24 // CHECK-LABEL: test_vusmmlaq_s32
25 // CHECK: [[VAL:%.*]] = call <4 x i32> @llvm.arm.neon.usmmla.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> %b)
26 // CHECK: ret <4 x i32> [[VAL]]
test_vusmmlaq_s32(int32x4_t r,uint8x16_t a,int8x16_t b)27 int32x4_t test_vusmmlaq_s32(int32x4_t r, uint8x16_t a, int8x16_t b) {
28   return vusmmlaq_s32(r, a, b);
29 }
30 
31 // CHECK-LABEL: test_vusdot_s32
32 // CHECK: [[VAL:%.*]] = call <2 x i32> @llvm.arm.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> %b)
33 // CHECK: ret <2 x i32> [[VAL]]
test_vusdot_s32(int32x2_t r,uint8x8_t a,int8x8_t b)34 int32x2_t test_vusdot_s32(int32x2_t r, uint8x8_t a, int8x8_t b) {
35   return vusdot_s32(r, a, b);
36 }
37 
38 // CHECK-LABEL: test_vusdot_lane_s32
39 // CHECK: [[TMP0:%.*]] = bitcast <8 x i8> %b to <2 x i32>
40 // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
41 // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
42 // CHECK: [[LANE:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> [[TMP2]], <2 x i32> zeroinitializer
43 // CHECK: [[TMP3:%.*]] = bitcast <2 x i32> [[LANE]] to <8 x i8>
44 // CHECK: [[TMP4:%.*]] = bitcast <2 x i32> %r to <8 x i8>
45 // CHECK: [[OP:%.*]] = call <2 x i32> @llvm.arm.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> %a, <8 x i8> [[TMP3]])
46 // CHECK: ret <2 x i32> [[OP]]
test_vusdot_lane_s32(int32x2_t r,uint8x8_t a,int8x8_t b)47 int32x2_t test_vusdot_lane_s32(int32x2_t r, uint8x8_t a, int8x8_t b) {
48   return vusdot_lane_s32(r, a, b, 0);
49 }
50 
51 // CHECK-LABEL: test_vsudot_lane_s32
52 // CHECK: [[TMP0:%.*]] = bitcast <8 x i8> %b to <2 x i32>
53 // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
54 // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
55 // CHECK: [[LANE:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> [[TMP2]], <2 x i32> zeroinitializer
56 // CHECK: [[TMP3:%.*]] = bitcast <2 x i32> [[LANE]] to <8 x i8>
57 // CHECK: [[TMP4:%.*]] = bitcast <2 x i32> %r to <8 x i8>
58 // CHECK: [[OP:%.*]] = call <2 x i32> @llvm.arm.neon.usdot.v2i32.v8i8(<2 x i32> %r, <8 x i8> [[TMP3]], <8 x i8> %a)
59 // CHECK: ret <2 x i32> [[OP]]
test_vsudot_lane_s32(int32x2_t r,int8x8_t a,uint8x8_t b)60 int32x2_t test_vsudot_lane_s32(int32x2_t r, int8x8_t a, uint8x8_t b) {
61   return vsudot_lane_s32(r, a, b, 0);
62 }
63 
64 // CHECK-LABEL: test_vusdotq_lane_s32
65 // CHECK: [[TMP0:%.*]] = bitcast <8 x i8> %b to <2 x i32>
66 // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
67 // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
68 // CHECK: [[LANE:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> [[TMP2]], <4 x i32> zeroinitializer
69 // CHECK: [[TMP4:%.*]] = bitcast <4 x i32> [[LANE]] to <16 x i8>
70 // CHECK: [[TMP5:%.*]] = bitcast <4 x i32> %r to <16 x i8>
71 // CHECK: [[OP:%.*]] = call <4 x i32> @llvm.arm.neon.usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %a, <16 x i8> [[TMP4]])
72 // CHECK: ret <4 x i32> [[OP]]
test_vusdotq_lane_s32(int32x4_t r,uint8x16_t a,int8x8_t b)73 int32x4_t test_vusdotq_lane_s32(int32x4_t r, uint8x16_t a, int8x8_t b) {
74   return vusdotq_lane_s32(r, a, b, 0);
75 }
76 
77 // CHECK-LABEL: test_vsudotq_lane_s32
78 // CHECK: [[TMP0:%.*]] = bitcast <8 x i8> %b to <2 x i32>
79 // CHECK: [[TMP1:%.*]] = bitcast <2 x i32> [[TMP0]] to <8 x i8>
80 // CHECK: [[TMP2:%.*]] = bitcast <8 x i8> [[TMP1]] to <2 x i32>
81 // CHECK: [[LANE:%.*]] = shufflevector <2 x i32> [[TMP2]], <2 x i32> [[TMP2]], <4 x i32> zeroinitializer
82 // CHECK: [[TMP4:%.*]] = bitcast <4 x i32> %r to <16 x i8>
83 // CHECK: [[OP:%.*]] = call <4 x i32> @llvm.arm.neon.usdot.v4i32.v16i8(<4 x i32> %r, <16 x i8> %3, <16 x i8> %a)
84 // CHECK: ret <4 x i32> [[OP]]
test_vsudotq_lane_s32(int32x4_t r,int8x16_t a,uint8x8_t b)85 int32x4_t test_vsudotq_lane_s32(int32x4_t r, int8x16_t a, uint8x8_t b) {
86   return vsudotq_lane_s32(r, a, b, 0);
87 }
88