// RUN: %clang_cc1 -triple i386-unknown-unknown -fsyntax-only -ffreestanding -Wcast-qual %s -verify
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fsyntax-only -ffreestanding -Wcast-qual %s -verify
// RUN: %clang_cc1 -triple i386-unknown-unknown -fsyntax-only -ffreestanding -flax-vector-conversions=none -Wcast-qual %s -verify
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fsyntax-only -ffreestanding -flax-vector-conversions=none -Wcast-qual %s -verify
// RUN: %clang_cc1 -triple i386-unknown-unknown -fsyntax-only -ffreestanding -Wcast-qual -x c++ %s -verify
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fsyntax-only -ffreestanding -Wcast-qual -x c++ %s -verify
// expected-no-diagnostics

// Include the metaheader that includes all x86 intrinsic headers.
#include <x86intrin.h>

// MMX: _mm_empty must compile when only the "mmx" target feature is attached.
void __attribute__((__target__("mmx"))) mm_empty_wrap(void) {
  _mm_empty();
}

// SSE: _mm_add_ss must compile under the "sse" target attribute.
__m128 __attribute__((__target__("sse"))) mm_add_ss_wrap(__m128 a, __m128 b) {
  return _mm_add_ss(a, b);
}

// SSE: _mm_prefetch must compile under the "sse" target attribute.
// 0x3 is the hint argument (a prefetch locality constant).
void __attribute__((__target__("sse"))) mm_prefetch_wrap(const void *p) {
  _mm_prefetch(p, 0x3);
}

// SSE2: _mm_sqrt_sd must compile under the "sse2" target attribute.
__m128d __attribute__((__target__("sse2"))) mm_sqrt_sd_wrap(__m128d a, __m128d b) {
  return _mm_sqrt_sd(a, b);
}

// SSE3: _mm_mwait must compile under the "sse3" target attribute.
// Parameter 'a' is intentionally unused; the test only needs the call to compile.
void __attribute__((__target__("sse3"))) mm_mwait_wrap(int a) {
  _mm_mwait(0, 0);
}

// SSSE3: _mm_abs_pi8 must compile under the "ssse3" target attribute.
__m64 __attribute__((__target__("ssse3"))) mm_abs_pi8_wrap(__m64 a) {
  return _mm_abs_pi8(a);
}

// SSE4.1: _mm_minpos_epu16 must compile under the "sse4.1" target attribute.
__m128i __attribute__((__target__("sse4.1"))) mm_minpos_epu16_wrap(__m128i v) {
  return _mm_minpos_epu16(v);
}

// SSE4.2: _mm_crc32_u8 must compile under the "sse4.2" target attribute.
unsigned int __attribute__((__target__("sse4.2"))) mm_crc32_u8_wrap(unsigned int c, unsigned char d) {
  return _mm_crc32_u8(c, d);
}

// AES-NI: _mm_aesenc_si128 must compile under the "aes" target attribute.
__m128i __attribute__((__target__("aes"))) mm_aesenc_si128_wrap(__m128i v, __m128i r) {
  return _mm_aesenc_si128(v, r);
}

// AVX: _mm256_add_pd must compile under the "avx" target attribute.
__m256d __attribute__((__target__("avx"))) mm256_add_pd_wrap(__m256d a, __m256d b) {
  return _mm256_add_pd(a, b);
}

// AVX2: _mm256_abs_epi8 must compile under the "avx2" target attribute.
__m256i __attribute__((__target__("avx2"))) mm256_abs_epi8_wrap(__m256i a) {
  return _mm256_abs_epi8(a);
}

// BMI: __tzcnt_u16 must compile under the "bmi" target attribute.
unsigned short __attribute__((__target__("bmi"))) tzcnt_u16_wrap(unsigned short x) {
  return __tzcnt_u16(x);
}

// BMI2: _bzhi_u32 must compile under the "bmi2" target attribute.
unsigned int __attribute__((__target__("bmi2"))) bzhi_u32_wrap(unsigned int x, unsigned int y) {
  return _bzhi_u32(x, y);
}

// LZCNT: __lzcnt16 must compile under the "lzcnt" target attribute.
unsigned short __attribute__((__target__("lzcnt"))) lzcnt16_wrap(unsigned short x) {
  return __lzcnt16(x);
}

// FMA: _mm256_fmsubadd_pd must compile under the "fma" target attribute.
__m256d __attribute__((__target__("fma"))) mm256_fmsubadd_pd_wrap(__m256d a, __m256d b, __m256d c) {
  return _mm256_fmsubadd_pd(a, b, c);
}

// AVX-512F: _mm512_setzero_si512 must compile under the "avx512f" target attribute.
__m512i __attribute__((__target__("avx512f"))) mm512_setzero_si512_wrap(void) {
  return _mm512_setzero_si512();
}

// AVX-512VL: _mm_cmpeq_epi32_mask must compile under the "avx512vl" target attribute.
__mmask8 __attribute__((__target__("avx512vl"))) mm_cmpeq_epi32_mask_wrap(__m128i a, __m128i b) {
  return _mm_cmpeq_epi32_mask(a, b);
}

// AVX-512DQ: _mm512_mullo_epi64 must compile under the "avx512dq" target attribute.
__m512i __attribute__((__target__("avx512dq"))) mm512_mullo_epi64_wrap(__m512i a, __m512i b) {
  return _mm512_mullo_epi64(a, b);
}

// AVX-512VL+BW: _mm_cmpeq_epi8_mask needs both features in the attribute list.
__mmask16 __attribute__((__target__("avx512vl,avx512bw"))) mm_cmpeq_epi8_mask_wrap(__m128i a, __m128i b) {
  return _mm_cmpeq_epi8_mask(a, b);
}

// AVX-512VL+DQ: _mm256_mullo_epi64 needs both features in the attribute list.
__m256i __attribute__((__target__("avx512vl,avx512dq"))) mm256_mullo_epi64_wrap(__m256i a, __m256i b) {
  return _mm256_mullo_epi64(a, b);
}

// RDRAND: _rdrand16_step must compile under the "rdrnd" target attribute.
int __attribute__((__target__("rdrnd"))) rdrand16_step_wrap(unsigned short *p) {
  return _rdrand16_step(p);
}

// FSGSBASE intrinsics exist only on x86-64, hence the guard.
#if defined(__x86_64__)
// FSGSBASE: _readfsbase_u32 must compile under the "fsgsbase" target attribute.
unsigned int __attribute__((__target__("fsgsbase"))) readfsbase_u32_wrap(void) {
  return _readfsbase_u32();
}
#endif

// RTM: _xbegin must compile under the "rtm" target attribute.
unsigned int __attribute__((__target__("rtm"))) xbegin_wrap(void) {
  return _xbegin();
}

// SHA: _mm_sha1nexte_epu32 must compile under the "sha" target attribute.
__m128i __attribute__((__target__("sha"))) mm_sha1nexte_epu32_wrap(__m128i x, __m128i y) {
  return _mm_sha1nexte_epu32(x, y);
}

// RDSEED: _rdseed16_step must compile under the "rdseed" target attribute.
int __attribute__((__target__("rdseed"))) rdseed16_step_wrap(unsigned short *p) {
  return _rdseed16_step(p);
}

// SSE4A: _mm_extract_si64 must compile under the "sse4a" target attribute.
__m128i __attribute__((__target__("sse4a"))) mm_extract_si64_wrap(__m128i x, __m128i y) {
  return _mm_extract_si64(x, y);
}

// FMA4: _mm_macc_ps must compile under the "fma4" target attribute.
__m128 __attribute__((__target__("fma4"))) mm_macc_ps_wrap(__m128 a, __m128 b, __m128 c) {
  return _mm_macc_ps(a, b, c);
}

// XOP: _mm256_frcz_ps must compile under the "xop" target attribute.
__m256 __attribute__((__target__("xop"))) mm256_frcz_ps_wrap(__m256 a) {
  return _mm256_frcz_ps(a);
}

// TBM: __blcfill_u32 must compile under the "tbm" target attribute.
unsigned int __attribute__((__target__("tbm"))) blcfill_u32_wrap(unsigned int a) {
  return __blcfill_u32(a);
}

// F16C: _mm_cvtph_ps must compile under the "f16c" target attribute.
__m128 __attribute__((__target__("f16c"))) mm_cvtph_ps_wrap(__m128i a) {
  return _mm_cvtph_ps(a);
}

// RTM: _xtest must compile under the "rtm" target attribute.
int __attribute__((__target__("rtm"))) xtest_wrap(void) {
  return _xtest();
}
