/*===----------------- gfniintrin.h - GFNI intrinsics ----------------------===
 *
 *
 * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 * See https://llvm.org/LICENSE.txt for license information.
 * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 *
 *===-----------------------------------------------------------------------===
 */
#ifndef __IMMINTRIN_H
#error "Never use <gfniintrin.h> directly; include <immintrin.h> instead."
#endif

#ifndef __GFNIINTRIN_H
#define __GFNIINTRIN_H

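/* When 512-bit EVEX instructions are available but AVX10.1-512 is not, the
 * 128/256-bit variants explicitly request "no-evex512" so they remain usable
 * in configurations that cap vector width at 256 bits (presumably the
 * motivation for this split; the ZMM forms below always require evex512). */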
#if defined(__EVEX512__) && !defined(__AVX10_1_512__)
/* Default attributes for simple form (no masking). */
#define __DEFAULT_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("gfni,no-evex512"), __min_vector_width__(128)))

/* Default attributes for YMM unmasked form. */
#define __DEFAULT_FN_ATTRS_Y                                                   \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("avx,gfni,no-evex512"),                            \
                 __min_vector_width__(256)))

/* Default attributes for VLX masked forms. */
#define __DEFAULT_FN_ATTRS_VL128                                               \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("avx512bw,avx512vl,gfni,no-evex512"),              \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS_VL256                                               \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("avx512bw,avx512vl,gfni,no-evex512"),              \
                 __min_vector_width__(256)))
#else
/* Default attributes for simple form (no masking). */
#define __DEFAULT_FN_ATTRS                                                     \
  __attribute__((__always_inline__, __nodebug__, __target__("gfni"),           \
                 __min_vector_width__(128)))

/* Default attributes for YMM unmasked form. */
#define __DEFAULT_FN_ATTRS_Y                                                   \
  __attribute__((__always_inline__, __nodebug__, __target__("avx,gfni"),       \
                 __min_vector_width__(256)))

/* Default attributes for VLX masked forms. */
#define __DEFAULT_FN_ATTRS_VL128                                               \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("avx512bw,avx512vl,gfni"),                         \
                 __min_vector_width__(128)))
#define __DEFAULT_FN_ATTRS_VL256                                               \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("avx512bw,avx512vl,gfni"),                         \
                 __min_vector_width__(256)))
#endif

/* Default attributes for ZMM unmasked forms. */
#define __DEFAULT_FN_ATTRS_Z                                                   \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("avx512f,evex512,gfni"),                           \
                 __min_vector_width__(512)))
/* Default attributes for ZMM masked forms. */
#define __DEFAULT_FN_ATTRS_Z_MASK                                              \
  __attribute__((__always_inline__, __nodebug__,                               \
                 __target__("avx512bw,evex512,gfni"),                          \
                 __min_vector_width__(512)))

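/* All intrinsics below operate byte-wise in GF(2^8) with the AES reduction
 * polynomial x^8 + x^4 + x^3 + x + 1 (0x11B):
 *  - gf2p8mul multiplies corresponding bytes of its operands in GF(2^8);
 *  - gf2p8affine treats each 64-bit lane of B as an 8x8 bit-matrix M and
 *    replaces every byte x in the matching lane of A with M*x ^ I, where I
 *    is an 8-bit immediate;
 *  - gf2p8affineinv does the same after first substituting each byte of A
 *    with its multiplicative inverse in GF(2^8) (0 maps to 0). */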
#define _mm_gf2p8affineinv_epi64_epi8(A, B, I) \
  ((__m128i)__builtin_ia32_vgf2p8affineinvqb_v16qi((__v16qi)(__m128i)(A), \
                                                   (__v16qi)(__m128i)(B), \
                                                   (char)(I)))

#define _mm_gf2p8affine_epi64_epi8(A, B, I) \
  ((__m128i)__builtin_ia32_vgf2p8affineqb_v16qi((__v16qi)(__m128i)(A), \
                                                (__v16qi)(__m128i)(B), \
                                                (char)(I)))

static __inline__ __m128i __DEFAULT_FN_ATTRS
_mm_gf2p8mul_epi8(__m128i __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_vgf2p8mulb_v16qi((__v16qi) __A,
              (__v16qi) __B);
}
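
/* Illustrative usage sketch (not part of this header; `x' is a hypothetical
 * __m128i value). With the commonly used bit-matrix encodings, the identity
 * matrix makes gf2p8affine a byte-wise no-op and the anti-diagonal matrix
 * reverses the bits of every byte; gf2p8affineinv with the AES affine matrix
 * and the immediate 0x63 yields the AES S-box.
 *
 *   __m128i id  = _mm_set1_epi64x(0x0102040810204080); // identity matrix
 *   __m128i rev = _mm_set1_epi64x(0x8040201008040201); // bit-reversal matrix
 *   __m128i same     = _mm_gf2p8affine_epi64_epi8(x, id,  0);
 *   __m128i reversed = _mm_gf2p8affine_epi64_epi8(x, rev, 0);
 */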

#ifdef __AVXINTRIN_H
#define _mm256_gf2p8affineinv_epi64_epi8(A, B, I) \
  ((__m256i)__builtin_ia32_vgf2p8affineinvqb_v32qi((__v32qi)(__m256i)(A), \
                                                   (__v32qi)(__m256i)(B), \
                                                   (char)(I)))

#define _mm256_gf2p8affine_epi64_epi8(A, B, I) \
  ((__m256i)__builtin_ia32_vgf2p8affineqb_v32qi((__v32qi)(__m256i)(A), \
                                                (__v32qi)(__m256i)(B), \
                                                (char)(I)))

static __inline__ __m256i __DEFAULT_FN_ATTRS_Y
_mm256_gf2p8mul_epi8(__m256i __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_vgf2p8mulb_v32qi((__v32qi) __A,
              (__v32qi) __B);
}
#endif /* __AVXINTRIN_H */

#ifdef __AVX512BWINTRIN_H
#define _mm512_gf2p8affineinv_epi64_epi8(A, B, I) \
  ((__m512i)__builtin_ia32_vgf2p8affineinvqb_v64qi((__v64qi)(__m512i)(A), \
                                                   (__v64qi)(__m512i)(B), \
                                                   (char)(I)))

#define _mm512_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
         (__v64qi)_mm512_gf2p8affineinv_epi64_epi8(A, B, I), \
         (__v64qi)(__m512i)(S)))

#define _mm512_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  _mm512_mask_gf2p8affineinv_epi64_epi8((__m512i)_mm512_setzero_si512(), \
         U, A, B, I)

#define _mm512_gf2p8affine_epi64_epi8(A, B, I) \
  ((__m512i)__builtin_ia32_vgf2p8affineqb_v64qi((__v64qi)(__m512i)(A), \
                                                (__v64qi)(__m512i)(B), \
                                                (char)(I)))

#define _mm512_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  ((__m512i)__builtin_ia32_selectb_512((__mmask64)(U), \
         (__v64qi)_mm512_gf2p8affine_epi64_epi8((A), (B), (I)), \
         (__v64qi)(__m512i)(S)))

#define _mm512_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  _mm512_mask_gf2p8affine_epi64_epi8((__m512i)_mm512_setzero_si512(), \
         U, A, B, I)

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z
_mm512_gf2p8mul_epi8(__m512i __A, __m512i __B)
{
  return (__m512i) __builtin_ia32_vgf2p8mulb_v64qi((__v64qi) __A,
              (__v64qi) __B);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK
_mm512_mask_gf2p8mul_epi8(__m512i __S, __mmask64 __U, __m512i __A, __m512i __B)
{
  return (__m512i) __builtin_ia32_selectb_512(__U,
              (__v64qi) _mm512_gf2p8mul_epi8(__A, __B),
              (__v64qi) __S);
}

static __inline__ __m512i __DEFAULT_FN_ATTRS_Z_MASK
_mm512_maskz_gf2p8mul_epi8(__mmask64 __U, __m512i __A, __m512i __B)
{
  return _mm512_mask_gf2p8mul_epi8((__m512i)_mm512_setzero_si512(),
              __U, __A, __B);
}
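
/* Illustrative masking sketch (not part of this header; `src', `a' and `b'
 * are hypothetical __m512i values). For each byte lane i the masked forms
 * compute the result only where bit i of the mask is set; the merge form
 * takes the remaining lanes from its first argument, while the zeroing form
 * sets them to zero.
 *
 *   __mmask64 odd = 0xAAAAAAAAAAAAAAAAULL;
 *   __m512i merged = _mm512_mask_gf2p8mul_epi8(src, odd, a, b);
 *   __m512i zeroed = _mm512_maskz_gf2p8mul_epi8(odd, a, b);
 */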
#endif /* __AVX512BWINTRIN_H */

#ifdef __AVX512VLBWINTRIN_H
#define _mm_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
         (__v16qi)_mm_gf2p8affineinv_epi64_epi8(A, B, I), \
         (__v16qi)(__m128i)(S)))

#define _mm_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  _mm_mask_gf2p8affineinv_epi64_epi8((__m128i)_mm_setzero_si128(), \
         U, A, B, I)

#define _mm256_mask_gf2p8affineinv_epi64_epi8(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
         (__v32qi)_mm256_gf2p8affineinv_epi64_epi8(A, B, I), \
         (__v32qi)(__m256i)(S)))

#define _mm256_maskz_gf2p8affineinv_epi64_epi8(U, A, B, I) \
  _mm256_mask_gf2p8affineinv_epi64_epi8((__m256i)_mm256_setzero_si256(), \
         U, A, B, I)

#define _mm_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  ((__m128i)__builtin_ia32_selectb_128((__mmask16)(U), \
         (__v16qi)_mm_gf2p8affine_epi64_epi8(A, B, I), \
         (__v16qi)(__m128i)(S)))

#define _mm_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  _mm_mask_gf2p8affine_epi64_epi8((__m128i)_mm_setzero_si128(), U, A, B, I)

#define _mm256_mask_gf2p8affine_epi64_epi8(S, U, A, B, I) \
  ((__m256i)__builtin_ia32_selectb_256((__mmask32)(U), \
         (__v32qi)_mm256_gf2p8affine_epi64_epi8(A, B, I), \
         (__v32qi)(__m256i)(S)))

#define _mm256_maskz_gf2p8affine_epi64_epi8(U, A, B, I) \
  _mm256_mask_gf2p8affine_epi64_epi8((__m256i)_mm256_setzero_si256(), \
         U, A, B, I)

static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_mask_gf2p8mul_epi8(__m128i __S, __mmask16 __U, __m128i __A, __m128i __B)
{
  return (__m128i) __builtin_ia32_selectb_128(__U,
              (__v16qi) _mm_gf2p8mul_epi8(__A, __B),
              (__v16qi) __S);
}

static __inline__ __m128i __DEFAULT_FN_ATTRS_VL128
_mm_maskz_gf2p8mul_epi8(__mmask16 __U, __m128i __A, __m128i __B)
{
  return _mm_mask_gf2p8mul_epi8((__m128i)_mm_setzero_si128(),
              __U, __A, __B);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256
_mm256_mask_gf2p8mul_epi8(__m256i __S, __mmask32 __U, __m256i __A, __m256i __B)
{
  return (__m256i) __builtin_ia32_selectb_256(__U,
              (__v32qi) _mm256_gf2p8mul_epi8(__A, __B),
              (__v32qi) __S);
}

static __inline__ __m256i __DEFAULT_FN_ATTRS_VL256
_mm256_maskz_gf2p8mul_epi8(__mmask32 __U, __m256i __A, __m256i __B)
{
  return _mm256_mask_gf2p8mul_epi8((__m256i)_mm256_setzero_si256(),
              __U, __A, __B);
}
#endif /* __AVX512VLBWINTRIN_H */

#undef __DEFAULT_FN_ATTRS
#undef __DEFAULT_FN_ATTRS_Y
#undef __DEFAULT_FN_ATTRS_Z
#undef __DEFAULT_FN_ATTRS_Z_MASK
#undef __DEFAULT_FN_ATTRS_VL128
#undef __DEFAULT_FN_ATTRS_VL256

#endif /* __GFNIINTRIN_H */