xref: /minix3/external/bsd/llvm/dist/clang/lib/Headers/avx512erintrin.h (revision 0a6a1f1d05b60e214de2f05a7310ddd1f0e590e7)
/*===---- avx512erintrin.h - AVX-512ER intrinsics ------------------------===
2*0a6a1f1dSLionel Sambuc  *
3*0a6a1f1dSLionel Sambuc  * Permission is hereby granted, free of charge, to any person obtaining a copy
4*0a6a1f1dSLionel Sambuc  * of this software and associated documentation files (the "Software"), to deal
5*0a6a1f1dSLionel Sambuc  * in the Software without restriction, including without limitation the rights
6*0a6a1f1dSLionel Sambuc  * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
7*0a6a1f1dSLionel Sambuc  * copies of the Software, and to permit persons to whom the Software is
8*0a6a1f1dSLionel Sambuc  * furnished to do so, subject to the following conditions:
9*0a6a1f1dSLionel Sambuc  *
10*0a6a1f1dSLionel Sambuc  * The above copyright notice and this permission notice shall be included in
11*0a6a1f1dSLionel Sambuc  * all copies or substantial portions of the Software.
12*0a6a1f1dSLionel Sambuc  *
13*0a6a1f1dSLionel Sambuc  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
14*0a6a1f1dSLionel Sambuc  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15*0a6a1f1dSLionel Sambuc  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
16*0a6a1f1dSLionel Sambuc  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
17*0a6a1f1dSLionel Sambuc  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
18*0a6a1f1dSLionel Sambuc  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
19*0a6a1f1dSLionel Sambuc  * THE SOFTWARE.
20*0a6a1f1dSLionel Sambuc  *
21*0a6a1f1dSLionel Sambuc  *===-----------------------------------------------------------------------===
22*0a6a1f1dSLionel Sambuc  */
23*0a6a1f1dSLionel Sambuc #ifndef __IMMINTRIN_H
24*0a6a1f1dSLionel Sambuc #error "Never use <avx512erintrin.h> directly; include <immintrin.h> instead."
25*0a6a1f1dSLionel Sambuc #endif
26*0a6a1f1dSLionel Sambuc 
27*0a6a1f1dSLionel Sambuc #ifndef __AVX512ERINTRIN_H
28*0a6a1f1dSLionel Sambuc #define __AVX512ERINTRIN_H
29*0a6a1f1dSLionel Sambuc 
30*0a6a1f1dSLionel Sambuc 
31*0a6a1f1dSLionel Sambuc // rsqrt28
32*0a6a1f1dSLionel Sambuc static  __inline__ __m512d __attribute__((__always_inline__, __nodebug__))
_mm512_rsqrt28_round_pd(__m512d __A,int __R)33*0a6a1f1dSLionel Sambuc _mm512_rsqrt28_round_pd (__m512d __A, int __R)
34*0a6a1f1dSLionel Sambuc {
35*0a6a1f1dSLionel Sambuc   return (__m512d)__builtin_ia32_rsqrt28pd_mask ((__v8df)__A,
36*0a6a1f1dSLionel Sambuc                                                  (__v8df)_mm512_setzero_pd(),
37*0a6a1f1dSLionel Sambuc                                                  (__mmask8)-1,
38*0a6a1f1dSLionel Sambuc                                                  __R);
39*0a6a1f1dSLionel Sambuc }
40*0a6a1f1dSLionel Sambuc static  __inline__ __m512 __attribute__((__always_inline__, __nodebug__))
_mm512_rsqrt28_round_ps(__m512 __A,int __R)41*0a6a1f1dSLionel Sambuc _mm512_rsqrt28_round_ps(__m512 __A, int __R)
42*0a6a1f1dSLionel Sambuc {
43*0a6a1f1dSLionel Sambuc   return (__m512)__builtin_ia32_rsqrt28ps_mask ((__v16sf)__A,
44*0a6a1f1dSLionel Sambuc                                                 (__v16sf)_mm512_setzero_ps(),
45*0a6a1f1dSLionel Sambuc                                                 (__mmask16)-1,
46*0a6a1f1dSLionel Sambuc                                                 __R);
47*0a6a1f1dSLionel Sambuc }
48*0a6a1f1dSLionel Sambuc 
49*0a6a1f1dSLionel Sambuc static  __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_rsqrt28_round_ss(__m128 __A,__m128 __B,int __R)50*0a6a1f1dSLionel Sambuc _mm_rsqrt28_round_ss(__m128 __A, __m128 __B, int __R)
51*0a6a1f1dSLionel Sambuc {
52*0a6a1f1dSLionel Sambuc   return (__m128) __builtin_ia32_rsqrt28ss_mask ((__v4sf) __A,
53*0a6a1f1dSLionel Sambuc              (__v4sf) __B,
54*0a6a1f1dSLionel Sambuc              (__v4sf)
55*0a6a1f1dSLionel Sambuc              _mm_setzero_ps (),
56*0a6a1f1dSLionel Sambuc              (__mmask8) -1,
57*0a6a1f1dSLionel Sambuc              __R);
58*0a6a1f1dSLionel Sambuc }
59*0a6a1f1dSLionel Sambuc 
60*0a6a1f1dSLionel Sambuc static  __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_rsqrt28_round_sd(__m128d __A,__m128d __B,int __R)61*0a6a1f1dSLionel Sambuc _mm_rsqrt28_round_sd (__m128d __A, __m128d __B, int __R)
62*0a6a1f1dSLionel Sambuc {
63*0a6a1f1dSLionel Sambuc   return (__m128d) __builtin_ia32_rsqrt28sd_mask ((__v2df) __A,
64*0a6a1f1dSLionel Sambuc               (__v2df) __B,
65*0a6a1f1dSLionel Sambuc               (__v2df)
66*0a6a1f1dSLionel Sambuc               _mm_setzero_pd (),
67*0a6a1f1dSLionel Sambuc               (__mmask8) -1,
68*0a6a1f1dSLionel Sambuc              __R);
69*0a6a1f1dSLionel Sambuc }
70*0a6a1f1dSLionel Sambuc 
71*0a6a1f1dSLionel Sambuc 
72*0a6a1f1dSLionel Sambuc // rcp28
73*0a6a1f1dSLionel Sambuc static  __inline__ __m512d __attribute__((__always_inline__, __nodebug__))
_mm512_rcp28_round_pd(__m512d __A,int __R)74*0a6a1f1dSLionel Sambuc _mm512_rcp28_round_pd (__m512d __A, int __R)
75*0a6a1f1dSLionel Sambuc {
76*0a6a1f1dSLionel Sambuc   return (__m512d)__builtin_ia32_rcp28pd_mask ((__v8df)__A,
77*0a6a1f1dSLionel Sambuc                                                (__v8df)_mm512_setzero_pd(),
78*0a6a1f1dSLionel Sambuc                                                (__mmask8)-1,
79*0a6a1f1dSLionel Sambuc                                                __R);
80*0a6a1f1dSLionel Sambuc }
81*0a6a1f1dSLionel Sambuc 
82*0a6a1f1dSLionel Sambuc static  __inline__ __m512 __attribute__((__always_inline__, __nodebug__))
_mm512_rcp28_round_ps(__m512 __A,int __R)83*0a6a1f1dSLionel Sambuc _mm512_rcp28_round_ps (__m512 __A, int __R)
84*0a6a1f1dSLionel Sambuc {
85*0a6a1f1dSLionel Sambuc   return (__m512)__builtin_ia32_rcp28ps_mask ((__v16sf)__A,
86*0a6a1f1dSLionel Sambuc                                               (__v16sf)_mm512_setzero_ps (),
87*0a6a1f1dSLionel Sambuc                                               (__mmask16)-1,
88*0a6a1f1dSLionel Sambuc                                               __R);
89*0a6a1f1dSLionel Sambuc }
90*0a6a1f1dSLionel Sambuc 
91*0a6a1f1dSLionel Sambuc static  __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_rcp28_round_ss(__m128 __A,__m128 __B,int __R)92*0a6a1f1dSLionel Sambuc _mm_rcp28_round_ss (__m128 __A, __m128 __B, int __R)
93*0a6a1f1dSLionel Sambuc {
94*0a6a1f1dSLionel Sambuc   return (__m128) __builtin_ia32_rcp28ss_mask ((__v4sf) __A,
95*0a6a1f1dSLionel Sambuc              (__v4sf) __B,
96*0a6a1f1dSLionel Sambuc              (__v4sf)
97*0a6a1f1dSLionel Sambuc              _mm_setzero_ps (),
98*0a6a1f1dSLionel Sambuc              (__mmask8) -1,
99*0a6a1f1dSLionel Sambuc              __R);
100*0a6a1f1dSLionel Sambuc }
101*0a6a1f1dSLionel Sambuc static  __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_rcp28_round_sd(__m128d __A,__m128d __B,int __R)102*0a6a1f1dSLionel Sambuc _mm_rcp28_round_sd (__m128d __A, __m128d __B, int __R)
103*0a6a1f1dSLionel Sambuc {
104*0a6a1f1dSLionel Sambuc   return (__m128d) __builtin_ia32_rcp28sd_mask ((__v2df) __A,
105*0a6a1f1dSLionel Sambuc               (__v2df) __B,
106*0a6a1f1dSLionel Sambuc               (__v2df)
107*0a6a1f1dSLionel Sambuc               _mm_setzero_pd (),
108*0a6a1f1dSLionel Sambuc               (__mmask8) -1,
109*0a6a1f1dSLionel Sambuc              __R);
110*0a6a1f1dSLionel Sambuc }
111*0a6a1f1dSLionel Sambuc 
112*0a6a1f1dSLionel Sambuc #endif // __AVX512ERINTRIN_H
113