; Search hits for the token "2" in an x86 codegen test that checks constant
; splats of 128-bit and 256-bit vector types are materialized with AVX/AVX2
; broadcast instructions when optimizing for size. Each splat function has a
; size-attribute variant (#0/#1) and a _pgso variant (!prof !14), so most
; matched bodies appear twice; defines, rets, and CHECK lines that did not
; contain the token are absent.
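
; The RUN lines producing the AVX and AVX2 check prefixes are not part of the
; match; a minimal sketch, assuming the usual x86_64 triple and prefix setup:
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefixes=CHECK,AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,AVX2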

define <2 x double> @splat_v2f64(<2 x double> %x) #0 {
  %add = fadd <2 x double> %x, <double 1.0, double 1.0>
  ret <2 x double> %add
}

define <2 x double> @splat_v2f64_pgso(<2 x double> %x) !prof !14 {
  %add = fadd <2 x double> %x, <double 1.0, double 1.0>
  ret <2 x double> %add
}
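
; Neither f64 function's CHECK lines matched the search (their splat constant
; is 1.0, not 2). A sketch of what both prefixes presumably check, assuming
; movddup is used since it has existed since SSE3 and needs no AVX2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovddup {{.*#+}} xmm1 = [1.0E+0,1.0E+0]
; CHECK-NEXT:    # xmm1 = mem[0,0]
; CHECK-NEXT:    vaddpd %xmm1, %xmm0, %xmm0
; CHECK-NEXT:    retq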

define <2 x i64> @splat_v2i64(<2 x i64> %x) #1 {
; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = [2,2]
; AVX2-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [2,2]
  %add = add <2 x i64> %x, <i64 2, i64 2>
  ret <2 x i64> %add
}
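
; Filled out, one of these tests presumably reads as below (CHECK lines in
; this style are autogenerated by utils/update_llc_test_checks.py); the
; LABEL/vpaddq/retq lines around the matched broadcasts are assumptions:
; AVX-LABEL: splat_v2i64:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = [2,2]
; AVX-NEXT:    # xmm1 = mem[0,0]
; AVX-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX2-LABEL: splat_v2i64:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [2,2]
; AVX2-NEXT:    vpaddq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq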

define <2 x i64> @splat_v2i64_pgso(<2 x i64> %x) !prof !14 {
; AVX-NEXT:    vmovddup {{.*#+}} xmm1 = [2,2]
; AVX2-NEXT:    vpbroadcastq {{.*#+}} xmm1 = [2,2]
  %add = add <2 x i64> %x, <i64 2, i64 2>
  ret <2 x i64> %add
}

; AVX-NEXT:    vmovddup {{.*#+}} xmm2 = [2,2]
; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [2,2,2,2]
  %add = add <4 x i64> %x, <i64 2, i64 2, i64 2, i64 2>

; AVX-NEXT:    vmovddup {{.*#+}} xmm2 = [2,2]
; AVX2-NEXT:    vpbroadcastq {{.*#+}} ymm1 = [2,2,2,2]
  %add = add <4 x i64> %x, <i64 2, i64 2, i64 2, i64 2>

; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [2,2,2,2]
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2,2,2,2]
  %add = add <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>

; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [2,2,2,2]
; AVX2-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [2,2,2,2]
  %add = add <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>

; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [2,2,2,2]
; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2]
  %add = add <8 x i32> %x, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>
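
; AVX1 has no 256-bit integer adds, so the <8 x i32> case is presumably split
; into 128-bit halves around the broadcast (hence the splat landing in xmm2,
; with xmm1 holding the extracted high half); a sketch of the assumed AVX body:
; AVX:       # %bb.0:
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [2,2,2,2]
; AVX-NEXT:    vpaddd %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpaddd %xmm2, %xmm0, %xmm0
; AVX-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX-NEXT:    retq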

; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [2,2,2,2]
; AVX2-NEXT:    vpbroadcastd {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2]
  %add = add <8 x i32> %x, <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>

; AVX2-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2]
  %add = add <8 x i16> %x, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>

; AVX2-NEXT:    vpbroadcastw {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2]
  %add = add <8 x i16> %x, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>

; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [2,2,2,2,2,2,2,2]
; AVX2-NEXT:    vpbroadcastw {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
  %add = add <16 x i16> %x, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>

; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [2,2,2,2,2,2,2,2]
; AVX2-NEXT:    vpbroadcastw {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
  %add = add <16 x i16> %x, <i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2, i16 2>

; AVX2-NEXT:    vpbroadcastb {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
  %add = add <16 x i8> %x, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>

; AVX2-NEXT:    vpbroadcastb {{.*#+}} xmm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
  %add = add <16 x i8> %x, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>

; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; AVX2-NEXT:    vpbroadcastb {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
  %add = add <32 x i8> %x, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>

; AVX-NEXT:    vbroadcastss {{.*#+}} xmm2 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
; AVX2-NEXT:    vpbroadcastb {{.*#+}} ymm1 = [2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2]
  %add = add <32 x i8> %x, <i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2, i8 2>

; AVX-NEXT:    vblendps {{.*#+}} xmm0 = mem[0,1],xmm1[2,3]
; AVX-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1,2,3],mem[4,5,6,7]
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0,1],mem[2,3,4,5,6,7]
  %1 = shufflevector <4 x i64> %0, <4 x i64> undef, <3 x i32> <i32 undef, i32 undef, i32 2>
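
; The #0 and #1 attribute groups referenced by the defines above are outside
; the match; in size-optimization tests of this kind they are presumably the
; two size attributes (an assumption, not taken from the matched lines):
attributes #0 = { optsize }
attributes #1 = { minsize }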

!1 = !{!2, !3, !4, !5, !6, !7, !8, !9}
!2 = !{!"ProfileFormat", !"InstrProf"}
!13 = !{i32 999999, i64 1, i32 2}
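
; !1, !2, and !13 are fragments of the standard ProfileSummary boilerplate
; that lets !prof !14 trigger profile-guided size optimization (a cold entry
; count makes the function eligible for size opts). A sketch of the full
; block those fragments presumably belong to, with the usual placeholder
; counts assumed:
!llvm.module.flags = !{!0}
!0 = !{i32 1, !"ProfileSummary", !1}
!1 = !{!2, !3, !4, !5, !6, !7, !8, !9}
!2 = !{!"ProfileFormat", !"InstrProf"}
!3 = !{!"TotalCount", i64 10000}
!4 = !{!"MaxCount", i64 10}
!5 = !{!"MaxInternalCount", i64 1}
!6 = !{!"MaxFunctionCount", i64 1000}
!7 = !{!"NumCounts", i64 3}
!8 = !{!"NumFunctions", i64 3}
!9 = !{!"DetailedSummary", !10}
!10 = !{!11, !12, !13}
!11 = !{i32 10000, i64 100, i32 1}
!12 = !{i32 999000, i64 100, i32 1}
!13 = !{i32 999999, i64 1, i32 2}
!14 = !{!"function_entry_count", i64 0}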