; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefix=SSE-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 -O3 | FileCheck %s --check-prefix=SSE-64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefix=AVX-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx -O3 | FileCheck %s --check-prefix=AVX-64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefix=AVX512-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -mattr=+avx512vl -O3 | FileCheck %s --check-prefix=AVX512-64
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefix=AVX512F-32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f -O3 | FileCheck %s --check-prefix=AVX512F-64

define <2 x i32> @test_v2f32_ogt_s(<2 x i32> %a, <2 x i32> %b, <2 x float> %f1, <2 x float> %f2) #0 {
; SSE-32-LABEL: test_v2f32_ogt_s:
; SSE-32: # %bb.0:
; SSE-32-NEXT: pushl %ebp
; SSE-32-NEXT: movl %esp, %ebp
; SSE-32-NEXT: andl $-16, %esp
; SSE-32-NEXT: subl $16, %esp
; SSE-32-NEXT: movaps 8(%ebp), %xmm4
; SSE-32-NEXT: xorl %eax, %eax
; SSE-32-NEXT: comiss %xmm4, %xmm2
; SSE-32-NEXT: movl $-1, %ecx
; SSE-32-NEXT: movl $0, %edx
; SSE-32-NEXT: cmoval %ecx, %edx
; SSE-32-NEXT: movd %edx, %xmm3
; SSE-32-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1,1,1]
; SSE-32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,1,1]
; SSE-32-NEXT: comiss %xmm4, %xmm2
; SSE-32-NEXT: cmoval %ecx, %eax
; SSE-32-NEXT: movd %eax, %xmm2
; SSE-32-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
; SSE-32-NEXT: por %xmm3, %xmm0
; SSE-32-NEXT: movl %ebp, %esp
; SSE-32-NEXT: popl %ebp
; SSE-32-NEXT: retl
;
; SSE-64-LABEL: test_v2f32_ogt_s:
; SSE-64: # %bb.0:
; SSE-64-NEXT: xorl %eax, %eax
; SSE-64-NEXT: comiss %xmm3, %xmm2
; SSE-64-NEXT: movl $-1, %ecx
; SSE-64-NEXT: movl $0, %edx
; SSE-64-NEXT: cmoval %ecx, %edx
; SSE-64-NEXT: movd %edx, %xmm4
; SSE-64-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,1,1]
; SSE-64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,1,1]
; SSE-64-NEXT: comiss %xmm3, %xmm2
; SSE-64-NEXT: cmoval %ecx, %eax
; SSE-64-NEXT: movd %eax, %xmm2
; SSE-64-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
; SSE-64-NEXT: pand %xmm4, %xmm0
; SSE-64-NEXT: pandn %xmm1, %xmm4
; SSE-64-NEXT: por %xmm4, %xmm0
; SSE-64-NEXT: retq
;
; AVX-32-LABEL: test_v2f32_ogt_s:
; AVX-32: # %bb.0:
; AVX-32-NEXT: pushl %ebp
; AVX-32-NEXT: movl %esp, %ebp
; AVX-32-NEXT: andl $-16, %esp
; AVX-32-NEXT: subl $16, %esp
; AVX-32-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX-32-NEXT: xorl %eax, %eax
; AVX-32-NEXT: vcomiss 12(%ebp), %xmm3
; AVX-32-NEXT: movl $-1, %ecx
; AVX-32-NEXT: movl $0, %edx
; AVX-32-NEXT: cmoval %ecx, %edx
; AVX-32-NEXT: vcomiss 8(%ebp), %xmm2
; AVX-32-NEXT: cmoval %ecx, %eax
; AVX-32-NEXT: vmovd %eax, %xmm2
; AVX-32-NEXT: vpinsrd $1, %edx, %xmm2, %xmm2
; AVX-32-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-32-NEXT: movl %ebp, %esp
; AVX-32-NEXT: popl %ebp
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_v2f32_ogt_s:
; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovshdup {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX-64-NEXT: vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
; AVX-64-NEXT: xorl %eax, %eax
; AVX-64-NEXT: vcomiss %xmm4, %xmm5
; AVX-64-NEXT: movl $-1, %ecx
; AVX-64-NEXT: movl $0, %edx
; AVX-64-NEXT: cmoval %ecx, %edx
; AVX-64-NEXT: vcomiss %xmm3, %xmm2
; AVX-64-NEXT: cmoval %ecx, %eax
; AVX-64-NEXT: vmovd %eax, %xmm2
; AVX-64-NEXT: vpinsrd $1, %edx, %xmm2, %xmm2
; AVX-64-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-64-NEXT: retq
;
; AVX512-32-LABEL: test_v2f32_ogt_s:
; AVX512-32: # %bb.0:
; AVX512-32-NEXT: pushl %ebp
; AVX512-32-NEXT: movl %esp, %ebp
; AVX512-32-NEXT: andl $-16, %esp
; AVX512-32-NEXT: subl $16, %esp
; AVX512-32-NEXT: vcomiss 8(%ebp), %xmm2
; AVX512-32-NEXT: seta %al
; AVX512-32-NEXT: andl $1, %eax
; AVX512-32-NEXT: kmovw %eax, %k0
; AVX512-32-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX512-32-NEXT: vcomiss 12(%ebp), %xmm2
; AVX512-32-NEXT: seta %al
; AVX512-32-NEXT: kmovw %eax, %k1
; AVX512-32-NEXT: kshiftlw $15, %k1, %k1
; AVX512-32-NEXT: kshiftrw $14, %k1, %k1
; AVX512-32-NEXT: korw %k1, %k0, %k1
; AVX512-32-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; AVX512-32-NEXT: movl %ebp, %esp
; AVX512-32-NEXT: popl %ebp
; AVX512-32-NEXT: retl
;
; AVX512-64-LABEL: test_v2f32_ogt_s:
; AVX512-64: # %bb.0:
; AVX512-64-NEXT: vcomiss %xmm3, %xmm2
; AVX512-64-NEXT: seta %al
; AVX512-64-NEXT: andl $1, %eax
; AVX512-64-NEXT: kmovw %eax, %k0
; AVX512-64-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX512-64-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX512-64-NEXT: vcomiss %xmm3, %xmm2
; AVX512-64-NEXT: seta %al
; AVX512-64-NEXT: kmovw %eax, %k1
; AVX512-64-NEXT: kshiftlw $15, %k1, %k1
; AVX512-64-NEXT: kshiftrw $14, %k1, %k1
; AVX512-64-NEXT: korw %k1, %k0, %k1
; AVX512-64-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; AVX512-64-NEXT: retq
;
; AVX512F-32-LABEL: test_v2f32_ogt_s:
; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: pushl %ebp
; AVX512F-32-NEXT: movl %esp, %ebp
; AVX512F-32-NEXT: andl $-16, %esp
; AVX512F-32-NEXT: subl $16, %esp
; AVX512F-32-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-32-NEXT: vcomiss 8(%ebp), %xmm2
; AVX512F-32-NEXT: seta %al
; AVX512F-32-NEXT: andl $1, %eax
; AVX512F-32-NEXT: kmovw %eax, %k0
; AVX512F-32-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX512F-32-NEXT: vcomiss 12(%ebp), %xmm2
; AVX512F-32-NEXT: seta %al
; AVX512F-32-NEXT: kmovw %eax, %k1
; AVX512F-32-NEXT: kshiftlw $15, %k1, %k1
; AVX512F-32-NEXT: kshiftrw $14, %k1, %k1
; AVX512F-32-NEXT: korw %k1, %k0, %k1
; AVX512F-32-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; AVX512F-32-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-32-NEXT: movl %ebp, %esp
; AVX512F-32-NEXT: popl %ebp
; AVX512F-32-NEXT: vzeroupper
; AVX512F-32-NEXT: retl
;
; AVX512F-64-LABEL: test_v2f32_ogt_s:
; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-64-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-64-NEXT: vcomiss %xmm3, %xmm2
; AVX512F-64-NEXT: seta %al
; AVX512F-64-NEXT: andl $1, %eax
; AVX512F-64-NEXT: kmovw %eax, %k0
; AVX512F-64-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX512F-64-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX512F-64-NEXT: vcomiss %xmm3, %xmm2
; AVX512F-64-NEXT: seta %al
; AVX512F-64-NEXT: kmovw %eax, %k1
; AVX512F-64-NEXT: kshiftlw $15, %k1, %k1
; AVX512F-64-NEXT: kshiftrw $14, %k1, %k1
; AVX512F-64-NEXT: korw %k1, %k0, %k1
; AVX512F-64-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; AVX512F-64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-64-NEXT: vzeroupper
; AVX512F-64-NEXT: retq
  %cond = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(
                                               <2 x float> %f1, <2 x float> %f2, metadata !"ogt",
                                               metadata !"fpexcept.strict") #0
  %res = select <2 x i1> %cond, <2 x i32> %a, <2 x i32> %b
  ret <2 x i32> %res
}

define <2 x i32> @test_v2f32_oeq_q(<2 x i32> %a, <2 x i32> %b, <2 x float> %f1, <2 x float> %f2) #0 {
; SSE-32-LABEL: test_v2f32_oeq_q:
; SSE-32: # %bb.0:
; SSE-32-NEXT: pushl %ebp
; SSE-32-NEXT: movl %esp, %ebp
; SSE-32-NEXT: andl $-16, %esp
; SSE-32-NEXT: subl $16, %esp
; SSE-32-NEXT: movaps 8(%ebp), %xmm4
; SSE-32-NEXT: xorl %eax, %eax
; SSE-32-NEXT: ucomiss %xmm4, %xmm2
; SSE-32-NEXT: movl $-1, %ecx
; SSE-32-NEXT: movl $-1, %edx
; SSE-32-NEXT: cmovnel %eax, %edx
; SSE-32-NEXT: cmovpl %eax, %edx
; SSE-32-NEXT: movd %edx, %xmm3
; SSE-32-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,1,1,1]
; SSE-32-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,1,1]
; SSE-32-NEXT: ucomiss %xmm4, %xmm2
; SSE-32-NEXT: cmovnel %eax, %ecx
; SSE-32-NEXT: cmovpl %eax, %ecx
; SSE-32-NEXT: movd %ecx, %xmm2
; SSE-32-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
; SSE-32-NEXT: pand %xmm3, %xmm0
; SSE-32-NEXT: pandn %xmm1, %xmm3
; SSE-32-NEXT: por %xmm3, %xmm0
; SSE-32-NEXT: movl %ebp, %esp
; SSE-32-NEXT: popl %ebp
; SSE-32-NEXT: retl
;
; SSE-64-LABEL: test_v2f32_oeq_q:
; SSE-64: # %bb.0:
; SSE-64-NEXT: xorl %eax, %eax
; SSE-64-NEXT: ucomiss %xmm3, %xmm2
; SSE-64-NEXT: movl $-1, %ecx
; SSE-64-NEXT: movl $-1, %edx
; SSE-64-NEXT: cmovnel %eax, %edx
; SSE-64-NEXT: cmovpl %eax, %edx
; SSE-64-NEXT: movd %edx, %xmm4
; SSE-64-NEXT: shufps {{.*#+}} xmm3 = xmm3[1,1,1,1]
; SSE-64-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,1,1,1]
; SSE-64-NEXT: ucomiss %xmm3, %xmm2
; SSE-64-NEXT: cmovnel %eax, %ecx
; SSE-64-NEXT: cmovpl %eax, %ecx
; SSE-64-NEXT: movd %ecx, %xmm2
; SSE-64-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
; SSE-64-NEXT: pand %xmm4, %xmm0
; SSE-64-NEXT: pandn %xmm1, %xmm4
; SSE-64-NEXT: por %xmm4, %xmm0
; SSE-64-NEXT: retq
;
; AVX-32-LABEL: test_v2f32_oeq_q:
; AVX-32: # %bb.0:
; AVX-32-NEXT: pushl %ebp
; AVX-32-NEXT: movl %esp, %ebp
; AVX-32-NEXT: andl $-16, %esp
; AVX-32-NEXT: subl $16, %esp
; AVX-32-NEXT: vmovshdup {{.*#+}} xmm3 = xmm2[1,1,3,3]
; AVX-32-NEXT: xorl %eax, %eax
; AVX-32-NEXT: vucomiss 12(%ebp), %xmm3
; AVX-32-NEXT: movl $-1, %ecx
; AVX-32-NEXT: movl $-1, %edx
; AVX-32-NEXT: cmovnel %eax, %edx
; AVX-32-NEXT: cmovpl %eax, %edx
; AVX-32-NEXT: vucomiss 8(%ebp), %xmm2
; AVX-32-NEXT: cmovnel %eax, %ecx
; AVX-32-NEXT: cmovpl %eax, %ecx
; AVX-32-NEXT: vmovd %ecx, %xmm2
; AVX-32-NEXT: vpinsrd $1, %edx, %xmm2, %xmm2
; AVX-32-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-32-NEXT: movl %ebp, %esp
; AVX-32-NEXT: popl %ebp
; AVX-32-NEXT: retl
;
; AVX-64-LABEL: test_v2f32_oeq_q:
; AVX-64: # %bb.0:
; AVX-64-NEXT: vmovshdup {{.*#+}} xmm4 = xmm3[1,1,3,3]
; AVX-64-NEXT: vmovshdup {{.*#+}} xmm5 = xmm2[1,1,3,3]
; AVX-64-NEXT: xorl %eax, %eax
; AVX-64-NEXT: vucomiss %xmm4, %xmm5
; AVX-64-NEXT: movl $-1, %ecx
; AVX-64-NEXT: movl $-1, %edx
; AVX-64-NEXT: cmovnel %eax, %edx
; AVX-64-NEXT: cmovpl %eax, %edx
; AVX-64-NEXT: vucomiss %xmm3, %xmm2
; AVX-64-NEXT: cmovnel %eax, %ecx
; AVX-64-NEXT: cmovpl %eax, %ecx
; AVX-64-NEXT: vmovd %ecx, %xmm2
; AVX-64-NEXT: vpinsrd $1, %edx, %xmm2, %xmm2
; AVX-64-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
; AVX-64-NEXT: retq
;
; AVX512-32-LABEL: test_v2f32_oeq_q:
; AVX512-32: # %bb.0:
; AVX512-32-NEXT: pushl %ebp
; AVX512-32-NEXT: movl %esp, %ebp
; AVX512-32-NEXT: andl $-16, %esp
; AVX512-32-NEXT: subl $16, %esp
; AVX512-32-NEXT: vucomiss 8(%ebp), %xmm2
; AVX512-32-NEXT: setnp %al
; AVX512-32-NEXT: sete %cl
; AVX512-32-NEXT: testb %al, %cl
; AVX512-32-NEXT: setne %al
; AVX512-32-NEXT: andl $1, %eax
; AVX512-32-NEXT: kmovw %eax, %k0
; AVX512-32-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX512-32-NEXT: vucomiss 12(%ebp), %xmm2
; AVX512-32-NEXT: setnp %al
; AVX512-32-NEXT: sete %cl
; AVX512-32-NEXT: testb %al, %cl
; AVX512-32-NEXT: setne %al
; AVX512-32-NEXT: kmovw %eax, %k1
; AVX512-32-NEXT: kshiftlw $15, %k1, %k1
; AVX512-32-NEXT: kshiftrw $14, %k1, %k1
; AVX512-32-NEXT: korw %k1, %k0, %k1
; AVX512-32-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; AVX512-32-NEXT: movl %ebp, %esp
; AVX512-32-NEXT: popl %ebp
; AVX512-32-NEXT: retl
;
; AVX512-64-LABEL: test_v2f32_oeq_q:
; AVX512-64: # %bb.0:
; AVX512-64-NEXT: vucomiss %xmm3, %xmm2
; AVX512-64-NEXT: setnp %al
; AVX512-64-NEXT: sete %cl
; AVX512-64-NEXT: testb %al, %cl
; AVX512-64-NEXT: setne %al
; AVX512-64-NEXT: andl $1, %eax
; AVX512-64-NEXT: kmovw %eax, %k0
; AVX512-64-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX512-64-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX512-64-NEXT: vucomiss %xmm3, %xmm2
; AVX512-64-NEXT: setnp %al
; AVX512-64-NEXT: sete %cl
; AVX512-64-NEXT: testb %al, %cl
; AVX512-64-NEXT: setne %al
; AVX512-64-NEXT: kmovw %eax, %k1
; AVX512-64-NEXT: kshiftlw $15, %k1, %k1
; AVX512-64-NEXT: kshiftrw $14, %k1, %k1
; AVX512-64-NEXT: korw %k1, %k0, %k1
; AVX512-64-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
; AVX512-64-NEXT: retq
;
; AVX512F-32-LABEL: test_v2f32_oeq_q:
; AVX512F-32: # %bb.0:
; AVX512F-32-NEXT: pushl %ebp
; AVX512F-32-NEXT: movl %esp, %ebp
; AVX512F-32-NEXT: andl $-16, %esp
; AVX512F-32-NEXT: subl $16, %esp
; AVX512F-32-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-32-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-32-NEXT: vucomiss 8(%ebp), %xmm2
; AVX512F-32-NEXT: setnp %al
; AVX512F-32-NEXT: sete %cl
; AVX512F-32-NEXT: testb %al, %cl
; AVX512F-32-NEXT: setne %al
; AVX512F-32-NEXT: andl $1, %eax
; AVX512F-32-NEXT: kmovw %eax, %k0
; AVX512F-32-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX512F-32-NEXT: vucomiss 12(%ebp), %xmm2
; AVX512F-32-NEXT: setnp %al
; AVX512F-32-NEXT: sete %cl
; AVX512F-32-NEXT: testb %al, %cl
; AVX512F-32-NEXT: setne %al
; AVX512F-32-NEXT: kmovw %eax, %k1
; AVX512F-32-NEXT: kshiftlw $15, %k1, %k1
; AVX512F-32-NEXT: kshiftrw $14, %k1, %k1
; AVX512F-32-NEXT: korw %k1, %k0, %k1
; AVX512F-32-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; AVX512F-32-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-32-NEXT: movl %ebp, %esp
; AVX512F-32-NEXT: popl %ebp
; AVX512F-32-NEXT: vzeroupper
; AVX512F-32-NEXT: retl
;
; AVX512F-64-LABEL: test_v2f32_oeq_q:
; AVX512F-64: # %bb.0:
; AVX512F-64-NEXT: # kill: def $xmm1 killed $xmm1 def $zmm1
; AVX512F-64-NEXT: # kill: def $xmm0 killed $xmm0 def $zmm0
; AVX512F-64-NEXT: vucomiss %xmm3, %xmm2
; AVX512F-64-NEXT: setnp %al
; AVX512F-64-NEXT: sete %cl
; AVX512F-64-NEXT: testb %al, %cl
; AVX512F-64-NEXT: setne %al
; AVX512F-64-NEXT: andl $1, %eax
; AVX512F-64-NEXT: kmovw %eax, %k0
; AVX512F-64-NEXT: vmovshdup {{.*#+}} xmm3 = xmm3[1,1,3,3]
; AVX512F-64-NEXT: vmovshdup {{.*#+}} xmm2 = xmm2[1,1,3,3]
; AVX512F-64-NEXT: vucomiss %xmm3, %xmm2
; AVX512F-64-NEXT: setnp %al
; AVX512F-64-NEXT: sete %cl
; AVX512F-64-NEXT: testb %al, %cl
; AVX512F-64-NEXT: setne %al
; AVX512F-64-NEXT: kmovw %eax, %k1
; AVX512F-64-NEXT: kshiftlw $15, %k1, %k1
; AVX512F-64-NEXT: kshiftrw $14, %k1, %k1
; AVX512F-64-NEXT: korw %k1, %k0, %k1
; AVX512F-64-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
; AVX512F-64-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512F-64-NEXT: vzeroupper
; AVX512F-64-NEXT: retq
  %cond = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f32(
                                               <2 x float> %f1, <2 x float> %f2, metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select <2 x i1> %cond, <2 x i32> %a, <2 x i32> %b
  ret <2 x i32> %res
}

attributes #0 = { strictfp nounwind }

declare <2 x i1> @llvm.experimental.constrained.fcmp.v2f32(<2 x float>, <2 x float>, metadata, metadata)
declare <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(<2 x float>, <2 x float>, metadata, metadata)