1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py 2; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=X86 3; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=X64 4 5define void @vec3_setcc_crash(ptr %in, ptr %out) { 6; X86-LABEL: vec3_setcc_crash: 7; X86: # %bb.0: 8; X86-NEXT: movl {{[0-9]+}}(%esp), %eax 9; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx 10; X86-NEXT: vmovdqa (%ecx), %xmm0 11; X86-NEXT: vptestnmd %xmm0, %xmm0, %k1 12; X86-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} 13; X86-NEXT: vpextrd $2, %xmm0, 8(%eax) 14; X86-NEXT: vpextrd $1, %xmm0, 4(%eax) 15; X86-NEXT: vmovd %xmm0, (%eax) 16; X86-NEXT: retl 17; 18; X64-LABEL: vec3_setcc_crash: 19; X64: # %bb.0: 20; X64-NEXT: vmovdqa (%rdi), %xmm0 21; X64-NEXT: vptestnmd %xmm0, %xmm0, %k1 22; X64-NEXT: vmovdqa32 %xmm0, %xmm0 {%k1} {z} 23; X64-NEXT: vpextrd $2, %xmm0, 8(%rsi) 24; X64-NEXT: vmovq %xmm0, (%rsi) 25; X64-NEXT: retq 26 %a = load <3 x i32>, ptr %in 27 %cmp = icmp eq <3 x i32> %a, zeroinitializer 28 %c = select <3 x i1> %cmp, <3 x i32> %a, <3 x i32> zeroinitializer 29 store <3 x i32> %c, ptr %out 30 ret void 31} 32