; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no_x86_scrub_mem_shuffle
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=btver2 | FileCheck %s

; Expand-load with a constant <i1 false, i1 true> mask: only lane 1 is loaded,
; so codegen folds it into a single vpinsrq from (%rdi) into the passthru.
define <2 x i64> @test5(ptr %base, <2 x i64> %src0) {
; CHECK-LABEL: test5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpinsrq $1, (%rdi), %xmm0, %xmm0
; CHECK-NEXT:    retq
  %res = call <2 x i64> @llvm.masked.expandload.v2i64(ptr %base, <2 x i1> <i1 false, i1 true>, <2 x i64> %src0)
  ret <2 x i64> %res
}
declare <2 x i64> @llvm.masked.expandload.v2i64(ptr, <2 x i1>, <2 x i64>)

; Compress-store with a variable mask: lowered as a branchy scalar sequence
; that tests each mask bit (via vmovmskpd) and conditionally stores each lane,
; advancing the destination pointer only for stored lanes.
define void @test11(ptr %base, <2 x i64> %V, <2 x i1> %mask) {
; CHECK-LABEL: test11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vpsllq $63, %xmm1, %xmm1
; CHECK-NEXT:    vmovmskpd %xmm1, %eax
; CHECK-NEXT:    testb $1, %al
; CHECK-NEXT:    jne .LBB1_1
; CHECK-NEXT:  # %bb.2: # %else
; CHECK-NEXT:    testb $2, %al
; CHECK-NEXT:    jne .LBB1_3
; CHECK-NEXT:  .LBB1_4: # %else2
; CHECK-NEXT:    retq
; CHECK-NEXT:  .LBB1_1: # %cond.store
; CHECK-NEXT:    vmovq %xmm0, (%rdi)
; CHECK-NEXT:    addq $8, %rdi
; CHECK-NEXT:    testb $2, %al
; CHECK-NEXT:    je .LBB1_4
; CHECK-NEXT:  .LBB1_3: # %cond.store1
; CHECK-NEXT:    vpextrq $1, %xmm0, (%rdi)
; CHECK-NEXT:    retq
  call void @llvm.masked.compressstore.v2i64(<2 x i64> %V, ptr %base, <2 x i1> %mask)
  ret void
}
declare void @llvm.masked.compressstore.v2i64(<2 x i64>, ptr, <2 x i1>)