; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no_x86_scrub_mem_shuffle --version 4
; RUN: llc -mtriple=i686-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s --check-prefix=X86
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 < %s | FileCheck %s --check-prefix=X64

; Test for case where insertps folds the load of an insertion element from a constant pool.
;
; The imm8 operand (64 = 0b01000000) sets count_s = 1, selecting the second
; element (1.0) of the constant vector <0.0, 1.0, 0.0, 0.0>. Instead of
; materializing the whole constant vector and inserting from a register, the
; backend should fold the access into a scalar load directly from the constant
; pool at byte offset +4 — which is what the "+4" in both CHECK lines verifies
; (PC-relative addressing on x86-64, absolute on i686).

define <4 x float> @fold_from_constantpool(<4 x float> %a) {
; X86-LABEL: fold_from_constantpool:
; X86:       # %bb.0:
; X86-NEXT:    insertps $0, {{\.?LCPI[0-9]+_[0-9]+}}+4, %xmm0 # xmm0 = mem[0],xmm0[1,2,3]
; X86-NEXT:    retl
;
; X64-LABEL: fold_from_constantpool:
; X64:       # %bb.0:
; X64-NEXT:    insertps $0, {{\.?LCPI[0-9]+_[0-9]+}}+4(%rip), %xmm0 # xmm0 = mem[0],xmm0[1,2,3]
; X64-NEXT:    retq
  %1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a, <4 x float> <float 0.0, float 1.0, float 0.0, float 0.0>, i8 64)
  ret <4 x float> %1
}

declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounwind readnone