; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx,slow-unaligned-mem-32 | FileCheck %s
; RUN: llc -O0 < %s -mtriple=x86_64-unknown-unknown -mattr=avx,slow-unaligned-mem-32 | FileCheck %s -check-prefix=CHECK_O0

define void @test_256_load(ptr nocapture %d, ptr nocapture %f, ptr nocapture %i) nounwind {
; CHECK-LABEL: test_256_load:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    pushq %r15
; CHECK-NEXT:    pushq %r14
; CHECK-NEXT:    pushq %rbx
; CHECK-NEXT:    subq $96, %rsp
; CHECK-NEXT:    movq %rdx, %rbx
; CHECK-NEXT:    movq %rsi, %r14
; CHECK-NEXT:    movq %rdi, %r15
; CHECK-NEXT:    vmovaps (%rdi), %ymm0
; CHECK-NEXT:    vmovups %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovaps (%rsi), %ymm1
; CHECK-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK-NEXT:    vmovaps (%rdx), %ymm2
; CHECK-NEXT:    vmovups %ymm2, (%rsp) # 32-byte Spill
; CHECK-NEXT:    callq dummy@PLT
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vmovaps %ymm0, (%r15)
; CHECK-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vmovaps %ymm0, (%r14)
; CHECK-NEXT:    vmovups (%rsp), %ymm0 # 32-byte Reload
; CHECK-NEXT:    vmovaps %ymm0, (%rbx)
; CHECK-NEXT:    addq $96, %rsp
; CHECK-NEXT:    popq %rbx
; CHECK-NEXT:    popq %r14
; CHECK-NEXT:    popq %r15
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: test_256_load:
; CHECK_O0:       # %bb.0: # %entry
; CHECK_O0-NEXT:    subq $184, %rsp
; CHECK_O0-NEXT:    movq %rdi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK_O0-NEXT:    movq %rsi, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK_O0-NEXT:    movq %rdx, {{[-0-9]+}}(%r{{[sb]}}p) # 8-byte Spill
; CHECK_O0-NEXT:    vmovapd (%rdi), %ymm0
; CHECK_O0-NEXT:    vmovups %ymm0, (%rsp) # 32-byte Spill
; CHECK_O0-NEXT:    vmovaps (%rsi), %ymm1
; CHECK_O0-NEXT:    vmovups %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK_O0-NEXT:    vmovdqa (%rdx), %ymm2
; CHECK_O0-NEXT:    vmovups %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
; CHECK_O0-NEXT:    callq dummy@PLT
; CHECK_O0-NEXT:    vmovups (%rsp), %ymm2 # 32-byte Reload
; CHECK_O0-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdi # 8-byte Reload
; CHECK_O0-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Reload
; CHECK_O0-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rsi # 8-byte Reload
; CHECK_O0-NEXT:    vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload
; CHECK_O0-NEXT:    movq {{[-0-9]+}}(%r{{[sb]}}p), %rdx # 8-byte Reload
; CHECK_O0-NEXT:    vmovapd %ymm2, (%rdi)
; CHECK_O0-NEXT:    vmovaps %ymm1, (%rsi)
; CHECK_O0-NEXT:    vmovdqa %ymm0, (%rdx)
; CHECK_O0-NEXT:    addq $184, %rsp
; CHECK_O0-NEXT:    vzeroupper
; CHECK_O0-NEXT:    retq
entry:
  %tmp1.i = load <4 x double>, ptr %d, align 32
  %tmp1.i17 = load <8 x float>, ptr %f, align 32
  %tmp1.i16 = load <4 x i64>, ptr %i, align 32
  tail call void @dummy(<4 x double> %tmp1.i, <8 x float> %tmp1.i17, <4 x i64> %tmp1.i16) nounwind
  store <4 x double> %tmp1.i, ptr %d, align 32
  store <8 x float> %tmp1.i17, ptr %f, align 32
  store <4 x i64> %tmp1.i16, ptr %i, align 32
  ret void
}

declare void @dummy(<4 x double>, <8 x float>, <4 x i64>)

;;
;; The two tests below check that a load + scalar_to_vector + insert_subvector
;; + zext sequence is folded into a single vmovss, vmovsd, or vinsertps from
;; memory.

define <8 x float> @mov00(<8 x float> %v, ptr %ptr) nounwind {
; CHECK-LABEL: mov00:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: mov00:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; CHECK_O0-NEXT:    # kill: def $ymm0 killed $xmm0
; CHECK_O0-NEXT:    retq
  %val = load float, ptr %ptr
  %i0 = insertelement <8 x float> zeroinitializer, float %val, i32 0
  ret <8 x float> %i0
}

define <4 x double> @mov01(<4 x double> %v, ptr %ptr) nounwind {
; CHECK-LABEL: mov01:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: mov01:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; CHECK_O0-NEXT:    # kill: def $ymm0 killed $xmm0
; CHECK_O0-NEXT:    retq
  %val = load double, ptr %ptr
  %i0 = insertelement <4 x double> zeroinitializer, double %val, i32 0
  ret <4 x double> %i0
}

define void @storev16i16(<16 x i16> %a) nounwind {
; CHECK-LABEL: storev16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps %ymm0, (%rax)
;
; CHECK_O0-LABEL: storev16i16:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    # implicit-def: $rax
; CHECK_O0-NEXT:    vmovdqa %ymm0, (%rax)
  store <16 x i16> %a, ptr undef, align 32
  unreachable
}

define void @storev16i16_01(<16 x i16> %a) nounwind {
; CHECK-LABEL: storev16i16_01:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rax)
; CHECK-NEXT:    vmovups %xmm0, (%rax)
;
; CHECK_O0-LABEL: storev16i16_01:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    # implicit-def: $rax
; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rax)
  store <16 x i16> %a, ptr undef, align 4
  unreachable
}

define void @storev32i8(<32 x i8> %a) nounwind {
; CHECK-LABEL: storev32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps %ymm0, (%rax)
;
; CHECK_O0-LABEL: storev32i8:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    # implicit-def: $rax
; CHECK_O0-NEXT:    vmovdqa %ymm0, (%rax)
  store <32 x i8> %a, ptr undef, align 32
  unreachable
}

define void @storev32i8_01(<32 x i8> %a) nounwind {
; CHECK-LABEL: storev32i8_01:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vextractf128 $1, %ymm0, (%rax)
; CHECK-NEXT:    vmovups %xmm0, (%rax)
;
; CHECK_O0-LABEL: storev32i8_01:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    # implicit-def: $rax
; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rax)
  store <32 x i8> %a, ptr undef, align 4
  unreachable
}

; It is faster to make two 16-byte saves if the data is already in xmm
; registers, for example after an integer operation.
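;
; As an illustrative sketch (a comment only, not a FileCheck assertion): with
; slow-unaligned-mem-32, @double_save below expects a non-volatile
;   store <8 x i32> %Z, ptr %P, align 16
; whose value is built from two xmm halves to lower to two 16-byte stores,
;   vmovaps %xmm1, 16(%rdi)
;   vmovaps %xmm0, (%rdi)
; rather than vinsertf128 plus a single 32-byte vmovups. The volatile variant
; keeps the single 32-byte store, since volatile accesses may not be split.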
define void @double_save(<4 x i32> %A, <4 x i32> %B, ptr %P) nounwind ssp {
; CHECK-LABEL: double_save:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps %xmm1, 16(%rdi)
; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: double_save:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT:    # implicit-def: $ymm0
; CHECK_O0-NEXT:    vmovaps %xmm2, %xmm0
; CHECK_O0-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rdi)
; CHECK_O0-NEXT:    vzeroupper
; CHECK_O0-NEXT:    retq
  %Z = shufflevector <4 x i32> %A, <4 x i32> %B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  store <8 x i32> %Z, ptr %P, align 16
  ret void
}

define void @double_save_volatile(<4 x i32> %A, <4 x i32> %B, ptr %P) nounwind {
; CHECK-LABEL: double_save_volatile:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $xmm0 killed $xmm0 def $ymm0
; CHECK-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK-NEXT:    vmovups %ymm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: double_save_volatile:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    vmovaps %xmm0, %xmm2
; CHECK_O0-NEXT:    # implicit-def: $ymm0
; CHECK_O0-NEXT:    vmovaps %xmm2, %xmm0
; CHECK_O0-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rdi)
; CHECK_O0-NEXT:    vzeroupper
; CHECK_O0-NEXT:    retq
  %Z = shufflevector <4 x i32> %A, <4 x i32> %B, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
  store volatile <8 x i32> %Z, ptr %P, align 16
  ret void
}

declare void @llvm.x86.avx.maskstore.ps.256(ptr, <8 x i32>, <8 x float>) nounwind

define void @f_f() nounwind {
; CHECK-LABEL: f_f:
; CHECK:       # %bb.0: # %allocas
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    testb %al, %al
; CHECK-NEXT:    jne .LBB9_2
; CHECK-NEXT:  # %bb.1: # %cif_mask_all
; CHECK-NEXT:  .LBB9_2: # %cif_mask_mixed
; CHECK-NEXT:    xorl %eax, %eax
; CHECK-NEXT:    testb %al, %al
; CHECK-NEXT:    jne .LBB9_4
; CHECK-NEXT:  # %bb.3: # %cif_mixed_test_all
; CHECK-NEXT:    vmovss {{.*#+}} xmm0 = [4294967295,0,0,0]
; CHECK-NEXT:    vmaskmovps %ymm0, %ymm0, (%rax)
; CHECK-NEXT:  .LBB9_4: # %cif_mixed_test_any_check
;
; CHECK_O0-LABEL: f_f:
; CHECK_O0:       # %bb.0: # %allocas
; CHECK_O0-NEXT:    # implicit-def: $al
; CHECK_O0-NEXT:    testb $1, %al
; CHECK_O0-NEXT:    jne .LBB9_1
; CHECK_O0-NEXT:    jmp .LBB9_2
; CHECK_O0-NEXT:  .LBB9_1: # %cif_mask_all
; CHECK_O0-NEXT:  .LBB9_2: # %cif_mask_mixed
; CHECK_O0-NEXT:    # implicit-def: $al
; CHECK_O0-NEXT:    testb $1, %al
; CHECK_O0-NEXT:    jne .LBB9_3
; CHECK_O0-NEXT:    jmp .LBB9_4
; CHECK_O0-NEXT:  .LBB9_3: # %cif_mixed_test_all
; CHECK_O0-NEXT:    vmovdqa {{.*#+}} xmm0 = [4294967295,0,0,0]
; CHECK_O0-NEXT:    vmovdqa %xmm0, %xmm0
; CHECK_O0-NEXT:    # kill: def $ymm0 killed $xmm0
; CHECK_O0-NEXT:    # implicit-def: $rax
; CHECK_O0-NEXT:    # implicit-def: $ymm1
; CHECK_O0-NEXT:    vmaskmovps %ymm1, %ymm0, (%rax)
; CHECK_O0-NEXT:  .LBB9_4: # %cif_mixed_test_any_check
allocas:
  br i1 undef, label %cif_mask_all, label %cif_mask_mixed

cif_mask_all:
  unreachable

cif_mask_mixed:
  br i1 undef, label %cif_mixed_test_all, label %cif_mixed_test_any_check

cif_mixed_test_all:
  call void @llvm.x86.avx.maskstore.ps.256(ptr undef, <8 x i32> <i32 -1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, <8 x float> undef) nounwind
  unreachable

cif_mixed_test_any_check:
  unreachable
}
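
; The three tests below exercise the same splitting rule at different
; alignments (a descriptive note, not a FileCheck assertion): a 32-byte
; access whose alignment is below 32 bytes is split into two 16-byte accesses
; under slow-unaligned-mem-32, while an access known to be at least 32-byte
; aligned (align 64 in @add4i64a64) keeps a single 32-byte vmovaps.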

define void @add8i32(ptr %ret, ptr %bp) nounwind {
; CHECK-LABEL: add8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovups (%rsi), %xmm0
; CHECK-NEXT:    vmovups 16(%rsi), %xmm1
; CHECK-NEXT:    vmovups %xmm1, 16(%rdi)
; CHECK-NEXT:    vmovups %xmm0, (%rdi)
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: add8i32:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    vmovdqu (%rsi), %xmm2
; CHECK_O0-NEXT:    vmovdqu 16(%rsi), %xmm1
; CHECK_O0-NEXT:    # implicit-def: $ymm0
; CHECK_O0-NEXT:    vmovaps %xmm2, %xmm0
; CHECK_O0-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rdi)
; CHECK_O0-NEXT:    vzeroupper
; CHECK_O0-NEXT:    retq
  %b = load <8 x i32>, ptr %bp, align 1
  %x = add <8 x i32> zeroinitializer, %b
  store <8 x i32> %x, ptr %ret, align 1
  ret void
}

define void @add4i64a64(ptr %ret, ptr %bp) nounwind {
; CHECK-LABEL: add4i64a64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps (%rsi), %ymm0
; CHECK-NEXT:    vmovaps %ymm0, (%rdi)
; CHECK-NEXT:    vzeroupper
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: add4i64a64:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    vmovaps (%rsi), %ymm0
; CHECK_O0-NEXT:    vmovdqa %ymm0, (%rdi)
; CHECK_O0-NEXT:    vzeroupper
; CHECK_O0-NEXT:    retq
  %b = load <4 x i64>, ptr %bp, align 64
  %x = add <4 x i64> zeroinitializer, %b
  store <4 x i64> %x, ptr %ret, align 64
  ret void
}

define void @add4i64a16(ptr %ret, ptr %bp) nounwind {
; CHECK-LABEL: add4i64a16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmovaps (%rsi), %xmm0
; CHECK-NEXT:    vmovaps 16(%rsi), %xmm1
; CHECK-NEXT:    vmovaps %xmm1, 16(%rdi)
; CHECK-NEXT:    vmovaps %xmm0, (%rdi)
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: add4i64a16:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    vmovdqa (%rsi), %xmm2
; CHECK_O0-NEXT:    vmovdqa 16(%rsi), %xmm1
; CHECK_O0-NEXT:    # implicit-def: $ymm0
; CHECK_O0-NEXT:    vmovaps %xmm2, %xmm0
; CHECK_O0-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
; CHECK_O0-NEXT:    vmovdqu %ymm0, (%rdi)
; CHECK_O0-NEXT:    vzeroupper
; CHECK_O0-NEXT:    retq
  %b = load <4 x i64>, ptr %bp, align 16
  %x = add <4 x i64> zeroinitializer, %b
  store <4 x i64> %x, ptr %ret, align 16
  ret void
}

; This used to crash.
; v2i128 is not a "simple" (MVT) type, but we can still split it.
; This example gets split further in legalization.

define void @PR43916(<2 x i128> %y, ptr %z) {
; CHECK-LABEL: PR43916:
; CHECK:       # %bb.0:
; CHECK-NEXT:    movq %rcx, 24(%r8)
; CHECK-NEXT:    movq %rdx, 16(%r8)
; CHECK-NEXT:    movq %rsi, 8(%r8)
; CHECK-NEXT:    movq %rdi, (%r8)
; CHECK-NEXT:    retq
;
; CHECK_O0-LABEL: PR43916:
; CHECK_O0:       # %bb.0:
; CHECK_O0-NEXT:    movq %rdi, (%r8)
; CHECK_O0-NEXT:    movq %rsi, 8(%r8)
; CHECK_O0-NEXT:    movq %rdx, 16(%r8)
; CHECK_O0-NEXT:    movq %rcx, 24(%r8)
; CHECK_O0-NEXT:    retq
  store <2 x i128> %y, ptr %z, align 16
  ret void
}
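
; A worked sketch of the splitting in @PR43916 above, inferred from the
; generated code rather than from a legalization trace: the <2 x i128> store
; is first split into two i128 stores, and each i128 store is then legalized
; into two i64 stores, giving the four movq instructions in the checks. In
; the x86-64 calling convention %y arrives in %rdi:%rsi and %rdx:%rcx
; (low:high halves of each i128 element) and %z arrives in %r8.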