Lines matching full:align

25 ; CHECK-NEXT: [[TMP2:%.*]] = alloca [[TMP0:%.*]], align 16
26 ; CHECK-NEXT: [[MEMTMP:%.*]] = alloca [[TMP0]], align 16
29 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 16 [[AGG_RESULT:%.*]], ptr align 16 [[TMP2]], i32 32, i1 false)
34 %memtmp = alloca %0, align 16
37 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %tmp2, ptr align 16 %memtmp, i32 32, i1 false)
38 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %agg.result, ptr align 16 %tmp2, i32 32, i1 false)
49 ; CHECK-NEXT: call void @llvm.memmove.p0.p0.i32(ptr align 16 [[Q:%.*]], ptr align 16 [[P:%.*]], i32 32, i1 false)
52 %memtmp = alloca %0, align 16
53 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %memtmp, ptr align 16 %P, i32 32, i1 false)
54 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %Q, ptr align 16 %memtmp, i32 32, i1 false)
63 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 16 [[Q:%.*]], ptr align 16 @C, i32 32, i1 false)
66 %memtmp = alloca %0, align 16
67 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %memtmp, ptr align 16 @C, i32 32, i1 false)
68 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %Q, ptr align 16 %memtmp, i32 32, i1 false)
77 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 16 [[Q:%.*]], ptr align 16 [[P:%.*]], i32 32, i1 false)
80 %memtmp = alloca %0, align 16
81 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %memtmp, ptr align 16 %P, i32 32, i1 false)
82 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %Q, ptr align 16 %memtmp, i32 32, i1 false)
91 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 16 [[Q:%.*]], ptr align 16 [[P:%.*]], i32 32, i1 false)
94 %memtmp = alloca %0, align 16
95 call void @llvm.memcpy.inline.p0.p0.i32(ptr align 16 %memtmp, ptr align 16 %P, i32 32, i1 false)
96 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %Q, ptr align 16 %memtmp, i32 32, i1 false)
105 ; CHECK-NEXT: call void @llvm.memcpy.inline.p0.p0.i32(ptr align 16 [[Q:%.*]], ptr align 16 [[P:%.*]], i32 32, i1 false)
108 %memtmp = alloca %0, align 16
109 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %memtmp, ptr align 16 %P, i32 32, i1 false)
110 call void @llvm.memcpy.inline.p0.p0.i32(ptr align 16 %Q, ptr align 16 %memtmp, i32 32, i1 false)
118 ; CHECK-NEXT: call void @llvm.memcpy.inline.p0.p0.i32(ptr align 16 [[Q:%.*]], ptr align 16 [[P:%.*]], i32 32, i1 false)
121 %memtmp = alloca %0, align 16
122 call void @llvm.memcpy.inline.p0.p0.i32(ptr align 16 %memtmp, ptr align 16 %P, i32 32, i1 false)
123 call void @llvm.memcpy.inline.p0.p0.i32(ptr align 16 %Q, ptr align 16 %memtmp, i32 32, i1 false)
131 ; CHECK-NEXT: [[TMP:%.*]] = alloca [16 x i8], align 1
132 ; CHECK-NEXT: call void @llvm.memcpy.inline.p0.p0.i32(ptr align 1 [[TMP]], ptr align 1 [[SRC:%.*]], i32 16, i1 false)
133 ; CHECK-NEXT: call void @llvm.memcpy.inline.p0.p0.i32(ptr align 1 [[DEST:%.*]], ptr align 1 [[TMP]], i32 16, i1 false)
136 %tmp = alloca [16 x i8], align 1
137 call void @llvm.memcpy.inline.p0.p0.i32(ptr align 1 %tmp, ptr align 1 %src, i32 16, i1 false)
138 call void @llvm.memcpy.inline.p0.p0.i32(ptr align 1 %dest, ptr align 1 %tmp, i32 16, i1 false)
148 %tmp = alloca [16 x i8], align 1
151 call void @llvm.memcpy.inline.p0.p0.i32(ptr align 1 %tmp, ptr align 1 %src, i32 16, i1 false)
152 call void @llvm.memcpy.inline.p0.p0.i32(ptr align 1 %dest, ptr align 1 %tmp, i32 16, i1 false)
159 ; CHECK-NEXT: [[TMP:%.*]] = alloca [16 x i8], align 1
162 ; CHECK-NEXT: call void @llvm.memcpy.inline.p0.p0.i32(ptr align 1 [[TMP]], ptr align 1 [[SRC]], i32 16, i1 false)
163 ; CHECK-NEXT: call void @llvm.memcpy.inline.p0.p0.i32(ptr align 1 [[DEST]], ptr align 1 [[TMP]], i32 16, i1 false)
166 %tmp = alloca [16 x i8], align 1
169 call void @llvm.memcpy.inline.p0.p0.i32(ptr align 1 %tmp, ptr align 1 %src, i32 16, i1 false)
170 call void @llvm.memcpy.inline.p0.p0.i32(ptr align 1 %dest, ptr align 1 %tmp, i32 16, i1 false)
178 ; CHECK-NEXT: [[X_0:%.*]] = alloca [[TMP0:%.*]], align 16
179 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 16 [[AGG_RESULT:%.*]], ptr align 16 @x, i32 32, i1 false)
183 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %x.0, ptr align 16 @x, i32 32, i1 false)
184 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %agg.result, ptr align 16 %x.0, i32 32, i1 false)
192 ; CHECK-NEXT: call void @test4a(ptr byval(i8) align 1 [[P:%.*]])
196 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %A, ptr align 4 %P, i64 8, i1 false)
197 call void @test4a(ptr align 1 byval(i8) %A)
204 ; CHECK-NEXT: [[A1:%.*]] = alloca [[TMP1:%.*]], align 8
205 ; CHECK-NEXT: call void @llvm.memcpy.p0.p1.i64(ptr align 4 [[A1]], ptr addrspace(1) align 4 [[P:%.*]], i64 8, i1 false)
206 ; CHECK-NEXT: call void @test4a(ptr byval(i8) align 1 [[A1]])
210 call void @llvm.memcpy.p0.p1.i64(ptr align 4 %a1, ptr addrspace(1) align 4 %P, i64 8, i1 false)
211 call void @test4a(ptr align 1 byval(i8) %a1)
217 ; CHECK-NEXT: [[A1:%.*]] = alloca [[TMP1:%.*]], align 8
218 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[A1]], ptr align 4 [[P:%.*]], i64 8, i1 false)
219 ; CHECK-NEXT: store i8 0, ptr [[A1]], align 1
220 ; CHECK-NEXT: call void @test4a(ptr byval(i8) align 1 [[A1]])
224 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %a1, ptr align 4 %P, i64 8, i1 false)
226 call void @test4a(ptr align 1 byval(i8) %a1)
232 ; CHECK-NEXT: [[A1:%.*]] = alloca [[TMP1:%.*]], align 8
233 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[A1]], ptr align 4 [[P:%.*]], i64 8, i1 false)
234 ; CHECK-NEXT: [[X:%.*]] = load i8, ptr [[A1]], align 1
235 ; CHECK-NEXT: call void @test4a(ptr byval(i8) align 1 [[P]])
239 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %a1, ptr align 4 %P, i64 8, i1 false)
241 call void @test4a(ptr align 1 byval(i8) %a1)
249 ; CHECK-NEXT: call void @test4a(ptr byval(i8) align 1 [[P:%.*]])
255 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %a1, ptr align 4 %P, i64 8, i1 false)
259 call void @test4a(ptr align 1 byval(i8) %a1)
266 declare void @test4a(ptr align 1 byval(i8))
270 @sS = external global %struct.S, align 16
272 declare void @test5a(ptr align 16 byval(%struct.S)) nounwind ssp
279 ; CHECK-NEXT: [[Y:%.*]] = alloca [[STRUCT_S:%.*]], align 16
280 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[Y]], ptr align 16 @sS, i64 32, i1 false)
282 ; CHECK-NEXT: store i8 4, ptr [[A]], align 1
283 ; CHECK-NEXT: call void @test5a(ptr byval([[STRUCT_S]]) align 16 [[Y]])
287 %y = alloca %struct.S, align 16
288 call void @llvm.memcpy.p0.p0.i64(ptr align 16 %y, ptr align 16 @sS, i64 32, i1 false)
291 call void @test5a(ptr align 16 byval(%struct.S) %y)
300 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %P, ptr align 4 %P, i64 8, i1 false)
309 define i32 @test7(ptr nocapture align 8 byval(%struct.p) %q) nounwind ssp {
312 ; CHECK-NEXT: [[CALL:%.*]] = call i32 @g(ptr byval([[STRUCT_P:%.*]]) align 8 [[Q:%.*]]) #[[ATTR2]]
316 %agg.tmp = alloca %struct.p, align 4
317 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %agg.tmp, ptr align 4 %q, i64 48, i1 false)
318 %call = call i32 @g(ptr align 8 byval(%struct.p) %agg.tmp) nounwind
322 declare i32 @g(ptr align 8 byval(%struct.p))
350 ; CHECK-NEXT: [[B:%.*]] = alloca [[STRUCT_BIG:%.*]], align 4
351 ; CHECK-NEXT: [[TMP:%.*]] = alloca [[STRUCT_BIG]], align 4
359 %b = alloca %struct.big, align 4
360 %tmp = alloca %struct.big, align 4
364 call void @llvm.memcpy.p1.p1.i64(ptr addrspace(1) align 4 %0, ptr addrspace(1) align 4 %1, i64 200, i1 false)
372 ; CHECK-NEXT: [[B:%.*]] = alloca [[STRUCT_BIG:%.*]], align 4
373 ; CHECK-NEXT: [[TMP:%.*]] = alloca [[STRUCT_BIG]], align 4
379 %b = alloca %struct.big, align 4
380 %tmp = alloca %struct.big, align 4
382 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %b, ptr align 4 %tmp, i64 200, i1 false)
396 ; CHECK-NEXT: [[A:%.*]] = alloca i32, align 4
397 ; CHECK-NEXT: store i32 [[Y:%.*]], ptr [[A]], align 4
399 ; CHECK-NEXT: [[C:%.*]] = load i32, ptr [[A]], align 4
400 ; CHECK-NEXT: store i32 [[C]], ptr [[X:%.*]], align 4
403 %a = alloca i32, align 4
414 ; CHECK-NEXT: call void @llvm.memset.p1.i64(ptr addrspace(1) align 4 [[P:%.*]], i8 0, i64 80, i1 false)
417 %A = alloca [20 x i32], align 4
418 call void @llvm.memset.p0.i64(ptr align 4 %A, i8 0, i64 80, i1 false)
419 call void @llvm.memcpy.p1.p0.i64(ptr addrspace(1) align 4 %P, ptr align 4 %A, i64 80, i1 false)
430 define void @immut_param(ptr align 4 noalias %val) {
432 ; CHECK-NEXT: call void @f(ptr noalias readonly align 4 captures(none) [[VAL:%.*]])
435 %val1 = alloca i8, align 4
436 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
437 call void @f(ptr align 4 nocapture noalias readonly %val1)
442 define void @immut_param_maycapture(ptr align 4 noalias %val) {
444 ; CHECK-NEXT: [[VAL1:%.*]] = alloca i8, align 4
445 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VAL1]], ptr align 4 [[VAL:%.*]], i64 1, i1 false)
446 ; CHECK-NEXT: call void @f(ptr noalias readonly align 4 [[VAL1]])
449 %val1 = alloca i8, align 4
450 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
451 call void @f(ptr align 4 noalias readonly %val1)
456 define void @immut_param_mayalias(ptr align 4 noalias %val) {
458 ; CHECK-NEXT: [[VAL1:%.*]] = alloca i8, align 4
460 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VAL1]], ptr align 4 [[VAL:%.*]], i64 1, i1 false)
461 ; CHECK-NEXT: call void @f(ptr readonly align 4 captures(none) [[VAL1]])
464 %val1 = alloca i8, align 4
466 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
467 call void @f(ptr align 4 nocapture readonly %val1)
473 define void @immut_param_unescaped_alloca(ptr align 4 noalias %val) {
475 ; CHECK-NEXT: call void @f(ptr readonly align 4 captures(none) [[VAL:%.*]])
478 %val1 = alloca i8, align 4
479 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
480 call void @f(ptr align 4 nocapture readonly %val1)
486 define void @immut_param_memory_argmem_read(ptr align 4 noalias %val) {
488 ; CHECK-NEXT: [[VAL1:%.*]] = alloca i8, align 4
490 ; CHECK-NEXT: call void @f(ptr readonly align 4 captures(none) [[VAL:%.*]]) #[[ATTR6:[0-9]+]]
493 %val1 = alloca i8, align 4
495 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
496 call void @f(ptr align 4 nocapture readonly %val1) memory(argmem: read)
503 define void @immut_param_memory_argmem_read_no_readonly(ptr align 4 noalias %val) {
505 ; CHECK-NEXT: [[VAL1:%.*]] = alloca i8, align 4
507 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VAL1]], ptr align 4 [[VAL:%.*]], i64 1, i1 false)
508 ; CHECK-NEXT: call void @f(ptr align 4 captures(none) [[VAL1]]) #[[ATTR6]]
511 %val1 = alloca i8, align 4
513 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
514 call void @f(ptr align 4 nocapture %val1) memory(argmem: read)
519 define void @immut_param_maywrite(ptr align 4 noalias %val) {
521 ; CHECK-NEXT: [[VAL1:%.*]] = alloca i8, align 4
522 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VAL1]], ptr align 4 [[VAL:%.*]], i64 1, i1 false)
523 ; CHECK-NEXT: call void @f(ptr noalias align 4 captures(none) [[VAL1]])
526 %val1 = alloca i8, align 4
527 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
528 call void @f(ptr align 4 nocapture noalias %val1)
532 define void @immut_param_readonly(ptr align 4 noalias %val) {
534 ; CHECK-NEXT: call void @f_full_readonly(ptr align 4 [[VAL:%.*]])
537 %val1 = alloca i8, align 4
538 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
539 call void @f_full_readonly(ptr align 4 %val1)
543 define void @immut_param_no_align(ptr align 4 noalias %val) {
548 %val1 = alloca i8, align 4
549 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
556 define void @immut_param_global(ptr align 4 noalias %val) {
558 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 @gp, ptr align 4 [[VAL:%.*]], i64 1, i1 false)
559 ; CHECK-NEXT: call void @f(ptr noalias readonly align 4 captures(none) @gp)
562 call void @llvm.memcpy.p0.p0.i64(ptr align 4 @gp, ptr align 4 %val, i64 1, i1 false)
563 call void @f(ptr nocapture align 4 noalias readonly @gp)
568 define void @immut_param_vla(ptr align 4 noalias %val, i64 %n) {
570 ; CHECK-NEXT: [[VAL1:%.*]] = alloca ptr, i64 [[N:%.*]], align 4
571 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VAL1]], ptr align 4 [[VAL:%.*]], i64 1, i1 false)
572 ; CHECK-NEXT: call void @f(ptr noalias readonly align 4 captures(none) [[VAL1]])
576 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
577 call void @f(ptr nocapture align 4 noalias readonly %val1)
582 define void @immut_param_scalable_vector(ptr align 4 noalias %val) {
584 ; CHECK-NEXT: [[VAL1:%.*]] = alloca <vscale x 2 x i32>, align 8
585 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VAL1]], ptr align 4 [[VAL:%.*]], i64 2, i1 false)
586 ; CHECK-NEXT: call void @f(ptr noalias readonly align 4 captures(none) [[VAL1]])
590 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 2, i1 false)
591 call void @f(ptr nocapture align 4 noalias readonly %val1)
596 define void @immut_param_modified_dst(ptr align 4 noalias %val) {
598 ; CHECK-NEXT: [[VAL1:%.*]] = alloca i8, align 4
599 ; CHECK-NEXT: store i32 13, ptr [[VAL1]], align 4
600 ; CHECK-NEXT: call void @f(ptr noalias readonly align 4 captures(none) [[VAL1]])
603 %val1 = alloca i8, align 4
604 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
606 call void @f(ptr nocapture align 4 noalias readonly %val1)
611 define void @immut_param_modified_src(ptr align 4 noalias %val) {
613 ; CHECK-NEXT: [[VAL1:%.*]] = alloca i8, align 4
614 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VAL1]], ptr align 4 [[VAL:%.*]], i64 1, i1 false)
615 ; CHECK-NEXT: store i32 13, ptr [[VAL]], align 4
616 ; CHECK-NEXT: call void @f(ptr noalias readonly align 4 captures(none) [[VAL1]])
619 %val1 = alloca i8, align 4
620 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
622 call void @f(ptr nocapture align 4 noalias readonly %val1)
627 define void @immut_param_volatile(ptr align 4 noalias %val) {
629 ; CHECK-NEXT: [[VAL1:%.*]] = alloca i8, align 4
630 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VAL1]], ptr align 4 [[VAL:%.*]], i64 1, i1 true)
631 ; CHECK-NEXT: call void @f(ptr noalias readonly align 4 captures(none) [[VAL1]])
634 %val1 = alloca i8, align 4
635 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 true)
636 call void @f(ptr nocapture align 4 noalias readonly %val1)
641 define void @immut_param_different_addrespace(ptr addrspace(1) align 4 noalias %val) {
643 ; CHECK-NEXT: [[VAL1:%.*]] = alloca i8, align 4
644 ; CHECK-NEXT: call void @llvm.memcpy.p0.p1.i64(ptr align 4 [[VAL1]], ptr addrspace(1) align 4 [[VAL:%.*]], i64 1, i1 false)
645 ; CHECK-NEXT: call void @f(ptr noalias readonly align 4 captures(none) [[VAL1]])
648 %val1 = alloca i8, align 4
649 call void @llvm.memcpy.p0.p1.i64(ptr align 4 %val1, ptr addrspace(1) align 4 %val, i64 1, i1 false)
650 call void @f(ptr nocapture align 4 noalias readonly %val1)
654 define void @immut_param_bigger_align(ptr align 16 noalias %val) {
659 %val1 = alloca i8, align 4
660 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr %val, i64 1, i1 false)
666 define void @immut_param_smaller_align(ptr align 4 noalias %val) {
668 ; CHECK-NEXT: [[VAL1:%.*]] = alloca i8, align 16
669 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 16 [[VAL1]], ptr [[VAL:%.*]], i64 1, i1 false)
673 %val1 = alloca i8, align 16
674 call void @llvm.memcpy.p0.p0.i64(ptr align 16 %val1, ptr %val, i64 1, i1 false)
681 ; CHECK-NEXT: [[VAL:%.*]] = alloca i8, align 4
682 ; CHECK-NEXT: store i32 42, ptr [[VAL]], align 4
686 %val = alloca i8, align 1
688 %val1 = alloca i8, align 4
698 ; CHECK-NEXT: [[VAL1:%.*]] = alloca [4 x i8], align 4
699 ; CHECK-NEXT: [[VAL2:%.*]] = alloca [16 x i8], align 16
701 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VAL3]], ptr align 4 [[VAL:%.*]], i64 4, i1 false)
705 %val1 = alloca [4 x i8], align 4
706 %val2 = alloca [16 x i8], align 16
708 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val3, ptr align 4 %val, i64 4, i1 false)
716 ; CHECK-NEXT: [[VAL1:%.*]] = alloca i8, align 4
717 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VAL1]], ptr align 4 [[VAL:%.*]], i64 1, i1 false)
721 %val1 = alloca i8, align 4
722 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
729 ; CHECK-NEXT: [[VAL:%.*]] = alloca i8, align 4
730 ; CHECK-NEXT: store i32 42, ptr [[VAL]], align 4
734 %val = alloca i8, align 4
736 %val1 = alloca i8, align 4
737 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
745 ; CHECK-NEXT: [[VAL:%.*]] = alloca i8, align 4
746 ; CHECK-NEXT: store i32 42, ptr [[VAL]], align 4
747 ; CHECK-NEXT: [[VAL1:%.*]] = alloca i8, align 4
748 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VAL1]], ptr align 4 [[VAL]], i64 1, i1 false)
752 %val = alloca i8, align 4
754 %val1 = alloca i8, align 4
755 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
762 define void @immut_valid_align_branched(i1 %c, ptr noalias align 4 %val) {
764 ; CHECK-NEXT: [[VAL1:%.*]] = alloca [4 x i8], align 4
765 ; CHECK-NEXT: [[VAL2:%.*]] = alloca [16 x i8], align 4
767 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VAL3]], ptr align 4 [[VAL:%.*]], i64 4, i1 false)
771 %val1 = alloca [4 x i8], align 4
772 %val2 = alloca [16 x i8], align 4
774 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val3, ptr align 4 %val, i64 4, i1 false)
780 define void @immut_param_noalias_metadata(ptr align 4 byval(i32) %ptr) {
782 ; CHECK-NEXT: store i32 1, ptr [[PTR:%.*]], align 4, !noalias [[META0:![0-9]+]]
786 %tmp = alloca i32, align 4
788 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp, ptr align 4 %ptr, i64 4, i1 false)
793 define void @byval_param_noalias_metadata(ptr align 4 byval(i32) %ptr) {
795 ; CHECK-NEXT: store i32 1, ptr [[PTR:%.*]], align 4, !noalias [[META0]]
796 ; CHECK-NEXT: call void @f_byval(ptr byval(i32) align 4 [[PTR]])
799 %tmp = alloca i32, align 4
801 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp, ptr align 4 %ptr, i64 4, i1 false)
802 call void @f_byval(ptr align 4 byval(i32) %tmp), !alias.scope !2
806 define void @byval_param_profile_metadata(ptr align 4 byval(i32) %ptr) {
808 ; CHECK-NEXT: store i32 1, ptr [[PTR2:%.*]], align 4
809 ; CHECK-NEXT: call void @f_byval(ptr byval(i32) align 4 [[PTR2]]), !prof [[PROF3:![0-9]+]], !memprof [[META4:![0-9]+]], !callsite [[META7:![0-9]+]]
812 %tmp = alloca i32, align 4
814 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %tmp, ptr align 4 %ptr, i64 4, i1 false)
815 call void @f_byval(ptr align 4 byval(i32) %tmp), !memprof !3, !callsite !6, !prof !7
833 ; CHECK-NEXT: [[MEMTMP:%.*]] = alloca [32 x i8], align 16
835 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 16 [[MEMTMP]], ptr align 16 [[P:%.*]], i32 32, i1 false)
837 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 16 [[Q:%.*]], ptr align 16 [[MEMTMP]], i32 32, i1 false)
840 %memtmp = alloca [32 x i8], align 16
842 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %memtmp, ptr align 16 %P, i32 32, i1 false)
844 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %Q, ptr align 16 %memtmp, i32 32, i1 false)
850 ; CHECK-NEXT: [[MEMTMP:%.*]] = alloca [32 x i8], align 16
851 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 16 [[MEMTMP]], ptr align 16 [[P:%.*]], i32 32, i1 false)
853 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 16 [[Q:%.*]], ptr align 16 [[P]], i32 32, i1 false)
857 %memtmp = alloca [32 x i8], align 16
858 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %memtmp, ptr align 16 %P, i32 32, i1 false)
860 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %Q, ptr align 16 %memtmp, i32 32, i1 false)
868 ; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i32(ptr align 16 [[Q:%.*]], ptr align 16 [[P:%.*]], i32 32, i1 false)
872 %memtmp = alloca [32 x i8], align 16
873 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %memtmp, ptr align 16 %P, i32 32, i1 false)
875 call void @llvm.memcpy.p0.p0.i32(ptr align 16 %Q, ptr align 16 %memtmp, i32 32, i1 false)
883 ; CHECK-NEXT: call void @test4a(ptr byval(i8) align 1 [[P:%.*]])
888 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %A, ptr align 4 %P, i64 8, i1 false)
890 call void @test4a(ptr align 1 byval(i8) %A)
895 define void @memcpy_immut_escape_after(ptr align 4 noalias %val) {
898 ; CHECK-NEXT: call void @f(ptr noalias readonly align 4 captures(none) [[VAL:%.*]])
902 %val1 = alloca i8, align 4
903 call void @llvm.memcpy.p0.p0.i64(ptr align 4 %val1, ptr align 4 %val, i64 1, i1 false)
905 call void @f(ptr align 4 nocapture noalias readonly %val1)
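
The matches above repeatedly pair an input memcpy-through-a-stack-temporary with the CHECK lines left after optimization, which is the memcpy-memcpy forwarding that the MemCpyOpt pass performs. Below is a minimal, self-contained sketch of that pattern; the function name, the RUN invocation, and the comments describing the expected result are illustrative assumptions and are not lines taken from the listing itself.

; RUN: opt -passes=memcpyopt -S %s
declare void @llvm.memcpy.p0.p0.i32(ptr, ptr, i32, i1)

define void @forward_through_tmp(ptr noalias %Q, ptr noalias %P) {
  ; Copy %P into a stack temporary, then copy the temporary into %Q.
  %memtmp = alloca [32 x i8], align 16
  call void @llvm.memcpy.p0.p0.i32(ptr align 16 %memtmp, ptr align 16 %P, i32 32, i1 false)
  call void @llvm.memcpy.p0.p0.i32(ptr align 16 %Q, ptr align 16 %memtmp, i32 32, i1 false)
  ; After -passes=memcpyopt the second copy is expected to read directly from
  ; %P (a memmove is used instead when the operands might overlap), and the
  ; now-dead copy into %memtmp is removed.
  ret void
}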