; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s

;; ---- memset -----

; Ensure 0-length memset is removed
define void @test_memset_zero_length(ptr %dest) {
; CHECK-LABEL: @test_memset_zero_length(
; CHECK-NEXT:    ret void
;
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 1 %dest, i8 1, i32 0, i32 1)
  ret void
}

define void @test_memset_to_store(ptr %dest) {
; CHECK-LABEL: @test_memset_to_store(
; CHECK-NEXT:    store atomic i8 1, ptr [[DEST:%.*]] unordered, align 1
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i32(ptr nonnull align 1 [[DEST]], i8 1, i32 2, i32 1)
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i32(ptr nonnull align 1 [[DEST]], i8 1, i32 4, i32 1)
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i32(ptr nonnull align 1 [[DEST]], i8 1, i32 8, i32 1)
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i32(ptr nonnull align 1 [[DEST]], i8 1, i32 16, i32 1)
; CHECK-NEXT:    ret void
;
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 1 %dest, i8 1, i32 1, i32 1)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 1 %dest, i8 1, i32 2, i32 1)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 1 %dest, i8 1, i32 4, i32 1)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 1 %dest, i8 1, i32 8, i32 1)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 1 %dest, i8 1, i32 16, i32 1)
  ret void
}

define void @test_memset_to_store_2(ptr %dest) {
; CHECK-LABEL: @test_memset_to_store_2(
; CHECK-NEXT:    store atomic i8 1, ptr [[DEST:%.*]] unordered, align 2
; CHECK-NEXT:    store atomic i16 257, ptr [[DEST]] unordered, align 2
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i32(ptr nonnull align 2 [[DEST]], i8 1, i32 4, i32 2)
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i32(ptr nonnull align 2 [[DEST]], i8 1, i32 8, i32 2)
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i32(ptr nonnull align 2 [[DEST]], i8 1, i32 16, i32 2)
; CHECK-NEXT:    ret void
;
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 2 %dest, i8 1, i32 1, i32 1)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 2 %dest, i8 1, i32 2, i32 2)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 2 %dest, i8 1, i32 4, i32 2)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 2 %dest, i8 1, i32 8, i32 2)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 2 %dest, i8 1, i32 16, i32 2)
  ret void
}

define void @test_memset_to_store_4(ptr %dest) {
; CHECK-LABEL: @test_memset_to_store_4(
; CHECK-NEXT:    store atomic i8 1, ptr [[DEST:%.*]] unordered, align 4
; CHECK-NEXT:    store atomic i16 257, ptr [[DEST]] unordered, align 4
; CHECK-NEXT:    store atomic i32 16843009, ptr [[DEST]] unordered, align 4
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i32(ptr nonnull align 4 [[DEST]], i8 1, i32 8, i32 4)
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i32(ptr nonnull align 4 [[DEST]], i8 1, i32 16, i32 4)
; CHECK-NEXT:    ret void
;
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 4 %dest, i8 1, i32 1, i32 1)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 4 %dest, i8 1, i32 2, i32 2)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 4 %dest, i8 1, i32 4, i32 4)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 4 %dest, i8 1, i32 8, i32 4)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 4 %dest, i8 1, i32 16, i32 4)
  ret void
}

define void @test_memset_to_store_8(ptr %dest) {
; CHECK-LABEL: @test_memset_to_store_8(
; CHECK-NEXT:    store atomic i8 1, ptr [[DEST:%.*]] unordered, align 8
; CHECK-NEXT:    store atomic i16 257, ptr [[DEST]] unordered, align 8
; CHECK-NEXT:    store atomic i32 16843009, ptr [[DEST]] unordered, align 8
; CHECK-NEXT:    store atomic i64 72340172838076673, ptr [[DEST]] unordered, align 8
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i32(ptr nonnull align 8 [[DEST]], i8 1, i32 16, i32 8)
; CHECK-NEXT:    ret void
;
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 8 %dest, i8 1, i32 1, i32 1)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 8 %dest, i8 1, i32 2, i32 2)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 8 %dest, i8 1, i32 4, i32 4)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 8 %dest, i8 1, i32 8, i32 8)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 8 %dest, i8 1, i32 16, i32 8)
  ret void
}

define void @test_memset_to_store_16(ptr %dest) {
; CHECK-LABEL: @test_memset_to_store_16(
; CHECK-NEXT:    store atomic i8 1, ptr [[DEST:%.*]] unordered, align 16
; CHECK-NEXT:    store atomic i16 257, ptr [[DEST]] unordered, align 16
; CHECK-NEXT:    store atomic i32 16843009, ptr [[DEST]] unordered, align 16
; CHECK-NEXT:    store atomic i64 72340172838076673, ptr [[DEST]] unordered, align 16
; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0.i32(ptr nonnull align 16 [[DEST]], i8 1, i32 16, i32 16)
; CHECK-NEXT:    ret void
;
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 16 %dest, i8 1, i32 1, i32 1)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 16 %dest, i8 1, i32 2, i32 2)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 16 %dest, i8 1, i32 4, i32 4)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 16 %dest, i8 1, i32 8, i32 8)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 16 %dest, i8 1, i32 16, i32 16)
  ret void
}

declare void @llvm.memset.element.unordered.atomic.p0.i32(ptr nocapture writeonly, i8, i32, i32) nounwind argmemonly


;; =========================================
;; ----- memmove ------


@gconst = constant [32 x i8] c"0123456789012345678901234567890\00"
; Check that a memmove from a global constant is converted into a memcpy
define void @test_memmove_to_memcpy(ptr %dest) {
; CHECK-LABEL: @test_memmove_to_memcpy(
; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 1 [[DEST:%.*]], ptr nonnull align 16 @gconst, i32 32, i32 1)
; CHECK-NEXT:    ret void
;
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 1 %dest, ptr align 1 @gconst, i32 32, i32 1)
  ret void
}

define void @test_memmove_zero_length(ptr %dest, ptr %src) {
; CHECK-LABEL: @test_memmove_zero_length(
; CHECK-NEXT:    ret void
;
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 0, i32 1)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 2 %dest, ptr align 2 %src, i32 0, i32 2)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 4 %dest, ptr align 4 %src, i32 0, i32 4)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 8 %dest, ptr align 8 %src, i32 0, i32 8)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 0, i32 16)
  ret void
}

; memmove with src==dest is removed
define void @test_memmove_removed(ptr %srcdest, i32 %sz) {
; CHECK-LABEL: @test_memmove_removed(
; CHECK-NEXT:    ret void
;
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 1 %srcdest, ptr align 1 %srcdest, i32 %sz, i32 1)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 2 %srcdest, ptr align 2 %srcdest, i32 %sz, i32 2)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 4 %srcdest, ptr align 4 %srcdest, i32 %sz, i32 4)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 8 %srcdest, ptr align 8 %srcdest, i32 %sz, i32 8)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 16 %srcdest, ptr align 16 %srcdest, i32 %sz, i32 16)
  ret void
}

; memmove with a small constant length is converted to a load/store pair
define void @test_memmove_loadstore(ptr %dest, ptr %src) {
; CHECK-LABEL: @test_memmove_loadstore(
; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, ptr [[SRC:%.*]] unordered, align 1
; CHECK-NEXT:    store atomic i8 [[TMP1]], ptr [[DEST:%.*]] unordered, align 1
; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr nonnull align 1 [[DEST]], ptr nonnull align 1 [[SRC]], i32 2, i32 1)
; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr nonnull align 1 [[DEST]], ptr nonnull align 1 [[SRC]], i32 4, i32 1)
; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr nonnull align 1 [[DEST]], ptr nonnull align 1 [[SRC]], i32 8, i32 1)
; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr nonnull align 1 [[DEST]], ptr nonnull align 1 [[SRC]], i32 16, i32 1)
; CHECK-NEXT:    ret void
;
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 1, i32 1)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 2, i32 1)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 4, i32 1)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 8, i32 1)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 16, i32 1)
  ret void
}

define void @test_memmove_loadstore_2(ptr %dest, ptr %src) {
; CHECK-LABEL: @test_memmove_loadstore_2(
; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, ptr [[SRC:%.*]] unordered, align 2
; CHECK-NEXT:    store atomic i8 [[TMP1]], ptr [[DEST:%.*]] unordered, align 2
; CHECK-NEXT:    [[TMP2:%.*]] = load atomic i16, ptr [[SRC]] unordered, align 2
; CHECK-NEXT:    store atomic i16 [[TMP2]], ptr [[DEST]] unordered, align 2
; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr nonnull align 2 [[DEST]], ptr nonnull align 2 [[SRC]], i32 4, i32 2)
; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr nonnull align 2 [[DEST]], ptr nonnull align 2 [[SRC]], i32 8, i32 2)
; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr nonnull align 2 [[DEST]], ptr nonnull align 2 [[SRC]], i32 16, i32 2)
; CHECK-NEXT:    ret void
;
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 2 %dest, ptr align 2 %src, i32 1, i32 1)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 2 %dest, ptr align 2 %src, i32 2, i32 2)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 2 %dest, ptr align 2 %src, i32 4, i32 2)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 2 %dest, ptr align 2 %src, i32 8, i32 2)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 2 %dest, ptr align 2 %src, i32 16, i32 2)
  ret void
}

define void @test_memmove_loadstore_4(ptr %dest, ptr %src) {
; CHECK-LABEL: @test_memmove_loadstore_4(
; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, ptr [[SRC:%.*]] unordered, align 4
; CHECK-NEXT:    store atomic i8 [[TMP1]], ptr [[DEST:%.*]] unordered, align 4
; CHECK-NEXT:    [[TMP2:%.*]] = load atomic i16, ptr [[SRC]] unordered, align 4
; CHECK-NEXT:    store atomic i16 [[TMP2]], ptr [[DEST]] unordered, align 4
; CHECK-NEXT:    [[TMP3:%.*]] = load atomic i32, ptr [[SRC]] unordered, align 4
; CHECK-NEXT:    store atomic i32 [[TMP3]], ptr [[DEST]] unordered, align 4
; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr nonnull align 4 [[DEST]], ptr nonnull align 4 [[SRC]], i32 8, i32 4)
; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr nonnull align 4 [[DEST]], ptr nonnull align 4 [[SRC]], i32 16, i32 4)
; CHECK-NEXT:    ret void
;
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 4 %dest, ptr align 4 %src, i32 1, i32 1)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 4 %dest, ptr align 4 %src, i32 2, i32 2)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 4 %dest, ptr align 4 %src, i32 4, i32 4)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 4 %dest, ptr align 4 %src, i32 8, i32 4)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 4 %dest, ptr align 4 %src, i32 16, i32 4)
  ret void
}

define void @test_memmove_loadstore_8(ptr %dest, ptr %src) {
; CHECK-LABEL: @test_memmove_loadstore_8(
; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, ptr [[SRC:%.*]] unordered, align 8
; CHECK-NEXT:    store atomic i8 [[TMP1]], ptr [[DEST:%.*]] unordered, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = load atomic i16, ptr [[SRC]] unordered, align 8
; CHECK-NEXT:    store atomic i16 [[TMP2]], ptr [[DEST]] unordered, align 8
; CHECK-NEXT:    [[TMP3:%.*]] = load atomic i32, ptr [[SRC]] unordered, align 8
; CHECK-NEXT:    store atomic i32 [[TMP3]], ptr [[DEST]] unordered, align 8
; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i64, ptr [[SRC]] unordered, align 8
; CHECK-NEXT:    store atomic i64 [[TMP4]], ptr [[DEST]] unordered, align 8
; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr nonnull align 8 [[DEST]], ptr nonnull align 8 [[SRC]], i32 16, i32 8)
; CHECK-NEXT:    ret void
;
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 8 %dest, ptr align 8 %src, i32 1, i32 1)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 8 %dest, ptr align 8 %src, i32 2, i32 2)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 8 %dest, ptr align 8 %src, i32 4, i32 4)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 8 %dest, ptr align 8 %src, i32 8, i32 8)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 8 %dest, ptr align 8 %src, i32 16, i32 8)
  ret void
}

define void @test_memmove_loadstore_16(ptr %dest, ptr %src) {
; CHECK-LABEL: @test_memmove_loadstore_16(
; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, ptr [[SRC:%.*]] unordered, align 16
; CHECK-NEXT:    store atomic i8 [[TMP1]], ptr [[DEST:%.*]] unordered, align 16
; CHECK-NEXT:    [[TMP2:%.*]] = load atomic i16, ptr [[SRC]] unordered, align 16
; CHECK-NEXT:    store atomic i16 [[TMP2]], ptr [[DEST]] unordered, align 16
; CHECK-NEXT:    [[TMP3:%.*]] = load atomic i32, ptr [[SRC]] unordered, align 16
; CHECK-NEXT:    store atomic i32 [[TMP3]], ptr [[DEST]] unordered, align 16
; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i64, ptr [[SRC]] unordered, align 16
; CHECK-NEXT:    store atomic i64 [[TMP4]], ptr [[DEST]] unordered, align 16
; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr nonnull align 16 [[DEST]], ptr nonnull align 16 [[SRC]], i32 16, i32 16)
; CHECK-NEXT:    ret void
;
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 1, i32 1)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 2, i32 2)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 4, i32 4)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 8, i32 8)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 16, i32 16)
  ret void
}

declare void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i32) nounwind argmemonly

;; =========================================
;; ----- memcpy ------

define void @test_memcpy_zero_length(ptr %dest, ptr %src) {
; CHECK-LABEL: @test_memcpy_zero_length(
; CHECK-NEXT:    ret void
;
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 0, i32 1)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 2 %dest, ptr align 2 %src, i32 0, i32 2)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 4 %dest, ptr align 4 %src, i32 0, i32 4)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 8 %dest, ptr align 8 %src, i32 0, i32 8)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 0, i32 16)
  ret void
}

; memcpy with src==dest is removed
define void @test_memcpy_removed(ptr %srcdest, i32 %sz) {
; CHECK-LABEL: @test_memcpy_removed(
; CHECK-NEXT:    ret void
;
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 1 %srcdest, ptr align 1 %srcdest, i32 %sz, i32 1)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 2 %srcdest, ptr align 2 %srcdest, i32 %sz, i32 2)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 4 %srcdest, ptr align 4 %srcdest, i32 %sz, i32 4)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 8 %srcdest, ptr align 8 %srcdest, i32 %sz, i32 8)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 16 %srcdest, ptr align 16 %srcdest, i32 %sz, i32 16)
  ret void
}

; memcpy with a small constant length is converted to a load/store pair
define void @test_memcpy_loadstore(ptr %dest, ptr %src) {
; CHECK-LABEL: @test_memcpy_loadstore(
; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, ptr [[SRC:%.*]] unordered, align 1
; CHECK-NEXT:    store atomic i8 [[TMP1]], ptr [[DEST:%.*]] unordered, align 1
; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr nonnull align 1 [[DEST]], ptr nonnull align 1 [[SRC]], i32 2, i32 1)
; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr nonnull align 1 [[DEST]], ptr nonnull align 1 [[SRC]], i32 4, i32 1)
; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr nonnull align 1 [[DEST]], ptr nonnull align 1 [[SRC]], i32 8, i32 1)
; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr nonnull align 1 [[DEST]], ptr nonnull align 1 [[SRC]], i32 16, i32 1)
; CHECK-NEXT:    ret void
;
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 1, i32 1)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 2, i32 1)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 4, i32 1)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 8, i32 1)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 1 %dest, ptr align 1 %src, i32 16, i32 1)
  ret void
}

define void @test_memcpy_loadstore_2(ptr %dest, ptr %src) {
; CHECK-LABEL: @test_memcpy_loadstore_2(
; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, ptr [[SRC:%.*]] unordered, align 2
; CHECK-NEXT:    store atomic i8 [[TMP1]], ptr [[DEST:%.*]] unordered, align 2
; CHECK-NEXT:    [[TMP2:%.*]] = load atomic i16, ptr [[SRC]] unordered, align 2
; CHECK-NEXT:    store atomic i16 [[TMP2]], ptr [[DEST]] unordered, align 2
; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr nonnull align 2 [[DEST]], ptr nonnull align 2 [[SRC]], i32 4, i32 2)
; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr nonnull align 2 [[DEST]], ptr nonnull align 2 [[SRC]], i32 8, i32 2)
; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr nonnull align 2 [[DEST]], ptr nonnull align 2 [[SRC]], i32 16, i32 2)
; CHECK-NEXT:    ret void
;
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 2 %dest, ptr align 2 %src, i32 1, i32 1)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 2 %dest, ptr align 2 %src, i32 2, i32 2)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 2 %dest, ptr align 2 %src, i32 4, i32 2)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 2 %dest, ptr align 2 %src, i32 8, i32 2)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 2 %dest, ptr align 2 %src, i32 16, i32 2)
  ret void
}

define void @test_memcpy_loadstore_4(ptr %dest, ptr %src) {
; CHECK-LABEL: @test_memcpy_loadstore_4(
; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, ptr [[SRC:%.*]] unordered, align 4
; CHECK-NEXT:    store atomic i8 [[TMP1]], ptr [[DEST:%.*]] unordered, align 4
; CHECK-NEXT:    [[TMP2:%.*]] = load atomic i16, ptr [[SRC]] unordered, align 4
; CHECK-NEXT:    store atomic i16 [[TMP2]], ptr [[DEST]] unordered, align 4
; CHECK-NEXT:    [[TMP3:%.*]] = load atomic i32, ptr [[SRC]] unordered, align 4
; CHECK-NEXT:    store atomic i32 [[TMP3]], ptr [[DEST]] unordered, align 4
; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr nonnull align 4 [[DEST]], ptr nonnull align 4 [[SRC]], i32 8, i32 4)
; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr nonnull align 4 [[DEST]], ptr nonnull align 4 [[SRC]], i32 16, i32 4)
; CHECK-NEXT:    ret void
;
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 4 %dest, ptr align 4 %src, i32 1, i32 1)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 4 %dest, ptr align 4 %src, i32 2, i32 2)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 4 %dest, ptr align 4 %src, i32 4, i32 4)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 4 %dest, ptr align 4 %src, i32 8, i32 4)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 4 %dest, ptr align 4 %src, i32 16, i32 4)
  ret void
}

define void @test_memcpy_loadstore_8(ptr %dest, ptr %src) {
; CHECK-LABEL: @test_memcpy_loadstore_8(
; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, ptr [[SRC:%.*]] unordered, align 8
; CHECK-NEXT:    store atomic i8 [[TMP1]], ptr [[DEST:%.*]] unordered, align 8
; CHECK-NEXT:    [[TMP2:%.*]] = load atomic i16, ptr [[SRC]] unordered, align 8
; CHECK-NEXT:    store atomic i16 [[TMP2]], ptr [[DEST]] unordered, align 8
; CHECK-NEXT:    [[TMP3:%.*]] = load atomic i32, ptr [[SRC]] unordered, align 8
; CHECK-NEXT:    store atomic i32 [[TMP3]], ptr [[DEST]] unordered, align 8
; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i64, ptr [[SRC]] unordered, align 8
; CHECK-NEXT:    store atomic i64 [[TMP4]], ptr [[DEST]] unordered, align 8
; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr nonnull align 8 [[DEST]], ptr nonnull align 8 [[SRC]], i32 16, i32 8)
; CHECK-NEXT:    ret void
;
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 8 %dest, ptr align 8 %src, i32 1, i32 1)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 8 %dest, ptr align 8 %src, i32 2, i32 2)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 8 %dest, ptr align 8 %src, i32 4, i32 4)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 8 %dest, ptr align 8 %src, i32 8, i32 8)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 8 %dest, ptr align 8 %src, i32 16, i32 8)
  ret void
}

define void @test_memcpy_loadstore_16(ptr %dest, ptr %src) {
; CHECK-LABEL: @test_memcpy_loadstore_16(
; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, ptr [[SRC:%.*]] unordered, align 16
; CHECK-NEXT:    store atomic i8 [[TMP1]], ptr [[DEST:%.*]] unordered, align 16
; CHECK-NEXT:    [[TMP2:%.*]] = load atomic i16, ptr [[SRC]] unordered, align 16
; CHECK-NEXT:    store atomic i16 [[TMP2]], ptr [[DEST]] unordered, align 16
; CHECK-NEXT:    [[TMP3:%.*]] = load atomic i32, ptr [[SRC]] unordered, align 16
; CHECK-NEXT:    store atomic i32 [[TMP3]], ptr [[DEST]] unordered, align 16
; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i64, ptr [[SRC]] unordered, align 16
; CHECK-NEXT:    store atomic i64 [[TMP4]], ptr [[DEST]] unordered, align 16
; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr nonnull align 16 [[DEST]], ptr nonnull align 16 [[SRC]], i32 16, i32 16)
; CHECK-NEXT:    ret void
;
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 1, i32 1)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 2, i32 2)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 4, i32 4)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 8, i32 8)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 16, i32 16)
  ret void
}

define void @test_undefined(ptr %dest, ptr %src, i1 %c1) {
; CHECK-LABEL: @test_undefined(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br i1 [[C1:%.*]], label [[OK:%.*]], label [[UNDEFINED:%.*]]
; CHECK:       undefined:
; CHECK-NEXT:    store i1 true, ptr poison, align 1
; CHECK-NEXT:    br label [[OK]]
; CHECK:       ok:
; CHECK-NEXT:    ret void
;
entry:
  br i1 %c1, label %ok, label %undefined
undefined:
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 7, i32 4)
  call void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 -8, i32 4)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 7, i32 4)
  call void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr align 16 %dest, ptr align 16 %src, i32 -8, i32 4)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 16 %dest, i8 1, i32 7, i32 4)
  call void @llvm.memset.element.unordered.atomic.p0.i32(ptr align 16 %dest, i8 1, i32 -8, i32 4)
  br label %ok
ok:
  ret void
}

declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr nocapture writeonly, ptr nocapture readonly, i32, i32) nounwind argmemonly