; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=slp-vectorizer -mattr=+sse < %s | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: opt -S -passes=slp-vectorizer -mattr=+avx512f < %s | FileCheck %s --check-prefixes=CHECK,AVX512

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

; @foo: a purely scalar icmp/select/trunc chain with no memory operations
; (function is readnone); the CHECK lines verify SLP leaves it untouched.
; Function Attrs: norecurse nounwind readnone uwtable
define zeroext i8 @foo(i32 %x, i32 %y, i32 %a, i32 %b) local_unnamed_addr #0 {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[B_A:%.*]] = select i1 [[CMP]], i32 [[B:%.*]], i32 [[A:%.*]]
; CHECK-NEXT:    [[RETVAL_0:%.*]] = trunc i32 [[B_A]] to i8
; CHECK-NEXT:    ret i8 [[RETVAL_0]]
;
entry:
  %cmp = icmp slt i32 %x, %y
  %b.a = select i1 %cmp, i32 %b, i32 %a
  %retval.0 = trunc i32 %b.a to i8
  ret i8 %retval.0
}

; @bar: the loop body contains 16 fully unrolled scalar lanes of
; e[k] = trunc((c[k] <u d[k] ? b[k] : a[k]) zext * w); the CHECK lines
; expect SLP to fold them into single <16 x i8>/<16 x i32> operations.
define void @bar(ptr noalias nocapture readonly %a, ptr noalias nocapture readonly %b, ptr noalias nocapture readonly %c, ptr noalias nocapture readonly %d, ptr noalias nocapture %e, i32 %w) local_unnamed_addr #1 {
; CHECK-LABEL: @bar(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP0:%.*]] = insertelement <16 x i32> poison, i32 [[W:%.*]], i32 0
; CHECK-NEXT:    [[SHUFFLE:%.*]] = shufflevector <16 x i32> [[TMP0]], <16 x i32> poison, <16 x i32> zeroinitializer
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[I_0356:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[A_ADDR_0355:%.*]] = phi ptr [ [[A:%.*]], [[ENTRY]] ], [ [[ADD_PTR:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[E_ADDR_0354:%.*]] = phi ptr [ [[E:%.*]], [[ENTRY]] ], [ [[ADD_PTR192:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[D_ADDR_0353:%.*]] = phi ptr [ [[D:%.*]], [[ENTRY]] ], [ [[ADD_PTR191:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[C_ADDR_0352:%.*]] = phi ptr [ [[C:%.*]], [[ENTRY]] ], [ [[ADD_PTR190:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[B_ADDR_0351:%.*]] = phi ptr [ [[B:%.*]], [[ENTRY]] ], [ [[ADD_PTR189:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[TMP2:%.*]] = load <16 x i8>, ptr [[C_ADDR_0352]], align 1
; CHECK-NEXT:    [[TMP4:%.*]] = load <16 x i8>, ptr [[D_ADDR_0353]], align 1
; CHECK-NEXT:    [[TMP6:%.*]] = load <16 x i8>, ptr [[A_ADDR_0355]], align 1
; CHECK-NEXT:    [[TMP8:%.*]] = load <16 x i8>, ptr [[B_ADDR_0351]], align 1
; CHECK-NEXT:    [[TMP9:%.*]] = icmp ult <16 x i8> [[TMP2]], [[TMP4]]
; CHECK-NEXT:    [[TMP10:%.*]] = select <16 x i1> [[TMP9]], <16 x i8> [[TMP8]], <16 x i8> [[TMP6]]
; CHECK-NEXT:    [[TMP11:%.*]] = zext <16 x i8> [[TMP10]] to <16 x i32>
; CHECK-NEXT:    [[TMP12:%.*]] = mul <16 x i32> [[TMP11]], [[SHUFFLE]]
; CHECK-NEXT:    [[TMP13:%.*]] = trunc <16 x i32> [[TMP12]] to <16 x i8>
; CHECK-NEXT:    store <16 x i8> [[TMP13]], ptr [[E_ADDR_0354]], align 1
; CHECK-NEXT:    [[INC]] = add nuw nsw i32 [[I_0356]], 1
; CHECK-NEXT:    [[ADD_PTR]] = getelementptr inbounds i8, ptr [[A_ADDR_0355]], i64 16
; CHECK-NEXT:    [[ADD_PTR189]] = getelementptr inbounds i8, ptr [[B_ADDR_0351]], i64 16
; CHECK-NEXT:    [[ADD_PTR190]] = getelementptr inbounds i8, ptr [[C_ADDR_0352]], i64 16
; CHECK-NEXT:    [[ADD_PTR191]] = getelementptr inbounds i8, ptr [[D_ADDR_0353]], i64 16
; CHECK-NEXT:    [[ADD_PTR192]] = getelementptr inbounds i8, ptr [[E_ADDR_0354]], i64 16
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i32 [[INC]], 8
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.0356 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %a.addr.0355 = phi ptr [ %a, %entry ], [ %add.ptr, %for.body ]
  %e.addr.0354 = phi ptr [ %e, %entry ], [ %add.ptr192, %for.body ]
  %d.addr.0353 = phi ptr [ %d, %entry ], [ %add.ptr191, %for.body ]
  %c.addr.0352 = phi ptr [ %c, %entry ], [ %add.ptr190, %for.body ]
  %b.addr.0351 = phi ptr [ %b, %entry ], [ %add.ptr189, %for.body ]
  ; lane 0: e[0] = trunc(zext(c[0] <u d[0] ? b[0] : a[0]) * w)
  %0 = load i8, ptr %c.addr.0352, align 1
  %1 = load i8, ptr %d.addr.0353, align 1
  %2 = load i8, ptr %a.addr.0355, align 1
  %3 = load i8, ptr %b.addr.0351, align 1
  %cmp.i = icmp ult i8 %0, %1
  %b.a.i.v.v = select i1 %cmp.i, i8 %3, i8 %2
  %b.a.i.v = zext i8 %b.a.i.v.v to i32
  %b.a.i = mul i32 %b.a.i.v, %w
  %retval.0.i = trunc i32 %b.a.i to i8
  store i8 %retval.0.i, ptr %e.addr.0354, align 1
  ; lane 1 (same pattern at byte offset 1)
  %arrayidx9 = getelementptr inbounds i8, ptr %c.addr.0352, i64 1
  %4 = load i8, ptr %arrayidx9, align 1
  %arrayidx11 = getelementptr inbounds i8, ptr %d.addr.0353, i64 1
  %5 = load i8, ptr %arrayidx11, align 1
  %arrayidx13 = getelementptr inbounds i8, ptr %a.addr.0355, i64 1
  %6 = load i8, ptr %arrayidx13, align 1
  %arrayidx16 = getelementptr inbounds i8, ptr %b.addr.0351, i64 1
  %7 = load i8, ptr %arrayidx16, align 1
  %cmp.i348 = icmp ult i8 %4, %5
  %b.a.i349.v.v = select i1 %cmp.i348, i8 %7, i8 %6
  %b.a.i349.v = zext i8 %b.a.i349.v.v to i32
  %b.a.i349 = mul i32 %b.a.i349.v, %w
  %retval.0.i350 = trunc i32 %b.a.i349 to i8
  %arrayidx20 = getelementptr inbounds i8, ptr %e.addr.0354, i64 1
  store i8 %retval.0.i350, ptr %arrayidx20, align 1
  ; lane 2
  %arrayidx21 = getelementptr inbounds i8, ptr %c.addr.0352, i64 2
  %8 = load i8, ptr %arrayidx21, align 1
  %arrayidx23 = getelementptr inbounds i8, ptr %d.addr.0353, i64 2
  %9 = load i8, ptr %arrayidx23, align 1
  %arrayidx25 = getelementptr inbounds i8, ptr %a.addr.0355, i64 2
  %10 = load i8, ptr %arrayidx25, align 1
  %arrayidx28 = getelementptr inbounds i8, ptr %b.addr.0351, i64 2
  %11 = load i8, ptr %arrayidx28, align 1
  %cmp.i345 = icmp ult i8 %8, %9
  %b.a.i346.v.v = select i1 %cmp.i345, i8 %11, i8 %10
  %b.a.i346.v = zext i8 %b.a.i346.v.v to i32
  %b.a.i346 = mul i32 %b.a.i346.v, %w
  %retval.0.i347 = trunc i32 %b.a.i346 to i8
  %arrayidx32 = getelementptr inbounds i8, ptr %e.addr.0354, i64 2
  store i8 %retval.0.i347, ptr %arrayidx32, align 1
  ; lane 3
  %arrayidx33 = getelementptr inbounds i8, ptr %c.addr.0352, i64 3
  %12 = load i8, ptr %arrayidx33, align 1
  %arrayidx35 = getelementptr inbounds i8, ptr %d.addr.0353, i64 3
  %13 = load i8, ptr %arrayidx35, align 1
  %arrayidx37 = getelementptr inbounds i8, ptr %a.addr.0355, i64 3
  %14 = load i8, ptr %arrayidx37, align 1
  %arrayidx40 = getelementptr inbounds i8, ptr %b.addr.0351, i64 3
  %15 = load i8, ptr %arrayidx40, align 1
  %cmp.i342 = icmp ult i8 %12, %13
  %b.a.i343.v.v = select i1 %cmp.i342, i8 %15, i8 %14
  %b.a.i343.v = zext i8 %b.a.i343.v.v to i32
  %b.a.i343 = mul i32 %b.a.i343.v, %w
  %retval.0.i344 = trunc i32 %b.a.i343 to i8
  %arrayidx44 = getelementptr inbounds i8, ptr %e.addr.0354, i64 3
  store i8 %retval.0.i344, ptr %arrayidx44, align 1
  ; lane 4
  %arrayidx45 = getelementptr inbounds i8, ptr %c.addr.0352, i64 4
  %16 = load i8, ptr %arrayidx45, align 1
  %arrayidx47 = getelementptr inbounds i8, ptr %d.addr.0353, i64 4
  %17 = load i8, ptr %arrayidx47, align 1
  %arrayidx49 = getelementptr inbounds i8, ptr %a.addr.0355, i64 4
  %18 = load i8, ptr %arrayidx49, align 1
  %arrayidx52 = getelementptr inbounds i8, ptr %b.addr.0351, i64 4
  %19 = load i8, ptr %arrayidx52, align 1
  %cmp.i339 = icmp ult i8 %16, %17
  %b.a.i340.v.v = select i1 %cmp.i339, i8 %19, i8 %18
  %b.a.i340.v = zext i8 %b.a.i340.v.v to i32
  %b.a.i340 = mul i32 %b.a.i340.v, %w
  %retval.0.i341 = trunc i32 %b.a.i340 to i8
  %arrayidx56 = getelementptr inbounds i8, ptr %e.addr.0354, i64 4
  store i8 %retval.0.i341, ptr %arrayidx56, align 1
  ; lane 5
  %arrayidx57 = getelementptr inbounds i8, ptr %c.addr.0352, i64 5
  %20 = load i8, ptr %arrayidx57, align 1
  %arrayidx59 = getelementptr inbounds i8, ptr %d.addr.0353, i64 5
  %21 = load i8, ptr %arrayidx59, align 1
  %arrayidx61 = getelementptr inbounds i8, ptr %a.addr.0355, i64 5
  %22 = load i8, ptr %arrayidx61, align 1
  %arrayidx64 = getelementptr inbounds i8, ptr %b.addr.0351, i64 5
  %23 = load i8, ptr %arrayidx64, align 1
  %cmp.i336 = icmp ult i8 %20, %21
  %b.a.i337.v.v = select i1 %cmp.i336, i8 %23, i8 %22
  %b.a.i337.v = zext i8 %b.a.i337.v.v to i32
  %b.a.i337 = mul i32 %b.a.i337.v, %w
  %retval.0.i338 = trunc i32 %b.a.i337 to i8
  %arrayidx68 = getelementptr inbounds i8, ptr %e.addr.0354, i64 5
  store i8 %retval.0.i338, ptr %arrayidx68, align 1
  ; lane 6
  %arrayidx69 = getelementptr inbounds i8, ptr %c.addr.0352, i64 6
  %24 = load i8, ptr %arrayidx69, align 1
  %arrayidx71 = getelementptr inbounds i8, ptr %d.addr.0353, i64 6
  %25 = load i8, ptr %arrayidx71, align 1
  %arrayidx73 = getelementptr inbounds i8, ptr %a.addr.0355, i64 6
  %26 = load i8, ptr %arrayidx73, align 1
  %arrayidx76 = getelementptr inbounds i8, ptr %b.addr.0351, i64 6
  %27 = load i8, ptr %arrayidx76, align 1
  %cmp.i333 = icmp ult i8 %24, %25
  %b.a.i334.v.v = select i1 %cmp.i333, i8 %27, i8 %26
  %b.a.i334.v = zext i8 %b.a.i334.v.v to i32
  %b.a.i334 = mul i32 %b.a.i334.v, %w
  %retval.0.i335 = trunc i32 %b.a.i334 to i8
  %arrayidx80 = getelementptr inbounds i8, ptr %e.addr.0354, i64 6
  store i8 %retval.0.i335, ptr %arrayidx80, align 1
  ; lane 7
  %arrayidx81 = getelementptr inbounds i8, ptr %c.addr.0352, i64 7
  %28 = load i8, ptr %arrayidx81, align 1
  %arrayidx83 = getelementptr inbounds i8, ptr %d.addr.0353, i64 7
  %29 = load i8, ptr %arrayidx83, align 1
  %arrayidx85 = getelementptr inbounds i8, ptr %a.addr.0355, i64 7
  %30 = load i8, ptr %arrayidx85, align 1
  %arrayidx88 = getelementptr inbounds i8, ptr %b.addr.0351, i64 7
  %31 = load i8, ptr %arrayidx88, align 1
  %cmp.i330 = icmp ult i8 %28, %29
  %b.a.i331.v.v = select i1 %cmp.i330, i8 %31, i8 %30
  %b.a.i331.v = zext i8 %b.a.i331.v.v to i32
  %b.a.i331 = mul i32 %b.a.i331.v, %w
  %retval.0.i332 = trunc i32 %b.a.i331 to i8
  %arrayidx92 = getelementptr inbounds i8, ptr %e.addr.0354, i64 7
  store i8 %retval.0.i332, ptr %arrayidx92, align 1
  ; lane 8
  %arrayidx93 = getelementptr inbounds i8, ptr %c.addr.0352, i64 8
  %32 = load i8, ptr %arrayidx93, align 1
  %arrayidx95 = getelementptr inbounds i8, ptr %d.addr.0353, i64 8
  %33 = load i8, ptr %arrayidx95, align 1
  %arrayidx97 = getelementptr inbounds i8, ptr %a.addr.0355, i64 8
  %34 = load i8, ptr %arrayidx97, align 1
  %arrayidx100 = getelementptr inbounds i8, ptr %b.addr.0351, i64 8
  %35 = load i8, ptr %arrayidx100, align 1
  %cmp.i327 = icmp ult i8 %32, %33
  %b.a.i328.v.v = select i1 %cmp.i327, i8 %35, i8 %34
  %b.a.i328.v = zext i8 %b.a.i328.v.v to i32
  %b.a.i328 = mul i32 %b.a.i328.v, %w
  %retval.0.i329 = trunc i32 %b.a.i328 to i8
  %arrayidx104 = getelementptr inbounds i8, ptr %e.addr.0354, i64 8
  store i8 %retval.0.i329, ptr %arrayidx104, align 1
  ; lane 9
  %arrayidx105 = getelementptr inbounds i8, ptr %c.addr.0352, i64 9
  %36 = load i8, ptr %arrayidx105, align 1
  %arrayidx107 = getelementptr inbounds i8, ptr %d.addr.0353, i64 9
  %37 = load i8, ptr %arrayidx107, align 1
  %arrayidx109 = getelementptr inbounds i8, ptr %a.addr.0355, i64 9
  %38 = load i8, ptr %arrayidx109, align 1
  %arrayidx112 = getelementptr inbounds i8, ptr %b.addr.0351, i64 9
  %39 = load i8, ptr %arrayidx112, align 1
  %cmp.i324 = icmp ult i8 %36, %37
  %b.a.i325.v.v = select i1 %cmp.i324, i8 %39, i8 %38
  %b.a.i325.v = zext i8 %b.a.i325.v.v to i32
  %b.a.i325 = mul i32 %b.a.i325.v, %w
  %retval.0.i326 = trunc i32 %b.a.i325 to i8
  %arrayidx116 = getelementptr inbounds i8, ptr %e.addr.0354, i64 9
  store i8 %retval.0.i326, ptr %arrayidx116, align 1
  ; lane 10
  %arrayidx117 = getelementptr inbounds i8, ptr %c.addr.0352, i64 10
  %40 = load i8, ptr %arrayidx117, align 1
  %arrayidx119 = getelementptr inbounds i8, ptr %d.addr.0353, i64 10
  %41 = load i8, ptr %arrayidx119, align 1
  %arrayidx121 = getelementptr inbounds i8, ptr %a.addr.0355, i64 10
  %42 = load i8, ptr %arrayidx121, align 1
  %arrayidx124 = getelementptr inbounds i8, ptr %b.addr.0351, i64 10
  %43 = load i8, ptr %arrayidx124, align 1
  %cmp.i321 = icmp ult i8 %40, %41
  %b.a.i322.v.v = select i1 %cmp.i321, i8 %43, i8 %42
  %b.a.i322.v = zext i8 %b.a.i322.v.v to i32
  %b.a.i322 = mul i32 %b.a.i322.v, %w
  %retval.0.i323 = trunc i32 %b.a.i322 to i8
  %arrayidx128 = getelementptr inbounds i8, ptr %e.addr.0354, i64 10
  store i8 %retval.0.i323, ptr %arrayidx128, align 1
  ; lane 11
  %arrayidx129 = getelementptr inbounds i8, ptr %c.addr.0352, i64 11
  %44 = load i8, ptr %arrayidx129, align 1
  %arrayidx131 = getelementptr inbounds i8, ptr %d.addr.0353, i64 11
  %45 = load i8, ptr %arrayidx131, align 1
  %arrayidx133 = getelementptr inbounds i8, ptr %a.addr.0355, i64 11
  %46 = load i8, ptr %arrayidx133, align 1
  %arrayidx136 = getelementptr inbounds i8, ptr %b.addr.0351, i64 11
  %47 = load i8, ptr %arrayidx136, align 1
  %cmp.i318 = icmp ult i8 %44, %45
  %b.a.i319.v.v = select i1 %cmp.i318, i8 %47, i8 %46
  %b.a.i319.v = zext i8 %b.a.i319.v.v to i32
  %b.a.i319 = mul i32 %b.a.i319.v, %w
  %retval.0.i320 = trunc i32 %b.a.i319 to i8
  %arrayidx140 = getelementptr inbounds i8, ptr %e.addr.0354, i64 11
  store i8 %retval.0.i320, ptr %arrayidx140, align 1
  ; lane 12
  %arrayidx141 = getelementptr inbounds i8, ptr %c.addr.0352, i64 12
  %48 = load i8, ptr %arrayidx141, align 1
  %arrayidx143 = getelementptr inbounds i8, ptr %d.addr.0353, i64 12
  %49 = load i8, ptr %arrayidx143, align 1
  %arrayidx145 = getelementptr inbounds i8, ptr %a.addr.0355, i64 12
  %50 = load i8, ptr %arrayidx145, align 1
  %arrayidx148 = getelementptr inbounds i8, ptr %b.addr.0351, i64 12
  %51 = load i8, ptr %arrayidx148, align 1
  %cmp.i315 = icmp ult i8 %48, %49
  %b.a.i316.v.v = select i1 %cmp.i315, i8 %51, i8 %50
  %b.a.i316.v = zext i8 %b.a.i316.v.v to i32
  %b.a.i316 = mul i32 %b.a.i316.v, %w
  %retval.0.i317 = trunc i32 %b.a.i316 to i8
  %arrayidx152 = getelementptr inbounds i8, ptr %e.addr.0354, i64 12
  store i8 %retval.0.i317, ptr %arrayidx152, align 1
  ; lane 13
  %arrayidx153 = getelementptr inbounds i8, ptr %c.addr.0352, i64 13
  %52 = load i8, ptr %arrayidx153, align 1
  %arrayidx155 = getelementptr inbounds i8, ptr %d.addr.0353, i64 13
  %53 = load i8, ptr %arrayidx155, align 1
  %arrayidx157 = getelementptr inbounds i8, ptr %a.addr.0355, i64 13
  %54 = load i8, ptr %arrayidx157, align 1
  %arrayidx160 = getelementptr inbounds i8, ptr %b.addr.0351, i64 13
  %55 = load i8, ptr %arrayidx160, align 1
  %cmp.i312 = icmp ult i8 %52, %53
  %b.a.i313.v.v = select i1 %cmp.i312, i8 %55, i8 %54
  %b.a.i313.v = zext i8 %b.a.i313.v.v to i32
  %b.a.i313 = mul i32 %b.a.i313.v, %w
  %retval.0.i314 = trunc i32 %b.a.i313 to i8
  %arrayidx164 = getelementptr inbounds i8, ptr %e.addr.0354, i64 13
  store i8 %retval.0.i314, ptr %arrayidx164, align 1
  ; lane 14
  %arrayidx165 = getelementptr inbounds i8, ptr %c.addr.0352, i64 14
  %56 = load i8, ptr %arrayidx165, align 1
  %arrayidx167 = getelementptr inbounds i8, ptr %d.addr.0353, i64 14
  %57 = load i8, ptr %arrayidx167, align 1
  %arrayidx169 = getelementptr inbounds i8, ptr %a.addr.0355, i64 14
  %58 = load i8, ptr %arrayidx169, align 1
  %arrayidx172 = getelementptr inbounds i8, ptr %b.addr.0351, i64 14
  %59 = load i8, ptr %arrayidx172, align 1
  %cmp.i309 = icmp ult i8 %56, %57
  %b.a.i310.v.v = select i1 %cmp.i309, i8 %59, i8 %58
  %b.a.i310.v = zext i8 %b.a.i310.v.v to i32
  %b.a.i310 = mul i32 %b.a.i310.v, %w
  %retval.0.i311 = trunc i32 %b.a.i310 to i8
  %arrayidx176 = getelementptr inbounds i8, ptr %e.addr.0354, i64 14
  store i8 %retval.0.i311, ptr %arrayidx176, align 1
  ; lane 15
  %arrayidx177 = getelementptr inbounds i8, ptr %c.addr.0352, i64 15
  %60 = load i8, ptr %arrayidx177, align 1
  %arrayidx179 = getelementptr inbounds i8, ptr %d.addr.0353, i64 15
  %61 = load i8, ptr %arrayidx179, align 1
  %arrayidx181 = getelementptr inbounds i8, ptr %a.addr.0355, i64 15
  %62 = load i8, ptr %arrayidx181, align 1
  %arrayidx184 = getelementptr inbounds i8, ptr %b.addr.0351, i64 15
  %63 = load i8, ptr %arrayidx184, align 1
  %cmp.i306 = icmp ult i8 %60, %61
  %b.a.i307.v.v = select i1 %cmp.i306, i8 %63, i8 %62
  %b.a.i307.v = zext i8 %b.a.i307.v.v to i32
  %b.a.i307 = mul i32 %b.a.i307.v, %w
  %retval.0.i308 = trunc i32 %b.a.i307 to i8
  %arrayidx188 = getelementptr inbounds i8, ptr %e.addr.0354, i64 15
  store i8 %retval.0.i308, ptr %arrayidx188, align 1
  ; advance all five pointers by 16 bytes; loop runs 8 trips (128 bytes total)
  %inc = add nuw nsw i32 %i.0356, 1
  %add.ptr = getelementptr inbounds i8, ptr %a.addr.0355, i64 16
  %add.ptr189 = getelementptr inbounds i8, ptr %b.addr.0351, i64 16
  %add.ptr190 = getelementptr inbounds i8, ptr %c.addr.0352, i64 16
  %add.ptr191 = getelementptr inbounds i8, ptr %d.addr.0353, i64 16
  %add.ptr192 = getelementptr inbounds i8, ptr %e.addr.0354, i64 16
  %exitcond = icmp eq i32 %inc, 8
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; @ib: 8 repetitions of the 8-element pattern 1,1,0,0,1,0,1,0 (64 x i32).
@ib = local_unnamed_addr global [64 x i32] [i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0, i32 1, i32 1, i32 0, i32 0, i32 1, i32 0, i32 1, i32 0], align 16
@ia = common local_unnamed_addr global [64 x i32] zeroinitializer, align 16

define i32 @foo1() local_unnamed_addr #0 {
; SSE-LABEL: @foo1(
; SSE-NEXT:  entry:
; SSE-NEXT:    [[TMP0:%.*]] = load <4 x i32>, ptr @ib, align 16
; SSE-NEXT:    [[TMP1:%.*]] = xor <4 x i32> [[TMP0]], splat (i32 -1)
; SSE-NEXT:    store <4 x i32> [[TMP1]], ptr @ia, align 16
; SSE-NEXT:    [[TMP2:%.*]] =
load <4 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 4), align 16 326; SSE-NEXT: [[TMP3:%.*]] = xor <4 x i32> [[TMP2]], splat (i32 -1) 327; SSE-NEXT: store <4 x i32> [[TMP3]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 4), align 16 328; SSE-NEXT: [[TMP4:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 8), align 16 329; SSE-NEXT: [[TMP5:%.*]] = xor <4 x i32> [[TMP4]], splat (i32 -1) 330; SSE-NEXT: store <4 x i32> [[TMP5]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 8), align 16 331; SSE-NEXT: [[TMP6:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 12), align 16 332; SSE-NEXT: [[TMP7:%.*]] = xor <4 x i32> [[TMP6]], splat (i32 -1) 333; SSE-NEXT: store <4 x i32> [[TMP7]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 12), align 16 334; SSE-NEXT: [[TMP8:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 16), align 16 335; SSE-NEXT: [[TMP9:%.*]] = xor <4 x i32> [[TMP8]], splat (i32 -1) 336; SSE-NEXT: store <4 x i32> [[TMP9]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 16), align 16 337; SSE-NEXT: [[TMP10:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 20), align 16 338; SSE-NEXT: [[TMP11:%.*]] = xor <4 x i32> [[TMP10]], splat (i32 -1) 339; SSE-NEXT: store <4 x i32> [[TMP11]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 20), align 16 340; SSE-NEXT: [[TMP12:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 24), align 16 341; SSE-NEXT: [[TMP13:%.*]] = xor <4 x i32> [[TMP12]], splat (i32 -1) 342; SSE-NEXT: store <4 x i32> [[TMP13]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 24), align 16 343; SSE-NEXT: [[TMP14:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 28), align 16 344; SSE-NEXT: [[TMP15:%.*]] = xor <4 x i32> [[TMP14]], splat (i32 -1) 345; 
SSE-NEXT: store <4 x i32> [[TMP15]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 28), align 16 346; SSE-NEXT: [[TMP16:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 32), align 16 347; SSE-NEXT: [[TMP17:%.*]] = xor <4 x i32> [[TMP16]], splat (i32 -1) 348; SSE-NEXT: store <4 x i32> [[TMP17]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 32), align 16 349; SSE-NEXT: [[TMP18:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 36), align 16 350; SSE-NEXT: [[TMP19:%.*]] = xor <4 x i32> [[TMP18]], splat (i32 -1) 351; SSE-NEXT: store <4 x i32> [[TMP19]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 36), align 16 352; SSE-NEXT: [[TMP20:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 40), align 16 353; SSE-NEXT: [[TMP21:%.*]] = xor <4 x i32> [[TMP20]], splat (i32 -1) 354; SSE-NEXT: store <4 x i32> [[TMP21]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 40), align 16 355; SSE-NEXT: [[TMP22:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 44), align 16 356; SSE-NEXT: [[TMP23:%.*]] = xor <4 x i32> [[TMP22]], splat (i32 -1) 357; SSE-NEXT: store <4 x i32> [[TMP23]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 44), align 16 358; SSE-NEXT: [[TMP24:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 48), align 16 359; SSE-NEXT: [[TMP25:%.*]] = xor <4 x i32> [[TMP24]], splat (i32 -1) 360; SSE-NEXT: store <4 x i32> [[TMP25]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 48), align 16 361; SSE-NEXT: [[TMP26:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 52), align 16 362; SSE-NEXT: [[TMP27:%.*]] = xor <4 x i32> [[TMP26]], splat (i32 -1) 363; SSE-NEXT: store <4 x i32> [[TMP27]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 52), align 16 364; SSE-NEXT: [[TMP28:%.*]] = load <4 x 
i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 56), align 16 365; SSE-NEXT: [[TMP29:%.*]] = xor <4 x i32> [[TMP28]], splat (i32 -1) 366; SSE-NEXT: store <4 x i32> [[TMP29]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 56), align 16 367; SSE-NEXT: [[TMP30:%.*]] = load <4 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 60), align 16 368; SSE-NEXT: [[TMP31:%.*]] = xor <4 x i32> [[TMP30]], splat (i32 -1) 369; SSE-NEXT: store <4 x i32> [[TMP31]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 60), align 16 370; SSE-NEXT: br label [[FOR_BODY5:%.*]] 371; SSE: for.cond3: 372; SSE-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV:%.*]], 1 373; SSE-NEXT: [[CMP4:%.*]] = icmp ult i64 [[INDVARS_IV]], 63 374; SSE-NEXT: br i1 [[CMP4]], label [[FOR_BODY5]], label [[FOR_END14:%.*]] 375; SSE: for.body5: 376; SSE-NEXT: [[INDVARS_IV]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT]], [[FOR_COND3:%.*]] ] 377; SSE-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [64 x i32], ptr @ia, i64 0, i64 [[INDVARS_IV]] 378; SSE-NEXT: [[TMP32:%.*]] = load i32, ptr [[ARRAYIDX7]], align 4 379; SSE-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [64 x i32], ptr @ib, i64 0, i64 [[INDVARS_IV]] 380; SSE-NEXT: [[TMP33:%.*]] = load i32, ptr [[ARRAYIDX9]], align 4 381; SSE-NEXT: [[NEG10:%.*]] = xor i32 [[TMP33]], -1 382; SSE-NEXT: [[CMP11:%.*]] = icmp eq i32 [[TMP32]], [[NEG10]] 383; SSE-NEXT: br i1 [[CMP11]], label [[FOR_COND3]], label [[IF_THEN:%.*]] 384; SSE: if.then: 385; SSE-NEXT: tail call void @abort() 386; SSE-NEXT: unreachable 387; SSE: for.end14: 388; SSE-NEXT: ret i32 0 389; 390; AVX512-LABEL: @foo1( 391; AVX512-NEXT: entry: 392; AVX512-NEXT: [[TMP0:%.*]] = load <16 x i32>, ptr @ib, align 16 393; AVX512-NEXT: [[TMP1:%.*]] = xor <16 x i32> [[TMP0]], splat (i32 -1) 394; AVX512-NEXT: store <16 x i32> [[TMP1]], ptr @ia, align 16 395; AVX512-NEXT: [[TMP2:%.*]] = load <16 x i32>, ptr getelementptr inbounds ([64 x i32], 
ptr @ib, i64 0, i64 16), align 16 396; AVX512-NEXT: [[TMP3:%.*]] = xor <16 x i32> [[TMP2]], splat (i32 -1) 397; AVX512-NEXT: store <16 x i32> [[TMP3]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 16), align 16 398; AVX512-NEXT: [[TMP4:%.*]] = load <16 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 32), align 16 399; AVX512-NEXT: [[TMP5:%.*]] = xor <16 x i32> [[TMP4]], splat (i32 -1) 400; AVX512-NEXT: store <16 x i32> [[TMP5]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 32), align 16 401; AVX512-NEXT: [[TMP6:%.*]] = load <16 x i32>, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 48), align 16 402; AVX512-NEXT: [[TMP7:%.*]] = xor <16 x i32> [[TMP6]], splat (i32 -1) 403; AVX512-NEXT: store <16 x i32> [[TMP7]], ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 48), align 16 404; AVX512-NEXT: br label [[FOR_BODY5:%.*]] 405; AVX512: for.cond3: 406; AVX512-NEXT: [[INDVARS_IV_NEXT:%.*]] = add nuw nsw i64 [[INDVARS_IV:%.*]], 1 407; AVX512-NEXT: [[CMP4:%.*]] = icmp ult i64 [[INDVARS_IV]], 63 408; AVX512-NEXT: br i1 [[CMP4]], label [[FOR_BODY5]], label [[FOR_END14:%.*]] 409; AVX512: for.body5: 410; AVX512-NEXT: [[INDVARS_IV]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT]], [[FOR_COND3:%.*]] ] 411; AVX512-NEXT: [[ARRAYIDX7:%.*]] = getelementptr inbounds [64 x i32], ptr @ia, i64 0, i64 [[INDVARS_IV]] 412; AVX512-NEXT: [[TMP8:%.*]] = load i32, ptr [[ARRAYIDX7]], align 4 413; AVX512-NEXT: [[ARRAYIDX9:%.*]] = getelementptr inbounds [64 x i32], ptr @ib, i64 0, i64 [[INDVARS_IV]] 414; AVX512-NEXT: [[TMP9:%.*]] = load i32, ptr [[ARRAYIDX9]], align 4 415; AVX512-NEXT: [[NEG10:%.*]] = xor i32 [[TMP9]], -1 416; AVX512-NEXT: [[CMP11:%.*]] = icmp eq i32 [[TMP8]], [[NEG10]] 417; AVX512-NEXT: br i1 [[CMP11]], label [[FOR_COND3]], label [[IF_THEN:%.*]] 418; AVX512: if.then: 419; AVX512-NEXT: tail call void @abort() 420; AVX512-NEXT: unreachable 421; AVX512: for.end14: 422; AVX512-NEXT: ret i32 0 423; 
424entry: 425 %0 = load i32, ptr @ib, align 16 426 %neg = xor i32 %0, -1 427 store i32 %neg, ptr @ia, align 16 428 %1 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 1), align 4 429 %neg.1 = xor i32 %1, -1 430 store i32 %neg.1, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 1), align 4 431 %2 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 2), align 8 432 %neg.2 = xor i32 %2, -1 433 store i32 %neg.2, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 2), align 8 434 %3 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 3), align 4 435 %neg.3 = xor i32 %3, -1 436 store i32 %neg.3, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 3), align 4 437 %4 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 4), align 16 438 %neg.4 = xor i32 %4, -1 439 store i32 %neg.4, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 4), align 16 440 %5 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 5), align 4 441 %neg.5 = xor i32 %5, -1 442 store i32 %neg.5, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 5), align 4 443 %6 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 6), align 8 444 %neg.6 = xor i32 %6, -1 445 store i32 %neg.6, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 6), align 8 446 %7 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 7), align 4 447 %neg.7 = xor i32 %7, -1 448 store i32 %neg.7, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 7), align 4 449 %8 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 8), align 16 450 %neg.8 = xor i32 %8, -1 451 store i32 %neg.8, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 8), align 16 452 %9 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 9), align 4 453 %neg.9 = xor i32 %9, -1 454 store i32 %neg.9, ptr getelementptr inbounds ([64 x i32], 
ptr @ia, i64 0, i64 9), align 4 455 %10 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 10), align 8 456 %neg.10 = xor i32 %10, -1 457 store i32 %neg.10, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 10), align 8 458 %11 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 11), align 4 459 %neg.11 = xor i32 %11, -1 460 store i32 %neg.11, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 11), align 4 461 %12 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 12), align 16 462 %neg.12 = xor i32 %12, -1 463 store i32 %neg.12, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 12), align 16 464 %13 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 13), align 4 465 %neg.13 = xor i32 %13, -1 466 store i32 %neg.13, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 13), align 4 467 %14 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 14), align 8 468 %neg.14 = xor i32 %14, -1 469 store i32 %neg.14, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 14), align 8 470 %15 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 15), align 4 471 %neg.15 = xor i32 %15, -1 472 store i32 %neg.15, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 15), align 4 473 %16 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 16), align 16 474 %neg.16 = xor i32 %16, -1 475 store i32 %neg.16, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 16), align 16 476 %17 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 17), align 4 477 %neg.17 = xor i32 %17, -1 478 store i32 %neg.17, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 17), align 4 479 %18 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 18), align 8 480 %neg.18 = xor i32 %18, -1 481 store i32 %neg.18, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 18), 
align 8 482 %19 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 19), align 4 483 %neg.19 = xor i32 %19, -1 484 store i32 %neg.19, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 19), align 4 485 %20 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 20), align 16 486 %neg.20 = xor i32 %20, -1 487 store i32 %neg.20, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 20), align 16 488 %21 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 21), align 4 489 %neg.21 = xor i32 %21, -1 490 store i32 %neg.21, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 21), align 4 491 %22 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 22), align 8 492 %neg.22 = xor i32 %22, -1 493 store i32 %neg.22, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 22), align 8 494 %23 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 23), align 4 495 %neg.23 = xor i32 %23, -1 496 store i32 %neg.23, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 23), align 4 497 %24 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 24), align 16 498 %neg.24 = xor i32 %24, -1 499 store i32 %neg.24, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 24), align 16 500 %25 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 25), align 4 501 %neg.25 = xor i32 %25, -1 502 store i32 %neg.25, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 25), align 4 503 %26 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 26), align 8 504 %neg.26 = xor i32 %26, -1 505 store i32 %neg.26, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 26), align 8 506 %27 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 27), align 4 507 %neg.27 = xor i32 %27, -1 508 store i32 %neg.27, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 27), align 4 509 %28 = load 
i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 28), align 16 510 %neg.28 = xor i32 %28, -1 511 store i32 %neg.28, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 28), align 16 512 %29 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 29), align 4 513 %neg.29 = xor i32 %29, -1 514 store i32 %neg.29, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 29), align 4 515 %30 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 30), align 8 516 %neg.30 = xor i32 %30, -1 517 store i32 %neg.30, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 30), align 8 518 %31 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 31), align 4 519 %neg.31 = xor i32 %31, -1 520 store i32 %neg.31, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 31), align 4 521 %32 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 32), align 16 522 %neg.32 = xor i32 %32, -1 523 store i32 %neg.32, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 32), align 16 524 %33 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 33), align 4 525 %neg.33 = xor i32 %33, -1 526 store i32 %neg.33, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 33), align 4 527 %34 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 34), align 8 528 %neg.34 = xor i32 %34, -1 529 store i32 %neg.34, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 34), align 8 530 %35 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 35), align 4 531 %neg.35 = xor i32 %35, -1 532 store i32 %neg.35, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 35), align 4 533 %36 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 36), align 16 534 %neg.36 = xor i32 %36, -1 535 store i32 %neg.36, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 36), align 16 536 %37 = load i32, ptr getelementptr 
inbounds ([64 x i32], ptr @ib, i64 0, i64 37), align 4 537 %neg.37 = xor i32 %37, -1 538 store i32 %neg.37, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 37), align 4 539 %38 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 38), align 8 540 %neg.38 = xor i32 %38, -1 541 store i32 %neg.38, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 38), align 8 542 %39 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 39), align 4 543 %neg.39 = xor i32 %39, -1 544 store i32 %neg.39, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 39), align 4 545 %40 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 40), align 16 546 %neg.40 = xor i32 %40, -1 547 store i32 %neg.40, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 40), align 16 548 %41 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 41), align 4 549 %neg.41 = xor i32 %41, -1 550 store i32 %neg.41, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 41), align 4 551 %42 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 42), align 8 552 %neg.42 = xor i32 %42, -1 553 store i32 %neg.42, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 42), align 8 554 %43 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 43), align 4 555 %neg.43 = xor i32 %43, -1 556 store i32 %neg.43, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 43), align 4 557 %44 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 44), align 16 558 %neg.44 = xor i32 %44, -1 559 store i32 %neg.44, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 44), align 16 560 %45 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 45), align 4 561 %neg.45 = xor i32 %45, -1 562 store i32 %neg.45, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 45), align 4 563 %46 = load i32, ptr getelementptr inbounds ([64 x i32], 
ptr @ib, i64 0, i64 46), align 8 564 %neg.46 = xor i32 %46, -1 565 store i32 %neg.46, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 46), align 8 566 %47 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 47), align 4 567 %neg.47 = xor i32 %47, -1 568 store i32 %neg.47, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 47), align 4 569 %48 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 48), align 16 570 %neg.48 = xor i32 %48, -1 571 store i32 %neg.48, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 48), align 16 572 %49 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 49), align 4 573 %neg.49 = xor i32 %49, -1 574 store i32 %neg.49, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 49), align 4 575 %50 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 50), align 8 576 %neg.50 = xor i32 %50, -1 577 store i32 %neg.50, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 50), align 8 578 %51 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 51), align 4 579 %neg.51 = xor i32 %51, -1 580 store i32 %neg.51, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 51), align 4 581 %52 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 52), align 16 582 %neg.52 = xor i32 %52, -1 583 store i32 %neg.52, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 52), align 16 584 %53 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 53), align 4 585 %neg.53 = xor i32 %53, -1 586 store i32 %neg.53, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 53), align 4 587 %54 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 54), align 8 588 %neg.54 = xor i32 %54, -1 589 store i32 %neg.54, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 54), align 8 590 %55 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 55), 
align 4 591 %neg.55 = xor i32 %55, -1 592 store i32 %neg.55, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 55), align 4 593 %56 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 56), align 16 594 %neg.56 = xor i32 %56, -1 595 store i32 %neg.56, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 56), align 16 596 %57 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 57), align 4 597 %neg.57 = xor i32 %57, -1 598 store i32 %neg.57, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 57), align 4 599 %58 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 58), align 8 600 %neg.58 = xor i32 %58, -1 601 store i32 %neg.58, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 58), align 8 602 %59 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 59), align 4 603 %neg.59 = xor i32 %59, -1 604 store i32 %neg.59, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 59), align 4 605 %60 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 60), align 16 606 %neg.60 = xor i32 %60, -1 607 store i32 %neg.60, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 60), align 16 608 %61 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 61), align 4 609 %neg.61 = xor i32 %61, -1 610 store i32 %neg.61, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 61), align 4 611 %62 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 62), align 8 612 %neg.62 = xor i32 %62, -1 613 store i32 %neg.62, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 62), align 8 614 %63 = load i32, ptr getelementptr inbounds ([64 x i32], ptr @ib, i64 0, i64 63), align 4 615 %neg.63 = xor i32 %63, -1 616 store i32 %neg.63, ptr getelementptr inbounds ([64 x i32], ptr @ia, i64 0, i64 63), align 4 617 br label %for.body5 618 619for.cond3: ; preds = %for.body5 620 %indvars.iv.next = add nuw nsw i64 
%indvars.iv, 1 621 %cmp4 = icmp ult i64 %indvars.iv, 63 622 br i1 %cmp4, label %for.body5, label %for.end14 623 624for.body5: ; preds = %entry, %for.cond3 625 %indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.cond3 ] 626 %arrayidx7 = getelementptr inbounds [64 x i32], ptr @ia, i64 0, i64 %indvars.iv 627 %64 = load i32, ptr %arrayidx7, align 4 628 %arrayidx9 = getelementptr inbounds [64 x i32], ptr @ib, i64 0, i64 %indvars.iv 629 %65 = load i32, ptr %arrayidx9, align 4 630 %neg10 = xor i32 %65, -1 631 %cmp11 = icmp eq i32 %64, %neg10 632 br i1 %cmp11, label %for.cond3, label %if.then 633 634if.then: ; preds = %for.body5 635 tail call void @abort() #2 636 unreachable 637 638for.end14: ; preds = %for.cond3 639 ret i32 0 640} 641 642declare void @abort() #2 643