// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 5
// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 -fzvector \
// RUN: -emit-llvm -o - -W -Wall -Werror \
// RUN: %s | opt -S -passes=mem2reg | FileCheck %s

volatile vector signed char sc, sc2;
volatile vector unsigned char uc, uc2;
volatile vector bool char bc, bc2;

volatile vector signed short ss, ss2;
volatile vector unsigned short us, us2;
volatile vector bool short bs, bs2;

volatile vector signed int si, si2;
volatile vector unsigned int ui, ui2;
volatile vector bool int bi, bi2;

volatile vector signed long long sl, sl2;
volatile vector unsigned long long ul, ul2;
volatile vector bool long long bl, bl2;

volatile vector signed __int128 slll, slll2;
volatile vector unsigned __int128 ulll, ulll2;
volatile vector bool __int128 blll, blll2;

volatile vector double fd, fd2;

volatile int cnt;

// CHECK-LABEL: define dso_local void @test_assign(
// CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: store volatile <16 x i8> [[TMP0]], ptr @sc, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: store volatile <16 x i8> [[TMP1]], ptr @uc, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: store volatile <8 x i16> [[TMP2]], ptr @ss, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: store volatile <8 x i16> [[TMP3]], ptr @us, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: store volatile <4 x i32> [[TMP4]], ptr @si, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: store volatile <4 x i32> [[TMP5]], ptr @ui, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: store volatile <2 x i64> [[TMP6]], ptr @sl, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: store volatile <2 x i64> [[TMP7]], ptr @ul, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: store volatile <1 x i128> [[TMP8]], ptr @slll, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: store volatile <1 x i128> [[TMP9]], ptr @ulll, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <2 x double>, ptr @fd2, align 8
// CHECK-NEXT: store volatile <2 x double> [[TMP10]], ptr @fd, align 8
// CHECK-NEXT: ret void
//
void test_assign(void) {

  sc = sc2;
  uc = uc2;

  ss = ss2;
  us = us2;

  si = si2;
  ui = ui2;

  sl = sl2;
  ul = ul2;

  slll = slll2;
  ulll = ulll2;

  fd = fd2;
}

// CHECK-LABEL: define dso_local void @test_pos(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: store volatile <16 x i8> [[TMP0]], ptr @sc, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: store volatile <16 x i8> [[TMP1]], ptr @uc, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: store volatile <8 x i16> [[TMP2]], ptr @ss, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: store volatile <8 x i16> [[TMP3]], ptr @us, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: store volatile <4 x i32> [[TMP4]], ptr @si, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: store volatile <4 x i32> [[TMP5]], ptr @ui, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: store volatile <2 x i64> [[TMP6]], ptr @sl, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: store volatile <2 x i64> [[TMP7]], ptr @ul, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: store volatile <1 x i128> [[TMP8]], ptr @slll, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: store volatile <1 x i128> [[TMP9]], ptr @ulll, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <2 x double>, ptr @fd2, align 8
// CHECK-NEXT: store volatile <2 x double> [[TMP10]], ptr @fd, align 8
// CHECK-NEXT: ret void
//
void test_pos(void) {

  sc = +sc2;
  uc = +uc2;

  ss = +ss2;
  us = +us2;

  si = +si2;
  ui = +ui2;

  sl = +sl2;
  ul = +ul2;

  slll = +slll2;
  ulll = +ulll2;

  fd = +fd2;
}

// CHECK-LABEL: define dso_local void @test_neg(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[SUB:%.*]] = sub <16 x i8> zeroinitializer, [[TMP0]]
// CHECK-NEXT: store volatile <16 x i8> [[SUB]], ptr @sc, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[SUB1:%.*]] = sub <8 x i16> zeroinitializer, [[TMP1]]
// CHECK-NEXT: store volatile <8 x i16> [[SUB1]], ptr @ss, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[SUB2:%.*]] = sub <4 x i32> zeroinitializer, [[TMP2]]
// CHECK-NEXT: store volatile <4 x i32> [[SUB2]], ptr @si, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[SUB3:%.*]] = sub <2 x i64> zeroinitializer, [[TMP3]]
// CHECK-NEXT: store volatile <2 x i64> [[SUB3]], ptr @sl, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[SUB4:%.*]] = sub <1 x i128> zeroinitializer, [[TMP4]]
// CHECK-NEXT: store volatile <1 x i128> [[SUB4]], ptr @slll, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <2 x double>, ptr @fd2, align 8
// CHECK-NEXT: [[FNEG:%.*]] = fneg <2 x double> [[TMP5]]
// CHECK-NEXT: store volatile <2 x double> [[FNEG]], ptr @fd, align 8
// CHECK-NEXT: ret void
//
void test_neg(void) {

  sc = -sc2;
  ss = -ss2;
  si = -si2;
  sl = -sl2;
  slll = -slll2;
  fd = -fd2;
}

// CHECK-LABEL: define dso_local void @test_preinc(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[INC:%.*]] = add <16 x i8> [[TMP0]], splat (i8 1)
// CHECK-NEXT: store volatile <16 x i8> [[INC]], ptr @sc2, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[INC1:%.*]] = add <16 x i8> [[TMP1]], splat (i8 1)
// CHECK-NEXT: store volatile <16 x i8> [[INC1]], ptr @uc2, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[INC2:%.*]] = add <8 x i16> [[TMP2]], splat (i16 1)
// CHECK-NEXT: store volatile <8 x i16> [[INC2]], ptr @ss2, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[INC3:%.*]] = add <8 x i16> [[TMP3]], splat (i16 1)
// CHECK-NEXT: store volatile <8 x i16> [[INC3]], ptr @us2, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[INC4:%.*]] = add <4 x i32> [[TMP4]], splat (i32 1)
// CHECK-NEXT: store volatile <4 x i32> [[INC4]], ptr @si2, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: [[INC5:%.*]] = add <4 x i32> [[TMP5]], splat (i32 1)
// CHECK-NEXT: store volatile <4 x i32> [[INC5]], ptr @ui2, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[INC6:%.*]] = add <2 x i64> [[TMP6]], splat (i64 1)
// CHECK-NEXT: store volatile <2 x i64> [[INC6]], ptr @sl2, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: [[INC7:%.*]] = add <2 x i64> [[TMP7]], splat (i64 1)
// CHECK-NEXT: store volatile <2 x i64> [[INC7]], ptr @ul2, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[INC8:%.*]] = add <1 x i128> [[TMP8]], splat (i128 1)
// CHECK-NEXT: store volatile <1 x i128> [[INC8]], ptr @slll2, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: [[INC9:%.*]] = add <1 x i128> [[TMP9]], splat (i128 1)
// CHECK-NEXT: store volatile <1 x i128> [[INC9]], ptr @ulll2, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <2 x double>, ptr @fd2, align 8
// CHECK-NEXT: [[INC10:%.*]] = fadd <2 x double> [[TMP10]], splat (double 1.000000e+00)
// CHECK-NEXT: store volatile <2 x double> [[INC10]], ptr @fd2, align 8
// CHECK-NEXT: ret void
//
void test_preinc(void) {

  ++sc2;
  ++uc2;

  ++ss2;
  ++us2;

  ++si2;
  ++ui2;

  ++sl2;
  ++ul2;

  ++slll2;
  ++ulll2;

  ++fd2;
}

// CHECK-LABEL: define dso_local void @test_postinc(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[INC:%.*]] = add <16 x i8> [[TMP0]], splat (i8 1)
// CHECK-NEXT: store volatile <16 x i8> [[INC]], ptr @sc2, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[INC1:%.*]] = add <16 x i8> [[TMP1]], splat (i8 1)
// CHECK-NEXT: store volatile <16 x i8> [[INC1]], ptr @uc2, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[INC2:%.*]] = add <8 x i16> [[TMP2]], splat (i16 1)
// CHECK-NEXT: store volatile <8 x i16> [[INC2]], ptr @ss2, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[INC3:%.*]] = add <8 x i16> [[TMP3]], splat (i16 1)
// CHECK-NEXT: store volatile <8 x i16> [[INC3]], ptr @us2, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[INC4:%.*]] = add <4 x i32> [[TMP4]], splat (i32 1)
// CHECK-NEXT: store volatile <4 x i32> [[INC4]], ptr @si2, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: [[INC5:%.*]] = add <4 x i32> [[TMP5]], splat (i32 1)
// CHECK-NEXT: store volatile <4 x i32> [[INC5]], ptr @ui2, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[INC6:%.*]] = add <2 x i64> [[TMP6]], splat (i64 1)
// CHECK-NEXT: store volatile <2 x i64> [[INC6]], ptr @sl2, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: [[INC7:%.*]] = add <2 x i64> [[TMP7]], splat (i64 1)
// CHECK-NEXT: store volatile <2 x i64> [[INC7]], ptr @ul2, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[INC8:%.*]] = add <1 x i128> [[TMP8]], splat (i128 1)
// CHECK-NEXT: store volatile <1 x i128> [[INC8]], ptr @slll2, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: [[INC9:%.*]] = add <1 x i128> [[TMP9]], splat (i128 1)
// CHECK-NEXT: store volatile <1 x i128> [[INC9]], ptr @ulll2, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <2 x double>, ptr @fd2, align 8
// CHECK-NEXT: [[INC10:%.*]] = fadd <2 x double> [[TMP10]], splat (double 1.000000e+00)
// CHECK-NEXT: store volatile <2 x double> [[INC10]], ptr @fd2, align 8
// CHECK-NEXT: ret void
//
void test_postinc(void) {

  sc2++;
  uc2++;

  ss2++;
  us2++;

  si2++;
  ui2++;

  sl2++;
  ul2++;

  slll2++;
  ulll2++;

  fd2++;
}

// CHECK-LABEL: define dso_local void @test_predec(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[DEC:%.*]] = add <16 x i8> [[TMP0]], splat (i8 -1)
// CHECK-NEXT: store volatile <16 x i8> [[DEC]], ptr @sc2, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[DEC1:%.*]] = add <16 x i8> [[TMP1]], splat (i8 -1)
// CHECK-NEXT: store volatile <16 x i8> [[DEC1]], ptr @uc2, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[DEC2:%.*]] = add <8 x i16> [[TMP2]], splat (i16 -1)
// CHECK-NEXT: store volatile <8 x i16> [[DEC2]], ptr @ss2, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[DEC3:%.*]] = add <8 x i16> [[TMP3]], splat (i16 -1)
// CHECK-NEXT: store volatile <8 x i16> [[DEC3]], ptr @us2, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[DEC4:%.*]] = add <4 x i32> [[TMP4]], splat (i32 -1)
// CHECK-NEXT: store volatile <4 x i32> [[DEC4]], ptr @si2, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: [[DEC5:%.*]] = add <4 x i32> [[TMP5]], splat (i32 -1)
// CHECK-NEXT: store volatile <4 x i32> [[DEC5]], ptr @ui2, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[DEC6:%.*]] = add <2 x i64> [[TMP6]], splat (i64 -1)
// CHECK-NEXT: store volatile <2 x i64> [[DEC6]], ptr @sl2, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: [[DEC7:%.*]] = add <2 x i64> [[TMP7]], splat (i64 -1)
// CHECK-NEXT: store volatile <2 x i64> [[DEC7]], ptr @ul2, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[DEC8:%.*]] = add <1 x i128> [[TMP8]], splat (i128 -1)
// CHECK-NEXT: store volatile <1 x i128> [[DEC8]], ptr @slll2, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: [[DEC9:%.*]] = add <1 x i128> [[TMP9]], splat (i128 -1)
// CHECK-NEXT: store volatile <1 x i128> [[DEC9]], ptr @ulll2, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <2 x double>, ptr @fd2, align 8
// CHECK-NEXT: [[DEC10:%.*]] = fadd <2 x double> [[TMP10]], splat (double -1.000000e+00)
// CHECK-NEXT: store volatile <2 x double> [[DEC10]], ptr @fd2, align 8
// CHECK-NEXT: ret void
//
void test_predec(void) {

  --sc2;
  --uc2;

  --ss2;
  --us2;

  --si2;
  --ui2;

  --sl2;
  --ul2;

  --slll2;
  --ulll2;

  --fd2;
}

// CHECK-LABEL: define dso_local void @test_postdec(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[DEC:%.*]] = add <16 x i8> [[TMP0]], splat (i8 -1)
// CHECK-NEXT: store volatile <16 x i8> [[DEC]], ptr @sc2, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[DEC1:%.*]] = add <16 x i8> [[TMP1]], splat (i8 -1)
// CHECK-NEXT: store volatile <16 x i8> [[DEC1]], ptr @uc2, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[DEC2:%.*]] = add <8 x i16> [[TMP2]], splat (i16 -1)
// CHECK-NEXT: store volatile <8 x i16> [[DEC2]], ptr @ss2, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[DEC3:%.*]] = add <8 x i16> [[TMP3]], splat (i16 -1)
// CHECK-NEXT: store volatile <8 x i16> [[DEC3]], ptr @us2, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[DEC4:%.*]] = add <4 x i32> [[TMP4]], splat (i32 -1)
// CHECK-NEXT: store volatile <4 x i32> [[DEC4]], ptr @si2, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: [[DEC5:%.*]] = add <4 x i32> [[TMP5]], splat (i32 -1)
// CHECK-NEXT: store volatile <4 x i32> [[DEC5]], ptr @ui2, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[DEC6:%.*]] = add <2 x i64> [[TMP6]], splat (i64 -1)
// CHECK-NEXT: store volatile <2 x i64> [[DEC6]], ptr @sl2, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: [[DEC7:%.*]] = add <2 x i64> [[TMP7]], splat (i64 -1)
// CHECK-NEXT: store volatile <2 x i64> [[DEC7]], ptr @ul2, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[DEC8:%.*]] = add <1 x i128> [[TMP8]], splat (i128 -1)
// CHECK-NEXT: store volatile <1 x i128> [[DEC8]], ptr @slll2, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: [[DEC9:%.*]] = add <1 x i128> [[TMP9]], splat (i128 -1)
// CHECK-NEXT: store volatile <1 x i128> [[DEC9]], ptr @ulll2, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <2 x double>, ptr @fd2, align 8
// CHECK-NEXT: [[DEC10:%.*]] = fadd <2 x double> [[TMP10]], splat (double -1.000000e+00)
// CHECK-NEXT: store volatile <2 x double> [[DEC10]], ptr @fd2, align 8
// CHECK-NEXT: ret void
//
void test_postdec(void) {

  sc2--;
  uc2--;

  ss2--;
  us2--;

  si2--;
  ui2--;

  sl2--;
  ul2--;

  slll2--;
  ulll2--;

  fd2--;
}

// CHECK-LABEL: define dso_local void @test_add(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[ADD:%.*]] = add <16 x i8> [[TMP0]], [[TMP1]]
// CHECK-NEXT: store volatile <16 x i8> [[ADD]], ptr @sc, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8
// CHECK-NEXT: [[ADD1:%.*]] = add <16 x i8> [[TMP2]], [[TMP3]]
// CHECK-NEXT: store volatile <16 x i8> [[ADD1]], ptr @sc, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @bc, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[ADD2:%.*]] = add <16 x i8> [[TMP4]], [[TMP5]]
// CHECK-NEXT: store volatile <16 x i8> [[ADD2]], ptr @sc, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[ADD3:%.*]] = add <16 x i8> [[TMP6]], [[TMP7]]
// CHECK-NEXT: store volatile <16 x i8> [[ADD3]], ptr @uc, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8
// CHECK-NEXT: [[ADD4:%.*]] = add <16 x i8> [[TMP8]], [[TMP9]]
// CHECK-NEXT: store volatile <16 x i8> [[ADD4]], ptr @uc, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <16 x i8>, ptr @bc, align 8
// CHECK-NEXT: [[TMP11:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[ADD5:%.*]] = add <16 x i8> [[TMP10]], [[TMP11]]
// CHECK-NEXT: store volatile <16 x i8> [[ADD5]], ptr @uc, align 8
// CHECK-NEXT: [[TMP12:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[TMP13:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[ADD6:%.*]] = add <8 x i16> [[TMP12]], [[TMP13]]
// CHECK-NEXT: store volatile <8 x i16> [[ADD6]], ptr @ss, align 8
// CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[TMP15:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8
// CHECK-NEXT: [[ADD7:%.*]] = add <8 x i16> [[TMP14]], [[TMP15]]
// CHECK-NEXT: store volatile <8 x i16> [[ADD7]], ptr @ss, align 8
// CHECK-NEXT: [[TMP16:%.*]] = load volatile <8 x i16>, ptr @bs, align 8
// CHECK-NEXT: [[TMP17:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[ADD8:%.*]] = add <8 x i16> [[TMP16]], [[TMP17]]
// CHECK-NEXT: store volatile <8 x i16> [[ADD8]], ptr @ss, align 8
// CHECK-NEXT: [[TMP18:%.*]] = load volatile <8 x i16>, ptr @us, align 8
// CHECK-NEXT: [[TMP19:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[ADD9:%.*]] = add <8 x i16> [[TMP18]], [[TMP19]]
// CHECK-NEXT: store volatile <8 x i16> [[ADD9]], ptr @us, align 8
// CHECK-NEXT: [[TMP20:%.*]] = load volatile <8 x i16>, ptr @us, align 8
// CHECK-NEXT: [[TMP21:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8
// CHECK-NEXT: [[ADD10:%.*]] = add <8 x i16> [[TMP20]], [[TMP21]]
// CHECK-NEXT: store volatile <8 x i16> [[ADD10]], ptr @us, align 8
// CHECK-NEXT: [[TMP22:%.*]] = load volatile <8 x i16>, ptr @bs, align 8
// CHECK-NEXT: [[TMP23:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[ADD11:%.*]] = add <8 x i16> [[TMP22]], [[TMP23]]
// CHECK-NEXT: store volatile <8 x i16> [[ADD11]], ptr @us, align 8
// CHECK-NEXT: [[TMP24:%.*]] = load volatile <4 x i32>, ptr @si, align 8
// CHECK-NEXT: [[TMP25:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[ADD12:%.*]] = add <4 x i32> [[TMP24]], [[TMP25]]
// CHECK-NEXT: store volatile <4 x i32> [[ADD12]], ptr @si, align 8
// CHECK-NEXT: [[TMP26:%.*]] = load volatile <4 x i32>, ptr @si, align 8
// CHECK-NEXT: [[TMP27:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8
// CHECK-NEXT: [[ADD13:%.*]] = add <4 x i32> [[TMP26]], [[TMP27]]
// CHECK-NEXT: store volatile <4 x i32> [[ADD13]], ptr @si, align 8
// CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
// CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[ADD14:%.*]] = add <4 x i32> [[TMP28]], [[TMP29]]
// CHECK-NEXT: store volatile <4 x i32> [[ADD14]], ptr @si, align 8
// CHECK-NEXT: [[TMP30:%.*]] = load volatile <4 x i32>, ptr @ui, align 8
// CHECK-NEXT: [[TMP31:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: [[ADD15:%.*]] = add <4 x i32> [[TMP30]], [[TMP31]]
// CHECK-NEXT: store volatile <4 x i32> [[ADD15]], ptr @ui, align 8
// CHECK-NEXT: [[TMP32:%.*]] = load volatile <4 x i32>, ptr @ui, align 8
// CHECK-NEXT: [[TMP33:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8
// CHECK-NEXT: [[ADD16:%.*]] = add <4 x i32> [[TMP32]], [[TMP33]]
// CHECK-NEXT: store volatile <4 x i32> [[ADD16]], ptr @ui, align 8
// CHECK-NEXT: [[TMP34:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
// CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: [[ADD17:%.*]] = add <4 x i32> [[TMP34]], [[TMP35]]
// CHECK-NEXT: store volatile <4 x i32> [[ADD17]], ptr @ui, align 8
// CHECK-NEXT: [[TMP36:%.*]] = load volatile <2 x i64>, ptr @sl, align 8
// CHECK-NEXT: [[TMP37:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[ADD18:%.*]] = add <2 x i64> [[TMP36]], [[TMP37]]
// CHECK-NEXT: store volatile <2 x i64> [[ADD18]], ptr @sl, align 8
// CHECK-NEXT: [[TMP38:%.*]] = load volatile <2 x i64>, ptr @sl, align 8
// CHECK-NEXT: [[TMP39:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8
// CHECK-NEXT: [[ADD19:%.*]] = add <2 x i64> [[TMP38]], [[TMP39]]
// CHECK-NEXT: store volatile <2 x i64> [[ADD19]], ptr @sl, align 8
// CHECK-NEXT: [[TMP40:%.*]] = load volatile <2 x i64>, ptr @bl, align 8
// CHECK-NEXT: [[TMP41:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[ADD20:%.*]] = add <2 x i64> [[TMP40]], [[TMP41]]
// CHECK-NEXT: store volatile <2 x i64> [[ADD20]], ptr @sl, align 8
// CHECK-NEXT: [[TMP42:%.*]] = load volatile <2 x i64>, ptr @ul, align 8
// CHECK-NEXT: [[TMP43:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: [[ADD21:%.*]] = add <2 x i64> [[TMP42]], [[TMP43]]
// CHECK-NEXT: store volatile <2 x i64> [[ADD21]], ptr @ul, align 8
// CHECK-NEXT: [[TMP44:%.*]] = load volatile <2 x i64>, ptr @ul, align 8
// CHECK-NEXT: [[TMP45:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8
// CHECK-NEXT: [[ADD22:%.*]] = add <2 x i64> [[TMP44]], [[TMP45]]
// CHECK-NEXT: store volatile <2 x i64> [[ADD22]], ptr @ul, align 8
// CHECK-NEXT: [[TMP46:%.*]] = load volatile <2 x i64>, ptr @bl, align 8
// CHECK-NEXT: [[TMP47:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: [[ADD23:%.*]] = add <2 x i64> [[TMP46]], [[TMP47]]
// CHECK-NEXT: store volatile <2 x i64> [[ADD23]], ptr @ul, align 8
// CHECK-NEXT: [[TMP48:%.*]] = load volatile <1 x i128>, ptr @slll, align 8
// CHECK-NEXT: [[TMP49:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[ADD24:%.*]] = add <1 x i128> [[TMP48]], [[TMP49]]
// CHECK-NEXT: store volatile <1 x i128> [[ADD24]], ptr @slll, align 8
// CHECK-NEXT: [[TMP50:%.*]] = load volatile <1 x i128>, ptr @slll, align 8
// CHECK-NEXT: [[TMP51:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8
// CHECK-NEXT: [[ADD25:%.*]] = add <1 x i128> [[TMP50]], [[TMP51]]
// CHECK-NEXT: store volatile <1 x i128> [[ADD25]], ptr @slll, align 8
// CHECK-NEXT: [[TMP52:%.*]] = load volatile <1 x i128>, ptr @blll, align 8
// CHECK-NEXT: [[TMP53:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[ADD26:%.*]] = add <1 x i128> [[TMP52]], [[TMP53]]
// CHECK-NEXT: store volatile <1 x i128> [[ADD26]], ptr @slll, align 8
// CHECK-NEXT: [[TMP54:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
// CHECK-NEXT: [[TMP55:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: [[ADD27:%.*]] = add <1 x i128> [[TMP54]], [[TMP55]]
// CHECK-NEXT: store volatile <1 x i128> [[ADD27]], ptr @ulll, align 8
// CHECK-NEXT: [[TMP56:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
// CHECK-NEXT: [[TMP57:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8
// CHECK-NEXT: [[ADD28:%.*]] = add <1 x i128> [[TMP56]], [[TMP57]]
// CHECK-NEXT: store volatile <1 x i128> [[ADD28]], ptr @ulll, align 8
// CHECK-NEXT: [[TMP58:%.*]] = load volatile <1 x i128>, ptr @blll, align 8
// CHECK-NEXT: [[TMP59:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: [[ADD29:%.*]] = add <1 x i128> [[TMP58]], [[TMP59]]
// CHECK-NEXT: store volatile <1 x i128> [[ADD29]], ptr @ulll, align 8
// CHECK-NEXT: [[TMP60:%.*]] = load volatile <2 x double>, ptr @fd, align 8
// CHECK-NEXT: [[TMP61:%.*]] = load volatile <2 x double>, ptr @fd2, align 8
// CHECK-NEXT: [[ADD30:%.*]] = fadd <2 x double> [[TMP60]], [[TMP61]]
// CHECK-NEXT: store volatile <2 x double> [[ADD30]], ptr @fd, align 8
// CHECK-NEXT: ret void
//
void test_add(void) {

  sc = sc + sc2;
  sc = sc + bc2;
  sc = bc + sc2;
  uc = uc + uc2;
  uc = uc + bc2;
  uc = bc + uc2;

  ss = ss + ss2;
  ss = ss + bs2;
  ss = bs + ss2;
  us = us + us2;
  us = us + bs2;
  us = bs + us2;

  si = si + si2;
  si = si + bi2;
  si = bi + si2;
  ui = ui + ui2;
  ui = ui + bi2;
  ui = bi + ui2;

  sl = sl + sl2;
  sl = sl + bl2;
  sl = bl + sl2;
  ul = ul + ul2;
  ul = ul + bl2;
  ul = bl + ul2;

  slll = slll + slll2;
  slll = slll + blll2;
  slll = blll + slll2;
  ulll = ulll + ulll2;
  ulll = ulll + blll2;
  ulll = blll + ulll2;

  fd = fd + fd2;
}

// CHECK-LABEL: define dso_local void @test_add_assign(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[ADD:%.*]] = add <16 x i8> [[TMP1]], [[TMP0]]
// CHECK-NEXT: store volatile <16 x i8> [[ADD]], ptr @sc, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[ADD1:%.*]] = add <16 x i8> [[TMP3]], [[TMP2]]
// CHECK-NEXT: store volatile <16 x i8> [[ADD1]], ptr @sc, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[ADD2:%.*]] = add <16 x i8> [[TMP5]], [[TMP4]]
// CHECK-NEXT: store volatile <16 x i8> [[ADD2]], ptr @uc, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[ADD3:%.*]] = add <16 x i8> [[TMP7]], [[TMP6]]
// CHECK-NEXT: store volatile <16 x i8> [[ADD3]], ptr @uc, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[ADD4:%.*]] = add <8 x i16> [[TMP9]], [[TMP8]]
// CHECK-NEXT: store volatile <8 x i16> [[ADD4]], ptr @ss, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8
// CHECK-NEXT: [[TMP11:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[ADD5:%.*]] = add <8 x i16> [[TMP11]], [[TMP10]]
// CHECK-NEXT: store volatile <8 x i16> [[ADD5]], ptr @ss, align 8
// CHECK-NEXT: [[TMP12:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[TMP13:%.*]] = load volatile <8 x i16>, ptr @us, align 8
// CHECK-NEXT: [[ADD6:%.*]] = add <8 x i16> [[TMP13]], [[TMP12]]
// CHECK-NEXT: store volatile <8 x i16> [[ADD6]], ptr @us, align 8
// CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8
// CHECK-NEXT: [[TMP15:%.*]] = load volatile <8 x i16>, ptr @us, align 8
// CHECK-NEXT: [[ADD7:%.*]] = add <8 x i16> [[TMP15]], [[TMP14]]
// CHECK-NEXT: store volatile <8 x i16> [[ADD7]], ptr @us, align 8
// CHECK-NEXT: [[TMP16:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[TMP17:%.*]] = load volatile <4 x i32>, ptr @si, align 8
// CHECK-NEXT: [[ADD8:%.*]] = add <4 x i32> [[TMP17]], [[TMP16]]
// CHECK-NEXT: store volatile <4 x i32> [[ADD8]], ptr @si, align 8
// CHECK-NEXT: [[TMP18:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8
// CHECK-NEXT: [[TMP19:%.*]] = load volatile <4 x i32>, ptr @si, align 8
// CHECK-NEXT: [[ADD9:%.*]] = add <4 x i32> [[TMP19]], [[TMP18]]
// CHECK-NEXT: store volatile <4 x i32> [[ADD9]], ptr @si, align 8
// CHECK-NEXT: [[TMP20:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: [[TMP21:%.*]] = load volatile <4 x i32>, ptr @ui, align 8
// CHECK-NEXT: [[ADD10:%.*]] = add <4 x i32> [[TMP21]], [[TMP20]]
// CHECK-NEXT: store volatile <4 x i32> [[ADD10]], ptr @ui, align 8
// CHECK-NEXT: [[TMP22:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8
// CHECK-NEXT: [[TMP23:%.*]] = load volatile <4 x i32>, ptr @ui, align 8
// CHECK-NEXT: [[ADD11:%.*]] = add <4 x i32> [[TMP23]], [[TMP22]]
// CHECK-NEXT: store volatile <4 x i32> [[ADD11]], ptr @ui, align 8
// CHECK-NEXT: [[TMP24:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[TMP25:%.*]] = load volatile <2 x i64>, ptr @sl, align 8
// CHECK-NEXT: [[ADD12:%.*]] = add <2 x i64> [[TMP25]], [[TMP24]]
// CHECK-NEXT: store volatile <2 x i64> [[ADD12]], ptr @sl, align 8
// CHECK-NEXT: [[TMP26:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8
// CHECK-NEXT: [[TMP27:%.*]] = load volatile <2 x i64>, ptr @sl, align 8
// CHECK-NEXT: [[ADD13:%.*]] = add <2 x i64> [[TMP27]], [[TMP26]]
// CHECK-NEXT: store volatile <2 x i64> [[ADD13]], ptr @sl, align 8
// CHECK-NEXT: [[TMP28:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: [[TMP29:%.*]] = load volatile <2 x i64>, ptr @ul, align 8
// CHECK-NEXT: [[ADD14:%.*]] = add <2 x i64> [[TMP29]], [[TMP28]]
// CHECK-NEXT: store volatile <2 x i64> [[ADD14]], ptr @ul, align 8
// CHECK-NEXT: [[TMP30:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8
// CHECK-NEXT: [[TMP31:%.*]] = load volatile <2 x i64>, ptr @ul, align 8
// CHECK-NEXT: [[ADD15:%.*]] = add <2 x i64> [[TMP31]], [[TMP30]]
// CHECK-NEXT: store volatile <2 x i64> [[ADD15]], ptr @ul, align 8
// CHECK-NEXT: [[TMP32:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[TMP33:%.*]] = load volatile <1 x i128>, ptr @slll, align 8
// CHECK-NEXT: [[ADD16:%.*]] = add <1 x i128> [[TMP33]], [[TMP32]]
// CHECK-NEXT: store volatile <1 x i128> [[ADD16]], ptr @slll, align 8
// CHECK-NEXT: [[TMP34:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8
// CHECK-NEXT: [[TMP35:%.*]] = load volatile <1 x i128>, ptr @slll, align 8
// CHECK-NEXT: [[ADD17:%.*]] = add <1 x i128> [[TMP35]], [[TMP34]]
// CHECK-NEXT: store volatile <1 x i128> [[ADD17]], ptr @slll, align 8
// CHECK-NEXT: [[TMP36:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: [[TMP37:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
// CHECK-NEXT: [[ADD18:%.*]] = add <1 x i128> [[TMP37]], [[TMP36]]
// CHECK-NEXT: store volatile <1 x i128> [[ADD18]], ptr @ulll, align 8
// CHECK-NEXT: [[TMP38:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8
// CHECK-NEXT: [[TMP39:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
// CHECK-NEXT: [[ADD19:%.*]] = add <1 x i128> [[TMP39]], [[TMP38]]
// CHECK-NEXT: store volatile <1 x i128> [[ADD19]], ptr @ulll, align 8
// CHECK-NEXT: [[TMP40:%.*]] = load volatile <2 x double>, ptr @fd2, align 8
// CHECK-NEXT: [[TMP41:%.*]] = load volatile <2 x double>, ptr @fd, align 8
// CHECK-NEXT: [[ADD20:%.*]] = fadd <2 x double> [[TMP41]], [[TMP40]]
// CHECK-NEXT: store volatile <2 x double> [[ADD20]], ptr @fd, align 8
// CHECK-NEXT: ret void
//
void test_add_assign(void) {

  sc += sc2;
  sc += bc2;
  uc += uc2;
  uc += bc2;

  ss += ss2;
  ss += bs2;
  us += us2;
  us += bs2;

  si += si2;
  si += bi2;
  ui += ui2;
  ui += bi2;

  sl += sl2;
  sl += bl2;
  ul += ul2;
  ul += bl2;

  slll += slll2;
  slll += blll2;
  ulll += ulll2;
  ulll += blll2;

  fd += fd2;
}

// CHECK-LABEL: define dso_local void @test_sub(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[SUB:%.*]] = sub <16 x i8> [[TMP0]], [[TMP1]]
// CHECK-NEXT: store volatile <16 x i8> [[SUB]], ptr @sc, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8
// CHECK-NEXT: [[SUB1:%.*]] = sub <16 x i8> [[TMP2]], [[TMP3]]
// CHECK-NEXT: store volatile <16 x i8> [[SUB1]], ptr @sc, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @bc, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[SUB2:%.*]] = sub <16 x i8> [[TMP4]], [[TMP5]]
// CHECK-NEXT: store volatile <16 x i8> [[SUB2]], ptr @sc, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[SUB3:%.*]] = sub <16 x i8> [[TMP6]], [[TMP7]]
// CHECK-NEXT: store volatile <16 x i8> [[SUB3]], ptr @uc, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8
// CHECK-NEXT: [[SUB4:%.*]] = sub <16 x i8> [[TMP8]], [[TMP9]]
// CHECK-NEXT: store volatile <16 x i8> [[SUB4]], ptr @uc, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <16 x i8>, ptr @bc, align 8
// CHECK-NEXT: [[TMP11:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[SUB5:%.*]] = sub <16 x i8> [[TMP10]], [[TMP11]]
// CHECK-NEXT: store volatile <16 x i8> [[SUB5]], ptr @uc, align 8
// CHECK-NEXT: [[TMP12:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[TMP13:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[SUB6:%.*]] = sub <8 x i16> [[TMP12]], [[TMP13]]
// CHECK-NEXT: store volatile <8 x i16> [[SUB6]], ptr @ss, align 8
// CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[TMP15:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8
// CHECK-NEXT: [[SUB7:%.*]] = sub <8 x i16> [[TMP14]], [[TMP15]]
// CHECK-NEXT: store volatile <8 x i16> [[SUB7]], ptr @ss, align 8
// CHECK-NEXT: [[TMP16:%.*]] = load volatile <8 x i16>, ptr @bs, align 8
// CHECK-NEXT: [[TMP17:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[SUB8:%.*]] = sub <8 x i16> [[TMP16]], [[TMP17]]
// CHECK-NEXT: store volatile <8 x i16> [[SUB8]], ptr @ss, align 8
// CHECK-NEXT: [[TMP18:%.*]] = load volatile <8 x i16>, ptr @us, align 8
// CHECK-NEXT: [[TMP19:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[SUB9:%.*]] = sub <8 x i16> [[TMP18]], [[TMP19]]
// CHECK-NEXT: store volatile <8 x i16> [[SUB9]], ptr @us, align 8
// CHECK-NEXT: [[TMP20:%.*]] = load volatile <8 x i16>, ptr @us, align 8
// CHECK-NEXT: [[TMP21:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8
// CHECK-NEXT: [[SUB10:%.*]] = sub <8 x i16> [[TMP20]], [[TMP21]]
// CHECK-NEXT: store volatile <8 x i16> [[SUB10]], ptr @us, align 8
// CHECK-NEXT: [[TMP22:%.*]] = load volatile <8 x i16>, ptr @bs, align 8
// CHECK-NEXT: [[TMP23:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[SUB11:%.*]] = sub <8 x i16> [[TMP22]], [[TMP23]]
// CHECK-NEXT: store volatile <8 x i16> [[SUB11]], ptr @us, align 8
// CHECK-NEXT: [[TMP24:%.*]] = load volatile <4 x i32>, ptr @si, align 8
// CHECK-NEXT: [[TMP25:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[SUB12:%.*]] = sub <4 x i32> [[TMP24]], [[TMP25]]
// CHECK-NEXT: store volatile <4 x i32> [[SUB12]], ptr @si, align 8
// CHECK-NEXT: [[TMP26:%.*]] = load volatile <4 x i32>, ptr @si, align 8
// CHECK-NEXT: [[TMP27:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8
// CHECK-NEXT: [[SUB13:%.*]] = sub <4 x i32> [[TMP26]], [[TMP27]]
// CHECK-NEXT: store volatile <4 x i32> [[SUB13]], ptr @si, align 8
// CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
// CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[SUB14:%.*]] = sub <4 x i32> [[TMP28]], [[TMP29]]
// CHECK-NEXT: store volatile <4 x i32> [[SUB14]], ptr @si, align 8
// CHECK-NEXT: [[TMP30:%.*]] = load volatile <4 x i32>, ptr @ui, align 8
// CHECK-NEXT: [[TMP31:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: [[SUB15:%.*]] = sub <4 x i32> [[TMP30]], [[TMP31]]
// CHECK-NEXT: store volatile <4 x i32> [[SUB15]], ptr @ui, align 8
// CHECK-NEXT: [[TMP32:%.*]] = load volatile <4 x i32>, ptr @ui, align 8
// CHECK-NEXT: [[TMP33:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8
// CHECK-NEXT: [[SUB16:%.*]] = sub <4 x i32> [[TMP32]], [[TMP33]]
// CHECK-NEXT: store volatile <4 x i32> [[SUB16]], ptr @ui, align 8
// CHECK-NEXT: [[TMP34:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
// CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: [[SUB17:%.*]] = sub <4 x i32> [[TMP34]], [[TMP35]]
// CHECK-NEXT: store volatile <4 x i32> [[SUB17]], ptr @ui, align 8
// CHECK-NEXT: [[TMP36:%.*]] = load volatile <2 x i64>, ptr @sl, align 8
// CHECK-NEXT: [[TMP37:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[SUB18:%.*]] = sub <2 x i64> [[TMP36]], [[TMP37]]
// CHECK-NEXT: store volatile <2 x i64> [[SUB18]], ptr @sl, align 8
// CHECK-NEXT: [[TMP38:%.*]] = load volatile <2 x i64>, ptr @sl, align 8
// CHECK-NEXT: [[TMP39:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8
// CHECK-NEXT: [[SUB19:%.*]] = sub <2 x i64> [[TMP38]], [[TMP39]]
// CHECK-NEXT: store volatile <2 x i64> [[SUB19]], ptr @sl, align 8
// CHECK-NEXT: [[TMP40:%.*]] = load volatile <2 x i64>, ptr @bl, align 8
// CHECK-NEXT: [[TMP41:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[SUB20:%.*]] = sub <2 x i64> [[TMP40]], [[TMP41]]
// CHECK-NEXT: store volatile <2 x i64> [[SUB20]], ptr @sl, align 8
// CHECK-NEXT: [[TMP42:%.*]] = load volatile <2 x i64>, ptr @ul, align 8
// CHECK-NEXT: [[TMP43:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: [[SUB21:%.*]] = sub <2 x i64> [[TMP42]], [[TMP43]]
// CHECK-NEXT: store volatile <2 x i64> [[SUB21]], ptr @ul, align 8
// CHECK-NEXT: [[TMP44:%.*]] = load volatile <2 x i64>, ptr @ul, align 8
// CHECK-NEXT: [[TMP45:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8
// CHECK-NEXT: [[SUB22:%.*]] = sub <2 x i64> [[TMP44]], [[TMP45]]
// CHECK-NEXT: store volatile <2 x i64> [[SUB22]], ptr @ul, align 8
// CHECK-NEXT: [[TMP46:%.*]] = load volatile <2 x i64>, ptr @bl, align 8
// CHECK-NEXT: [[TMP47:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: [[SUB23:%.*]] = sub <2 x i64> [[TMP46]], [[TMP47]]
// CHECK-NEXT: store volatile <2 x i64> [[SUB23]], ptr @ul, align 8
// CHECK-NEXT: [[TMP48:%.*]] = load volatile <1 x i128>, ptr @slll, align 8
// CHECK-NEXT: [[TMP49:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[SUB24:%.*]] = sub <1 x i128> [[TMP48]], [[TMP49]]
// CHECK-NEXT: store volatile <1 x i128> [[SUB24]], ptr @slll, align 8
// CHECK-NEXT: [[TMP50:%.*]] = load volatile <1 x i128>, ptr @slll, align 8
// CHECK-NEXT: [[TMP51:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8
// CHECK-NEXT: [[SUB25:%.*]] = sub <1 x i128> [[TMP50]], [[TMP51]]
// CHECK-NEXT: store volatile <1 x i128> [[SUB25]], ptr @slll, align 8
// CHECK-NEXT: [[TMP52:%.*]] = load volatile <1 x i128>, ptr @blll, align 8
// CHECK-NEXT: [[TMP53:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[SUB26:%.*]] = sub <1 x i128> [[TMP52]], [[TMP53]]
// CHECK-NEXT: store volatile <1 x i128> [[SUB26]], ptr @slll, align 8
// CHECK-NEXT: [[TMP54:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
// CHECK-NEXT: [[TMP55:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: [[SUB27:%.*]] = sub <1 x i128> [[TMP54]], [[TMP55]]
// CHECK-NEXT: store volatile <1 x i128> [[SUB27]], ptr @ulll, align 8
// CHECK-NEXT: [[TMP56:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
// CHECK-NEXT: [[TMP57:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8
// CHECK-NEXT: [[SUB28:%.*]] = sub <1 x i128> [[TMP56]], [[TMP57]]
// CHECK-NEXT: store volatile <1 x i128> [[SUB28]], ptr @ulll, align 8
// CHECK-NEXT: [[TMP58:%.*]] = load volatile <1 x i128>, ptr @blll, align 8
// CHECK-NEXT: [[TMP59:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: [[SUB29:%.*]] = sub <1 x i128> [[TMP58]], [[TMP59]]
// CHECK-NEXT: store volatile <1 x i128> [[SUB29]], ptr @ulll, align 8
// CHECK-NEXT: [[TMP60:%.*]] = load volatile <2 x double>, ptr @fd, align 8
// CHECK-NEXT: [[TMP61:%.*]] = load volatile <2 x double>, ptr @fd2, align 8
// CHECK-NEXT: [[SUB30:%.*]] = fsub <2 x double> [[TMP60]], [[TMP61]]
// CHECK-NEXT: store volatile <2 x double> [[SUB30]], ptr @fd, align 8
// CHECK-NEXT: ret void
//
void test_sub(void) {

  sc = sc - sc2;
  sc = sc - bc2;
  sc = bc - sc2;
  uc = uc - uc2;
  uc = uc - bc2;
  uc = bc - uc2;

  ss = ss - ss2;
  ss = ss - bs2;
  ss = bs - ss2;
  us = us - us2;
  us = us - bs2;
  us = bs - us2;

  si = si - si2;
  si = si - bi2;
  si = bi - si2;
  ui = ui - ui2;
  ui = ui - bi2;
  ui = bi - ui2;

  sl = sl - sl2;
  sl = sl - bl2;
  sl = bl - sl2;
  ul = ul - ul2;
  ul = ul - bl2;
  ul = bl - ul2;

  slll = slll - slll2;
  slll = slll - blll2;
  slll = blll - slll2;
  ulll = ulll - ulll2;
  ulll = ulll - blll2;
  ulll = blll - ulll2;

  fd = fd - fd2;
}

// CHECK-LABEL: define dso_local void @test_sub_assign(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[SUB:%.*]] = sub <16 x i8> [[TMP1]], [[TMP0]]
// CHECK-NEXT: store volatile <16 x i8> [[SUB]], ptr @sc, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[SUB1:%.*]] = sub <16 x i8> [[TMP3]], [[TMP2]]
// CHECK-NEXT: store volatile <16 x i8> [[SUB1]], ptr @sc, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[SUB2:%.*]] = sub <16 x i8> [[TMP5]], [[TMP4]]
// CHECK-NEXT: store volatile <16 x i8> [[SUB2]], ptr @uc, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[SUB3:%.*]] = sub <16 x i8> [[TMP7]], [[TMP6]]
// CHECK-NEXT: store volatile <16 x i8> [[SUB3]], ptr @uc, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[SUB4:%.*]] = sub <8 x i16> [[TMP9]], [[TMP8]]
// CHECK-NEXT: store volatile <8 x i16> [[SUB4]], ptr @ss, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8
// CHECK-NEXT: [[TMP11:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[SUB5:%.*]] = sub <8 x i16> [[TMP11]], [[TMP10]]
// CHECK-NEXT: store volatile <8 x i16> [[SUB5]], ptr @ss, align 8
// CHECK-NEXT: [[TMP12:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[TMP13:%.*]] = load volatile <8 x i16>, ptr @us, align 8
// CHECK-NEXT: [[SUB6:%.*]] = sub <8 x i16> [[TMP13]], [[TMP12]]
// CHECK-NEXT: store volatile <8 x i16> [[SUB6]], ptr @us, align 8
// CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8
// CHECK-NEXT: [[TMP15:%.*]] = load volatile <8 x i16>, ptr @us, align 8
// CHECK-NEXT: [[SUB7:%.*]] = sub <8 x i16> [[TMP15]], [[TMP14]]
// CHECK-NEXT: store volatile <8 x i16> [[SUB7]], ptr @us, align 8
// CHECK-NEXT: [[TMP16:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[TMP17:%.*]] = load volatile <4 x i32>, ptr @si, align 8
// CHECK-NEXT: [[SUB8:%.*]] = sub <4 x i32> [[TMP17]], [[TMP16]]
// CHECK-NEXT: store volatile <4 x i32> [[SUB8]], ptr @si, align 8
// CHECK-NEXT: [[TMP18:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8
// CHECK-NEXT: [[TMP19:%.*]] = load volatile <4 x i32>, ptr @si, align 8
// CHECK-NEXT: [[SUB9:%.*]] = sub <4 x i32> [[TMP19]], [[TMP18]]
// CHECK-NEXT: store volatile <4 x i32> [[SUB9]], ptr @si, align 8
// CHECK-NEXT: [[TMP20:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: [[TMP21:%.*]] = load volatile <4 x i32>, ptr @ui, align 8
// CHECK-NEXT: [[SUB10:%.*]] = sub <4 x i32> [[TMP21]], [[TMP20]]
// CHECK-NEXT: store volatile <4 x i32> [[SUB10]], ptr @ui, align 8
// CHECK-NEXT: [[TMP22:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8
// CHECK-NEXT: [[TMP23:%.*]] = load volatile <4 x i32>, ptr @ui, align 8
// CHECK-NEXT: [[SUB11:%.*]] = sub <4 x i32> [[TMP23]], [[TMP22]]
// CHECK-NEXT: store volatile <4 x i32> [[SUB11]], ptr @ui, align 8
// CHECK-NEXT: [[TMP24:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[TMP25:%.*]] = load volatile <2 x i64>, ptr @sl, align 8
// CHECK-NEXT: [[SUB12:%.*]] = sub <2 x i64> [[TMP25]], [[TMP24]]
// CHECK-NEXT: store volatile <2 x i64> [[SUB12]], ptr @sl, align 8
// CHECK-NEXT: [[TMP26:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8
// CHECK-NEXT: [[TMP27:%.*]] = load volatile <2 x i64>, ptr @sl, align 8
// CHECK-NEXT: [[SUB13:%.*]] = sub <2 x i64> [[TMP27]], [[TMP26]]
// CHECK-NEXT: store volatile <2 x i64> [[SUB13]], ptr @sl, align 8
// CHECK-NEXT: [[TMP28:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: [[TMP29:%.*]] = load volatile <2 x i64>, ptr @ul, align 8
// CHECK-NEXT: [[SUB14:%.*]] = sub <2 x i64> [[TMP29]], [[TMP28]]
// CHECK-NEXT: store volatile <2 x i64> [[SUB14]], ptr @ul, align 8
// CHECK-NEXT: [[TMP30:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8
// CHECK-NEXT: [[TMP31:%.*]] = load volatile <2 x i64>, ptr @ul, align 8
// CHECK-NEXT: [[SUB15:%.*]] = sub <2 x i64> [[TMP31]], [[TMP30]]
// CHECK-NEXT: store volatile <2 x i64> [[SUB15]], ptr @ul, align 8
// CHECK-NEXT: [[TMP32:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[TMP33:%.*]] = load volatile <1 x i128>, ptr @slll, align 8
// CHECK-NEXT: [[SUB16:%.*]] = sub <1 x i128> [[TMP33]], [[TMP32]]
// CHECK-NEXT: store volatile <1 x i128> [[SUB16]], ptr @slll, align 8
// CHECK-NEXT: [[TMP34:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8
// CHECK-NEXT: [[TMP35:%.*]] = load volatile <1 x i128>, ptr @slll, align 8
// CHECK-NEXT: [[SUB17:%.*]] = sub <1 x i128> [[TMP35]], [[TMP34]]
// CHECK-NEXT: store volatile <1 x i128> [[SUB17]], ptr @slll, align 8
// CHECK-NEXT: [[TMP36:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: [[TMP37:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
// CHECK-NEXT: [[SUB18:%.*]] = sub <1 x i128> [[TMP37]], [[TMP36]]
// CHECK-NEXT: store volatile <1 x i128> [[SUB18]], ptr @ulll, align 8
// CHECK-NEXT: [[TMP38:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8
// CHECK-NEXT: [[TMP39:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
// CHECK-NEXT: [[SUB19:%.*]] = sub <1 x i128> [[TMP39]], [[TMP38]]
// CHECK-NEXT: store volatile <1 x i128> [[SUB19]], ptr @ulll, align 8
// CHECK-NEXT: [[TMP40:%.*]] = load volatile <2 x double>, ptr @fd2, align 8
// CHECK-NEXT: [[TMP41:%.*]] = load volatile <2 x double>, ptr @fd, align 8
// CHECK-NEXT: [[SUB20:%.*]] = fsub <2 x double> [[TMP41]], [[TMP40]]
// CHECK-NEXT: store volatile <2 x double> [[SUB20]], ptr @fd, align 8
// CHECK-NEXT: ret void
//
void test_sub_assign(void) {

  sc -= sc2;
  sc -= bc2;
  uc -= uc2;
  uc -= bc2;

  ss -= ss2;
  ss -= bs2;
  us -= us2;
  us -= bs2;

  si -= si2;
  si -= bi2;
  ui -= ui2;
  ui -= bi2;

  sl -= sl2;
  sl -= bl2;
  ul -= ul2;
  ul -= bl2;

  slll -= slll2;
  slll -= blll2;
  ulll -= ulll2;
  ulll -= blll2;

  fd -= fd2;
}

// CHECK-LABEL: define dso_local void @test_mul(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[MUL:%.*]] = mul <16 x i8> [[TMP0]], [[TMP1]]
// CHECK-NEXT: store volatile <16 x i8> [[MUL]], ptr @sc, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[MUL1:%.*]] = mul <16 x i8> [[TMP2]], [[TMP3]]
// CHECK-NEXT: store volatile <16 x i8> [[MUL1]], ptr @uc, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[MUL2:%.*]] = mul <8 x i16> [[TMP4]], [[TMP5]]
// CHECK-NEXT: store volatile <8 x i16> [[MUL2]], ptr @ss, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <8 x i16>, ptr @us, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[MUL3:%.*]] = mul <8 x i16> [[TMP6]], [[TMP7]]
// CHECK-NEXT: store volatile <8 x i16> [[MUL3]], ptr @us, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <4 x i32>, ptr @si, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[MUL4:%.*]] = mul <4 x i32> [[TMP8]], [[TMP9]]
// CHECK-NEXT: store volatile <4 x i32> [[MUL4]], ptr @si, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <4 x i32>, ptr @ui, align 8
// CHECK-NEXT: [[TMP11:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: [[MUL5:%.*]] = mul <4 x i32> [[TMP10]], [[TMP11]]
// CHECK-NEXT: store volatile <4 x i32> [[MUL5]], ptr @ui, align 8
// CHECK-NEXT: [[TMP12:%.*]] = load volatile <2 x i64>, ptr @sl, align 8
// CHECK-NEXT: [[TMP13:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[MUL6:%.*]] = mul <2 x i64> [[TMP12]], [[TMP13]]
// CHECK-NEXT: store volatile <2 x i64> [[MUL6]], ptr @sl, align 8
// CHECK-NEXT: [[TMP14:%.*]] = load volatile <2 x i64>, ptr @ul, align 8
// CHECK-NEXT: [[TMP15:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: [[MUL7:%.*]] = mul <2 x i64> [[TMP14]], [[TMP15]]
// CHECK-NEXT: store volatile <2 x i64> [[MUL7]], ptr @ul, align 8
// CHECK-NEXT: [[TMP16:%.*]] = load volatile <1 x i128>, ptr @slll, align 8
// CHECK-NEXT: [[TMP17:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[MUL8:%.*]] = mul <1 x i128> [[TMP16]], [[TMP17]]
// CHECK-NEXT: store volatile <1 x i128> [[MUL8]], ptr @slll, align 8
// CHECK-NEXT: [[TMP18:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
// CHECK-NEXT: [[TMP19:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: [[MUL9:%.*]] = mul <1 x i128> [[TMP18]], [[TMP19]]
// CHECK-NEXT: store volatile <1 x i128> [[MUL9]], ptr @ulll, align 8
// CHECK-NEXT: [[TMP20:%.*]] = load volatile <2 x double>, ptr @fd, align 8
// CHECK-NEXT: [[TMP21:%.*]] = load volatile <2 x double>, ptr @fd2, align 8
// CHECK-NEXT: [[MUL10:%.*]] = fmul <2 x double> [[TMP20]], [[TMP21]]
// CHECK-NEXT: store volatile <2 x double> [[MUL10]], ptr @fd, align 8
// CHECK-NEXT: ret void
//
void test_mul(void) {

  sc = sc * sc2;
  uc = uc * uc2;

  ss = ss * ss2;
  us = us * us2;

  si = si * si2;
  ui = ui * ui2;

  sl = sl * sl2;
  ul = ul * ul2;

  slll = slll * slll2;
  ulll = ulll * ulll2;

  fd = fd * fd2;
}

// CHECK-LABEL: define dso_local void @test_mul_assign(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[MUL:%.*]] = mul <16 x i8> [[TMP1]], [[TMP0]]
// CHECK-NEXT: store volatile <16 x i8> [[MUL]], ptr @sc, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[MUL1:%.*]] = mul <16 x i8> [[TMP3]], [[TMP2]]
// CHECK-NEXT: store volatile <16 x i8> [[MUL1]], ptr @uc, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[MUL2:%.*]] = mul <8 x i16> [[TMP5]], [[TMP4]]
// CHECK-NEXT: store volatile <8 x i16> [[MUL2]], ptr @ss, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <8 x i16>, ptr @us, align 8
// CHECK-NEXT: [[MUL3:%.*]] = mul <8 x i16> [[TMP7]], [[TMP6]]
// CHECK-NEXT: store volatile <8 x i16> [[MUL3]], ptr @us, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <4 x i32>, ptr @si, align 8
// CHECK-NEXT: [[MUL4:%.*]] = mul <4 x i32> [[TMP9]], [[TMP8]]
// CHECK-NEXT: store volatile <4 x i32> [[MUL4]], ptr @si, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: [[TMP11:%.*]] = load volatile <4 x i32>, ptr @ui, align 8
// CHECK-NEXT: [[MUL5:%.*]] = mul <4 x i32> [[TMP11]], [[TMP10]]
// CHECK-NEXT: store volatile <4 x i32> [[MUL5]], ptr @ui, align 8
// CHECK-NEXT: [[TMP12:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[TMP13:%.*]] = load volatile <2 x i64>, ptr @sl, align 8
// CHECK-NEXT: [[MUL6:%.*]] = mul <2 x i64> [[TMP13]], [[TMP12]]
// CHECK-NEXT: store volatile <2 x i64> [[MUL6]], ptr @sl, align 8
// CHECK-NEXT: [[TMP14:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: [[TMP15:%.*]] = load volatile <2 x i64>, ptr @ul, align 8
// CHECK-NEXT: [[MUL7:%.*]] = mul <2 x i64> [[TMP15]], [[TMP14]]
// CHECK-NEXT: store volatile <2 x i64> [[MUL7]], ptr @ul, align 8
// CHECK-NEXT: [[TMP16:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[TMP17:%.*]] = load volatile <1 x i128>, ptr @slll, align 8
// CHECK-NEXT: [[MUL8:%.*]] = mul <1 x i128> [[TMP17]], [[TMP16]]
// CHECK-NEXT: store volatile <1 x i128> [[MUL8]], ptr @slll, align 8
// CHECK-NEXT: [[TMP18:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: [[TMP19:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
// CHECK-NEXT: [[MUL9:%.*]] = mul <1 x i128> [[TMP19]], [[TMP18]]
// CHECK-NEXT: store volatile <1 x i128> [[MUL9]], ptr @ulll, align 8
// CHECK-NEXT: [[TMP20:%.*]] = load volatile <2 x double>, ptr @fd2, align 8
// CHECK-NEXT: [[TMP21:%.*]] = load volatile <2 x double>, ptr @fd, align 8
// CHECK-NEXT: [[MUL10:%.*]] = fmul <2 x double> [[TMP21]], [[TMP20]]
// CHECK-NEXT: store volatile <2 x double> [[MUL10]], ptr @fd, align 8
// CHECK-NEXT: ret void
//
void test_mul_assign(void) {

  sc *= sc2;
  uc *= uc2;

  ss *= ss2;
  us *= us2;

  si *= si2;
  ui *= ui2;

  sl *= sl2;
  ul *= ul2;

  slll *= slll2;
  ulll *= ulll2;

  fd *= fd2;
}

// CHECK-LABEL: define dso_local void @test_div(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[DIV:%.*]] = sdiv <16 x i8> [[TMP0]], [[TMP1]]
// CHECK-NEXT: store volatile <16 x i8> [[DIV]], ptr @sc, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[DIV1:%.*]] = udiv <16 x i8> [[TMP2]], [[TMP3]]
// CHECK-NEXT: store volatile <16 x i8> [[DIV1]], ptr @uc, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[DIV2:%.*]] = sdiv <8 x i16> [[TMP4]], [[TMP5]]
// CHECK-NEXT: store volatile <8 x i16> [[DIV2]], ptr @ss, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <8 x i16>, ptr @us, align 8
@us2, align 8 1120 // CHECK-NEXT: [[DIV3:%.*]] = udiv <8 x i16> [[TMP6]], [[TMP7]] 1121 // CHECK-NEXT: store volatile <8 x i16> [[DIV3]], ptr @us, align 8 1122 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <4 x i32>, ptr @si, align 8 1123 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 1124 // CHECK-NEXT: [[DIV4:%.*]] = sdiv <4 x i32> [[TMP8]], [[TMP9]] 1125 // CHECK-NEXT: store volatile <4 x i32> [[DIV4]], ptr @si, align 8 1126 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 1127 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 1128 // CHECK-NEXT: [[DIV5:%.*]] = udiv <4 x i32> [[TMP10]], [[TMP11]] 1129 // CHECK-NEXT: store volatile <4 x i32> [[DIV5]], ptr @ui, align 8 1130 // CHECK-NEXT: [[TMP12:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 1131 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 1132 // CHECK-NEXT: [[DIV6:%.*]] = sdiv <2 x i64> [[TMP12]], [[TMP13]] 1133 // CHECK-NEXT: store volatile <2 x i64> [[DIV6]], ptr @sl, align 8 1134 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 1135 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 1136 // CHECK-NEXT: [[DIV7:%.*]] = udiv <2 x i64> [[TMP14]], [[TMP15]] 1137 // CHECK-NEXT: store volatile <2 x i64> [[DIV7]], ptr @ul, align 8 1138 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 1139 // CHECK-NEXT: [[TMP17:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 1140 // CHECK-NEXT: [[DIV8:%.*]] = sdiv <1 x i128> [[TMP16]], [[TMP17]] 1141 // CHECK-NEXT: store volatile <1 x i128> [[DIV8]], ptr @slll, align 8 1142 // CHECK-NEXT: [[TMP18:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 1143 // CHECK-NEXT: [[TMP19:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 1144 // CHECK-NEXT: [[DIV9:%.*]] = udiv <1 x i128> [[TMP18]], [[TMP19]] 1145 // CHECK-NEXT: store volatile <1 x i128> [[DIV9]], ptr @ulll, align 8 1146 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <2 x double>, ptr @fd, align 8 1147 // CHECK-NEXT: [[TMP21:%.*]] = load volatile <2 x double>, ptr @fd2, align 8 1148 // CHECK-NEXT: [[DIV10:%.*]] = fdiv <2 x double> [[TMP20]], [[TMP21]] 1149 // CHECK-NEXT: store volatile <2 x double> [[DIV10]], ptr @fd, align 8 1150 // CHECK-NEXT: ret void 1151 // 1152 void test_div(void) { 1153 1154 sc = sc / sc2; 1155 uc = uc / uc2; 1156 1157 ss = ss / ss2; 1158 us = us / us2; 1159 1160 si = si / si2; 1161 ui = ui / ui2; 1162 1163 sl = sl / sl2; 1164 ul = ul / ul2; 1165 1166 slll = slll / slll2; 1167 ulll = ulll / ulll2; 1168 1169 fd = fd / fd2; 1170 } 1171 1172 // CHECK-LABEL: define dso_local void @test_div_assign( 1173 // CHECK-SAME: ) #[[ATTR0]] { 1174 // CHECK-NEXT: [[ENTRY:.*:]] 1175 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 1176 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 1177 // CHECK-NEXT: [[DIV:%.*]] = sdiv <16 x i8> [[TMP1]], [[TMP0]] 1178 // CHECK-NEXT: store volatile <16 x i8> [[DIV]], ptr @sc, align 8 1179 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 1180 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 1181 // CHECK-NEXT: [[DIV1:%.*]] = udiv <16 x i8> [[TMP3]], [[TMP2]] 1182 // CHECK-NEXT: store volatile <16 x i8> [[DIV1]], ptr @uc, align 8 1183 // CHECK-NEXT: [[TMP4:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 1184 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 1185 // CHECK-NEXT: [[DIV2:%.*]] = sdiv <8 x i16> 
[[TMP5]], [[TMP4]] 1186 // CHECK-NEXT: store volatile <8 x i16> [[DIV2]], ptr @ss, align 8 1187 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 1188 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <8 x i16>, ptr @us, align 8 1189 // CHECK-NEXT: [[DIV3:%.*]] = udiv <8 x i16> [[TMP7]], [[TMP6]] 1190 // CHECK-NEXT: store volatile <8 x i16> [[DIV3]], ptr @us, align 8 1191 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 1192 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <4 x i32>, ptr @si, align 8 1193 // CHECK-NEXT: [[DIV4:%.*]] = sdiv <4 x i32> [[TMP9]], [[TMP8]] 1194 // CHECK-NEXT: store volatile <4 x i32> [[DIV4]], ptr @si, align 8 1195 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 1196 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 1197 // CHECK-NEXT: [[DIV5:%.*]] = udiv <4 x i32> [[TMP11]], [[TMP10]] 1198 // CHECK-NEXT: store volatile <4 x i32> [[DIV5]], ptr @ui, align 8 1199 // CHECK-NEXT: [[TMP12:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 1200 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 1201 // CHECK-NEXT: [[DIV6:%.*]] = sdiv <2 x i64> [[TMP13]], [[TMP12]] 1202 // CHECK-NEXT: store volatile <2 x i64> [[DIV6]], ptr @sl, align 8 1203 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 1204 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 1205 // CHECK-NEXT: [[DIV7:%.*]] = udiv <2 x i64> [[TMP15]], [[TMP14]] 1206 // CHECK-NEXT: store volatile <2 x i64> [[DIV7]], ptr @ul, align 8 1207 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 1208 // CHECK-NEXT: [[TMP17:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 1209 // CHECK-NEXT: [[DIV8:%.*]] = sdiv <1 x i128> [[TMP17]], [[TMP16]] 1210 // CHECK-NEXT: store volatile <1 x i128> [[DIV8]], ptr @slll, align 8 1211 // CHECK-NEXT: [[TMP18:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 1212 // CHECK-NEXT: [[TMP19:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 1213 // CHECK-NEXT: [[DIV9:%.*]] = udiv <1 x i128> [[TMP19]], [[TMP18]] 1214 // CHECK-NEXT: store volatile <1 x i128> [[DIV9]], ptr @ulll, align 8 1215 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <2 x double>, ptr @fd2, align 8 1216 // CHECK-NEXT: [[TMP21:%.*]] = load volatile <2 x double>, ptr @fd, align 8 1217 // CHECK-NEXT: [[DIV10:%.*]] = fdiv <2 x double> [[TMP21]], [[TMP20]] 1218 // CHECK-NEXT: store volatile <2 x double> [[DIV10]], ptr @fd, align 8 1219 // CHECK-NEXT: ret void 1220 // 1221 void test_div_assign(void) { 1222 1223 sc /= sc2; 1224 uc /= uc2; 1225 1226 ss /= ss2; 1227 us /= us2; 1228 1229 si /= si2; 1230 ui /= ui2; 1231 1232 sl /= sl2; 1233 ul /= ul2; 1234 1235 slll /= slll2; 1236 ulll /= ulll2; 1237 1238 fd /= fd2; 1239 } 1240 1241 // CHECK-LABEL: define dso_local void @test_rem( 1242 // CHECK-SAME: ) #[[ATTR0]] { 1243 // CHECK-NEXT: [[ENTRY:.*:]] 1244 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 1245 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 1246 // CHECK-NEXT: [[REM:%.*]] = srem <16 x i8> [[TMP0]], [[TMP1]] 1247 // CHECK-NEXT: store volatile <16 x i8> [[REM]], ptr @sc, align 8 1248 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 1249 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 1250 // CHECK-NEXT: [[REM1:%.*]] = urem <16 x i8> [[TMP2]], [[TMP3]] 1251 // CHECK-NEXT: store volatile <16 x i8> [[REM1]], ptr @uc, align 8 1252 // CHECK-NEXT: 
[[TMP4:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[REM2:%.*]] = srem <8 x i16> [[TMP4]], [[TMP5]]
// CHECK-NEXT: store volatile <8 x i16> [[REM2]], ptr @ss, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <8 x i16>, ptr @us, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[REM3:%.*]] = urem <8 x i16> [[TMP6]], [[TMP7]]
// CHECK-NEXT: store volatile <8 x i16> [[REM3]], ptr @us, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <4 x i32>, ptr @si, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[REM4:%.*]] = srem <4 x i32> [[TMP8]], [[TMP9]]
// CHECK-NEXT: store volatile <4 x i32> [[REM4]], ptr @si, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <4 x i32>, ptr @ui, align 8
// CHECK-NEXT: [[TMP11:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: [[REM5:%.*]] = urem <4 x i32> [[TMP10]], [[TMP11]]
// CHECK-NEXT: store volatile <4 x i32> [[REM5]], ptr @ui, align 8
// CHECK-NEXT: [[TMP12:%.*]] = load volatile <2 x i64>, ptr @sl, align 8
// CHECK-NEXT: [[TMP13:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[REM6:%.*]] = srem <2 x i64> [[TMP12]], [[TMP13]]
// CHECK-NEXT: store volatile <2 x i64> [[REM6]], ptr @sl, align 8
// CHECK-NEXT: [[TMP14:%.*]] = load volatile <2 x i64>, ptr @ul, align 8
// CHECK-NEXT: [[TMP15:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: [[REM7:%.*]] = urem <2 x i64> [[TMP14]], [[TMP15]]
// CHECK-NEXT: store volatile <2 x i64> [[REM7]], ptr @ul, align 8
// CHECK-NEXT: [[TMP16:%.*]] = load volatile <1 x i128>, ptr @slll, align 8
// CHECK-NEXT: [[TMP17:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[REM8:%.*]] = srem <1 x i128> [[TMP16]], [[TMP17]]
// CHECK-NEXT: store volatile <1 x i128> [[REM8]], ptr @slll, align 8
// CHECK-NEXT: [[TMP18:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
// CHECK-NEXT: [[TMP19:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: [[REM9:%.*]] = urem <1 x i128> [[TMP18]], [[TMP19]]
// CHECK-NEXT: store volatile <1 x i128> [[REM9]], ptr @ulll, align 8
// CHECK-NEXT: ret void
//
void test_rem(void) {

  sc = sc % sc2;
  uc = uc % uc2;

  ss = ss % ss2;
  us = us % us2;

  si = si % si2;
  ui = ui % ui2;

  sl = sl % sl2;
  ul = ul % ul2;

  slll = slll % slll2;
  ulll = ulll % ulll2;
}

// CHECK-LABEL: define dso_local void @test_rem_assign(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[REM:%.*]] = srem <16 x i8> [[TMP1]], [[TMP0]]
// CHECK-NEXT: store volatile <16 x i8> [[REM]], ptr @sc, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[REM1:%.*]] = urem <16 x i8> [[TMP3]], [[TMP2]]
// CHECK-NEXT: store volatile <16 x i8> [[REM1]], ptr @uc, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <8 x
i16>, ptr @ss, align 8 1317 // CHECK-NEXT: [[REM2:%.*]] = srem <8 x i16> [[TMP5]], [[TMP4]] 1318 // CHECK-NEXT: store volatile <8 x i16> [[REM2]], ptr @ss, align 8 1319 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 1320 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <8 x i16>, ptr @us, align 8 1321 // CHECK-NEXT: [[REM3:%.*]] = urem <8 x i16> [[TMP7]], [[TMP6]] 1322 // CHECK-NEXT: store volatile <8 x i16> [[REM3]], ptr @us, align 8 1323 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 1324 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <4 x i32>, ptr @si, align 8 1325 // CHECK-NEXT: [[REM4:%.*]] = srem <4 x i32> [[TMP9]], [[TMP8]] 1326 // CHECK-NEXT: store volatile <4 x i32> [[REM4]], ptr @si, align 8 1327 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 1328 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 1329 // CHECK-NEXT: [[REM5:%.*]] = urem <4 x i32> [[TMP11]], [[TMP10]] 1330 // CHECK-NEXT: store volatile <4 x i32> [[REM5]], ptr @ui, align 8 1331 // CHECK-NEXT: [[TMP12:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 1332 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 1333 // CHECK-NEXT: [[REM6:%.*]] = srem <2 x i64> [[TMP13]], [[TMP12]] 1334 // CHECK-NEXT: store volatile <2 x i64> [[REM6]], ptr @sl, align 8 1335 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 1336 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 1337 // CHECK-NEXT: [[REM7:%.*]] = urem <2 x i64> [[TMP15]], [[TMP14]] 1338 // CHECK-NEXT: store volatile <2 x i64> [[REM7]], ptr @ul, align 8 1339 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 1340 // CHECK-NEXT: [[TMP17:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 1341 // CHECK-NEXT: [[REM8:%.*]] = srem <1 x i128> [[TMP17]], [[TMP16]] 1342 // CHECK-NEXT: store volatile <1 x i128> [[REM8]], ptr @slll, align 8 1343 // CHECK-NEXT: [[TMP18:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 1344 // CHECK-NEXT: [[TMP19:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 1345 // CHECK-NEXT: [[REM9:%.*]] = urem <1 x i128> [[TMP19]], [[TMP18]] 1346 // CHECK-NEXT: store volatile <1 x i128> [[REM9]], ptr @ulll, align 8 1347 // CHECK-NEXT: ret void 1348 // 1349 void test_rem_assign(void) { 1350 1351 sc %= sc2; 1352 uc %= uc2; 1353 1354 ss %= ss2; 1355 us %= us2; 1356 1357 si %= si2; 1358 ui %= ui2; 1359 1360 sl %= sl2; 1361 ul %= ul2; 1362 1363 slll %= slll2; 1364 ulll %= ulll2; 1365 } 1366 1367 // CHECK-LABEL: define dso_local void @test_not( 1368 // CHECK-SAME: ) #[[ATTR0]] { 1369 // CHECK-NEXT: [[ENTRY:.*:]] 1370 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 1371 // CHECK-NEXT: [[NOT:%.*]] = xor <16 x i8> [[TMP0]], splat (i8 -1) 1372 // CHECK-NEXT: store volatile <16 x i8> [[NOT]], ptr @sc, align 8 1373 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 1374 // CHECK-NEXT: [[NOT1:%.*]] = xor <16 x i8> [[TMP1]], splat (i8 -1) 1375 // CHECK-NEXT: store volatile <16 x i8> [[NOT1]], ptr @uc, align 8 1376 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 1377 // CHECK-NEXT: [[NOT2:%.*]] = xor <16 x i8> [[TMP2]], splat (i8 -1) 1378 // CHECK-NEXT: store volatile <16 x i8> [[NOT2]], ptr @bc, align 8 1379 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 1380 // CHECK-NEXT: [[NOT3:%.*]] = xor <8 x i16> [[TMP3]], splat (i16 -1) 1381 // CHECK-NEXT: store volatile <8 x i16> [[NOT3]], ptr 
@ss, align 8 1382 // CHECK-NEXT: [[TMP4:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 1383 // CHECK-NEXT: [[NOT4:%.*]] = xor <8 x i16> [[TMP4]], splat (i16 -1) 1384 // CHECK-NEXT: store volatile <8 x i16> [[NOT4]], ptr @us, align 8 1385 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 1386 // CHECK-NEXT: [[NOT5:%.*]] = xor <8 x i16> [[TMP5]], splat (i16 -1) 1387 // CHECK-NEXT: store volatile <8 x i16> [[NOT5]], ptr @bs, align 8 1388 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 1389 // CHECK-NEXT: [[NOT6:%.*]] = xor <4 x i32> [[TMP6]], splat (i32 -1) 1390 // CHECK-NEXT: store volatile <4 x i32> [[NOT6]], ptr @si, align 8 1391 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 1392 // CHECK-NEXT: [[NOT7:%.*]] = xor <4 x i32> [[TMP7]], splat (i32 -1) 1393 // CHECK-NEXT: store volatile <4 x i32> [[NOT7]], ptr @ui, align 8 1394 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 1395 // CHECK-NEXT: [[NOT8:%.*]] = xor <4 x i32> [[TMP8]], splat (i32 -1) 1396 // CHECK-NEXT: store volatile <4 x i32> [[NOT8]], ptr @bi, align 8 1397 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 1398 // CHECK-NEXT: [[NOT9:%.*]] = xor <2 x i64> [[TMP9]], splat (i64 -1) 1399 // CHECK-NEXT: store volatile <2 x i64> [[NOT9]], ptr @sl, align 8 1400 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 1401 // CHECK-NEXT: [[NOT10:%.*]] = xor <2 x i64> [[TMP10]], splat (i64 -1) 1402 // CHECK-NEXT: store volatile <2 x i64> [[NOT10]], ptr @ul, align 8 1403 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 1404 // CHECK-NEXT: [[NOT11:%.*]] = xor <2 x i64> [[TMP11]], splat (i64 -1) 1405 // CHECK-NEXT: store volatile <2 x i64> [[NOT11]], ptr @bl, align 8 1406 // CHECK-NEXT: [[TMP12:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 1407 // CHECK-NEXT: [[NOT12:%.*]] = xor <1 x i128> [[TMP12]], splat (i128 -1) 1408 // CHECK-NEXT: store volatile <1 x i128> [[NOT12]], ptr @slll, align 8 1409 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 1410 // CHECK-NEXT: [[NOT13:%.*]] = xor <1 x i128> [[TMP13]], splat (i128 -1) 1411 // CHECK-NEXT: store volatile <1 x i128> [[NOT13]], ptr @ulll, align 8 1412 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 1413 // CHECK-NEXT: [[NOT14:%.*]] = xor <1 x i128> [[TMP14]], splat (i128 -1) 1414 // CHECK-NEXT: store volatile <1 x i128> [[NOT14]], ptr @blll, align 8 1415 // CHECK-NEXT: ret void 1416 // 1417 void test_not(void) { 1418 1419 sc = ~sc2; 1420 uc = ~uc2; 1421 bc = ~bc2; 1422 1423 ss = ~ss2; 1424 us = ~us2; 1425 bs = ~bs2; 1426 1427 si = ~si2; 1428 ui = ~ui2; 1429 bi = ~bi2; 1430 1431 sl = ~sl2; 1432 ul = ~ul2; 1433 bl = ~bl2; 1434 1435 slll = ~slll2; 1436 ulll = ~ulll2; 1437 blll = ~blll2; 1438 } 1439 1440 // CHECK-LABEL: define dso_local void @test_and( 1441 // CHECK-SAME: ) #[[ATTR0]] { 1442 // CHECK-NEXT: [[ENTRY:.*:]] 1443 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 1444 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 1445 // CHECK-NEXT: [[AND:%.*]] = and <16 x i8> [[TMP0]], [[TMP1]] 1446 // CHECK-NEXT: store volatile <16 x i8> [[AND]], ptr @sc, align 8 1447 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 1448 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 1449 // CHECK-NEXT: [[AND1:%.*]] = and <16 x i8> [[TMP2]], [[TMP3]] 1450 // CHECK-NEXT: store volatile <16 x 
i8> [[AND1]], ptr @sc, align 8 1451 // CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 1452 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 1453 // CHECK-NEXT: [[AND2:%.*]] = and <16 x i8> [[TMP4]], [[TMP5]] 1454 // CHECK-NEXT: store volatile <16 x i8> [[AND2]], ptr @sc, align 8 1455 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 1456 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 1457 // CHECK-NEXT: [[AND3:%.*]] = and <16 x i8> [[TMP6]], [[TMP7]] 1458 // CHECK-NEXT: store volatile <16 x i8> [[AND3]], ptr @uc, align 8 1459 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 1460 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 1461 // CHECK-NEXT: [[AND4:%.*]] = and <16 x i8> [[TMP8]], [[TMP9]] 1462 // CHECK-NEXT: store volatile <16 x i8> [[AND4]], ptr @uc, align 8 1463 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 1464 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 1465 // CHECK-NEXT: [[AND5:%.*]] = and <16 x i8> [[TMP10]], [[TMP11]] 1466 // CHECK-NEXT: store volatile <16 x i8> [[AND5]], ptr @uc, align 8 1467 // CHECK-NEXT: [[TMP12:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 1468 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 1469 // CHECK-NEXT: [[AND6:%.*]] = and <16 x i8> [[TMP12]], [[TMP13]] 1470 // CHECK-NEXT: store volatile <16 x i8> [[AND6]], ptr @bc, align 8 1471 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 1472 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 1473 // CHECK-NEXT: [[AND7:%.*]] = and <8 x i16> [[TMP14]], [[TMP15]] 1474 // CHECK-NEXT: store volatile <8 x i16> [[AND7]], ptr @ss, align 8 1475 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 1476 // CHECK-NEXT: [[TMP17:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 1477 // CHECK-NEXT: [[AND8:%.*]] = and <8 x i16> [[TMP16]], [[TMP17]] 1478 // CHECK-NEXT: store volatile <8 x i16> [[AND8]], ptr @ss, align 8 1479 // CHECK-NEXT: [[TMP18:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 1480 // CHECK-NEXT: [[TMP19:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 1481 // CHECK-NEXT: [[AND9:%.*]] = and <8 x i16> [[TMP18]], [[TMP19]] 1482 // CHECK-NEXT: store volatile <8 x i16> [[AND9]], ptr @ss, align 8 1483 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <8 x i16>, ptr @us, align 8 1484 // CHECK-NEXT: [[TMP21:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 1485 // CHECK-NEXT: [[AND10:%.*]] = and <8 x i16> [[TMP20]], [[TMP21]] 1486 // CHECK-NEXT: store volatile <8 x i16> [[AND10]], ptr @us, align 8 1487 // CHECK-NEXT: [[TMP22:%.*]] = load volatile <8 x i16>, ptr @us, align 8 1488 // CHECK-NEXT: [[TMP23:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 1489 // CHECK-NEXT: [[AND11:%.*]] = and <8 x i16> [[TMP22]], [[TMP23]] 1490 // CHECK-NEXT: store volatile <8 x i16> [[AND11]], ptr @us, align 8 1491 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 1492 // CHECK-NEXT: [[TMP25:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 1493 // CHECK-NEXT: [[AND12:%.*]] = and <8 x i16> [[TMP24]], [[TMP25]] 1494 // CHECK-NEXT: store volatile <8 x i16> [[AND12]], ptr @us, align 8 1495 // CHECK-NEXT: [[TMP26:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 1496 // CHECK-NEXT: [[TMP27:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 1497 // CHECK-NEXT: [[AND13:%.*]] = and <8 x i16> [[TMP26]], [[TMP27]] 1498 // 
CHECK-NEXT: store volatile <8 x i16> [[AND13]], ptr @bs, align 8 1499 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @si, align 8 1500 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 1501 // CHECK-NEXT: [[AND14:%.*]] = and <4 x i32> [[TMP28]], [[TMP29]] 1502 // CHECK-NEXT: store volatile <4 x i32> [[AND14]], ptr @si, align 8 1503 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <4 x i32>, ptr @si, align 8 1504 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 1505 // CHECK-NEXT: [[AND15:%.*]] = and <4 x i32> [[TMP30]], [[TMP31]] 1506 // CHECK-NEXT: store volatile <4 x i32> [[AND15]], ptr @si, align 8 1507 // CHECK-NEXT: [[TMP32:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 1508 // CHECK-NEXT: [[TMP33:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 1509 // CHECK-NEXT: [[AND16:%.*]] = and <4 x i32> [[TMP32]], [[TMP33]] 1510 // CHECK-NEXT: store volatile <4 x i32> [[AND16]], ptr @si, align 8 1511 // CHECK-NEXT: [[TMP34:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 1512 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 1513 // CHECK-NEXT: [[AND17:%.*]] = and <4 x i32> [[TMP34]], [[TMP35]] 1514 // CHECK-NEXT: store volatile <4 x i32> [[AND17]], ptr @ui, align 8 1515 // CHECK-NEXT: [[TMP36:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 1516 // CHECK-NEXT: [[TMP37:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 1517 // CHECK-NEXT: [[AND18:%.*]] = and <4 x i32> [[TMP36]], [[TMP37]] 1518 // CHECK-NEXT: store volatile <4 x i32> [[AND18]], ptr @ui, align 8 1519 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 1520 // CHECK-NEXT: [[TMP39:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 1521 // CHECK-NEXT: [[AND19:%.*]] = and <4 x i32> [[TMP38]], [[TMP39]] 1522 // CHECK-NEXT: store volatile <4 x i32> [[AND19]], ptr @ui, align 8 1523 // CHECK-NEXT: [[TMP40:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 1524 // CHECK-NEXT: [[TMP41:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 1525 // CHECK-NEXT: [[AND20:%.*]] = and <4 x i32> [[TMP40]], [[TMP41]] 1526 // CHECK-NEXT: store volatile <4 x i32> [[AND20]], ptr @bi, align 8 1527 // CHECK-NEXT: [[TMP42:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 1528 // CHECK-NEXT: [[TMP43:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 1529 // CHECK-NEXT: [[AND21:%.*]] = and <2 x i64> [[TMP42]], [[TMP43]] 1530 // CHECK-NEXT: store volatile <2 x i64> [[AND21]], ptr @sl, align 8 1531 // CHECK-NEXT: [[TMP44:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 1532 // CHECK-NEXT: [[TMP45:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 1533 // CHECK-NEXT: [[AND22:%.*]] = and <2 x i64> [[TMP44]], [[TMP45]] 1534 // CHECK-NEXT: store volatile <2 x i64> [[AND22]], ptr @sl, align 8 1535 // CHECK-NEXT: [[TMP46:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 1536 // CHECK-NEXT: [[TMP47:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 1537 // CHECK-NEXT: [[AND23:%.*]] = and <2 x i64> [[TMP46]], [[TMP47]] 1538 // CHECK-NEXT: store volatile <2 x i64> [[AND23]], ptr @sl, align 8 1539 // CHECK-NEXT: [[TMP48:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 1540 // CHECK-NEXT: [[TMP49:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 1541 // CHECK-NEXT: [[AND24:%.*]] = and <2 x i64> [[TMP48]], [[TMP49]] 1542 // CHECK-NEXT: store volatile <2 x i64> [[AND24]], ptr @ul, align 8 1543 // CHECK-NEXT: [[TMP50:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 1544 // CHECK-NEXT: [[TMP51:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 1545 // CHECK-NEXT: 
[[AND25:%.*]] = and <2 x i64> [[TMP50]], [[TMP51]] 1546 // CHECK-NEXT: store volatile <2 x i64> [[AND25]], ptr @ul, align 8 1547 // CHECK-NEXT: [[TMP52:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 1548 // CHECK-NEXT: [[TMP53:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 1549 // CHECK-NEXT: [[AND26:%.*]] = and <2 x i64> [[TMP52]], [[TMP53]] 1550 // CHECK-NEXT: store volatile <2 x i64> [[AND26]], ptr @ul, align 8 1551 // CHECK-NEXT: [[TMP54:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 1552 // CHECK-NEXT: [[TMP55:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 1553 // CHECK-NEXT: [[AND27:%.*]] = and <2 x i64> [[TMP54]], [[TMP55]] 1554 // CHECK-NEXT: store volatile <2 x i64> [[AND27]], ptr @bl, align 8 1555 // CHECK-NEXT: [[TMP56:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 1556 // CHECK-NEXT: [[TMP57:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 1557 // CHECK-NEXT: [[AND28:%.*]] = and <1 x i128> [[TMP56]], [[TMP57]] 1558 // CHECK-NEXT: store volatile <1 x i128> [[AND28]], ptr @slll, align 8 1559 // CHECK-NEXT: [[TMP58:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 1560 // CHECK-NEXT: [[TMP59:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 1561 // CHECK-NEXT: [[AND29:%.*]] = and <1 x i128> [[TMP58]], [[TMP59]] 1562 // CHECK-NEXT: store volatile <1 x i128> [[AND29]], ptr @slll, align 8 1563 // CHECK-NEXT: [[TMP60:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 1564 // CHECK-NEXT: [[TMP61:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 1565 // CHECK-NEXT: [[AND30:%.*]] = and <1 x i128> [[TMP60]], [[TMP61]] 1566 // CHECK-NEXT: store volatile <1 x i128> [[AND30]], ptr @slll, align 8 1567 // CHECK-NEXT: [[TMP62:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 1568 // CHECK-NEXT: [[TMP63:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 1569 // CHECK-NEXT: [[AND31:%.*]] = and <1 x i128> [[TMP62]], [[TMP63]] 1570 // CHECK-NEXT: store volatile <1 x i128> [[AND31]], ptr @ulll, align 8 1571 // CHECK-NEXT: [[TMP64:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 1572 // CHECK-NEXT: [[TMP65:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 1573 // CHECK-NEXT: [[AND32:%.*]] = and <1 x i128> [[TMP64]], [[TMP65]] 1574 // CHECK-NEXT: store volatile <1 x i128> [[AND32]], ptr @ulll, align 8 1575 // CHECK-NEXT: [[TMP66:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 1576 // CHECK-NEXT: [[TMP67:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 1577 // CHECK-NEXT: [[AND33:%.*]] = and <1 x i128> [[TMP66]], [[TMP67]] 1578 // CHECK-NEXT: store volatile <1 x i128> [[AND33]], ptr @ulll, align 8 1579 // CHECK-NEXT: [[TMP68:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 1580 // CHECK-NEXT: [[TMP69:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 1581 // CHECK-NEXT: [[AND34:%.*]] = and <1 x i128> [[TMP68]], [[TMP69]] 1582 // CHECK-NEXT: store volatile <1 x i128> [[AND34]], ptr @blll, align 8 1583 // CHECK-NEXT: ret void 1584 // 1585 void test_and(void) { 1586 1587 sc = sc & sc2; 1588 sc = sc & bc2; 1589 sc = bc & sc2; 1590 uc = uc & uc2; 1591 uc = uc & bc2; 1592 uc = bc & uc2; 1593 bc = bc & bc2; 1594 1595 ss = ss & ss2; 1596 ss = ss & bs2; 1597 ss = bs & ss2; 1598 us = us & us2; 1599 us = us & bs2; 1600 us = bs & us2; 1601 bs = bs & bs2; 1602 1603 si = si & si2; 1604 si = si & bi2; 1605 si = bi & si2; 1606 ui = ui & ui2; 1607 ui = ui & bi2; 1608 ui = bi & ui2; 1609 bi = bi & bi2; 1610 1611 sl = sl & sl2; 1612 sl = sl & bl2; 1613 sl = bl & sl2; 1614 ul = ul & ul2; 1615 ul = ul & bl2; 1616 ul = bl & ul2; 1617 bl = bl & bl2; 1618 
  slll = slll & slll2;
  slll = slll & blll2;
  slll = blll & slll2;
  ulll = ulll & ulll2;
  ulll = ulll & blll2;
  ulll = blll & ulll2;
  blll = blll & blll2;
}

// CHECK-LABEL: define dso_local void @test_and_assign(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[AND:%.*]] = and <16 x i8> [[TMP1]], [[TMP0]]
// CHECK-NEXT: store volatile <16 x i8> [[AND]], ptr @sc, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[AND1:%.*]] = and <16 x i8> [[TMP3]], [[TMP2]]
// CHECK-NEXT: store volatile <16 x i8> [[AND1]], ptr @sc, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[AND2:%.*]] = and <16 x i8> [[TMP5]], [[TMP4]]
// CHECK-NEXT: store volatile <16 x i8> [[AND2]], ptr @uc, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[AND3:%.*]] = and <16 x i8> [[TMP7]], [[TMP6]]
// CHECK-NEXT: store volatile <16 x i8> [[AND3]], ptr @uc, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <16 x i8>, ptr @bc, align 8
// CHECK-NEXT: [[AND4:%.*]] = and <16 x i8> [[TMP9]], [[TMP8]]
// CHECK-NEXT: store volatile <16 x i8> [[AND4]], ptr @bc, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[TMP11:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[AND5:%.*]] = and <8 x i16> [[TMP11]], [[TMP10]]
// CHECK-NEXT: store volatile <8 x i16> [[AND5]], ptr @ss, align 8
// CHECK-NEXT: [[TMP12:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8
// CHECK-NEXT: [[TMP13:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[AND6:%.*]] = and <8 x i16> [[TMP13]], [[TMP12]]
// CHECK-NEXT: store volatile <8 x i16> [[AND6]], ptr @ss, align 8
// CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[TMP15:%.*]] = load volatile <8 x i16>, ptr @us, align 8
// CHECK-NEXT: [[AND7:%.*]] = and <8 x i16> [[TMP15]], [[TMP14]]
// CHECK-NEXT: store volatile <8 x i16> [[AND7]], ptr @us, align 8
// CHECK-NEXT: [[TMP16:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8
// CHECK-NEXT: [[TMP17:%.*]] = load volatile <8 x i16>, ptr @us, align 8
// CHECK-NEXT: [[AND8:%.*]] = and <8 x i16> [[TMP17]], [[TMP16]]
// CHECK-NEXT: store volatile <8 x i16> [[AND8]], ptr @us, align 8
// CHECK-NEXT: [[TMP18:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8
// CHECK-NEXT: [[TMP19:%.*]] = load volatile <8 x i16>, ptr @bs, align 8
// CHECK-NEXT: [[AND9:%.*]] = and <8 x i16> [[TMP19]], [[TMP18]]
// CHECK-NEXT: store volatile <8 x i16> [[AND9]], ptr @bs, align 8
// CHECK-NEXT: [[TMP20:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[TMP21:%.*]] = load volatile <4 x i32>, ptr @si, align 8
// CHECK-NEXT: [[AND10:%.*]] = and <4 x i32> [[TMP21]], [[TMP20]]
// CHECK-NEXT: store volatile
<4 x i32> [[AND10]], ptr @si, align 8 1675 // CHECK-NEXT: [[TMP22:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 1676 // CHECK-NEXT: [[TMP23:%.*]] = load volatile <4 x i32>, ptr @si, align 8 1677 // CHECK-NEXT: [[AND11:%.*]] = and <4 x i32> [[TMP23]], [[TMP22]] 1678 // CHECK-NEXT: store volatile <4 x i32> [[AND11]], ptr @si, align 8 1679 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 1680 // CHECK-NEXT: [[TMP25:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 1681 // CHECK-NEXT: [[AND12:%.*]] = and <4 x i32> [[TMP25]], [[TMP24]] 1682 // CHECK-NEXT: store volatile <4 x i32> [[AND12]], ptr @ui, align 8 1683 // CHECK-NEXT: [[TMP26:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 1684 // CHECK-NEXT: [[TMP27:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 1685 // CHECK-NEXT: [[AND13:%.*]] = and <4 x i32> [[TMP27]], [[TMP26]] 1686 // CHECK-NEXT: store volatile <4 x i32> [[AND13]], ptr @ui, align 8 1687 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 1688 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 1689 // CHECK-NEXT: [[AND14:%.*]] = and <4 x i32> [[TMP29]], [[TMP28]] 1690 // CHECK-NEXT: store volatile <4 x i32> [[AND14]], ptr @bi, align 8 1691 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 1692 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 1693 // CHECK-NEXT: [[AND15:%.*]] = and <2 x i64> [[TMP31]], [[TMP30]] 1694 // CHECK-NEXT: store volatile <2 x i64> [[AND15]], ptr @sl, align 8 1695 // CHECK-NEXT: [[TMP32:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 1696 // CHECK-NEXT: [[TMP33:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 1697 // CHECK-NEXT: [[AND16:%.*]] = and <2 x i64> [[TMP33]], [[TMP32]] 1698 // CHECK-NEXT: store volatile <2 x i64> [[AND16]], ptr @sl, align 8 1699 // CHECK-NEXT: [[TMP34:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 1700 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 1701 // CHECK-NEXT: [[AND17:%.*]] = and <2 x i64> [[TMP35]], [[TMP34]] 1702 // CHECK-NEXT: store volatile <2 x i64> [[AND17]], ptr @ul, align 8 1703 // CHECK-NEXT: [[TMP36:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 1704 // CHECK-NEXT: [[TMP37:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 1705 // CHECK-NEXT: [[AND18:%.*]] = and <2 x i64> [[TMP37]], [[TMP36]] 1706 // CHECK-NEXT: store volatile <2 x i64> [[AND18]], ptr @ul, align 8 1707 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 1708 // CHECK-NEXT: [[TMP39:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 1709 // CHECK-NEXT: [[AND19:%.*]] = and <2 x i64> [[TMP39]], [[TMP38]] 1710 // CHECK-NEXT: store volatile <2 x i64> [[AND19]], ptr @bl, align 8 1711 // CHECK-NEXT: [[TMP40:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 1712 // CHECK-NEXT: [[TMP41:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 1713 // CHECK-NEXT: [[AND20:%.*]] = and <1 x i128> [[TMP41]], [[TMP40]] 1714 // CHECK-NEXT: store volatile <1 x i128> [[AND20]], ptr @slll, align 8 1715 // CHECK-NEXT: [[TMP42:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 1716 // CHECK-NEXT: [[TMP43:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 1717 // CHECK-NEXT: [[AND21:%.*]] = and <1 x i128> [[TMP43]], [[TMP42]] 1718 // CHECK-NEXT: store volatile <1 x i128> [[AND21]], ptr @slll, align 8 1719 // CHECK-NEXT: [[TMP44:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 1720 // CHECK-NEXT: [[TMP45:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 1721 // CHECK-NEXT: 
[[AND22:%.*]] = and <1 x i128> [[TMP45]], [[TMP44]] 1722 // CHECK-NEXT: store volatile <1 x i128> [[AND22]], ptr @ulll, align 8 1723 // CHECK-NEXT: [[TMP46:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 1724 // CHECK-NEXT: [[TMP47:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 1725 // CHECK-NEXT: [[AND23:%.*]] = and <1 x i128> [[TMP47]], [[TMP46]] 1726 // CHECK-NEXT: store volatile <1 x i128> [[AND23]], ptr @ulll, align 8 1727 // CHECK-NEXT: [[TMP48:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 1728 // CHECK-NEXT: [[TMP49:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 1729 // CHECK-NEXT: [[AND24:%.*]] = and <1 x i128> [[TMP49]], [[TMP48]] 1730 // CHECK-NEXT: store volatile <1 x i128> [[AND24]], ptr @blll, align 8 1731 // CHECK-NEXT: ret void 1732 // 1733 void test_and_assign(void) { 1734 1735 sc &= sc2; 1736 sc &= bc2; 1737 uc &= uc2; 1738 uc &= bc2; 1739 bc &= bc2; 1740 1741 ss &= ss2; 1742 ss &= bs2; 1743 us &= us2; 1744 us &= bs2; 1745 bs &= bs2; 1746 1747 si &= si2; 1748 si &= bi2; 1749 ui &= ui2; 1750 ui &= bi2; 1751 bi &= bi2; 1752 1753 sl &= sl2; 1754 sl &= bl2; 1755 ul &= ul2; 1756 ul &= bl2; 1757 bl &= bl2; 1758 1759 slll &= slll2; 1760 slll &= blll2; 1761 ulll &= ulll2; 1762 ulll &= blll2; 1763 blll &= blll2; 1764 } 1765 1766 // CHECK-LABEL: define dso_local void @test_or( 1767 // CHECK-SAME: ) #[[ATTR0]] { 1768 // CHECK-NEXT: [[ENTRY:.*:]] 1769 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 1770 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 1771 // CHECK-NEXT: [[OR:%.*]] = or <16 x i8> [[TMP0]], [[TMP1]] 1772 // CHECK-NEXT: store volatile <16 x i8> [[OR]], ptr @sc, align 8 1773 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 1774 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 1775 // CHECK-NEXT: [[OR1:%.*]] = or <16 x i8> [[TMP2]], [[TMP3]] 1776 // CHECK-NEXT: store volatile <16 x i8> [[OR1]], ptr @sc, align 8 1777 // CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 1778 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 1779 // CHECK-NEXT: [[OR2:%.*]] = or <16 x i8> [[TMP4]], [[TMP5]] 1780 // CHECK-NEXT: store volatile <16 x i8> [[OR2]], ptr @sc, align 8 1781 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 1782 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 1783 // CHECK-NEXT: [[OR3:%.*]] = or <16 x i8> [[TMP6]], [[TMP7]] 1784 // CHECK-NEXT: store volatile <16 x i8> [[OR3]], ptr @uc, align 8 1785 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 1786 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 1787 // CHECK-NEXT: [[OR4:%.*]] = or <16 x i8> [[TMP8]], [[TMP9]] 1788 // CHECK-NEXT: store volatile <16 x i8> [[OR4]], ptr @uc, align 8 1789 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 1790 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 1791 // CHECK-NEXT: [[OR5:%.*]] = or <16 x i8> [[TMP10]], [[TMP11]] 1792 // CHECK-NEXT: store volatile <16 x i8> [[OR5]], ptr @uc, align 8 1793 // CHECK-NEXT: [[TMP12:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 1794 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 1795 // CHECK-NEXT: [[OR6:%.*]] = or <16 x i8> [[TMP12]], [[TMP13]] 1796 // CHECK-NEXT: store volatile <16 x i8> [[OR6]], ptr @bc, align 8 1797 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 1798 // CHECK-NEXT: 
[[TMP15:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 1799 // CHECK-NEXT: [[OR7:%.*]] = or <8 x i16> [[TMP14]], [[TMP15]] 1800 // CHECK-NEXT: store volatile <8 x i16> [[OR7]], ptr @ss, align 8 1801 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 1802 // CHECK-NEXT: [[TMP17:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 1803 // CHECK-NEXT: [[OR8:%.*]] = or <8 x i16> [[TMP16]], [[TMP17]] 1804 // CHECK-NEXT: store volatile <8 x i16> [[OR8]], ptr @ss, align 8 1805 // CHECK-NEXT: [[TMP18:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 1806 // CHECK-NEXT: [[TMP19:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 1807 // CHECK-NEXT: [[OR9:%.*]] = or <8 x i16> [[TMP18]], [[TMP19]] 1808 // CHECK-NEXT: store volatile <8 x i16> [[OR9]], ptr @ss, align 8 1809 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <8 x i16>, ptr @us, align 8 1810 // CHECK-NEXT: [[TMP21:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 1811 // CHECK-NEXT: [[OR10:%.*]] = or <8 x i16> [[TMP20]], [[TMP21]] 1812 // CHECK-NEXT: store volatile <8 x i16> [[OR10]], ptr @us, align 8 1813 // CHECK-NEXT: [[TMP22:%.*]] = load volatile <8 x i16>, ptr @us, align 8 1814 // CHECK-NEXT: [[TMP23:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 1815 // CHECK-NEXT: [[OR11:%.*]] = or <8 x i16> [[TMP22]], [[TMP23]] 1816 // CHECK-NEXT: store volatile <8 x i16> [[OR11]], ptr @us, align 8 1817 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 1818 // CHECK-NEXT: [[TMP25:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 1819 // CHECK-NEXT: [[OR12:%.*]] = or <8 x i16> [[TMP24]], [[TMP25]] 1820 // CHECK-NEXT: store volatile <8 x i16> [[OR12]], ptr @us, align 8 1821 // CHECK-NEXT: [[TMP26:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 1822 // CHECK-NEXT: [[TMP27:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 1823 // CHECK-NEXT: [[OR13:%.*]] = or <8 x i16> [[TMP26]], [[TMP27]] 1824 // CHECK-NEXT: store volatile <8 x i16> [[OR13]], ptr @bs, align 8 1825 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @si, align 8 1826 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 1827 // CHECK-NEXT: [[OR14:%.*]] = or <4 x i32> [[TMP28]], [[TMP29]] 1828 // CHECK-NEXT: store volatile <4 x i32> [[OR14]], ptr @si, align 8 1829 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <4 x i32>, ptr @si, align 8 1830 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 1831 // CHECK-NEXT: [[OR15:%.*]] = or <4 x i32> [[TMP30]], [[TMP31]] 1832 // CHECK-NEXT: store volatile <4 x i32> [[OR15]], ptr @si, align 8 1833 // CHECK-NEXT: [[TMP32:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 1834 // CHECK-NEXT: [[TMP33:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 1835 // CHECK-NEXT: [[OR16:%.*]] = or <4 x i32> [[TMP32]], [[TMP33]] 1836 // CHECK-NEXT: store volatile <4 x i32> [[OR16]], ptr @si, align 8 1837 // CHECK-NEXT: [[TMP34:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 1838 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 1839 // CHECK-NEXT: [[OR17:%.*]] = or <4 x i32> [[TMP34]], [[TMP35]] 1840 // CHECK-NEXT: store volatile <4 x i32> [[OR17]], ptr @ui, align 8 1841 // CHECK-NEXT: [[TMP36:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 1842 // CHECK-NEXT: [[TMP37:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 1843 // CHECK-NEXT: [[OR18:%.*]] = or <4 x i32> [[TMP36]], [[TMP37]] 1844 // CHECK-NEXT: store volatile <4 x i32> [[OR18]], ptr @ui, align 8 1845 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 1846 
// CHECK-NEXT: [[TMP39:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 1847 // CHECK-NEXT: [[OR19:%.*]] = or <4 x i32> [[TMP38]], [[TMP39]] 1848 // CHECK-NEXT: store volatile <4 x i32> [[OR19]], ptr @ui, align 8 1849 // CHECK-NEXT: [[TMP40:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 1850 // CHECK-NEXT: [[TMP41:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 1851 // CHECK-NEXT: [[OR20:%.*]] = or <4 x i32> [[TMP40]], [[TMP41]] 1852 // CHECK-NEXT: store volatile <4 x i32> [[OR20]], ptr @bi, align 8 1853 // CHECK-NEXT: [[TMP42:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 1854 // CHECK-NEXT: [[TMP43:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 1855 // CHECK-NEXT: [[OR21:%.*]] = or <2 x i64> [[TMP42]], [[TMP43]] 1856 // CHECK-NEXT: store volatile <2 x i64> [[OR21]], ptr @sl, align 8 1857 // CHECK-NEXT: [[TMP44:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 1858 // CHECK-NEXT: [[TMP45:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 1859 // CHECK-NEXT: [[OR22:%.*]] = or <2 x i64> [[TMP44]], [[TMP45]] 1860 // CHECK-NEXT: store volatile <2 x i64> [[OR22]], ptr @sl, align 8 1861 // CHECK-NEXT: [[TMP46:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 1862 // CHECK-NEXT: [[TMP47:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 1863 // CHECK-NEXT: [[OR23:%.*]] = or <2 x i64> [[TMP46]], [[TMP47]] 1864 // CHECK-NEXT: store volatile <2 x i64> [[OR23]], ptr @sl, align 8 1865 // CHECK-NEXT: [[TMP48:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 1866 // CHECK-NEXT: [[TMP49:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 1867 // CHECK-NEXT: [[OR24:%.*]] = or <2 x i64> [[TMP48]], [[TMP49]] 1868 // CHECK-NEXT: store volatile <2 x i64> [[OR24]], ptr @ul, align 8 1869 // CHECK-NEXT: [[TMP50:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 1870 // CHECK-NEXT: [[TMP51:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 1871 // CHECK-NEXT: [[OR25:%.*]] = or <2 x i64> [[TMP50]], [[TMP51]] 1872 // CHECK-NEXT: store volatile <2 x i64> [[OR25]], ptr @ul, align 8 1873 // CHECK-NEXT: [[TMP52:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 1874 // CHECK-NEXT: [[TMP53:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 1875 // CHECK-NEXT: [[OR26:%.*]] = or <2 x i64> [[TMP52]], [[TMP53]] 1876 // CHECK-NEXT: store volatile <2 x i64> [[OR26]], ptr @ul, align 8 1877 // CHECK-NEXT: [[TMP54:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 1878 // CHECK-NEXT: [[TMP55:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 1879 // CHECK-NEXT: [[OR27:%.*]] = or <2 x i64> [[TMP54]], [[TMP55]] 1880 // CHECK-NEXT: store volatile <2 x i64> [[OR27]], ptr @bl, align 8 1881 // CHECK-NEXT: [[TMP56:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 1882 // CHECK-NEXT: [[TMP57:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 1883 // CHECK-NEXT: [[OR28:%.*]] = or <1 x i128> [[TMP56]], [[TMP57]] 1884 // CHECK-NEXT: store volatile <1 x i128> [[OR28]], ptr @slll, align 8 1885 // CHECK-NEXT: [[TMP58:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 1886 // CHECK-NEXT: [[TMP59:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 1887 // CHECK-NEXT: [[OR29:%.*]] = or <1 x i128> [[TMP58]], [[TMP59]] 1888 // CHECK-NEXT: store volatile <1 x i128> [[OR29]], ptr @slll, align 8 1889 // CHECK-NEXT: [[TMP60:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 1890 // CHECK-NEXT: [[TMP61:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 1891 // CHECK-NEXT: [[OR30:%.*]] = or <1 x i128> [[TMP60]], [[TMP61]] 1892 // CHECK-NEXT: store volatile <1 x i128> [[OR30]], ptr @slll, align 8 1893 // CHECK-NEXT: 
[[TMP62:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 1894 // CHECK-NEXT: [[TMP63:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 1895 // CHECK-NEXT: [[OR31:%.*]] = or <1 x i128> [[TMP62]], [[TMP63]] 1896 // CHECK-NEXT: store volatile <1 x i128> [[OR31]], ptr @ulll, align 8 1897 // CHECK-NEXT: [[TMP64:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 1898 // CHECK-NEXT: [[TMP65:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 1899 // CHECK-NEXT: [[OR32:%.*]] = or <1 x i128> [[TMP64]], [[TMP65]] 1900 // CHECK-NEXT: store volatile <1 x i128> [[OR32]], ptr @ulll, align 8 1901 // CHECK-NEXT: [[TMP66:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 1902 // CHECK-NEXT: [[TMP67:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 1903 // CHECK-NEXT: [[OR33:%.*]] = or <1 x i128> [[TMP66]], [[TMP67]] 1904 // CHECK-NEXT: store volatile <1 x i128> [[OR33]], ptr @ulll, align 8 1905 // CHECK-NEXT: [[TMP68:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 1906 // CHECK-NEXT: [[TMP69:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 1907 // CHECK-NEXT: [[OR34:%.*]] = or <1 x i128> [[TMP68]], [[TMP69]] 1908 // CHECK-NEXT: store volatile <1 x i128> [[OR34]], ptr @blll, align 8 1909 // CHECK-NEXT: ret void 1910 // 1911 void test_or(void) { 1912 1913 sc = sc | sc2; 1914 sc = sc | bc2; 1915 sc = bc | sc2; 1916 uc = uc | uc2; 1917 uc = uc | bc2; 1918 uc = bc | uc2; 1919 bc = bc | bc2; 1920 1921 ss = ss | ss2; 1922 ss = ss | bs2; 1923 ss = bs | ss2; 1924 us = us | us2; 1925 us = us | bs2; 1926 us = bs | us2; 1927 bs = bs | bs2; 1928 1929 si = si | si2; 1930 si = si | bi2; 1931 si = bi | si2; 1932 ui = ui | ui2; 1933 ui = ui | bi2; 1934 ui = bi | ui2; 1935 bi = bi | bi2; 1936 1937 sl = sl | sl2; 1938 sl = sl | bl2; 1939 sl = bl | sl2; 1940 ul = ul | ul2; 1941 ul = ul | bl2; 1942 ul = bl | ul2; 1943 bl = bl | bl2; 1944 1945 slll = slll | slll2; 1946 slll = slll | blll2; 1947 slll = blll | slll2; 1948 ulll = ulll | ulll2; 1949 ulll = ulll | blll2; 1950 ulll = blll | ulll2; 1951 blll = blll | blll2; 1952 } 1953 1954 // CHECK-LABEL: define dso_local void @test_or_assign( 1955 // CHECK-SAME: ) #[[ATTR0]] { 1956 // CHECK-NEXT: [[ENTRY:.*:]] 1957 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 1958 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 1959 // CHECK-NEXT: [[OR:%.*]] = or <16 x i8> [[TMP1]], [[TMP0]] 1960 // CHECK-NEXT: store volatile <16 x i8> [[OR]], ptr @sc, align 8 1961 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 1962 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 1963 // CHECK-NEXT: [[OR1:%.*]] = or <16 x i8> [[TMP3]], [[TMP2]] 1964 // CHECK-NEXT: store volatile <16 x i8> [[OR1]], ptr @sc, align 8 1965 // CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 1966 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 1967 // CHECK-NEXT: [[OR2:%.*]] = or <16 x i8> [[TMP5]], [[TMP4]] 1968 // CHECK-NEXT: store volatile <16 x i8> [[OR2]], ptr @uc, align 8 1969 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 1970 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 1971 // CHECK-NEXT: [[OR3:%.*]] = or <16 x i8> [[TMP7]], [[TMP6]] 1972 // CHECK-NEXT: store volatile <16 x i8> [[OR3]], ptr @uc, align 8 1973 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 1974 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 1975 // CHECK-NEXT: [[OR4:%.*]] = or <16 x i8> 
[[TMP9]], [[TMP8]] 1976 // CHECK-NEXT: store volatile <16 x i8> [[OR4]], ptr @bc, align 8 1977 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 1978 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 1979 // CHECK-NEXT: [[OR5:%.*]] = or <8 x i16> [[TMP11]], [[TMP10]] 1980 // CHECK-NEXT: store volatile <8 x i16> [[OR5]], ptr @ss, align 8 1981 // CHECK-NEXT: [[TMP12:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 1982 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 1983 // CHECK-NEXT: [[OR6:%.*]] = or <8 x i16> [[TMP13]], [[TMP12]] 1984 // CHECK-NEXT: store volatile <8 x i16> [[OR6]], ptr @ss, align 8 1985 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 1986 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <8 x i16>, ptr @us, align 8 1987 // CHECK-NEXT: [[OR7:%.*]] = or <8 x i16> [[TMP15]], [[TMP14]] 1988 // CHECK-NEXT: store volatile <8 x i16> [[OR7]], ptr @us, align 8 1989 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 1990 // CHECK-NEXT: [[TMP17:%.*]] = load volatile <8 x i16>, ptr @us, align 8 1991 // CHECK-NEXT: [[OR8:%.*]] = or <8 x i16> [[TMP17]], [[TMP16]] 1992 // CHECK-NEXT: store volatile <8 x i16> [[OR8]], ptr @us, align 8 1993 // CHECK-NEXT: [[TMP18:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 1994 // CHECK-NEXT: [[TMP19:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 1995 // CHECK-NEXT: [[OR9:%.*]] = or <8 x i16> [[TMP19]], [[TMP18]] 1996 // CHECK-NEXT: store volatile <8 x i16> [[OR9]], ptr @bs, align 8 1997 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 1998 // CHECK-NEXT: [[TMP21:%.*]] = load volatile <4 x i32>, ptr @si, align 8 1999 // CHECK-NEXT: [[OR10:%.*]] = or <4 x i32> [[TMP21]], [[TMP20]] 2000 // CHECK-NEXT: store volatile <4 x i32> [[OR10]], ptr @si, align 8 2001 // CHECK-NEXT: [[TMP22:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 2002 // CHECK-NEXT: [[TMP23:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2003 // CHECK-NEXT: [[OR11:%.*]] = or <4 x i32> [[TMP23]], [[TMP22]] 2004 // CHECK-NEXT: store volatile <4 x i32> [[OR11]], ptr @si, align 8 2005 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 2006 // CHECK-NEXT: [[TMP25:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2007 // CHECK-NEXT: [[OR12:%.*]] = or <4 x i32> [[TMP25]], [[TMP24]] 2008 // CHECK-NEXT: store volatile <4 x i32> [[OR12]], ptr @ui, align 8 2009 // CHECK-NEXT: [[TMP26:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 2010 // CHECK-NEXT: [[TMP27:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2011 // CHECK-NEXT: [[OR13:%.*]] = or <4 x i32> [[TMP27]], [[TMP26]] 2012 // CHECK-NEXT: store volatile <4 x i32> [[OR13]], ptr @ui, align 8 2013 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 2014 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 2015 // CHECK-NEXT: [[OR14:%.*]] = or <4 x i32> [[TMP29]], [[TMP28]] 2016 // CHECK-NEXT: store volatile <4 x i32> [[OR14]], ptr @bi, align 8 2017 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 2018 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2019 // CHECK-NEXT: [[OR15:%.*]] = or <2 x i64> [[TMP31]], [[TMP30]] 2020 // CHECK-NEXT: store volatile <2 x i64> [[OR15]], ptr @sl, align 8 2021 // CHECK-NEXT: [[TMP32:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 2022 // CHECK-NEXT: [[TMP33:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2023 // CHECK-NEXT: [[OR16:%.*]] = or 
<2 x i64> [[TMP33]], [[TMP32]] 2024 // CHECK-NEXT: store volatile <2 x i64> [[OR16]], ptr @sl, align 8 2025 // CHECK-NEXT: [[TMP34:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 2026 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 2027 // CHECK-NEXT: [[OR17:%.*]] = or <2 x i64> [[TMP35]], [[TMP34]] 2028 // CHECK-NEXT: store volatile <2 x i64> [[OR17]], ptr @ul, align 8 2029 // CHECK-NEXT: [[TMP36:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 2030 // CHECK-NEXT: [[TMP37:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 2031 // CHECK-NEXT: [[OR18:%.*]] = or <2 x i64> [[TMP37]], [[TMP36]] 2032 // CHECK-NEXT: store volatile <2 x i64> [[OR18]], ptr @ul, align 8 2033 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 2034 // CHECK-NEXT: [[TMP39:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 2035 // CHECK-NEXT: [[OR19:%.*]] = or <2 x i64> [[TMP39]], [[TMP38]] 2036 // CHECK-NEXT: store volatile <2 x i64> [[OR19]], ptr @bl, align 8 2037 // CHECK-NEXT: [[TMP40:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 2038 // CHECK-NEXT: [[TMP41:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 2039 // CHECK-NEXT: [[OR20:%.*]] = or <1 x i128> [[TMP41]], [[TMP40]] 2040 // CHECK-NEXT: store volatile <1 x i128> [[OR20]], ptr @slll, align 8 2041 // CHECK-NEXT: [[TMP42:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 2042 // CHECK-NEXT: [[TMP43:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 2043 // CHECK-NEXT: [[OR21:%.*]] = or <1 x i128> [[TMP43]], [[TMP42]] 2044 // CHECK-NEXT: store volatile <1 x i128> [[OR21]], ptr @slll, align 8 2045 // CHECK-NEXT: [[TMP44:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 2046 // CHECK-NEXT: [[TMP45:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 2047 // CHECK-NEXT: [[OR22:%.*]] = or <1 x i128> [[TMP45]], [[TMP44]] 2048 // CHECK-NEXT: store volatile <1 x i128> [[OR22]], ptr @ulll, align 8 2049 // CHECK-NEXT: [[TMP46:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 2050 // CHECK-NEXT: [[TMP47:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 2051 // CHECK-NEXT: [[OR23:%.*]] = or <1 x i128> [[TMP47]], [[TMP46]] 2052 // CHECK-NEXT: store volatile <1 x i128> [[OR23]], ptr @ulll, align 8 2053 // CHECK-NEXT: [[TMP48:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 2054 // CHECK-NEXT: [[TMP49:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 2055 // CHECK-NEXT: [[OR24:%.*]] = or <1 x i128> [[TMP49]], [[TMP48]] 2056 // CHECK-NEXT: store volatile <1 x i128> [[OR24]], ptr @blll, align 8 2057 // CHECK-NEXT: ret void 2058 // 2059 void test_or_assign(void) { 2060 2061 sc |= sc2; 2062 sc |= bc2; 2063 uc |= uc2; 2064 uc |= bc2; 2065 bc |= bc2; 2066 2067 ss |= ss2; 2068 ss |= bs2; 2069 us |= us2; 2070 us |= bs2; 2071 bs |= bs2; 2072 2073 si |= si2; 2074 si |= bi2; 2075 ui |= ui2; 2076 ui |= bi2; 2077 bi |= bi2; 2078 2079 sl |= sl2; 2080 sl |= bl2; 2081 ul |= ul2; 2082 ul |= bl2; 2083 bl |= bl2; 2084 2085 slll |= slll2; 2086 slll |= blll2; 2087 ulll |= ulll2; 2088 ulll |= blll2; 2089 blll |= blll2; 2090 } 2091 2092 // CHECK-LABEL: define dso_local void @test_xor( 2093 // CHECK-SAME: ) #[[ATTR0]] { 2094 // CHECK-NEXT: [[ENTRY:.*:]] 2095 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 2096 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 2097 // CHECK-NEXT: [[XOR:%.*]] = xor <16 x i8> [[TMP0]], [[TMP1]] 2098 // CHECK-NEXT: store volatile <16 x i8> [[XOR]], ptr @sc, align 8 2099 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @sc, align 
8 2100 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 2101 // CHECK-NEXT: [[XOR1:%.*]] = xor <16 x i8> [[TMP2]], [[TMP3]] 2102 // CHECK-NEXT: store volatile <16 x i8> [[XOR1]], ptr @sc, align 8 2103 // CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 2104 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 2105 // CHECK-NEXT: [[XOR2:%.*]] = xor <16 x i8> [[TMP4]], [[TMP5]] 2106 // CHECK-NEXT: store volatile <16 x i8> [[XOR2]], ptr @sc, align 8 2107 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 2108 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 2109 // CHECK-NEXT: [[XOR3:%.*]] = xor <16 x i8> [[TMP6]], [[TMP7]] 2110 // CHECK-NEXT: store volatile <16 x i8> [[XOR3]], ptr @uc, align 8 2111 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 2112 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 2113 // CHECK-NEXT: [[XOR4:%.*]] = xor <16 x i8> [[TMP8]], [[TMP9]] 2114 // CHECK-NEXT: store volatile <16 x i8> [[XOR4]], ptr @uc, align 8 2115 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 2116 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 2117 // CHECK-NEXT: [[XOR5:%.*]] = xor <16 x i8> [[TMP10]], [[TMP11]] 2118 // CHECK-NEXT: store volatile <16 x i8> [[XOR5]], ptr @uc, align 8 2119 // CHECK-NEXT: [[TMP12:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 2120 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 2121 // CHECK-NEXT: [[XOR6:%.*]] = xor <16 x i8> [[TMP12]], [[TMP13]] 2122 // CHECK-NEXT: store volatile <16 x i8> [[XOR6]], ptr @bc, align 8 2123 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 2124 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 2125 // CHECK-NEXT: [[XOR7:%.*]] = xor <8 x i16> [[TMP14]], [[TMP15]] 2126 // CHECK-NEXT: store volatile <8 x i16> [[XOR7]], ptr @ss, align 8 2127 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 2128 // CHECK-NEXT: [[TMP17:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 2129 // CHECK-NEXT: [[XOR8:%.*]] = xor <8 x i16> [[TMP16]], [[TMP17]] 2130 // CHECK-NEXT: store volatile <8 x i16> [[XOR8]], ptr @ss, align 8 2131 // CHECK-NEXT: [[TMP18:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 2132 // CHECK-NEXT: [[TMP19:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 2133 // CHECK-NEXT: [[XOR9:%.*]] = xor <8 x i16> [[TMP18]], [[TMP19]] 2134 // CHECK-NEXT: store volatile <8 x i16> [[XOR9]], ptr @ss, align 8 2135 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2136 // CHECK-NEXT: [[TMP21:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 2137 // CHECK-NEXT: [[XOR10:%.*]] = xor <8 x i16> [[TMP20]], [[TMP21]] 2138 // CHECK-NEXT: store volatile <8 x i16> [[XOR10]], ptr @us, align 8 2139 // CHECK-NEXT: [[TMP22:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2140 // CHECK-NEXT: [[TMP23:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 2141 // CHECK-NEXT: [[XOR11:%.*]] = xor <8 x i16> [[TMP22]], [[TMP23]] 2142 // CHECK-NEXT: store volatile <8 x i16> [[XOR11]], ptr @us, align 8 2143 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 2144 // CHECK-NEXT: [[TMP25:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 2145 // CHECK-NEXT: [[XOR12:%.*]] = xor <8 x i16> [[TMP24]], [[TMP25]] 2146 // CHECK-NEXT: store volatile <8 x i16> [[XOR12]], ptr @us, align 8 2147 // CHECK-NEXT: [[TMP26:%.*]] = load volatile 
<8 x i16>, ptr @bs, align 8 2148 // CHECK-NEXT: [[TMP27:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 2149 // CHECK-NEXT: [[XOR13:%.*]] = xor <8 x i16> [[TMP26]], [[TMP27]] 2150 // CHECK-NEXT: store volatile <8 x i16> [[XOR13]], ptr @bs, align 8 2151 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2152 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 2153 // CHECK-NEXT: [[XOR14:%.*]] = xor <4 x i32> [[TMP28]], [[TMP29]] 2154 // CHECK-NEXT: store volatile <4 x i32> [[XOR14]], ptr @si, align 8 2155 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2156 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 2157 // CHECK-NEXT: [[XOR15:%.*]] = xor <4 x i32> [[TMP30]], [[TMP31]] 2158 // CHECK-NEXT: store volatile <4 x i32> [[XOR15]], ptr @si, align 8 2159 // CHECK-NEXT: [[TMP32:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 2160 // CHECK-NEXT: [[TMP33:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 2161 // CHECK-NEXT: [[XOR16:%.*]] = xor <4 x i32> [[TMP32]], [[TMP33]] 2162 // CHECK-NEXT: store volatile <4 x i32> [[XOR16]], ptr @si, align 8 2163 // CHECK-NEXT: [[TMP34:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2164 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 2165 // CHECK-NEXT: [[XOR17:%.*]] = xor <4 x i32> [[TMP34]], [[TMP35]] 2166 // CHECK-NEXT: store volatile <4 x i32> [[XOR17]], ptr @ui, align 8 2167 // CHECK-NEXT: [[TMP36:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2168 // CHECK-NEXT: [[TMP37:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 2169 // CHECK-NEXT: [[XOR18:%.*]] = xor <4 x i32> [[TMP36]], [[TMP37]] 2170 // CHECK-NEXT: store volatile <4 x i32> [[XOR18]], ptr @ui, align 8 2171 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 2172 // CHECK-NEXT: [[TMP39:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 2173 // CHECK-NEXT: [[XOR19:%.*]] = xor <4 x i32> [[TMP38]], [[TMP39]] 2174 // CHECK-NEXT: store volatile <4 x i32> [[XOR19]], ptr @ui, align 8 2175 // CHECK-NEXT: [[TMP40:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 2176 // CHECK-NEXT: [[TMP41:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 2177 // CHECK-NEXT: [[XOR20:%.*]] = xor <4 x i32> [[TMP40]], [[TMP41]] 2178 // CHECK-NEXT: store volatile <4 x i32> [[XOR20]], ptr @bi, align 8 2179 // CHECK-NEXT: [[TMP42:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2180 // CHECK-NEXT: [[TMP43:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 2181 // CHECK-NEXT: [[XOR21:%.*]] = xor <2 x i64> [[TMP42]], [[TMP43]] 2182 // CHECK-NEXT: store volatile <2 x i64> [[XOR21]], ptr @sl, align 8 2183 // CHECK-NEXT: [[TMP44:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2184 // CHECK-NEXT: [[TMP45:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 2185 // CHECK-NEXT: [[XOR22:%.*]] = xor <2 x i64> [[TMP44]], [[TMP45]] 2186 // CHECK-NEXT: store volatile <2 x i64> [[XOR22]], ptr @sl, align 8 2187 // CHECK-NEXT: [[TMP46:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 2188 // CHECK-NEXT: [[TMP47:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 2189 // CHECK-NEXT: [[XOR23:%.*]] = xor <2 x i64> [[TMP46]], [[TMP47]] 2190 // CHECK-NEXT: store volatile <2 x i64> [[XOR23]], ptr @sl, align 8 2191 // CHECK-NEXT: [[TMP48:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 2192 // CHECK-NEXT: [[TMP49:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 2193 // CHECK-NEXT: [[XOR24:%.*]] = xor <2 x i64> [[TMP48]], [[TMP49]] 2194 // CHECK-NEXT: store volatile <2 x i64> [[XOR24]], ptr @ul, 
align 8 2195 // CHECK-NEXT: [[TMP50:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 2196 // CHECK-NEXT: [[TMP51:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 2197 // CHECK-NEXT: [[XOR25:%.*]] = xor <2 x i64> [[TMP50]], [[TMP51]] 2198 // CHECK-NEXT: store volatile <2 x i64> [[XOR25]], ptr @ul, align 8 2199 // CHECK-NEXT: [[TMP52:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 2200 // CHECK-NEXT: [[TMP53:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 2201 // CHECK-NEXT: [[XOR26:%.*]] = xor <2 x i64> [[TMP52]], [[TMP53]] 2202 // CHECK-NEXT: store volatile <2 x i64> [[XOR26]], ptr @ul, align 8 2203 // CHECK-NEXT: [[TMP54:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 2204 // CHECK-NEXT: [[TMP55:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 2205 // CHECK-NEXT: [[XOR27:%.*]] = xor <2 x i64> [[TMP54]], [[TMP55]] 2206 // CHECK-NEXT: store volatile <2 x i64> [[XOR27]], ptr @bl, align 8 2207 // CHECK-NEXT: [[TMP56:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 2208 // CHECK-NEXT: [[TMP57:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 2209 // CHECK-NEXT: [[XOR28:%.*]] = xor <1 x i128> [[TMP56]], [[TMP57]] 2210 // CHECK-NEXT: store volatile <1 x i128> [[XOR28]], ptr @slll, align 8 2211 // CHECK-NEXT: [[TMP58:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 2212 // CHECK-NEXT: [[TMP59:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 2213 // CHECK-NEXT: [[XOR29:%.*]] = xor <1 x i128> [[TMP58]], [[TMP59]] 2214 // CHECK-NEXT: store volatile <1 x i128> [[XOR29]], ptr @slll, align 8 2215 // CHECK-NEXT: [[TMP60:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 2216 // CHECK-NEXT: [[TMP61:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 2217 // CHECK-NEXT: [[XOR30:%.*]] = xor <1 x i128> [[TMP60]], [[TMP61]] 2218 // CHECK-NEXT: store volatile <1 x i128> [[XOR30]], ptr @slll, align 8 2219 // CHECK-NEXT: [[TMP62:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 2220 // CHECK-NEXT: [[TMP63:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 2221 // CHECK-NEXT: [[XOR31:%.*]] = xor <1 x i128> [[TMP62]], [[TMP63]] 2222 // CHECK-NEXT: store volatile <1 x i128> [[XOR31]], ptr @ulll, align 8 2223 // CHECK-NEXT: [[TMP64:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 2224 // CHECK-NEXT: [[TMP65:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 2225 // CHECK-NEXT: [[XOR32:%.*]] = xor <1 x i128> [[TMP64]], [[TMP65]] 2226 // CHECK-NEXT: store volatile <1 x i128> [[XOR32]], ptr @ulll, align 8 2227 // CHECK-NEXT: [[TMP66:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 2228 // CHECK-NEXT: [[TMP67:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 2229 // CHECK-NEXT: [[XOR33:%.*]] = xor <1 x i128> [[TMP66]], [[TMP67]] 2230 // CHECK-NEXT: store volatile <1 x i128> [[XOR33]], ptr @ulll, align 8 2231 // CHECK-NEXT: [[TMP68:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 2232 // CHECK-NEXT: [[TMP69:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 2233 // CHECK-NEXT: [[XOR34:%.*]] = xor <1 x i128> [[TMP68]], [[TMP69]] 2234 // CHECK-NEXT: store volatile <1 x i128> [[XOR34]], ptr @blll, align 8 2235 // CHECK-NEXT: ret void 2236 // 2237 void test_xor(void) { 2238 2239 sc = sc ^ sc2; 2240 sc = sc ^ bc2; 2241 sc = bc ^ sc2; 2242 uc = uc ^ uc2; 2243 uc = uc ^ bc2; 2244 uc = bc ^ uc2; 2245 bc = bc ^ bc2; 2246 2247 ss = ss ^ ss2; 2248 ss = ss ^ bs2; 2249 ss = bs ^ ss2; 2250 us = us ^ us2; 2251 us = us ^ bs2; 2252 us = bs ^ us2; 2253 bs = bs ^ bs2; 2254 2255 si = si ^ si2; 2256 si = si ^ bi2; 2257 si = bi ^ si2; 2258 ui = ui ^ ui2; 2259 ui = ui ^ bi2; 2260 ui 
= bi ^ ui2; 2261 bi = bi ^ bi2; 2262 2263 sl = sl ^ sl2; 2264 sl = sl ^ bl2; 2265 sl = bl ^ sl2; 2266 ul = ul ^ ul2; 2267 ul = ul ^ bl2; 2268 ul = bl ^ ul2; 2269 bl = bl ^ bl2; 2270 2271 slll = slll ^ slll2; 2272 slll = slll ^ blll2; 2273 slll = blll ^ slll2; 2274 ulll = ulll ^ ulll2; 2275 ulll = ulll ^ blll2; 2276 ulll = blll ^ ulll2; 2277 blll = blll ^ blll2; 2278 } 2279 2280 // CHECK-LABEL: define dso_local void @test_xor_assign( 2281 // CHECK-SAME: ) #[[ATTR0]] { 2282 // CHECK-NEXT: [[ENTRY:.*:]] 2283 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 2284 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 2285 // CHECK-NEXT: [[XOR:%.*]] = xor <16 x i8> [[TMP1]], [[TMP0]] 2286 // CHECK-NEXT: store volatile <16 x i8> [[XOR]], ptr @sc, align 8 2287 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 2288 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 2289 // CHECK-NEXT: [[XOR1:%.*]] = xor <16 x i8> [[TMP3]], [[TMP2]] 2290 // CHECK-NEXT: store volatile <16 x i8> [[XOR1]], ptr @sc, align 8 2291 // CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 2292 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 2293 // CHECK-NEXT: [[XOR2:%.*]] = xor <16 x i8> [[TMP5]], [[TMP4]] 2294 // CHECK-NEXT: store volatile <16 x i8> [[XOR2]], ptr @uc, align 8 2295 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 2296 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 2297 // CHECK-NEXT: [[XOR3:%.*]] = xor <16 x i8> [[TMP7]], [[TMP6]] 2298 // CHECK-NEXT: store volatile <16 x i8> [[XOR3]], ptr @uc, align 8 2299 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 2300 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 2301 // CHECK-NEXT: [[XOR4:%.*]] = xor <16 x i8> [[TMP9]], [[TMP8]] 2302 // CHECK-NEXT: store volatile <16 x i8> [[XOR4]], ptr @bc, align 8 2303 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 2304 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 2305 // CHECK-NEXT: [[XOR5:%.*]] = xor <8 x i16> [[TMP11]], [[TMP10]] 2306 // CHECK-NEXT: store volatile <8 x i16> [[XOR5]], ptr @ss, align 8 2307 // CHECK-NEXT: [[TMP12:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 2308 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 2309 // CHECK-NEXT: [[XOR6:%.*]] = xor <8 x i16> [[TMP13]], [[TMP12]] 2310 // CHECK-NEXT: store volatile <8 x i16> [[XOR6]], ptr @ss, align 8 2311 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 2312 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2313 // CHECK-NEXT: [[XOR7:%.*]] = xor <8 x i16> [[TMP15]], [[TMP14]] 2314 // CHECK-NEXT: store volatile <8 x i16> [[XOR7]], ptr @us, align 8 2315 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 2316 // CHECK-NEXT: [[TMP17:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2317 // CHECK-NEXT: [[XOR8:%.*]] = xor <8 x i16> [[TMP17]], [[TMP16]] 2318 // CHECK-NEXT: store volatile <8 x i16> [[XOR8]], ptr @us, align 8 2319 // CHECK-NEXT: [[TMP18:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 2320 // CHECK-NEXT: [[TMP19:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 2321 // CHECK-NEXT: [[XOR9:%.*]] = xor <8 x i16> [[TMP19]], [[TMP18]] 2322 // CHECK-NEXT: store volatile <8 x i16> [[XOR9]], ptr @bs, align 8 2323 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 
2324 // CHECK-NEXT: [[TMP21:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2325 // CHECK-NEXT: [[XOR10:%.*]] = xor <4 x i32> [[TMP21]], [[TMP20]] 2326 // CHECK-NEXT: store volatile <4 x i32> [[XOR10]], ptr @si, align 8 2327 // CHECK-NEXT: [[TMP22:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 2328 // CHECK-NEXT: [[TMP23:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2329 // CHECK-NEXT: [[XOR11:%.*]] = xor <4 x i32> [[TMP23]], [[TMP22]] 2330 // CHECK-NEXT: store volatile <4 x i32> [[XOR11]], ptr @si, align 8 2331 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 2332 // CHECK-NEXT: [[TMP25:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2333 // CHECK-NEXT: [[XOR12:%.*]] = xor <4 x i32> [[TMP25]], [[TMP24]] 2334 // CHECK-NEXT: store volatile <4 x i32> [[XOR12]], ptr @ui, align 8 2335 // CHECK-NEXT: [[TMP26:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 2336 // CHECK-NEXT: [[TMP27:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2337 // CHECK-NEXT: [[XOR13:%.*]] = xor <4 x i32> [[TMP27]], [[TMP26]] 2338 // CHECK-NEXT: store volatile <4 x i32> [[XOR13]], ptr @ui, align 8 2339 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 2340 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 2341 // CHECK-NEXT: [[XOR14:%.*]] = xor <4 x i32> [[TMP29]], [[TMP28]] 2342 // CHECK-NEXT: store volatile <4 x i32> [[XOR14]], ptr @bi, align 8 2343 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 2344 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2345 // CHECK-NEXT: [[XOR15:%.*]] = xor <2 x i64> [[TMP31]], [[TMP30]] 2346 // CHECK-NEXT: store volatile <2 x i64> [[XOR15]], ptr @sl, align 8 2347 // CHECK-NEXT: [[TMP32:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 2348 // CHECK-NEXT: [[TMP33:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2349 // CHECK-NEXT: [[XOR16:%.*]] = xor <2 x i64> [[TMP33]], [[TMP32]] 2350 // CHECK-NEXT: store volatile <2 x i64> [[XOR16]], ptr @sl, align 8 2351 // CHECK-NEXT: [[TMP34:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 2352 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 2353 // CHECK-NEXT: [[XOR17:%.*]] = xor <2 x i64> [[TMP35]], [[TMP34]] 2354 // CHECK-NEXT: store volatile <2 x i64> [[XOR17]], ptr @ul, align 8 2355 // CHECK-NEXT: [[TMP36:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 2356 // CHECK-NEXT: [[TMP37:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 2357 // CHECK-NEXT: [[XOR18:%.*]] = xor <2 x i64> [[TMP37]], [[TMP36]] 2358 // CHECK-NEXT: store volatile <2 x i64> [[XOR18]], ptr @ul, align 8 2359 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 2360 // CHECK-NEXT: [[TMP39:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 2361 // CHECK-NEXT: [[XOR19:%.*]] = xor <2 x i64> [[TMP39]], [[TMP38]] 2362 // CHECK-NEXT: store volatile <2 x i64> [[XOR19]], ptr @bl, align 8 2363 // CHECK-NEXT: [[TMP40:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 2364 // CHECK-NEXT: [[TMP41:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 2365 // CHECK-NEXT: [[XOR20:%.*]] = xor <1 x i128> [[TMP41]], [[TMP40]] 2366 // CHECK-NEXT: store volatile <1 x i128> [[XOR20]], ptr @slll, align 8 2367 // CHECK-NEXT: [[TMP42:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 2368 // CHECK-NEXT: [[TMP43:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 2369 // CHECK-NEXT: [[XOR21:%.*]] = xor <1 x i128> [[TMP43]], [[TMP42]] 2370 // CHECK-NEXT: store volatile <1 x i128> [[XOR21]], ptr @slll, align 8 
2371 // CHECK-NEXT: [[TMP44:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
2372 // CHECK-NEXT: [[TMP45:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
2373 // CHECK-NEXT: [[XOR22:%.*]] = xor <1 x i128> [[TMP45]], [[TMP44]]
2374 // CHECK-NEXT: store volatile <1 x i128> [[XOR22]], ptr @ulll, align 8
2375 // CHECK-NEXT: [[TMP46:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8
2376 // CHECK-NEXT: [[TMP47:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
2377 // CHECK-NEXT: [[XOR23:%.*]] = xor <1 x i128> [[TMP47]], [[TMP46]]
2378 // CHECK-NEXT: store volatile <1 x i128> [[XOR23]], ptr @ulll, align 8
2379 // CHECK-NEXT: [[TMP48:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8
2380 // CHECK-NEXT: [[TMP49:%.*]] = load volatile <1 x i128>, ptr @blll, align 8
2381 // CHECK-NEXT: [[XOR24:%.*]] = xor <1 x i128> [[TMP49]], [[TMP48]]
2382 // CHECK-NEXT: store volatile <1 x i128> [[XOR24]], ptr @blll, align 8
2383 // CHECK-NEXT: ret void
2384 //
2385 void test_xor_assign(void) {
2386 
2387 sc ^= sc2;
2388 sc ^= bc2;
2389 uc ^= uc2;
2390 uc ^= bc2;
2391 bc ^= bc2;
2392 
2393 ss ^= ss2;
2394 ss ^= bs2;
2395 us ^= us2;
2396 us ^= bs2;
2397 bs ^= bs2;
2398 
2399 si ^= si2;
2400 si ^= bi2;
2401 ui ^= ui2;
2402 ui ^= bi2;
2403 bi ^= bi2;
2404 
2405 sl ^= sl2;
2406 sl ^= bl2;
2407 ul ^= ul2;
2408 ul ^= bl2;
2409 bl ^= bl2;
2410 
2411 slll ^= slll2;
2412 slll ^= blll2;
2413 ulll ^= ulll2;
2414 ulll ^= blll2;
2415 blll ^= blll2;
2416 }
2417 
2418 // CHECK-LABEL: define dso_local void @test_sl(
2419 // CHECK-SAME: ) #[[ATTR0]] {
2420 // CHECK-NEXT: [[ENTRY:.*:]]
2421 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
2422 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
2423 // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i8> [[TMP0]], [[TMP1]]
2424 // CHECK-NEXT: store volatile <16 x i8> [[SHL]], ptr @sc, align 8
2425 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
2426 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
2427 // CHECK-NEXT: [[SHL1:%.*]] = shl <16 x i8> [[TMP2]], [[TMP3]]
2428 // CHECK-NEXT: store volatile <16 x i8> [[SHL1]], ptr @sc, align 8
2429 // CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
2430 // CHECK-NEXT: [[TMP5:%.*]] = load volatile i32, ptr @cnt, align 4
2431 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[TMP5]], i64 0
2432 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
2433 // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8>
2434 // CHECK-NEXT: [[SHL2:%.*]] = shl <16 x i8> [[TMP4]], [[SH_PROM]]
2435 // CHECK-NEXT: store volatile <16 x i8> [[SHL2]], ptr @sc, align 8
2436 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
2437 // CHECK-NEXT: [[SHL3:%.*]] = shl <16 x i8> [[TMP6]], splat (i8 5)
2438 // CHECK-NEXT: store volatile <16 x i8> [[SHL3]], ptr @sc, align 8
2439 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
2440 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
2441 // CHECK-NEXT: [[SHL4:%.*]] = shl <16 x i8> [[TMP7]], [[TMP8]]
2442 // CHECK-NEXT: store volatile <16 x i8> [[SHL4]], ptr @uc, align 8
2443 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
2444 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
2445 // CHECK-NEXT: [[SHL5:%.*]] = shl <16 x i8> [[TMP9]], [[TMP10]]
2446 // CHECK-NEXT: store
volatile <16 x i8> [[SHL5]], ptr @uc, align 8 2447 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 2448 // CHECK-NEXT: [[TMP12:%.*]] = load volatile i32, ptr @cnt, align 4 2449 // CHECK-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> poison, i32 [[TMP12]], i64 0 2450 // CHECK-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> poison, <16 x i32> zeroinitializer 2451 // CHECK-NEXT: [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8> 2452 // CHECK-NEXT: [[SHL9:%.*]] = shl <16 x i8> [[TMP11]], [[SH_PROM8]] 2453 // CHECK-NEXT: store volatile <16 x i8> [[SHL9]], ptr @uc, align 8 2454 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 2455 // CHECK-NEXT: [[SHL10:%.*]] = shl <16 x i8> [[TMP13]], splat (i8 5) 2456 // CHECK-NEXT: store volatile <16 x i8> [[SHL10]], ptr @uc, align 8 2457 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 2458 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 2459 // CHECK-NEXT: [[SHL11:%.*]] = shl <8 x i16> [[TMP14]], [[TMP15]] 2460 // CHECK-NEXT: store volatile <8 x i16> [[SHL11]], ptr @ss, align 8 2461 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 2462 // CHECK-NEXT: [[TMP17:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 2463 // CHECK-NEXT: [[SHL12:%.*]] = shl <8 x i16> [[TMP16]], [[TMP17]] 2464 // CHECK-NEXT: store volatile <8 x i16> [[SHL12]], ptr @ss, align 8 2465 // CHECK-NEXT: [[TMP18:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 2466 // CHECK-NEXT: [[TMP19:%.*]] = load volatile i32, ptr @cnt, align 4 2467 // CHECK-NEXT: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> poison, i32 [[TMP19]], i64 0 2468 // CHECK-NEXT: [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> poison, <8 x i32> zeroinitializer 2469 // CHECK-NEXT: [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16> 2470 // CHECK-NEXT: [[SHL16:%.*]] = shl <8 x i16> [[TMP18]], [[SH_PROM15]] 2471 // CHECK-NEXT: store volatile <8 x i16> [[SHL16]], ptr @ss, align 8 2472 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 2473 // CHECK-NEXT: [[SHL17:%.*]] = shl <8 x i16> [[TMP20]], splat (i16 5) 2474 // CHECK-NEXT: store volatile <8 x i16> [[SHL17]], ptr @ss, align 8 2475 // CHECK-NEXT: [[TMP21:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2476 // CHECK-NEXT: [[TMP22:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 2477 // CHECK-NEXT: [[SHL18:%.*]] = shl <8 x i16> [[TMP21]], [[TMP22]] 2478 // CHECK-NEXT: store volatile <8 x i16> [[SHL18]], ptr @us, align 8 2479 // CHECK-NEXT: [[TMP23:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2480 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 2481 // CHECK-NEXT: [[SHL19:%.*]] = shl <8 x i16> [[TMP23]], [[TMP24]] 2482 // CHECK-NEXT: store volatile <8 x i16> [[SHL19]], ptr @us, align 8 2483 // CHECK-NEXT: [[TMP25:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2484 // CHECK-NEXT: [[TMP26:%.*]] = load volatile i32, ptr @cnt, align 4 2485 // CHECK-NEXT: [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> poison, i32 [[TMP26]], i64 0 2486 // CHECK-NEXT: [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> poison, <8 x i32> zeroinitializer 2487 // CHECK-NEXT: [[SH_PROM22:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT21]] to <8 x i16> 2488 // CHECK-NEXT: [[SHL23:%.*]] = shl <8 x i16> [[TMP25]], [[SH_PROM22]] 2489 // CHECK-NEXT: store volatile <8 x i16> 
[[SHL23]], ptr @us, align 8 2490 // CHECK-NEXT: [[TMP27:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2491 // CHECK-NEXT: [[SHL24:%.*]] = shl <8 x i16> [[TMP27]], splat (i16 5) 2492 // CHECK-NEXT: store volatile <8 x i16> [[SHL24]], ptr @us, align 8 2493 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2494 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 2495 // CHECK-NEXT: [[SHL25:%.*]] = shl <4 x i32> [[TMP28]], [[TMP29]] 2496 // CHECK-NEXT: store volatile <4 x i32> [[SHL25]], ptr @si, align 8 2497 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2498 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 2499 // CHECK-NEXT: [[SHL26:%.*]] = shl <4 x i32> [[TMP30]], [[TMP31]] 2500 // CHECK-NEXT: store volatile <4 x i32> [[SHL26]], ptr @si, align 8 2501 // CHECK-NEXT: [[TMP32:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2502 // CHECK-NEXT: [[TMP33:%.*]] = load volatile i32, ptr @cnt, align 4 2503 // CHECK-NEXT: [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> poison, i32 [[TMP33]], i64 0 2504 // CHECK-NEXT: [[SPLAT_SPLAT28:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> poison, <4 x i32> zeroinitializer 2505 // CHECK-NEXT: [[SHL29:%.*]] = shl <4 x i32> [[TMP32]], [[SPLAT_SPLAT28]] 2506 // CHECK-NEXT: store volatile <4 x i32> [[SHL29]], ptr @si, align 8 2507 // CHECK-NEXT: [[TMP34:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2508 // CHECK-NEXT: [[SHL30:%.*]] = shl <4 x i32> [[TMP34]], splat (i32 5) 2509 // CHECK-NEXT: store volatile <4 x i32> [[SHL30]], ptr @si, align 8 2510 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2511 // CHECK-NEXT: [[TMP36:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 2512 // CHECK-NEXT: [[SHL31:%.*]] = shl <4 x i32> [[TMP35]], [[TMP36]] 2513 // CHECK-NEXT: store volatile <4 x i32> [[SHL31]], ptr @ui, align 8 2514 // CHECK-NEXT: [[TMP37:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2515 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 2516 // CHECK-NEXT: [[SHL32:%.*]] = shl <4 x i32> [[TMP37]], [[TMP38]] 2517 // CHECK-NEXT: store volatile <4 x i32> [[SHL32]], ptr @ui, align 8 2518 // CHECK-NEXT: [[TMP39:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2519 // CHECK-NEXT: [[TMP40:%.*]] = load volatile i32, ptr @cnt, align 4 2520 // CHECK-NEXT: [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> poison, i32 [[TMP40]], i64 0 2521 // CHECK-NEXT: [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> poison, <4 x i32> zeroinitializer 2522 // CHECK-NEXT: [[SHL35:%.*]] = shl <4 x i32> [[TMP39]], [[SPLAT_SPLAT34]] 2523 // CHECK-NEXT: store volatile <4 x i32> [[SHL35]], ptr @ui, align 8 2524 // CHECK-NEXT: [[TMP41:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2525 // CHECK-NEXT: [[SHL36:%.*]] = shl <4 x i32> [[TMP41]], splat (i32 5) 2526 // CHECK-NEXT: store volatile <4 x i32> [[SHL36]], ptr @ui, align 8 2527 // CHECK-NEXT: [[TMP42:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2528 // CHECK-NEXT: [[TMP43:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 2529 // CHECK-NEXT: [[SHL37:%.*]] = shl <2 x i64> [[TMP42]], [[TMP43]] 2530 // CHECK-NEXT: store volatile <2 x i64> [[SHL37]], ptr @sl, align 8 2531 // CHECK-NEXT: [[TMP44:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2532 // CHECK-NEXT: [[TMP45:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 2533 // CHECK-NEXT: [[SHL38:%.*]] = shl <2 x i64> [[TMP44]], [[TMP45]] 2534 // CHECK-NEXT: store 
volatile <2 x i64> [[SHL38]], ptr @sl, align 8 2535 // CHECK-NEXT: [[TMP46:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2536 // CHECK-NEXT: [[TMP47:%.*]] = load volatile i32, ptr @cnt, align 4 2537 // CHECK-NEXT: [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> poison, i32 [[TMP47]], i64 0 2538 // CHECK-NEXT: [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> poison, <2 x i32> zeroinitializer 2539 // CHECK-NEXT: [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64> 2540 // CHECK-NEXT: [[SHL42:%.*]] = shl <2 x i64> [[TMP46]], [[SH_PROM41]] 2541 // CHECK-NEXT: store volatile <2 x i64> [[SHL42]], ptr @sl, align 8 2542 // CHECK-NEXT: [[TMP48:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2543 // CHECK-NEXT: [[SHL43:%.*]] = shl <2 x i64> [[TMP48]], splat (i64 5) 2544 // CHECK-NEXT: store volatile <2 x i64> [[SHL43]], ptr @sl, align 8 2545 // CHECK-NEXT: [[TMP49:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 2546 // CHECK-NEXT: [[TMP50:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 2547 // CHECK-NEXT: [[SHL44:%.*]] = shl <2 x i64> [[TMP49]], [[TMP50]] 2548 // CHECK-NEXT: store volatile <2 x i64> [[SHL44]], ptr @ul, align 8 2549 // CHECK-NEXT: [[TMP51:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 2550 // CHECK-NEXT: [[TMP52:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 2551 // CHECK-NEXT: [[SHL45:%.*]] = shl <2 x i64> [[TMP51]], [[TMP52]] 2552 // CHECK-NEXT: store volatile <2 x i64> [[SHL45]], ptr @ul, align 8 2553 // CHECK-NEXT: [[TMP53:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 2554 // CHECK-NEXT: [[TMP54:%.*]] = load volatile i32, ptr @cnt, align 4 2555 // CHECK-NEXT: [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> poison, i32 [[TMP54]], i64 0 2556 // CHECK-NEXT: [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> poison, <2 x i32> zeroinitializer 2557 // CHECK-NEXT: [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64> 2558 // CHECK-NEXT: [[SHL49:%.*]] = shl <2 x i64> [[TMP53]], [[SH_PROM48]] 2559 // CHECK-NEXT: store volatile <2 x i64> [[SHL49]], ptr @ul, align 8 2560 // CHECK-NEXT: [[TMP55:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 2561 // CHECK-NEXT: [[SHL50:%.*]] = shl <2 x i64> [[TMP55]], splat (i64 5) 2562 // CHECK-NEXT: store volatile <2 x i64> [[SHL50]], ptr @ul, align 8 2563 // CHECK-NEXT: [[TMP56:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 2564 // CHECK-NEXT: [[TMP57:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 2565 // CHECK-NEXT: [[SHL51:%.*]] = shl <1 x i128> [[TMP56]], [[TMP57]] 2566 // CHECK-NEXT: store volatile <1 x i128> [[SHL51]], ptr @slll, align 8 2567 // CHECK-NEXT: [[TMP58:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 2568 // CHECK-NEXT: [[TMP59:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 2569 // CHECK-NEXT: [[SHL52:%.*]] = shl <1 x i128> [[TMP58]], [[TMP59]] 2570 // CHECK-NEXT: store volatile <1 x i128> [[SHL52]], ptr @slll, align 8 2571 // CHECK-NEXT: [[TMP60:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 2572 // CHECK-NEXT: [[TMP61:%.*]] = load volatile i32, ptr @cnt, align 4 2573 // CHECK-NEXT: [[SPLAT_SPLATINSERT53:%.*]] = insertelement <1 x i32> poison, i32 [[TMP61]], i64 0 2574 // CHECK-NEXT: [[SPLAT_SPLAT54:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT53]], <1 x i32> poison, <1 x i32> zeroinitializer 2575 // CHECK-NEXT: [[SH_PROM55:%.*]] = zext <1 x i32> [[SPLAT_SPLAT54]] to <1 x i128> 2576 // CHECK-NEXT: [[SHL56:%.*]] = shl <1 x i128> [[TMP60]], [[SH_PROM55]] 2577 // CHECK-NEXT: 
store volatile <1 x i128> [[SHL56]], ptr @slll, align 8
2578 // CHECK-NEXT: [[TMP62:%.*]] = load volatile <1 x i128>, ptr @slll, align 8
2579 // CHECK-NEXT: [[SHL57:%.*]] = shl <1 x i128> [[TMP62]], splat (i128 5)
2580 // CHECK-NEXT: store volatile <1 x i128> [[SHL57]], ptr @slll, align 8
2581 // CHECK-NEXT: [[TMP63:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
2582 // CHECK-NEXT: [[TMP64:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
2583 // CHECK-NEXT: [[SHL58:%.*]] = shl <1 x i128> [[TMP63]], [[TMP64]]
2584 // CHECK-NEXT: store volatile <1 x i128> [[SHL58]], ptr @ulll, align 8
2585 // CHECK-NEXT: [[TMP65:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
2586 // CHECK-NEXT: [[TMP66:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
2587 // CHECK-NEXT: [[SHL59:%.*]] = shl <1 x i128> [[TMP65]], [[TMP66]]
2588 // CHECK-NEXT: store volatile <1 x i128> [[SHL59]], ptr @ulll, align 8
2589 // CHECK-NEXT: [[TMP67:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
2590 // CHECK-NEXT: [[TMP68:%.*]] = load volatile i32, ptr @cnt, align 4
2591 // CHECK-NEXT: [[SPLAT_SPLATINSERT60:%.*]] = insertelement <1 x i32> poison, i32 [[TMP68]], i64 0
2592 // CHECK-NEXT: [[SPLAT_SPLAT61:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT60]], <1 x i32> poison, <1 x i32> zeroinitializer
2593 // CHECK-NEXT: [[SH_PROM62:%.*]] = zext <1 x i32> [[SPLAT_SPLAT61]] to <1 x i128>
2594 // CHECK-NEXT: [[SHL63:%.*]] = shl <1 x i128> [[TMP67]], [[SH_PROM62]]
2595 // CHECK-NEXT: store volatile <1 x i128> [[SHL63]], ptr @ulll, align 8
2596 // CHECK-NEXT: [[TMP69:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
2597 // CHECK-NEXT: [[SHL64:%.*]] = shl <1 x i128> [[TMP69]], splat (i128 5)
2598 // CHECK-NEXT: store volatile <1 x i128> [[SHL64]], ptr @ulll, align 8
2599 // CHECK-NEXT: ret void
2600 //
2601 void test_sl(void) {
2602 
2603 sc = sc << sc2;
2604 sc = sc << uc2;
2605 sc = sc << cnt;
2606 sc = sc << 5;
2607 uc = uc << sc2;
2608 uc = uc << uc2;
2609 uc = uc << cnt;
2610 uc = uc << 5;
2611 
2612 ss = ss << ss2;
2613 ss = ss << us2;
2614 ss = ss << cnt;
2615 ss = ss << 5;
2616 us = us << ss2;
2617 us = us << us2;
2618 us = us << cnt;
2619 us = us << 5;
2620 
2621 si = si << si2;
2622 si = si << ui2;
2623 si = si << cnt;
2624 si = si << 5;
2625 ui = ui << si2;
2626 ui = ui << ui2;
2627 ui = ui << cnt;
2628 ui = ui << 5;
2629 
2630 sl = sl << sl2;
2631 sl = sl << ul2;
2632 sl = sl << cnt;
2633 sl = sl << 5;
2634 ul = ul << sl2;
2635 ul = ul << ul2;
2636 ul = ul << cnt;
2637 ul = ul << 5;
2638 
2639 slll = slll << slll2;
2640 slll = slll << ulll2;
2641 slll = slll << cnt;
2642 slll = slll << 5;
2643 ulll = ulll << slll2;
2644 ulll = ulll << ulll2;
2645 ulll = ulll << cnt;
2646 ulll = ulll << 5;
2647 }
2648 
2649 // CHECK-LABEL: define dso_local void @test_sl_assign(
2650 // CHECK-SAME: ) #[[ATTR0]] {
2651 // CHECK-NEXT: [[ENTRY:.*:]]
2652 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
2653 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
2654 // CHECK-NEXT: [[SHL:%.*]] = shl <16 x i8> [[TMP1]], [[TMP0]]
2655 // CHECK-NEXT: store volatile <16 x i8> [[SHL]], ptr @sc, align 8
2656 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
2657 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
2658 // CHECK-NEXT: [[SHL1:%.*]] = shl <16 x i8> [[TMP3]], [[TMP2]]
2659 // CHECK-NEXT: store volatile <16 x i8> [[SHL1]], ptr @sc, align 8
2660 // CHECK-NEXT: [[TMP4:%.*]] = load volatile i32, ptr @cnt, align 4
2661 // CHECK-NEXT:
[[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[TMP4]], i64 0 2662 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer 2663 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 2664 // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8> 2665 // CHECK-NEXT: [[SHL2:%.*]] = shl <16 x i8> [[TMP5]], [[SH_PROM]] 2666 // CHECK-NEXT: store volatile <16 x i8> [[SHL2]], ptr @sc, align 8 2667 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 2668 // CHECK-NEXT: [[SHL3:%.*]] = shl <16 x i8> [[TMP6]], splat (i8 5) 2669 // CHECK-NEXT: store volatile <16 x i8> [[SHL3]], ptr @sc, align 8 2670 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 2671 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 2672 // CHECK-NEXT: [[SHL4:%.*]] = shl <16 x i8> [[TMP8]], [[TMP7]] 2673 // CHECK-NEXT: store volatile <16 x i8> [[SHL4]], ptr @uc, align 8 2674 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 2675 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 2676 // CHECK-NEXT: [[SHL5:%.*]] = shl <16 x i8> [[TMP10]], [[TMP9]] 2677 // CHECK-NEXT: store volatile <16 x i8> [[SHL5]], ptr @uc, align 8 2678 // CHECK-NEXT: [[TMP11:%.*]] = load volatile i32, ptr @cnt, align 4 2679 // CHECK-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> poison, i32 [[TMP11]], i64 0 2680 // CHECK-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> poison, <16 x i32> zeroinitializer 2681 // CHECK-NEXT: [[TMP12:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 2682 // CHECK-NEXT: [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8> 2683 // CHECK-NEXT: [[SHL9:%.*]] = shl <16 x i8> [[TMP12]], [[SH_PROM8]] 2684 // CHECK-NEXT: store volatile <16 x i8> [[SHL9]], ptr @uc, align 8 2685 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 2686 // CHECK-NEXT: [[SHL10:%.*]] = shl <16 x i8> [[TMP13]], splat (i8 5) 2687 // CHECK-NEXT: store volatile <16 x i8> [[SHL10]], ptr @uc, align 8 2688 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 2689 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 2690 // CHECK-NEXT: [[SHL11:%.*]] = shl <8 x i16> [[TMP15]], [[TMP14]] 2691 // CHECK-NEXT: store volatile <8 x i16> [[SHL11]], ptr @ss, align 8 2692 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 2693 // CHECK-NEXT: [[TMP17:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 2694 // CHECK-NEXT: [[SHL12:%.*]] = shl <8 x i16> [[TMP17]], [[TMP16]] 2695 // CHECK-NEXT: store volatile <8 x i16> [[SHL12]], ptr @ss, align 8 2696 // CHECK-NEXT: [[TMP18:%.*]] = load volatile i32, ptr @cnt, align 4 2697 // CHECK-NEXT: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> poison, i32 [[TMP18]], i64 0 2698 // CHECK-NEXT: [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> poison, <8 x i32> zeroinitializer 2699 // CHECK-NEXT: [[TMP19:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 2700 // CHECK-NEXT: [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16> 2701 // CHECK-NEXT: [[SHL16:%.*]] = shl <8 x i16> [[TMP19]], [[SH_PROM15]] 2702 // CHECK-NEXT: store volatile <8 x i16> [[SHL16]], ptr @ss, align 8 2703 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 2704 // CHECK-NEXT: [[SHL17:%.*]] = shl <8 x i16> [[TMP20]], splat 
(i16 5) 2705 // CHECK-NEXT: store volatile <8 x i16> [[SHL17]], ptr @ss, align 8 2706 // CHECK-NEXT: [[TMP21:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 2707 // CHECK-NEXT: [[TMP22:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2708 // CHECK-NEXT: [[SHL18:%.*]] = shl <8 x i16> [[TMP22]], [[TMP21]] 2709 // CHECK-NEXT: store volatile <8 x i16> [[SHL18]], ptr @us, align 8 2710 // CHECK-NEXT: [[TMP23:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 2711 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2712 // CHECK-NEXT: [[SHL19:%.*]] = shl <8 x i16> [[TMP24]], [[TMP23]] 2713 // CHECK-NEXT: store volatile <8 x i16> [[SHL19]], ptr @us, align 8 2714 // CHECK-NEXT: [[TMP25:%.*]] = load volatile i32, ptr @cnt, align 4 2715 // CHECK-NEXT: [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> poison, i32 [[TMP25]], i64 0 2716 // CHECK-NEXT: [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> poison, <8 x i32> zeroinitializer 2717 // CHECK-NEXT: [[TMP26:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2718 // CHECK-NEXT: [[SH_PROM22:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT21]] to <8 x i16> 2719 // CHECK-NEXT: [[SHL23:%.*]] = shl <8 x i16> [[TMP26]], [[SH_PROM22]] 2720 // CHECK-NEXT: store volatile <8 x i16> [[SHL23]], ptr @us, align 8 2721 // CHECK-NEXT: [[TMP27:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2722 // CHECK-NEXT: [[SHL24:%.*]] = shl <8 x i16> [[TMP27]], splat (i16 5) 2723 // CHECK-NEXT: store volatile <8 x i16> [[SHL24]], ptr @us, align 8 2724 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 2725 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2726 // CHECK-NEXT: [[SHL25:%.*]] = shl <4 x i32> [[TMP29]], [[TMP28]] 2727 // CHECK-NEXT: store volatile <4 x i32> [[SHL25]], ptr @si, align 8 2728 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 2729 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2730 // CHECK-NEXT: [[SHL26:%.*]] = shl <4 x i32> [[TMP31]], [[TMP30]] 2731 // CHECK-NEXT: store volatile <4 x i32> [[SHL26]], ptr @si, align 8 2732 // CHECK-NEXT: [[TMP32:%.*]] = load volatile i32, ptr @cnt, align 4 2733 // CHECK-NEXT: [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> poison, i32 [[TMP32]], i64 0 2734 // CHECK-NEXT: [[SPLAT_SPLAT28:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> poison, <4 x i32> zeroinitializer 2735 // CHECK-NEXT: [[TMP33:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2736 // CHECK-NEXT: [[SHL29:%.*]] = shl <4 x i32> [[TMP33]], [[SPLAT_SPLAT28]] 2737 // CHECK-NEXT: store volatile <4 x i32> [[SHL29]], ptr @si, align 8 2738 // CHECK-NEXT: [[TMP34:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2739 // CHECK-NEXT: [[SHL30:%.*]] = shl <4 x i32> [[TMP34]], splat (i32 5) 2740 // CHECK-NEXT: store volatile <4 x i32> [[SHL30]], ptr @si, align 8 2741 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 2742 // CHECK-NEXT: [[TMP36:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2743 // CHECK-NEXT: [[SHL31:%.*]] = shl <4 x i32> [[TMP36]], [[TMP35]] 2744 // CHECK-NEXT: store volatile <4 x i32> [[SHL31]], ptr @ui, align 8 2745 // CHECK-NEXT: [[TMP37:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 2746 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2747 // CHECK-NEXT: [[SHL32:%.*]] = shl <4 x i32> [[TMP38]], [[TMP37]] 2748 // CHECK-NEXT: store volatile <4 x i32> [[SHL32]], ptr @ui, align 8 2749 // CHECK-NEXT: [[TMP39:%.*]] = load 
volatile i32, ptr @cnt, align 4 2750 // CHECK-NEXT: [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> poison, i32 [[TMP39]], i64 0 2751 // CHECK-NEXT: [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> poison, <4 x i32> zeroinitializer 2752 // CHECK-NEXT: [[TMP40:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2753 // CHECK-NEXT: [[SHL35:%.*]] = shl <4 x i32> [[TMP40]], [[SPLAT_SPLAT34]] 2754 // CHECK-NEXT: store volatile <4 x i32> [[SHL35]], ptr @ui, align 8 2755 // CHECK-NEXT: [[TMP41:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2756 // CHECK-NEXT: [[SHL36:%.*]] = shl <4 x i32> [[TMP41]], splat (i32 5) 2757 // CHECK-NEXT: store volatile <4 x i32> [[SHL36]], ptr @ui, align 8 2758 // CHECK-NEXT: [[TMP42:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 2759 // CHECK-NEXT: [[TMP43:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2760 // CHECK-NEXT: [[SHL37:%.*]] = shl <2 x i64> [[TMP43]], [[TMP42]] 2761 // CHECK-NEXT: store volatile <2 x i64> [[SHL37]], ptr @sl, align 8 2762 // CHECK-NEXT: [[TMP44:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 2763 // CHECK-NEXT: [[TMP45:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2764 // CHECK-NEXT: [[SHL38:%.*]] = shl <2 x i64> [[TMP45]], [[TMP44]] 2765 // CHECK-NEXT: store volatile <2 x i64> [[SHL38]], ptr @sl, align 8 2766 // CHECK-NEXT: [[TMP46:%.*]] = load volatile i32, ptr @cnt, align 4 2767 // CHECK-NEXT: [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> poison, i32 [[TMP46]], i64 0 2768 // CHECK-NEXT: [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> poison, <2 x i32> zeroinitializer 2769 // CHECK-NEXT: [[TMP47:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2770 // CHECK-NEXT: [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64> 2771 // CHECK-NEXT: [[SHL42:%.*]] = shl <2 x i64> [[TMP47]], [[SH_PROM41]] 2772 // CHECK-NEXT: store volatile <2 x i64> [[SHL42]], ptr @sl, align 8 2773 // CHECK-NEXT: [[TMP48:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2774 // CHECK-NEXT: [[SHL43:%.*]] = shl <2 x i64> [[TMP48]], splat (i64 5) 2775 // CHECK-NEXT: store volatile <2 x i64> [[SHL43]], ptr @sl, align 8 2776 // CHECK-NEXT: [[TMP49:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 2777 // CHECK-NEXT: [[TMP50:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 2778 // CHECK-NEXT: [[SHL44:%.*]] = shl <2 x i64> [[TMP50]], [[TMP49]] 2779 // CHECK-NEXT: store volatile <2 x i64> [[SHL44]], ptr @ul, align 8 2780 // CHECK-NEXT: [[TMP51:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 2781 // CHECK-NEXT: [[TMP52:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 2782 // CHECK-NEXT: [[SHL45:%.*]] = shl <2 x i64> [[TMP52]], [[TMP51]] 2783 // CHECK-NEXT: store volatile <2 x i64> [[SHL45]], ptr @ul, align 8 2784 // CHECK-NEXT: [[TMP53:%.*]] = load volatile i32, ptr @cnt, align 4 2785 // CHECK-NEXT: [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> poison, i32 [[TMP53]], i64 0 2786 // CHECK-NEXT: [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> poison, <2 x i32> zeroinitializer 2787 // CHECK-NEXT: [[TMP54:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 2788 // CHECK-NEXT: [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64> 2789 // CHECK-NEXT: [[SHL49:%.*]] = shl <2 x i64> [[TMP54]], [[SH_PROM48]] 2790 // CHECK-NEXT: store volatile <2 x i64> [[SHL49]], ptr @ul, align 8 2791 // CHECK-NEXT: [[TMP55:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 2792 // CHECK-NEXT: [[SHL50:%.*]] = shl <2 x i64> [[TMP55]], splat 
(i64 5) 2793 // CHECK-NEXT: store volatile <2 x i64> [[SHL50]], ptr @ul, align 8 2794 // CHECK-NEXT: [[TMP56:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 2795 // CHECK-NEXT: [[TMP57:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 2796 // CHECK-NEXT: [[SHL51:%.*]] = shl <1 x i128> [[TMP57]], [[TMP56]] 2797 // CHECK-NEXT: store volatile <1 x i128> [[SHL51]], ptr @slll, align 8 2798 // CHECK-NEXT: [[TMP58:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 2799 // CHECK-NEXT: [[TMP59:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 2800 // CHECK-NEXT: [[SHL52:%.*]] = shl <1 x i128> [[TMP59]], [[TMP58]] 2801 // CHECK-NEXT: store volatile <1 x i128> [[SHL52]], ptr @slll, align 8 2802 // CHECK-NEXT: [[TMP60:%.*]] = load volatile i32, ptr @cnt, align 4 2803 // CHECK-NEXT: [[SPLAT_SPLATINSERT53:%.*]] = insertelement <1 x i32> poison, i32 [[TMP60]], i64 0 2804 // CHECK-NEXT: [[SPLAT_SPLAT54:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT53]], <1 x i32> poison, <1 x i32> zeroinitializer 2805 // CHECK-NEXT: [[TMP61:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 2806 // CHECK-NEXT: [[SH_PROM55:%.*]] = zext <1 x i32> [[SPLAT_SPLAT54]] to <1 x i128> 2807 // CHECK-NEXT: [[SHL56:%.*]] = shl <1 x i128> [[TMP61]], [[SH_PROM55]] 2808 // CHECK-NEXT: store volatile <1 x i128> [[SHL56]], ptr @slll, align 8 2809 // CHECK-NEXT: [[TMP62:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 2810 // CHECK-NEXT: [[SHL57:%.*]] = shl <1 x i128> [[TMP62]], splat (i128 5) 2811 // CHECK-NEXT: store volatile <1 x i128> [[SHL57]], ptr @slll, align 8 2812 // CHECK-NEXT: [[TMP63:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 2813 // CHECK-NEXT: [[TMP64:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 2814 // CHECK-NEXT: [[SHL58:%.*]] = shl <1 x i128> [[TMP64]], [[TMP63]] 2815 // CHECK-NEXT: store volatile <1 x i128> [[SHL58]], ptr @ulll, align 8 2816 // CHECK-NEXT: [[TMP65:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 2817 // CHECK-NEXT: [[TMP66:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 2818 // CHECK-NEXT: [[SHL59:%.*]] = shl <1 x i128> [[TMP66]], [[TMP65]] 2819 // CHECK-NEXT: store volatile <1 x i128> [[SHL59]], ptr @ulll, align 8 2820 // CHECK-NEXT: [[TMP67:%.*]] = load volatile i32, ptr @cnt, align 4 2821 // CHECK-NEXT: [[SPLAT_SPLATINSERT60:%.*]] = insertelement <1 x i32> poison, i32 [[TMP67]], i64 0 2822 // CHECK-NEXT: [[SPLAT_SPLAT61:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT60]], <1 x i32> poison, <1 x i32> zeroinitializer 2823 // CHECK-NEXT: [[TMP68:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 2824 // CHECK-NEXT: [[SH_PROM62:%.*]] = zext <1 x i32> [[SPLAT_SPLAT61]] to <1 x i128> 2825 // CHECK-NEXT: [[SHL63:%.*]] = shl <1 x i128> [[TMP68]], [[SH_PROM62]] 2826 // CHECK-NEXT: store volatile <1 x i128> [[SHL63]], ptr @ulll, align 8 2827 // CHECK-NEXT: [[TMP69:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 2828 // CHECK-NEXT: [[SHL64:%.*]] = shl <1 x i128> [[TMP69]], splat (i128 5) 2829 // CHECK-NEXT: store volatile <1 x i128> [[SHL64]], ptr @ulll, align 8 2830 // CHECK-NEXT: ret void 2831 // 2832 void test_sl_assign(void) { 2833 2834 sc <<= sc2; 2835 sc <<= uc2; 2836 sc <<= cnt; 2837 sc <<= 5; 2838 uc <<= sc2; 2839 uc <<= uc2; 2840 uc <<= cnt; 2841 uc <<= 5; 2842 2843 ss <<= ss2; 2844 ss <<= us2; 2845 ss <<= cnt; 2846 ss <<= 5; 2847 us <<= ss2; 2848 us <<= us2; 2849 us <<= cnt; 2850 us <<= 5; 2851 2852 si <<= si2; 2853 si <<= ui2; 2854 si <<= cnt; 2855 si <<= 5; 2856 ui <<= si2; 2857 ui <<= ui2; 2858 ui <<= cnt; 2859 ui <<= 5; 2860 2861 sl 
<<= sl2;
2862 sl <<= ul2;
2863 sl <<= cnt;
2864 sl <<= 5;
2865 ul <<= sl2;
2866 ul <<= ul2;
2867 ul <<= cnt;
2868 ul <<= 5;
2869 
2870 slll <<= slll2;
2871 slll <<= ulll2;
2872 slll <<= cnt;
2873 slll <<= 5;
2874 ulll <<= slll2;
2875 ulll <<= ulll2;
2876 ulll <<= cnt;
2877 ulll <<= 5;
2878 }
2879 
2880 // CHECK-LABEL: define dso_local void @test_sr(
2881 // CHECK-SAME: ) #[[ATTR0]] {
2882 // CHECK-NEXT: [[ENTRY:.*:]]
2883 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
2884 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
2885 // CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i8> [[TMP0]], [[TMP1]]
2886 // CHECK-NEXT: store volatile <16 x i8> [[SHR]], ptr @sc, align 8
2887 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
2888 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
2889 // CHECK-NEXT: [[SHR1:%.*]] = ashr <16 x i8> [[TMP2]], [[TMP3]]
2890 // CHECK-NEXT: store volatile <16 x i8> [[SHR1]], ptr @sc, align 8
2891 // CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
2892 // CHECK-NEXT: [[TMP5:%.*]] = load volatile i32, ptr @cnt, align 4
2893 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[TMP5]], i64 0
2894 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
2895 // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8>
2896 // CHECK-NEXT: [[SHR2:%.*]] = ashr <16 x i8> [[TMP4]], [[SH_PROM]]
2897 // CHECK-NEXT: store volatile <16 x i8> [[SHR2]], ptr @sc, align 8
2898 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
2899 // CHECK-NEXT: [[SHR3:%.*]] = ashr <16 x i8> [[TMP6]], splat (i8 5)
2900 // CHECK-NEXT: store volatile <16 x i8> [[SHR3]], ptr @sc, align 8
2901 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
2902 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
2903 // CHECK-NEXT: [[SHR4:%.*]] = lshr <16 x i8> [[TMP7]], [[TMP8]]
2904 // CHECK-NEXT: store volatile <16 x i8> [[SHR4]], ptr @uc, align 8
2905 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
2906 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
2907 // CHECK-NEXT: [[SHR5:%.*]] = lshr <16 x i8> [[TMP9]], [[TMP10]]
2908 // CHECK-NEXT: store volatile <16 x i8> [[SHR5]], ptr @uc, align 8
2909 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
2910 // CHECK-NEXT: [[TMP12:%.*]] = load volatile i32, ptr @cnt, align 4
2911 // CHECK-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> poison, i32 [[TMP12]], i64 0
2912 // CHECK-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> poison, <16 x i32> zeroinitializer
2913 // CHECK-NEXT: [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8>
2914 // CHECK-NEXT: [[SHR9:%.*]] = lshr <16 x i8> [[TMP11]], [[SH_PROM8]]
2915 // CHECK-NEXT: store volatile <16 x i8> [[SHR9]], ptr @uc, align 8
2916 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
2917 // CHECK-NEXT: [[SHR10:%.*]] = lshr <16 x i8> [[TMP13]], splat (i8 5)
2918 // CHECK-NEXT: store volatile <16 x i8> [[SHR10]], ptr @uc, align 8
2919 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
2920 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
2921 // CHECK-NEXT: [[SHR11:%.*]] = ashr <8 x i16> [[TMP14]], [[TMP15]]
2922 // CHECK-NEXT: store volatile
<8 x i16> [[SHR11]], ptr @ss, align 8 2923 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 2924 // CHECK-NEXT: [[TMP17:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 2925 // CHECK-NEXT: [[SHR12:%.*]] = ashr <8 x i16> [[TMP16]], [[TMP17]] 2926 // CHECK-NEXT: store volatile <8 x i16> [[SHR12]], ptr @ss, align 8 2927 // CHECK-NEXT: [[TMP18:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 2928 // CHECK-NEXT: [[TMP19:%.*]] = load volatile i32, ptr @cnt, align 4 2929 // CHECK-NEXT: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> poison, i32 [[TMP19]], i64 0 2930 // CHECK-NEXT: [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> poison, <8 x i32> zeroinitializer 2931 // CHECK-NEXT: [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16> 2932 // CHECK-NEXT: [[SHR16:%.*]] = ashr <8 x i16> [[TMP18]], [[SH_PROM15]] 2933 // CHECK-NEXT: store volatile <8 x i16> [[SHR16]], ptr @ss, align 8 2934 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 2935 // CHECK-NEXT: [[SHR17:%.*]] = ashr <8 x i16> [[TMP20]], splat (i16 5) 2936 // CHECK-NEXT: store volatile <8 x i16> [[SHR17]], ptr @ss, align 8 2937 // CHECK-NEXT: [[TMP21:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2938 // CHECK-NEXT: [[TMP22:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 2939 // CHECK-NEXT: [[SHR18:%.*]] = lshr <8 x i16> [[TMP21]], [[TMP22]] 2940 // CHECK-NEXT: store volatile <8 x i16> [[SHR18]], ptr @us, align 8 2941 // CHECK-NEXT: [[TMP23:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2942 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 2943 // CHECK-NEXT: [[SHR19:%.*]] = lshr <8 x i16> [[TMP23]], [[TMP24]] 2944 // CHECK-NEXT: store volatile <8 x i16> [[SHR19]], ptr @us, align 8 2945 // CHECK-NEXT: [[TMP25:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2946 // CHECK-NEXT: [[TMP26:%.*]] = load volatile i32, ptr @cnt, align 4 2947 // CHECK-NEXT: [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> poison, i32 [[TMP26]], i64 0 2948 // CHECK-NEXT: [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> poison, <8 x i32> zeroinitializer 2949 // CHECK-NEXT: [[SH_PROM22:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT21]] to <8 x i16> 2950 // CHECK-NEXT: [[SHR23:%.*]] = lshr <8 x i16> [[TMP25]], [[SH_PROM22]] 2951 // CHECK-NEXT: store volatile <8 x i16> [[SHR23]], ptr @us, align 8 2952 // CHECK-NEXT: [[TMP27:%.*]] = load volatile <8 x i16>, ptr @us, align 8 2953 // CHECK-NEXT: [[SHR24:%.*]] = lshr <8 x i16> [[TMP27]], splat (i16 5) 2954 // CHECK-NEXT: store volatile <8 x i16> [[SHR24]], ptr @us, align 8 2955 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2956 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 2957 // CHECK-NEXT: [[SHR25:%.*]] = ashr <4 x i32> [[TMP28]], [[TMP29]] 2958 // CHECK-NEXT: store volatile <4 x i32> [[SHR25]], ptr @si, align 8 2959 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2960 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 2961 // CHECK-NEXT: [[SHR26:%.*]] = ashr <4 x i32> [[TMP30]], [[TMP31]] 2962 // CHECK-NEXT: store volatile <4 x i32> [[SHR26]], ptr @si, align 8 2963 // CHECK-NEXT: [[TMP32:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2964 // CHECK-NEXT: [[TMP33:%.*]] = load volatile i32, ptr @cnt, align 4 2965 // CHECK-NEXT: [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> poison, i32 [[TMP33]], i64 0 2966 // CHECK-NEXT: [[SPLAT_SPLAT28:%.*]] = 
shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> poison, <4 x i32> zeroinitializer 2967 // CHECK-NEXT: [[SHR29:%.*]] = ashr <4 x i32> [[TMP32]], [[SPLAT_SPLAT28]] 2968 // CHECK-NEXT: store volatile <4 x i32> [[SHR29]], ptr @si, align 8 2969 // CHECK-NEXT: [[TMP34:%.*]] = load volatile <4 x i32>, ptr @si, align 8 2970 // CHECK-NEXT: [[SHR30:%.*]] = ashr <4 x i32> [[TMP34]], splat (i32 5) 2971 // CHECK-NEXT: store volatile <4 x i32> [[SHR30]], ptr @si, align 8 2972 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2973 // CHECK-NEXT: [[TMP36:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 2974 // CHECK-NEXT: [[SHR31:%.*]] = lshr <4 x i32> [[TMP35]], [[TMP36]] 2975 // CHECK-NEXT: store volatile <4 x i32> [[SHR31]], ptr @ui, align 8 2976 // CHECK-NEXT: [[TMP37:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2977 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 2978 // CHECK-NEXT: [[SHR32:%.*]] = lshr <4 x i32> [[TMP37]], [[TMP38]] 2979 // CHECK-NEXT: store volatile <4 x i32> [[SHR32]], ptr @ui, align 8 2980 // CHECK-NEXT: [[TMP39:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2981 // CHECK-NEXT: [[TMP40:%.*]] = load volatile i32, ptr @cnt, align 4 2982 // CHECK-NEXT: [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> poison, i32 [[TMP40]], i64 0 2983 // CHECK-NEXT: [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> poison, <4 x i32> zeroinitializer 2984 // CHECK-NEXT: [[SHR35:%.*]] = lshr <4 x i32> [[TMP39]], [[SPLAT_SPLAT34]] 2985 // CHECK-NEXT: store volatile <4 x i32> [[SHR35]], ptr @ui, align 8 2986 // CHECK-NEXT: [[TMP41:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 2987 // CHECK-NEXT: [[SHR36:%.*]] = lshr <4 x i32> [[TMP41]], splat (i32 5) 2988 // CHECK-NEXT: store volatile <4 x i32> [[SHR36]], ptr @ui, align 8 2989 // CHECK-NEXT: [[TMP42:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2990 // CHECK-NEXT: [[TMP43:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 2991 // CHECK-NEXT: [[SHR37:%.*]] = ashr <2 x i64> [[TMP42]], [[TMP43]] 2992 // CHECK-NEXT: store volatile <2 x i64> [[SHR37]], ptr @sl, align 8 2993 // CHECK-NEXT: [[TMP44:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2994 // CHECK-NEXT: [[TMP45:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 2995 // CHECK-NEXT: [[SHR38:%.*]] = ashr <2 x i64> [[TMP44]], [[TMP45]] 2996 // CHECK-NEXT: store volatile <2 x i64> [[SHR38]], ptr @sl, align 8 2997 // CHECK-NEXT: [[TMP46:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 2998 // CHECK-NEXT: [[TMP47:%.*]] = load volatile i32, ptr @cnt, align 4 2999 // CHECK-NEXT: [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> poison, i32 [[TMP47]], i64 0 3000 // CHECK-NEXT: [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> poison, <2 x i32> zeroinitializer 3001 // CHECK-NEXT: [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64> 3002 // CHECK-NEXT: [[SHR42:%.*]] = ashr <2 x i64> [[TMP46]], [[SH_PROM41]] 3003 // CHECK-NEXT: store volatile <2 x i64> [[SHR42]], ptr @sl, align 8 3004 // CHECK-NEXT: [[TMP48:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 3005 // CHECK-NEXT: [[SHR43:%.*]] = ashr <2 x i64> [[TMP48]], splat (i64 5) 3006 // CHECK-NEXT: store volatile <2 x i64> [[SHR43]], ptr @sl, align 8 3007 // CHECK-NEXT: [[TMP49:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 3008 // CHECK-NEXT: [[TMP50:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 3009 // CHECK-NEXT: [[SHR44:%.*]] = lshr <2 x i64> [[TMP49]], [[TMP50]] 3010 // CHECK-NEXT: 
store volatile <2 x i64> [[SHR44]], ptr @ul, align 8 3011 // CHECK-NEXT: [[TMP51:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 3012 // CHECK-NEXT: [[TMP52:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 3013 // CHECK-NEXT: [[SHR45:%.*]] = lshr <2 x i64> [[TMP51]], [[TMP52]] 3014 // CHECK-NEXT: store volatile <2 x i64> [[SHR45]], ptr @ul, align 8 3015 // CHECK-NEXT: [[TMP53:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 3016 // CHECK-NEXT: [[TMP54:%.*]] = load volatile i32, ptr @cnt, align 4 3017 // CHECK-NEXT: [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> poison, i32 [[TMP54]], i64 0 3018 // CHECK-NEXT: [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> poison, <2 x i32> zeroinitializer 3019 // CHECK-NEXT: [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64> 3020 // CHECK-NEXT: [[SHR49:%.*]] = lshr <2 x i64> [[TMP53]], [[SH_PROM48]] 3021 // CHECK-NEXT: store volatile <2 x i64> [[SHR49]], ptr @ul, align 8 3022 // CHECK-NEXT: [[TMP55:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 3023 // CHECK-NEXT: [[SHR50:%.*]] = lshr <2 x i64> [[TMP55]], splat (i64 5) 3024 // CHECK-NEXT: store volatile <2 x i64> [[SHR50]], ptr @ul, align 8 3025 // CHECK-NEXT: [[TMP56:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 3026 // CHECK-NEXT: [[TMP57:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 3027 // CHECK-NEXT: [[SHR51:%.*]] = ashr <1 x i128> [[TMP56]], [[TMP57]] 3028 // CHECK-NEXT: store volatile <1 x i128> [[SHR51]], ptr @slll, align 8 3029 // CHECK-NEXT: [[TMP58:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 3030 // CHECK-NEXT: [[TMP59:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 3031 // CHECK-NEXT: [[SHR52:%.*]] = ashr <1 x i128> [[TMP58]], [[TMP59]] 3032 // CHECK-NEXT: store volatile <1 x i128> [[SHR52]], ptr @slll, align 8 3033 // CHECK-NEXT: [[TMP60:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 3034 // CHECK-NEXT: [[TMP61:%.*]] = load volatile i32, ptr @cnt, align 4 3035 // CHECK-NEXT: [[SPLAT_SPLATINSERT53:%.*]] = insertelement <1 x i32> poison, i32 [[TMP61]], i64 0 3036 // CHECK-NEXT: [[SPLAT_SPLAT54:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT53]], <1 x i32> poison, <1 x i32> zeroinitializer 3037 // CHECK-NEXT: [[SH_PROM55:%.*]] = zext <1 x i32> [[SPLAT_SPLAT54]] to <1 x i128> 3038 // CHECK-NEXT: [[SHR56:%.*]] = ashr <1 x i128> [[TMP60]], [[SH_PROM55]] 3039 // CHECK-NEXT: store volatile <1 x i128> [[SHR56]], ptr @slll, align 8 3040 // CHECK-NEXT: [[TMP62:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 3041 // CHECK-NEXT: [[SHR57:%.*]] = ashr <1 x i128> [[TMP62]], splat (i128 5) 3042 // CHECK-NEXT: store volatile <1 x i128> [[SHR57]], ptr @slll, align 8 3043 // CHECK-NEXT: [[TMP63:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 3044 // CHECK-NEXT: [[TMP64:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 3045 // CHECK-NEXT: [[SHR58:%.*]] = lshr <1 x i128> [[TMP63]], [[TMP64]] 3046 // CHECK-NEXT: store volatile <1 x i128> [[SHR58]], ptr @ulll, align 8 3047 // CHECK-NEXT: [[TMP65:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 3048 // CHECK-NEXT: [[TMP66:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 3049 // CHECK-NEXT: [[SHR59:%.*]] = lshr <1 x i128> [[TMP65]], [[TMP66]] 3050 // CHECK-NEXT: store volatile <1 x i128> [[SHR59]], ptr @ulll, align 8 3051 // CHECK-NEXT: [[TMP67:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 3052 // CHECK-NEXT: [[TMP68:%.*]] = load volatile i32, ptr @cnt, align 4 3053 // CHECK-NEXT: [[SPLAT_SPLATINSERT60:%.*]] = insertelement <1 x i32> 
poison, i32 [[TMP68]], i64 0 3054 // CHECK-NEXT: [[SPLAT_SPLAT61:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT60]], <1 x i32> poison, <1 x i32> zeroinitializer 3055 // CHECK-NEXT: [[SH_PROM62:%.*]] = zext <1 x i32> [[SPLAT_SPLAT61]] to <1 x i128> 3056 // CHECK-NEXT: [[SHR63:%.*]] = lshr <1 x i128> [[TMP67]], [[SH_PROM62]] 3057 // CHECK-NEXT: store volatile <1 x i128> [[SHR63]], ptr @ulll, align 8 3058 // CHECK-NEXT: [[TMP69:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 3059 // CHECK-NEXT: [[SHR64:%.*]] = lshr <1 x i128> [[TMP69]], splat (i128 5) 3060 // CHECK-NEXT: store volatile <1 x i128> [[SHR64]], ptr @ulll, align 8 3061 // CHECK-NEXT: ret void 3062 // 3063 void test_sr(void) { 3064 3065 sc = sc >> sc2; 3066 sc = sc >> uc2; 3067 sc = sc >> cnt; 3068 sc = sc >> 5; 3069 uc = uc >> sc2; 3070 uc = uc >> uc2; 3071 uc = uc >> cnt; 3072 uc = uc >> 5; 3073 3074 ss = ss >> ss2; 3075 ss = ss >> us2; 3076 ss = ss >> cnt; 3077 ss = ss >> 5; 3078 us = us >> ss2; 3079 us = us >> us2; 3080 us = us >> cnt; 3081 us = us >> 5; 3082 3083 si = si >> si2; 3084 si = si >> ui2; 3085 si = si >> cnt; 3086 si = si >> 5; 3087 ui = ui >> si2; 3088 ui = ui >> ui2; 3089 ui = ui >> cnt; 3090 ui = ui >> 5; 3091 3092 sl = sl >> sl2; 3093 sl = sl >> ul2; 3094 sl = sl >> cnt; 3095 sl = sl >> 5; 3096 ul = ul >> sl2; 3097 ul = ul >> ul2; 3098 ul = ul >> cnt; 3099 ul = ul >> 5; 3100 3101 slll = slll >> slll2; 3102 slll = slll >> ulll2; 3103 slll = slll >> cnt; 3104 slll = slll >> 5; 3105 ulll = ulll >> slll2; 3106 ulll = ulll >> ulll2; 3107 ulll = ulll >> cnt; 3108 ulll = ulll >> 5; 3109 } 3110 3111 // CHECK-LABEL: define dso_local void @test_sr_assign( 3112 // CHECK-SAME: ) #[[ATTR0]] { 3113 // CHECK-NEXT: [[ENTRY:.*:]] 3114 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 3115 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 3116 // CHECK-NEXT: [[SHR:%.*]] = ashr <16 x i8> [[TMP1]], [[TMP0]] 3117 // CHECK-NEXT: store volatile <16 x i8> [[SHR]], ptr @sc, align 8 3118 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 3119 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 3120 // CHECK-NEXT: [[SHR1:%.*]] = ashr <16 x i8> [[TMP3]], [[TMP2]] 3121 // CHECK-NEXT: store volatile <16 x i8> [[SHR1]], ptr @sc, align 8 3122 // CHECK-NEXT: [[TMP4:%.*]] = load volatile i32, ptr @cnt, align 4 3123 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[TMP4]], i64 0 3124 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer 3125 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 3126 // CHECK-NEXT: [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i8> 3127 // CHECK-NEXT: [[SHR2:%.*]] = ashr <16 x i8> [[TMP5]], [[SH_PROM]] 3128 // CHECK-NEXT: store volatile <16 x i8> [[SHR2]], ptr @sc, align 8 3129 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 3130 // CHECK-NEXT: [[SHR3:%.*]] = ashr <16 x i8> [[TMP6]], splat (i8 5) 3131 // CHECK-NEXT: store volatile <16 x i8> [[SHR3]], ptr @sc, align 8 3132 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 3133 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 3134 // CHECK-NEXT: [[SHR4:%.*]] = lshr <16 x i8> [[TMP8]], [[TMP7]] 3135 // CHECK-NEXT: store volatile <16 x i8> [[SHR4]], ptr @uc, align 8 3136 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 3137 // CHECK-NEXT: 
[[TMP10:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 3138 // CHECK-NEXT: [[SHR5:%.*]] = lshr <16 x i8> [[TMP10]], [[TMP9]] 3139 // CHECK-NEXT: store volatile <16 x i8> [[SHR5]], ptr @uc, align 8 3140 // CHECK-NEXT: [[TMP11:%.*]] = load volatile i32, ptr @cnt, align 4 3141 // CHECK-NEXT: [[SPLAT_SPLATINSERT6:%.*]] = insertelement <16 x i32> poison, i32 [[TMP11]], i64 0 3142 // CHECK-NEXT: [[SPLAT_SPLAT7:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT6]], <16 x i32> poison, <16 x i32> zeroinitializer 3143 // CHECK-NEXT: [[TMP12:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 3144 // CHECK-NEXT: [[SH_PROM8:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT7]] to <16 x i8> 3145 // CHECK-NEXT: [[SHR9:%.*]] = lshr <16 x i8> [[TMP12]], [[SH_PROM8]] 3146 // CHECK-NEXT: store volatile <16 x i8> [[SHR9]], ptr @uc, align 8 3147 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 3148 // CHECK-NEXT: [[SHR10:%.*]] = lshr <16 x i8> [[TMP13]], splat (i8 5) 3149 // CHECK-NEXT: store volatile <16 x i8> [[SHR10]], ptr @uc, align 8 3150 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 3151 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 3152 // CHECK-NEXT: [[SHR11:%.*]] = ashr <8 x i16> [[TMP15]], [[TMP14]] 3153 // CHECK-NEXT: store volatile <8 x i16> [[SHR11]], ptr @ss, align 8 3154 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 3155 // CHECK-NEXT: [[TMP17:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 3156 // CHECK-NEXT: [[SHR12:%.*]] = ashr <8 x i16> [[TMP17]], [[TMP16]] 3157 // CHECK-NEXT: store volatile <8 x i16> [[SHR12]], ptr @ss, align 8 3158 // CHECK-NEXT: [[TMP18:%.*]] = load volatile i32, ptr @cnt, align 4 3159 // CHECK-NEXT: [[SPLAT_SPLATINSERT13:%.*]] = insertelement <8 x i32> poison, i32 [[TMP18]], i64 0 3160 // CHECK-NEXT: [[SPLAT_SPLAT14:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT13]], <8 x i32> poison, <8 x i32> zeroinitializer 3161 // CHECK-NEXT: [[TMP19:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 3162 // CHECK-NEXT: [[SH_PROM15:%.*]] = trunc <8 x i32> [[SPLAT_SPLAT14]] to <8 x i16> 3163 // CHECK-NEXT: [[SHR16:%.*]] = ashr <8 x i16> [[TMP19]], [[SH_PROM15]] 3164 // CHECK-NEXT: store volatile <8 x i16> [[SHR16]], ptr @ss, align 8 3165 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 3166 // CHECK-NEXT: [[SHR17:%.*]] = ashr <8 x i16> [[TMP20]], splat (i16 5) 3167 // CHECK-NEXT: store volatile <8 x i16> [[SHR17]], ptr @ss, align 8 3168 // CHECK-NEXT: [[TMP21:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 3169 // CHECK-NEXT: [[TMP22:%.*]] = load volatile <8 x i16>, ptr @us, align 8 3170 // CHECK-NEXT: [[SHR18:%.*]] = lshr <8 x i16> [[TMP22]], [[TMP21]] 3171 // CHECK-NEXT: store volatile <8 x i16> [[SHR18]], ptr @us, align 8 3172 // CHECK-NEXT: [[TMP23:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 3173 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <8 x i16>, ptr @us, align 8 3174 // CHECK-NEXT: [[SHR19:%.*]] = lshr <8 x i16> [[TMP24]], [[TMP23]] 3175 // CHECK-NEXT: store volatile <8 x i16> [[SHR19]], ptr @us, align 8 3176 // CHECK-NEXT: [[TMP25:%.*]] = load volatile i32, ptr @cnt, align 4 3177 // CHECK-NEXT: [[SPLAT_SPLATINSERT20:%.*]] = insertelement <8 x i32> poison, i32 [[TMP25]], i64 0 3178 // CHECK-NEXT: [[SPLAT_SPLAT21:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT20]], <8 x i32> poison, <8 x i32> zeroinitializer 3179 // CHECK-NEXT: [[TMP26:%.*]] = load volatile <8 x i16>, ptr @us, align 8 3180 // CHECK-NEXT: [[SH_PROM22:%.*]] = trunc <8 x 
i32> [[SPLAT_SPLAT21]] to <8 x i16> 3181 // CHECK-NEXT: [[SHR23:%.*]] = lshr <8 x i16> [[TMP26]], [[SH_PROM22]] 3182 // CHECK-NEXT: store volatile <8 x i16> [[SHR23]], ptr @us, align 8 3183 // CHECK-NEXT: [[TMP27:%.*]] = load volatile <8 x i16>, ptr @us, align 8 3184 // CHECK-NEXT: [[SHR24:%.*]] = lshr <8 x i16> [[TMP27]], splat (i16 5) 3185 // CHECK-NEXT: store volatile <8 x i16> [[SHR24]], ptr @us, align 8 3186 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 3187 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @si, align 8 3188 // CHECK-NEXT: [[SHR25:%.*]] = ashr <4 x i32> [[TMP29]], [[TMP28]] 3189 // CHECK-NEXT: store volatile <4 x i32> [[SHR25]], ptr @si, align 8 3190 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 3191 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <4 x i32>, ptr @si, align 8 3192 // CHECK-NEXT: [[SHR26:%.*]] = ashr <4 x i32> [[TMP31]], [[TMP30]] 3193 // CHECK-NEXT: store volatile <4 x i32> [[SHR26]], ptr @si, align 8 3194 // CHECK-NEXT: [[TMP32:%.*]] = load volatile i32, ptr @cnt, align 4 3195 // CHECK-NEXT: [[SPLAT_SPLATINSERT27:%.*]] = insertelement <4 x i32> poison, i32 [[TMP32]], i64 0 3196 // CHECK-NEXT: [[SPLAT_SPLAT28:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT27]], <4 x i32> poison, <4 x i32> zeroinitializer 3197 // CHECK-NEXT: [[TMP33:%.*]] = load volatile <4 x i32>, ptr @si, align 8 3198 // CHECK-NEXT: [[SHR29:%.*]] = ashr <4 x i32> [[TMP33]], [[SPLAT_SPLAT28]] 3199 // CHECK-NEXT: store volatile <4 x i32> [[SHR29]], ptr @si, align 8 3200 // CHECK-NEXT: [[TMP34:%.*]] = load volatile <4 x i32>, ptr @si, align 8 3201 // CHECK-NEXT: [[SHR30:%.*]] = ashr <4 x i32> [[TMP34]], splat (i32 5) 3202 // CHECK-NEXT: store volatile <4 x i32> [[SHR30]], ptr @si, align 8 3203 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 3204 // CHECK-NEXT: [[TMP36:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 3205 // CHECK-NEXT: [[SHR31:%.*]] = lshr <4 x i32> [[TMP36]], [[TMP35]] 3206 // CHECK-NEXT: store volatile <4 x i32> [[SHR31]], ptr @ui, align 8 3207 // CHECK-NEXT: [[TMP37:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 3208 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 3209 // CHECK-NEXT: [[SHR32:%.*]] = lshr <4 x i32> [[TMP38]], [[TMP37]] 3210 // CHECK-NEXT: store volatile <4 x i32> [[SHR32]], ptr @ui, align 8 3211 // CHECK-NEXT: [[TMP39:%.*]] = load volatile i32, ptr @cnt, align 4 3212 // CHECK-NEXT: [[SPLAT_SPLATINSERT33:%.*]] = insertelement <4 x i32> poison, i32 [[TMP39]], i64 0 3213 // CHECK-NEXT: [[SPLAT_SPLAT34:%.*]] = shufflevector <4 x i32> [[SPLAT_SPLATINSERT33]], <4 x i32> poison, <4 x i32> zeroinitializer 3214 // CHECK-NEXT: [[TMP40:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 3215 // CHECK-NEXT: [[SHR35:%.*]] = lshr <4 x i32> [[TMP40]], [[SPLAT_SPLAT34]] 3216 // CHECK-NEXT: store volatile <4 x i32> [[SHR35]], ptr @ui, align 8 3217 // CHECK-NEXT: [[TMP41:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 3218 // CHECK-NEXT: [[SHR36:%.*]] = lshr <4 x i32> [[TMP41]], splat (i32 5) 3219 // CHECK-NEXT: store volatile <4 x i32> [[SHR36]], ptr @ui, align 8 3220 // CHECK-NEXT: [[TMP42:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 3221 // CHECK-NEXT: [[TMP43:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 3222 // CHECK-NEXT: [[SHR37:%.*]] = ashr <2 x i64> [[TMP43]], [[TMP42]] 3223 // CHECK-NEXT: store volatile <2 x i64> [[SHR37]], ptr @sl, align 8 3224 // CHECK-NEXT: [[TMP44:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 3225 // 
CHECK-NEXT: [[TMP45:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 3226 // CHECK-NEXT: [[SHR38:%.*]] = ashr <2 x i64> [[TMP45]], [[TMP44]] 3227 // CHECK-NEXT: store volatile <2 x i64> [[SHR38]], ptr @sl, align 8 3228 // CHECK-NEXT: [[TMP46:%.*]] = load volatile i32, ptr @cnt, align 4 3229 // CHECK-NEXT: [[SPLAT_SPLATINSERT39:%.*]] = insertelement <2 x i32> poison, i32 [[TMP46]], i64 0 3230 // CHECK-NEXT: [[SPLAT_SPLAT40:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT39]], <2 x i32> poison, <2 x i32> zeroinitializer 3231 // CHECK-NEXT: [[TMP47:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 3232 // CHECK-NEXT: [[SH_PROM41:%.*]] = zext <2 x i32> [[SPLAT_SPLAT40]] to <2 x i64> 3233 // CHECK-NEXT: [[SHR42:%.*]] = ashr <2 x i64> [[TMP47]], [[SH_PROM41]] 3234 // CHECK-NEXT: store volatile <2 x i64> [[SHR42]], ptr @sl, align 8 3235 // CHECK-NEXT: [[TMP48:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 3236 // CHECK-NEXT: [[SHR43:%.*]] = ashr <2 x i64> [[TMP48]], splat (i64 5) 3237 // CHECK-NEXT: store volatile <2 x i64> [[SHR43]], ptr @sl, align 8 3238 // CHECK-NEXT: [[TMP49:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 3239 // CHECK-NEXT: [[TMP50:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 3240 // CHECK-NEXT: [[SHR44:%.*]] = lshr <2 x i64> [[TMP50]], [[TMP49]] 3241 // CHECK-NEXT: store volatile <2 x i64> [[SHR44]], ptr @ul, align 8 3242 // CHECK-NEXT: [[TMP51:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 3243 // CHECK-NEXT: [[TMP52:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 3244 // CHECK-NEXT: [[SHR45:%.*]] = lshr <2 x i64> [[TMP52]], [[TMP51]] 3245 // CHECK-NEXT: store volatile <2 x i64> [[SHR45]], ptr @ul, align 8 3246 // CHECK-NEXT: [[TMP53:%.*]] = load volatile i32, ptr @cnt, align 4 3247 // CHECK-NEXT: [[SPLAT_SPLATINSERT46:%.*]] = insertelement <2 x i32> poison, i32 [[TMP53]], i64 0 3248 // CHECK-NEXT: [[SPLAT_SPLAT47:%.*]] = shufflevector <2 x i32> [[SPLAT_SPLATINSERT46]], <2 x i32> poison, <2 x i32> zeroinitializer 3249 // CHECK-NEXT: [[TMP54:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 3250 // CHECK-NEXT: [[SH_PROM48:%.*]] = zext <2 x i32> [[SPLAT_SPLAT47]] to <2 x i64> 3251 // CHECK-NEXT: [[SHR49:%.*]] = lshr <2 x i64> [[TMP54]], [[SH_PROM48]] 3252 // CHECK-NEXT: store volatile <2 x i64> [[SHR49]], ptr @ul, align 8 3253 // CHECK-NEXT: [[TMP55:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 3254 // CHECK-NEXT: [[SHR50:%.*]] = lshr <2 x i64> [[TMP55]], splat (i64 5) 3255 // CHECK-NEXT: store volatile <2 x i64> [[SHR50]], ptr @ul, align 8 3256 // CHECK-NEXT: [[TMP56:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 3257 // CHECK-NEXT: [[TMP57:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 3258 // CHECK-NEXT: [[SHR51:%.*]] = ashr <1 x i128> [[TMP57]], [[TMP56]] 3259 // CHECK-NEXT: store volatile <1 x i128> [[SHR51]], ptr @slll, align 8 3260 // CHECK-NEXT: [[TMP58:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 3261 // CHECK-NEXT: [[TMP59:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 3262 // CHECK-NEXT: [[SHR52:%.*]] = ashr <1 x i128> [[TMP59]], [[TMP58]] 3263 // CHECK-NEXT: store volatile <1 x i128> [[SHR52]], ptr @slll, align 8 3264 // CHECK-NEXT: [[TMP60:%.*]] = load volatile i32, ptr @cnt, align 4 3265 // CHECK-NEXT: [[SPLAT_SPLATINSERT53:%.*]] = insertelement <1 x i32> poison, i32 [[TMP60]], i64 0 3266 // CHECK-NEXT: [[SPLAT_SPLAT54:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT53]], <1 x i32> poison, <1 x i32> zeroinitializer 3267 // CHECK-NEXT: [[TMP61:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 3268 // 
CHECK-NEXT: [[SH_PROM55:%.*]] = zext <1 x i32> [[SPLAT_SPLAT54]] to <1 x i128> 3269 // CHECK-NEXT: [[SHR56:%.*]] = ashr <1 x i128> [[TMP61]], [[SH_PROM55]] 3270 // CHECK-NEXT: store volatile <1 x i128> [[SHR56]], ptr @slll, align 8 3271 // CHECK-NEXT: [[TMP62:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 3272 // CHECK-NEXT: [[SHR57:%.*]] = ashr <1 x i128> [[TMP62]], splat (i128 5) 3273 // CHECK-NEXT: store volatile <1 x i128> [[SHR57]], ptr @slll, align 8 3274 // CHECK-NEXT: [[TMP63:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 3275 // CHECK-NEXT: [[TMP64:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 3276 // CHECK-NEXT: [[SHR58:%.*]] = lshr <1 x i128> [[TMP64]], [[TMP63]] 3277 // CHECK-NEXT: store volatile <1 x i128> [[SHR58]], ptr @ulll, align 8 3278 // CHECK-NEXT: [[TMP65:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 3279 // CHECK-NEXT: [[TMP66:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 3280 // CHECK-NEXT: [[SHR59:%.*]] = lshr <1 x i128> [[TMP66]], [[TMP65]] 3281 // CHECK-NEXT: store volatile <1 x i128> [[SHR59]], ptr @ulll, align 8 3282 // CHECK-NEXT: [[TMP67:%.*]] = load volatile i32, ptr @cnt, align 4 3283 // CHECK-NEXT: [[SPLAT_SPLATINSERT60:%.*]] = insertelement <1 x i32> poison, i32 [[TMP67]], i64 0 3284 // CHECK-NEXT: [[SPLAT_SPLAT61:%.*]] = shufflevector <1 x i32> [[SPLAT_SPLATINSERT60]], <1 x i32> poison, <1 x i32> zeroinitializer 3285 // CHECK-NEXT: [[TMP68:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 3286 // CHECK-NEXT: [[SH_PROM62:%.*]] = zext <1 x i32> [[SPLAT_SPLAT61]] to <1 x i128> 3287 // CHECK-NEXT: [[SHR63:%.*]] = lshr <1 x i128> [[TMP68]], [[SH_PROM62]] 3288 // CHECK-NEXT: store volatile <1 x i128> [[SHR63]], ptr @ulll, align 8 3289 // CHECK-NEXT: [[TMP69:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 3290 // CHECK-NEXT: [[SHR64:%.*]] = lshr <1 x i128> [[TMP69]], splat (i128 5) 3291 // CHECK-NEXT: store volatile <1 x i128> [[SHR64]], ptr @ulll, align 8 3292 // CHECK-NEXT: ret void 3293 // 3294 void test_sr_assign(void) { 3295 3296 sc >>= sc2; 3297 sc >>= uc2; 3298 sc >>= cnt; 3299 sc >>= 5; 3300 uc >>= sc2; 3301 uc >>= uc2; 3302 uc >>= cnt; 3303 uc >>= 5; 3304 3305 ss >>= ss2; 3306 ss >>= us2; 3307 ss >>= cnt; 3308 ss >>= 5; 3309 us >>= ss2; 3310 us >>= us2; 3311 us >>= cnt; 3312 us >>= 5; 3313 3314 si >>= si2; 3315 si >>= ui2; 3316 si >>= cnt; 3317 si >>= 5; 3318 ui >>= si2; 3319 ui >>= ui2; 3320 ui >>= cnt; 3321 ui >>= 5; 3322 3323 sl >>= sl2; 3324 sl >>= ul2; 3325 sl >>= cnt; 3326 sl >>= 5; 3327 ul >>= sl2; 3328 ul >>= ul2; 3329 ul >>= cnt; 3330 ul >>= 5; 3331 3332 slll >>= slll2; 3333 slll >>= ulll2; 3334 slll >>= cnt; 3335 slll >>= 5; 3336 ulll >>= slll2; 3337 ulll >>= ulll2; 3338 ulll >>= cnt; 3339 ulll >>= 5; 3340 } 3341 3342 3343 // CHECK-LABEL: define dso_local void @test_cmpeq( 3344 // CHECK-SAME: ) #[[ATTR0]] { 3345 // CHECK-NEXT: [[ENTRY:.*:]] 3346 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 3347 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 3348 // CHECK-NEXT: [[CMP:%.*]] = icmp eq <16 x i8> [[TMP0]], [[TMP1]] 3349 // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8> 3350 // CHECK-NEXT: store volatile <16 x i8> [[SEXT]], ptr @bc, align 8 3351 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 3352 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 3353 // CHECK-NEXT: [[CMP1:%.*]] = icmp eq <16 x i8> [[TMP2]], [[TMP3]] 3354 // CHECK-NEXT: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8> 3355 
// CHECK-NEXT: store volatile <16 x i8> [[SEXT2]], ptr @bc, align 8 3356 // CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 3357 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 3358 // CHECK-NEXT: [[CMP3:%.*]] = icmp eq <16 x i8> [[TMP4]], [[TMP5]] 3359 // CHECK-NEXT: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8> 3360 // CHECK-NEXT: store volatile <16 x i8> [[SEXT4]], ptr @bc, align 8 3361 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 3362 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 3363 // CHECK-NEXT: [[CMP5:%.*]] = icmp eq <16 x i8> [[TMP6]], [[TMP7]] 3364 // CHECK-NEXT: [[SEXT6:%.*]] = sext <16 x i1> [[CMP5]] to <16 x i8> 3365 // CHECK-NEXT: store volatile <16 x i8> [[SEXT6]], ptr @bc, align 8 3366 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 3367 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 3368 // CHECK-NEXT: [[CMP7:%.*]] = icmp eq <16 x i8> [[TMP8]], [[TMP9]] 3369 // CHECK-NEXT: [[SEXT8:%.*]] = sext <16 x i1> [[CMP7]] to <16 x i8> 3370 // CHECK-NEXT: store volatile <16 x i8> [[SEXT8]], ptr @bc, align 8 3371 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 3372 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 3373 // CHECK-NEXT: [[CMP9:%.*]] = icmp eq <16 x i8> [[TMP10]], [[TMP11]] 3374 // CHECK-NEXT: [[SEXT10:%.*]] = sext <16 x i1> [[CMP9]] to <16 x i8> 3375 // CHECK-NEXT: store volatile <16 x i8> [[SEXT10]], ptr @bc, align 8 3376 // CHECK-NEXT: [[TMP12:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 3377 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 3378 // CHECK-NEXT: [[CMP11:%.*]] = icmp eq <16 x i8> [[TMP12]], [[TMP13]] 3379 // CHECK-NEXT: [[SEXT12:%.*]] = sext <16 x i1> [[CMP11]] to <16 x i8> 3380 // CHECK-NEXT: store volatile <16 x i8> [[SEXT12]], ptr @bc, align 8 3381 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 3382 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 3383 // CHECK-NEXT: [[CMP13:%.*]] = icmp eq <8 x i16> [[TMP14]], [[TMP15]] 3384 // CHECK-NEXT: [[SEXT14:%.*]] = sext <8 x i1> [[CMP13]] to <8 x i16> 3385 // CHECK-NEXT: store volatile <8 x i16> [[SEXT14]], ptr @bs, align 8 3386 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 3387 // CHECK-NEXT: [[TMP17:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 3388 // CHECK-NEXT: [[CMP15:%.*]] = icmp eq <8 x i16> [[TMP16]], [[TMP17]] 3389 // CHECK-NEXT: [[SEXT16:%.*]] = sext <8 x i1> [[CMP15]] to <8 x i16> 3390 // CHECK-NEXT: store volatile <8 x i16> [[SEXT16]], ptr @bs, align 8 3391 // CHECK-NEXT: [[TMP18:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 3392 // CHECK-NEXT: [[TMP19:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 3393 // CHECK-NEXT: [[CMP17:%.*]] = icmp eq <8 x i16> [[TMP18]], [[TMP19]] 3394 // CHECK-NEXT: [[SEXT18:%.*]] = sext <8 x i1> [[CMP17]] to <8 x i16> 3395 // CHECK-NEXT: store volatile <8 x i16> [[SEXT18]], ptr @bs, align 8 3396 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <8 x i16>, ptr @us, align 8 3397 // CHECK-NEXT: [[TMP21:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 3398 // CHECK-NEXT: [[CMP19:%.*]] = icmp eq <8 x i16> [[TMP20]], [[TMP21]] 3399 // CHECK-NEXT: [[SEXT20:%.*]] = sext <8 x i1> [[CMP19]] to <8 x i16> 3400 // CHECK-NEXT: store volatile <8 x i16> [[SEXT20]], ptr @bs, align 8 3401 // CHECK-NEXT: [[TMP22:%.*]] = load volatile <8 x i16>, ptr @us, align 8 3402 // 
CHECK-NEXT: [[TMP23:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 3403 // CHECK-NEXT: [[CMP21:%.*]] = icmp eq <8 x i16> [[TMP22]], [[TMP23]] 3404 // CHECK-NEXT: [[SEXT22:%.*]] = sext <8 x i1> [[CMP21]] to <8 x i16> 3405 // CHECK-NEXT: store volatile <8 x i16> [[SEXT22]], ptr @bs, align 8 3406 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 3407 // CHECK-NEXT: [[TMP25:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 3408 // CHECK-NEXT: [[CMP23:%.*]] = icmp eq <8 x i16> [[TMP24]], [[TMP25]] 3409 // CHECK-NEXT: [[SEXT24:%.*]] = sext <8 x i1> [[CMP23]] to <8 x i16> 3410 // CHECK-NEXT: store volatile <8 x i16> [[SEXT24]], ptr @bs, align 8 3411 // CHECK-NEXT: [[TMP26:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 3412 // CHECK-NEXT: [[TMP27:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 3413 // CHECK-NEXT: [[CMP25:%.*]] = icmp eq <8 x i16> [[TMP26]], [[TMP27]] 3414 // CHECK-NEXT: [[SEXT26:%.*]] = sext <8 x i1> [[CMP25]] to <8 x i16> 3415 // CHECK-NEXT: store volatile <8 x i16> [[SEXT26]], ptr @bs, align 8 3416 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @si, align 8 3417 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 3418 // CHECK-NEXT: [[CMP27:%.*]] = icmp eq <4 x i32> [[TMP28]], [[TMP29]] 3419 // CHECK-NEXT: [[SEXT28:%.*]] = sext <4 x i1> [[CMP27]] to <4 x i32> 3420 // CHECK-NEXT: store volatile <4 x i32> [[SEXT28]], ptr @bi, align 8 3421 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <4 x i32>, ptr @si, align 8 3422 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 3423 // CHECK-NEXT: [[CMP29:%.*]] = icmp eq <4 x i32> [[TMP30]], [[TMP31]] 3424 // CHECK-NEXT: [[SEXT30:%.*]] = sext <4 x i1> [[CMP29]] to <4 x i32> 3425 // CHECK-NEXT: store volatile <4 x i32> [[SEXT30]], ptr @bi, align 8 3426 // CHECK-NEXT: [[TMP32:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 3427 // CHECK-NEXT: [[TMP33:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 3428 // CHECK-NEXT: [[CMP31:%.*]] = icmp eq <4 x i32> [[TMP32]], [[TMP33]] 3429 // CHECK-NEXT: [[SEXT32:%.*]] = sext <4 x i1> [[CMP31]] to <4 x i32> 3430 // CHECK-NEXT: store volatile <4 x i32> [[SEXT32]], ptr @bi, align 8 3431 // CHECK-NEXT: [[TMP34:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 3432 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 3433 // CHECK-NEXT: [[CMP33:%.*]] = icmp eq <4 x i32> [[TMP34]], [[TMP35]] 3434 // CHECK-NEXT: [[SEXT34:%.*]] = sext <4 x i1> [[CMP33]] to <4 x i32> 3435 // CHECK-NEXT: store volatile <4 x i32> [[SEXT34]], ptr @bi, align 8 3436 // CHECK-NEXT: [[TMP36:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 3437 // CHECK-NEXT: [[TMP37:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 3438 // CHECK-NEXT: [[CMP35:%.*]] = icmp eq <4 x i32> [[TMP36]], [[TMP37]] 3439 // CHECK-NEXT: [[SEXT36:%.*]] = sext <4 x i1> [[CMP35]] to <4 x i32> 3440 // CHECK-NEXT: store volatile <4 x i32> [[SEXT36]], ptr @bi, align 8 3441 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 3442 // CHECK-NEXT: [[TMP39:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 3443 // CHECK-NEXT: [[CMP37:%.*]] = icmp eq <4 x i32> [[TMP38]], [[TMP39]] 3444 // CHECK-NEXT: [[SEXT38:%.*]] = sext <4 x i1> [[CMP37]] to <4 x i32> 3445 // CHECK-NEXT: store volatile <4 x i32> [[SEXT38]], ptr @bi, align 8 3446 // CHECK-NEXT: [[TMP40:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 3447 // CHECK-NEXT: [[TMP41:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 3448 // CHECK-NEXT: [[CMP39:%.*]] = icmp eq <4 x i32> 
[[TMP40]], [[TMP41]] 3449 // CHECK-NEXT: [[SEXT40:%.*]] = sext <4 x i1> [[CMP39]] to <4 x i32> 3450 // CHECK-NEXT: store volatile <4 x i32> [[SEXT40]], ptr @bi, align 8 3451 // CHECK-NEXT: [[TMP42:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 3452 // CHECK-NEXT: [[TMP43:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 3453 // CHECK-NEXT: [[CMP41:%.*]] = icmp eq <2 x i64> [[TMP42]], [[TMP43]] 3454 // CHECK-NEXT: [[SEXT42:%.*]] = sext <2 x i1> [[CMP41]] to <2 x i64> 3455 // CHECK-NEXT: store volatile <2 x i64> [[SEXT42]], ptr @bl, align 8 3456 // CHECK-NEXT: [[TMP44:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 3457 // CHECK-NEXT: [[TMP45:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 3458 // CHECK-NEXT: [[CMP43:%.*]] = icmp eq <2 x i64> [[TMP44]], [[TMP45]] 3459 // CHECK-NEXT: [[SEXT44:%.*]] = sext <2 x i1> [[CMP43]] to <2 x i64> 3460 // CHECK-NEXT: store volatile <2 x i64> [[SEXT44]], ptr @bl, align 8 3461 // CHECK-NEXT: [[TMP46:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 3462 // CHECK-NEXT: [[TMP47:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 3463 // CHECK-NEXT: [[CMP45:%.*]] = icmp eq <2 x i64> [[TMP46]], [[TMP47]] 3464 // CHECK-NEXT: [[SEXT46:%.*]] = sext <2 x i1> [[CMP45]] to <2 x i64> 3465 // CHECK-NEXT: store volatile <2 x i64> [[SEXT46]], ptr @bl, align 8 3466 // CHECK-NEXT: [[TMP48:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 3467 // CHECK-NEXT: [[TMP49:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 3468 // CHECK-NEXT: [[CMP47:%.*]] = icmp eq <2 x i64> [[TMP48]], [[TMP49]] 3469 // CHECK-NEXT: [[SEXT48:%.*]] = sext <2 x i1> [[CMP47]] to <2 x i64> 3470 // CHECK-NEXT: store volatile <2 x i64> [[SEXT48]], ptr @bl, align 8 3471 // CHECK-NEXT: [[TMP50:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 3472 // CHECK-NEXT: [[TMP51:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 3473 // CHECK-NEXT: [[CMP49:%.*]] = icmp eq <2 x i64> [[TMP50]], [[TMP51]] 3474 // CHECK-NEXT: [[SEXT50:%.*]] = sext <2 x i1> [[CMP49]] to <2 x i64> 3475 // CHECK-NEXT: store volatile <2 x i64> [[SEXT50]], ptr @bl, align 8 3476 // CHECK-NEXT: [[TMP52:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 3477 // CHECK-NEXT: [[TMP53:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 3478 // CHECK-NEXT: [[CMP51:%.*]] = icmp eq <2 x i64> [[TMP52]], [[TMP53]] 3479 // CHECK-NEXT: [[SEXT52:%.*]] = sext <2 x i1> [[CMP51]] to <2 x i64> 3480 // CHECK-NEXT: store volatile <2 x i64> [[SEXT52]], ptr @bl, align 8 3481 // CHECK-NEXT: [[TMP54:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 3482 // CHECK-NEXT: [[TMP55:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 3483 // CHECK-NEXT: [[CMP53:%.*]] = icmp eq <2 x i64> [[TMP54]], [[TMP55]] 3484 // CHECK-NEXT: [[SEXT54:%.*]] = sext <2 x i1> [[CMP53]] to <2 x i64> 3485 // CHECK-NEXT: store volatile <2 x i64> [[SEXT54]], ptr @bl, align 8 3486 // CHECK-NEXT: [[TMP56:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 3487 // CHECK-NEXT: [[TMP57:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 3488 // CHECK-NEXT: [[CMP55:%.*]] = icmp eq <1 x i128> [[TMP56]], [[TMP57]] 3489 // CHECK-NEXT: [[SEXT56:%.*]] = sext <1 x i1> [[CMP55]] to <1 x i128> 3490 // CHECK-NEXT: store volatile <1 x i128> [[SEXT56]], ptr @blll, align 8 3491 // CHECK-NEXT: [[TMP58:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 3492 // CHECK-NEXT: [[TMP59:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 3493 // CHECK-NEXT: [[CMP57:%.*]] = icmp eq <1 x i128> [[TMP58]], [[TMP59]] 3494 // CHECK-NEXT: [[SEXT58:%.*]] = sext <1 x i1> [[CMP57]] to <1 x i128> 3495 // CHECK-NEXT: 
store volatile <1 x i128> [[SEXT58]], ptr @blll, align 8 3496 // CHECK-NEXT: [[TMP60:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 3497 // CHECK-NEXT: [[TMP61:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 3498 // CHECK-NEXT: [[CMP59:%.*]] = icmp eq <1 x i128> [[TMP60]], [[TMP61]] 3499 // CHECK-NEXT: [[SEXT60:%.*]] = sext <1 x i1> [[CMP59]] to <1 x i128> 3500 // CHECK-NEXT: store volatile <1 x i128> [[SEXT60]], ptr @blll, align 8 3501 // CHECK-NEXT: [[TMP62:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 3502 // CHECK-NEXT: [[TMP63:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 3503 // CHECK-NEXT: [[CMP61:%.*]] = icmp eq <1 x i128> [[TMP62]], [[TMP63]] 3504 // CHECK-NEXT: [[SEXT62:%.*]] = sext <1 x i1> [[CMP61]] to <1 x i128> 3505 // CHECK-NEXT: store volatile <1 x i128> [[SEXT62]], ptr @blll, align 8 3506 // CHECK-NEXT: [[TMP64:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 3507 // CHECK-NEXT: [[TMP65:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 3508 // CHECK-NEXT: [[CMP63:%.*]] = icmp eq <1 x i128> [[TMP64]], [[TMP65]] 3509 // CHECK-NEXT: [[SEXT64:%.*]] = sext <1 x i1> [[CMP63]] to <1 x i128> 3510 // CHECK-NEXT: store volatile <1 x i128> [[SEXT64]], ptr @blll, align 8 3511 // CHECK-NEXT: [[TMP66:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 3512 // CHECK-NEXT: [[TMP67:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 3513 // CHECK-NEXT: [[CMP65:%.*]] = icmp eq <1 x i128> [[TMP66]], [[TMP67]] 3514 // CHECK-NEXT: [[SEXT66:%.*]] = sext <1 x i1> [[CMP65]] to <1 x i128> 3515 // CHECK-NEXT: store volatile <1 x i128> [[SEXT66]], ptr @blll, align 8 3516 // CHECK-NEXT: [[TMP68:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 3517 // CHECK-NEXT: [[TMP69:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 3518 // CHECK-NEXT: [[CMP67:%.*]] = icmp eq <1 x i128> [[TMP68]], [[TMP69]] 3519 // CHECK-NEXT: [[SEXT68:%.*]] = sext <1 x i1> [[CMP67]] to <1 x i128> 3520 // CHECK-NEXT: store volatile <1 x i128> [[SEXT68]], ptr @blll, align 8 3521 // CHECK-NEXT: [[TMP70:%.*]] = load volatile <2 x double>, ptr @fd, align 8 3522 // CHECK-NEXT: [[TMP71:%.*]] = load volatile <2 x double>, ptr @fd2, align 8 3523 // CHECK-NEXT: [[CMP69:%.*]] = fcmp oeq <2 x double> [[TMP70]], [[TMP71]] 3524 // CHECK-NEXT: [[SEXT70:%.*]] = sext <2 x i1> [[CMP69]] to <2 x i64> 3525 // CHECK-NEXT: store volatile <2 x i64> [[SEXT70]], ptr @bl, align 8 3526 // CHECK-NEXT: ret void 3527 // 3528 void test_cmpeq(void) { 3529 3530 bc = sc == sc2; 3531 bc = sc == bc2; 3532 bc = bc == sc2; 3533 bc = uc == uc2; 3534 bc = uc == bc2; 3535 bc = bc == uc2; 3536 bc = bc == bc2; 3537 3538 bs = ss == ss2; 3539 bs = ss == bs2; 3540 bs = bs == ss2; 3541 bs = us == us2; 3542 bs = us == bs2; 3543 bs = bs == us2; 3544 bs = bs == bs2; 3545 3546 bi = si == si2; 3547 bi = si == bi2; 3548 bi = bi == si2; 3549 bi = ui == ui2; 3550 bi = ui == bi2; 3551 bi = bi == ui2; 3552 bi = bi == bi2; 3553 3554 bl = sl == sl2; 3555 bl = sl == bl2; 3556 bl = bl == sl2; 3557 bl = ul == ul2; 3558 bl = ul == bl2; 3559 bl = bl == ul2; 3560 bl = bl == bl2; 3561 3562 blll = slll == slll2; 3563 blll = slll == blll2; 3564 blll = blll == slll2; 3565 blll = ulll == ulll2; 3566 blll = ulll == blll2; 3567 blll = blll == ulll2; 3568 blll = blll == blll2; 3569 3570 bl = fd == fd2; 3571 } 3572 3573 // CHECK-LABEL: define dso_local void @test_cmpne( 3574 // CHECK-SAME: ) #[[ATTR0]] { 3575 // CHECK-NEXT: [[ENTRY:.*:]] 3576 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 3577 // CHECK-NEXT: [[TMP1:%.*]] = load 
volatile <16 x i8>, ptr @sc2, align 8 3578 // CHECK-NEXT: [[CMP:%.*]] = icmp ne <16 x i8> [[TMP0]], [[TMP1]] 3579 // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8> 3580 // CHECK-NEXT: store volatile <16 x i8> [[SEXT]], ptr @bc, align 8 3581 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 3582 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 3583 // CHECK-NEXT: [[CMP1:%.*]] = icmp ne <16 x i8> [[TMP2]], [[TMP3]] 3584 // CHECK-NEXT: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8> 3585 // CHECK-NEXT: store volatile <16 x i8> [[SEXT2]], ptr @bc, align 8 3586 // CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 3587 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 3588 // CHECK-NEXT: [[CMP3:%.*]] = icmp ne <16 x i8> [[TMP4]], [[TMP5]] 3589 // CHECK-NEXT: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8> 3590 // CHECK-NEXT: store volatile <16 x i8> [[SEXT4]], ptr @bc, align 8 3591 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 3592 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 3593 // CHECK-NEXT: [[CMP5:%.*]] = icmp ne <16 x i8> [[TMP6]], [[TMP7]] 3594 // CHECK-NEXT: [[SEXT6:%.*]] = sext <16 x i1> [[CMP5]] to <16 x i8> 3595 // CHECK-NEXT: store volatile <16 x i8> [[SEXT6]], ptr @bc, align 8 3596 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 3597 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 3598 // CHECK-NEXT: [[CMP7:%.*]] = icmp ne <16 x i8> [[TMP8]], [[TMP9]] 3599 // CHECK-NEXT: [[SEXT8:%.*]] = sext <16 x i1> [[CMP7]] to <16 x i8> 3600 // CHECK-NEXT: store volatile <16 x i8> [[SEXT8]], ptr @bc, align 8 3601 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 3602 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 3603 // CHECK-NEXT: [[CMP9:%.*]] = icmp ne <16 x i8> [[TMP10]], [[TMP11]] 3604 // CHECK-NEXT: [[SEXT10:%.*]] = sext <16 x i1> [[CMP9]] to <16 x i8> 3605 // CHECK-NEXT: store volatile <16 x i8> [[SEXT10]], ptr @bc, align 8 3606 // CHECK-NEXT: [[TMP12:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 3607 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 3608 // CHECK-NEXT: [[CMP11:%.*]] = icmp ne <16 x i8> [[TMP12]], [[TMP13]] 3609 // CHECK-NEXT: [[SEXT12:%.*]] = sext <16 x i1> [[CMP11]] to <16 x i8> 3610 // CHECK-NEXT: store volatile <16 x i8> [[SEXT12]], ptr @bc, align 8 3611 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 3612 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 3613 // CHECK-NEXT: [[CMP13:%.*]] = icmp ne <8 x i16> [[TMP14]], [[TMP15]] 3614 // CHECK-NEXT: [[SEXT14:%.*]] = sext <8 x i1> [[CMP13]] to <8 x i16> 3615 // CHECK-NEXT: store volatile <8 x i16> [[SEXT14]], ptr @bs, align 8 3616 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 3617 // CHECK-NEXT: [[TMP17:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 3618 // CHECK-NEXT: [[CMP15:%.*]] = icmp ne <8 x i16> [[TMP16]], [[TMP17]] 3619 // CHECK-NEXT: [[SEXT16:%.*]] = sext <8 x i1> [[CMP15]] to <8 x i16> 3620 // CHECK-NEXT: store volatile <8 x i16> [[SEXT16]], ptr @bs, align 8 3621 // CHECK-NEXT: [[TMP18:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 3622 // CHECK-NEXT: [[TMP19:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 3623 // CHECK-NEXT: [[CMP17:%.*]] = icmp ne <8 x i16> [[TMP18]], [[TMP19]] 3624 // CHECK-NEXT: [[SEXT18:%.*]] = sext <8 x i1> 
[[CMP17]] to <8 x i16> 3625 // CHECK-NEXT: store volatile <8 x i16> [[SEXT18]], ptr @bs, align 8 3626 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <8 x i16>, ptr @us, align 8 3627 // CHECK-NEXT: [[TMP21:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 3628 // CHECK-NEXT: [[CMP19:%.*]] = icmp ne <8 x i16> [[TMP20]], [[TMP21]] 3629 // CHECK-NEXT: [[SEXT20:%.*]] = sext <8 x i1> [[CMP19]] to <8 x i16> 3630 // CHECK-NEXT: store volatile <8 x i16> [[SEXT20]], ptr @bs, align 8 3631 // CHECK-NEXT: [[TMP22:%.*]] = load volatile <8 x i16>, ptr @us, align 8 3632 // CHECK-NEXT: [[TMP23:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 3633 // CHECK-NEXT: [[CMP21:%.*]] = icmp ne <8 x i16> [[TMP22]], [[TMP23]] 3634 // CHECK-NEXT: [[SEXT22:%.*]] = sext <8 x i1> [[CMP21]] to <8 x i16> 3635 // CHECK-NEXT: store volatile <8 x i16> [[SEXT22]], ptr @bs, align 8 3636 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 3637 // CHECK-NEXT: [[TMP25:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 3638 // CHECK-NEXT: [[CMP23:%.*]] = icmp ne <8 x i16> [[TMP24]], [[TMP25]] 3639 // CHECK-NEXT: [[SEXT24:%.*]] = sext <8 x i1> [[CMP23]] to <8 x i16> 3640 // CHECK-NEXT: store volatile <8 x i16> [[SEXT24]], ptr @bs, align 8 3641 // CHECK-NEXT: [[TMP26:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 3642 // CHECK-NEXT: [[TMP27:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 3643 // CHECK-NEXT: [[CMP25:%.*]] = icmp ne <8 x i16> [[TMP26]], [[TMP27]] 3644 // CHECK-NEXT: [[SEXT26:%.*]] = sext <8 x i1> [[CMP25]] to <8 x i16> 3645 // CHECK-NEXT: store volatile <8 x i16> [[SEXT26]], ptr @bs, align 8 3646 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @si, align 8 3647 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 3648 // CHECK-NEXT: [[CMP27:%.*]] = icmp ne <4 x i32> [[TMP28]], [[TMP29]] 3649 // CHECK-NEXT: [[SEXT28:%.*]] = sext <4 x i1> [[CMP27]] to <4 x i32> 3650 // CHECK-NEXT: store volatile <4 x i32> [[SEXT28]], ptr @bi, align 8 3651 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <4 x i32>, ptr @si, align 8 3652 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 3653 // CHECK-NEXT: [[CMP29:%.*]] = icmp ne <4 x i32> [[TMP30]], [[TMP31]] 3654 // CHECK-NEXT: [[SEXT30:%.*]] = sext <4 x i1> [[CMP29]] to <4 x i32> 3655 // CHECK-NEXT: store volatile <4 x i32> [[SEXT30]], ptr @bi, align 8 3656 // CHECK-NEXT: [[TMP32:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 3657 // CHECK-NEXT: [[TMP33:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 3658 // CHECK-NEXT: [[CMP31:%.*]] = icmp ne <4 x i32> [[TMP32]], [[TMP33]] 3659 // CHECK-NEXT: [[SEXT32:%.*]] = sext <4 x i1> [[CMP31]] to <4 x i32> 3660 // CHECK-NEXT: store volatile <4 x i32> [[SEXT32]], ptr @bi, align 8 3661 // CHECK-NEXT: [[TMP34:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 3662 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 3663 // CHECK-NEXT: [[CMP33:%.*]] = icmp ne <4 x i32> [[TMP34]], [[TMP35]] 3664 // CHECK-NEXT: [[SEXT34:%.*]] = sext <4 x i1> [[CMP33]] to <4 x i32> 3665 // CHECK-NEXT: store volatile <4 x i32> [[SEXT34]], ptr @bi, align 8 3666 // CHECK-NEXT: [[TMP36:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 3667 // CHECK-NEXT: [[TMP37:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 3668 // CHECK-NEXT: [[CMP35:%.*]] = icmp ne <4 x i32> [[TMP36]], [[TMP37]] 3669 // CHECK-NEXT: [[SEXT36:%.*]] = sext <4 x i1> [[CMP35]] to <4 x i32> 3670 // CHECK-NEXT: store volatile <4 x i32> [[SEXT36]], ptr @bi, align 8 3671 // CHECK-NEXT: [[TMP38:%.*]] = 
load volatile <4 x i32>, ptr @bi, align 8 3672 // CHECK-NEXT: [[TMP39:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 3673 // CHECK-NEXT: [[CMP37:%.*]] = icmp ne <4 x i32> [[TMP38]], [[TMP39]] 3674 // CHECK-NEXT: [[SEXT38:%.*]] = sext <4 x i1> [[CMP37]] to <4 x i32> 3675 // CHECK-NEXT: store volatile <4 x i32> [[SEXT38]], ptr @bi, align 8 3676 // CHECK-NEXT: [[TMP40:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 3677 // CHECK-NEXT: [[TMP41:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 3678 // CHECK-NEXT: [[CMP39:%.*]] = icmp ne <4 x i32> [[TMP40]], [[TMP41]] 3679 // CHECK-NEXT: [[SEXT40:%.*]] = sext <4 x i1> [[CMP39]] to <4 x i32> 3680 // CHECK-NEXT: store volatile <4 x i32> [[SEXT40]], ptr @bi, align 8 3681 // CHECK-NEXT: [[TMP42:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 3682 // CHECK-NEXT: [[TMP43:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 3683 // CHECK-NEXT: [[CMP41:%.*]] = icmp ne <2 x i64> [[TMP42]], [[TMP43]] 3684 // CHECK-NEXT: [[SEXT42:%.*]] = sext <2 x i1> [[CMP41]] to <2 x i64> 3685 // CHECK-NEXT: store volatile <2 x i64> [[SEXT42]], ptr @bl, align 8 3686 // CHECK-NEXT: [[TMP44:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 3687 // CHECK-NEXT: [[TMP45:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 3688 // CHECK-NEXT: [[CMP43:%.*]] = icmp ne <2 x i64> [[TMP44]], [[TMP45]] 3689 // CHECK-NEXT: [[SEXT44:%.*]] = sext <2 x i1> [[CMP43]] to <2 x i64> 3690 // CHECK-NEXT: store volatile <2 x i64> [[SEXT44]], ptr @bl, align 8 3691 // CHECK-NEXT: [[TMP46:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 3692 // CHECK-NEXT: [[TMP47:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 3693 // CHECK-NEXT: [[CMP45:%.*]] = icmp ne <2 x i64> [[TMP46]], [[TMP47]] 3694 // CHECK-NEXT: [[SEXT46:%.*]] = sext <2 x i1> [[CMP45]] to <2 x i64> 3695 // CHECK-NEXT: store volatile <2 x i64> [[SEXT46]], ptr @bl, align 8 3696 // CHECK-NEXT: [[TMP48:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 3697 // CHECK-NEXT: [[TMP49:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 3698 // CHECK-NEXT: [[CMP47:%.*]] = icmp ne <2 x i64> [[TMP48]], [[TMP49]] 3699 // CHECK-NEXT: [[SEXT48:%.*]] = sext <2 x i1> [[CMP47]] to <2 x i64> 3700 // CHECK-NEXT: store volatile <2 x i64> [[SEXT48]], ptr @bl, align 8 3701 // CHECK-NEXT: [[TMP50:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 3702 // CHECK-NEXT: [[TMP51:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 3703 // CHECK-NEXT: [[CMP49:%.*]] = icmp ne <2 x i64> [[TMP50]], [[TMP51]] 3704 // CHECK-NEXT: [[SEXT50:%.*]] = sext <2 x i1> [[CMP49]] to <2 x i64> 3705 // CHECK-NEXT: store volatile <2 x i64> [[SEXT50]], ptr @bl, align 8 3706 // CHECK-NEXT: [[TMP52:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 3707 // CHECK-NEXT: [[TMP53:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 3708 // CHECK-NEXT: [[CMP51:%.*]] = icmp ne <2 x i64> [[TMP52]], [[TMP53]] 3709 // CHECK-NEXT: [[SEXT52:%.*]] = sext <2 x i1> [[CMP51]] to <2 x i64> 3710 // CHECK-NEXT: store volatile <2 x i64> [[SEXT52]], ptr @bl, align 8 3711 // CHECK-NEXT: [[TMP54:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 3712 // CHECK-NEXT: [[TMP55:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 3713 // CHECK-NEXT: [[CMP53:%.*]] = icmp ne <2 x i64> [[TMP54]], [[TMP55]] 3714 // CHECK-NEXT: [[SEXT54:%.*]] = sext <2 x i1> [[CMP53]] to <2 x i64> 3715 // CHECK-NEXT: store volatile <2 x i64> [[SEXT54]], ptr @bl, align 8 3716 // CHECK-NEXT: [[TMP56:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 3717 // CHECK-NEXT: [[TMP57:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 3718 // 
CHECK-NEXT: [[CMP55:%.*]] = icmp ne <1 x i128> [[TMP56]], [[TMP57]] 3719 // CHECK-NEXT: [[SEXT56:%.*]] = sext <1 x i1> [[CMP55]] to <1 x i128> 3720 // CHECK-NEXT: store volatile <1 x i128> [[SEXT56]], ptr @blll, align 8 3721 // CHECK-NEXT: [[TMP58:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 3722 // CHECK-NEXT: [[TMP59:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 3723 // CHECK-NEXT: [[CMP57:%.*]] = icmp ne <1 x i128> [[TMP58]], [[TMP59]] 3724 // CHECK-NEXT: [[SEXT58:%.*]] = sext <1 x i1> [[CMP57]] to <1 x i128> 3725 // CHECK-NEXT: store volatile <1 x i128> [[SEXT58]], ptr @blll, align 8 3726 // CHECK-NEXT: [[TMP60:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 3727 // CHECK-NEXT: [[TMP61:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 3728 // CHECK-NEXT: [[CMP59:%.*]] = icmp ne <1 x i128> [[TMP60]], [[TMP61]] 3729 // CHECK-NEXT: [[SEXT60:%.*]] = sext <1 x i1> [[CMP59]] to <1 x i128> 3730 // CHECK-NEXT: store volatile <1 x i128> [[SEXT60]], ptr @blll, align 8 3731 // CHECK-NEXT: [[TMP62:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 3732 // CHECK-NEXT: [[TMP63:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 3733 // CHECK-NEXT: [[CMP61:%.*]] = icmp ne <1 x i128> [[TMP62]], [[TMP63]] 3734 // CHECK-NEXT: [[SEXT62:%.*]] = sext <1 x i1> [[CMP61]] to <1 x i128> 3735 // CHECK-NEXT: store volatile <1 x i128> [[SEXT62]], ptr @blll, align 8 3736 // CHECK-NEXT: [[TMP64:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 3737 // CHECK-NEXT: [[TMP65:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 3738 // CHECK-NEXT: [[CMP63:%.*]] = icmp ne <1 x i128> [[TMP64]], [[TMP65]] 3739 // CHECK-NEXT: [[SEXT64:%.*]] = sext <1 x i1> [[CMP63]] to <1 x i128> 3740 // CHECK-NEXT: store volatile <1 x i128> [[SEXT64]], ptr @blll, align 8 3741 // CHECK-NEXT: [[TMP66:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 3742 // CHECK-NEXT: [[TMP67:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 3743 // CHECK-NEXT: [[CMP65:%.*]] = icmp ne <1 x i128> [[TMP66]], [[TMP67]] 3744 // CHECK-NEXT: [[SEXT66:%.*]] = sext <1 x i1> [[CMP65]] to <1 x i128> 3745 // CHECK-NEXT: store volatile <1 x i128> [[SEXT66]], ptr @blll, align 8 3746 // CHECK-NEXT: [[TMP68:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 3747 // CHECK-NEXT: [[TMP69:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 3748 // CHECK-NEXT: [[CMP67:%.*]] = icmp ne <1 x i128> [[TMP68]], [[TMP69]] 3749 // CHECK-NEXT: [[SEXT68:%.*]] = sext <1 x i1> [[CMP67]] to <1 x i128> 3750 // CHECK-NEXT: store volatile <1 x i128> [[SEXT68]], ptr @blll, align 8 3751 // CHECK-NEXT: [[TMP70:%.*]] = load volatile <2 x double>, ptr @fd, align 8 3752 // CHECK-NEXT: [[TMP71:%.*]] = load volatile <2 x double>, ptr @fd2, align 8 3753 // CHECK-NEXT: [[CMP69:%.*]] = fcmp une <2 x double> [[TMP70]], [[TMP71]] 3754 // CHECK-NEXT: [[SEXT70:%.*]] = sext <2 x i1> [[CMP69]] to <2 x i64> 3755 // CHECK-NEXT: store volatile <2 x i64> [[SEXT70]], ptr @bl, align 8 3756 // CHECK-NEXT: ret void 3757 // 3758 void test_cmpne(void) { 3759 3760 bc = sc != sc2; 3761 bc = sc != bc2; 3762 bc = bc != sc2; 3763 bc = uc != uc2; 3764 bc = uc != bc2; 3765 bc = bc != uc2; 3766 bc = bc != bc2; 3767 3768 bs = ss != ss2; 3769 bs = ss != bs2; 3770 bs = bs != ss2; 3771 bs = us != us2; 3772 bs = us != bs2; 3773 bs = bs != us2; 3774 bs = bs != bs2; 3775 3776 bi = si != si2; 3777 bi = si != bi2; 3778 bi = bi != si2; 3779 bi = ui != ui2; 3780 bi = ui != bi2; 3781 bi = bi != ui2; 3782 bi = bi != bi2; 3783 3784 bl = sl != sl2; 3785 bl = sl != bl2; 3786 bl = bl != sl2; 3787 bl 
= ul != ul2; 3788 bl = ul != bl2; 3789 bl = bl != ul2; 3790 bl = bl != bl2; 3791 3792 blll = slll != slll2; 3793 blll = slll != blll2; 3794 blll = blll != slll2; 3795 blll = ulll != ulll2; 3796 blll = ulll != blll2; 3797 blll = blll != ulll2; 3798 blll = blll != blll2; 3799 3800 bl = fd != fd2; 3801 } 3802 3803 // CHECK-LABEL: define dso_local void @test_cmpge( 3804 // CHECK-SAME: ) #[[ATTR0]] { 3805 // CHECK-NEXT: [[ENTRY:.*:]] 3806 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 3807 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 3808 // CHECK-NEXT: [[CMP:%.*]] = icmp sge <16 x i8> [[TMP0]], [[TMP1]] 3809 // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8> 3810 // CHECK-NEXT: store volatile <16 x i8> [[SEXT]], ptr @bc, align 8 3811 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 3812 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 3813 // CHECK-NEXT: [[CMP1:%.*]] = icmp uge <16 x i8> [[TMP2]], [[TMP3]] 3814 // CHECK-NEXT: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8> 3815 // CHECK-NEXT: store volatile <16 x i8> [[SEXT2]], ptr @bc, align 8 3816 // CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 3817 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 3818 // CHECK-NEXT: [[CMP3:%.*]] = icmp uge <16 x i8> [[TMP4]], [[TMP5]] 3819 // CHECK-NEXT: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8> 3820 // CHECK-NEXT: store volatile <16 x i8> [[SEXT4]], ptr @bc, align 8 3821 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 3822 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 3823 // CHECK-NEXT: [[CMP5:%.*]] = icmp sge <8 x i16> [[TMP6]], [[TMP7]] 3824 // CHECK-NEXT: [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16> 3825 // CHECK-NEXT: store volatile <8 x i16> [[SEXT6]], ptr @bs, align 8 3826 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <8 x i16>, ptr @us, align 8 3827 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 3828 // CHECK-NEXT: [[CMP7:%.*]] = icmp uge <8 x i16> [[TMP8]], [[TMP9]] 3829 // CHECK-NEXT: [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16> 3830 // CHECK-NEXT: store volatile <8 x i16> [[SEXT8]], ptr @bs, align 8 3831 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 3832 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 3833 // CHECK-NEXT: [[CMP9:%.*]] = icmp uge <8 x i16> [[TMP10]], [[TMP11]] 3834 // CHECK-NEXT: [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16> 3835 // CHECK-NEXT: store volatile <8 x i16> [[SEXT10]], ptr @bs, align 8 3836 // CHECK-NEXT: [[TMP12:%.*]] = load volatile <4 x i32>, ptr @si, align 8 3837 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 3838 // CHECK-NEXT: [[CMP11:%.*]] = icmp sge <4 x i32> [[TMP12]], [[TMP13]] 3839 // CHECK-NEXT: [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32> 3840 // CHECK-NEXT: store volatile <4 x i32> [[SEXT12]], ptr @bi, align 8 3841 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 3842 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 3843 // CHECK-NEXT: [[CMP13:%.*]] = icmp uge <4 x i32> [[TMP14]], [[TMP15]] 3844 // CHECK-NEXT: [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32> 3845 // CHECK-NEXT: store volatile <4 x i32> [[SEXT14]], ptr @bi, align 8 3846 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 3847 // CHECK-NEXT: [[TMP17:%.*]] = 
load volatile <4 x i32>, ptr @bi2, align 8 3848 // CHECK-NEXT: [[CMP15:%.*]] = icmp uge <4 x i32> [[TMP16]], [[TMP17]] 3849 // CHECK-NEXT: [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32> 3850 // CHECK-NEXT: store volatile <4 x i32> [[SEXT16]], ptr @bi, align 8 3851 // CHECK-NEXT: [[TMP18:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 3852 // CHECK-NEXT: [[TMP19:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8 3853 // CHECK-NEXT: [[CMP17:%.*]] = icmp sge <2 x i64> [[TMP18]], [[TMP19]] 3854 // CHECK-NEXT: [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64> 3855 // CHECK-NEXT: store volatile <2 x i64> [[SEXT18]], ptr @bl, align 8 3856 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 3857 // CHECK-NEXT: [[TMP21:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 3858 // CHECK-NEXT: [[CMP19:%.*]] = icmp uge <2 x i64> [[TMP20]], [[TMP21]] 3859 // CHECK-NEXT: [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64> 3860 // CHECK-NEXT: store volatile <2 x i64> [[SEXT20]], ptr @bl, align 8 3861 // CHECK-NEXT: [[TMP22:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 3862 // CHECK-NEXT: [[TMP23:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 3863 // CHECK-NEXT: [[CMP21:%.*]] = icmp uge <2 x i64> [[TMP22]], [[TMP23]] 3864 // CHECK-NEXT: [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64> 3865 // CHECK-NEXT: store volatile <2 x i64> [[SEXT22]], ptr @bl, align 8 3866 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 3867 // CHECK-NEXT: [[TMP25:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 3868 // CHECK-NEXT: [[CMP23:%.*]] = icmp sge <1 x i128> [[TMP24]], [[TMP25]] 3869 // CHECK-NEXT: [[SEXT24:%.*]] = sext <1 x i1> [[CMP23]] to <1 x i128> 3870 // CHECK-NEXT: store volatile <1 x i128> [[SEXT24]], ptr @blll, align 8 3871 // CHECK-NEXT: [[TMP26:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 3872 // CHECK-NEXT: [[TMP27:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 3873 // CHECK-NEXT: [[CMP25:%.*]] = icmp uge <1 x i128> [[TMP26]], [[TMP27]] 3874 // CHECK-NEXT: [[SEXT26:%.*]] = sext <1 x i1> [[CMP25]] to <1 x i128> 3875 // CHECK-NEXT: store volatile <1 x i128> [[SEXT26]], ptr @blll, align 8 3876 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 3877 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 3878 // CHECK-NEXT: [[CMP27:%.*]] = icmp uge <1 x i128> [[TMP28]], [[TMP29]] 3879 // CHECK-NEXT: [[SEXT28:%.*]] = sext <1 x i1> [[CMP27]] to <1 x i128> 3880 // CHECK-NEXT: store volatile <1 x i128> [[SEXT28]], ptr @blll, align 8 3881 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <2 x double>, ptr @fd, align 8 3882 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <2 x double>, ptr @fd2, align 8 3883 // CHECK-NEXT: [[CMP29:%.*]] = fcmp oge <2 x double> [[TMP30]], [[TMP31]] 3884 // CHECK-NEXT: [[SEXT30:%.*]] = sext <2 x i1> [[CMP29]] to <2 x i64> 3885 // CHECK-NEXT: store volatile <2 x i64> [[SEXT30]], ptr @bl, align 8 3886 // CHECK-NEXT: ret void 3887 // 3888 void test_cmpge(void) { 3889 3890 bc = sc >= sc2; 3891 bc = uc >= uc2; 3892 bc = bc >= bc2; 3893 3894 bs = ss >= ss2; 3895 bs = us >= us2; 3896 bs = bs >= bs2; 3897 3898 bi = si >= si2; 3899 bi = ui >= ui2; 3900 bi = bi >= bi2; 3901 3902 bl = sl >= sl2; 3903 bl = ul >= ul2; 3904 bl = bl >= bl2; 3905 3906 blll = slll >= slll2; 3907 blll = ulll >= ulll2; 3908 blll = blll >= blll2; 3909 3910 bl = fd >= fd2; 3911 } 3912 3913 // CHECK-LABEL: define dso_local void @test_cmpgt( 3914 // CHECK-SAME: ) #[[ATTR0]] { 3915 // CHECK-NEXT: 
[[ENTRY:.*:]] 3916 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 3917 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 3918 // CHECK-NEXT: [[CMP:%.*]] = icmp sgt <16 x i8> [[TMP0]], [[TMP1]] 3919 // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8> 3920 // CHECK-NEXT: store volatile <16 x i8> [[SEXT]], ptr @bc, align 8 3921 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @uc, align 8 3922 // CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8 3923 // CHECK-NEXT: [[CMP1:%.*]] = icmp ugt <16 x i8> [[TMP2]], [[TMP3]] 3924 // CHECK-NEXT: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8> 3925 // CHECK-NEXT: store volatile <16 x i8> [[SEXT2]], ptr @bc, align 8 3926 // CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @bc, align 8 3927 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8 3928 // CHECK-NEXT: [[CMP3:%.*]] = icmp ugt <16 x i8> [[TMP4]], [[TMP5]] 3929 // CHECK-NEXT: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8> 3930 // CHECK-NEXT: store volatile <16 x i8> [[SEXT4]], ptr @bc, align 8 3931 // CHECK-NEXT: [[TMP6:%.*]] = load volatile <8 x i16>, ptr @ss, align 8 3932 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8 3933 // CHECK-NEXT: [[CMP5:%.*]] = icmp sgt <8 x i16> [[TMP6]], [[TMP7]] 3934 // CHECK-NEXT: [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16> 3935 // CHECK-NEXT: store volatile <8 x i16> [[SEXT6]], ptr @bs, align 8 3936 // CHECK-NEXT: [[TMP8:%.*]] = load volatile <8 x i16>, ptr @us, align 8 3937 // CHECK-NEXT: [[TMP9:%.*]] = load volatile <8 x i16>, ptr @us2, align 8 3938 // CHECK-NEXT: [[CMP7:%.*]] = icmp ugt <8 x i16> [[TMP8]], [[TMP9]] 3939 // CHECK-NEXT: [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16> 3940 // CHECK-NEXT: store volatile <8 x i16> [[SEXT8]], ptr @bs, align 8 3941 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <8 x i16>, ptr @bs, align 8 3942 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8 3943 // CHECK-NEXT: [[CMP9:%.*]] = icmp ugt <8 x i16> [[TMP10]], [[TMP11]] 3944 // CHECK-NEXT: [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16> 3945 // CHECK-NEXT: store volatile <8 x i16> [[SEXT10]], ptr @bs, align 8 3946 // CHECK-NEXT: [[TMP12:%.*]] = load volatile <4 x i32>, ptr @si, align 8 3947 // CHECK-NEXT: [[TMP13:%.*]] = load volatile <4 x i32>, ptr @si2, align 8 3948 // CHECK-NEXT: [[CMP11:%.*]] = icmp sgt <4 x i32> [[TMP12]], [[TMP13]] 3949 // CHECK-NEXT: [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32> 3950 // CHECK-NEXT: store volatile <4 x i32> [[SEXT12]], ptr @bi, align 8 3951 // CHECK-NEXT: [[TMP14:%.*]] = load volatile <4 x i32>, ptr @ui, align 8 3952 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8 3953 // CHECK-NEXT: [[CMP13:%.*]] = icmp ugt <4 x i32> [[TMP14]], [[TMP15]] 3954 // CHECK-NEXT: [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32> 3955 // CHECK-NEXT: store volatile <4 x i32> [[SEXT14]], ptr @bi, align 8 3956 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <4 x i32>, ptr @bi, align 8 3957 // CHECK-NEXT: [[TMP17:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8 3958 // CHECK-NEXT: [[CMP15:%.*]] = icmp ugt <4 x i32> [[TMP16]], [[TMP17]] 3959 // CHECK-NEXT: [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32> 3960 // CHECK-NEXT: store volatile <4 x i32> [[SEXT16]], ptr @bi, align 8 3961 // CHECK-NEXT: [[TMP18:%.*]] = load volatile <2 x i64>, ptr @sl, align 8 3962 // CHECK-NEXT: [[TMP19:%.*]] = load volatile <2 x i64>, ptr @sl2, align 
8 3963 // CHECK-NEXT: [[CMP17:%.*]] = icmp sgt <2 x i64> [[TMP18]], [[TMP19]] 3964 // CHECK-NEXT: [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64> 3965 // CHECK-NEXT: store volatile <2 x i64> [[SEXT18]], ptr @bl, align 8 3966 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <2 x i64>, ptr @ul, align 8 3967 // CHECK-NEXT: [[TMP21:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8 3968 // CHECK-NEXT: [[CMP19:%.*]] = icmp ugt <2 x i64> [[TMP20]], [[TMP21]] 3969 // CHECK-NEXT: [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64> 3970 // CHECK-NEXT: store volatile <2 x i64> [[SEXT20]], ptr @bl, align 8 3971 // CHECK-NEXT: [[TMP22:%.*]] = load volatile <2 x i64>, ptr @bl, align 8 3972 // CHECK-NEXT: [[TMP23:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8 3973 // CHECK-NEXT: [[CMP21:%.*]] = icmp ugt <2 x i64> [[TMP22]], [[TMP23]] 3974 // CHECK-NEXT: [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64> 3975 // CHECK-NEXT: store volatile <2 x i64> [[SEXT22]], ptr @bl, align 8 3976 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <1 x i128>, ptr @slll, align 8 3977 // CHECK-NEXT: [[TMP25:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8 3978 // CHECK-NEXT: [[CMP23:%.*]] = icmp sgt <1 x i128> [[TMP24]], [[TMP25]] 3979 // CHECK-NEXT: [[SEXT24:%.*]] = sext <1 x i1> [[CMP23]] to <1 x i128> 3980 // CHECK-NEXT: store volatile <1 x i128> [[SEXT24]], ptr @blll, align 8 3981 // CHECK-NEXT: [[TMP26:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8 3982 // CHECK-NEXT: [[TMP27:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8 3983 // CHECK-NEXT: [[CMP25:%.*]] = icmp ugt <1 x i128> [[TMP26]], [[TMP27]] 3984 // CHECK-NEXT: [[SEXT26:%.*]] = sext <1 x i1> [[CMP25]] to <1 x i128> 3985 // CHECK-NEXT: store volatile <1 x i128> [[SEXT26]], ptr @blll, align 8 3986 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <1 x i128>, ptr @blll, align 8 3987 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8 3988 // CHECK-NEXT: [[CMP27:%.*]] = icmp ugt <1 x i128> [[TMP28]], [[TMP29]] 3989 // CHECK-NEXT: [[SEXT28:%.*]] = sext <1 x i1> [[CMP27]] to <1 x i128> 3990 // CHECK-NEXT: store volatile <1 x i128> [[SEXT28]], ptr @blll, align 8 3991 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <2 x double>, ptr @fd, align 8 3992 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <2 x double>, ptr @fd2, align 8 3993 // CHECK-NEXT: [[CMP29:%.*]] = fcmp ogt <2 x double> [[TMP30]], [[TMP31]] 3994 // CHECK-NEXT: [[SEXT30:%.*]] = sext <2 x i1> [[CMP29]] to <2 x i64> 3995 // CHECK-NEXT: store volatile <2 x i64> [[SEXT30]], ptr @bl, align 8 3996 // CHECK-NEXT: ret void 3997 // 3998 void test_cmpgt(void) { 3999 4000 bc = sc > sc2; 4001 bc = uc > uc2; 4002 bc = bc > bc2; 4003 4004 bs = ss > ss2; 4005 bs = us > us2; 4006 bs = bs > bs2; 4007 4008 bi = si > si2; 4009 bi = ui > ui2; 4010 bi = bi > bi2; 4011 4012 bl = sl > sl2; 4013 bl = ul > ul2; 4014 bl = bl > bl2; 4015 4016 blll = slll > slll2; 4017 blll = ulll > ulll2; 4018 blll = blll > blll2; 4019 4020 bl = fd > fd2; 4021 } 4022 4023 // CHECK-LABEL: define dso_local void @test_cmple( 4024 // CHECK-SAME: ) #[[ATTR0]] { 4025 // CHECK-NEXT: [[ENTRY:.*:]] 4026 // CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8 4027 // CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8 4028 // CHECK-NEXT: [[CMP:%.*]] = icmp sle <16 x i8> [[TMP0]], [[TMP1]] 4029 // CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8> 4030 // CHECK-NEXT: store volatile <16 x i8> [[SEXT]], ptr @bc, align 8 4031 // CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, 
// CHECK-LABEL: define dso_local void @test_cmple(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[CMP:%.*]] = icmp sle <16 x i8> [[TMP0]], [[TMP1]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[SEXT]], ptr @bc, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[CMP1:%.*]] = icmp ule <16 x i8> [[TMP2]], [[TMP3]]
// CHECK-NEXT: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[SEXT2]], ptr @bc, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @bc, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8
// CHECK-NEXT: [[CMP3:%.*]] = icmp ule <16 x i8> [[TMP4]], [[TMP5]]
// CHECK-NEXT: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[SEXT4]], ptr @bc, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[CMP5:%.*]] = icmp sle <8 x i16> [[TMP6]], [[TMP7]]
// CHECK-NEXT: [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16>
// CHECK-NEXT: store volatile <8 x i16> [[SEXT6]], ptr @bs, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <8 x i16>, ptr @us, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[CMP7:%.*]] = icmp ule <8 x i16> [[TMP8]], [[TMP9]]
// CHECK-NEXT: [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16>
// CHECK-NEXT: store volatile <8 x i16> [[SEXT8]], ptr @bs, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <8 x i16>, ptr @bs, align 8
// CHECK-NEXT: [[TMP11:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8
// CHECK-NEXT: [[CMP9:%.*]] = icmp ule <8 x i16> [[TMP10]], [[TMP11]]
// CHECK-NEXT: [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16>
// CHECK-NEXT: store volatile <8 x i16> [[SEXT10]], ptr @bs, align 8
// CHECK-NEXT: [[TMP12:%.*]] = load volatile <4 x i32>, ptr @si, align 8
// CHECK-NEXT: [[TMP13:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[CMP11:%.*]] = icmp sle <4 x i32> [[TMP12]], [[TMP13]]
// CHECK-NEXT: [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32>
// CHECK-NEXT: store volatile <4 x i32> [[SEXT12]], ptr @bi, align 8
// CHECK-NEXT: [[TMP14:%.*]] = load volatile <4 x i32>, ptr @ui, align 8
// CHECK-NEXT: [[TMP15:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: [[CMP13:%.*]] = icmp ule <4 x i32> [[TMP14]], [[TMP15]]
// CHECK-NEXT: [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32>
// CHECK-NEXT: store volatile <4 x i32> [[SEXT14]], ptr @bi, align 8
// CHECK-NEXT: [[TMP16:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
// CHECK-NEXT: [[TMP17:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8
// CHECK-NEXT: [[CMP15:%.*]] = icmp ule <4 x i32> [[TMP16]], [[TMP17]]
// CHECK-NEXT: [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32>
// CHECK-NEXT: store volatile <4 x i32> [[SEXT16]], ptr @bi, align 8
// CHECK-NEXT: [[TMP18:%.*]] = load volatile <2 x i64>, ptr @sl, align 8
// CHECK-NEXT: [[TMP19:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[CMP17:%.*]] = icmp sle <2 x i64> [[TMP18]], [[TMP19]]
// CHECK-NEXT: [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64>
// CHECK-NEXT: store volatile <2 x i64> [[SEXT18]], ptr @bl, align 8
// CHECK-NEXT: [[TMP20:%.*]] = load volatile <2 x i64>, ptr @ul, align 8
// CHECK-NEXT: [[TMP21:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: [[CMP19:%.*]] = icmp ule <2 x i64> [[TMP20]], [[TMP21]]
// CHECK-NEXT: [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64>
// CHECK-NEXT: store volatile <2 x i64> [[SEXT20]], ptr @bl, align 8
// CHECK-NEXT: [[TMP22:%.*]] = load volatile <2 x i64>, ptr @bl, align 8
// CHECK-NEXT: [[TMP23:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8
// CHECK-NEXT: [[CMP21:%.*]] = icmp ule <2 x i64> [[TMP22]], [[TMP23]]
// CHECK-NEXT: [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64>
// CHECK-NEXT: store volatile <2 x i64> [[SEXT22]], ptr @bl, align 8
// CHECK-NEXT: [[TMP24:%.*]] = load volatile <1 x i128>, ptr @slll, align 8
// CHECK-NEXT: [[TMP25:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[CMP23:%.*]] = icmp sle <1 x i128> [[TMP24]], [[TMP25]]
// CHECK-NEXT: [[SEXT24:%.*]] = sext <1 x i1> [[CMP23]] to <1 x i128>
// CHECK-NEXT: store volatile <1 x i128> [[SEXT24]], ptr @blll, align 8
// CHECK-NEXT: [[TMP26:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
// CHECK-NEXT: [[TMP27:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: [[CMP25:%.*]] = icmp ule <1 x i128> [[TMP26]], [[TMP27]]
// CHECK-NEXT: [[SEXT26:%.*]] = sext <1 x i1> [[CMP25]] to <1 x i128>
// CHECK-NEXT: store volatile <1 x i128> [[SEXT26]], ptr @blll, align 8
// CHECK-NEXT: [[TMP28:%.*]] = load volatile <1 x i128>, ptr @blll, align 8
// CHECK-NEXT: [[TMP29:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8
// CHECK-NEXT: [[CMP27:%.*]] = icmp ule <1 x i128> [[TMP28]], [[TMP29]]
// CHECK-NEXT: [[SEXT28:%.*]] = sext <1 x i1> [[CMP27]] to <1 x i128>
// CHECK-NEXT: store volatile <1 x i128> [[SEXT28]], ptr @blll, align 8
// CHECK-NEXT: [[TMP30:%.*]] = load volatile <2 x double>, ptr @fd, align 8
// CHECK-NEXT: [[TMP31:%.*]] = load volatile <2 x double>, ptr @fd2, align 8
// CHECK-NEXT: [[CMP29:%.*]] = fcmp ole <2 x double> [[TMP30]], [[TMP31]]
// CHECK-NEXT: [[SEXT30:%.*]] = sext <2 x i1> [[CMP29]] to <2 x i64>
// CHECK-NEXT: store volatile <2 x i64> [[SEXT30]], ptr @bl, align 8
// CHECK-NEXT: ret void
//
void test_cmple(void) {

  bc = sc <= sc2;
  bc = uc <= uc2;
  bc = bc <= bc2;

  bs = ss <= ss2;
  bs = us <= us2;
  bs = bs <= bs2;

  bi = si <= si2;
  bi = ui <= ui2;
  bi = bi <= bi2;

  bl = sl <= sl2;
  bl = ul <= ul2;
  bl = bl <= bl2;

  blll = slll <= slll2;
  blll = ulll <= ulll2;
  blll = blll <= blll2;

  bl = fd <= fd2;
}

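// test_cmple follows the same pattern with the corresponding predicates:
// signed operands lower to icmp sle, unsigned and bool operands to icmp ule,
// and the double comparison to fcmp ole, each sign-extended into the matching
// bool vector (vector bool long long for the double case).
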
// CHECK-LABEL: define dso_local void @test_cmplt(
// CHECK-SAME: ) #[[ATTR0]] {
// CHECK-NEXT: [[ENTRY:.*:]]
// CHECK-NEXT: [[TMP0:%.*]] = load volatile <16 x i8>, ptr @sc, align 8
// CHECK-NEXT: [[TMP1:%.*]] = load volatile <16 x i8>, ptr @sc2, align 8
// CHECK-NEXT: [[CMP:%.*]] = icmp slt <16 x i8> [[TMP0]], [[TMP1]]
// CHECK-NEXT: [[SEXT:%.*]] = sext <16 x i1> [[CMP]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[SEXT]], ptr @bc, align 8
// CHECK-NEXT: [[TMP2:%.*]] = load volatile <16 x i8>, ptr @uc, align 8
// CHECK-NEXT: [[TMP3:%.*]] = load volatile <16 x i8>, ptr @uc2, align 8
// CHECK-NEXT: [[CMP1:%.*]] = icmp ult <16 x i8> [[TMP2]], [[TMP3]]
// CHECK-NEXT: [[SEXT2:%.*]] = sext <16 x i1> [[CMP1]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[SEXT2]], ptr @bc, align 8
// CHECK-NEXT: [[TMP4:%.*]] = load volatile <16 x i8>, ptr @bc, align 8
// CHECK-NEXT: [[TMP5:%.*]] = load volatile <16 x i8>, ptr @bc2, align 8
// CHECK-NEXT: [[CMP3:%.*]] = icmp ult <16 x i8> [[TMP4]], [[TMP5]]
// CHECK-NEXT: [[SEXT4:%.*]] = sext <16 x i1> [[CMP3]] to <16 x i8>
// CHECK-NEXT: store volatile <16 x i8> [[SEXT4]], ptr @bc, align 8
// CHECK-NEXT: [[TMP6:%.*]] = load volatile <8 x i16>, ptr @ss, align 8
// CHECK-NEXT: [[TMP7:%.*]] = load volatile <8 x i16>, ptr @ss2, align 8
// CHECK-NEXT: [[CMP5:%.*]] = icmp slt <8 x i16> [[TMP6]], [[TMP7]]
// CHECK-NEXT: [[SEXT6:%.*]] = sext <8 x i1> [[CMP5]] to <8 x i16>
// CHECK-NEXT: store volatile <8 x i16> [[SEXT6]], ptr @bs, align 8
// CHECK-NEXT: [[TMP8:%.*]] = load volatile <8 x i16>, ptr @us, align 8
// CHECK-NEXT: [[TMP9:%.*]] = load volatile <8 x i16>, ptr @us2, align 8
// CHECK-NEXT: [[CMP7:%.*]] = icmp ult <8 x i16> [[TMP8]], [[TMP9]]
// CHECK-NEXT: [[SEXT8:%.*]] = sext <8 x i1> [[CMP7]] to <8 x i16>
// CHECK-NEXT: store volatile <8 x i16> [[SEXT8]], ptr @bs, align 8
// CHECK-NEXT: [[TMP10:%.*]] = load volatile <8 x i16>, ptr @bs, align 8
// CHECK-NEXT: [[TMP11:%.*]] = load volatile <8 x i16>, ptr @bs2, align 8
// CHECK-NEXT: [[CMP9:%.*]] = icmp ult <8 x i16> [[TMP10]], [[TMP11]]
// CHECK-NEXT: [[SEXT10:%.*]] = sext <8 x i1> [[CMP9]] to <8 x i16>
// CHECK-NEXT: store volatile <8 x i16> [[SEXT10]], ptr @bs, align 8
// CHECK-NEXT: [[TMP12:%.*]] = load volatile <4 x i32>, ptr @si, align 8
// CHECK-NEXT: [[TMP13:%.*]] = load volatile <4 x i32>, ptr @si2, align 8
// CHECK-NEXT: [[CMP11:%.*]] = icmp slt <4 x i32> [[TMP12]], [[TMP13]]
// CHECK-NEXT: [[SEXT12:%.*]] = sext <4 x i1> [[CMP11]] to <4 x i32>
// CHECK-NEXT: store volatile <4 x i32> [[SEXT12]], ptr @bi, align 8
// CHECK-NEXT: [[TMP14:%.*]] = load volatile <4 x i32>, ptr @ui, align 8
// CHECK-NEXT: [[TMP15:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
// CHECK-NEXT: [[CMP13:%.*]] = icmp ult <4 x i32> [[TMP14]], [[TMP15]]
// CHECK-NEXT: [[SEXT14:%.*]] = sext <4 x i1> [[CMP13]] to <4 x i32>
// CHECK-NEXT: store volatile <4 x i32> [[SEXT14]], ptr @bi, align 8
// CHECK-NEXT: [[TMP16:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
// CHECK-NEXT: [[TMP17:%.*]] = load volatile <4 x i32>, ptr @bi2, align 8
// CHECK-NEXT: [[CMP15:%.*]] = icmp ult <4 x i32> [[TMP16]], [[TMP17]]
// CHECK-NEXT: [[SEXT16:%.*]] = sext <4 x i1> [[CMP15]] to <4 x i32>
// CHECK-NEXT: store volatile <4 x i32> [[SEXT16]], ptr @bi, align 8
// CHECK-NEXT: [[TMP18:%.*]] = load volatile <2 x i64>, ptr @sl, align 8
// CHECK-NEXT: [[TMP19:%.*]] = load volatile <2 x i64>, ptr @sl2, align 8
// CHECK-NEXT: [[CMP17:%.*]] = icmp slt <2 x i64> [[TMP18]], [[TMP19]]
// CHECK-NEXT: [[SEXT18:%.*]] = sext <2 x i1> [[CMP17]] to <2 x i64>
// CHECK-NEXT: store volatile <2 x i64> [[SEXT18]], ptr @bl, align 8
// CHECK-NEXT: [[TMP20:%.*]] = load volatile <2 x i64>, ptr @ul, align 8
// CHECK-NEXT: [[TMP21:%.*]] = load volatile <2 x i64>, ptr @ul2, align 8
// CHECK-NEXT: [[CMP19:%.*]] = icmp ult <2 x i64> [[TMP20]], [[TMP21]]
// CHECK-NEXT: [[SEXT20:%.*]] = sext <2 x i1> [[CMP19]] to <2 x i64>
// CHECK-NEXT: store volatile <2 x i64> [[SEXT20]], ptr @bl, align 8
// CHECK-NEXT: [[TMP22:%.*]] = load volatile <2 x i64>, ptr @bl, align 8
// CHECK-NEXT: [[TMP23:%.*]] = load volatile <2 x i64>, ptr @bl2, align 8
// CHECK-NEXT: [[CMP21:%.*]] = icmp ult <2 x i64> [[TMP22]], [[TMP23]]
// CHECK-NEXT: [[SEXT22:%.*]] = sext <2 x i1> [[CMP21]] to <2 x i64>
// CHECK-NEXT: store volatile <2 x i64> [[SEXT22]], ptr @bl, align 8
// CHECK-NEXT: [[TMP24:%.*]] = load volatile <1 x i128>, ptr @slll, align 8
// CHECK-NEXT: [[TMP25:%.*]] = load volatile <1 x i128>, ptr @slll2, align 8
// CHECK-NEXT: [[CMP23:%.*]] = icmp slt <1 x i128> [[TMP24]], [[TMP25]]
// CHECK-NEXT: [[SEXT24:%.*]] = sext <1 x i1> [[CMP23]] to <1 x i128>
// CHECK-NEXT: store volatile <1 x i128> [[SEXT24]], ptr @blll, align 8
// CHECK-NEXT: [[TMP26:%.*]] = load volatile <1 x i128>, ptr @ulll, align 8
// CHECK-NEXT: [[TMP27:%.*]] = load volatile <1 x i128>, ptr @ulll2, align 8
// CHECK-NEXT: [[CMP25:%.*]] = icmp ult <1 x i128> [[TMP26]], [[TMP27]]
// CHECK-NEXT: [[SEXT26:%.*]] = sext <1 x i1> [[CMP25]] to <1 x i128>
// CHECK-NEXT: store volatile <1 x i128> [[SEXT26]], ptr @blll, align 8
// CHECK-NEXT: [[TMP28:%.*]] = load volatile <1 x i128>, ptr @blll, align 8
// CHECK-NEXT: [[TMP29:%.*]] = load volatile <1 x i128>, ptr @blll2, align 8
// CHECK-NEXT: [[CMP27:%.*]] = icmp ult <1 x i128> [[TMP28]], [[TMP29]]
// CHECK-NEXT: [[SEXT28:%.*]] = sext <1 x i1> [[CMP27]] to <1 x i128>
// CHECK-NEXT: store volatile <1 x i128> [[SEXT28]], ptr @blll, align 8
// CHECK-NEXT: [[TMP30:%.*]] = load volatile <2 x double>, ptr @fd, align 8
// CHECK-NEXT: [[TMP31:%.*]] = load volatile <2 x double>, ptr @fd2, align 8
// CHECK-NEXT: [[CMP29:%.*]] = fcmp olt <2 x double> [[TMP30]], [[TMP31]]
// CHECK-NEXT: [[SEXT30:%.*]] = sext <2 x i1> [[CMP29]] to <2 x i64>
// CHECK-NEXT: store volatile <2 x i64> [[SEXT30]], ptr @bl, align 8
// CHECK-NEXT: ret void
//
void test_cmplt(void) {

  bc = sc < sc2;
  bc = uc < uc2;
  bc = bc < bc2;

  bs = ss < ss2;
  bs = us < us2;
  bs = bs < bs2;

  bi = si < si2;
  bi = ui < ui2;
  bi = bi < bi2;

  bl = sl < sl2;
  bl = ul < ul2;
  bl = bl < bl2;

  blll = slll < slll2;
  blll = ulll < ulll2;
  blll = blll < blll2;

  bl = fd < fd2;
}

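// test_cmplt completes the ordering operators with icmp slt/ult and fcmp olt.
// As in the other comparison tests, the result of comparing vector double
// operands is a vector bool long long (stored to bl), since the bool vector
// is keyed to the 64-bit element width of double.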