Lines Matching defs:ui2

Each match below is prefixed with its line number in the searched test file; together the matches cover the declaration of ui2 and every statement and FileCheck CHECK-NEXT directive that reads or writes it. A sketch of the kind of test these lines come from follows the listing.

15 volatile vector unsigned int ui, ui2;
43 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
66 ui = ui2;
90 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
113 ui = +ui2;
175 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
177 // CHECK-NEXT: store volatile <4 x i32> [[INC5]], ptr @ui2, align 8
204 ++ui2;
233 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
235 // CHECK-NEXT: store volatile <4 x i32> [[INC5]], ptr @ui2, align 8
262 ui2++;
291 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
293 // CHECK-NEXT: store volatile <4 x i32> [[DEC5]], ptr @ui2, align 8
320 --ui2;
349 // CHECK-NEXT: [[TMP5:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
351 // CHECK-NEXT: store volatile <4 x i32> [[DEC5]], ptr @ui2, align 8
378 ui2--;
453 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
461 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
537 ui = ui + ui2;
539 ui = bi + ui2;
601 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
661 ui += ui2;
741 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
749 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
825 ui = ui - ui2;
827 ui = bi - ui2;
889 // CHECK-NEXT: [[TMP20:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
949 ui -= ui2;
989 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
1023 ui = ui * ui2;
1057 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
1092 ui *= ui2;
1127 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
1161 ui = ui / ui2;
1195 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
1230 ui /= ui2;
1265 // CHECK-NEXT: [[TMP11:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
1295 ui = ui % ui2;
1327 // CHECK-NEXT: [[TMP10:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
1358 ui %= ui2;
1391 // CHECK-NEXT: [[TMP7:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
1428 ui = ~ui2;
1512 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
1520 // CHECK-NEXT: [[TMP39:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
1606 ui = ui & ui2;
1608 ui = bi & ui2;
1679 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
1749 ui &= ui2;
1838 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
1846 // CHECK-NEXT: [[TMP39:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
1932 ui = ui | ui2;
1934 ui = bi | ui2;
2005 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
2075 ui |= ui2;
2164 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
2172 // CHECK-NEXT: [[TMP39:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
2258 ui = ui ^ ui2;
2260 ui = bi ^ ui2;
2331 // CHECK-NEXT: [[TMP24:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
2401 ui ^= ui2;
2498 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
2515 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
2622 si = si << ui2;
2626 ui = ui << ui2;
2728 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
2745 // CHECK-NEXT: [[TMP37:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
2853 si <<= ui2;
2857 ui <<= ui2;
2960 // CHECK-NEXT: [[TMP31:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
2977 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
3084 si = si >> ui2;
3088 ui = ui >> ui2;
3190 // CHECK-NEXT: [[TMP30:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
3207 // CHECK-NEXT: [[TMP37:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
3315 si >>= ui2;
3319 ui >>= ui2;
3432 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
3442 // CHECK-NEXT: [[TMP39:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
3549 bi = ui == ui2;
3551 bi = bi == ui2;
3662 // CHECK-NEXT: [[TMP35:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
3672 // CHECK-NEXT: [[TMP39:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
3779 bi = ui != ui2;
3781 bi = bi != ui2;
3842 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
3899 bi = ui >= ui2;
3952 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
4009 bi = ui > ui2;
4062 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
4119 bi = ui <= ui2;
4172 // CHECK-NEXT: [[TMP15:%.*]] = load volatile <4 x i32>, ptr @ui2, align 8
4229 bi = ui < ui2;
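
The matches above appear to come from a Clang IR-generation test for the System z vector language extension; the volatile vector globals, the 8-byte alignment of the <4 x i32> accesses, and the dense CHECK-NEXT sequences are characteristic of the tests under clang/test/CodeGen/SystemZ. Below is a minimal sketch, not the actual test file, of a translation unit that produces matches like those listed. The RUN line, the declarations of si and bi, and the grouping into functions are assumptions; the statements themselves are taken from the matches above.

// RUN line is an assumption; the real test's flags are not part of the matches.
// RUN: %clang_cc1 -triple s390x-linux-gnu -target-cpu z13 -fzvector -O2 \
// RUN:   -emit-llvm -o - %s | FileCheck %s

volatile vector signed int si;         // assumed: si appears in the shift matches
volatile vector bool int bi;           // assumed: bi appears in the comparison matches
volatile vector unsigned int ui, ui2;  // matched at line 15

void test_assign(void) {
// CHECK: load volatile <4 x i32>, ptr @ui2, align 8
// CHECK: store volatile <4 x i32> {{.*}}, ptr @ui, align 8
  ui = ui2;          // matched at line 66
}

void test_incdec(void) {
// CHECK: store volatile <4 x i32> {{.*}}, ptr @ui2, align 8
  ++ui2;             // matched at line 204; the stores back to @ui2 come from ++/--
  ui2--;             // matched at line 378
}

void test_arith(void) {
  ui = ui + ui2;     // matched at line 537
  ui -= ui2;         // matched at line 949
  ui = ui % ui2;     // matched at line 1295
}

void test_shift(void) {
  si = si << ui2;    // matched at line 2622; shift amounts are taken per element from ui2
  ui = ui >> ui2;    // matched at line 3088
}

void test_compare(void) {
  bi = ui == ui2;    // matched at line 3549; vector comparisons yield a vector bool int mask
  bi = ui < ui2;     // matched at line 4229
}

The numbered [[TMPn]], [[INCn]], and [[DECn]] FileCheck variables and the exhaustive CHECK-NEXT sequences in the matches look like the output of LLVM's llvm/utils/update_cc_test_checks.py script rather than hand-written checks, so a test in this style is normally regenerated with that script after edits rather than having its CHECK lines updated by hand.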