Lines Matching defs:bi
16 volatile vector bool int bi, bi2;
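The statements listed below also read and write signed and unsigned vector globals whose declarations fall outside this match set. A minimal sketch of those companion declarations, assuming they mirror the matched declaration of bi and bi2 (the names si, si2, ui, ui2 are taken from the statements below, not from matched lines):

// Assumed companion globals; only the bi/bi2 declaration is among the matched lines.
volatile vector signed int si, si2;
volatile vector unsigned int ui, ui2;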
448 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
460 // CHECK-NEXT: [[TMP34:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
536 si = bi + si2;
539 ui = bi + ui2;
736 // CHECK-NEXT: [[TMP28:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
748 // CHECK-NEXT: [[TMP34:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
824 si = bi - si2;
827 ui = bi - ui2;
1396 // CHECK-NEXT: store volatile <4 x i32> [[NOT8]], ptr @bi, align 8
1429 bi = ~bi2;
1507 // CHECK-NEXT: [[TMP32:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
1519 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
1523 // CHECK-NEXT: [[TMP40:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
1526 // CHECK-NEXT: store volatile <4 x i32> [[AND20]], ptr @bi, align 8
1605 si = bi & si2;
1608 ui = bi & ui2;
1609 bi = bi & bi2;
1688 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
1690 // CHECK-NEXT: store volatile <4 x i32> [[AND14]], ptr @bi, align 8
1751 bi &= bi2;
1833 // CHECK-NEXT: [[TMP32:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
1845 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
1849 // CHECK-NEXT: [[TMP40:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
1852 // CHECK-NEXT: store volatile <4 x i32> [[OR20]], ptr @bi, align 8
1931 si = bi | si2;
1934 ui = bi | ui2;
1935 bi = bi | bi2;
2014 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
2016 // CHECK-NEXT: store volatile <4 x i32> [[OR14]], ptr @bi, align 8
2077 bi |= bi2;
2159 // CHECK-NEXT: [[TMP32:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
2171 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
2175 // CHECK-NEXT: [[TMP40:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
2178 // CHECK-NEXT: store volatile <4 x i32> [[XOR20]], ptr @bi, align 8
2257 si = bi ^ si2;
2260 ui = bi ^ ui2;
2261 bi = bi ^ bi2;
2340 // CHECK-NEXT: [[TMP29:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
2342 // CHECK-NEXT: store volatile <4 x i32> [[XOR14]], ptr @bi, align 8
2403 bi ^= bi2;
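For orientation, a minimal sketch collecting the operand/result typing the bitwise statements above exercise. The wrapper function name is hypothetical, and the grouping of statements into functions is not visible in the matched lines; the statement bodies are taken verbatim from the lines above.

// Sketch only; hypothetical wrapper around statements copied from the match.
void bitops_sketch(void) {
  si = bi & si2;   // bool combined with signed, assigned to the signed vector
  ui = bi | ui2;   // bool combined with unsigned, assigned to the unsigned vector
  bi = bi ^ bi2;   // bool with bool, assigned back to the bool vector
}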
3420 // CHECK-NEXT: store volatile <4 x i32> [[SEXT28]], ptr @bi, align 8
3425 // CHECK-NEXT: store volatile <4 x i32> [[SEXT30]], ptr @bi, align 8
3426 // CHECK-NEXT: [[TMP32:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
3430 // CHECK-NEXT: store volatile <4 x i32> [[SEXT32]], ptr @bi, align 8
3435 // CHECK-NEXT: store volatile <4 x i32> [[SEXT34]], ptr @bi, align 8
3440 // CHECK-NEXT: store volatile <4 x i32> [[SEXT36]], ptr @bi, align 8
3441 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
3445 // CHECK-NEXT: store volatile <4 x i32> [[SEXT38]], ptr @bi, align 8
3446 // CHECK-NEXT: [[TMP40:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
3450 // CHECK-NEXT: store volatile <4 x i32> [[SEXT40]], ptr @bi, align 8
3546 bi = si == si2;
3547 bi = si == bi2;
3548 bi = bi == si2;
3549 bi = ui == ui2;
3550 bi = ui == bi2;
3551 bi = bi == ui2;
3552 bi = bi == bi2;
3650 // CHECK-NEXT: store volatile <4 x i32> [[SEXT28]], ptr @bi, align 8
3655 // CHECK-NEXT: store volatile <4 x i32> [[SEXT30]], ptr @bi, align 8
3656 // CHECK-NEXT: [[TMP32:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
3660 // CHECK-NEXT: store volatile <4 x i32> [[SEXT32]], ptr @bi, align 8
3665 // CHECK-NEXT: store volatile <4 x i32> [[SEXT34]], ptr @bi, align 8
3670 // CHECK-NEXT: store volatile <4 x i32> [[SEXT36]], ptr @bi, align 8
3671 // CHECK-NEXT: [[TMP38:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
3675 // CHECK-NEXT: store volatile <4 x i32> [[SEXT38]], ptr @bi, align 8
3676 // CHECK-NEXT: [[TMP40:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
3680 // CHECK-NEXT: store volatile <4 x i32> [[SEXT40]], ptr @bi, align 8
3776 bi = si != si2;
3777 bi = si != bi2;
3778 bi = bi != si2;
3779 bi = ui != ui2;
3780 bi = ui != bi2;
3781 bi = bi != ui2;
3782 bi = bi != bi2;
3840 // CHECK-NEXT: store volatile <4 x i32> [[SEXT12]], ptr @bi, align 8
3845 // CHECK-NEXT: store volatile <4 x i32> [[SEXT14]], ptr @bi, align 8
3846 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
3850 // CHECK-NEXT: store volatile <4 x i32> [[SEXT16]], ptr @bi, align 8
3898 bi = si >= si2;
3899 bi = ui >= ui2;
3900 bi = bi >= bi2;
3950 // CHECK-NEXT: store volatile <4 x i32> [[SEXT12]], ptr @bi, align 8
3955 // CHECK-NEXT: store volatile <4 x i32> [[SEXT14]], ptr @bi, align 8
3956 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
3960 // CHECK-NEXT: store volatile <4 x i32> [[SEXT16]], ptr @bi, align 8
4008 bi = si > si2;
4009 bi = ui > ui2;
4010 bi = bi > bi2;
4060 // CHECK-NEXT: store volatile <4 x i32> [[SEXT12]], ptr @bi, align 8
4065 // CHECK-NEXT: store volatile <4 x i32> [[SEXT14]], ptr @bi, align 8
4066 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
4070 // CHECK-NEXT: store volatile <4 x i32> [[SEXT16]], ptr @bi, align 8
4118 bi = si <= si2;
4119 bi = ui <= ui2;
4120 bi = bi <= bi2;
4170 // CHECK-NEXT: store volatile <4 x i32> [[SEXT12]], ptr @bi, align 8
4175 // CHECK-NEXT: store volatile <4 x i32> [[SEXT14]], ptr @bi, align 8
4176 // CHECK-NEXT: [[TMP16:%.*]] = load volatile <4 x i32>, ptr @bi, align 8
4180 // CHECK-NEXT: store volatile <4 x i32> [[SEXT16]], ptr @bi, align 8
4228 bi = si < si2;
4229 bi = ui < ui2;
4230 bi = bi < bi2;
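Finally, a minimal sketch of how the relational statements above would typically be grouped, assuming one test function per comparison operator as is common in FileCheck codegen tests of this kind; the function name is hypothetical, and the statement bodies are the matched lines themselves.

// Sketch only; hypothetical grouping of the matched less-than statements.
void test_cmplt_sketch(void) {
  bi = si < si2;   // signed operands
  bi = ui < ui2;   // unsigned operands
  bi = bi < bi2;   // bool operands; each compare result is stored to bi
}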