Lines matching defs: vu1
12 void test_builtin_reduce_max(float4 vf1, si8 vi1, u4 vu1) {
22 // CHECK: [[VU1:%.+]] = load <4 x i32>, ptr %vu1.addr, align 16
24 unsigned r3 = __builtin_reduce_max(vu1);
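The match cuts off at the load; for an unsigned element type, __builtin_reduce_max is expected to lower to the unsigned-max reduction intrinsic, so the directive on the next (unmatched) line is presumably along these lines — a sketch, not the verbatim test:

    // CHECK-NEXT: call i32 @llvm.vector.reduce.umax.v4i32(<4 x i32> [[VU1]])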
38 void test_builtin_reduce_min(float4 vf1, si8 vi1, u4 vu1) {
48 // CHECK: [[VU1:%.+]] = load <4 x i32>, ptr %vu1.addr, align 16
50 unsigned r3 = __builtin_reduce_min(vu1);
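Likewise for min, the unsigned operand should select the umin intrinsic; a sketch of the expected follow-up directive:

    // CHECK-NEXT: call i32 @llvm.vector.reduce.umin.v4i32(<4 x i32> [[VU1]])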
64 void test_builtin_reduce_add(si8 vi1, u4 vu1) {
69 // CHECK: [[VU1:%.+]] = load <4 x i32>, ptr %vu1.addr, align 16
71 unsigned r3 = __builtin_reduce_add(vu1);
82 const u4 cvu1 = vu1;
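Integer add reductions are sign-agnostic, so the load is presumably followed by a check for the plain add intrinsic; the trailing `const u4 cvu1 = vu1;` match suggests the function later reduces a const-qualified copy the same way. Sketch:

    // CHECK-NEXT: call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> [[VU1]])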
86 void test_builtin_reduce_mul(si8 vi1, u4 vu1) {
91 // CHECK: [[VU1:%.+]] = load <4 x i32>, ptr %vu1.addr, align 16
93 unsigned r3 = __builtin_reduce_mul(vu1);
104 const u4 cvu1 = vu1;
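Same pattern for mul — presumably the mul reduction intrinsic, with the const copy again feeding a later reduction in the unmatched tail of the function:

    // CHECK-NEXT: call i32 @llvm.vector.reduce.mul.v4i32(<4 x i32> [[VU1]])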
108 void test_builtin_reduce_xor(si8 vi1, u4 vu1) {
114 // CHECK: [[VU1:%.+]] = load <4 x i32>, ptr %vu1.addr, align 16
116 unsigned r3 = __builtin_reduce_xor(vu1);
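For the bitwise reductions the expected lowering is the matching bitwise intrinsic; a sketch for the xor case:

    // CHECK-NEXT: call i32 @llvm.vector.reduce.xor.v4i32(<4 x i32> [[VU1]])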
119 void test_builtin_reduce_or(si8 vi1, u4 vu1) {
125 // CHECK: [[VU1:%.+]] = load <4 x i32>, ptr %vu1.addr, align 16
127 unsigned r3 = __builtin_reduce_or(vu1);
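Sketch of the presumed or-reduction directive following the load:

    // CHECK-NEXT: call i32 @llvm.vector.reduce.or.v4i32(<4 x i32> [[VU1]])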
130 void test_builtin_reduce_and(si8 vi1, u4 vu1) {
136 // CHECK: [[VU1:%.+]] = load <4 x i32>, ptr %vu1.addr, align 16
138 unsigned r3 = __builtin_reduce_and(vu1);
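And correspondingly for the and case:

    // CHECK-NEXT: call i32 @llvm.vector.reduce.and.v4i32(<4 x i32> [[VU1]])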