Lines Matching +full:4 +full:d
1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
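The SVE and SVE2 check prefixes used throughout this listing come from two separate llc runs, one with only SVE enabled and one with SVE2. The file's actual RUN lines are not shown here; a sketch of what such RUN lines typically look like for this prefix setup (an assumption, not the file's exact text):

; RUN: llc -mtriple=aarch64 -mattr=+sve < %s | FileCheck %s --check-prefixes=CHECK,SVE
; RUN: llc -mtriple=aarch64 -mattr=+sve2 < %s | FileCheck %s --check-prefixes=CHECK,SVE2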
8 ; SVE-NEXT: eor z0.d, z0.d, z1.d
9 ; SVE-NEXT: lsr z1.d, z0.d, #4
10 ; SVE-NEXT: lsl z0.d, z0.d, #60
11 ; SVE-NEXT: orr z0.d, z0.d, z1.d
16 ; SVE2-NEXT: xar z0.d, z0.d, z1.d, #4
26 ; SVE-NEXT: eor z0.d, z0.d, z1.d
27 ; SVE-NEXT: lsl z1.d, z0.d, #60
28 ; SVE-NEXT: lsr z0.d, z0.d, #4
29 ; SVE-NEXT: orr z0.d, z0.d, z1.d
34 ; SVE2-NEXT: xar z0.d, z0.d, z1.d, #4
37 … @llvm.fshr.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat (i64 4))
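The pattern under test is an XOR whose result is rotated by a constant amount. With plain SVE this lowers to eor plus a shift pair and an orr; with SVE2 it folds into a single xar (exclusive OR and rotate right by immediate). A self-contained sketch of such a test function, using a hypothetical name and the rotate-right (fshr) form visible in the line above:

declare <vscale x 2 x i64> @llvm.fshr.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)

define <vscale x 2 x i64> @xar_rotr_by_4(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
  ; XOR the inputs, then rotate the result right by 4 within each 64-bit element.
  %a = xor <vscale x 2 x i64> %x, %y
  %b = call <vscale x 2 x i64> @llvm.fshr.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat (i64 4))
  ret <vscale x 2 x i64> %b
}

Compiled with -mattr=+sve2 this is expected to match a single "xar z0.d, z0.d, z1.d, #4" as checked above; with -mattr=+sve it stays as the eor/lsr/lsl/orr sequence.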
42 define <vscale x 4 x i32> @xar_nxv4i32_l(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y) {
45 ; SVE-NEXT: eor z0.d, z0.d, z1.d
46 ; SVE-NEXT: lsr z1.s, z0.s, #4
48 ; SVE-NEXT: orr z0.d, z0.d, z1.d
53 ; SVE2-NEXT: xar z0.s, z0.s, z1.s, #4
55 %a = xor <vscale x 4 x i32> %x, %y
56 …%b = call <vscale x 4 x i32> @llvm.fshl.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vsc…
57 ret <vscale x 4 x i32> %b
60 define <vscale x 4 x i32> @xar_nxv4i32_r(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y) {
63 ; SVE-NEXT: eor z0.d, z0.d, z1.d
65 ; SVE-NEXT: lsr z0.s, z0.s, #4
66 ; SVE-NEXT: orr z0.d, z0.d, z1.d
71 ; SVE2-NEXT: xar z0.s, z0.s, z1.s, #4
73 %a = xor <vscale x 4 x i32> %x, %y
74 …%b = call <vscale x 4 x i32> @llvm.fshr.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %a, <vsc…
75 ret <vscale x 4 x i32> %b
81 ; SVE-NEXT: eor z0.d, z0.d, z1.d
82 ; SVE-NEXT: lsr z1.h, z0.h, #4
84 ; SVE-NEXT: orr z0.d, z0.d, z1.d
89 ; SVE2-NEXT: xar z0.h, z0.h, z1.h, #4
99 ; SVE-NEXT: eor z0.d, z0.d, z1.d
101 ; SVE-NEXT: lsr z0.h, z0.h, #4
102 ; SVE-NEXT: orr z0.d, z0.d, z1.d
107 ; SVE2-NEXT: xar z0.h, z0.h, z1.h, #4
110 … @llvm.fshr.nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %a, <vscale x 8 x i16> splat (i16 4))
117 ; SVE-NEXT: eor z0.d, z0.d, z1.d
118 ; SVE-NEXT: lsr z1.b, z0.b, #4
119 ; SVE-NEXT: lsl z0.b, z0.b, #4
120 ; SVE-NEXT: orr z0.d, z0.d, z1.d
125 ; SVE2-NEXT: xar z0.b, z0.b, z1.b, #4
128 …> @llvm.fshl.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat (i8 4))
135 ; SVE-NEXT: eor z0.d, z0.d, z1.d
136 ; SVE-NEXT: lsl z1.b, z0.b, #4
137 ; SVE-NEXT: lsr z0.b, z0.b, #4
138 ; SVE-NEXT: orr z0.d, z0.d, z1.d
143 ; SVE2-NEXT: xar z0.b, z0.b, z1.b, #4
146 …> @llvm.fshr.nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %a, <vscale x 16 x i8> splat (i8 4))
154 ; CHECK-NEXT: mov z3.d, z2.d
155 ; CHECK-NEXT: subr z2.d, z2.d, #0 // =0x0
156 ; CHECK-NEXT: eor z0.d, z0.d, z1.d
157 ; CHECK-NEXT: ptrue p0.d
158 ; CHECK-NEXT: and z3.d, z3.d, #0x3f
159 ; CHECK-NEXT: and z2.d, z2.d, #0x3f
161 ; CHECK-NEXT: lsl z1.d, p0/m, z1.d, z3.d
162 ; CHECK-NEXT: lsr z0.d, p0/m, z0.d, z2.d
163 ; CHECK-NEXT: orr z0.d, z1.d, z0.d
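This CHECK-only block (shared by the SVE and SVE2 runs) is the case where the rotate amount is not a compile-time constant: xar takes an immediate rotate, so the code stays as masked, predicated variable shifts even on SVE2. A sketch of IR with that shape, assuming a third vector argument supplies the rotate amount and using a hypothetical function name:

declare <vscale x 2 x i64> @llvm.fshl.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)

define <vscale x 2 x i64> @xar_variable_amount(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y, <vscale x 2 x i64> %amt) {
  ; Non-constant rotate amount: there is no single-instruction xar form for this.
  %a = xor <vscale x 2 x i64> %x, %y
  %b = call <vscale x 2 x i64> @llvm.fshl.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> %amt)
  ret <vscale x 2 x i64> %b
}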
175 ; CHECK-NEXT: orr z0.d, z0.d, z1.d
176 ; CHECK-NEXT: lsr z1.d, z0.d, #4
177 ; CHECK-NEXT: lsl z0.d, z0.d, #60
178 ; CHECK-NEXT: orr z0.d, z0.d, z1.d
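Here the combining operation is an orr rather than an eor, and the rotate remains a shift pair on both runs: xar specifically fuses an exclusive OR with the rotate, so an OR followed by a rotate does not qualify. A sketch of that negative case, with a hypothetical name:

declare <vscale x 2 x i64> @llvm.fshr.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)

define <vscale x 2 x i64> @no_xar_with_or(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
  ; OR instead of XOR: the rotate cannot be folded into xar.
  %a = or <vscale x 2 x i64> %x, %y
  %b = call <vscale x 2 x i64> @llvm.fshr.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %a, <vscale x 2 x i64> splat (i64 4))
  ret <vscale x 2 x i64> %b
}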
189 ; CHECK-NEXT: eor z0.d, z0.d, z1.d
200 ; SVE-NEXT: eor z0.d, z0.d, z1.d
201 ; SVE-NEXT: lsr z1.d, z0.d, #4
202 ; SVE-NEXT: lsl z0.d, z0.d, #60
203 ; SVE-NEXT: orr z0.d, z0.d, z1.d
208 ; SVE2-NEXT: xar z0.d, z0.d, z1.d, #4
212 %shr = lshr <vscale x 2 x i64> %xor, splat (i64 4)
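In this function the rotate is written out manually as a shift pair and an or rather than via a funnel-shift intrinsic, and the SVE2 run still collapses it to xar #4, since shl/lshr/or with amounts summing to the element width is recognised as a rotate. A sketch of that shape, assuming a left-shift amount of 60 to complement the lshr by 4 visible above, with a hypothetical name:

define <vscale x 2 x i64> @xar_from_explicit_shifts(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
  %xor = xor <vscale x 2 x i64> %x, %y
  ; Rotate right by 4 spelled as two shifts and an or: 60 + 4 == 64.
  %shl = shl <vscale x 2 x i64> %xor, splat (i64 60)
  %shr = lshr <vscale x 2 x i64> %xor, splat (i64 4)
  %or = or <vscale x 2 x i64> %shl, %shr
  ret <vscale x 2 x i64> %or
}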
221 ; CHECK-NEXT: eor z0.d, z0.d, z1.d
222 ; CHECK-NEXT: lsl z1.d, z0.d, #60
223 ; CHECK-NEXT: lsr z0.d, z0.d, #3
224 ; CHECK-NEXT: orr z0.d, z1.d, z0.d
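This final CHECK block shifts by 60 and 3, which do not add up to the 64-bit element width, so the shift/or combination is not a rotate and no xar is formed on either run. A sketch of that negative case, with a hypothetical name:

define <vscale x 2 x i64> @no_xar_amounts_dont_sum(<vscale x 2 x i64> %x, <vscale x 2 x i64> %y) {
  %xor = xor <vscale x 2 x i64> %x, %y
  ; 60 + 3 != 64, so this is not a rotate of %xor.
  %shl = shl <vscale x 2 x i64> %xor, splat (i64 60)
  %shr = lshr <vscale x 2 x i64> %xor, splat (i64 3)
  %or = or <vscale x 2 x i64> %shl, %shr
  ret <vscale x 2 x i64> %or
}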
234 declare <vscale x 4 x i32> @llvm.fshl.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x…
238 declare <vscale x 4 x i32> @llvm.fshr.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x…