Lines matching "8" (full-word):

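These are the matched lines from what appears to be an LLVM IR test of fixed-length vector high-half multiplies for AArch64 SVE (functions named @smulh_* and @umulh_*). Only lines containing a full-word "8" are shown, so the function bodies appear with gaps; lines of the form "; VBITS_GE_256-NEXT: ..." that did match are FileCheck assertions against the generated assembly.
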
define <8 x i8> @smulh_v8i8(<8 x i8> %op1, <8 x i8> %op2) vscale_range(2,0) #0 {
  %insert = insertelement <8 x i16> undef, i16 8, i64 0
  %splat = shufflevector <8 x i16> %insert, <8 x i16> undef, <8 x i32> zeroinitializer
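  ; Note: this %insert/%splat pair appears to be dead, here and in several
  ; functions below; the lshr uses a literal splat constant instead,
  ; presumably a leftover from an earlier form of the test.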
  %1 = sext <8 x i8> %op1 to <8 x i16>
  %2 = sext <8 x i8> %op2 to <8 x i16>
  %mul = mul <8 x i16> %1, %2
  %shr = lshr <8 x i16> %mul, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %res = trunc <8 x i16> %shr to <8 x i8>
  ret <8 x i8> %res
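
Every smulh_* body here follows the same widening-multiply idiom: sign-extend both operands, multiply, shift the product right by the source element width, truncate back. A minimal scalar sketch of the same pattern (illustrative only; @smulh_i8 is not part of the test file):

  define i8 @smulh_i8(i8 %a, i8 %b) {
    %1 = sext i8 %a to i16    ; widen both operands
    %2 = sext i8 %b to i16
    %mul = mul i16 %1, %2     ; full 16-bit product
    %shr = lshr i16 %mul, 8   ; keep the high 8 bits
    %res = trunc i16 %shr to i8
    ret i8 %res
  }

The vector functions do exactly this lane-wise, which is the shape a backend can match and lower to a single high-half multiply instruction.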

  %shr = lshr <16 x i16> %mul, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  ; (wrapped fragment of a longer splat-of-8 shift line; truncated in the search output)

  %insert = insertelement <64 x i16> undef, i16 8, i64 0
  ; (wrapped fragment of the <64 x i16> splat-of-8 shift line; truncated in the search output)

define void @smulh_v128i8(ptr %a, ptr %b) vscale_range(8,0) #0 {
  ; (wrapped fragments of the <128 x i16> splat-of-8 constant; truncated in the search output)

define <8 x i16> @smulh_v8i16(<8 x i16> %op1, <8 x i16> %op2) vscale_range(2,0) #0 {
  %1 = sext <8 x i16> %op1 to <8 x i32>
  %2 = sext <8 x i16> %op2 to <8 x i32>
  %mul = mul <8 x i32> %1, %2
  %shr = lshr <8 x i32> %mul, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %res = trunc <8 x i32> %shr to <8 x i16>
  ret <8 x i16> %res
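
For i16 elements the product is 32 bits wide and the shift amount is 16; for example, 0x4000 * 0x4000 = 0x10000000, and 0x10000000 >> 16 = 0x1000. Note that lshr rather than ashr is correct even for the signed case: the trunc keeps only the low half of the shifted product, and lshr and ashr agree on those bits.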

define void @smulh_v64i16(ptr %a, ptr %b) vscale_range(8,0) #0 {

  %op1 = load <8 x i32>, ptr %a
  %op2 = load <8 x i32>, ptr %b
  %1 = sext <8 x i32> %op1 to <8 x i64>
  %2 = sext <8 x i32> %op2 to <8 x i64>
  %mul = mul <8 x i64> %1, %2
  %shr = lshr <8 x i64> %mul, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
  %res = trunc <8 x i64> %shr to <8 x i32>
  store <8 x i32> %res, ptr %a
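
The wider variants pass vectors through memory (ptr arguments with explicit load/store) rather than by value. The vscale_range attributes pin the assumed SVE register width: vscale_range(2,0) requires vectors of at least 2 x 128 = 256 bits with no upper bound, and vscale_range(8,0) requires at least 1024 bits, enough to hold these fixed-length vectors in a single SVE register.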

; VBITS_GE_256-NEXT: mov x8, #8 // =0x8

define void @smulh_v32i32(ptr %a, ptr %b) vscale_range(8,0) #0 {

  %op1 = load <8 x i64>, ptr %a
  %op2 = load <8 x i64>, ptr %b
  %1 = sext <8 x i64> %op1 to <8 x i128>
  %2 = sext <8 x i64> %op2 to <8 x i128>
  %mul = mul <8 x i128> %1, %2
  %shr = lshr <8 x i128> %mul, <i128 64, i128 64, i128 64, i128 64, i128 64, i128 64, i128 64, i128 64>
  %res = trunc <8 x i128> %shr to <8 x i64>
  store <8 x i64> %res, ptr %a
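
The i64 case widens all the way to <8 x i128>, a type no target supports natively; the point of such a test is that the backend must recognize the whole widen-multiply-shift-truncate chain and select a 64-bit high-half multiply instead of expanding the 128-bit arithmetic.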

define void @smulh_v16i64(ptr %a, ptr %b) vscale_range(8,0) #0 {

define <8 x i8> @umulh_v8i8(<8 x i8> %op1, <8 x i8> %op2) vscale_range(2,0) #0 {
  %1 = zext <8 x i8> %op1 to <8 x i16>
  %2 = zext <8 x i8> %op2 to <8 x i16>
  %mul = mul <8 x i16> %1, %2
  %shr = lshr <8 x i16> %mul, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  %res = trunc <8 x i16> %shr to <8 x i8>
  ret <8 x i8> %res
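
The umulh_* half of the file repeats the smulh_* structure with zext in place of sext. A scalar sketch of the unsigned pattern (again illustrative only; @umulh_i8 is not part of the test file):

  define i8 @umulh_i8(i8 %a, i8 %b) {
    %1 = zext i8 %a to i16    ; unsigned widen
    %2 = zext i8 %b to i16
    %mul = mul i16 %1, %2
    %shr = lshr i16 %mul, 8   ; unsigned high half
    %res = trunc i16 %shr to i8
    ret i8 %res
  }

For example, 200 * 3 = 600 = 0x258, and 0x258 >> 8 = 2, the high byte of the unsigned product.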

  %shr = lshr <16 x i16> %mul, <i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8, i16 8>
  ; (wrapped fragments of longer splat-of-8 shift lines; truncated in the search output)

define void @umulh_v128i8(ptr %a, ptr %b) vscale_range(8,0) #0 {
  %insert = insertelement <128 x i16> undef, i16 8, i64 0
  ; (wrapped fragments of the <128 x i16> splat-of-8 constant; truncated in the search output)

define <8 x i16> @umulh_v8i16(<8 x i16> %op1, <8 x i16> %op2) vscale_range(2,0) #0 {
  %1 = zext <8 x i16> %op1 to <8 x i32>
  %2 = zext <8 x i16> %op2 to <8 x i32>
  %mul = mul <8 x i32> %1, %2
  %shr = lshr <8 x i32> %mul, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  %res = trunc <8 x i32> %shr to <8 x i16>
  ret <8 x i16> %res

define void @umulh_v64i16(ptr %a, ptr %b) vscale_range(8,0) #0 {

  %op1 = load <8 x i32>, ptr %a
  %op2 = load <8 x i32>, ptr %b
  %insert = insertelement <8 x i64> undef, i64 32, i64 0
  %splat = shufflevector <8 x i64> %insert, <8 x i64> undef, <8 x i32> zeroinitializer
  %1 = zext <8 x i32> %op1 to <8 x i64>
  %2 = zext <8 x i32> %op2 to <8 x i64>
  %mul = mul <8 x i64> %1, %2
  %shr = lshr <8 x i64> %mul, <i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32, i64 32>
  %res = trunc <8 x i64> %shr to <8 x i32>
  store <8 x i32> %res, ptr %a

; VBITS_GE_256-NEXT: mov x8, #8 // =0x8

define void @umulh_v32i32(ptr %a, ptr %b) vscale_range(8,0) #0 {

  %op1 = load <8 x i64>, ptr %a
  %op2 = load <8 x i64>, ptr %b
  %1 = zext <8 x i64> %op1 to <8 x i128>
  %2 = zext <8 x i64> %op2 to <8 x i128>
  %mul = mul <8 x i128> %1, %2
  %shr = lshr <8 x i128> %mul, <i128 64, i128 64, i128 64, i128 64, i128 64, i128 64, i128 64, i128 64>
  %res = trunc <8 x i128> %shr to <8 x i64>
  store <8 x i64> %res, ptr %a

define void @umulh_v16i64(ptr %a, ptr %b) vscale_range(8,0) #0 {