
Searched full:mul (Results 1 – 25 of 4919) sorted by relevance

/llvm-project/polly/lib/External/isl/imath/tests/
mul.tc
5 mul:0,5000,0:0
6 mul:0,10235,=1:0
7 mul:0,-58382939939929385885,=2:0
10 mul:0,0,0:0
11 mul:100000000000000000,0,=1:0
12 mul:102328632557663,995533253464107,0:101871556492663391138178301941
13 mul:1099511627775,-65839281,=2:-72391055023845629775
14 mul:424417695707616156,78196371461084006246,0:33187923788210071918733291062874510376
15 mul:8785533773682081,509516924811454802,0:4476378151193669633826349848802962
16 mul:3149810015499242805866,671806182669824281823,=1:2116061842647726364810477474851492861573718
[all …]
/llvm-project/llvm/test/Analysis/ScalarEvolution/
pr18606.ll
4 ; CHECK: %mul.lcssa5 = phi i32 [ %a.promoted4, %entry ], [ %mul.30, %for.body3 ]
5 ; CHECK: %mul = mul nsw i32 %mul.lcssa5, %mul.lcssa5
6 ; CHECK: %mul.30 = mul nsw i32 %mul.29, %mul.29
21 %mul.lcssa5 = phi i32 [ %a.promoted4, %entry ], [ %mul.30, %for.body3 ]
26 %mul = mul nsw i32 %mul.lcssa5, %mul.lcssa5
27 %mul.1 = mul nsw i32 %mul, %mul
28 %mul.2 = mul nsw i32 %mul.1, %mul.1
29 %mul.3 = mul nsw i32 %mul.2, %mul.2
30 %mul.4 = mul nsw i32 %mul.3, %mul.3
31 %mul.5 = mul nsw i32 %mul.4, %mul.4
[all …]
max-mulops-inline.ll
12 ; CHECK1: %mul.1 = mul nsw i32 %mul, %mul
15 ; CHECK10: %mul.1 = mul nsw i32 %mul, %mul
20 %mul = mul nsw i32 %a.promoted, %a.promoted
21 %mul.1 = mul nsw i32 %mul, %mul
22 %mul.2 = mul nsw i32 %mul.1, %mul.1
23 %mul.3 = mul nsw i32 %mul.2, %mul.2
24 %mul.4 = mul nsw i32 %mul.3, %mul.3
25 %mul.5 = mul nsw i32 %mul.4, %mul.4
26 store i32 %mul.5, ptr @a, align 4
pr18606-min-zeros.ll
5 ; CHECK: %37 = mul i32 %36, %36
27 %6 = mul i32 %5, %5
28 %7 = mul i32 %6, %6
29 %8 = mul i32 %7, %7
30 %9 = mul i32 %8, %8
31 %10 = mul i32 %9, %9
32 %11 = mul i32 %10, %10
33 %12 = mul i32 %11, %11
34 %13 = mul i32 %12, %12
35 %14 = mul i32 %13, %13
[all …]
/llvm-project/llvm/test/Transforms/Reassociate/
repeats.ll
50 %tmp1 = mul i8 3, 3
51 %tmp2 = mul i8 %tmp1, 3
52 %tmp3 = mul i8 %tmp2, 3
53 %tmp4 = mul i8 %tmp3, 3
61 ; CHECK-NEXT: [[TMP3:%.*]] = mul i3 [[X]], [[X]]
62 ; CHECK-NEXT: [[TMP4:%.*]] = mul i3 [[TMP3]], [[X]]
63 ; CHECK-NEXT: [[TMP5:%.*]] = mul i3 [[TMP4]], [[TMP3]]
66 %tmp1 = mul i3 %x, %x
67 %tmp2 = mul i3 %tmp1, %x
68 %tmp3 = mul i3 %tmp2, %x
[all …]
mulfactor.ll
6 ; CHECK-NEXT: [[T2:%.*]] = mul i32 [[A:%.*]], [[A]]
7 ; CHECK-NEXT: [[T6:%.*]] = mul i32 [[A]], 2
9 ; CHECK-NEXT: [[REASS_MUL:%.*]] = mul i32 [[REASS_ADD]], [[B]]
13 %t2 = mul i32 %a, %a
15 %t6 = mul i32 %t5, %b
16 %t8 = mul i32 %b, %b
24 ; CHECK-NEXT: [[REASS_MUL:%.*]] = mul i32 [[T:%.*]], 42
28 %a = mul i32 %t, 6
29 %b = mul i32 %t, 36
38 ; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[X:%.*]], [[X]]
[all …]
mightymul.ll
5 %t0 = mul i32 %x, %x
6 %t1 = mul i32 %t0, %t0
7 %t2 = mul i32 %t1, %t1
8 %t3 = mul i32 %t2, %t2
9 %t4 = mul i32 %t3, %t3
10 %t5 = mul i32 %t4, %t4
11 %t6 = mul i32 %t5, %t5
12 %t7 = mul i32 %t6, %t6
13 %t8 = mul i32 %t7, %t7
14 %t9 = mul i32 %t8, %t8
[all …]
/llvm-project/llvm/test/Transforms/InstCombine/
vector-mul.ll
13 %mul = mul <4 x i8> %InVec, <i8 0, i8 0, i8 0, i8 0>
14 ret <4 x i8> %mul
23 %mul = mul <4 x i8> %InVec, <i8 1, i8 1, i8 1, i8 1>
24 ret <4 x i8> %mul
30 ; CHECK-NEXT: [[MUL:%.*]] = shl <4 x i8> [[INVEC:%.*]], splat (i8 1)
31 ; CHECK-NEXT: ret <4 x i8> [[MUL]]
34 %mul = mul <
[all...]
mul.ll
12 %B = mul i32 %A, 2
21 %B = mul <2 x i32> %A, <i32 8, i32 8>
30 %B = mul i8 %A, 8
31 %C = mul i8 %B, 8
40 %t = mul i32 %i, -1
55 %e = mul i32 %d, %b
67 %e = mul i32 %d, %b
83 %e = mul i32 %d, %b
95 %m = mul i32 %shl, %a
105 %D = mul ns
[all...]
icmp-mul-and.ll
12 %mul = mul i8 %x, 44
13 %and = and i8 %mul, 4
18 ; TODO: Demanded bits does not convert the mul to shift,
23 ; CHECK-NEXT: [[MUL:%.*]] = mul i8 [[X:%.*]], 40
24 ; CHECK-NEXT: call void @use(i8 [[MUL]])
25 ; CHECK-NEXT: [[AND:%.*]] = and i8 [[MUL]], 8
29 %mul = mul i
[all...]
/llvm-project/llvm/test/Transforms/SLPVectorizer/X86/
malformed_phis.ll
15 ; CHECK-NEXT: [[TMP3:%.*]] = mul i32 4, [[TMP]]
16 ; CHECK-NEXT: [[TMP4:%.*]] = mul i32 [[TMP3]], [[TMP]]
17 ; CHECK-NEXT: [[TMP5:%.*]] = mul i32 [[TMP4]], [[TMP]]
18 ; CHECK-NEXT: [[TMP6:%.*]] = mul i32 [[TMP5]], [[TMP]]
19 ; CHECK-NEXT: [[TMP7:%.*]] = mul i32 [[TMP6]], [[TMP]]
20 ; CHECK-NEXT: [[TMP8:%.*]] = mul i32 [[TMP7]], [[TMP]]
21 ; CHECK-NEXT: [[TMP9:%.*]] = mul i32 [[TMP8]], [[TMP]]
22 ; CHECK-NEXT: [[TMP10:%.*]] = mul i32 [[TMP9]], [[TMP]]
23 ; CHECK-NEXT: [[TMP11:%.*]] = mul i32 [[TMP10]], [[TMP]]
24 ; CHECK-NEXT: [[TMP12:%.*]] = mul i3
[all...]
arith-mul-load.ll
19 ; CHECK-NEXT: [[TMP2:%.*]] = mul <4 x i8> [[TMP1]], [[TMP0]]
26 %mul = mul i8 %1, %0
27 store i8 %mul, ptr %r, align 1
32 %mul.1 = mul i8 %3, %2
33 store i8 %mul.1, ptr %arrayidx2.1, align 1
38 %mul.2 = mul i8 %5, %4
39 store i8 %mul.2, ptr %arrayidx2.2, align 1
44 %mul.3 = mul i8 %7, %6
45 store i8 %mul.3, ptr %arrayidx2.3, align 1
54 ; CHECK-NEXT: [[TMP2:%.*]] = mul <8 x i8> [[TMP1]], [[TMP0]]
[all …]
minbitwidth-transformed-operand.ll
13 ; CHECK-NEXT: [[TMP6:%.*]] = mul <16 x i1> [[TMP5]], zeroinitializer
24 %mul.i.1.i = mul i32 %conv12.1.i, 0
26 %mul.i.i = mul i32 %conv12.i, 0
27 %conv14104.i = or i32 %mul.i.1.i, %mul.i.i
29 %mul.i.2.i = mul i32 %conv12.2.i, 0
30 %0 = or i32 %conv14104.i, %mul
[all...]
/llvm-project/llvm/test/CodeGen/AArch64/
sve-tailcall.ll
13 ; CHECK-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill
17 ; CHECK-NEXT: ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
32 ; CHECK-NEXT: str z9, [sp, #1, mul vl] // 16-byte Folded Spill
36 ; CHECK-NEXT: ldr z9, [sp, #1, mul vl] // 16-byte Folded Reload
55 ; CHECK-NEXT: str p15, [sp, #4, mul vl] // 2-byte Folded Spill
56 ; CHECK-NEXT: str p14, [sp, #5, mul vl] // 2-byte Folded Spill
57 ; CHECK-NEXT: str p13, [sp, #6, mul vl] // 2-byte Folded Spill
58 ; CHECK-NEXT: str p12, [sp, #7, mul vl] // 2-byte Folded Spill
59 ; CHECK-NEXT: str p11, [sp, #8, mul vl] // 2-byte Folded Spill
60 ; CHECK-NEXT: str p10, [sp, #9, mul vl] // 2-byte Folded Spill
[all …]
sme2-intrinsics-ld1.ll
10 ; CHECK-NEXT: str p8, [sp, #7, mul vl] // 2-byte Folded Spill
11 ; CHECK-NEXT: str z23, [sp, #1, mul vl] // 16-byte Folded Spill
12 ; CHECK-NEXT: str z22, [sp, #2, mul vl] // 16-byte Folded Spill
13 ; CHECK-NEXT: str z21, [sp, #3, mul vl] // 16-byte Folded Spill
14 ; CHECK-NEXT: str z20, [sp, #4, mul vl] // 16-byte Folded Spill
15 ; CHECK-NEXT: str z19, [sp, #5, mul vl] // 16-byte Folded Spill
16 ; CHECK-NEXT: str z18, [sp, #6, mul vl] // 16-byte Folded Spill
17 ; CHECK-NEXT: str z17, [sp, #7, mul vl] // 16-byte Folded Spill
18 ; CHECK-NEXT: str z16, [sp, #8, mul vl] // 16-byte Folded Spill
19 ; CHECK-NEXT: str z15, [sp, #9, mul v
[all...]
mul_pow2.ll
5 ; Convert mul x, pow2 to shift.
6 ; Convert mul x, pow2 +/- 1 to shift + add/sub.
7 ; Convert mul x, (pow2 + 1) * pow2 to shift + add + shift.
21 %mul = shl nsw i32 %x, 1
22 ret i32 %mul
36 %mul = mul nsw i32 %x, 3
37 ret i32 %mul
51 %mul = shl nsw i32 %x, 2
52 ret i32 %mul
67 %mul = mul nsw i32 %x, 5
[all …]
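The comment lines quoted from mul_pow2.ll above summarize the strength-reduction rules this AArch64 test exercises: a multiply by a power of two becomes a shift, and a multiply by pow2 +/- 1 becomes a shift plus an add or sub. As a minimal sketch of IR those rules apply to (not taken from the results above; the function names are invented for illustration):

define i32 @mul_by_8(i32 %x) {
  ; power of two: expected to lower to a single left shift (x << 3)
  %r = mul nsw i32 %x, 8
  ret i32 %r
}

define i32 @mul_by_7(i32 %x) {
  ; pow2 - 1: expected to lower to a shift plus a subtract ((x << 3) - x)
  %r = mul nsw i32 %x, 7
  ret i32 %r
}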
aarch64-mull-masks.ll
15 ; CHECK-GI-NEXT: mul x0, x9, x8
20 %mul = mul nuw i64 %and1, %and
21 ret i64 %mul
34 ; CHECK-GI-NEXT: mul x0, x8, x9
39 %mul = mul nuw nsw i64 %and, %conv
40 ret i64 %mul
53 ; CHECK-GI-NEXT: mul x0, x9, x8
58 %mul
[all...]
aarch64-mulv.ll
7 declare i8 @llvm.vector.reduce.mul.v2i8(<2 x i8>)
8 declare i8 @llvm.vector.reduce.mul.v3i8(<3 x i8>)
9 declare i8 @llvm.vector.reduce.mul.v4i8(<4 x i8>)
10 declare i8 @llvm.vector.reduce.mul.v8i8(<8 x i8>)
11 declare i8 @llvm.vector.reduce.mul.v16i8(<16 x i8>)
12 declare i8 @llvm.vector.reduce.mul.v32i8(<32 x i8>)
13 declare i16 @llvm.vector.reduce.mul.v2i16(<2 x i16>)
14 declare i16 @llvm.vector.reduce.mul.v3i16(<3 x i16>)
15 declare i16 @llvm.vector.reduce.mul.v4i16(<4 x i16>)
16 declare i16 @llvm.vector.reduce.mul.v8i16(<8 x i16>)
[all …]
aarch64-fix-cortex-a53-835769.ll
26 %mul = mul nsw i64 %0, %b
27 %add = add nsw i64 %mul, %a
45 %mul = mul nsw i32 %0, %b
46 %add = add nsw i32 %mul, %a
60 %mul = mul nsw i64 %0, %b
61 %sub = sub nsw i64 %a, %mul
76 %mul = mul nsw i32 %0, %b
77 %sub = sub nsw i32 %a, %mul
91 %mul = mul nsw i64 %0, %b
92 ret i64 %mul
[all …]
/llvm-project/llvm/test/CodeGen/SystemZ/
int-mul-06.ll
10 %mul = mul i64 %a, 2
11 ret i64 %mul
19 %mul = mul i64 %a, 3
20 ret i64 %mul
28 %mul = mul i64 %a, 32767
29 ret i64 %mul
37 %mul = mul i64 %a, 32768
38 ret i64 %mul
46 %mul = mul i64 %a, 32769
47 ret i64 %mul
[all …]
int-mul-05.ll
10 %mul = mul i32 %a, 2
11 ret i32 %mul
19 %mul = mul i32 %a, 3
20 ret i32 %mul
28 %mul = mul i32 %a, 32767
29 ret i32 %mul
37 %mul = mul i32 %a, 32768
38 ret i32 %mul
46 %mul = mul i32 %a, 32769
47 ret i32 %mul
[all …]
/llvm-project/llvm/test/Transforms/ConstraintElimination/
mul.ll
9 ; CHECK-NEXT: [[START_MUL_4:%.*]] = mul nuw i8 [[START:%.*]], 4
15 %start.mul.4 = mul nuw i8 %start, 4
16 %c.1 = icmp ult i8 %start.mul.4, %high
26 ; CHECK-NEXT: [[START_MUL_4:%.*]] = mul nuw i8 [[START:%.*]], 4
29 ; CHECK-NEXT: [[START_MUL_2:%.*]] = mul nuw i8 [[START]], 2
33 %start.mul.4 = mul nuw i8 %start, 4
34 %c.1 = icmp ult i8 %start.mul.4, %high
37 %start.mul.2 = mul nuw i8 %start, 2
38 %t = icmp ult i8 %start.mul.2, %high
45 ; CHECK-NEXT: [[START_MUL_4:%.*]] = mul nuw i8 [[START:%.*]], 4
[all …]
/llvm-project/llvm/test/Analysis/CostModel/X86/
reduce-mul.ll
13 …an estimated cost of 0 for instruction: %V1 = call i64 @llvm.vector.reduce.mul.v1i64(<1 x i64> und…
14 …an estimated cost of 9 for instruction: %V2 = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> und…
15 …n estimated cost of 16 for instruction: %V4 = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> und…
16 …n estimated cost of 30 for instruction: %V8 = call i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> und…
17 … estimated cost of 58 for instruction: %V16 = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> u…
21 …an estimated cost of 0 for instruction: %V1 = call i64 @llvm.vector.reduce.mul.v1i64(<1 x i64> und…
22 …an estimated cost of 9 for instruction: %V2 = call i64 @llvm.vector.reduce.mul.v2i64(<2 x i64> und…
23 …n estimated cost of 16 for instruction: %V4 = call i64 @llvm.vector.reduce.mul.v4i64(<4 x i64> und…
24 …n estimated cost of 30 for instruction: %V8 = call i64 @llvm.vector.reduce.mul.v8i64(<8 x i64> und…
25 … estimated cost of 58 for instruction: %V16 = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> u…
[all …]
/llvm-project/llvm/test/Transforms/CorrelatedValuePropagation/
mul.ll
10 ; CHECK-NEXT: [[MUL:%.*]] = mul nuw nsw i8 [[A]], 50
11 ; CHECK-NEXT: ret i8 [[MUL]]
20 %mul = mul i8 %a, 50
21 ret i8 %mul
33 ; CHECK-NEXT: [[MUL:%.*]] = mul nuw i8 [[A]], 50
34 ; CHECK-NEXT: ret i8 [[MUL]]
43 %mul
[all...]
/llvm-project/llvm/test/Transforms/IndVarSimplify/X86/
huge_muls.ll
31 %tmp7 = mul i32 %tmp6, %tmp1
37 %tmp8 = mul i32 %tmp7, %local_2_24.us.postloop
38 %tmp9 = mul i32 %tmp8, %local_2_24.us.postloop
39 %tmp10 = mul i32 %tmp7, %tmp9
40 %tmp11 = mul i32 %tmp10, %tmp9
41 %tmp12 = mul i32 %tmp7, %tmp11
42 %tmp13 = mul i32 %tmp12, %tmp11
43 %tmp14 = mul i32 %tmp7, %tmp13
44 %tmp15 = mul i32 %tmp14, %tmp13
45 %tmp16 = mul i32 %tmp7, %tmp15
[all …]
