; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -S -mtriple=riscv64 -mattr=+v | FileCheck %s

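; This file checks the RISC-V (+v) cost model estimates for the saturating
; integer intrinsics (sadd.sat, uadd.sat, usub.sat, ssub.sat, ushl.sat,
; sshl.sat) over scalar, fixed-width vector, and scalable vector types.
; For sadd.sat below, scalars cost 7 while vector types cost 1 until the type
; no longer fits in a single vector register, after which the cost grows
; roughly in proportion to the number of registers covered (2/4/8), likely
; reflecting LMUL.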
define void @sadd.sat() {
; CHECK-LABEL: 'sadd.sat'
; CHECK-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %1 = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x i8> @llvm.sadd.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x i8> @llvm.sadd.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x i8> @llvm.sadd.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %10 = call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %11 = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %12 = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %13 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %14 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 2 x i16> @llvm.sadd.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 4 x i16> @llvm.sadd.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %18 = call <vscale x 16 x i16> @llvm.sadd.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %19 = call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %20 = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %21 = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %22 = call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %23 = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %24 = call <vscale x 2 x i32> @llvm.sadd.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %25 = call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %26 = call <vscale x 8 x i32> @llvm.sadd.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %27 = call <vscale x 16 x i32> @llvm.sadd.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %28 = call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %29 = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %30 = call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %31 = call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %32 = call <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %33 = call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %34 = call <vscale x 4 x i64> @llvm.sadd.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %35 = call <vscale x 8 x i64> @llvm.sadd.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 2 x i8> @llvm.sadd.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.sadd.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.sadd.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.sadd.sat.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 2 x i16> @llvm.sadd.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.sadd.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.sadd.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.sadd.sat.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 2 x i32> @llvm.sadd.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.sadd.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.sadd.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.sadd.sat.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.sadd.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.sadd.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

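; uadd.sat: scalar forms are cheaper than sadd.sat (3 vs. 7); the vector costs
; follow the same per-register pattern as sadd.sat above.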
define void @uadd.sat() {
; CHECK-LABEL: 'uadd.sat'
; CHECK-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %1 = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x i8> @llvm.uadd.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x i8> @llvm.uadd.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x i8> @llvm.uadd.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %10 = call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %11 = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %12 = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %13 = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %14 = call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 2 x i16> @llvm.uadd.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 4 x i16> @llvm.uadd.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %18 = call <vscale x 16 x i16> @llvm.uadd.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %19 = call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %20 = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %21 = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %22 = call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %23 = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %24 = call <vscale x 2 x i32> @llvm.uadd.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %25 = call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %26 = call <vscale x 8 x i32> @llvm.uadd.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %27 = call <vscale x 16 x i32> @llvm.uadd.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %28 = call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %29 = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %30 = call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %31 = call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %32 = call <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %33 = call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %34 = call <vscale x 4 x i64> @llvm.uadd.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %35 = call <vscale x 8 x i64> @llvm.uadd.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 2 x i8> @llvm.uadd.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.uadd.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.uadd.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.uadd.sat.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 2 x i16> @llvm.uadd.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.uadd.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.uadd.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.uadd.sat.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 2 x i32> @llvm.uadd.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.uadd.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.uadd.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.uadd.sat.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.uadd.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.uadd.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

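; usub.sat: same costs as uadd.sat (scalar cost 3, vector costs per register).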
define void @usub.sat() {
; CHECK-LABEL: 'usub.sat'
; CHECK-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %1 = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x i8> @llvm.usub.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x i8> @llvm.usub.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x i8> @llvm.usub.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %10 = call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %11 = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %12 = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %13 = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %14 = call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 2 x i16> @llvm.usub.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 4 x i16> @llvm.usub.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %18 = call <vscale x 16 x i16> @llvm.usub.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %19 = call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %20 = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %21 = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %22 = call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %23 = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %24 = call <vscale x 2 x i32> @llvm.usub.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %25 = call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %26 = call <vscale x 8 x i32> @llvm.usub.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %27 = call <vscale x 16 x i32> @llvm.usub.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 3 for instruction: %28 = call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %29 = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %30 = call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %31 = call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %32 = call <16 x i64> @llvm.usub.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %33 = call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %34 = call <vscale x 4 x i64> @llvm.usub.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %35 = call <vscale x 8 x i64> @llvm.usub.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.usub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.usub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 2 x i8> @llvm.usub.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.usub.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.usub.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.usub.sat.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.usub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 2 x i16> @llvm.usub.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.usub.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.usub.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.usub.sat.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.usub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 2 x i32> @llvm.usub.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.usub.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.usub.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.usub.sat.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.usub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.usub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.usub.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.usub.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.usub.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

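; ssub.sat: same costs as sadd.sat (scalar cost 7, vector costs per register).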
define void @ssub.sat() {
; CHECK-LABEL: 'ssub.sat'
; CHECK-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %1 = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %2 = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %3 = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %4 = call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %5 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %6 = call <vscale x 2 x i8> @llvm.ssub.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %7 = call <vscale x 4 x i8> @llvm.ssub.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %8 = call <vscale x 8 x i8> @llvm.ssub.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %9 = call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %10 = call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %11 = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %12 = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %13 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %14 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %15 = call <vscale x 2 x i16> @llvm.ssub.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %16 = call <vscale x 4 x i16> @llvm.ssub.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %18 = call <vscale x 16 x i16> @llvm.ssub.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %19 = call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %20 = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %21 = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %22 = call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %23 = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %24 = call <vscale x 2 x i32> @llvm.ssub.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %25 = call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %26 = call <vscale x 8 x i32> @llvm.ssub.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %27 = call <vscale x 16 x i32> @llvm.ssub.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 7 for instruction: %28 = call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %29 = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %30 = call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %31 = call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %32 = call <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 2 for instruction: %33 = call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %34 = call <vscale x 4 x i64> @llvm.ssub.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %35 = call <vscale x 8 x i64> @llvm.ssub.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 2 x i8> @llvm.ssub.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.ssub.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.ssub.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.ssub.sat.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 2 x i16> @llvm.ssub.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.ssub.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.ssub.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.ssub.sat.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 2 x i32> @llvm.ssub.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.ssub.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.ssub.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.ssub.sat.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.ssub.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.ssub.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

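; ushl.sat currently has no cheap vector lowering here: fixed-width vectors get
; large scalarization-style costs, and scalable vectors report an Invalid cost,
; presumably because a scalable type cannot be scalarized.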
define void @ushl.sat() {
; CHECK-LABEL: 'ushl.sat'
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %1 = call i8 @llvm.ushl.sat.i8(i8 undef, i8 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %2 = call <2 x i8> @llvm.ushl.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %3 = call <4 x i8> @llvm.ushl.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %4 = call <8 x i8> @llvm.ushl.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %5 = call <16 x i8> @llvm.ushl.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x i8> @llvm.ushl.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x i8> @llvm.ushl.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x i8> @llvm.ushl.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x i8> @llvm.ushl.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %10 = call i16 @llvm.ushl.sat.i16(i16 undef, i16 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %11 = call <2 x i16> @llvm.ushl.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %12 = call <4 x i16> @llvm.ushl.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %13 = call <8 x i16> @llvm.ushl.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 47 for instruction: %14 = call <16 x i16> @llvm.ushl.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %15 = call <vscale x 2 x i16> @llvm.ushl.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %16 = call <vscale x 4 x i16> @llvm.ushl.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %17 = call <vscale x 8 x i16> @llvm.ushl.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %18 = call <vscale x 16 x i16> @llvm.ushl.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %19 = call i32 @llvm.ushl.sat.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %20 = call <2 x i32> @llvm.ushl.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %21 = call <4 x i32> @llvm.ushl.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 23 for instruction: %22 = call <8 x i32> @llvm.ushl.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 47 for instruction: %23 = call <16 x i32> @llvm.ushl.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %24 = call <vscale x 2 x i32> @llvm.ushl.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %25 = call <vscale x 4 x i32> @llvm.ushl.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %26 = call <vscale x 8 x i32> @llvm.ushl.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %27 = call <vscale x 16 x i32> @llvm.ushl.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %28 = call i64 @llvm.ushl.sat.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %29 = call <2 x i64> @llvm.ushl.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 11 for instruction: %30 = call <4 x i64> @llvm.ushl.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 23 for instruction: %31 = call <8 x i64> @llvm.ushl.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 47 for instruction: %32 = call <16 x i64> @llvm.ushl.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %33 = call <vscale x 2 x i64> @llvm.ushl.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %34 = call <vscale x 4 x i64> @llvm.ushl.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %35 = call <vscale x 8 x i64> @llvm.ushl.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.ushl.sat.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.ushl.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.ushl.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.ushl.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.ushl.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 2 x i8> @llvm.ushl.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.ushl.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.ushl.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.ushl.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.ushl.sat.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.ushl.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.ushl.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.ushl.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.ushl.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 2 x i16> @llvm.ushl.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.ushl.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.ushl.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.ushl.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.ushl.sat.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.ushl.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.ushl.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.ushl.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.ushl.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 2 x i32> @llvm.ushl.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.ushl.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.ushl.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.ushl.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.ushl.sat.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.ushl.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.ushl.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.ushl.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.ushl.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 2 x i64> @llvm.ushl.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.ushl.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.ushl.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

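; sshl.sat: same cost pattern as ushl.sat above.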
define void @sshl.sat() {
; CHECK-LABEL: 'sshl.sat'
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %1 = call i8 @llvm.sshl.sat.i8(i8 undef, i8 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %2 = call <2 x i8> @llvm.sshl.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %3 = call <4 x i8> @llvm.sshl.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %4 = call <8 x i8> @llvm.sshl.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 32 for instruction: %5 = call <16 x i8> @llvm.sshl.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %6 = call <vscale x 2 x i8> @llvm.sshl.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %7 = call <vscale x 4 x i8> @llvm.sshl.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %8 = call <vscale x 8 x i8> @llvm.sshl.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %9 = call <vscale x 16 x i8> @llvm.sshl.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %10 = call i16 @llvm.sshl.sat.i16(i16 undef, i16 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %11 = call <2 x i16> @llvm.sshl.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %12 = call <4 x i16> @llvm.sshl.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 16 for instruction: %13 = call <8 x i16> @llvm.sshl.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 47 for instruction: %14 = call <16 x i16> @llvm.sshl.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %15 = call <vscale x 2 x i16> @llvm.sshl.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %16 = call <vscale x 4 x i16> @llvm.sshl.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %17 = call <vscale x 8 x i16> @llvm.sshl.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %18 = call <vscale x 16 x i16> @llvm.sshl.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %19 = call i32 @llvm.sshl.sat.i32(i32 undef, i32 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %20 = call <2 x i32> @llvm.sshl.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 8 for instruction: %21 = call <4 x i32> @llvm.sshl.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 23 for instruction: %22 = call <8 x i32> @llvm.sshl.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 47 for instruction: %23 = call <16 x i32> @llvm.sshl.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %24 = call <vscale x 2 x i32> @llvm.sshl.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %25 = call <vscale x 4 x i32> @llvm.sshl.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %26 = call <vscale x 8 x i32> @llvm.sshl.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %27 = call <vscale x 16 x i32> @llvm.sshl.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 1 for instruction: %28 = call i64 @llvm.sshl.sat.i64(i64 undef, i64 undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 4 for instruction: %29 = call <2 x i64> @llvm.sshl.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 11 for instruction: %30 = call <4 x i64> @llvm.sshl.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 23 for instruction: %31 = call <8 x i64> @llvm.sshl.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 47 for instruction: %32 = call <16 x i64> @llvm.sshl.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %33 = call <vscale x 2 x i64> @llvm.sshl.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %34 = call <vscale x 4 x i64> @llvm.sshl.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
; CHECK-NEXT:  Cost Model: Invalid cost for instruction: %35 = call <vscale x 8 x i64> @llvm.sshl.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
; CHECK-NEXT:  Cost Model: Found an estimated cost of 0 for instruction: ret void
;
  call i8 @llvm.sshl.sat.i8(i8 undef, i8 undef)
  call <2 x i8> @llvm.sshl.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
  call <4 x i8> @llvm.sshl.sat.v4i8(<4 x i8> undef, <4 x i8> undef)
  call <8 x i8> @llvm.sshl.sat.v8i8(<8 x i8> undef, <8 x i8> undef)
  call <16 x i8> @llvm.sshl.sat.v16i8(<16 x i8> undef, <16 x i8> undef)
  call <vscale x 2 x i8> @llvm.sshl.sat.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef)
  call <vscale x 4 x i8> @llvm.sshl.sat.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef)
  call <vscale x 8 x i8> @llvm.sshl.sat.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef)
  call <vscale x 16 x i8> @llvm.sshl.sat.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef)
  call i16 @llvm.sshl.sat.i16(i16 undef, i16 undef)
  call <2 x i16> @llvm.sshl.sat.v2i16(<2 x i16> undef, <2 x i16> undef)
  call <4 x i16> @llvm.sshl.sat.v4i16(<4 x i16> undef, <4 x i16> undef)
  call <8 x i16> @llvm.sshl.sat.v8i16(<8 x i16> undef, <8 x i16> undef)
  call <16 x i16> @llvm.sshl.sat.v16i16(<16 x i16> undef, <16 x i16> undef)
  call <vscale x 2 x i16> @llvm.sshl.sat.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef)
  call <vscale x 4 x i16> @llvm.sshl.sat.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef)
  call <vscale x 8 x i16> @llvm.sshl.sat.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef)
  call <vscale x 16 x i16> @llvm.sshl.sat.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef)
  call i32 @llvm.sshl.sat.i32(i32 undef, i32 undef)
  call <2 x i32> @llvm.sshl.sat.v2i32(<2 x i32> undef, <2 x i32> undef)
  call <4 x i32> @llvm.sshl.sat.v4i32(<4 x i32> undef, <4 x i32> undef)
  call <8 x i32> @llvm.sshl.sat.v8i32(<8 x i32> undef, <8 x i32> undef)
  call <16 x i32> @llvm.sshl.sat.v16i32(<16 x i32> undef, <16 x i32> undef)
  call <vscale x 2 x i32> @llvm.sshl.sat.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef)
  call <vscale x 4 x i32> @llvm.sshl.sat.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef)
  call <vscale x 8 x i32> @llvm.sshl.sat.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef)
  call <vscale x 16 x i32> @llvm.sshl.sat.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i32> undef)
  call i64 @llvm.sshl.sat.i64(i64 undef, i64 undef)
  call <2 x i64> @llvm.sshl.sat.v2i64(<2 x i64> undef, <2 x i64> undef)
  call <4 x i64> @llvm.sshl.sat.v4i64(<4 x i64> undef, <4 x i64> undef)
  call <8 x i64> @llvm.sshl.sat.v8i64(<8 x i64> undef, <8 x i64> undef)
  call <16 x i64> @llvm.sshl.sat.v16i64(<16 x i64> undef, <16 x i64> undef)
  call <vscale x 2 x i64> @llvm.sshl.sat.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef)
  call <vscale x 4 x i64> @llvm.sshl.sat.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef)
  call <vscale x 8 x i64> @llvm.sshl.sat.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef)
  ret void
}

466declare i8 @llvm.sadd.sat.i8(i8, i8)
467declare <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8>, <2 x i8>)
468declare <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8>, <4 x i8>)
469declare <8 x i8> @llvm.sadd.sat.v8i8(<8 x i8>, <8 x i8>)
470declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
471declare <vscale x 2 x i8> @llvm.sadd.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
472declare <vscale x 4 x i8> @llvm.sadd.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
473declare <vscale x 8 x i8> @llvm.sadd.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
474declare <vscale x 16 x i8> @llvm.sadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
475declare i16 @llvm.sadd.sat.i16(i16, i16)
476declare <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16>, <2 x i16>)
477declare <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16>, <4 x i16>)
478declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
479declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>)
480declare <vscale x 2 x i16> @llvm.sadd.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
481declare <vscale x 4 x i16> @llvm.sadd.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
482declare <vscale x 8 x i16> @llvm.sadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
483declare <vscale x 16 x i16> @llvm.sadd.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
484declare i32 @llvm.sadd.sat.i32(i32, i32)
485declare <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32>, <2 x i32>)
486declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>)
487declare <8 x i32> @llvm.sadd.sat.v8i32(<8 x i32>, <8 x i32>)
488declare <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32>, <16 x i32>)
489declare <vscale x 2 x i32> @llvm.sadd.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
490declare <vscale x 4 x i32> @llvm.sadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
491declare <vscale x 8 x i32> @llvm.sadd.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
492declare <vscale x 16 x i32> @llvm.sadd.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
493declare i64 @llvm.sadd.sat.i64(i64, i64)
494declare <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64>, <2 x i64>)
495declare <4 x i64> @llvm.sadd.sat.v4i64(<4 x i64>, <4 x i64>)
496declare <8 x i64> @llvm.sadd.sat.v8i64(<8 x i64>, <8 x i64>)
497declare <16 x i64> @llvm.sadd.sat.v16i64(<16 x i64>, <16 x i64>)
498declare <vscale x 2 x i64> @llvm.sadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
499declare <vscale x 4 x i64> @llvm.sadd.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
500declare <vscale x 8 x i64> @llvm.sadd.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
501
declare i8 @llvm.uadd.sat.i8(i8, i8)
declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.uadd.sat.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.uadd.sat.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 2 x i8> @llvm.uadd.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.uadd.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.uadd.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.uadd.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.uadd.sat.i16(i16, i16)
declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.uadd.sat.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 2 x i16> @llvm.uadd.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.uadd.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.uadd.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.uadd.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.uadd.sat.i32(i32, i32)
declare <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.uadd.sat.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 2 x i32> @llvm.uadd.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.uadd.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.uadd.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.uadd.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.uadd.sat.i64(i64, i64)
declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.uadd.sat.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.uadd.sat.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.uadd.sat.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 2 x i64> @llvm.uadd.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.uadd.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.uadd.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

declare i8 @llvm.usub.sat.i8(i8, i8)
declare <2 x i8> @llvm.usub.sat.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.usub.sat.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.usub.sat.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.usub.sat.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 2 x i8> @llvm.usub.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.usub.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.usub.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.usub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.usub.sat.i16(i16, i16)
declare <2 x i16> @llvm.usub.sat.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.usub.sat.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.usub.sat.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 2 x i16> @llvm.usub.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.usub.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.usub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.usub.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.usub.sat.i32(i32, i32)
declare <2 x i32> @llvm.usub.sat.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.usub.sat.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.usub.sat.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 2 x i32> @llvm.usub.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.usub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.usub.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.usub.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.usub.sat.i64(i64, i64)
declare <2 x i64> @llvm.usub.sat.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.usub.sat.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.usub.sat.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.usub.sat.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 2 x i64> @llvm.usub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.usub.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.usub.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

declare i8 @llvm.ssub.sat.i8(i8, i8)
declare <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.ssub.sat.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 2 x i8> @llvm.ssub.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.ssub.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.ssub.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.ssub.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.ssub.sat.i16(i16, i16)
declare <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 2 x i16> @llvm.ssub.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.ssub.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.ssub.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.ssub.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.ssub.sat.i32(i32, i32)
declare <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.ssub.sat.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 2 x i32> @llvm.ssub.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.ssub.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.ssub.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.ssub.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.ssub.sat.i64(i64, i64)
declare <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.ssub.sat.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.ssub.sat.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.ssub.sat.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 2 x i64> @llvm.ssub.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.ssub.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.ssub.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

declare i8 @llvm.ushl.sat.i8(i8, i8)
declare <2 x i8> @llvm.ushl.sat.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.ushl.sat.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.ushl.sat.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.ushl.sat.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 2 x i8> @llvm.ushl.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.ushl.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.ushl.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.ushl.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.ushl.sat.i16(i16, i16)
declare <2 x i16> @llvm.ushl.sat.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.ushl.sat.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.ushl.sat.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.ushl.sat.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 2 x i16> @llvm.ushl.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.ushl.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.ushl.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.ushl.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.ushl.sat.i32(i32, i32)
declare <2 x i32> @llvm.ushl.sat.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.ushl.sat.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.ushl.sat.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.ushl.sat.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 2 x i32> @llvm.ushl.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.ushl.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.ushl.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.ushl.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.ushl.sat.i64(i64, i64)
declare <2 x i64> @llvm.ushl.sat.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.ushl.sat.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.ushl.sat.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.ushl.sat.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 2 x i64> @llvm.ushl.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.ushl.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.ushl.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

declare i8 @llvm.sshl.sat.i8(i8, i8)
declare <2 x i8> @llvm.sshl.sat.v2i8(<2 x i8>, <2 x i8>)
declare <4 x i8> @llvm.sshl.sat.v4i8(<4 x i8>, <4 x i8>)
declare <8 x i8> @llvm.sshl.sat.v8i8(<8 x i8>, <8 x i8>)
declare <16 x i8> @llvm.sshl.sat.v16i8(<16 x i8>, <16 x i8>)
declare <vscale x 2 x i8> @llvm.sshl.sat.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>)
declare <vscale x 4 x i8> @llvm.sshl.sat.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>)
declare <vscale x 8 x i8> @llvm.sshl.sat.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>)
declare <vscale x 16 x i8> @llvm.sshl.sat.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare i16 @llvm.sshl.sat.i16(i16, i16)
declare <2 x i16> @llvm.sshl.sat.v2i16(<2 x i16>, <2 x i16>)
declare <4 x i16> @llvm.sshl.sat.v4i16(<4 x i16>, <4 x i16>)
declare <8 x i16> @llvm.sshl.sat.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i16> @llvm.sshl.sat.v16i16(<16 x i16>, <16 x i16>)
declare <vscale x 2 x i16> @llvm.sshl.sat.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i16>)
declare <vscale x 4 x i16> @llvm.sshl.sat.nxv4i16(<vscale x 4 x i16>, <vscale x 4 x i16>)
declare <vscale x 8 x i16> @llvm.sshl.sat.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 16 x i16> @llvm.sshl.sat.nxv16i16(<vscale x 16 x i16>, <vscale x 16 x i16>)
declare i32 @llvm.sshl.sat.i32(i32, i32)
declare <2 x i32> @llvm.sshl.sat.v2i32(<2 x i32>, <2 x i32>)
declare <4 x i32> @llvm.sshl.sat.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.sshl.sat.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.sshl.sat.v16i32(<16 x i32>, <16 x i32>)
declare <vscale x 2 x i32> @llvm.sshl.sat.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i32>)
declare <vscale x 4 x i32> @llvm.sshl.sat.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i32> @llvm.sshl.sat.nxv8i32(<vscale x 8 x i32>, <vscale x 8 x i32>)
declare <vscale x 16 x i32> @llvm.sshl.sat.nxv16i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare i64 @llvm.sshl.sat.i64(i64, i64)
declare <2 x i64> @llvm.sshl.sat.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.sshl.sat.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.sshl.sat.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.sshl.sat.v16i64(<16 x i64>, <16 x i64>)
declare <vscale x 2 x i64> @llvm.sshl.sat.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 4 x i64> @llvm.sshl.sat.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>)
declare <vscale x 8 x i64> @llvm.sshl.sat.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>)
