#include "MCTargetDesc/X86MCTargetDesc.h"
#include "Views/SummaryView.h"
#include "X86TestBase.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/MC/MCInstBuilder.h"
#include "llvm/MCA/CustomBehaviour.h"
#include "llvm/MCA/IncrementalSourceMgr.h"
#include "llvm/MCA/InstrBuilder.h"
#include "llvm/MCA/Pipeline.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/JSON.h"
#include "llvm/Support/raw_ostream.h"
#include <memory>
#include <unordered_map>

using namespace llvm;
using namespace mca;
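
// These tests drive the llvm-mca pipeline through an IncrementalSourceMgr,
// feeding instructions to the simulator in batches rather than handing over
// the whole instruction stream up front.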

TEST_F(X86TestBase, TestResumablePipeline) {
  mca::Context MCA(*MRI, *STI);

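  // An IncrementalSourceMgr lets instructions be added to the stream in
  // batches; P->run() pauses with an InstStreamPause error whenever it runs
  // out of instructions and can be resumed once more are added.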
  mca::IncrementalSourceMgr ISM;
  // Empty CustomBehaviour.
  auto CB = std::make_unique<mca::CustomBehaviour>(*STI, ISM, *MCII);

  auto PO = getDefaultPipelineOptions();
  auto P = MCA.createDefaultPipeline(PO, ISM, *CB);
  ASSERT_TRUE(P);

  SmallVector<MCInst> MCIs;
  getSimpleInsts(MCIs, /*Repeats=*/100);

  // Add views.
  auto SV = std::make_unique<SummaryView>(STI->getSchedModel(), MCIs,
                                          PO.DispatchWidth);
  P->addEventListener(SV.get());

  auto IM = std::make_unique<mca::InstrumentManager>(*STI, *MCII);
  mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get(), *IM, /*CallLatency=*/100);

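  // No instruments are attached to these instructions; an empty list is
  // passed to createInstruction.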
  const SmallVector<mca::Instrument *> Instruments;
  // Feed the instructions to the source manager in tiles of 7, resuming the
  // pipeline after each tile.
  for (unsigned i = 0U, E = MCIs.size(); i < E;) {
    for (unsigned TE = i + 7; i < TE && i < E; ++i) {
      Expected<std::unique_ptr<mca::Instruction>> InstOrErr =
          IB.createInstruction(MCIs[i], Instruments);
      ASSERT_TRUE(bool(InstOrErr));
      ISM.addInst(std::move(InstOrErr.get()));
    }

    // Run the pipeline.
    Expected<unsigned> Cycles = P->run();
    if (!Cycles) {
      // Should be a stream pause error.
      ASSERT_TRUE(Cycles.errorIsA<mca::InstStreamPause>());
      llvm::consumeError(Cycles.takeError());
    }
  }

  ISM.endOfStream();
  // Has to terminate properly.
  Expected<unsigned> Cycles = P->run();
  ASSERT_TRUE(bool(Cycles));

  json::Value Result = SV->toJSON();
  auto *ResultObj = Result.getAsObject();
  ASSERT_TRUE(ResultObj);

  // Run the baseline.
  json::Object BaselineResult;
  auto E = runBaselineMCA(BaselineResult, MCIs);
  ASSERT_FALSE(bool(E)) << "Failed to run baseline";
  auto *BaselineObj = BaselineResult.getObject(SV->getNameAsString());
  ASSERT_TRUE(BaselineObj) << "Does not contain SummaryView result";

  // Compare the results.
  constexpr const char *Fields[] = {"Instructions", "TotalCycles", "TotaluOps",
                                    "BlockRThroughput"};
  for (const auto *F : Fields) {
    auto V = ResultObj->getInteger(F);
    auto BV = BaselineObj->getInteger(F);
    ASSERT_TRUE(V && BV);
    ASSERT_EQ(*BV, *V) << "Value of '" << F << "' does not match";
  }
}

TEST_F(X86TestBase, TestInstructionRecycling) {
  mca::Context MCA(*MRI, *STI);

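  // Instructions freed by the pipeline, bucketed by their InstrDesc so that
  // only instructions with a matching descriptor are reused.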
  std::unordered_map<const mca::InstrDesc *, SmallPtrSet<mca::Instruction *, 2>>
      RecycledInsts;
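  // Invoked by the InstrBuilder: return a previously freed instruction with a
  // matching descriptor, or nullptr if none is available.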
  auto GetRecycledInst = [&](const mca::InstrDesc &Desc) -> mca::Instruction * {
    auto It = RecycledInsts.find(&Desc);
    if (It != RecycledInsts.end()) {
      auto &Insts = It->second;
      if (Insts.size()) {
        mca::Instruction *I = *Insts.begin();
        Insts.erase(I);
        return I;
      }
    }
    return nullptr;
  };
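  // Invoked by the IncrementalSourceMgr whenever it frees an instruction:
  // stash it so it can be handed back out above.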
  auto AddRecycledInst = [&](mca::Instruction *I) {
    const mca::InstrDesc &D = I->getDesc();
    RecycledInsts[&D].insert(I);
  };

  mca::IncrementalSourceMgr ISM;
  ISM.setOnInstFreedCallback(AddRecycledInst);

  // Empty CustomBehaviour.
  auto CB = std::make_unique<mca::CustomBehaviour>(*STI, ISM, *MCII);

  auto PO = getDefaultPipelineOptions();
  auto P = MCA.createDefaultPipeline(PO, ISM, *CB);
  ASSERT_TRUE(P);

  SmallVector<MCInst> MCIs;
  getSimpleInsts(MCIs, /*Repeats=*/100);

  // Add views.
  auto SV = std::make_unique<SummaryView>(STI->getSchedModel(), MCIs,
                                          PO.DispatchWidth);
  P->addEventListener(SV.get());

  // Default InstrumentManager
  auto IM = std::make_unique<mca::InstrumentManager>(*STI, *MCII);

  mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get(), *IM, /*CallLatency=*/100);
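  // Ask the builder to reuse freed instructions instead of allocating new
  // ones whenever a matching descriptor is available.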
  IB.setInstRecycleCallback(GetRecycledInst);

  const SmallVector<mca::Instrument *> Instruments;
  // Feed the instructions to the source manager in tiles of 7, resuming the
  // pipeline after each tile.
  for (unsigned i = 0U, E = MCIs.size(); i < E;) {
    for (unsigned TE = i + 7; i < TE && i < E; ++i) {
      Expected<std::unique_ptr<mca::Instruction>> InstOrErr =
          IB.createInstruction(MCIs[i], Instruments);

      if (!InstOrErr) {
        mca::Instruction *RecycledInst = nullptr;
        // Check if the returned instruction is a recycled one.
        auto RemainingE = handleErrors(InstOrErr.takeError(),
                                       [&](const mca::RecycledInstErr &RC) {
                                         RecycledInst = RC.getInst();
                                       });
        ASSERT_FALSE(bool(RemainingE));
        ASSERT_TRUE(RecycledInst);
        ISM.addRecycledInst(RecycledInst);
      } else {
        ISM.addInst(std::move(InstOrErr.get()));
      }
    }

    // Run the pipeline.
    Expected<unsigned> Cycles = P->run();
    if (!Cycles) {
      // Should be a stream pause error.
      ASSERT_TRUE(Cycles.errorIsA<mca::InstStreamPause>());
      llvm::consumeError(Cycles.takeError());
    }
  }

  ISM.endOfStream();
  // Has to terminate properly.
  Expected<unsigned> Cycles = P->run();
  ASSERT_TRUE(bool(Cycles));

  json::Value Result = SV->toJSON();
  auto *ResultObj = Result.getAsObject();
  ASSERT_TRUE(ResultObj);

  // Run the baseline.
  json::Object BaselineResult;
  auto E = runBaselineMCA(BaselineResult, MCIs);
  ASSERT_FALSE(bool(E)) << "Failed to run baseline";
  auto *BaselineObj = BaselineResult.getObject(SV->getNameAsString());
  ASSERT_TRUE(BaselineObj) << "Does not contain SummaryView result";

  // Compare the results.
  constexpr const char *Fields[] = {"Instructions", "TotalCycles", "TotaluOps",
                                    "BlockRThroughput"};
  for (const auto *F : Fields) {
    auto V = ResultObj->getInteger(F);
    auto BV = BaselineObj->getInteger(F);
    ASSERT_TRUE(V && BV);
    ASSERT_EQ(*BV, *V) << "Value of '" << F << "' does not match";
  }
}

// Test that we do not depend upon the MCInst address for variant description
// construction. This test creates two instructions that both use a variant
// description, as they are both zeroing idioms, but they write to different
// registers. If the key used to look up the variant instruction description
// is shared between the two (like the MCInst pointer), we will run into an
// assertion failure due to the different writes.
TEST_F(X86TestBase, TestVariantInstructionsSameAddress) {
  mca::Context MCA(*MRI, *STI);

  mca::IncrementalSourceMgr ISM;
  // Empty CustomBehaviour.
  auto CB = std::make_unique<mca::CustomBehaviour>(*STI, ISM, *MCII);

  auto PO = getDefaultPipelineOptions();
  auto P = MCA.createDefaultPipeline(PO, ISM, *CB);
  ASSERT_TRUE(P);

  auto IM = std::make_unique<mca::InstrumentManager>(*STI, *MCII);
  mca::InstrBuilder IB(*STI, *MCII, *MRI, MCIA.get(), *IM,
                       /*CallLatency=*/100);

  const SmallVector<mca::Instrument *> Instruments;

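  // First zeroing idiom: xor rax, rax on a general-purpose register.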
  MCInst InstructionToAdd;
  InstructionToAdd = MCInstBuilder(X86::XOR64rr)
                         .addReg(X86::RAX)
                         .addReg(X86::RAX)
                         .addReg(X86::RAX);
  Expected<std::unique_ptr<mca::Instruction>> Instruction1OrErr =
      IB.createInstruction(InstructionToAdd, Instruments);
  ASSERT_TRUE(static_cast<bool>(Instruction1OrErr));
  ISM.addInst(std::move(Instruction1OrErr.get()));

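  // Second zeroing idiom, reusing the same MCInst object but writing a
  // different register: xorps xmm0, xmm0.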
  InstructionToAdd = MCInstBuilder(X86::XORPSrr)
                         .addReg(X86::XMM0)
                         .addReg(X86::XMM0)
                         .addReg(X86::XMM0);
  Expected<std::unique_ptr<mca::Instruction>> Instruction2OrErr =
      IB.createInstruction(InstructionToAdd, Instruments);
  ASSERT_TRUE(static_cast<bool>(Instruction2OrErr));
  ISM.addInst(std::move(Instruction2OrErr.get()));

  ISM.endOfStream();
  Expected<unsigned> Cycles = P->run();
  ASSERT_TRUE(static_cast<bool>(Cycles));
}