//=- AArch64/AArch64MCCodeEmitter.cpp - Convert AArch64 code to machine code-=//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64MCCodeEmitter class.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/AArch64AddressingModes.h"
#include "MCTargetDesc/AArch64FixupKinds.h"
#include "MCTargetDesc/AArch64MCExpr.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrInfo.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Endian.h"
#include "llvm/Support/EndianStream.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>

using namespace llvm;

#define DEBUG_TYPE "mccodeemitter"

STATISTIC(MCNumEmitted, "Number of MC instructions emitted.");
STATISTIC(MCNumFixups, "Number of MC fixups created.");

namespace {

class AArch64MCCodeEmitter : public MCCodeEmitter {
  MCContext &Ctx;
  const MCInstrInfo &MCII;

public:
  AArch64MCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
      : Ctx(ctx), MCII(mcii) {}
  AArch64MCCodeEmitter(const AArch64MCCodeEmitter &) = delete;
  void operator=(const AArch64MCCodeEmitter &) = delete;
  ~AArch64MCCodeEmitter() override = default;

  // getBinaryCodeForInstr - TableGen'erated function for getting the
  // binary encoding for an instruction.
  uint64_t getBinaryCodeForInstr(const MCInst &MI,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMachineOpValue - Return binary encoding of operand. If the machine
  /// operand requires relocation, record the relocation and return zero.
  unsigned getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                             SmallVectorImpl<MCFixup> &Fixups,
                             const MCSubtargetInfo &STI) const;

  /// getLdStUImm12OpValue - Return encoding info for 12-bit unsigned immediate
  /// attached to a load, store or prfm instruction. If operand requires a
  /// relocation, record it and return zero in that part of the encoding.
  template <uint32_t FixupKind>
  uint32_t getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
  /// target.
  uint32_t getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                              SmallVectorImpl<MCFixup> &Fixups,
                              const MCSubtargetInfo &STI) const;

  /// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
  /// the 2-bit shift field.
  uint32_t getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getCondBranchTargetOpValue - Return the encoded value for a conditional
  /// branch target.
  uint32_t getCondBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getLoadLiteralOpValue - Return the encoded value for a load-literal
  /// pc-relative address.
  uint32_t getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getMemExtendOpValue - Return the encoded value for a reg-extend load/store
  /// instruction: bit 0 is whether a shift is present, bit 1 is whether the
  /// operation is a sign extend (as opposed to a zero extend).
  uint32_t getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                               SmallVectorImpl<MCFixup> &Fixups,
                               const MCSubtargetInfo &STI) const;

  /// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
  /// branch target.
  uint32_t getTestBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const;

  /// getBranchTargetOpValue - Return the encoded value for an unconditional
  /// branch target.
  uint32_t getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                  SmallVectorImpl<MCFixup> &Fixups,
                                  const MCSubtargetInfo &STI) const;

  /// getMoveWideImmOpValue - Return the encoded value for the immediate operand
  /// of a MOVZ or MOVK instruction.
  uint32_t getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;

  /// getVecShifterOpValue - Return the encoded value for the vector shifter.
  uint32_t getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  /// getMoveVecShifterOpValue - Return the encoded value for the vector move
  /// shifter (MSL).
  uint32_t getMoveVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const;

  /// getFixedPointScaleOpValue - Return the encoded value for the
  /// FP-to-fixed-point scale factor.
  uint32_t getFixedPointScaleOpValue(const MCInst &MI, unsigned OpIdx,
                                     SmallVectorImpl<MCFixup> &Fixups,
                                     const MCSubtargetInfo &STI) const;

  uint32_t getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
  uint32_t getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                SmallVectorImpl<MCFixup> &Fixups,
                                const MCSubtargetInfo &STI) const;

  uint32_t getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const;
  uint32_t getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                           SmallVectorImpl<MCFixup> &Fixups,
                           const MCSubtargetInfo &STI) const;

  unsigned fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                   const MCSubtargetInfo &STI) const;

  void encodeInstruction(const MCInst &MI, raw_ostream &OS,
                         SmallVectorImpl<MCFixup> &Fixups,
                         const MCSubtargetInfo &STI) const override;

  unsigned fixMulHigh(const MCInst &MI, unsigned EncodedValue,
                      const MCSubtargetInfo &STI) const;

  template<int hasRs, int hasRt2> unsigned
  fixLoadStoreExclusive(const MCInst &MI, unsigned EncodedValue,
                        const MCSubtargetInfo &STI) const;

  unsigned fixOneOperandFPComparison(const MCInst &MI, unsigned EncodedValue,
                                     const MCSubtargetInfo &STI) const;

private:
  FeatureBitset computeAvailableFeatures(const FeatureBitset &FB) const;
  void
  verifyInstructionPredicates(const MCInst &MI,
                              const FeatureBitset &AvailableFeatures) const;
};

} // end anonymous namespace

/// getMachineOpValue - Return binary encoding of operand. If the machine
/// operand requires relocation, record the relocation and return zero.
unsigned
AArch64MCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &MO,
                                        SmallVectorImpl<MCFixup> &Fixups,
                                        const MCSubtargetInfo &STI) const {
  if (MO.isReg())
    return Ctx.getRegisterInfo()->getEncodingValue(MO.getReg());

  assert(MO.isImm() && "did not expect relocated expression");
  return static_cast<unsigned>(MO.getImm());
}

template<unsigned FixupKind> uint32_t
AArch64MCCodeEmitter::getLdStUImm12OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  uint32_t ImmVal = 0;

  if (MO.isImm())
    ImmVal = static_cast<uint32_t>(MO.getImm());
  else {
    assert(MO.isExpr() && "unable to encode load/store imm operand");
    MCFixupKind Kind = MCFixupKind(FixupKind);
    Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));
    ++MCNumFixups;
  }

  return ImmVal;
}

/// getAdrLabelOpValue - Return encoding info for 21-bit immediate ADR label
/// target.
uint32_t
AArch64MCCodeEmitter::getAdrLabelOpValue(const MCInst &MI, unsigned OpIdx,
                                         SmallVectorImpl<MCFixup> &Fixups,
                                         const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");
  const MCExpr *Expr = MO.getExpr();

  MCFixupKind Kind = MI.getOpcode() == AArch64::ADR
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_adr_imm21)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_adrp_imm21);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  MCNumFixups += 1;

  // All of the information is in the fixup.
  return 0;
}

/// getAddSubImmOpValue - Return encoding for the 12-bit immediate value and
/// the 2-bit shift field. The shift field is stored in bits 12-13 of the
/// return value.
uint32_t
AArch64MCCodeEmitter::getAddSubImmOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  // Suboperands are [imm, shifter].
  const MCOperand &MO = MI.getOperand(OpIdx);
  const MCOperand &MO1 = MI.getOperand(OpIdx + 1);
  assert(AArch64_AM::getShiftType(MO1.getImm()) == AArch64_AM::LSL &&
         "unexpected shift type for add/sub immediate");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO1.getImm());
  assert((ShiftVal == 0 || ShiftVal == 12) &&
         "unexpected shift value for add/sub immediate");
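  // For a plain immediate, fold the shifter into the returned value: e.g.
  // "add x0, x1, #7, lsl #12" yields 0x1007 (imm12 = 7, shift field = 01).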
  if (MO.isImm())
    return MO.getImm() | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
  assert(MO.isExpr() && "Unable to encode MCOperand!");
  const MCExpr *Expr = MO.getExpr();

  // Encode the 12 bits of the fixup.
  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_add_imm12);
  Fixups.push_back(MCFixup::create(0, Expr, Kind, MI.getLoc()));

  ++MCNumFixups;

  // Set the shift bit of the add instruction for relocation types
  // R_AARCH64_TLSLE_ADD_TPREL_HI12 and R_AARCH64_TLSLD_ADD_DTPREL_HI12.
  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(Expr)) {
    AArch64MCExpr::VariantKind RefKind = A64E->getKind();
    if (RefKind == AArch64MCExpr::VK_TPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_DTPREL_HI12 ||
        RefKind == AArch64MCExpr::VK_SECREL_HI12)
      ShiftVal = 12;
  }
  return ShiftVal == 0 ? 0 : (1 << ShiftVal);
}

/// getCondBranchTargetOpValue - Return the encoded value for a conditional
/// branch target.
uint32_t AArch64MCCodeEmitter::getCondBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getLoadLiteralOpValue - Return the encoded value for a load-literal
/// pc-relative address.
uint32_t
AArch64MCCodeEmitter::getLoadLiteralOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_ldr_pcrel_imm19);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

uint32_t
AArch64MCCodeEmitter::getMemExtendOpValue(const MCInst &MI, unsigned OpIdx,
                                          SmallVectorImpl<MCFixup> &Fixups,
                                          const MCSubtargetInfo &STI) const {
  unsigned SignExtend = MI.getOperand(OpIdx).getImm();
  unsigned DoShift = MI.getOperand(OpIdx + 1).getImm();
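  // For example, a sign-extending access with a shift present (e.g. "sxtw #2")
  // encodes as 0b11; a zero-extending access with no shift encodes as 0b00.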
  return (SignExtend << 1) | DoShift;
}

uint32_t
AArch64MCCodeEmitter::getMoveWideImmOpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected movz/movk immediate");

  Fixups.push_back(MCFixup::create(
      0, MO.getExpr(), MCFixupKind(AArch64::fixup_aarch64_movw), MI.getLoc()));

  ++MCNumFixups;

  return 0;
}

/// getTestBranchTargetOpValue - Return the encoded value for a test-bit-and-
/// branch target.
uint32_t AArch64MCCodeEmitter::getTestBranchTargetOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MCFixupKind(AArch64::fixup_aarch64_pcrel_branch14);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getBranchTargetOpValue - Return the encoded value for an unconditional
/// branch target.
uint32_t
AArch64MCCodeEmitter::getBranchTargetOpValue(const MCInst &MI, unsigned OpIdx,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);

  // If the destination is an immediate, we have nothing to do.
  if (MO.isImm())
    return MO.getImm();
  assert(MO.isExpr() && "Unexpected target type!");

  MCFixupKind Kind = MI.getOpcode() == AArch64::BL
                         ? MCFixupKind(AArch64::fixup_aarch64_pcrel_call26)
                         : MCFixupKind(AArch64::fixup_aarch64_pcrel_branch26);
  Fixups.push_back(MCFixup::create(0, MO.getExpr(), Kind, MI.getLoc()));

  ++MCNumFixups;

  // All of the information is in the fixup.
  return 0;
}

/// getVecShifterOpValue - Return the encoded value for the vector shifter:
///
///   00 -> 0
///   01 -> 8
///   10 -> 16
///   11 -> 24
uint32_t
AArch64MCCodeEmitter::getVecShifterOpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");

  switch (MO.getImm()) {
  default:
    break;
  case 0:
    return 0;
  case 8:
    return 1;
  case 16:
    return 2;
  case 24:
    return 3;
  }

  llvm_unreachable("Invalid value for vector shift amount!");
}

/// getFixedPointScaleOpValue - Return the encoded value for the
/// FP-to-fixed-point scale factor.
uint32_t AArch64MCCodeEmitter::getFixedPointScaleOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the scale amount!");
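  // The #fbits operand is stored biased: a scale of N is encoded as 64 - N.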
  return 64 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
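  // The encoder returns the shift biased against the element size: a right
  // shift of N on 64-bit elements is emitted as (64 - N).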
  return 64 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
  return 32 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
  return 16 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftR8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
  return 8 - MO.getImm();
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL64OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
  return MO.getImm() - 64;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL32OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
  return MO.getImm() - 32;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL16OpValue(const MCInst &MI, unsigned OpIdx,
                                            SmallVectorImpl<MCFixup> &Fixups,
                                            const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
  return MO.getImm() - 16;
}

uint32_t
AArch64MCCodeEmitter::getVecShiftL8OpValue(const MCInst &MI, unsigned OpIdx,
                                           SmallVectorImpl<MCFixup> &Fixups,
                                           const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value for the shift amount!");
  return MO.getImm() - 8;
}

uint32_t
AArch64MCCodeEmitter::getImm8OptLsl(const MCInst &MI, unsigned OpIdx,
                                    SmallVectorImpl<MCFixup> &Fixups,
                                    const MCSubtargetInfo &STI) const {
  // Test shift
  auto ShiftOpnd = MI.getOperand(OpIdx + 1).getImm();
  assert(AArch64_AM::getShiftType(ShiftOpnd) == AArch64_AM::LSL &&
         "Unexpected shift type for imm8_opt_lsl immediate.");

  unsigned ShiftVal = AArch64_AM::getShiftValue(ShiftOpnd);
  assert((ShiftVal == 0 || ShiftVal == 8) &&
         "Unexpected shift value for imm8_opt_lsl immediate.");

  // Test immediate
  auto Immediate = MI.getOperand(OpIdx).getImm();
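  // The optional shifter is folded into bit 8 of the returned value: "#3,
  // lsl #8" encodes as 0x103, while an unshifted "#3" encodes as 0x3.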
  return (Immediate & 0xff) | (ShiftVal == 0 ? 0 : (1 << ShiftVal));
}

uint32_t
AArch64MCCodeEmitter::getSVEIncDecImm(const MCInst &MI, unsigned OpIdx,
                                      SmallVectorImpl<MCFixup> &Fixups,
                                      const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() && "Expected an immediate value!");
  // Normalize 1-16 range to 0-15.
  return MO.getImm() - 1;
}

/// getMoveVecShifterOpValue - Return the encoded value for the vector move
/// shifter (MSL).
uint32_t AArch64MCCodeEmitter::getMoveVecShifterOpValue(
    const MCInst &MI, unsigned OpIdx, SmallVectorImpl<MCFixup> &Fixups,
    const MCSubtargetInfo &STI) const {
  const MCOperand &MO = MI.getOperand(OpIdx);
  assert(MO.isImm() &&
         "Expected an immediate value for the move shift amount!");
  unsigned ShiftVal = AArch64_AM::getShiftValue(MO.getImm());
  assert((ShiftVal == 8 || ShiftVal == 16) && "Invalid shift amount!");
  return ShiftVal == 8 ? 0 : 1;
}

unsigned AArch64MCCodeEmitter::fixMOVZ(const MCInst &MI, unsigned EncodedValue,
                                       const MCSubtargetInfo &STI) const {
  // If one of the signed fixup kinds is applied to a MOVZ instruction, the
  // eventual result could be either a MOVZ or a MOVN. It's the MCCodeEmitter's
  // job to ensure that any bits possibly affected by this are 0. This means we
  // must zero out bit 30 (essentially emitting a MOVN).
  MCOperand UImm16MO = MI.getOperand(1);

  // Nothing to do if there's no fixup.
  if (UImm16MO.isImm())
    return EncodedValue;

  const MCExpr *E = UImm16MO.getExpr();
  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
    switch (A64E->getKind()) {
    case AArch64MCExpr::VK_DTPREL_G2:
    case AArch64MCExpr::VK_DTPREL_G1:
    case AArch64MCExpr::VK_DTPREL_G0:
    case AArch64MCExpr::VK_GOTTPREL_G1:
    case AArch64MCExpr::VK_TPREL_G2:
    case AArch64MCExpr::VK_TPREL_G1:
    case AArch64MCExpr::VK_TPREL_G0:
      return EncodedValue & ~(1u << 30);
    default:
      // Nothing to do for an unsigned fixup.
      return EncodedValue;
    }
  }

  return EncodedValue;
}

void AArch64MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
                                             SmallVectorImpl<MCFixup> &Fixups,
                                             const MCSubtargetInfo &STI) const {
  verifyInstructionPredicates(MI,
                              computeAvailableFeatures(STI.getFeatureBits()));

  if (MI.getOpcode() == AArch64::TLSDESCCALL) {
    // This is a directive which applies an R_AARCH64_TLSDESC_CALL to the
    // following (BLR) instruction. It doesn't emit any code itself so it
    // doesn't go through the normal TableGenerated channels.
    auto Reloc = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32
                     ? ELF::R_AARCH64_P32_TLSDESC_CALL
                     : ELF::R_AARCH64_TLSDESC_CALL;
    Fixups.push_back(
        MCFixup::create(0, MI.getOperand(0).getExpr(),
                        MCFixupKind(FirstLiteralRelocationKind + Reloc)));
    return;
  }

  if (MI.getOpcode() == AArch64::CompilerBarrier ||
      MI.getOpcode() == AArch64::SPACE) {
    // CompilerBarrier just prevents the compiler from reordering accesses, and
    // SPACE just increases basic block size; in both cases no actual code is
    // emitted.
    return;
  }

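  // Every AArch64 instruction is a fixed-width 32-bit word, always written
  // little-endian regardless of host byte order.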
  uint64_t Binary = getBinaryCodeForInstr(MI, Fixups, STI);
  support::endian::write<uint32_t>(OS, Binary, support::little);
  ++MCNumEmitted; // Keep track of the # of mi's emitted.
}

unsigned
AArch64MCCodeEmitter::fixMulHigh(const MCInst &MI,
                                 unsigned EncodedValue,
                                 const MCSubtargetInfo &STI) const {
  // The Ra field of SMULH and UMULH is unused: it should be assembled as 31
  // (i.e. all bits 1) but is ignored by the processor.
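  // Ra occupies bits 10-14 of the encoding, so OR in 0b11111 at that position.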
  EncodedValue |= 0x1f << 10;
  return EncodedValue;
}

template<int hasRs, int hasRt2> unsigned
AArch64MCCodeEmitter::fixLoadStoreExclusive(const MCInst &MI,
                                            unsigned EncodedValue,
                                            const MCSubtargetInfo &STI) const {
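  // Unused status/second-register fields must be assembled as all-ones: Rs
  // occupies bits 16-20 and Rt2 occupies bits 10-14 of the encoding.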
  if (!hasRs) EncodedValue |= 0x001F0000;
  if (!hasRt2) EncodedValue |= 0x00007C00;

  return EncodedValue;
}

unsigned AArch64MCCodeEmitter::fixOneOperandFPComparison(
    const MCInst &MI, unsigned EncodedValue, const MCSubtargetInfo &STI) const {
  // The Rm field of FCMP and friends is unused - it should be assembled
  // as 0, but is ignored by the processor.
  EncodedValue &= ~(0x1f << 16);
  return EncodedValue;
}

#define ENABLE_INSTR_PREDICATE_VERIFIER
#include "AArch64GenMCCodeEmitter.inc"

MCCodeEmitter *llvm::createAArch64MCCodeEmitter(const MCInstrInfo &MCII,
                                                const MCRegisterInfo &MRI,
                                                MCContext &Ctx) {
  return new AArch64MCCodeEmitter(MCII, Ctx);
}