//===- llvm/CodeGen/DwarfExpression.cpp - Dwarf Debug Framework -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains support for writing dwarf debug info into asm files.
//
//===----------------------------------------------------------------------===//

#include "DwarfExpression.h"
#include "DwarfCompileUnit.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/CodeGen/Register.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>

using namespace llvm;

#define DEBUG_TYPE "dwarfdebug"

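// Worked example (illustrative, added by the editor, not from the original
// source): emitConstu(7) folds into the single byte DW_OP_lit7, emitConstu(150)
// emits DW_OP_constu followed by the ULEB128 encoding of 150, and
// emitConstu(UINT64_MAX) uses the DW_OP_lit0; DW_OP_not idiom below instead of
// a ten-byte ULEB128.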
void DwarfExpression::emitConstu(uint64_t Value) {
  if (Value < 32)
    emitOp(dwarf::DW_OP_lit0 + Value);
  else if (Value == std::numeric_limits<uint64_t>::max()) {
    // Only do this for 64-bit values as the DWARF expression stack uses
    // target-address-size values.
    emitOp(dwarf::DW_OP_lit0);
    emitOp(dwarf::DW_OP_not);
  } else {
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned(Value);
  }
}

void DwarfExpression::addReg(int64_t DwarfReg, const char *Comment) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert((isUnknownLocation() || isRegisterLocation()) &&
         "location description already locked down");
  LocationKind = Register;
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_reg0 + DwarfReg, Comment);
  } else {
    emitOp(dwarf::DW_OP_regx, Comment);
    emitUnsigned(DwarfReg);
  }
}

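// Illustrative example (added, not from the original source): addBReg(6, -16)
// emits DW_OP_breg6 followed by the SLEB128 encoding of -16, which evaluates
// to the contents of register 6 plus -16; a DWARF register number of 32 or
// more falls back to DW_OP_bregx <reg> <offset>.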
void DwarfExpression::addBReg(int64_t DwarfReg, int64_t Offset) {
  assert(DwarfReg >= 0 && "invalid negative dwarf register number");
  assert(!isRegisterLocation() && "location description already locked down");
  if (DwarfReg < 32) {
    emitOp(dwarf::DW_OP_breg0 + DwarfReg);
  } else {
    emitOp(dwarf::DW_OP_bregx);
    emitUnsigned(DwarfReg);
  }
  emitSigned(Offset);
}

void DwarfExpression::addFBReg(int64_t Offset) {
  emitOp(dwarf::DW_OP_fbreg);
  emitSigned(Offset);
}

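// Illustrative example (added): addOpPiece(32, 0) emits the byte-aligned form
// DW_OP_piece 4, whereas addOpPiece(5, 3) must use DW_OP_bit_piece 5 3 because
// the size is not a multiple of eight bits (and the offset is nonzero).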
void DwarfExpression::addOpPiece(unsigned SizeInBits, unsigned OffsetInBits) {
  if (!SizeInBits)
    return;

  const unsigned SizeOfByte = 8;
  if (OffsetInBits > 0 || SizeInBits % SizeOfByte) {
    emitOp(dwarf::DW_OP_bit_piece);
    emitUnsigned(SizeInBits);
    emitUnsigned(OffsetInBits);
  } else {
    emitOp(dwarf::DW_OP_piece);
    unsigned ByteSize = SizeInBits / SizeOfByte;
    emitUnsigned(ByteSize);
  }
  this->OffsetInBits += SizeInBits;
}

void DwarfExpression::addShr(unsigned ShiftBy) {
  emitConstu(ShiftBy);
  emitOp(dwarf::DW_OP_shr);
}

void DwarfExpression::addAnd(unsigned Mask) {
  emitConstu(Mask);
  emitOp(dwarf::DW_OP_and);
}

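// Illustrative example (added, not from the original source): asking for the
// 32-bit EAX on x86_64 finds RAX through the super-register walk below and
// records a 32-bit sub-register piece at offset 0, while ARM's Q0 is assembled
// from the D0 and D1 sub-registers, each recorded as a 64-bit piece.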
bool DwarfExpression::addMachineReg(const TargetRegisterInfo &TRI,
                                    llvm::Register MachineReg,
                                    unsigned MaxSize) {
  if (!MachineReg.isPhysical()) {
    if (isFrameRegister(TRI, MachineReg)) {
      DwarfRegs.push_back(Register::createRegister(-1, nullptr));
      return true;
    }
    // Try getting dwarf register for virtual register anyway, e.g. for NVPTX.
    int64_t Reg = TRI.getDwarfRegNum(MachineReg, false);
    if (Reg > 0) {
      DwarfRegs.push_back(Register::createRegister(Reg, nullptr));
      return true;
    }
    return false;
  }

  int64_t Reg = TRI.getDwarfRegNum(MachineReg, false);

  // If this is a valid register number, emit it.
  if (Reg >= 0) {
    DwarfRegs.push_back(Register::createRegister(Reg, nullptr));
    return true;
  }

  // Walk up the super-register chain until we find a valid number.
  // For example, EAX on x86_64 is a 32-bit fragment of RAX with offset 0.
  for (MCPhysReg SR : TRI.superregs(MachineReg)) {
    Reg = TRI.getDwarfRegNum(SR, false);
    if (Reg >= 0) {
      unsigned Idx = TRI.getSubRegIndex(SR, MachineReg);
      unsigned Size = TRI.getSubRegIdxSize(Idx);
      unsigned RegOffset = TRI.getSubRegIdxOffset(Idx);
      DwarfRegs.push_back(Register::createRegister(Reg, "super-register"));
      // Use a DW_OP_bit_piece to describe the sub-register.
      setSubRegisterPiece(Size, RegOffset);
      return true;
    }
  }

  // Otherwise, attempt to find a covering set of sub-register numbers.
  // For example, Q0 on ARM is a composition of D0+D1.
  unsigned CurPos = 0;
  // The size of the register in bits.
  const TargetRegisterClass *RC = TRI.getMinimalPhysRegClass(MachineReg);
  unsigned RegSize = TRI.getRegSizeInBits(*RC);
  // Keep track of the bits in the register we already emitted, so we
  // can avoid emitting redundant aliasing subregs. Because this is
  // just doing a greedy scan of all subregisters, it is possible that
  // this doesn't find a combination of subregisters that fully cover
  // the register (even though one may exist).
  SmallBitVector Coverage(RegSize, false);
  for (MCPhysReg SR : TRI.subregs(MachineReg)) {
    unsigned Idx = TRI.getSubRegIndex(MachineReg, SR);
    unsigned Size = TRI.getSubRegIdxSize(Idx);
    unsigned Offset = TRI.getSubRegIdxOffset(Idx);
    Reg = TRI.getDwarfRegNum(SR, false);
    if (Reg < 0)
      continue;

    // Used to build the intersection between the bits we already
    // emitted and the bits covered by this subregister.
    SmallBitVector CurSubReg(RegSize, false);
    CurSubReg.set(Offset, Offset + Size);

    // If this sub-register has a DWARF number and we haven't covered
    // its range, and its range covers the value, emit a DWARF piece for it.
    if (Offset < MaxSize && CurSubReg.test(Coverage)) {
      // Emit a piece for any gap in the coverage.
      if (Offset > CurPos)
        DwarfRegs.push_back(Register::createSubRegister(
            -1, Offset - CurPos, "no DWARF register encoding"));
      if (Offset == 0 && Size >= MaxSize)
        DwarfRegs.push_back(Register::createRegister(Reg, "sub-register"));
      else
        DwarfRegs.push_back(Register::createSubRegister(
            Reg, std::min<unsigned>(Size, MaxSize - Offset), "sub-register"));
    }
    // Mark it as emitted.
    Coverage.set(Offset, Offset + Size);
    CurPos = Offset + Size;
  }
  // Failed to find any DWARF encoding.
  if (CurPos == 0)
    return false;
  // Found a partial or complete DWARF encoding.
  if (CurPos < RegSize)
    DwarfRegs.push_back(Register::createSubRegister(
        -1, RegSize - CurPos, "no DWARF register encoding"));
  return true;
}

void DwarfExpression::addStackValue() {
  if (DwarfVersion >= 4)
    emitOp(dwarf::DW_OP_stack_value);
}

void DwarfExpression::addSignedConstant(int64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitOp(dwarf::DW_OP_consts);
  emitSigned(Value);
}

void DwarfExpression::addUnsignedConstant(uint64_t Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;
  emitConstu(Value);
}

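// Illustrative example (added, not from the original source): a 64-bit APInt
// takes the early exit below and emits just the constant, while a 128-bit
// APInt is split into two 64-bit chunks, each followed by DW_OP_stack_value
// and a piece operation covering its 64 bits.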
void DwarfExpression::addUnsignedConstant(const APInt &Value) {
  assert(isImplicitLocation() || isUnknownLocation());
  LocationKind = Implicit;

  unsigned Size = Value.getBitWidth();
  const uint64_t *Data = Value.getRawData();

  // Chop it up into 64-bit pieces, because that's the maximum that
  // addUnsignedConstant takes.
  unsigned Offset = 0;
  while (Offset < Size) {
    addUnsignedConstant(*Data++);
    if (Offset == 0 && Size <= 64)
      break;
    addStackValue();
    addOpPiece(std::min(Size - Offset, 64u), Offset);
    Offset += 64;
  }
}

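// Illustrative example (added, not from the original source): for the float
// 1.0f (bit pattern 0x3f800000) this emits DW_OP_implicit_value 4 followed by
// the bytes 00 00 80 3f, least significant byte first; the byte swap below
// keeps the emitted order correct on big-endian targets.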
void DwarfExpression::addConstantFP(const APFloat &APF, const AsmPrinter &AP) {
  assert(isImplicitLocation() || isUnknownLocation());
  APInt API = APF.bitcastToAPInt();
  int NumBytes = API.getBitWidth() / 8;
  if (NumBytes == 4 /*float*/ || NumBytes == 8 /*double*/) {
    // FIXME: Add support for `long double`.
    emitOp(dwarf::DW_OP_implicit_value);
    emitUnsigned(NumBytes /*Size of the block in bytes*/);

    // The loop below emits the value starting at the least significant byte,
    // so we need to perform a byte swap to get the byte order correct in the
    // case of a big-endian target.
    if (AP.getDataLayout().isBigEndian())
      API = API.byteSwap();

    for (int i = 0; i < NumBytes; ++i) {
      emitData1(API.getZExtValue() & 0xFF);
      API = API.lshr(8);
    }

    return;
  }
  LLVM_DEBUG(
      dbgs() << "Skipped DW_OP_implicit_value creation for ConstantFP of size: "
             << API.getBitWidth() << " bits\n");
}

bool DwarfExpression::addMachineRegExpression(const TargetRegisterInfo &TRI,
                                              DIExpressionCursor &ExprCursor,
                                              llvm::Register MachineReg,
                                              unsigned FragmentOffsetInBits) {
  auto Fragment = ExprCursor.getFragmentInfo();
  if (!addMachineReg(TRI, MachineReg, Fragment ? Fragment->SizeInBits : ~1U)) {
    LocationKind = Unknown;
    return false;
  }

  bool HasComplexExpression = false;
  auto Op = ExprCursor.peek();
  if (Op && Op->getOp() != dwarf::DW_OP_LLVM_fragment)
    HasComplexExpression = true;

  // If the register can only be described by a complex expression (i.e.,
  // multiple subregisters) it doesn't safely compose with another complex
  // expression. For example, it is not possible to apply a DW_OP_deref
  // operation to multiple DW_OP_pieces, since composite location descriptions
  // do not push anything on the DWARF stack.
  //
  // DW_OP_entry_value operations can only hold a DWARF expression or a
  // register location description, so we can't emit a single entry value
  // covering a composite location description. In the future we may want to
  // emit entry value operations for each register location in the composite
  // location, but until that is supported do not emit anything.
  if ((HasComplexExpression || IsEmittingEntryValue) && DwarfRegs.size() > 1) {
    if (IsEmittingEntryValue)
      cancelEntryValue();
    DwarfRegs.clear();
    LocationKind = Unknown;
    return false;
  }

  // Handle simple register locations. If we are supposed to emit
  // a call site parameter expression and if that expression is just a register
  // location, emit it with addBReg and offset 0, because we should emit a DWARF
  // expression representing a value, rather than a location.
  if ((!isParameterValue() && !isMemoryLocation() && !HasComplexExpression) ||
      isEntryValue()) {
    auto FragmentInfo = ExprCursor.getFragmentInfo();
    unsigned RegSize = 0;
    for (auto &Reg : DwarfRegs) {
      RegSize += Reg.SubRegSize;
      if (Reg.DwarfRegNo >= 0)
        addReg(Reg.DwarfRegNo, Reg.Comment);
      if (FragmentInfo)
        if (RegSize > FragmentInfo->SizeInBits)
          // If the register is larger than the current fragment, stop once
          // the fragment is covered.
          break;
      addOpPiece(Reg.SubRegSize);
    }

    if (isEntryValue()) {
      finalizeEntryValue();

      if (!isIndirect() && !isParameterValue() && !HasComplexExpression &&
          DwarfVersion >= 4)
        emitOp(dwarf::DW_OP_stack_value);
    }

    DwarfRegs.clear();
    // If we need to mask out a subregister, do it now, unless the next
    // operation would emit an OpPiece anyway.
    auto NextOp = ExprCursor.peek();
    if (SubRegisterSizeInBits && NextOp &&
        (NextOp->getOp() != dwarf::DW_OP_LLVM_fragment))
      maskSubRegister();
    return true;
  }

  // Don't emit locations that cannot be expressed without DW_OP_stack_value.
  if (DwarfVersion < 4)
    if (any_of(ExprCursor, [](DIExpression::ExprOperand Op) -> bool {
          return Op.getOp() == dwarf::DW_OP_stack_value;
        })) {
      DwarfRegs.clear();
      LocationKind = Unknown;
      return false;
    }

  // TODO: We should not give up here but the following code needs to be changed
  //       to deal with multiple (sub)registers first.
  if (DwarfRegs.size() > 1) {
    LLVM_DEBUG(dbgs() << "TODO: giving up on debug information due to "
                         "multi-register usage.\n");
    DwarfRegs.clear();
    LocationKind = Unknown;
    return false;
  }

  auto Reg = DwarfRegs[0];
  bool FBReg = isFrameRegister(TRI, MachineReg);
  int SignedOffset = 0;
  assert(!Reg.isSubRegister() && "full register expected");

  // Pattern-match combinations for which more efficient representations exist.
  // [Reg, DW_OP_plus_uconst, Offset] --> [DW_OP_breg, Offset].
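  // Worked example (illustrative, not from the original source): a value in
  // DWARF register 0 with the expression (DW_OP_plus_uconst 8) collapses into
  // the single operation DW_OP_breg0 +8 rather than a base register location
  // followed by a separate addition.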
  if (Op && (Op->getOp() == dwarf::DW_OP_plus_uconst)) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    if (Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.take();
    }
  }

  // [Reg, DW_OP_constu, Offset, DW_OP_plus]  --> [DW_OP_breg, Offset]
  // [Reg, DW_OP_constu, Offset, DW_OP_minus] --> [DW_OP_breg,-Offset]
  // If Reg is a subregister we need to mask it out before subtracting.
  if (Op && Op->getOp() == dwarf::DW_OP_constu) {
    uint64_t Offset = Op->getArg(0);
    uint64_t IntMax = static_cast<uint64_t>(std::numeric_limits<int>::max());
    auto N = ExprCursor.peekNext();
    if (N && N->getOp() == dwarf::DW_OP_plus && Offset <= IntMax) {
      SignedOffset = Offset;
      ExprCursor.consume(2);
    } else if (N && N->getOp() == dwarf::DW_OP_minus &&
               !SubRegisterSizeInBits && Offset <= IntMax + 1) {
      SignedOffset = -static_cast<int64_t>(Offset);
      ExprCursor.consume(2);
    }
  }

  if (FBReg)
    addFBReg(SignedOffset);
  else
    addBReg(Reg.DwarfRegNo, SignedOffset);
  DwarfRegs.clear();

  // If we need to mask out a subregister, do it now, unless the next
  // operation would emit an OpPiece anyway.
  auto NextOp = ExprCursor.peek();
  if (SubRegisterSizeInBits && NextOp &&
      (NextOp->getOp() != dwarf::DW_OP_LLVM_fragment))
    maskSubRegister();

  return true;
}

void DwarfExpression::setEntryValueFlags(const MachineLocation &Loc) {
  LocationFlags |= EntryValue;
  if (Loc.isIndirect())
    LocationFlags |= Indirect;
}

void DwarfExpression::setLocation(const MachineLocation &Loc,
                                  const DIExpression *DIExpr) {
  if (Loc.isIndirect())
    setMemoryLocationKind();

  if (DIExpr->isEntryValue())
    setEntryValueFlags(Loc);
}

void DwarfExpression::beginEntryValueExpression(
    DIExpressionCursor &ExprCursor) {
  auto Op = ExprCursor.take();
  (void)Op;
  assert(Op && Op->getOp() == dwarf::DW_OP_LLVM_entry_value);
  assert(!IsEmittingEntryValue && "Already emitting entry value?");
  assert(Op->getArg(0) == 1 &&
         "Can currently only emit entry values covering a single operation");

  SavedLocationKind = LocationKind;
  LocationKind = Register;
  LocationFlags |= EntryValue;
  IsEmittingEntryValue = true;
  enableTemporaryBuffer();
}

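// Illustrative example (added, not from the original source): an entry value
// describing register 5 ends up as DW_OP_entry_value with a block size of 1
// wrapping DW_OP_reg5 (or the DW_OP_GNU_entry_value form when DWARF 5 is not
// being emitted), which is what the buffering below assembles.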
void DwarfExpression::finalizeEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  emitOp(CU.getDwarf5OrGNULocationAtom(dwarf::DW_OP_entry_value));

  // Emit the entry value's size operand.
  unsigned Size = getTemporaryBufferSize();
  emitUnsigned(Size);

  // Emit the entry value's DWARF block operand.
  commitTemporaryBuffer();

  LocationFlags &= ~EntryValue;
  LocationKind = SavedLocationKind;
  IsEmittingEntryValue = false;
}

void DwarfExpression::cancelEntryValue() {
  assert(IsEmittingEntryValue && "Entry value not open?");
  disableTemporaryBuffer();

  // The temporary buffer can't be emptied, so for now just assert that nothing
  // has been emitted to it.
  assert(getTemporaryBufferSize() == 0 &&
         "Began emitting entry value block before cancelling entry value");

  LocationKind = SavedLocationKind;
  IsEmittingEntryValue = false;
}

unsigned DwarfExpression::getOrCreateBaseType(unsigned BitSize,
                                              dwarf::TypeKind Encoding) {
  // Reuse the base_type if we already have one in this CU; otherwise we
  // create a new one.
  unsigned I = 0, E = CU.ExprRefedBaseTypes.size();
  for (; I != E; ++I)
    if (CU.ExprRefedBaseTypes[I].BitSize == BitSize &&
        CU.ExprRefedBaseTypes[I].Encoding == Encoding)
      break;

  if (I == E)
    CU.ExprRefedBaseTypes.emplace_back(BitSize, Encoding);
  return I;
}

/// Assuming a well-formed expression, match "DW_OP_deref*
/// DW_OP_LLVM_fragment?".
static bool isMemoryLocation(DIExpressionCursor ExprCursor) {
  while (ExprCursor) {
    auto Op = ExprCursor.take();
    switch (Op->getOp()) {
    case dwarf::DW_OP_deref:
    case dwarf::DW_OP_LLVM_fragment:
      break;
    default:
      return false;
    }
  }
  return true;
}

void DwarfExpression::addExpression(DIExpressionCursor &&ExprCursor) {
  addExpression(std::move(ExprCursor),
                [](unsigned Idx, DIExpressionCursor &Cursor) -> bool {
                  llvm_unreachable("unhandled opcode found in expression");
                });
}

bool DwarfExpression::addExpression(
    DIExpressionCursor &&ExprCursor,
    llvm::function_ref<bool(unsigned, DIExpressionCursor &)> InsertArg) {
  // Entry values can currently only cover the initial register location,
  // and not any other parts of the following DWARF expression.
  assert(!IsEmittingEntryValue && "Can't emit entry value around expression");

  std::optional<DIExpression::ExprOperand> PrevConvertOp;

  while (ExprCursor) {
    auto Op = ExprCursor.take();
    uint64_t OpNum = Op->getOp();

    if (OpNum >= dwarf::DW_OP_reg0 && OpNum <= dwarf::DW_OP_reg31) {
      emitOp(OpNum);
      continue;
    } else if (OpNum >= dwarf::DW_OP_breg0 && OpNum <= dwarf::DW_OP_breg31) {
      addBReg(OpNum - dwarf::DW_OP_breg0, Op->getArg(0));
      continue;
    }

    switch (OpNum) {
    case dwarf::DW_OP_LLVM_arg:
      if (!InsertArg(Op->getArg(0), ExprCursor)) {
        LocationKind = Unknown;
        return false;
      }
      break;
    case dwarf::DW_OP_LLVM_fragment: {
      unsigned SizeInBits = Op->getArg(1);
      unsigned FragmentOffset = Op->getArg(0);
      // The fragment offset must have already been adjusted by emitting an
      // empty DW_OP_piece / DW_OP_bit_piece before we emitted the base
      // location.
      assert(OffsetInBits >= FragmentOffset && "fragment offset not added?");
      assert(SizeInBits >= OffsetInBits - FragmentOffset && "size underflow");

      // If addMachineReg already emitted DW_OP_piece operations to represent
      // a super-register by splicing together sub-registers, subtract the size
      // of the pieces that was already emitted.
      SizeInBits -= OffsetInBits - FragmentOffset;

      // If addMachineReg requested a DW_OP_bit_piece to stencil out a
      // sub-register that is smaller than the current fragment's size, use it.
      if (SubRegisterSizeInBits)
        SizeInBits = std::min<unsigned>(SizeInBits, SubRegisterSizeInBits);

      // Emit a DW_OP_stack_value for implicit location descriptions.
      if (isImplicitLocation())
        addStackValue();

      // Emit the DW_OP_piece.
      addOpPiece(SizeInBits, SubRegisterOffsetInBits);
      setSubRegisterPiece(0, 0);
      // Reset the location description kind.
      LocationKind = Unknown;
      return true;
    }
    case dwarf::DW_OP_LLVM_extract_bits_sext:
    case dwarf::DW_OP_LLVM_extract_bits_zext: {
      unsigned SizeInBits = Op->getArg(1);
      unsigned BitOffset = Op->getArg(0);

      // If we have a memory location then dereference to get the value, though
      // we have to make sure we don't dereference any bytes past the end of the
      // object.
      if (isMemoryLocation()) {
        emitOp(dwarf::DW_OP_deref_size);
        emitUnsigned(alignTo(BitOffset + SizeInBits, 8) / 8);
      }

      // Extract the bits by a shift left (to shift out the bits after what we
      // want to extract) followed by shift right (to shift the bits to position
      // 0 and also sign/zero extend). These operations are done in the DWARF
      // "generic type" whose size is the size of a pointer.
      unsigned PtrSizeInBytes = CU.getAsmPrinter()->MAI->getCodePointerSize();
      unsigned LeftShift = PtrSizeInBytes * 8 - (SizeInBits + BitOffset);
      unsigned RightShift = LeftShift + BitOffset;
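      // Worked example (illustrative, not from the original source):
      // extracting 8 bits at bit offset 4 with an 8-byte pointer gives
      // LeftShift = 52 and RightShift = 56, so bits [4,11] end up in bits
      // [0,7] of the stack value, sign- or zero-filled above.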
      if (LeftShift) {
        emitOp(dwarf::DW_OP_constu);
        emitUnsigned(LeftShift);
        emitOp(dwarf::DW_OP_shl);
      }
      emitOp(dwarf::DW_OP_constu);
      emitUnsigned(RightShift);
      emitOp(OpNum == dwarf::DW_OP_LLVM_extract_bits_sext ? dwarf::DW_OP_shra
                                                          : dwarf::DW_OP_shr);

      // The value is now at the top of the stack, so set the location to
      // implicit so that we get a stack_value at the end.
      LocationKind = Implicit;
      break;
    }
    case dwarf::DW_OP_plus_uconst:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_plus_uconst);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_plus:
    case dwarf::DW_OP_minus:
    case dwarf::DW_OP_mul:
    case dwarf::DW_OP_div:
    case dwarf::DW_OP_mod:
    case dwarf::DW_OP_or:
    case dwarf::DW_OP_and:
    case dwarf::DW_OP_xor:
    case dwarf::DW_OP_shl:
    case dwarf::DW_OP_shr:
    case dwarf::DW_OP_shra:
    case dwarf::DW_OP_lit0:
    case dwarf::DW_OP_not:
    case dwarf::DW_OP_dup:
    case dwarf::DW_OP_push_object_address:
    case dwarf::DW_OP_over:
    case dwarf::DW_OP_eq:
    case dwarf::DW_OP_ne:
    case dwarf::DW_OP_gt:
    case dwarf::DW_OP_ge:
    case dwarf::DW_OP_lt:
    case dwarf::DW_OP_le:
      emitOp(OpNum);
      break;
    case dwarf::DW_OP_deref:
      assert(!isRegisterLocation());
      if (!isMemoryLocation() && ::isMemoryLocation(ExprCursor))
        // Turning this into a memory location description makes the deref
        // implicit.
        LocationKind = Memory;
      else
        emitOp(dwarf::DW_OP_deref);
      break;
    case dwarf::DW_OP_constu:
      assert(!isRegisterLocation());
      emitConstu(Op->getArg(0));
      break;
    case dwarf::DW_OP_consts:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_consts);
      emitSigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_convert: {
      unsigned BitSize = Op->getArg(0);
      dwarf::TypeKind Encoding = static_cast<dwarf::TypeKind>(Op->getArg(1));
      if (DwarfVersion >= 5 && CU.getDwarfDebug().useOpConvert()) {
        emitOp(dwarf::DW_OP_convert);
        // If targeting a location list, simply emit the index into the raw
        // byte stream as a ULEB128; DwarfDebug::emitDebugLocEntry has been
        // fitted with means to extract it later.
        // If targeting an inlined DW_AT_location, insert a DIEBaseTypeRef
        // (containing the index and a resolve mechanism during emit) into the
        // DIE value list.
        emitBaseTypeRef(getOrCreateBaseType(BitSize, Encoding));
      } else {
        if (PrevConvertOp && PrevConvertOp->getArg(0) < BitSize) {
          if (Encoding == dwarf::DW_ATE_signed)
            emitLegacySExt(PrevConvertOp->getArg(0));
          else if (Encoding == dwarf::DW_ATE_unsigned)
            emitLegacyZExt(PrevConvertOp->getArg(0));
          PrevConvertOp = std::nullopt;
        } else {
          PrevConvertOp = Op;
        }
      }
      break;
    }
    case dwarf::DW_OP_stack_value:
      LocationKind = Implicit;
      break;
    case dwarf::DW_OP_swap:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_swap);
      break;
    case dwarf::DW_OP_xderef:
      assert(!isRegisterLocation());
      emitOp(dwarf::DW_OP_xderef);
      break;
    case dwarf::DW_OP_deref_size:
      emitOp(dwarf::DW_OP_deref_size);
      emitData1(Op->getArg(0));
      break;
    case dwarf::DW_OP_LLVM_tag_offset:
      TagOffset = Op->getArg(0);
      break;
    case dwarf::DW_OP_regx:
      emitOp(dwarf::DW_OP_regx);
      emitUnsigned(Op->getArg(0));
      break;
    case dwarf::DW_OP_bregx:
      emitOp(dwarf::DW_OP_bregx);
      emitUnsigned(Op->getArg(0));
      emitSigned(Op->getArg(1));
      break;
    default:
      llvm_unreachable("unhandled opcode found in expression");
    }
  }

  if (isImplicitLocation() && !isParameterValue())
    // Turn this into an implicit location description.
    addStackValue();

  return true;
}

/// Add masking operations to stencil out a subregister.
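// Illustrative example (added, not from the original source): for a 16-bit
// sub-register at bit offset 8 this shifts right by 8 (DW_OP_lit8 DW_OP_shr)
// and then masks with 0xffff (DW_OP_constu 0xffff DW_OP_and).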
void DwarfExpression::maskSubRegister() {
  assert(SubRegisterSizeInBits && "no subregister was registered");
  if (SubRegisterOffsetInBits > 0)
    addShr(SubRegisterOffsetInBits);
  uint64_t Mask = (1ULL << (uint64_t)SubRegisterSizeInBits) - 1ULL;
  addAnd(Mask);
}

void DwarfExpression::finalize() {
  assert(DwarfRegs.size() == 0 && "dwarf registers not emitted");
  // Emit any outstanding DW_OP_piece operations to mask out subregisters.
  if (SubRegisterSizeInBits == 0)
    return;
  // Don't emit a DW_OP_piece for a subregister at offset 0.
  if (SubRegisterOffsetInBits == 0)
    return;
  addOpPiece(SubRegisterSizeInBits, SubRegisterOffsetInBits);
}

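// Illustrative example (added, not from the original source): if the pieces
// emitted so far cover bits [0,32) and the next fragment starts at bit 48,
// this inserts a gap-filling DW_OP_piece 2 before the fragment's own location
// is emitted.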
void DwarfExpression::addFragmentOffset(const DIExpression *Expr) {
  if (!Expr || !Expr->isFragment())
    return;

  uint64_t FragmentOffset = Expr->getFragmentInfo()->OffsetInBits;
  assert(FragmentOffset >= OffsetInBits &&
         "overlapping or duplicate fragments");
  if (FragmentOffset > OffsetInBits)
    addOpPiece(FragmentOffset - OffsetInBits);
  OffsetInBits = FragmentOffset;
}

void DwarfExpression::emitLegacySExt(unsigned FromBits) {
  // (((X >> (FromBits - 1)) * (~0)) << FromBits) | X
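  // Worked example (illustrative, not from the original source): with
  // FromBits = 8 and X = 0xff, (0xff >> 7) = 1, multiplying by ~0 and shifting
  // left by 8 gives ...ff00, and OR-ing X back in yields the all-ones pattern,
  // i.e. -1 correctly sign-extended; a cleared sign bit leaves X unchanged.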
  emitOp(dwarf::DW_OP_dup);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits - 1);
  emitOp(dwarf::DW_OP_shr);
  emitOp(dwarf::DW_OP_lit0);
  emitOp(dwarf::DW_OP_not);
  emitOp(dwarf::DW_OP_mul);
  emitOp(dwarf::DW_OP_constu);
  emitUnsigned(FromBits);
  emitOp(dwarf::DW_OP_shl);
  emitOp(dwarf::DW_OP_or);
}

void DwarfExpression::emitLegacyZExt(unsigned FromBits) {
  // Heuristic to decide the most efficient encoding.
  // A ULEB can encode 7 1-bits per byte.
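  // Worked example (illustrative, not from the original source): FromBits = 16
  // emits DW_OP_constu 0xffff DW_OP_and directly, while FromBits = 61 would
  // need a nine-byte ULEB128 mask and instead builds ((1 << 61) - 1) on the
  // stack below.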
  if (FromBits / 7 < 1+1+1+1+1) {
    // (X & ((1 << FromBits) - 1))
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned((1ULL << FromBits) - 1);
  } else {
    // Note that the DWARF 4 stack consists of pointer-sized elements,
    // so technically it doesn't make sense to shift left more than 64
    // bits. We leave that for the consumer to decide though. LLDB for
    // example uses APInt for the stack elements and can still deal
    // with this.
    emitOp(dwarf::DW_OP_lit1);
    emitOp(dwarf::DW_OP_constu);
    emitUnsigned(FromBits);
    emitOp(dwarf::DW_OP_shl);
    emitOp(dwarf::DW_OP_lit1);
    emitOp(dwarf::DW_OP_minus);
  }
  emitOp(dwarf::DW_OP_and);
}

void DwarfExpression::addWasmLocation(unsigned Index, uint64_t Offset) {
  emitOp(dwarf::DW_OP_WASM_location);
  emitUnsigned(Index == 4/*TI_LOCAL_INDIRECT*/ ? 0/*TI_LOCAL*/ : Index);
  emitUnsigned(Offset);
  if (Index == 4 /*TI_LOCAL_INDIRECT*/) {
    assert(LocationKind == Unknown);
    LocationKind = Memory;
  } else {
    assert(LocationKind == Implicit || LocationKind == Unknown);
    LocationKind = Implicit;
  }
}