1 //===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file implements the interfaces that Hexagon uses to lower LLVM code
11 // into a selection DAG.
12 //
13 //===----------------------------------------------------------------------===//
14
15 #include "HexagonISelLowering.h"
16 #include "HexagonMachineFunctionInfo.h"
17 #include "HexagonSubtarget.h"
18 #include "HexagonTargetMachine.h"
19 #include "HexagonTargetObjectFile.h"
20 #include "llvm/CodeGen/CallingConvLower.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineJumpTableInfo.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/SelectionDAGISel.h"
27 #include "llvm/CodeGen/ValueTypes.h"
28 #include "llvm/IR/CallingConv.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/GlobalAlias.h"
32 #include "llvm/IR/GlobalVariable.h"
33 #include "llvm/IR/InlineAsm.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/raw_ostream.h"
39
40 using namespace llvm;
41
42 #define DEBUG_TYPE "hexagon-lowering"
43
44 static cl::opt<bool>
45 EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden,
46 cl::desc("Control jump table emission on Hexagon target"));
47
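// HexagonCCState - A CCState that also records how many of the call's
// parameters are named (fixed), so the calling-convention routines below can
// distinguish named arguments from the variadic tail of a varargs call.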
48 namespace {
49 class HexagonCCState : public CCState {
50 int NumNamedVarArgParams;
51
52 public:
53   HexagonCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
54 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
55 int NumNamedVarArgParams)
56 : CCState(CC, isVarArg, MF, locs, C),
57 NumNamedVarArgParams(NumNamedVarArgParams) {}
58
59   int getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
60 };
61 }
62
63 // Implement calling convention for Hexagon.
64 static bool
65 CC_Hexagon(unsigned ValNo, MVT ValVT,
66 MVT LocVT, CCValAssign::LocInfo LocInfo,
67 ISD::ArgFlagsTy ArgFlags, CCState &State);
68
69 static bool
70 CC_Hexagon32(unsigned ValNo, MVT ValVT,
71 MVT LocVT, CCValAssign::LocInfo LocInfo,
72 ISD::ArgFlagsTy ArgFlags, CCState &State);
73
74 static bool
75 CC_Hexagon64(unsigned ValNo, MVT ValVT,
76 MVT LocVT, CCValAssign::LocInfo LocInfo,
77 ISD::ArgFlagsTy ArgFlags, CCState &State);
78
79 static bool
80 RetCC_Hexagon(unsigned ValNo, MVT ValVT,
81 MVT LocVT, CCValAssign::LocInfo LocInfo,
82 ISD::ArgFlagsTy ArgFlags, CCState &State);
83
84 static bool
85 RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
86 MVT LocVT, CCValAssign::LocInfo LocInfo,
87 ISD::ArgFlagsTy ArgFlags, CCState &State);
88
89 static bool
90 RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
91 MVT LocVT, CCValAssign::LocInfo LocInfo,
92 ISD::ArgFlagsTy ArgFlags, CCState &State);
93
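// CC_Hexagon_VarArg - Calling convention for calls to variadic functions.
// The first NumNamedVarArgParams arguments are assigned with the regular
// CC_Hexagon rules; every argument after them is passed on the stack.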
94 static bool
95 CC_Hexagon_VarArg (unsigned ValNo, MVT ValVT,
96 MVT LocVT, CCValAssign::LocInfo LocInfo,
97 ISD::ArgFlagsTy ArgFlags, CCState &State) {
98 HexagonCCState &HState = static_cast<HexagonCCState &>(State);
99
100   // NumNamedVarArgParams cannot be zero for a vararg function.
101   assert((HState.getNumNamedVarArgParams() > 0) &&
102          "NumNamedVarArgParams must be greater than zero.");
103
104 if ((int)ValNo < HState.getNumNamedVarArgParams()) {
105 // Deal with named arguments.
106 return CC_Hexagon(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
107 }
108
109 // Deal with un-named arguments.
110 unsigned ofst;
111 if (ArgFlags.isByVal()) {
112 // If pass-by-value, the size allocated on stack is decided
113 // by ArgFlags.getByValSize(), not by the size of LocVT.
114 assert ((ArgFlags.getByValSize() > 8) &&
115 "ByValSize must be bigger than 8 bytes");
116 ofst = State.AllocateStack(ArgFlags.getByValSize(), 4);
117 State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
118 return false;
119 }
120 if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
121 LocVT = MVT::i32;
122 ValVT = MVT::i32;
123 if (ArgFlags.isSExt())
124 LocInfo = CCValAssign::SExt;
125 else if (ArgFlags.isZExt())
126 LocInfo = CCValAssign::ZExt;
127 else
128 LocInfo = CCValAssign::AExt;
129 }
130 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
131 ofst = State.AllocateStack(4, 4);
132 State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
133 return false;
134 }
135 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
136 ofst = State.AllocateStack(8, 8);
137 State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
138 return false;
139 }
140 llvm_unreachable(nullptr);
141 }
142
143
144 static bool
145 CC_Hexagon (unsigned ValNo, MVT ValVT,
146 MVT LocVT, CCValAssign::LocInfo LocInfo,
147 ISD::ArgFlagsTy ArgFlags, CCState &State) {
148
149 if (ArgFlags.isByVal()) {
150 // Passed on stack.
151 assert ((ArgFlags.getByValSize() > 8) &&
152 "ByValSize must be bigger than 8 bytes");
153 unsigned Offset = State.AllocateStack(ArgFlags.getByValSize(), 4);
154 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
155 return false;
156 }
157
158 if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
159 LocVT = MVT::i32;
160 ValVT = MVT::i32;
161 if (ArgFlags.isSExt())
162 LocInfo = CCValAssign::SExt;
163 else if (ArgFlags.isZExt())
164 LocInfo = CCValAssign::ZExt;
165 else
166 LocInfo = CCValAssign::AExt;
167 }
168
169 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
170 if (!CC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
171 return false;
172 }
173
174 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
175 if (!CC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
176 return false;
177 }
178
179 return true; // CC didn't match.
180 }
181
182
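// CC_Hexagon32 - Assign a 32-bit argument to one of R0-R5, or to a 4-byte
// stack slot once those registers are exhausted.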
183 static bool CC_Hexagon32(unsigned ValNo, MVT ValVT,
184 MVT LocVT, CCValAssign::LocInfo LocInfo,
185 ISD::ArgFlagsTy ArgFlags, CCState &State) {
186
187 static const MCPhysReg RegList[] = {
188 Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
189 Hexagon::R5
190 };
191 if (unsigned Reg = State.AllocateReg(RegList, 6)) {
192 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
193 return false;
194 }
195
196 unsigned Offset = State.AllocateStack(4, 4);
197 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
198 return false;
199 }
200
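// CC_Hexagon64 - Assign a 64-bit argument to one of the double registers
// D0-D2 (allocating D1 or D2 also marks the shadow registers R1 or R3 as
// used), or to an 8-byte aligned stack slot if no register pair is left.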
201 static bool CC_Hexagon64(unsigned ValNo, MVT ValVT,
202 MVT LocVT, CCValAssign::LocInfo LocInfo,
203 ISD::ArgFlagsTy ArgFlags, CCState &State) {
204
205 if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
206 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
207 return false;
208 }
209
210 static const MCPhysReg RegList1[] = {
211 Hexagon::D1, Hexagon::D2
212 };
213 static const MCPhysReg RegList2[] = {
214 Hexagon::R1, Hexagon::R3
215 };
216 if (unsigned Reg = State.AllocateReg(RegList1, RegList2, 2)) {
217 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
218 return false;
219 }
220
221 unsigned Offset = State.AllocateStack(8, 8, Hexagon::D2);
222 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
223 return false;
224 }
225
226 static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
227 MVT LocVT, CCValAssign::LocInfo LocInfo,
228 ISD::ArgFlagsTy ArgFlags, CCState &State) {
229
230
231 if (LocVT == MVT::i1 ||
232 LocVT == MVT::i8 ||
233 LocVT == MVT::i16) {
234 LocVT = MVT::i32;
235 ValVT = MVT::i32;
236 if (ArgFlags.isSExt())
237 LocInfo = CCValAssign::SExt;
238 else if (ArgFlags.isZExt())
239 LocInfo = CCValAssign::ZExt;
240 else
241 LocInfo = CCValAssign::AExt;
242 }
243
244 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
245 if (!RetCC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
246 return false;
247 }
248
249 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
250 if (!RetCC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
251 return false;
252 }
253
254 return true; // CC didn't match.
255 }
256
257 static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
258 MVT LocVT, CCValAssign::LocInfo LocInfo,
259 ISD::ArgFlagsTy ArgFlags, CCState &State) {
260
261 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
262 if (unsigned Reg = State.AllocateReg(Hexagon::R0)) {
263 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
264 return false;
265 }
266 }
267
268 unsigned Offset = State.AllocateStack(4, 4);
269 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
270 return false;
271 }
272
273 static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
274 MVT LocVT, CCValAssign::LocInfo LocInfo,
275 ISD::ArgFlagsTy ArgFlags, CCState &State) {
276 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
277 if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
278 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
279 return false;
280 }
281 }
282
283 unsigned Offset = State.AllocateStack(8, 8);
284 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
285 return false;
286 }
287
288 SDValue
289 HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
290 const {
291 return SDValue();
292 }
293
294 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
295 /// by "Src" to address "Dst" of size "Size". Alignment information is
296 /// specified by the specific parameter attribute. The copy will be passed as
297 /// a byval function parameter. Sometimes what we are copying is the end of a
298 /// larger object, the part that does not fit in registers.
299 static SDValue
300 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
301 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
302 SDLoc dl) {
303
304 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
305 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
306 /*isVolatile=*/false, /*AlwaysInline=*/false,
307 MachinePointerInfo(), MachinePointerInfo());
308 }
309
310
311 // LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
312 // passed by value, the function prototype is modified to return void and
313 // the value is stored in memory pointed to by a pointer passed by the caller.
314 SDValue
315 HexagonTargetLowering::LowerReturn(SDValue Chain,
316 CallingConv::ID CallConv, bool isVarArg,
317 const SmallVectorImpl<ISD::OutputArg> &Outs,
318 const SmallVectorImpl<SDValue> &OutVals,
319 SDLoc dl, SelectionDAG &DAG) const {
320
321 // CCValAssign - represent the assignment of the return value to locations.
322 SmallVector<CCValAssign, 16> RVLocs;
323
324 // CCState - Info about the registers and stack slot.
325 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
326 *DAG.getContext());
327
328 // Analyze return values of ISD::RET
329 CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);
330
331 SDValue Flag;
332 SmallVector<SDValue, 4> RetOps(1, Chain);
333
334 // Copy the result values into the output registers.
335 for (unsigned i = 0; i != RVLocs.size(); ++i) {
336 CCValAssign &VA = RVLocs[i];
337
338 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
339
340 // Guarantee that all emitted copies are stuck together with flags.
341 Flag = Chain.getValue(1);
342 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
343 }
344
345 RetOps[0] = Chain; // Update chain.
346
347 // Add the flag if we have it.
348 if (Flag.getNode())
349 RetOps.push_back(Flag);
350
351 return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps);
352 }
353
354
355
356
357 /// LowerCallResult - Lower the result values of an ISD::CALL into the
358 /// appropriate copies out of appropriate physical registers. This assumes that
359 /// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
360 /// being lowered. Returns a SDNode with the same number of values as the
361 /// ISD::CALL.
362 SDValue
363 HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
364 CallingConv::ID CallConv, bool isVarArg,
365 const
366 SmallVectorImpl<ISD::InputArg> &Ins,
367 SDLoc dl, SelectionDAG &DAG,
368 SmallVectorImpl<SDValue> &InVals,
369 const SmallVectorImpl<SDValue> &OutVals,
370 SDValue Callee) const {
371
372 // Assign locations to each value returned by this call.
373 SmallVector<CCValAssign, 16> RVLocs;
374
375 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
376 *DAG.getContext());
377
378 CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);
379
380 // Copy all of the result registers out of their specified physreg.
381 for (unsigned i = 0; i != RVLocs.size(); ++i) {
382 Chain = DAG.getCopyFromReg(Chain, dl,
383 RVLocs[i].getLocReg(),
384 RVLocs[i].getValVT(), InFlag).getValue(1);
385 InFlag = Chain.getValue(2);
386 InVals.push_back(Chain.getValue(0));
387 }
388
389 return Chain;
390 }
391
392 /// LowerCall - Function arguments are copied from virtual registers to
393 /// physical registers or the stack frame; CALLSEQ_START/CALLSEQ_END are emitted.
394 SDValue
395 HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
396 SmallVectorImpl<SDValue> &InVals) const {
397 SelectionDAG &DAG = CLI.DAG;
398 SDLoc &dl = CLI.DL;
399 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
400 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
401 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
402 SDValue Chain = CLI.Chain;
403 SDValue Callee = CLI.Callee;
404 bool &isTailCall = CLI.IsTailCall;
405 CallingConv::ID CallConv = CLI.CallConv;
406 bool isVarArg = CLI.IsVarArg;
407
408 bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
409
410 // Check for varargs.
411 int NumNamedVarArgParams = -1;
412 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee))
413 {
414 const Function* CalleeFn = nullptr;
415 Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, MVT::i32);
416 if ((CalleeFn = dyn_cast<Function>(GA->getGlobal())))
417 {
418 // If a function has zero args and is a vararg function, that's
419 // disallowed so it must be an undeclared function. Do not assume
420 // varargs if the callee is undefined.
421 if (CalleeFn->isVarArg() &&
422 CalleeFn->getFunctionType()->getNumParams() != 0) {
423 NumNamedVarArgParams = CalleeFn->getFunctionType()->getNumParams();
424 }
425 }
426 }
427
428 // Analyze operands of the call, assigning locations to each operand.
429 SmallVector<CCValAssign, 16> ArgLocs;
430 HexagonCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
431 *DAG.getContext(), NumNamedVarArgParams);
432
433 if (NumNamedVarArgParams > 0)
434 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_VarArg);
435 else
436 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
437
438
439   if (isTailCall) {
440 bool StructAttrFlag =
441 DAG.getMachineFunction().getFunction()->hasStructRetAttr();
442 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
443 isVarArg, IsStructRet,
444 StructAttrFlag,
445 Outs, OutVals, Ins, DAG);
446 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i){
447 CCValAssign &VA = ArgLocs[i];
448 if (VA.isMemLoc()) {
449 isTailCall = false;
450 break;
451 }
452 }
453 if (isTailCall) {
454 DEBUG(dbgs () << "Eligible for Tail Call\n");
455 } else {
456 DEBUG(dbgs () <<
457 "Argument must be passed on stack. Not eligible for Tail Call\n");
458 }
459 }
460 // Get a count of how many bytes are to be pushed on the stack.
461 unsigned NumBytes = CCInfo.getNextStackOffset();
462 SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
463 SmallVector<SDValue, 8> MemOpChains;
464
465 const HexagonRegisterInfo *QRI = static_cast<const HexagonRegisterInfo *>(
466 DAG.getSubtarget().getRegisterInfo());
467 SDValue StackPtr =
468 DAG.getCopyFromReg(Chain, dl, QRI->getStackRegister(), getPointerTy());
469
470 // Walk the register/memloc assignments, inserting copies/loads.
471 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
472 CCValAssign &VA = ArgLocs[i];
473 SDValue Arg = OutVals[i];
474 ISD::ArgFlagsTy Flags = Outs[i].Flags;
475
476 // Promote the value if needed.
477 switch (VA.getLocInfo()) {
478 default:
479 // Loc info must be one of Full, SExt, ZExt, or AExt.
480 llvm_unreachable("Unknown loc info!");
481 case CCValAssign::Full:
482 break;
483 case CCValAssign::SExt:
484 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
485 break;
486 case CCValAssign::ZExt:
487 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
488 break;
489 case CCValAssign::AExt:
490 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
491 break;
492 }
493
494 if (VA.isMemLoc()) {
495 unsigned LocMemOffset = VA.getLocMemOffset();
496 SDValue PtrOff = DAG.getConstant(LocMemOffset, StackPtr.getValueType());
497 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
498
499 if (Flags.isByVal()) {
500 // The argument is a struct passed by value. According to LLVM, "Arg"
501         // is a pointer to it.
502 MemOpChains.push_back(CreateCopyOfByValArgument(Arg, PtrOff, Chain,
503 Flags, DAG, dl));
504 } else {
505         // The argument is not passed by value. "Arg" is a built-in type. It is
506 // not a pointer.
507 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
508                                            MachinePointerInfo(), false, false,
509 0));
510 }
511 continue;
512 }
513
514     // Arguments that can be passed in a register must be kept in the
515     // RegsToPass vector.
516 if (VA.isRegLoc()) {
517 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
518 }
519 }
520
521 // Transform all store nodes into one single node because all store
522 // nodes are independent of each other.
523 if (!MemOpChains.empty()) {
524 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
525 }
526
527 if (!isTailCall)
528 Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
529 getPointerTy(), true),
530 dl);
531
532 // Build a sequence of copy-to-reg nodes chained together with token
533 // chain and flag operands which copy the outgoing args into registers.
534   // The InFlag is necessary since all emitted instructions must be
535 // stuck together.
536 SDValue InFlag;
537 if (!isTailCall) {
538 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
539 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
540 RegsToPass[i].second, InFlag);
541 InFlag = Chain.getValue(1);
542 }
543 }
544
545 // For tail calls lower the arguments to the 'real' stack slot.
546 if (isTailCall) {
547 // Force all the incoming stack arguments to be loaded from the stack
548 // before any new outgoing arguments are stored to the stack, because the
549 // outgoing stack slots may alias the incoming argument stack slots, and
550 // the alias isn't otherwise explicit. This is slightly more conservative
551 // than necessary, because it means that each store effectively depends
552 // on every argument instead of just those arguments it would clobber.
553 //
554 // Do not flag preceding copytoreg stuff together with the following stuff.
555 InFlag = SDValue();
556 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
557 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
558 RegsToPass[i].second, InFlag);
559 InFlag = Chain.getValue(1);
560 }
561     InFlag = SDValue();
562 }
563
564 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
565 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
566 // node so that legalize doesn't hack it.
567 if (flag_aligned_memcpy) {
568 const char *MemcpyName =
569 "__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes";
570 Callee =
571 DAG.getTargetExternalSymbol(MemcpyName, getPointerTy());
572 flag_aligned_memcpy = false;
573 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
574 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy());
575 } else if (ExternalSymbolSDNode *S =
576 dyn_cast<ExternalSymbolSDNode>(Callee)) {
577 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
578 }
579
580 // Returns a chain & a flag for retval copy to use.
581 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
582 SmallVector<SDValue, 8> Ops;
583 Ops.push_back(Chain);
584 Ops.push_back(Callee);
585
586 // Add argument registers to the end of the list so that they are
587 // known live into the call.
588 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
589 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
590 RegsToPass[i].second.getValueType()));
591 }
592
593 if (InFlag.getNode()) {
594 Ops.push_back(InFlag);
595 }
596
597 if (isTailCall)
598 return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
599
600 Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops);
601 InFlag = Chain.getValue(1);
602
603 // Create the CALLSEQ_END node.
604 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
605 DAG.getIntPtrConstant(0, true), InFlag, dl);
606 InFlag = Chain.getValue(1);
607
608 // Handle result values, copying them out of physregs into vregs that we
609 // return.
610 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
611 InVals, OutVals, Callee);
612 }
613
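// getIndexedAddressParts - Match a pointer of the form (add Base, Offset),
// where Offset is a constant, so that it can be folded into a post-indexed
// load or store.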
614 static bool getIndexedAddressParts(SDNode *Ptr, EVT VT,
615 bool isSEXTLoad, SDValue &Base,
616 SDValue &Offset, bool &isInc,
617 SelectionDAG &DAG) {
618 if (Ptr->getOpcode() != ISD::ADD)
619 return false;
620
621 if (VT == MVT::i64 || VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
622 isInc = (Ptr->getOpcode() == ISD::ADD);
623 Base = Ptr->getOperand(0);
624 Offset = Ptr->getOperand(1);
625 // Ensure that Offset is a constant.
626 return (isa<ConstantSDNode>(Offset));
627 }
628
629 return false;
630 }
631
632 // TODO: Put this function along with the other isS* functions in
633 // HexagonISelDAGToDAG.cpp into a common file. Or better still, use the
634 // functions defined in HexagonOperands.td.
635 static bool Is_PostInc_S4_Offset(SDNode * S, int ShiftAmount) {
636 ConstantSDNode *N = cast<ConstantSDNode>(S);
637
638   // immS4 predicate - True if the immediate fits in a 4-bit sign-extended
639   // field.
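  // For example, with ShiftAmount == 2 (a 32-bit access) an offset of 16
  // gives v == 4 and m == 0, which is accepted; an odd offset is rejected
  // because m != 0.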
640 int64_t v = (int64_t)N->getSExtValue();
641 int64_t m = 0;
642 if (ShiftAmount > 0) {
643 m = v % ShiftAmount;
644 v = v >> ShiftAmount;
645 }
646 return (v <= 7) && (v >= -8) && (m == 0);
647 }
648
649 /// getPostIndexedAddressParts - returns true by value, base pointer and
650 /// offset pointer and addressing mode by reference if this node can be
651 /// combined with a load / store to form a post-indexed load / store.
652 bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
653 SDValue &Base,
654 SDValue &Offset,
655 ISD::MemIndexedMode &AM,
656 SelectionDAG &DAG) const
657 {
658 EVT VT;
659 SDValue Ptr;
660 bool isSEXTLoad = false;
661
662 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
663 VT = LD->getMemoryVT();
664 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
665 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
666 VT = ST->getMemoryVT();
667 if (ST->getValue().getValueType() == MVT::i64 && ST->isTruncatingStore()) {
668 return false;
669 }
670 } else {
671 return false;
672 }
673
674 bool isInc = false;
675 bool isLegal = getIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
676 isInc, DAG);
677 // ShiftAmount = number of left-shifted bits in the Hexagon instruction.
678 int ShiftAmount = VT.getSizeInBits() / 16;
679 if (isLegal && Is_PostInc_S4_Offset(Offset.getNode(), ShiftAmount)) {
680 AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
681 return true;
682 }
683
684 return false;
685 }
686
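// LowerINLINEASM - Walk the operands of an INLINEASM node and record in the
// machine function info whether the asm has an early-clobber definition of
// LR (the return-address register).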
687 SDValue HexagonTargetLowering::LowerINLINEASM(SDValue Op,
688 SelectionDAG &DAG) const {
689 SDNode *Node = Op.getNode();
690 MachineFunction &MF = DAG.getMachineFunction();
691 HexagonMachineFunctionInfo *FuncInfo =
692 MF.getInfo<HexagonMachineFunctionInfo>();
693 switch (Node->getOpcode()) {
694 case ISD::INLINEASM: {
695 unsigned NumOps = Node->getNumOperands();
696 if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
697 --NumOps; // Ignore the flag operand.
698
699 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
700 if (FuncInfo->hasClobberLR())
701 break;
702 unsigned Flags =
703 cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
704 unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
705 ++i; // Skip the ID value.
706
707 switch (InlineAsm::getKind(Flags)) {
708 default: llvm_unreachable("Bad flags!");
709 case InlineAsm::Kind_RegDef:
710 case InlineAsm::Kind_RegUse:
711 case InlineAsm::Kind_Imm:
712 case InlineAsm::Kind_Clobber:
713 case InlineAsm::Kind_Mem: {
714 for (; NumVals; --NumVals, ++i) {}
715 break;
716 }
717 case InlineAsm::Kind_RegDefEarlyClobber: {
718 for (; NumVals; --NumVals, ++i) {
719 unsigned Reg =
720 cast<RegisterSDNode>(Node->getOperand(i))->getReg();
721
722           // Check whether this register is LR (the return-address register).
723 const HexagonRegisterInfo *QRI =
724 static_cast<const HexagonRegisterInfo *>(
725 DAG.getSubtarget().getRegisterInfo());
726 if (Reg == QRI->getRARegister()) {
727 FuncInfo->setHasClobberLR(true);
728 break;
729 }
730 }
731 break;
732 }
733 }
734 }
735 }
736 } // Node->getOpcode
737 return Op;
738 }
739
740
741 //
742 // Taken from the XCore backend.
743 //
744 SDValue HexagonTargetLowering::
745 LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
746 {
747 SDValue Chain = Op.getOperand(0);
748 SDValue Table = Op.getOperand(1);
749 SDValue Index = Op.getOperand(2);
750 SDLoc dl(Op);
751 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
752 unsigned JTI = JT->getIndex();
753 MachineFunction &MF = DAG.getMachineFunction();
754 const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
755 SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
756
757 // Mark all jump table targets as address taken.
758 const std::vector<MachineJumpTableEntry> &JTE = MJTI->getJumpTables();
759 const std::vector<MachineBasicBlock*> &JTBBs = JTE[JTI].MBBs;
760 for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
761 MachineBasicBlock *MBB = JTBBs[i];
762 MBB->setHasAddressTaken();
763 // This line is needed to set the hasAddressTaken flag on the BasicBlock
764 // object.
765 BlockAddress::get(const_cast<BasicBlock *>(MBB->getBasicBlock()));
766 }
767
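  // The branch target is loaded from the jump table: the entry address is
  // JumpTableBase + Index * 4, since each entry is a 32-bit value.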
768 SDValue JumpTableBase = DAG.getNode(HexagonISD::WrapperJT, dl,
769 getPointerTy(), TargetJT);
770 SDValue ShiftIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
771 DAG.getConstant(2, MVT::i32));
772 SDValue JTAddress = DAG.getNode(ISD::ADD, dl, MVT::i32, JumpTableBase,
773 ShiftIndex);
774 SDValue LoadTarget = DAG.getLoad(MVT::i32, dl, Chain, JTAddress,
775 MachinePointerInfo(), false, false, false,
776 0);
777 return DAG.getNode(HexagonISD::BR_JT, dl, MVT::Other, Chain, LoadTarget);
778 }
779
780
781 SDValue
782 HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
783 SelectionDAG &DAG) const {
784 SDValue Chain = Op.getOperand(0);
785 SDValue Size = Op.getOperand(1);
786 SDLoc dl(Op);
787
788 unsigned SPReg = getStackPointerRegisterToSaveRestore();
789
790 // Get a reference to the stack pointer.
791 SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);
792
793 // Subtract the dynamic size from the actual stack size to
794 // obtain the new stack size.
795 SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);
796
797 //
798 // For Hexagon, the outgoing memory arguments area should be on top of the
799 // alloca area on the stack i.e., the outgoing memory arguments should be
800 // at a lower address than the alloca area. Move the alloca area down the
801 // stack by adding back the space reserved for outgoing arguments to SP
802 // here.
803 //
804 // We do not know what the size of the outgoing args is at this point.
805 // So, we add a pseudo instruction ADJDYNALLOC that will adjust the
806 // stack pointer. We patch this instruction with the correct, known
807 // offset in emitPrologue().
808 //
809 // Use a placeholder immediate (zero) for now. This will be patched up
810 // by emitPrologue().
811 SDValue ArgAdjust = DAG.getNode(HexagonISD::ADJDYNALLOC, dl,
812 MVT::i32,
813 Sub,
814 DAG.getConstant(0, MVT::i32));
815
816 // The Sub result contains the new stack start address, so it
817 // must be placed in the stack pointer register.
818 const HexagonRegisterInfo *QRI = static_cast<const HexagonRegisterInfo *>(
819 DAG.getSubtarget().getRegisterInfo());
820 SDValue CopyChain = DAG.getCopyToReg(Chain, dl, QRI->getStackRegister(), Sub);
821
822 SDValue Ops[2] = { ArgAdjust, CopyChain };
823 return DAG.getMergeValues(Ops, dl);
824 }
825
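// LowerFormalArguments - Lower incoming arguments: values arriving in
// registers are copied into fresh virtual registers, while stack-passed
// values get fixed frame-index objects (byval aggregates are referenced in
// place on the stack rather than loaded).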
826 SDValue
827 HexagonTargetLowering::LowerFormalArguments(SDValue Chain,
828 CallingConv::ID CallConv,
829 bool isVarArg,
830 const
831 SmallVectorImpl<ISD::InputArg> &Ins,
832 SDLoc dl, SelectionDAG &DAG,
833 SmallVectorImpl<SDValue> &InVals)
834 const {
835
836 MachineFunction &MF = DAG.getMachineFunction();
837 MachineFrameInfo *MFI = MF.getFrameInfo();
838 MachineRegisterInfo &RegInfo = MF.getRegInfo();
839 HexagonMachineFunctionInfo *FuncInfo =
840 MF.getInfo<HexagonMachineFunctionInfo>();
841
842
843 // Assign locations to all of the incoming arguments.
844 SmallVector<CCValAssign, 16> ArgLocs;
845 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
846 *DAG.getContext());
847
848 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
849
850   // In LLVM, when a struct larger than 8 bytes is returned by value, the
851   // first argument is a pointer to the location on the caller's stack where
852   // the return value will be stored. For Hexagon, that address is passed
853   // only when the struct is larger than 8 bytes; otherwise no address is
854   // passed into the callee and the callee returns the result directly
855   // through R0/R1.
856
857 SmallVector<SDValue, 4> MemOps;
858
859 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
860 CCValAssign &VA = ArgLocs[i];
861 ISD::ArgFlagsTy Flags = Ins[i].Flags;
862 unsigned ObjSize;
863 unsigned StackLocation;
864 int FI;
865
866 if ( (VA.isRegLoc() && !Flags.isByVal())
867 || (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() > 8)) {
868 // Arguments passed in registers
869 // 1. int, long long, ptr args that get allocated in register.
870       // 2. Large structs that get a register to hold their address.
871 EVT RegVT = VA.getLocVT();
872 if (RegVT == MVT::i8 || RegVT == MVT::i16 ||
873 RegVT == MVT::i32 || RegVT == MVT::f32) {
874 unsigned VReg =
875 RegInfo.createVirtualRegister(&Hexagon::IntRegsRegClass);
876 RegInfo.addLiveIn(VA.getLocReg(), VReg);
877 InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
878 } else if (RegVT == MVT::i64) {
879 unsigned VReg =
880 RegInfo.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
881 RegInfo.addLiveIn(VA.getLocReg(), VReg);
882 InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
883 } else {
884 assert (0);
885 }
886 } else if (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() <= 8) {
887 assert (0 && "ByValSize must be bigger than 8 bytes");
888 } else {
889 // Sanity check.
890 assert(VA.isMemLoc());
891
892 if (Flags.isByVal()) {
893 // If it's a byval parameter, then we need to compute the
894 // "real" size, not the size of the pointer.
895 ObjSize = Flags.getByValSize();
896 } else {
897 ObjSize = VA.getLocVT().getStoreSizeInBits() >> 3;
898 }
899
900 StackLocation = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
901 // Create the frame index object for this incoming parameter...
902 FI = MFI->CreateFixedObject(ObjSize, StackLocation, true);
903
904         // Create the SelectionDAG nodes corresponding to a load
905         // from this parameter.
906 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
907
908 if (Flags.isByVal()) {
909 // If it's a pass-by-value aggregate, then do not dereference the stack
910 // location. Instead, we should generate a reference to the stack
911 // location.
912 InVals.push_back(FIN);
913 } else {
914 InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
915 MachinePointerInfo(), false, false,
916 false, 0));
917 }
918 }
919 }
920
921 if (!MemOps.empty())
922 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
923
924 if (isVarArg) {
925 // This will point to the next argument passed via stack.
926 int FrameIndex = MFI->CreateFixedObject(Hexagon_PointerSize,
927 HEXAGON_LRFP_SIZE +
928 CCInfo.getNextStackOffset(),
929 true);
930 FuncInfo->setVarArgsFrameIndex(FrameIndex);
931 }
932
933 return Chain;
934 }
935
936 SDValue
937 HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
938 // VASTART stores the address of the VarArgsFrameIndex slot into the
939 // memory location argument.
940 MachineFunction &MF = DAG.getMachineFunction();
941 HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
942 SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
943 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
944 return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr,
945 Op.getOperand(1), MachinePointerInfo(SV), false,
946 false, 0);
947 }
948
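// LowerConstantPool - Materialize the address of a constant-pool entry as a
// HexagonISD::CONST32 node.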
949 SDValue
950 HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
951 EVT ValTy = Op.getValueType();
952 SDLoc dl(Op);
953 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
954 SDValue Res;
955 if (CP->isMachineConstantPoolEntry())
956 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), ValTy,
957 CP->getAlignment());
958 else
959 Res = DAG.getTargetConstantPool(CP->getConstVal(), ValTy,
960 CP->getAlignment());
961 return DAG.getNode(HexagonISD::CONST32, dl, ValTy, Res);
962 }
963
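// LowerRETURNADDR - The return address of the current frame is read from LR;
// for deeper frames it is loaded from the word at offset 4 of the
// corresponding frame address.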
964 SDValue
965 HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
966 const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
967 MachineFunction &MF = DAG.getMachineFunction();
968 MachineFrameInfo *MFI = MF.getFrameInfo();
969 MFI->setReturnAddressIsTaken(true);
970
971 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
972 return SDValue();
973
974 EVT VT = Op.getValueType();
975 SDLoc dl(Op);
976 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
977 if (Depth) {
978 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
979 SDValue Offset = DAG.getConstant(4, MVT::i32);
980 return DAG.getLoad(VT, dl, DAG.getEntryNode(),
981 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
982 MachinePointerInfo(), false, false, false, 0);
983 }
984
985 // Return LR, which contains the return address. Mark it an implicit live-in.
986 unsigned Reg = MF.addLiveIn(TRI->getRARegister(), getRegClassFor(MVT::i32));
987 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
988 }
989
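// LowerFRAMEADDR - The frame address is taken from the frame-pointer
// register; for deeper frames the saved frame pointer is loaded from the
// start of each successive frame.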
990 SDValue
991 HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
992 const HexagonRegisterInfo *TRI = static_cast<const HexagonRegisterInfo *>(
993 DAG.getSubtarget().getRegisterInfo());
994 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
995 MFI->setFrameAddressIsTaken(true);
996
997 EVT VT = Op.getValueType();
998 SDLoc dl(Op);
999 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1000 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
1001 TRI->getFrameRegister(), VT);
1002 while (Depth--)
1003 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
1004 MachinePointerInfo(),
1005 false, false, false, 0);
1006 return FrameAddr;
1007 }
1008
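// LowerATOMIC_FENCE - An atomic fence is lowered to a Hexagon BARRIER node.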
1009 SDValue HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op,
1010 SelectionDAG& DAG) const {
1011 SDLoc dl(Op);
1012 return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
1013 }
1014
1015
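// LowerGLOBALADDRESS - Materialize a global's address with CONST32, or with
// CONST32_GP when the global has been placed in the small data section.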
1016 SDValue HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op,
1017 SelectionDAG &DAG) const {
1018 SDValue Result;
1019 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
1020 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
1021 SDLoc dl(Op);
1022 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
1023
1024 const HexagonTargetObjectFile &TLOF =
1025 static_cast<const HexagonTargetObjectFile &>(getObjFileLowering());
1026 if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
1027 return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), Result);
1028 }
1029
1030 return DAG.getNode(HexagonISD::CONST32, dl, getPointerTy(), Result);
1031 }
1032
1033 SDValue
1034 HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
1035 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1036 SDValue BA_SD = DAG.getTargetBlockAddress(BA, MVT::i32);
1037 SDLoc dl(Op);
1038 return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), BA_SD);
1039 }
1040
1041 //===----------------------------------------------------------------------===//
1042 // TargetLowering Implementation
1043 //===----------------------------------------------------------------------===//
1044
1045 HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &targetmachine)
1046 : TargetLowering(targetmachine),
1047 TM(targetmachine) {
1048
1049 const HexagonSubtarget &Subtarget = TM.getSubtarget<HexagonSubtarget>();
1050
1051 // Set up the register classes.
1052 addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
1053 addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
1054
1055 if (Subtarget.hasV5TOps()) {
1056 addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
1057 addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
1058 }
1059
1060 addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
1061
1062 computeRegisterProperties();
1063
1064 // Align loop entry
1065 setPrefLoopAlignment(4);
1066
1067 // Limits for inline expansion of memcpy/memmove
1068 MaxStoresPerMemcpy = 6;
1069 MaxStoresPerMemmove = 6;
1070
1071 //
1072 // Library calls for unsupported operations
1073 //
1074
1075 setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
1076 setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
1077
1078 setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
1079 setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
1080
1081 setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
1082 setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
1083
1084 setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
1085 setOperationAction(ISD::SDIV, MVT::i32, Expand);
1086   setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
1087 setOperationAction(ISD::SREM, MVT::i32, Expand);
1088
1089 setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
1090 setOperationAction(ISD::SDIV, MVT::i64, Expand);
1091 setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
1092 setOperationAction(ISD::SREM, MVT::i64, Expand);
1093
1094 setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
1095 setOperationAction(ISD::UDIV, MVT::i32, Expand);
1096
1097 setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
1098 setOperationAction(ISD::UDIV, MVT::i64, Expand);
1099
1100 setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
1101 setOperationAction(ISD::UREM, MVT::i32, Expand);
1102
1103 setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
1104 setOperationAction(ISD::UREM, MVT::i64, Expand);
1105
1106 setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
1107 setOperationAction(ISD::FDIV, MVT::f32, Expand);
1108
1109 setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
1110 setOperationAction(ISD::FDIV, MVT::f64, Expand);
1111
1112 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
1113 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
1114 setOperationAction(ISD::FSIN, MVT::f32, Expand);
1115 setOperationAction(ISD::FSIN, MVT::f64, Expand);
1116
1117 if (Subtarget.hasV5TOps()) {
1118 // Hexagon V5 Support.
1119 setOperationAction(ISD::FADD, MVT::f32, Legal);
1120 setOperationAction(ISD::FADD, MVT::f64, Legal);
1121 setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
1122 setCondCodeAction(ISD::SETOEQ, MVT::f32, Legal);
1123 setCondCodeAction(ISD::SETOEQ, MVT::f64, Legal);
1124 setCondCodeAction(ISD::SETUEQ, MVT::f32, Legal);
1125 setCondCodeAction(ISD::SETUEQ, MVT::f64, Legal);
1126
1127 setCondCodeAction(ISD::SETOGE, MVT::f32, Legal);
1128 setCondCodeAction(ISD::SETOGE, MVT::f64, Legal);
1129 setCondCodeAction(ISD::SETUGE, MVT::f32, Legal);
1130 setCondCodeAction(ISD::SETUGE, MVT::f64, Legal);
1131
1132 setCondCodeAction(ISD::SETOGT, MVT::f32, Legal);
1133 setCondCodeAction(ISD::SETOGT, MVT::f64, Legal);
1134 setCondCodeAction(ISD::SETUGT, MVT::f32, Legal);
1135 setCondCodeAction(ISD::SETUGT, MVT::f64, Legal);
1136
1137 setCondCodeAction(ISD::SETOLE, MVT::f32, Legal);
1138 setCondCodeAction(ISD::SETOLE, MVT::f64, Legal);
1139 setCondCodeAction(ISD::SETOLT, MVT::f32, Legal);
1140 setCondCodeAction(ISD::SETOLT, MVT::f64, Legal);
1141
1142 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
1143 setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
1144
1145 setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
1146 setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
1147 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
1148 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
1149
1150 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
1151 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
1152 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
1153 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
1154
1155 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
1156 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
1157 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
1158 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
1159
1160 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1161 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1162 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1163 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1164
1165 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1166 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1167 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1168 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1169
1170 setOperationAction(ISD::FABS, MVT::f32, Legal);
1171 setOperationAction(ISD::FABS, MVT::f64, Expand);
1172
1173 setOperationAction(ISD::FNEG, MVT::f32, Legal);
1174 setOperationAction(ISD::FNEG, MVT::f64, Expand);
1175 } else {
1176
1177 // Expand fp<->uint.
1178 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
1179 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
1180
1181 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
1182 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
1183
1184 setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
1185 setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");
1186
1187 setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
1188 setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");
1189
1190 setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
1191 setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");
1192
1193 setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
1194 setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");
1195
1196 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
1197 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");
1198
1199 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
1200 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");
1201
1202 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
1203 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
1204
1205 setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
1206 setOperationAction(ISD::FADD, MVT::f64, Expand);
1207
1208 setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
1209 setOperationAction(ISD::FADD, MVT::f32, Expand);
1210
1211 setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
1212 setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);
1213
1214 setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
1215 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
1216
1217 setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
1218 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
1219
1220 setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
1221 setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
1222
1223 setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
1224 setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
1225
1226 setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
1227 setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);
1228
1229 setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
1230 setCondCodeAction(ISD::SETOGT, MVT::f64, Expand);
1231
1232 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
1233 setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);
1234
1235 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
1236 setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);
1237
1238 setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
1239 setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
1240
1241 setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
1242 setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
1243
1244 setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
1245 setCondCodeAction(ISD::SETOLT, MVT::f64, Expand);
1246
1247 setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
1248 setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
1249
1250 setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
1251 setOperationAction(ISD::FMUL, MVT::f64, Expand);
1252
1253 setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
1254     setOperationAction(ISD::FMUL, MVT::f32, Expand);
1255
1256 setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
1257 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
1258
1259 setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");
1260
1261 setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
1262     setOperationAction(ISD::FSUB, MVT::f64, Expand);
1263
1264 setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
1265     setOperationAction(ISD::FSUB, MVT::f32, Expand);
1266
1267 setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
1268 setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);
1269
1270 setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
1271 setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
1272
1273 setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
1274 setCondCodeAction(ISD::SETO, MVT::f64, Expand);
1275
1276 setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
1277 setCondCodeAction(ISD::SETO, MVT::f32, Expand);
1278
1279 setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
1280 setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
1281
1282 setOperationAction(ISD::FABS, MVT::f32, Expand);
1283 setOperationAction(ISD::FABS, MVT::f64, Expand);
1284 setOperationAction(ISD::FNEG, MVT::f32, Expand);
1285 setOperationAction(ISD::FNEG, MVT::f64, Expand);
1286 }
1287
1288 setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
1289 setOperationAction(ISD::SREM, MVT::i32, Expand);
1290
1291 setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
1292 setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
1293 setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
1294 setIndexedLoadAction(ISD::POST_INC, MVT::i64, Legal);
1295
1296 setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
1297 setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
1298 setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
1299 setIndexedStoreAction(ISD::POST_INC, MVT::i64, Legal);
1300
1301 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
1302
1303 // Turn FP extload into load/fextend.
1304 for (MVT VT : MVT::fp_valuetypes())
1305 setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
1306   // Hexagon has an i1 sign-extending load.
1307 for (MVT VT : MVT::integer_valuetypes())
1308 setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Expand);
1309 // Turn FP truncstore into trunc + store.
1310 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1311
1312 // Custom legalize GlobalAddress nodes into CONST32.
1313 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
1314 setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
1315 setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
1316 // Truncate action?
1317 setOperationAction(ISD::TRUNCATE, MVT::i64, Expand);
1318
1319 // Hexagon doesn't have sext_inreg, replace them with shl/sra.
1320 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
1321
1322 // Hexagon has no REM or DIVREM operations.
1323 setOperationAction(ISD::UREM, MVT::i32, Expand);
1324 setOperationAction(ISD::SREM, MVT::i32, Expand);
1325 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1326 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1327 setOperationAction(ISD::SREM, MVT::i64, Expand);
1328 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
1329 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
1330
1331 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
1332
1333 // Lower SELECT_CC to SETCC and SELECT.
1334 setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
1335 setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
1336 setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);
1337
1338 if (Subtarget.hasV5TOps()) {
1339
1340     // Mark SELECT as Custom so that we don't go into the infinite
1341     // select -> setcc -> select_cc -> select loop.
1342     //
1343 setOperationAction(ISD::SELECT, MVT::f32, Custom);
1344 setOperationAction(ISD::SELECT, MVT::f64, Custom);
1345
1346 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
1347 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
1348
1349 } else {
1350
1351 // Hexagon has no select or setcc: expand to SELECT_CC.
1352 setOperationAction(ISD::SELECT, MVT::f32, Expand);
1353 setOperationAction(ISD::SELECT, MVT::f64, Expand);
1354 }
1355
1356 if (EmitJumpTables) {
1357 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
1358 } else {
1359 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1360 }
1361   // Increase the jump table cutover to 5 entries (it was 4).
1362 setMinimumJumpTableEntries(5);
1363
1364 setOperationAction(ISD::BR_CC, MVT::f32, Expand);
1365 setOperationAction(ISD::BR_CC, MVT::f64, Expand);
1366 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1367 setOperationAction(ISD::BR_CC, MVT::i32, Expand);
1368 setOperationAction(ISD::BR_CC, MVT::i64, Expand);
1369
1370 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
1371
1372 setOperationAction(ISD::FSIN, MVT::f64, Expand);
1373 setOperationAction(ISD::FCOS, MVT::f64, Expand);
1374 setOperationAction(ISD::FREM, MVT::f64, Expand);
1375 setOperationAction(ISD::FSIN, MVT::f32, Expand);
1376 setOperationAction(ISD::FCOS, MVT::f32, Expand);
1377 setOperationAction(ISD::FREM, MVT::f32, Expand);
1378 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1379 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1380
1381 // In V4, we have double word add/sub with carry. The problem with
1382 // modelling this instruction is that it produces 2 results - Rdd and Px.
1383 // To model update of Px, we will have to use Defs[p0..p3] which will
1384   // cause any predicate live range to spill. So, we pretend we don't
1385 // have these instructions.
1386 setOperationAction(ISD::ADDE, MVT::i8, Expand);
1387 setOperationAction(ISD::ADDE, MVT::i16, Expand);
1388 setOperationAction(ISD::ADDE, MVT::i32, Expand);
1389 setOperationAction(ISD::ADDE, MVT::i64, Expand);
1390 setOperationAction(ISD::SUBE, MVT::i8, Expand);
1391 setOperationAction(ISD::SUBE, MVT::i16, Expand);
1392 setOperationAction(ISD::SUBE, MVT::i32, Expand);
1393 setOperationAction(ISD::SUBE, MVT::i64, Expand);
1394 setOperationAction(ISD::ADDC, MVT::i8, Expand);
1395 setOperationAction(ISD::ADDC, MVT::i16, Expand);
1396 setOperationAction(ISD::ADDC, MVT::i32, Expand);
1397 setOperationAction(ISD::ADDC, MVT::i64, Expand);
1398 setOperationAction(ISD::SUBC, MVT::i8, Expand);
1399 setOperationAction(ISD::SUBC, MVT::i16, Expand);
1400 setOperationAction(ISD::SUBC, MVT::i32, Expand);
1401 setOperationAction(ISD::SUBC, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);

  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);

  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  if (Subtarget.isSubtargetV2()) {
    setExceptionPointerRegister(Hexagon::R20);
    setExceptionSelectorRegister(Hexagon::R21);
  } else {
    setExceptionPointerRegister(Hexagon::R0);
    setExceptionSelectorRegister(Hexagon::R1);
  }

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::INLINEASM, MVT::Other, Custom);

  setMinFunctionAlignment(2);

  // Needed for DYNAMIC_STACKALLOC expansion.
  const HexagonRegisterInfo *QRI = static_cast<const HexagonRegisterInfo *>(
      TM.getSubtargetImpl()->getRegisterInfo());
  setStackPointerRegisterToSaveRestore(QRI->getStackRegister());
  setSchedulingPreference(Sched::VLIW);
}

const char*
HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
  default: return nullptr;
  case HexagonISD::CONST32: return "HexagonISD::CONST32";
  case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP";
  case HexagonISD::CONST32_Int_Real: return "HexagonISD::CONST32_Int_Real";
  case HexagonISD::ADJDYNALLOC: return "HexagonISD::ADJDYNALLOC";
  case HexagonISD::CMPICC: return "HexagonISD::CMPICC";
  case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC";
  case HexagonISD::BRICC: return "HexagonISD::BRICC";
  case HexagonISD::BRFCC: return "HexagonISD::BRFCC";
  case HexagonISD::SELECT_ICC: return "HexagonISD::SELECT_ICC";
  case HexagonISD::SELECT_FCC: return "HexagonISD::SELECT_FCC";
  case HexagonISD::Hi: return "HexagonISD::Hi";
  case HexagonISD::Lo: return "HexagonISD::Lo";
  case HexagonISD::FTOI: return "HexagonISD::FTOI";
  case HexagonISD::ITOF: return "HexagonISD::ITOF";
  case HexagonISD::CALL: return "HexagonISD::CALL";
  case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
  case HexagonISD::BR_JT: return "HexagonISD::BR_JT";
  case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
  case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN";
  }
}

bool
HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  EVT MTy1 = EVT::getEVT(Ty1);
  EVT MTy2 = EVT::getEVT(Ty2);
  if (!MTy1.isSimple() || !MTy2.isSimple()) {
    return false;
  }
  return ((MTy1.getSimpleVT() == MVT::i64) && (MTy2.getSimpleVT() == MVT::i32));
}

bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isSimple() || !VT2.isSimple()) {
    return false;
  }
  return ((VT1.getSimpleVT() == MVT::i64) && (VT2.getSimpleVT() == MVT::i32));
}
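
// Illustrative note (a sketch, not exercised here): on Hexagon a 64-bit value
// occupies a register pair (e.g. r1:0), so truncating i64 to i32 is just a
// read of the low register and costs no instruction; that is why only the
// i64 -> i32 case is reported as free above.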

bool
HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  // Assuming the caller does not have either a signext or zeroext modifier,
  // and only one value is accepted, any reasonable truncation is allowed.
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  // FIXME: in principle up to 64-bit could be made safe, but it would be very
  // fragile at the moment: any support for multiple value returns would be
  // liable to disallow tail calls involving i64 -> iN truncation in many cases.
  return Ty1->getPrimitiveSizeInBits() <= 32;
}

SDValue
HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Offset = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  // Mark function as containing a call to EH_RETURN.
  HexagonMachineFunctionInfo *FuncInfo =
      DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>();
  FuncInfo->setHasEHReturn();

  unsigned OffsetReg = Hexagon::R28;

  SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                  DAG.getRegister(Hexagon::R30, getPointerTy()),
                                  DAG.getIntPtrConstant(4));
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
                       false, false, 0);
  Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);

  // Not needed; we already use it as an explicit input to EH_RETURN.
  // MF.getRegInfo().addLiveOut(OffsetReg);

  return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
}
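
// Illustrative summary (an interpretation of the lowering above, not extra
// functionality): the emitted sequence roughly performs
//   *(fp + 4) = handler;   // overwrite the return address saved next to FP
//   r28       = offset;    // stack adjustment consumed when EH_RETURN expands
// so the eventual return transfers control to the landing pad with the stack
// adjusted by the unwinder-provided offset.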

SDValue
HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default: llvm_unreachable("Should not custom lower this!");
  case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
  case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
  // Frame & Return address. Currently unimplemented.
  case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
  case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
  case ISD::GlobalTLSAddress:
    llvm_unreachable("TLS not implemented for Hexagon.");
  case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
  case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
  case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
  case ISD::VASTART: return LowerVASTART(Op, DAG);
  case ISD::BR_JT: return LowerBR_JT(Op, DAG);

  case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
  case ISD::SELECT: return Op;
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INLINEASM: return LowerINLINEASM(Op, DAG);
  }
}

//===----------------------------------------------------------------------===//
// Hexagon Scheduler Hooks
//===----------------------------------------------------------------------===//
MachineBasicBlock *
HexagonTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                   MachineBasicBlock *BB) const {
  switch (MI->getOpcode()) {
  case Hexagon::ADJDYNALLOC: {
    MachineFunction *MF = BB->getParent();
    HexagonMachineFunctionInfo *FuncInfo =
        MF->getInfo<HexagonMachineFunctionInfo>();
    FuncInfo->addAllocaAdjustInst(MI);
    return BB;
  }
  default: llvm_unreachable("Unexpected instr type to insert");
  } // switch
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass*>
HexagonTargetLowering::getRegForInlineAsmConstraint(
    const std::string &Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':   // R0-R31
      switch (VT.SimpleTy) {
      default:
        llvm_unreachable("getRegForInlineAsmConstraint Unhandled data type");
      case MVT::i32:
      case MVT::i16:
      case MVT::i8:
      case MVT::f32:
        return std::make_pair(0U, &Hexagon::IntRegsRegClass);
      case MVT::i64:
      case MVT::f64:
        return std::make_pair(0U, &Hexagon::DoubleRegsRegClass);
      }
    default:
      llvm_unreachable("Unknown asm register class");
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}
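
// Illustrative use (a sketch, not part of this lowering): for inline assembly
// such as
//   asm("%0 = add(%1, %2)" : "=r"(res) : "r"(a), "r"(b));
// the 'r' constraint resolves 32-bit and smaller operands to IntRegs (r0-r31)
// and 64-bit operands to DoubleRegs register pairs, as selected above.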

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  return TM.getSubtarget<HexagonSubtarget>().hasV5TOps();
}

/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
bool HexagonTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // Allow a sign-extended 11-bit immediate field; scaled by the access size,
  // this covers a byte-offset range of roughly +/-8K, which is what the bound
  // below checks.
  if (AM.BaseOffs <= -(1LL << 13) || AM.BaseOffs >= (1LL << 13)-1) {
    return false;
  }

  // No global is ever allowed as a base.
  if (AM.BaseGV) {
    return false;
  }

  int Scale = AM.Scale;
  if (Scale < 0) Scale = -Scale;
  switch (Scale) {
  case 0:  // No scale reg, "r+i", "r", or just "i".
    break;
  default: // No scaled addressing mode.
    return false;
  }
  return true;
}
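
// Illustrative examples (a sketch of what the checks above accept or reject):
//   [Rs + #100]      base register plus a small immediate     -> accepted
//   [Rs]             plain base register                      -> accepted
//   [Rs + #100000]   offset outside the immediate range       -> rejected
//   [Rs + Rt<<#2]    scaled register index                    -> rejected here
//   [@global + #8]   global symbol used as the base           -> rejected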

/// isLegalICmpImmediate - Return true if the specified immediate is a legal
/// icmp immediate, that is, the target has icmp instructions which can compare
/// a register against the immediate without having to materialize the
/// immediate into a register.
bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return Imm >= -512 && Imm <= 511;
}
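
// Illustrative examples (not exercised here): as modeled above, the compare
// immediates cover the signed range [-512, 511], so
//   p0 = cmp.eq(r0, #511)
// can keep the constant in the instruction, while comparing against #512
// first needs the constant materialized into a register.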

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
    bool isCalleeStructRet, bool isCallerStructRet,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  const Function *CallerF = DAG.getMachineFunction().getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // ***************************************************************************
  // Look for obvious safe cases to perform tail call optimization that do not
  // require ABI changes.
  // ***************************************************************************

  // If this is a tail call via a function pointer, then don't do it!
  if (!(dyn_cast<GlobalAddressSDNode>(Callee))
      && !(dyn_cast<ExternalSymbolSDNode>(Callee))) {
    return false;
  }

  // Do not optimize if the calling conventions do not match.
  if (!CCMatch)
    return false;

  // Do not tail call optimize vararg calls.
  if (isVarArg)
    return false;

  // Also avoid tail call optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // In addition to the cases above, we also disable Tail Call Optimization if
  // the calling-convention code decides that at least one outgoing argument
  // needs to go on the stack. We cannot check that here because that
  // information is not available at this point.
  return true;
}
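
// Illustrative outcomes of the checks above (a sketch): a direct call to a
// known function with a matching calling convention, no struct-return
// arguments, and no varargs may be tail-called; a call through a function
// pointer, a vararg call, or a mismatched calling convention is not.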

// Return true when the given node fits in a positive half word.
bool llvm::isPositiveHalfWord(SDNode *N) {
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (CN && CN->getSExtValue() > 0 && isInt<16>(CN->getSExtValue()))
    return true;

  switch (N->getOpcode()) {
  default:
    return false;
  case ISD::SIGN_EXTEND_INREG:
    return true;
  }
}
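
// Illustrative examples (a sketch): constant nodes holding 42 or 32767 fit in
// a positive half word and return true; 0, -1, or 70000 do not. Any
// ISD::SIGN_EXTEND_INREG node is accepted as well, regardless of its operand.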