1 //===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation -----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the interfaces that Hexagon uses to lower LLVM code
11 // into a selection DAG.
13 //===----------------------------------------------------------------------===//
15 #include "HexagonISelLowering.h"
16 #include "HexagonMachineFunctionInfo.h"
17 #include "HexagonSubtarget.h"
18 #include "HexagonTargetMachine.h"
19 #include "HexagonTargetObjectFile.h"
20 #include "llvm/CodeGen/CallingConvLower.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineJumpTableInfo.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/SelectionDAGISel.h"
27 #include "llvm/CodeGen/ValueTypes.h"
28 #include "llvm/IR/CallingConv.h"
29 #include "llvm/IR/DerivedTypes.h"
30 #include "llvm/IR/Function.h"
31 #include "llvm/IR/GlobalAlias.h"
32 #include "llvm/IR/GlobalVariable.h"
33 #include "llvm/IR/InlineAsm.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/Support/CommandLine.h"
36 #include "llvm/Support/Debug.h"
37 #include "llvm/Support/ErrorHandling.h"
38 #include "llvm/Support/raw_ostream.h"
42 #define DEBUG_TYPE "hexagon-lowering"
45 EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden,
46 cl::desc("Control jump table emission on Hexagon target"));
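// HexagonCCState extends CCState with the number of named (formal) parameters
// of a varargs callee. CC_Hexagon_VarArg below uses it to give named arguments
// the normal register treatment while forcing every unnamed argument onto the
// stack.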
49 class HexagonCCState : public CCState {
50 int NumNamedVarArgParams;
53 HexagonCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
54 const TargetMachine &TM, SmallVectorImpl<CCValAssign> &locs,
55 LLVMContext &C, int NumNamedVarArgParams)
56 : CCState(CC, isVarArg, MF, TM, locs, C),
57 NumNamedVarArgParams(NumNamedVarArgParams) {}
59 int getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
63 // Implement calling convention for Hexagon.
65 CC_Hexagon(unsigned ValNo, MVT ValVT,
66 MVT LocVT, CCValAssign::LocInfo LocInfo,
67 ISD::ArgFlagsTy ArgFlags, CCState &State);
70 CC_Hexagon32(unsigned ValNo, MVT ValVT,
71 MVT LocVT, CCValAssign::LocInfo LocInfo,
72 ISD::ArgFlagsTy ArgFlags, CCState &State);
75 CC_Hexagon64(unsigned ValNo, MVT ValVT,
76 MVT LocVT, CCValAssign::LocInfo LocInfo,
77 ISD::ArgFlagsTy ArgFlags, CCState &State);
80 RetCC_Hexagon(unsigned ValNo, MVT ValVT,
81 MVT LocVT, CCValAssign::LocInfo LocInfo,
82 ISD::ArgFlagsTy ArgFlags, CCState &State);
85 RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
86 MVT LocVT, CCValAssign::LocInfo LocInfo,
87 ISD::ArgFlagsTy ArgFlags, CCState &State);
90 RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
91 MVT LocVT, CCValAssign::LocInfo LocInfo,
92 ISD::ArgFlagsTy ArgFlags, CCState &State);
95 CC_Hexagon_VarArg (unsigned ValNo, MVT ValVT,
96 MVT LocVT, CCValAssign::LocInfo LocInfo,
97 ISD::ArgFlagsTy ArgFlags, CCState &State) {
98 HexagonCCState &HState = static_cast<HexagonCCState &>(State);
100     // NumNamedVarArgParams cannot be zero for a vararg function.
101 assert((HState.getNumNamedVarArgParams() > 0) &&
102 "NumNamedVarArgParams is not bigger than zero.");
104 if ((int)ValNo < HState.getNumNamedVarArgParams()) {
105 // Deal with named arguments.
106 return CC_Hexagon(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
109     // Deal with unnamed arguments.
111 if (ArgFlags.isByVal()) {
112       // If passed by value, the size allocated on the stack is decided
113 // by ArgFlags.getByValSize(), not by the size of LocVT.
114 assert ((ArgFlags.getByValSize() > 8) &&
115 "ByValSize must be bigger than 8 bytes");
116 ofst = State.AllocateStack(ArgFlags.getByValSize(), 4);
117 State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
120 if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
123 if (ArgFlags.isSExt())
124 LocInfo = CCValAssign::SExt;
125 else if (ArgFlags.isZExt())
126 LocInfo = CCValAssign::ZExt;
128 LocInfo = CCValAssign::AExt;
130 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
131 ofst = State.AllocateStack(4, 4);
132 State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
135 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
136 ofst = State.AllocateStack(8, 8);
137 State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
145 CC_Hexagon (unsigned ValNo, MVT ValVT,
146 MVT LocVT, CCValAssign::LocInfo LocInfo,
147 ISD::ArgFlagsTy ArgFlags, CCState &State) {
149 if (ArgFlags.isByVal()) {
151 assert ((ArgFlags.getByValSize() > 8) &&
152 "ByValSize must be bigger than 8 bytes");
153 unsigned Offset = State.AllocateStack(ArgFlags.getByValSize(), 4);
154 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
158 if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
161 if (ArgFlags.isSExt())
162 LocInfo = CCValAssign::SExt;
163 else if (ArgFlags.isZExt())
164 LocInfo = CCValAssign::ZExt;
166 LocInfo = CCValAssign::AExt;
169 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
170 if (!CC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
174 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
175 if (!CC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
179 return true; // CC didn't match.
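// CC_Hexagon32: 32-bit values (i32/f32) go into the first six word registers,
// R0..R5, as listed in RegList below; once those are exhausted, each further
// value gets a 4-byte aligned stack slot. For example, the seventh i32
// argument of a call would be assigned the first stack location allocated here.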
183 static bool CC_Hexagon32(unsigned ValNo, MVT ValVT,
184 MVT LocVT, CCValAssign::LocInfo LocInfo,
185 ISD::ArgFlagsTy ArgFlags, CCState &State) {
187 static const MCPhysReg RegList[] = {
188 Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
191 if (unsigned Reg = State.AllocateReg(RegList, 6)) {
192 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
196 unsigned Offset = State.AllocateStack(4, 4);
197 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
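// CC_Hexagon64: 64-bit values (i64/f64) are passed in the double registers
// D0, D1, D2 (each a pair of word registers, e.g. D0 is R1:R0); once those are
// taken, the value goes to an 8-byte aligned stack slot.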
201 static bool CC_Hexagon64(unsigned ValNo, MVT ValVT,
202 MVT LocVT, CCValAssign::LocInfo LocInfo,
203 ISD::ArgFlagsTy ArgFlags, CCState &State) {
205 if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
206 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
210 static const MCPhysReg RegList1[] = {
211 Hexagon::D1, Hexagon::D2
213 static const MCPhysReg RegList2[] = {
214 Hexagon::R1, Hexagon::R3
216 if (unsigned Reg = State.AllocateReg(RegList1, RegList2, 2)) {
217 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
221 unsigned Offset = State.AllocateStack(8, 8, Hexagon::D2);
222 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
226 static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
227 MVT LocVT, CCValAssign::LocInfo LocInfo,
228 ISD::ArgFlagsTy ArgFlags, CCState &State) {
231 if (LocVT == MVT::i1 ||
236 if (ArgFlags.isSExt())
237 LocInfo = CCValAssign::SExt;
238 else if (ArgFlags.isZExt())
239 LocInfo = CCValAssign::ZExt;
241 LocInfo = CCValAssign::AExt;
244 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
245 if (!RetCC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
249 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
250 if (!RetCC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
254 return true; // CC didn't match.
257 static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
258 MVT LocVT, CCValAssign::LocInfo LocInfo,
259 ISD::ArgFlagsTy ArgFlags, CCState &State) {
261 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
262 if (unsigned Reg = State.AllocateReg(Hexagon::R0)) {
263 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
268 unsigned Offset = State.AllocateStack(4, 4);
269 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
273 static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
274 MVT LocVT, CCValAssign::LocInfo LocInfo,
275 ISD::ArgFlagsTy ArgFlags, CCState &State) {
276 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
277 if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
278 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
283 unsigned Offset = State.AllocateStack(8, 8);
284 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
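// Taken together, the return-value convention mirrors the argument registers:
// a 32-bit result comes back in R0 and a 64-bit result in D0 (the R1:R0 pair);
// anything else falls back to a stack slot allocated above.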
289 HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
294 /// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
295 /// by "Src" to address "Dst" of size "Size". Alignment information is
296 /// specified by the specific parameter attribute. The copy will be passed as
297 /// a byval function parameter. Sometimes what we are copying is the end of a
298 /// larger object, the part that does not fit in registers.
300 CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
301 ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
304 SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
305 return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
306 /*isVolatile=*/false, /*AlwaysInline=*/false,
307 MachinePointerInfo(), MachinePointerInfo());
311 // LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
312 // passed by value, the function prototype is modified to return void and
313 // the value is stored in memory pointed to by a pointer passed by the caller.
315 HexagonTargetLowering::LowerReturn(SDValue Chain,
316 CallingConv::ID CallConv, bool isVarArg,
317 const SmallVectorImpl<ISD::OutputArg> &Outs,
318 const SmallVectorImpl<SDValue> &OutVals,
319 SDLoc dl, SelectionDAG &DAG) const {
321 // CCValAssign - represent the assignment of the return value to locations.
322 SmallVector<CCValAssign, 16> RVLocs;
324 // CCState - Info about the registers and stack slot.
325 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
326 getTargetMachine(), RVLocs, *DAG.getContext());
328 // Analyze return values of ISD::RET
329 CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);
332 SmallVector<SDValue, 4> RetOps(1, Chain);
334 // Copy the result values into the output registers.
335 for (unsigned i = 0; i != RVLocs.size(); ++i) {
336 CCValAssign &VA = RVLocs[i];
338 Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);
340 // Guarantee that all emitted copies are stuck together with flags.
341 Flag = Chain.getValue(1);
342 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
345 RetOps[0] = Chain; // Update chain.
347 // Add the flag if we have it.
349 RetOps.push_back(Flag);
351 return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps);
357 /// LowerCallResult - Lower the result values of an ISD::CALL into the
358 /// appropriate copies out of appropriate physical registers. This assumes that
359 /// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
360 /// being lowered. Returns an SDNode with the same number of values as the ISD::CALL.
363 HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
364 CallingConv::ID CallConv, bool isVarArg,
366 SmallVectorImpl<ISD::InputArg> &Ins,
367 SDLoc dl, SelectionDAG &DAG,
368 SmallVectorImpl<SDValue> &InVals,
369 const SmallVectorImpl<SDValue> &OutVals,
370 SDValue Callee) const {
372 // Assign locations to each value returned by this call.
373 SmallVector<CCValAssign, 16> RVLocs;
375 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
376 getTargetMachine(), RVLocs, *DAG.getContext());
378 CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);
380 // Copy all of the result registers out of their specified physreg.
381 for (unsigned i = 0; i != RVLocs.size(); ++i) {
382 Chain = DAG.getCopyFromReg(Chain, dl,
383 RVLocs[i].getLocReg(),
384 RVLocs[i].getValVT(), InFlag).getValue(1);
385 InFlag = Chain.getValue(2);
386 InVals.push_back(Chain.getValue(0));
392 /// LowerCall - Function arguments are copied from virtual registers to
393 /// (physical regs)/(stack frame); CALLSEQ_START and CALLSEQ_END are emitted.
395 HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
396 SmallVectorImpl<SDValue> &InVals) const {
397 SelectionDAG &DAG = CLI.DAG;
399 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
400 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
401 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
402 SDValue Chain = CLI.Chain;
403 SDValue Callee = CLI.Callee;
404 bool &isTailCall = CLI.IsTailCall;
405 CallingConv::ID CallConv = CLI.CallConv;
406 bool isVarArg = CLI.IsVarArg;
408 bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();
410 // Check for varargs.
411 int NumNamedVarArgParams = -1;
412 if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee))
414 const Function* CalleeFn = nullptr;
415 Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, MVT::i32);
416 if ((CalleeFn = dyn_cast<Function>(GA->getGlobal())))
418 // If a function has zero args and is a vararg function, that's
419 // disallowed so it must be an undeclared function. Do not assume
420 // varargs if the callee is undefined.
421 if (CalleeFn->isVarArg() &&
422 CalleeFn->getFunctionType()->getNumParams() != 0) {
423 NumNamedVarArgParams = CalleeFn->getFunctionType()->getNumParams();
428 // Analyze operands of the call, assigning locations to each operand.
429 SmallVector<CCValAssign, 16> ArgLocs;
430 HexagonCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
431 getTargetMachine(), ArgLocs, *DAG.getContext(),
432 NumNamedVarArgParams);
434 if (NumNamedVarArgParams > 0)
435 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_VarArg);
437 CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);
441 bool StructAttrFlag =
442 DAG.getMachineFunction().getFunction()->hasStructRetAttr();
443 isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
444 isVarArg, IsStructRet,
446 Outs, OutVals, Ins, DAG);
447 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i){
448 CCValAssign &VA = ArgLocs[i];
455 DEBUG(dbgs () << "Eligible for Tail Call\n");
458 "Argument must be passed on stack. Not eligible for Tail Call\n");
461 // Get a count of how many bytes are to be pushed on the stack.
462 unsigned NumBytes = CCInfo.getNextStackOffset();
463 SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
464 SmallVector<SDValue, 8> MemOpChains;
467 DAG.getCopyFromReg(Chain, dl, TM.getRegisterInfo()->getStackRegister(),
470 // Walk the register/memloc assignments, inserting copies/loads.
471 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
472 CCValAssign &VA = ArgLocs[i];
473 SDValue Arg = OutVals[i];
474 ISD::ArgFlagsTy Flags = Outs[i].Flags;
476 // Promote the value if needed.
477 switch (VA.getLocInfo()) {
479 // Loc info must be one of Full, SExt, ZExt, or AExt.
480 llvm_unreachable("Unknown loc info!");
481 case CCValAssign::Full:
483 case CCValAssign::SExt:
484 Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
486 case CCValAssign::ZExt:
487 Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
489 case CCValAssign::AExt:
490 Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
495 unsigned LocMemOffset = VA.getLocMemOffset();
496 SDValue PtrOff = DAG.getConstant(LocMemOffset, StackPtr.getValueType());
497 PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);
499 if (Flags.isByVal()) {
500       // The argument is a struct passed by value. According to LLVM, "Arg" is a pointer.
502 MemOpChains.push_back(CreateCopyOfByValArgument(Arg, PtrOff, Chain,
505       // The argument is not passed by value. "Arg" is a builtin type. It is stored on the stack.
507 MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
508 MachinePointerInfo(),false, false,
514     // Arguments that can be passed in registers must be kept in the RegsToPass vector.
517 RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
521 // Transform all store nodes into one single node because all store
522 // nodes are independent of each other.
523 if (!MemOpChains.empty()) {
524 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
528 Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
529 getPointerTy(), true),
532 // Build a sequence of copy-to-reg nodes chained together with token
533 // chain and flag operands which copy the outgoing args into registers.
534   // The InFlag is necessary since all emitted instructions must be stuck together.
538 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
539 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
540 RegsToPass[i].second, InFlag);
541 InFlag = Chain.getValue(1);
545 // For tail calls lower the arguments to the 'real' stack slot.
547 // Force all the incoming stack arguments to be loaded from the stack
548 // before any new outgoing arguments are stored to the stack, because the
549 // outgoing stack slots may alias the incoming argument stack slots, and
550 // the alias isn't otherwise explicit. This is slightly more conservative
551 // than necessary, because it means that each store effectively depends
552 // on every argument instead of just those arguments it would clobber.
554 // Do not flag preceding copytoreg stuff together with the following stuff.
556 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
557 Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
558 RegsToPass[i].second, InFlag);
559 InFlag = Chain.getValue(1);
564 // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
565 // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
566 // node so that legalize doesn't hack it.
567 if (flag_aligned_memcpy) {
568 const char *MemcpyName =
569 "__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes";
571 DAG.getTargetExternalSymbol(MemcpyName, getPointerTy());
572 flag_aligned_memcpy = false;
573 } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
574 Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy());
575 } else if (ExternalSymbolSDNode *S =
576 dyn_cast<ExternalSymbolSDNode>(Callee)) {
577 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
580 // Returns a chain & a flag for retval copy to use.
581 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
582 SmallVector<SDValue, 8> Ops;
583 Ops.push_back(Chain);
584 Ops.push_back(Callee);
586 // Add argument registers to the end of the list so that they are
587 // known live into the call.
588 for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
589 Ops.push_back(DAG.getRegister(RegsToPass[i].first,
590 RegsToPass[i].second.getValueType()));
593 if (InFlag.getNode()) {
594 Ops.push_back(InFlag);
598 return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);
600 Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops);
601 InFlag = Chain.getValue(1);
603 // Create the CALLSEQ_END node.
604 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
605 DAG.getIntPtrConstant(0, true), InFlag, dl);
606 InFlag = Chain.getValue(1);
608   // Handle result values, copying them out of physregs into vregs that we return.
610 return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
611 InVals, OutVals, Callee);
614 static bool getIndexedAddressParts(SDNode *Ptr, EVT VT,
615 bool isSEXTLoad, SDValue &Base,
616 SDValue &Offset, bool &isInc,
618 if (Ptr->getOpcode() != ISD::ADD)
621 if (VT == MVT::i64 || VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
622 isInc = (Ptr->getOpcode() == ISD::ADD);
623 Base = Ptr->getOperand(0);
624 Offset = Ptr->getOperand(1);
625 // Ensure that Offset is a constant.
626 return (isa<ConstantSDNode>(Offset));
632 // TODO: Put this function along with the other isS* functions in
633 // HexagonISelDAGToDAG.cpp into a common file. Or better still, use the
634 // functions defined in HexagonOperands.td.
635 static bool Is_PostInc_S4_Offset(SDNode * S, int ShiftAmount) {
636 ConstantSDNode *N = cast<ConstantSDNode>(S);
638   // immS4 predicate - True if the immediate fits in a 4-bit sign-extended field.
640 int64_t v = (int64_t)N->getSExtValue();
642 if (ShiftAmount > 0) {
644 v = v >> ShiftAmount;
646 return (v <= 7) && (v >= -8) && (m == 0);
649 /// getPostIndexedAddressParts - returns true by value, base pointer and
650 /// offset pointer and addressing mode by reference if this node can be
651 /// combined with a load / store to form a post-indexed load / store.
652 bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
655 ISD::MemIndexedMode &AM,
656 SelectionDAG &DAG) const
660 bool isSEXTLoad = false;
662 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
663 VT = LD->getMemoryVT();
664 isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
665 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
666 VT = ST->getMemoryVT();
667 if (ST->getValue().getValueType() == MVT::i64 && ST->isTruncatingStore()) {
675 bool isLegal = getIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
677 // ShiftAmount = number of left-shifted bits in the Hexagon instruction.
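  // For example, VT.getSizeInBits()/16 below gives 0 for i8, 1 for i16,
  // 2 for i32, and 4 for i64.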
678 int ShiftAmount = VT.getSizeInBits() / 16;
679 if (isLegal && Is_PostInc_S4_Offset(Offset.getNode(), ShiftAmount)) {
680 AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
687 SDValue HexagonTargetLowering::LowerINLINEASM(SDValue Op,
688 SelectionDAG &DAG) const {
689 SDNode *Node = Op.getNode();
690 MachineFunction &MF = DAG.getMachineFunction();
691 HexagonMachineFunctionInfo *FuncInfo =
692 MF.getInfo<HexagonMachineFunctionInfo>();
693 switch (Node->getOpcode()) {
694 case ISD::INLINEASM: {
695 unsigned NumOps = Node->getNumOperands();
696 if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
697 --NumOps; // Ignore the flag operand.
699 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
700 if (FuncInfo->hasClobberLR())
703 cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
704 unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
705 ++i; // Skip the ID value.
707 switch (InlineAsm::getKind(Flags)) {
708 default: llvm_unreachable("Bad flags!");
709 case InlineAsm::Kind_RegDef:
710 case InlineAsm::Kind_RegUse:
711 case InlineAsm::Kind_Imm:
712 case InlineAsm::Kind_Clobber:
713 case InlineAsm::Kind_Mem: {
714 for (; NumVals; --NumVals, ++i) {}
717 case InlineAsm::Kind_RegDefEarlyClobber: {
718 for (; NumVals; --NumVals, ++i) {
720 cast<RegisterSDNode>(Node->getOperand(i))->getReg();
723 if (Reg == TM.getRegisterInfo()->getRARegister()) {
724 FuncInfo->setHasClobberLR(true);
739 // Taken from the XCore backend.
741 SDValue HexagonTargetLowering::
742 LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
744 SDValue Chain = Op.getOperand(0);
745 SDValue Table = Op.getOperand(1);
746 SDValue Index = Op.getOperand(2);
748 JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
749 unsigned JTI = JT->getIndex();
750 MachineFunction &MF = DAG.getMachineFunction();
751 const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
752 SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);
754 // Mark all jump table targets as address taken.
755 const std::vector<MachineJumpTableEntry> &JTE = MJTI->getJumpTables();
756 const std::vector<MachineBasicBlock*> &JTBBs = JTE[JTI].MBBs;
757 for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
758 MachineBasicBlock *MBB = JTBBs[i];
759 MBB->setHasAddressTaken();
760     // This line is needed to set the hasAddressTaken flag on the BasicBlock object.
762 BlockAddress::get(const_cast<BasicBlock *>(MBB->getBasicBlock()));
765 SDValue JumpTableBase = DAG.getNode(HexagonISD::WrapperJT, dl,
766 getPointerTy(), TargetJT);
767 SDValue ShiftIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
768 DAG.getConstant(2, MVT::i32));
769 SDValue JTAddress = DAG.getNode(ISD::ADD, dl, MVT::i32, JumpTableBase,
771 SDValue LoadTarget = DAG.getLoad(MVT::i32, dl, Chain, JTAddress,
772 MachinePointerInfo(), false, false, false,
774 return DAG.getNode(HexagonISD::BR_JT, dl, MVT::Other, Chain, LoadTarget);
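// In short: the jump-table base is wrapped in HexagonISD::WrapperJT, the index
// is scaled by 4 (each table entry is assumed to hold a 32-bit address, hence
// the i32 load), the target address is loaded from base + index*4, and control
// is transferred with HexagonISD::BR_JT.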
779 HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
780 SelectionDAG &DAG) const {
781 SDValue Chain = Op.getOperand(0);
782 SDValue Size = Op.getOperand(1);
785 unsigned SPReg = getStackPointerRegisterToSaveRestore();
787 // Get a reference to the stack pointer.
788 SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);
790 // Subtract the dynamic size from the actual stack size to
791 // obtain the new stack size.
792 SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);
795 // For Hexagon, the outgoing memory arguments area should be on top of the
796   // alloca area on the stack, i.e., the outgoing memory arguments should be
797   // at a lower address than the alloca area. Move the alloca area down the
798   // stack by adding back the space reserved for outgoing arguments to SP here.
801 // We do not know what the size of the outgoing args is at this point.
802 // So, we add a pseudo instruction ADJDYNALLOC that will adjust the
803 // stack pointer. We patch this instruction with the correct, known
804 // offset in emitPrologue().
806 // Use a placeholder immediate (zero) for now. This will be patched up
807 // by emitPrologue().
808 SDValue ArgAdjust = DAG.getNode(HexagonISD::ADJDYNALLOC, dl,
811 DAG.getConstant(0, MVT::i32));
813 // The Sub result contains the new stack start address, so it
814 // must be placed in the stack pointer register.
815 SDValue CopyChain = DAG.getCopyToReg(Chain, dl,
816 TM.getRegisterInfo()->getStackRegister(),
819 SDValue Ops[2] = { ArgAdjust, CopyChain };
820 return DAG.getMergeValues(Ops, dl);
824 HexagonTargetLowering::LowerFormalArguments(SDValue Chain,
825 CallingConv::ID CallConv,
828 SmallVectorImpl<ISD::InputArg> &Ins,
829 SDLoc dl, SelectionDAG &DAG,
830 SmallVectorImpl<SDValue> &InVals)
833 MachineFunction &MF = DAG.getMachineFunction();
834 MachineFrameInfo *MFI = MF.getFrameInfo();
835 MachineRegisterInfo &RegInfo = MF.getRegInfo();
836 HexagonMachineFunctionInfo *FuncInfo =
837 MF.getInfo<HexagonMachineFunctionInfo>();
840 // Assign locations to all of the incoming arguments.
841 SmallVector<CCValAssign, 16> ArgLocs;
842 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
843 getTargetMachine(), ArgLocs, *DAG.getContext());
845 CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);
847   // For LLVM, in the case when returning a struct by value (> 8 bytes),
848   // the first argument is a pointer that points to the location on the caller's
849   // stack where the return value will be stored. For Hexagon, that location on
850   // the caller's stack is passed only when the struct size is bigger than 8
851   // bytes; otherwise no address is passed into the callee and the callee
852   // returns the result directly through R0/R1.
854 SmallVector<SDValue, 4> MemOps;
856 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
857 CCValAssign &VA = ArgLocs[i];
858 ISD::ArgFlagsTy Flags = Ins[i].Flags;
860 unsigned StackLocation;
863 if ( (VA.isRegLoc() && !Flags.isByVal())
864 || (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() > 8)) {
865       // Arguments passed in registers:
866       // 1. int, long long, and pointer args that get allocated in registers.
867       // 2. Large structs that get a register to hold their address.
868 EVT RegVT = VA.getLocVT();
869 if (RegVT == MVT::i8 || RegVT == MVT::i16 ||
870 RegVT == MVT::i32 || RegVT == MVT::f32) {
872 RegInfo.createVirtualRegister(&Hexagon::IntRegsRegClass);
873 RegInfo.addLiveIn(VA.getLocReg(), VReg);
874 InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
875 } else if (RegVT == MVT::i64) {
877 RegInfo.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
878 RegInfo.addLiveIn(VA.getLocReg(), VReg);
879 InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
883 } else if (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() <= 8) {
884 assert (0 && "ByValSize must be bigger than 8 bytes");
887 assert(VA.isMemLoc());
889 if (Flags.isByVal()) {
890 // If it's a byval parameter, then we need to compute the
891 // "real" size, not the size of the pointer.
892 ObjSize = Flags.getByValSize();
894 ObjSize = VA.getLocVT().getStoreSizeInBits() >> 3;
897 StackLocation = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
898 // Create the frame index object for this incoming parameter...
899 FI = MFI->CreateFixedObject(ObjSize, StackLocation, true);
901       // Create the SelectionDAG nodes corresponding to a load
902 // from this parameter.
903 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
905 if (Flags.isByVal()) {
906 // If it's a pass-by-value aggregate, then do not dereference the stack
907         // location. Instead, we should generate a reference to the stack location.
909 InVals.push_back(FIN);
911 InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
912 MachinePointerInfo(), false, false,
919 Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
922 // This will point to the next argument passed via stack.
923 int FrameIndex = MFI->CreateFixedObject(Hexagon_PointerSize,
925 CCInfo.getNextStackOffset(),
927 FuncInfo->setVarArgsFrameIndex(FrameIndex);
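// The frame index recorded here marks where the first unnamed argument lives
// on the stack; LowerVASTART below materializes it as the initial va_list
// value.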
934 HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
935 // VASTART stores the address of the VarArgsFrameIndex slot into the
936 // memory location argument.
937 MachineFunction &MF = DAG.getMachineFunction();
938 HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
939 SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
940 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
941 return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr,
942 Op.getOperand(1), MachinePointerInfo(SV), false,
947 HexagonTargetLowering::LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const {
948 SDValue LHS = Op.getOperand(0);
949 SDValue RHS = Op.getOperand(1);
950 SDValue CC = Op.getOperand(4);
951 SDValue TrueVal = Op.getOperand(2);
952 SDValue FalseVal = Op.getOperand(3);
954 SDNode* OpNode = Op.getNode();
955 EVT SVT = OpNode->getValueType(0);
957 SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i1, LHS, RHS, CC);
958 return DAG.getNode(ISD::SELECT, dl, SVT, Cond, TrueVal, FalseVal);
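// SELECT_CC(lhs, rhs, tval, fval, cc) is thus decomposed into an i1 SETCC
// followed by a plain SELECT; this pairs with the Custom action registered for
// ISD::SELECT_CC in the constructor below.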
962 HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
963 EVT ValTy = Op.getValueType();
965 ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
967 if (CP->isMachineConstantPoolEntry())
968 Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), ValTy,
971 Res = DAG.getTargetConstantPool(CP->getConstVal(), ValTy,
973 return DAG.getNode(HexagonISD::CONST32, dl, ValTy, Res);
977 HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
978 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
979 MachineFunction &MF = DAG.getMachineFunction();
980 MachineFrameInfo *MFI = MF.getFrameInfo();
981 MFI->setReturnAddressIsTaken(true);
983 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
986 EVT VT = Op.getValueType();
988 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
990 SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
991 SDValue Offset = DAG.getConstant(4, MVT::i32);
992 return DAG.getLoad(VT, dl, DAG.getEntryNode(),
993 DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
994 MachinePointerInfo(), false, false, false, 0);
997 // Return LR, which contains the return address. Mark it an implicit live-in.
998 unsigned Reg = MF.addLiveIn(TRI->getRARegister(), getRegClassFor(MVT::i32));
999 return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
1003 HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
1004 const HexagonRegisterInfo *TRI = TM.getRegisterInfo();
1005 MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
1006 MFI->setFrameAddressIsTaken(true);
1008 EVT VT = Op.getValueType();
1010 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
1011 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
1012 TRI->getFrameRegister(), VT);
1014 FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
1015 MachinePointerInfo(),
1016 false, false, false, 0);
1020 SDValue HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op,
1021 SelectionDAG& DAG) const {
1023 return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
1027 SDValue HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op,
1028 SelectionDAG &DAG) const {
1030 const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
1031 int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
1033 Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);
1035 const HexagonTargetObjectFile &TLOF =
1036 static_cast<const HexagonTargetObjectFile &>(getObjFileLowering());
1037 if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
1038 return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), Result);
1041 return DAG.getNode(HexagonISD::CONST32, dl, getPointerTy(), Result);
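// Globals placed in the small-data section are materialized GP-relative via
// CONST32_GP; everything else becomes an absolute 32-bit constant via CONST32.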
1045 HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
1046 const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
1047 SDValue BA_SD = DAG.getTargetBlockAddress(BA, MVT::i32);
1049 return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), BA_SD);
1052 //===----------------------------------------------------------------------===//
1053 // TargetLowering Implementation
1054 //===----------------------------------------------------------------------===//
1056 HexagonTargetLowering::HexagonTargetLowering(HexagonTargetMachine
1058 : TargetLowering(targetmachine, new HexagonTargetObjectFile()),
1061 const HexagonRegisterInfo* QRI = TM.getRegisterInfo();
1063 // Set up the register classes.
1064 addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
1065 addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);
1067 if (QRI->Subtarget.hasV5TOps()) {
1068 addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
1069 addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
1072 addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);
1074 computeRegisterProperties();
1077 setPrefLoopAlignment(4);
1079 // Limits for inline expansion of memcpy/memmove
1080 MaxStoresPerMemcpy = 6;
1081 MaxStoresPerMemmove = 6;
1084 // Library calls for unsupported operations
1087 setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
1088 setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");
1090 setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
1091 setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");
1093 setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
1094 setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");
1096 setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
1097 setOperationAction(ISD::SDIV, MVT::i32, Expand);
1098 setLibcallName(RTLIB::SREM_I32, "__hexagon_umodsi3");
1099 setOperationAction(ISD::SREM, MVT::i32, Expand);
1101 setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
1102 setOperationAction(ISD::SDIV, MVT::i64, Expand);
1103 setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
1104 setOperationAction(ISD::SREM, MVT::i64, Expand);
1106 setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
1107 setOperationAction(ISD::UDIV, MVT::i32, Expand);
1109 setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
1110 setOperationAction(ISD::UDIV, MVT::i64, Expand);
1112 setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
1113 setOperationAction(ISD::UREM, MVT::i32, Expand);
1115 setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
1116 setOperationAction(ISD::UREM, MVT::i64, Expand);
1118 setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
1119 setOperationAction(ISD::FDIV, MVT::f32, Expand);
1121 setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
1122 setOperationAction(ISD::FDIV, MVT::f64, Expand);
1124 setOperationAction(ISD::FSQRT, MVT::f32, Expand);
1125 setOperationAction(ISD::FSQRT, MVT::f64, Expand);
1126 setOperationAction(ISD::FSIN, MVT::f32, Expand);
1127 setOperationAction(ISD::FSIN, MVT::f64, Expand);
1129 if (QRI->Subtarget.hasV5TOps()) {
1130 // Hexagon V5 Support.
1131 setOperationAction(ISD::FADD, MVT::f32, Legal);
1132 setOperationAction(ISD::FADD, MVT::f64, Legal);
1133 setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
1134 setCondCodeAction(ISD::SETOEQ, MVT::f32, Legal);
1135 setCondCodeAction(ISD::SETOEQ, MVT::f64, Legal);
1136 setCondCodeAction(ISD::SETUEQ, MVT::f32, Legal);
1137 setCondCodeAction(ISD::SETUEQ, MVT::f64, Legal);
1139 setCondCodeAction(ISD::SETOGE, MVT::f32, Legal);
1140 setCondCodeAction(ISD::SETOGE, MVT::f64, Legal);
1141 setCondCodeAction(ISD::SETUGE, MVT::f32, Legal);
1142 setCondCodeAction(ISD::SETUGE, MVT::f64, Legal);
1144 setCondCodeAction(ISD::SETOGT, MVT::f32, Legal);
1145 setCondCodeAction(ISD::SETOGT, MVT::f64, Legal);
1146 setCondCodeAction(ISD::SETUGT, MVT::f32, Legal);
1147 setCondCodeAction(ISD::SETUGT, MVT::f64, Legal);
1149 setCondCodeAction(ISD::SETOLE, MVT::f32, Legal);
1150 setCondCodeAction(ISD::SETOLE, MVT::f64, Legal);
1151 setCondCodeAction(ISD::SETOLT, MVT::f32, Legal);
1152 setCondCodeAction(ISD::SETOLT, MVT::f64, Legal);
1154 setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
1155 setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
1157 setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
1158 setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
1159 setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
1160 setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);
1162 setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
1163 setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
1164 setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
1165 setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);
1167 setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
1168 setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
1169 setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
1170 setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
1172 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
1173 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
1174 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
1175 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);
1177 setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
1178 setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
1179 setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
1180 setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);
1182 setOperationAction(ISD::FABS, MVT::f32, Legal);
1183 setOperationAction(ISD::FABS, MVT::f64, Expand);
1185 setOperationAction(ISD::FNEG, MVT::f32, Legal);
1186 setOperationAction(ISD::FNEG, MVT::f64, Expand);
1189 // Expand fp<->uint.
1190 setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
1191 setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);
1193 setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
1194 setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);
1196 setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
1197 setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");
1199 setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
1200 setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");
1202 setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
1203 setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");
1205 setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
1206 setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");
1208 setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
1209 setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");
1211 setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
1212 setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");
1214 setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
1215 setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");
1217 setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
1218 setOperationAction(ISD::FADD, MVT::f64, Expand);
1220 setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
1221 setOperationAction(ISD::FADD, MVT::f32, Expand);
1223 setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
1224 setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);
1226 setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
1227 setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);
1229 setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
1230 setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);
1232 setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
1233 setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);
1235 setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
1236 setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);
1238 setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
1239 setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);
1241 setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
1242 setCondCodeAction(ISD::SETOGT, MVT::f64, Expand);
1244 setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
1245 setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);
1247 setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
1248 setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);
1250 setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
1251 setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);
1253 setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
1254 setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);
1256 setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
1257 setCondCodeAction(ISD::SETOLT, MVT::f64, Expand);
1259 setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
1260 setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);
1262 setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
1263 setOperationAction(ISD::FMUL, MVT::f64, Expand);
1265 setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
1266 setOperationAction(ISD::MUL, MVT::f32, Expand);
1268 setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
1269 setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);
1271 setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");
1273 setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
1274 setOperationAction(ISD::SUB, MVT::f64, Expand);
1276 setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
1277 setOperationAction(ISD::SUB, MVT::f32, Expand);
1279 setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
1280 setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);
1282 setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
1283 setCondCodeAction(ISD::SETUO, MVT::f64, Expand);
1285 setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
1286 setCondCodeAction(ISD::SETO, MVT::f64, Expand);
1288 setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
1289 setCondCodeAction(ISD::SETO, MVT::f32, Expand);
1291 setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
1292 setCondCodeAction(ISD::SETUO, MVT::f32, Expand);
1294 setOperationAction(ISD::FABS, MVT::f32, Expand);
1295 setOperationAction(ISD::FABS, MVT::f64, Expand);
1296 setOperationAction(ISD::FNEG, MVT::f32, Expand);
1297 setOperationAction(ISD::FNEG, MVT::f64, Expand);
1300 setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
1301 setOperationAction(ISD::SREM, MVT::i32, Expand);
1303 setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
1304 setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
1305 setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
1306 setIndexedLoadAction(ISD::POST_INC, MVT::i64, Legal);
1308 setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
1309 setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
1310 setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
1311 setIndexedStoreAction(ISD::POST_INC, MVT::i64, Legal);
1313 setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);
1315 // Turn FP extload into load/fextend.
1316 setLoadExtAction(ISD::EXTLOAD, MVT::f32, Expand);
1317 // Hexagon has a i1 sign extending load.
1318 setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Expand);
1319 // Turn FP truncstore into trunc + store.
1320 setTruncStoreAction(MVT::f64, MVT::f32, Expand);
1322 // Custom legalize GlobalAddress nodes into CONST32.
1323 setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
1324 setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
1325 setOperationAction(ISD::BlockAddress, MVT::i32, Custom);
1327 setOperationAction(ISD::TRUNCATE, MVT::i64, Expand);
1329 // Hexagon doesn't have sext_inreg, replace them with shl/sra.
1330 setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
1332 // Hexagon has no REM or DIVREM operations.
1333 setOperationAction(ISD::UREM, MVT::i32, Expand);
1334 setOperationAction(ISD::SREM, MVT::i32, Expand);
1335 setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
1336 setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
1337 setOperationAction(ISD::SREM, MVT::i64, Expand);
1338 setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
1339 setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
1341 setOperationAction(ISD::BSWAP, MVT::i64, Expand);
1343 // Lower SELECT_CC to SETCC and SELECT.
1344 setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
1345 setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
1347 if (QRI->Subtarget.hasV5TOps()) {
1349     // We need to make the operation type of the SELECT node Custom,
1350     // so that we don't go into the infinite loop of
1351     // select -> setcc -> select_cc -> select.
1352 setOperationAction(ISD::SELECT, MVT::f32, Custom);
1353 setOperationAction(ISD::SELECT, MVT::f64, Custom);
1355 setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
1356 setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
1357 setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
1361 // Hexagon has no select or setcc: expand to SELECT_CC.
1362 setOperationAction(ISD::SELECT, MVT::f32, Expand);
1363 setOperationAction(ISD::SELECT, MVT::f64, Expand);
1365     // This is a workaround documented in DAGCombiner.cpp:2892. We don't
1366 // support SELECT_CC on every type.
1367 setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);
1371 if (EmitJumpTables) {
1372 setOperationAction(ISD::BR_JT, MVT::Other, Custom);
1374 setOperationAction(ISD::BR_JT, MVT::Other, Expand);
1376   // Increase the jump table cutover to 5; it was 4.
1377 setMinimumJumpTableEntries(5);
1379 setOperationAction(ISD::BR_CC, MVT::f32, Expand);
1380 setOperationAction(ISD::BR_CC, MVT::f64, Expand);
1381 setOperationAction(ISD::BR_CC, MVT::i1, Expand);
1382 setOperationAction(ISD::BR_CC, MVT::i32, Expand);
1383 setOperationAction(ISD::BR_CC, MVT::i64, Expand);
1385 setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);
1387 setOperationAction(ISD::FSIN , MVT::f64, Expand);
1388 setOperationAction(ISD::FCOS , MVT::f64, Expand);
1389 setOperationAction(ISD::FREM , MVT::f64, Expand);
1390 setOperationAction(ISD::FSIN , MVT::f32, Expand);
1391 setOperationAction(ISD::FCOS , MVT::f32, Expand);
1392 setOperationAction(ISD::FREM , MVT::f32, Expand);
1393 setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
1394 setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
1396   // In V4, we have double word add/sub with carry. The problem with
1397   // modelling these instructions is that they produce two results - Rdd and Px.
1398   // To model the update of Px, we would have to use Defs[p0..p3], which would
1399   // cause any predicate live range to spill. So, we pretend we don't
1400   // have these instructions.
1401 setOperationAction(ISD::ADDE, MVT::i8, Expand);
1402 setOperationAction(ISD::ADDE, MVT::i16, Expand);
1403 setOperationAction(ISD::ADDE, MVT::i32, Expand);
1404 setOperationAction(ISD::ADDE, MVT::i64, Expand);
1405 setOperationAction(ISD::SUBE, MVT::i8, Expand);
1406 setOperationAction(ISD::SUBE, MVT::i16, Expand);
1407 setOperationAction(ISD::SUBE, MVT::i32, Expand);
1408 setOperationAction(ISD::SUBE, MVT::i64, Expand);
1409 setOperationAction(ISD::ADDC, MVT::i8, Expand);
1410 setOperationAction(ISD::ADDC, MVT::i16, Expand);
1411 setOperationAction(ISD::ADDC, MVT::i32, Expand);
1412 setOperationAction(ISD::ADDC, MVT::i64, Expand);
1413 setOperationAction(ISD::SUBC, MVT::i8, Expand);
1414 setOperationAction(ISD::SUBC, MVT::i16, Expand);
1415 setOperationAction(ISD::SUBC, MVT::i32, Expand);
1416 setOperationAction(ISD::SUBC, MVT::i64, Expand);
1418 setOperationAction(ISD::CTPOP, MVT::i32, Expand);
1419 setOperationAction(ISD::CTPOP, MVT::i64, Expand);
1420 setOperationAction(ISD::CTTZ , MVT::i32, Expand);
1421 setOperationAction(ISD::CTTZ , MVT::i64, Expand);
1422 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
1423 setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
1424 setOperationAction(ISD::CTLZ , MVT::i32, Expand);
1425 setOperationAction(ISD::CTLZ , MVT::i64, Expand);
1426 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
1427 setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
1428 setOperationAction(ISD::ROTL , MVT::i32, Expand);
1429 setOperationAction(ISD::ROTR , MVT::i32, Expand);
1430 setOperationAction(ISD::BSWAP, MVT::i32, Expand);
1431 setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
1432 setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
1433 setOperationAction(ISD::FPOW , MVT::f64, Expand);
1434 setOperationAction(ISD::FPOW , MVT::f32, Expand);
1436 setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
1437 setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
1438 setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);
1440 setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
1441 setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);
1443 setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
1444 setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
1446 setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);
1448 if (TM.getSubtargetImpl()->isSubtargetV2()) {
1449 setExceptionPointerRegister(Hexagon::R20);
1450 setExceptionSelectorRegister(Hexagon::R21);
1452 setExceptionPointerRegister(Hexagon::R0);
1453 setExceptionSelectorRegister(Hexagon::R1);
1456 // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
1457 setOperationAction(ISD::VASTART , MVT::Other, Custom);
1459 // Use the default implementation.
1460 setOperationAction(ISD::VAARG , MVT::Other, Expand);
1461 setOperationAction(ISD::VACOPY , MVT::Other, Expand);
1462 setOperationAction(ISD::VAEND , MVT::Other, Expand);
1463 setOperationAction(ISD::STACKSAVE , MVT::Other, Expand);
1464 setOperationAction(ISD::STACKRESTORE , MVT::Other, Expand);
1467 setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32 , Custom);
1468 setOperationAction(ISD::INLINEASM , MVT::Other, Custom);
1470 setMinFunctionAlignment(2);
1472 // Needed for DYNAMIC_STACKALLOC expansion.
1473 unsigned StackRegister = TM.getRegisterInfo()->getStackRegister();
1474 setStackPointerRegisterToSaveRestore(StackRegister);
1475 setSchedulingPreference(Sched::VLIW);
1480 HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
1482 default: return nullptr;
1483 case HexagonISD::CONST32: return "HexagonISD::CONST32";
1484 case HexagonISD::CONST32_GP: return "HexagonISD::CONST32_GP";
1485 case HexagonISD::CONST32_Int_Real: return "HexagonISD::CONST32_Int_Real";
1486 case HexagonISD::ADJDYNALLOC: return "HexagonISD::ADJDYNALLOC";
1487 case HexagonISD::CMPICC: return "HexagonISD::CMPICC";
1488 case HexagonISD::CMPFCC: return "HexagonISD::CMPFCC";
1489 case HexagonISD::BRICC: return "HexagonISD::BRICC";
1490 case HexagonISD::BRFCC: return "HexagonISD::BRFCC";
1491 case HexagonISD::SELECT_ICC: return "HexagonISD::SELECT_ICC";
1492 case HexagonISD::SELECT_FCC: return "HexagonISD::SELECT_FCC";
1493 case HexagonISD::Hi: return "HexagonISD::Hi";
1494 case HexagonISD::Lo: return "HexagonISD::Lo";
1495 case HexagonISD::FTOI: return "HexagonISD::FTOI";
1496 case HexagonISD::ITOF: return "HexagonISD::ITOF";
1497 case HexagonISD::CALL: return "HexagonISD::CALL";
1498 case HexagonISD::RET_FLAG: return "HexagonISD::RET_FLAG";
1499 case HexagonISD::BR_JT: return "HexagonISD::BR_JT";
1500 case HexagonISD::TC_RETURN: return "HexagonISD::TC_RETURN";
1501 case HexagonISD::EH_RETURN: return "HexagonISD::EH_RETURN";
1506 HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
1507 EVT MTy1 = EVT::getEVT(Ty1);
1508 EVT MTy2 = EVT::getEVT(Ty2);
1509 if (!MTy1.isSimple() || !MTy2.isSimple()) {
1512 return ((MTy1.getSimpleVT() == MVT::i64) && (MTy2.getSimpleVT() == MVT::i32));
1515 bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
1516 if (!VT1.isSimple() || !VT2.isSimple()) {
1519 return ((VT1.getSimpleVT() == MVT::i64) && (VT2.getSimpleVT() == MVT::i32));
1523 HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
1524 // Assuming the caller does not have either a signext or zeroext modifier, and
1525 // only one value is accepted, any reasonable truncation is allowed.
1526 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
1529 // FIXME: in principle up to 64-bit could be made safe, but it would be very
1530 // fragile at the moment: any support for multiple value returns would be
1531 // liable to disallow tail calls involving i64 -> iN truncation in many cases.
1532 return Ty1->getPrimitiveSizeInBits() <= 32;
1536 HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
1537 SDValue Chain = Op.getOperand(0);
1538 SDValue Offset = Op.getOperand(1);
1539 SDValue Handler = Op.getOperand(2);
1542 // Mark function as containing a call to EH_RETURN.
1543 HexagonMachineFunctionInfo *FuncInfo =
1544 DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>();
1545 FuncInfo->setHasEHReturn();
1547 unsigned OffsetReg = Hexagon::R28;
1549 SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(),
1550 DAG.getRegister(Hexagon::R30, getPointerTy()),
1551 DAG.getIntPtrConstant(4));
1552 Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
1554 Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);
1556   // Not needed: we already use it as an explicit input to EH_RETURN.
1557 // MF.getRegInfo().addLiveOut(OffsetReg);
1559 return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
1563 HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
1564 switch (Op.getOpcode()) {
1565 default: llvm_unreachable("Should not custom lower this!");
1566 case ISD::ConstantPool: return LowerConstantPool(Op, DAG);
1567 case ISD::EH_RETURN: return LowerEH_RETURN(Op, DAG);
1568 // Frame & Return address. Currently unimplemented.
1569 case ISD::RETURNADDR: return LowerRETURNADDR(Op, DAG);
1570 case ISD::FRAMEADDR: return LowerFRAMEADDR(Op, DAG);
1571 case ISD::GlobalTLSAddress:
1572 llvm_unreachable("TLS not implemented for Hexagon.");
1573 case ISD::ATOMIC_FENCE: return LowerATOMIC_FENCE(Op, DAG);
1574 case ISD::GlobalAddress: return LowerGLOBALADDRESS(Op, DAG);
1575 case ISD::BlockAddress: return LowerBlockAddress(Op, DAG);
1576 case ISD::VASTART: return LowerVASTART(Op, DAG);
1577 case ISD::BR_JT: return LowerBR_JT(Op, DAG);
1579 case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
1580 case ISD::SELECT_CC: return LowerSELECT_CC(Op, DAG);
1581 case ISD::SELECT: return Op;
1582 case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
1583 case ISD::INLINEASM: return LowerINLINEASM(Op, DAG);
1590 //===----------------------------------------------------------------------===//
1591 // Hexagon Scheduler Hooks
1592 //===----------------------------------------------------------------------===//
1594 HexagonTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
1595 MachineBasicBlock *BB)
1597 switch (MI->getOpcode()) {
1598 case Hexagon::ADJDYNALLOC: {
1599 MachineFunction *MF = BB->getParent();
1600 HexagonMachineFunctionInfo *FuncInfo =
1601 MF->getInfo<HexagonMachineFunctionInfo>();
1602 FuncInfo->addAllocaAdjustInst(MI);
1605 default: llvm_unreachable("Unexpected instr type to insert");
1609 //===----------------------------------------------------------------------===//
1610 // Inline Assembly Support
1611 //===----------------------------------------------------------------------===//
1613 std::pair<unsigned, const TargetRegisterClass*>
1614 HexagonTargetLowering::getRegForInlineAsmConstraint(const
1615 std::string &Constraint,
1617 if (Constraint.size() == 1) {
1618 switch (Constraint[0]) {
1620 switch (VT.SimpleTy) {
1622 llvm_unreachable("getRegForInlineAsmConstraint Unhandled data type");
1627 return std::make_pair(0U, &Hexagon::IntRegsRegClass);
1630 return std::make_pair(0U, &Hexagon::DoubleRegsRegClass);
1633 llvm_unreachable("Unknown asm register class");
1637 return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
1640 /// isFPImmLegal - Returns true if the target can instruction select the
1641 /// specified FP immediate natively. If false, the legalizer will
1642 /// materialize the FP immediate as a load from a constant pool.
1643 bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
1644 const HexagonRegisterInfo* QRI = TM.getRegisterInfo();
1645 return QRI->Subtarget.hasV5TOps();
1648 /// isLegalAddressingMode - Return true if the addressing mode represented by
1649 /// AM is legal for this target, for a load/store of the specified type.
1650 bool HexagonTargetLowering::isLegalAddressingMode(const AddrMode &AM,
1652   // Allows a sign-extended 11-bit immediate field.
1653 if (AM.BaseOffs <= -(1LL << 13) || AM.BaseOffs >= (1LL << 13)-1) {
1657 // No global is ever allowed as a base.
1662 int Scale = AM.Scale;
1663 if (Scale < 0) Scale = -Scale;
1665 case 0: // No scale reg, "r+i", "r", or just "i".
1667 default: // No scaled addressing mode.
1673 /// isLegalICmpImmediate - Return true if the specified immediate is a legal
1674 /// icmp immediate, that is, the target has icmp instructions which can compare
1675 /// a register against the immediate without having to materialize the
1676 /// immediate into a register.
1677 bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
1678 return Imm >= -512 && Imm <= 511;
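// [-512, 511] is a signed 10-bit range, matching the immediate field that
// Hexagon's compare instructions accept.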
1681 /// IsEligibleForTailCallOptimization - Check whether the call is eligible
1682 /// for tail call optimization. Targets which want to do tail call
1683 /// optimization should implement this function.
1684 bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
1686 CallingConv::ID CalleeCC,
1688 bool isCalleeStructRet,
1689 bool isCallerStructRet,
1690 const SmallVectorImpl<ISD::OutputArg> &Outs,
1691 const SmallVectorImpl<SDValue> &OutVals,
1692 const SmallVectorImpl<ISD::InputArg> &Ins,
1693 SelectionDAG& DAG) const {
1694 const Function *CallerF = DAG.getMachineFunction().getFunction();
1695 CallingConv::ID CallerCC = CallerF->getCallingConv();
1696 bool CCMatch = CallerCC == CalleeCC;
1698 // ***************************************************************************
1699 // Look for obvious safe cases to perform tail call optimization that do not
1700 // require ABI changes.
1701 // ***************************************************************************
1703 // If this is a tail call via a function pointer, then don't do it!
1704 if (!(dyn_cast<GlobalAddressSDNode>(Callee))
1705 && !(dyn_cast<ExternalSymbolSDNode>(Callee))) {
1709 // Do not optimize if the calling conventions do not match.
1713 // Do not tail call optimize vararg calls.
1717 // Also avoid tail call optimization if either caller or callee uses struct
1718 // return semantics.
1719 if (isCalleeStructRet || isCallerStructRet)
1722 // In addition to the cases above, we also disable Tail Call Optimization if
1723 // the calling convention decides that at least one outgoing argument needs to
1724 // go on the stack. We cannot check that here because at this point that
1725 // information is not available.