//===-- HexagonISelLowering.cpp - Hexagon DAG Lowering Implementation ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the interfaces that Hexagon uses to lower LLVM code
// into a selection DAG.
//
//===----------------------------------------------------------------------===//

#include "HexagonISelLowering.h"
#include "HexagonMachineFunctionInfo.h"
#include "HexagonSubtarget.h"
#include "HexagonTargetMachine.h"
#include "HexagonTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "hexagon-lowering"

static cl::opt<bool>
EmitJumpTables("hexagon-emit-jump-tables", cl::init(true), cl::Hidden,
               cl::desc("Control jump table emission on Hexagon target"));
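// Being a cl::opt, this switch is exposed by tools hosting this backend; for
// example (assuming a stock llc build with the Hexagon target enabled), jump
// table emission can be disabled with:
//   llc -march=hexagon -hexagon-emit-jump-tables=false foo.ll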

namespace {
class HexagonCCState : public CCState {
  int NumNamedVarArgParams;

public:
  HexagonCCState(CallingConv::ID CC, bool isVarArg, MachineFunction &MF,
                 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C,
                 int NumNamedVarArgParams)
      : CCState(CC, isVarArg, MF, locs, C),
        NumNamedVarArgParams(NumNamedVarArgParams) {}

  int getNumNamedVarArgParams() const { return NumNamedVarArgParams; }
};
}

// Implement calling convention for Hexagon.
static bool
CC_Hexagon(unsigned ValNo, MVT ValVT,
           MVT LocVT, CCValAssign::LocInfo LocInfo,
           ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
CC_Hexagon32(unsigned ValNo, MVT ValVT,
             MVT LocVT, CCValAssign::LocInfo LocInfo,
             ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
CC_Hexagon64(unsigned ValNo, MVT ValVT,
             MVT LocVT, CCValAssign::LocInfo LocInfo,
             ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
RetCC_Hexagon(unsigned ValNo, MVT ValVT,
              MVT LocVT, CCValAssign::LocInfo LocInfo,
              ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
                MVT LocVT, CCValAssign::LocInfo LocInfo,
                ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
                MVT LocVT, CCValAssign::LocInfo LocInfo,
                ISD::ArgFlagsTy ArgFlags, CCState &State);

static bool
CC_Hexagon_VarArg(unsigned ValNo, MVT ValVT,
                  MVT LocVT, CCValAssign::LocInfo LocInfo,
                  ISD::ArgFlagsTy ArgFlags, CCState &State) {
  HexagonCCState &HState = static_cast<HexagonCCState &>(State);

  // NumNamedVarArgParams cannot be zero for a vararg function.
  assert((HState.getNumNamedVarArgParams() > 0) &&
         "NumNamedVarArgParams must be positive");

  if ((int)ValNo < HState.getNumNamedVarArgParams()) {
    // Deal with named arguments.
    return CC_Hexagon(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State);
  }

  // Deal with un-named arguments.
  unsigned ofst;
  if (ArgFlags.isByVal()) {
    // If pass-by-value, the size allocated on stack is decided
    // by ArgFlags.getByValSize(), not by the size of LocVT.
    assert((ArgFlags.getByValSize() > 8) &&
           "ByValSize must be bigger than 8 bytes");
    ofst = State.AllocateStack(ArgFlags.getByValSize(), 4);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
    return false;
  }
  if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
    LocVT = MVT::i32;
    ValVT = MVT::i32;
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
    else
      LocInfo = CCValAssign::AExt;
  }
  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    ofst = State.AllocateStack(4, 4);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
    return false;
  }
  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    ofst = State.AllocateStack(8, 8);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, ofst, LocVT, LocInfo));
    return false;
  }
  llvm_unreachable(nullptr);
}
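// Illustrative example (not part of the original source): for a call such as
//   printf("%d %f", i, d)
// only the format pointer is a named parameter, so it is assigned by
// CC_Hexagon (and lands in R0), while the unnamed i32 and f64 arguments fall
// through to the AllocateStack calls above and are passed in the overflow
// area at 4- and 8-byte aligned offsets respectively.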

static bool
CC_Hexagon(unsigned ValNo, MVT ValVT,
           MVT LocVT, CCValAssign::LocInfo LocInfo,
           ISD::ArgFlagsTy ArgFlags, CCState &State) {

  if (ArgFlags.isByVal()) {
    // Passed on stack.
    assert((ArgFlags.getByValSize() > 8) &&
           "ByValSize must be bigger than 8 bytes");
    unsigned Offset = State.AllocateStack(ArgFlags.getByValSize(), 4);
    State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
    return false;
  }

  if (LocVT == MVT::i1 || LocVT == MVT::i8 || LocVT == MVT::i16) {
    LocVT = MVT::i32;
    ValVT = MVT::i32;
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
    else
      LocInfo = CCValAssign::AExt;
  }

  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    if (!CC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false;
  }

  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    if (!CC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false;
  }

  return true; // CC didn't match.
}

static bool CC_Hexagon32(unsigned ValNo, MVT ValVT,
                         MVT LocVT, CCValAssign::LocInfo LocInfo,
                         ISD::ArgFlagsTy ArgFlags, CCState &State) {

  static const MCPhysReg RegList[] = {
    Hexagon::R0, Hexagon::R1, Hexagon::R2, Hexagon::R3, Hexagon::R4,
    Hexagon::R5
  };
  if (unsigned Reg = State.AllocateReg(RegList, 6)) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  unsigned Offset = State.AllocateStack(4, 4);
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false;
}

static bool CC_Hexagon64(unsigned ValNo, MVT ValVT,
                         MVT LocVT, CCValAssign::LocInfo LocInfo,
                         ISD::ArgFlagsTy ArgFlags, CCState &State) {

  if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  static const MCPhysReg RegList1[] = {
    Hexagon::D1, Hexagon::D2
  };
  static const MCPhysReg RegList2[] = {
    Hexagon::R1, Hexagon::R3
  };
  if (unsigned Reg = State.AllocateReg(RegList1, RegList2, 2)) {
    State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
    return false;
  }

  unsigned Offset = State.AllocateStack(8, 8, Hexagon::D2);
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false;
}
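// Rough illustration of the assignment performed by the helpers above (not
// taken from the source; exact registers follow from the allocation order and
// the D1/D2 shadowing of R1/R3):
//   void f(int a, long long b, int c)
//     a -> R0, b -> R3:R2 (D1), c -> R4; anything beyond R5/D2 is placed on
//     the stack at a 4- or 8-byte aligned offset.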

static bool RetCC_Hexagon(unsigned ValNo, MVT ValVT,
                          MVT LocVT, CCValAssign::LocInfo LocInfo,
                          ISD::ArgFlagsTy ArgFlags, CCState &State) {

  if (LocVT == MVT::i1 ||
      LocVT == MVT::i8 ||
      LocVT == MVT::i16) {
    LocVT = MVT::i32;
    ValVT = MVT::i32;
    if (ArgFlags.isSExt())
      LocInfo = CCValAssign::SExt;
    else if (ArgFlags.isZExt())
      LocInfo = CCValAssign::ZExt;
    else
      LocInfo = CCValAssign::AExt;
  }

  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    if (!RetCC_Hexagon32(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false;
  }

  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    if (!RetCC_Hexagon64(ValNo, ValVT, LocVT, LocInfo, ArgFlags, State))
      return false;
  }

  return true; // CC didn't match.
}

static bool RetCC_Hexagon32(unsigned ValNo, MVT ValVT,
                            MVT LocVT, CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {

  if (LocVT == MVT::i32 || LocVT == MVT::f32) {
    if (unsigned Reg = State.AllocateReg(Hexagon::R0)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  unsigned Offset = State.AllocateStack(4, 4);
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false;
}

static bool RetCC_Hexagon64(unsigned ValNo, MVT ValVT,
                            MVT LocVT, CCValAssign::LocInfo LocInfo,
                            ISD::ArgFlagsTy ArgFlags, CCState &State) {
  if (LocVT == MVT::i64 || LocVT == MVT::f64) {
    if (unsigned Reg = State.AllocateReg(Hexagon::D0)) {
      State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
      return false;
    }
  }

  unsigned Offset = State.AllocateStack(8, 8);
  State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
  return false;
}

SDValue
HexagonTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG)
const {
  return SDValue();
}

/// CreateCopyOfByValArgument - Make a copy of an aggregate at address specified
/// by "Src" to address "Dst" of size "Size". Alignment information is
/// specified by the specific parameter attribute. The copy will be passed as
/// a byval function parameter. Sometimes what we are copying is the end of a
/// larger object, the part that does not fit in registers.
static SDValue
CreateCopyOfByValArgument(SDValue Src, SDValue Dst, SDValue Chain,
                          ISD::ArgFlagsTy Flags, SelectionDAG &DAG,
                          SDLoc dl) {

  SDValue SizeNode = DAG.getConstant(Flags.getByValSize(), MVT::i32);
  return DAG.getMemcpy(Chain, dl, Dst, Src, SizeNode, Flags.getByValAlign(),
                       /*isVolatile=*/false, /*AlwaysInline=*/false,
                       MachinePointerInfo(), MachinePointerInfo());
}

// LowerReturn - Lower ISD::RET. If a struct is larger than 8 bytes and is
// passed by value, the function prototype is modified to return void and
// the value is stored in memory pointed to by a pointer passed by the caller.
SDValue
HexagonTargetLowering::LowerReturn(SDValue Chain,
                                   CallingConv::ID CallConv, bool isVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   SDLoc dl, SelectionDAG &DAG) const {

  // CCValAssign - represent the assignment of the return value to locations.
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slots.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  // Analyze return values of ISD::RET.
  CCInfo.AnalyzeReturn(Outs, RetCC_Hexagon);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(), OutVals[i], Flag);

    // Guarantee that all emitted copies are stuck together with flags.
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(HexagonISD::RET_FLAG, dl, MVT::Other, RetOps);
}

/// LowerCallResult - Lower the result values of an ISD::CALL into the
/// appropriate copies out of appropriate physical registers. This assumes that
/// Chain/InFlag are the input chain/flag to use, and that TheCall is the call
/// being lowered. Returns an SDNode with the same number of values as the
/// ISD::CALL.
SDValue
HexagonTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                       CallingConv::ID CallConv, bool isVarArg,
                                       const
                                       SmallVectorImpl<ISD::InputArg> &Ins,
                                       SDLoc dl, SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals,
                                       const SmallVectorImpl<SDValue> &OutVals,
                                       SDValue Callee) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;

  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeCallResult(Ins, RetCC_Hexagon);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    Chain = DAG.getCopyFromReg(Chain, dl,
                               RVLocs[i].getLocReg(),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

/// LowerCall - Function arguments are copied from virtual regs to
/// (physical regs)/(stack frame); CALLSEQ_START and CALLSEQ_END are emitted.
SDValue
HexagonTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  SDLoc &dl                             = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals     = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  bool &isTailCall                      = CLI.IsTailCall;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool isVarArg                         = CLI.IsVarArg;

  bool IsStructRet = (Outs.empty()) ? false : Outs[0].Flags.isSRet();

  // Check for varargs.
  int NumNamedVarArgParams = -1;
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const Function *CalleeFn = nullptr;
    Callee = DAG.getTargetGlobalAddress(GA->getGlobal(), dl, MVT::i32);
    if ((CalleeFn = dyn_cast<Function>(GA->getGlobal()))) {
      // If a function has zero args and is a vararg function, that's
      // disallowed so it must be an undeclared function. Do not assume
      // varargs if the callee is undefined.
      if (CalleeFn->isVarArg() &&
          CalleeFn->getFunctionType()->getNumParams() != 0) {
        NumNamedVarArgParams = CalleeFn->getFunctionType()->getNumParams();
      }
    }
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  HexagonCCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                        *DAG.getContext(), NumNamedVarArgParams);

  if (NumNamedVarArgParams > 0)
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon_VarArg);
  else
    CCInfo.AnalyzeCallOperands(Outs, CC_Hexagon);

  if (isTailCall) {
    bool StructAttrFlag =
        DAG.getMachineFunction().getFunction()->hasStructRetAttr();
    isTailCall = IsEligibleForTailCallOptimization(Callee, CallConv,
                                                   isVarArg, IsStructRet,
                                                   StructAttrFlag,
                                                   Outs, OutVals, Ins, DAG);
    for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
      CCValAssign &VA = ArgLocs[i];
      if (VA.isMemLoc()) {
        isTailCall = false;
        break;
      }
    }
    if (isTailCall) {
      DEBUG(dbgs() << "Eligible for Tail Call\n");
    } else {
      DEBUG(dbgs() <<
            "Argument must be passed on stack. Not eligible for Tail Call\n");
    }
  }
  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();
  SmallVector<std::pair<unsigned, SDValue>, 16> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;

  const HexagonRegisterInfo *QRI = static_cast<const HexagonRegisterInfo *>(
      DAG.getSubtarget().getRegisterInfo());
  SDValue StackPtr =
      DAG.getCopyFromReg(Chain, dl, QRI->getStackRegister(), getPointerTy());

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];
    ISD::ArgFlagsTy Flags = Outs[i].Flags;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default:
        // Loc info must be one of Full, SExt, ZExt, or AExt.
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    if (VA.isMemLoc()) {
      unsigned LocMemOffset = VA.getLocMemOffset();
      SDValue PtrOff = DAG.getConstant(LocMemOffset, StackPtr.getValueType());
      PtrOff = DAG.getNode(ISD::ADD, dl, MVT::i32, StackPtr, PtrOff);

      if (Flags.isByVal()) {
        // The argument is a struct passed by value. According to LLVM, "Arg"
        // is a pointer.
        MemOpChains.push_back(CreateCopyOfByValArgument(Arg, PtrOff, Chain,
                                                        Flags, DAG, dl));
      } else {
        // The argument is not passed by value. "Arg" is a builtin type. It is
        // stored on the stack.
        MemOpChains.push_back(DAG.getStore(Chain, dl, Arg, PtrOff,
                                           MachinePointerInfo(), false, false,
                                           0));
      }
      continue;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    }
  }

  // Transform all store nodes into one single node because all store
  // nodes are independent of each other.
  if (!MemOpChains.empty()) {
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOpChains);
  }

  if (!isTailCall)
    Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
                                                        getPointerTy(), true),
                                 dl);

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  if (!isTailCall) {
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
  }

  // For tail calls lower the arguments to the 'real' stack slot.
  if (isTailCall) {
    // Force all the incoming stack arguments to be loaded from the stack
    // before any new outgoing arguments are stored to the stack, because the
    // outgoing stack slots may alias the incoming argument stack slots, and
    // the alias isn't otherwise explicit. This is slightly more conservative
    // than necessary, because it means that each store effectively depends
    // on every argument instead of just those arguments it would clobber.
    //
    // Do not flag preceding copytoreg stuff together with the following stuff.
    InFlag = SDValue();
    for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
      Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                               RegsToPass[i].second, InFlag);
      InFlag = Chain.getValue(1);
    }
    InFlag = SDValue();
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is) turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (flag_aligned_memcpy) {
    const char *MemcpyName =
        "__hexagon_memcpy_likely_aligned_min32bytes_mult8bytes";
    Callee = DAG.getTargetExternalSymbol(MemcpyName, getPointerTy());
    flag_aligned_memcpy = false;
  } else if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, getPointerTy());
  } else if (ExternalSymbolSDNode *S =
                 dyn_cast<ExternalSymbolSDNode>(Callee)) {
    Callee = DAG.getTargetExternalSymbol(S->getSymbol(), getPointerTy());
  }

  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));
  }

  if (InFlag.getNode()) {
    Ops.push_back(InFlag);
  }

  if (isTailCall)
    return DAG.getNode(HexagonISD::TC_RETURN, dl, NodeTys, Ops);

  Chain = DAG.getNode(HexagonISD::CALL, dl, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, true),
                             DAG.getIntPtrConstant(0, true), InFlag, dl);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg, Ins, dl, DAG,
                         InVals, OutVals, Callee);
}

static bool getIndexedAddressParts(SDNode *Ptr, EVT VT,
                                   bool isSEXTLoad, SDValue &Base,
                                   SDValue &Offset, bool &isInc,
                                   SelectionDAG &DAG) {
  if (Ptr->getOpcode() != ISD::ADD)
    return false;

  if (VT == MVT::i64 || VT == MVT::i32 || VT == MVT::i16 || VT == MVT::i8) {
    isInc = (Ptr->getOpcode() == ISD::ADD);
    Base = Ptr->getOperand(0);
    Offset = Ptr->getOperand(1);
    // Ensure that Offset is a constant.
    return (isa<ConstantSDNode>(Offset));
  }

  return false;
}

// TODO: Put this function along with the other isS* functions in
// HexagonISelDAGToDAG.cpp into a common file. Or better still, use the
// functions defined in HexagonOperands.td.
static bool Is_PostInc_S4_Offset(SDNode *S, int ShiftAmount) {
  ConstantSDNode *N = cast<ConstantSDNode>(S);

  // immS4 predicate - True if the immediate fits in a 4-bit sign-extended
  // field.
  int64_t v = (int64_t)N->getSExtValue();
  int64_t m = 0;
  if (ShiftAmount > 0) {
    m = v % ShiftAmount;
    v = v >> ShiftAmount;
  }
  return (v <= 7) && (v >= -8) && (m == 0);
}
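// Worked example of the check above as written (illustrative only): for a
// 32-bit access ShiftAmount is 2, so an offset of 8 becomes 2 after the shift
// and is accepted, while an offset of 64 becomes 16 and is rejected because
// it no longer fits in the signed 4-bit range [-8, 7].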

/// getPostIndexedAddressParts - returns true by value, base pointer and
/// offset pointer and addressing mode by reference if this node can be
/// combined with a load / store to form a post-indexed load / store.
bool HexagonTargetLowering::getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                                       SDValue &Base,
                                                       SDValue &Offset,
                                                       ISD::MemIndexedMode &AM,
                                                       SelectionDAG &DAG) const
{
  EVT VT;
  bool isSEXTLoad = false;

  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
    VT = LD->getMemoryVT();
    isSEXTLoad = LD->getExtensionType() == ISD::SEXTLOAD;
  } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
    VT = ST->getMemoryVT();
    if (ST->getValue().getValueType() == MVT::i64 && ST->isTruncatingStore()) {
      return false;
    }
  } else {
    return false;
  }

  bool isInc = false;
  bool isLegal = getIndexedAddressParts(Op, VT, isSEXTLoad, Base, Offset,
                                        isInc, DAG);
  // ShiftAmount = number of left-shifted bits in the Hexagon instruction.
  int ShiftAmount = VT.getSizeInBits() / 16;
  if (isLegal && Is_PostInc_S4_Offset(Offset.getNode(), ShiftAmount)) {
    AM = isInc ? ISD::POST_INC : ISD::POST_DEC;
    return true;
  }

  return false;
}
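// Illustrative example (not part of the original source): with this hook the
// DAG combiner can fold
//   t1 = load t0 ; t2 = add t0, #8
// into a single post-incremented load when the constant passes the
// Is_PostInc_S4_Offset check for the access size, so the pointer update comes
// for free in the memory instruction.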

SDValue HexagonTargetLowering::LowerINLINEASM(SDValue Op,
                                              SelectionDAG &DAG) const {
  SDNode *Node = Op.getNode();
  MachineFunction &MF = DAG.getMachineFunction();
  HexagonMachineFunctionInfo *FuncInfo =
      MF.getInfo<HexagonMachineFunctionInfo>();
  switch (Node->getOpcode()) {
    case ISD::INLINEASM: {
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the flag operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        if (FuncInfo->hasClobberLR())
          break;
        unsigned Flags =
            cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
        ++i;  // Skip the ID value.

        switch (InlineAsm::getKind(Flags)) {
          default: llvm_unreachable("Bad flags!");
          case InlineAsm::Kind_RegDef:
          case InlineAsm::Kind_RegUse:
          case InlineAsm::Kind_Imm:
          case InlineAsm::Kind_Clobber:
          case InlineAsm::Kind_Mem: {
            for (; NumVals; --NumVals, ++i) {}
            break;
          }
          case InlineAsm::Kind_RegDefEarlyClobber: {
            for (; NumVals; --NumVals, ++i) {
              unsigned Reg =
                  cast<RegisterSDNode>(Node->getOperand(i))->getReg();

              // Check whether the register is LR.
              const HexagonRegisterInfo *QRI =
                  static_cast<const HexagonRegisterInfo *>(
                      DAG.getSubtarget().getRegisterInfo());
              if (Reg == QRI->getRARegister()) {
                FuncInfo->setHasClobberLR(true);
                break;
              }
            }
            break;
          }
        }
      }
    }
  } // Node->getOpcode
  return Op;
}

// Taken from the XCore backend.
//
SDValue HexagonTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  // Mark all jump table targets as address taken.
  const std::vector<MachineJumpTableEntry> &JTE = MJTI->getJumpTables();
  const std::vector<MachineBasicBlock*> &JTBBs = JTE[JTI].MBBs;
  for (unsigned i = 0, e = JTBBs.size(); i != e; ++i) {
    MachineBasicBlock *MBB = JTBBs[i];
    MBB->setHasAddressTaken();
    // This line is needed to set the hasAddressTaken flag on the BasicBlock
    // object.
    BlockAddress::get(const_cast<BasicBlock *>(MBB->getBasicBlock()));
  }

  SDValue JumpTableBase = DAG.getNode(HexagonISD::WrapperJT, dl,
                                      getPointerTy(), TargetJT);
  SDValue ShiftIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                   DAG.getConstant(2, MVT::i32));
  SDValue JTAddress = DAG.getNode(ISD::ADD, dl, MVT::i32, JumpTableBase,
                                  ShiftIndex);
  SDValue LoadTarget = DAG.getLoad(MVT::i32, dl, Chain, JTAddress,
                                   MachinePointerInfo(), false, false, false,
                                   0);
  return DAG.getNode(HexagonISD::BR_JT, dl, MVT::Other, Chain, LoadTarget);
}
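// Net effect of the lowering above (illustrative only): the branch address is
// computed as table_base + (index << 2), the 32-bit target is loaded from the
// table, and control is transferred through a HexagonISD::BR_JT node.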

SDValue
HexagonTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
                                               SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Size = Op.getOperand(1);
  SDLoc dl(Op);

  unsigned SPReg = getStackPointerRegisterToSaveRestore();

  // Get a reference to the stack pointer.
  SDValue StackPointer = DAG.getCopyFromReg(Chain, dl, SPReg, MVT::i32);

  // Subtract the dynamic size from the actual stack size to
  // obtain the new stack size.
  SDValue Sub = DAG.getNode(ISD::SUB, dl, MVT::i32, StackPointer, Size);

  //
  // For Hexagon, the outgoing memory arguments area should be on top of the
  // alloca area on the stack i.e., the outgoing memory arguments should be
  // at a lower address than the alloca area. Move the alloca area down the
  // stack by adding back the space reserved for outgoing arguments to SP
  // here.
  //
  // We do not know what the size of the outgoing args is at this point.
  // So, we add a pseudo instruction ADJDYNALLOC that will adjust the
  // stack pointer. We patch this instruction with the correct, known
  // offset in emitPrologue().
  //
  // Use a placeholder immediate (zero) for now. This will be patched up
  // by emitPrologue().
  SDValue ArgAdjust = DAG.getNode(HexagonISD::ADJDYNALLOC, dl,
                                  MVT::i32,
                                  Sub,
                                  DAG.getConstant(0, MVT::i32));

  // The Sub result contains the new stack start address, so it
  // must be placed in the stack pointer register.
  const HexagonRegisterInfo *QRI = static_cast<const HexagonRegisterInfo *>(
      DAG.getSubtarget().getRegisterInfo());
  SDValue CopyChain = DAG.getCopyToReg(Chain, dl, QRI->getStackRegister(), Sub);

  SDValue Ops[2] = { ArgAdjust, CopyChain };
  return DAG.getMergeValues(Ops, dl);
}
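// Illustrative DAG shape produced by the lowering above (not from the
// source): for  %p = alloca i8, i32 %n  the nodes are roughly
//   Sub       = SUB(SP, %n)
//   ArgAdjust = ADJDYNALLOC(Sub, #0)   ; #0 is rewritten in emitPrologue()
//   SP        = CopyToReg(Sub)
// and %p is the ADJDYNALLOC result once the placeholder is patched.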

SDValue
HexagonTargetLowering::LowerFormalArguments(SDValue Chain,
                                            CallingConv::ID CallConv,
                                            bool isVarArg,
                                            const
                                            SmallVectorImpl<ISD::InputArg> &Ins,
                                            SDLoc dl, SelectionDAG &DAG,
                                            SmallVectorImpl<SDValue> &InVals)
const {

  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();
  HexagonMachineFunctionInfo *FuncInfo =
      MF.getInfo<HexagonMachineFunctionInfo>();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_Hexagon);

  // For LLVM, in the case when returning a struct by value (> 8 bytes),
  // the first argument is a pointer that points to the location on caller's
  // stack where the return value will be stored. For Hexagon, the location on
  // caller's stack is passed only when the struct size is smaller than (or
  // equal to) 8 bytes. If not, no address will be passed into the callee and
  // the callee returns the result directly through R0/R1.

  SmallVector<SDValue, 4> MemOps;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    ISD::ArgFlagsTy Flags = Ins[i].Flags;
    unsigned ObjSize;
    unsigned StackLocation;
    int FI;

    if (   (VA.isRegLoc() && !Flags.isByVal())
        || (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() > 8)) {
      // Arguments passed in registers:
      // 1. int, long long, ptr args that get allocated in register.
      // 2. Large struct that gets a register to put its address in.
      EVT RegVT = VA.getLocVT();
      if (RegVT == MVT::i8 || RegVT == MVT::i16 ||
          RegVT == MVT::i32 || RegVT == MVT::f32) {
        unsigned VReg =
            RegInfo.createVirtualRegister(&Hexagon::IntRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      } else if (RegVT == MVT::i64) {
        unsigned VReg =
            RegInfo.createVirtualRegister(&Hexagon::DoubleRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      } else {
        llvm_unreachable("Unexpected argument register type");
      }
    } else if (VA.isRegLoc() && Flags.isByVal() && Flags.getByValSize() <= 8) {
      assert(0 && "ByValSize must be bigger than 8 bytes");
    } else {
      // Sanity check.
      assert(VA.isMemLoc());

      if (Flags.isByVal()) {
        // If it's a byval parameter, then we need to compute the
        // "real" size, not the size of the pointer.
        ObjSize = Flags.getByValSize();
      } else {
        ObjSize = VA.getLocVT().getStoreSizeInBits() >> 3;
      }

      StackLocation = HEXAGON_LRFP_SIZE + VA.getLocMemOffset();
      // Create the frame index object for this incoming parameter...
      FI = MFI->CreateFixedObject(ObjSize, StackLocation, true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter.
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);

      if (Flags.isByVal()) {
        // If it's a pass-by-value aggregate, then do not dereference the stack
        // location. Instead, we should generate a reference to the stack
        // location.
        InVals.push_back(FIN);
      } else {
        InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                                     MachinePointerInfo(), false, false,
                                     false, 0));
      }
    }
  }

  if (!MemOps.empty()) {
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, MemOps);
  }

  if (isVarArg) {
    // This will point to the next argument passed via stack.
    int FrameIndex = MFI->CreateFixedObject(Hexagon_PointerSize,
                                            HEXAGON_LRFP_SIZE +
                                            CCInfo.getNextStackOffset(),
                                            true);
    FuncInfo->setVarArgsFrameIndex(FrameIndex);
  }

  return Chain;
}

SDValue
HexagonTargetLowering::LowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  // VASTART stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  MachineFunction &MF = DAG.getMachineFunction();
  HexagonMachineFunctionInfo *QFI = MF.getInfo<HexagonMachineFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(QFI->getVarArgsFrameIndex(), MVT::i32);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), SDLoc(Op), Addr,
                      Op.getOperand(1), MachinePointerInfo(SV), false,
                      false, 0);
}

SDValue
HexagonTargetLowering::LowerConstantPool(SDValue Op, SelectionDAG &DAG) const {
  EVT ValTy = Op.getValueType();
  SDLoc dl(Op);
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  SDValue Res;
  if (CP->isMachineConstantPoolEntry())
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), ValTy,
                                    CP->getAlignment());
  else
    Res = DAG.getTargetConstantPool(CP->getConstVal(), ValTy,
                                    CP->getAlignment());
  return DAG.getNode(HexagonISD::CONST32, dl, ValTy, Res);
}

SDValue
HexagonTargetLowering::LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const {
  const TargetRegisterInfo *TRI = DAG.getSubtarget().getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MFI->setReturnAddressIsTaken(true);

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(4, MVT::i32);
    return DAG.getLoad(VT, dl, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, dl, VT, FrameAddr, Offset),
                       MachinePointerInfo(), false, false, false, 0);
  }

  // Return LR, which contains the return address. Mark it an implicit live-in.
  unsigned Reg = MF.addLiveIn(TRI->getRARegister(), getRegClassFor(MVT::i32));
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl, Reg, VT);
}
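// Note on the Depth > 0 case above: the load at FrameAddr + 4 relies on the
// allocframe frame layout, in which each frame record holds the saved FP at
// offset 0 and the saved LR (return address) at offset 4 from the frame
// pointer.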

SDValue
HexagonTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const {
  const HexagonRegisterInfo *TRI = static_cast<const HexagonRegisterInfo *>(
      DAG.getSubtarget().getRegisterInfo());
  MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
  MFI->setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc dl(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                                         TRI->getFrameRegister(), VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, dl, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo(),
                            false, false, false, 0);
  return FrameAddr;
}

SDValue HexagonTargetLowering::LowerATOMIC_FENCE(SDValue Op,
                                                 SelectionDAG& DAG) const {
  SDLoc dl(Op);
  return DAG.getNode(HexagonISD::BARRIER, dl, MVT::Other, Op.getOperand(0));
}

SDValue HexagonTargetLowering::LowerGLOBALADDRESS(SDValue Op,
                                                  SelectionDAG &DAG) const {
  SDValue Result;
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  int64_t Offset = cast<GlobalAddressSDNode>(Op)->getOffset();
  SDLoc dl(Op);
  Result = DAG.getTargetGlobalAddress(GV, dl, getPointerTy(), Offset);

  const HexagonTargetObjectFile &TLOF =
      static_cast<const HexagonTargetObjectFile &>(getObjFileLowering());
  if (TLOF.IsGlobalInSmallSection(GV, getTargetMachine())) {
    return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), Result);
  }

  return DAG.getNode(HexagonISD::CONST32, dl, getPointerTy(), Result);
}

SDValue
HexagonTargetLowering::LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const {
  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue BA_SD = DAG.getTargetBlockAddress(BA, MVT::i32);
  SDLoc dl(Op);
  return DAG.getNode(HexagonISD::CONST32_GP, dl, getPointerTy(), BA_SD);
}

//===----------------------------------------------------------------------===//
// TargetLowering Implementation
//===----------------------------------------------------------------------===//

HexagonTargetLowering::HexagonTargetLowering(const TargetMachine &targetmachine)
    : TargetLowering(targetmachine),
      TM(targetmachine) {

  const HexagonSubtarget &Subtarget = TM.getSubtarget<HexagonSubtarget>();

  // Set up the register classes.
  addRegisterClass(MVT::i32, &Hexagon::IntRegsRegClass);
  addRegisterClass(MVT::i64, &Hexagon::DoubleRegsRegClass);

  if (Subtarget.hasV5TOps()) {
    addRegisterClass(MVT::f32, &Hexagon::IntRegsRegClass);
    addRegisterClass(MVT::f64, &Hexagon::DoubleRegsRegClass);
  }

  addRegisterClass(MVT::i1, &Hexagon::PredRegsRegClass);

  computeRegisterProperties();

  // Align loop entry.
  setPrefLoopAlignment(4);

  // Limits for inline expansion of memcpy/memmove.
  MaxStoresPerMemcpy = 6;
  MaxStoresPerMemmove = 6;

  //
  // Library calls for unsupported operations
  //
  setLibcallName(RTLIB::SINTTOFP_I128_F64, "__hexagon_floattidf");
  setLibcallName(RTLIB::SINTTOFP_I128_F32, "__hexagon_floattisf");

  setLibcallName(RTLIB::FPTOUINT_F32_I128, "__hexagon_fixunssfti");
  setLibcallName(RTLIB::FPTOUINT_F64_I128, "__hexagon_fixunsdfti");

  setLibcallName(RTLIB::FPTOSINT_F32_I128, "__hexagon_fixsfti");
  setLibcallName(RTLIB::FPTOSINT_F64_I128, "__hexagon_fixdfti");

  setLibcallName(RTLIB::SDIV_I32, "__hexagon_divsi3");
  setOperationAction(ISD::SDIV, MVT::i32, Expand);
  setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
  setOperationAction(ISD::SREM, MVT::i32, Expand);

  setLibcallName(RTLIB::SDIV_I64, "__hexagon_divdi3");
  setOperationAction(ISD::SDIV, MVT::i64, Expand);
  setLibcallName(RTLIB::SREM_I64, "__hexagon_moddi3");
  setOperationAction(ISD::SREM, MVT::i64, Expand);

  setLibcallName(RTLIB::UDIV_I32, "__hexagon_udivsi3");
  setOperationAction(ISD::UDIV, MVT::i32, Expand);

  setLibcallName(RTLIB::UDIV_I64, "__hexagon_udivdi3");
  setOperationAction(ISD::UDIV, MVT::i64, Expand);

  setLibcallName(RTLIB::UREM_I32, "__hexagon_umodsi3");
  setOperationAction(ISD::UREM, MVT::i32, Expand);

  setLibcallName(RTLIB::UREM_I64, "__hexagon_umoddi3");
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  setLibcallName(RTLIB::DIV_F32, "__hexagon_divsf3");
  setOperationAction(ISD::FDIV, MVT::f32, Expand);

  setLibcallName(RTLIB::DIV_F64, "__hexagon_divdf3");
  setOperationAction(ISD::FDIV, MVT::f64, Expand);

  setOperationAction(ISD::FSQRT, MVT::f32, Expand);
  setOperationAction(ISD::FSQRT, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);

  if (Subtarget.hasV5TOps()) {
    // Hexagon V5 Support.
    setOperationAction(ISD::FADD, MVT::f32, Legal);
    setOperationAction(ISD::FADD, MVT::f64, Legal);
    setOperationAction(ISD::FP_EXTEND, MVT::f32, Legal);
    setCondCodeAction(ISD::SETOEQ, MVT::f32, Legal);
    setCondCodeAction(ISD::SETOEQ, MVT::f64, Legal);
    setCondCodeAction(ISD::SETUEQ, MVT::f32, Legal);
    setCondCodeAction(ISD::SETUEQ, MVT::f64, Legal);

    setCondCodeAction(ISD::SETOGE, MVT::f32, Legal);
    setCondCodeAction(ISD::SETOGE, MVT::f64, Legal);
    setCondCodeAction(ISD::SETUGE, MVT::f32, Legal);
    setCondCodeAction(ISD::SETUGE, MVT::f64, Legal);

    setCondCodeAction(ISD::SETOGT, MVT::f32, Legal);
    setCondCodeAction(ISD::SETOGT, MVT::f64, Legal);
    setCondCodeAction(ISD::SETUGT, MVT::f32, Legal);
    setCondCodeAction(ISD::SETUGT, MVT::f64, Legal);

    setCondCodeAction(ISD::SETOLE, MVT::f32, Legal);
    setCondCodeAction(ISD::SETOLE, MVT::f64, Legal);
    setCondCodeAction(ISD::SETOLT, MVT::f32, Legal);
    setCondCodeAction(ISD::SETOLT, MVT::f64, Legal);

    setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
    setOperationAction(ISD::ConstantFP, MVT::f64, Legal);

    setOperationAction(ISD::FP_TO_UINT, MVT::i1, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i1, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i1, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i1, Promote);

    setOperationAction(ISD::FP_TO_UINT, MVT::i8, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i8, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i8, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i8, Promote);

    setOperationAction(ISD::FP_TO_UINT, MVT::i16, Promote);
    setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
    setOperationAction(ISD::UINT_TO_FP, MVT::i16, Promote);
    setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);

    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Legal);

    setOperationAction(ISD::FP_TO_UINT, MVT::i64, Legal);
    setOperationAction(ISD::FP_TO_SINT, MVT::i64, Legal);
    setOperationAction(ISD::UINT_TO_FP, MVT::i64, Legal);
    setOperationAction(ISD::SINT_TO_FP, MVT::i64, Legal);

    setOperationAction(ISD::FABS, MVT::f32, Legal);
    setOperationAction(ISD::FABS, MVT::f64, Expand);

    setOperationAction(ISD::FNEG, MVT::f32, Legal);
    setOperationAction(ISD::FNEG, MVT::f64, Expand);
  } else {

    // Expand fp<->uint.
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Expand);

    setOperationAction(ISD::SINT_TO_FP, MVT::i32, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::i32, Expand);

    setLibcallName(RTLIB::SINTTOFP_I64_F32, "__hexagon_floatdisf");
    setLibcallName(RTLIB::UINTTOFP_I64_F32, "__hexagon_floatundisf");

    setLibcallName(RTLIB::UINTTOFP_I32_F32, "__hexagon_floatunsisf");
    setLibcallName(RTLIB::SINTTOFP_I32_F32, "__hexagon_floatsisf");

    setLibcallName(RTLIB::SINTTOFP_I64_F64, "__hexagon_floatdidf");
    setLibcallName(RTLIB::UINTTOFP_I64_F64, "__hexagon_floatundidf");

    setLibcallName(RTLIB::UINTTOFP_I32_F64, "__hexagon_floatunsidf");
    setLibcallName(RTLIB::SINTTOFP_I32_F64, "__hexagon_floatsidf");

    setLibcallName(RTLIB::FPTOUINT_F32_I32, "__hexagon_fixunssfsi");
    setLibcallName(RTLIB::FPTOUINT_F32_I64, "__hexagon_fixunssfdi");

    setLibcallName(RTLIB::FPTOSINT_F64_I64, "__hexagon_fixdfdi");
    setLibcallName(RTLIB::FPTOSINT_F32_I64, "__hexagon_fixsfdi");

    setLibcallName(RTLIB::FPTOUINT_F64_I32, "__hexagon_fixunsdfsi");
    setLibcallName(RTLIB::FPTOUINT_F64_I64, "__hexagon_fixunsdfdi");

    setLibcallName(RTLIB::ADD_F64, "__hexagon_adddf3");
    setOperationAction(ISD::FADD, MVT::f64, Expand);

    setLibcallName(RTLIB::ADD_F32, "__hexagon_addsf3");
    setOperationAction(ISD::FADD, MVT::f32, Expand);

    setLibcallName(RTLIB::FPEXT_F32_F64, "__hexagon_extendsfdf2");
    setOperationAction(ISD::FP_EXTEND, MVT::f32, Expand);

    setLibcallName(RTLIB::OEQ_F32, "__hexagon_eqsf2");
    setCondCodeAction(ISD::SETOEQ, MVT::f32, Expand);

    setLibcallName(RTLIB::OEQ_F64, "__hexagon_eqdf2");
    setCondCodeAction(ISD::SETOEQ, MVT::f64, Expand);

    setLibcallName(RTLIB::OGE_F32, "__hexagon_gesf2");
    setCondCodeAction(ISD::SETOGE, MVT::f32, Expand);

    setLibcallName(RTLIB::OGE_F64, "__hexagon_gedf2");
    setCondCodeAction(ISD::SETOGE, MVT::f64, Expand);

    setLibcallName(RTLIB::OGT_F32, "__hexagon_gtsf2");
    setCondCodeAction(ISD::SETOGT, MVT::f32, Expand);

    setLibcallName(RTLIB::OGT_F64, "__hexagon_gtdf2");
    setCondCodeAction(ISD::SETOGT, MVT::f64, Expand);

    setLibcallName(RTLIB::FPTOSINT_F64_I32, "__hexagon_fixdfsi");
    setOperationAction(ISD::FP_TO_SINT, MVT::f64, Expand);

    setLibcallName(RTLIB::FPTOSINT_F32_I32, "__hexagon_fixsfsi");
    setOperationAction(ISD::FP_TO_SINT, MVT::f32, Expand);

    setLibcallName(RTLIB::OLE_F64, "__hexagon_ledf2");
    setCondCodeAction(ISD::SETOLE, MVT::f64, Expand);

    setLibcallName(RTLIB::OLE_F32, "__hexagon_lesf2");
    setCondCodeAction(ISD::SETOLE, MVT::f32, Expand);

    setLibcallName(RTLIB::OLT_F64, "__hexagon_ltdf2");
    setCondCodeAction(ISD::SETOLT, MVT::f64, Expand);

    setLibcallName(RTLIB::OLT_F32, "__hexagon_ltsf2");
    setCondCodeAction(ISD::SETOLT, MVT::f32, Expand);

    setLibcallName(RTLIB::MUL_F64, "__hexagon_muldf3");
    setOperationAction(ISD::FMUL, MVT::f64, Expand);

    setLibcallName(RTLIB::MUL_F32, "__hexagon_mulsf3");
    setOperationAction(ISD::FMUL, MVT::f32, Expand);

    setLibcallName(RTLIB::UNE_F64, "__hexagon_nedf2");
    setCondCodeAction(ISD::SETUNE, MVT::f64, Expand);

    setLibcallName(RTLIB::UNE_F32, "__hexagon_nesf2");

    setLibcallName(RTLIB::SUB_F64, "__hexagon_subdf3");
    setOperationAction(ISD::FSUB, MVT::f64, Expand);

    setLibcallName(RTLIB::SUB_F32, "__hexagon_subsf3");
    setOperationAction(ISD::FSUB, MVT::f32, Expand);

    setLibcallName(RTLIB::FPROUND_F64_F32, "__hexagon_truncdfsf2");
    setOperationAction(ISD::FP_ROUND, MVT::f64, Expand);

    setLibcallName(RTLIB::UO_F64, "__hexagon_unorddf2");
    setCondCodeAction(ISD::SETUO, MVT::f64, Expand);

    setLibcallName(RTLIB::O_F64, "__hexagon_unorddf2");
    setCondCodeAction(ISD::SETO, MVT::f64, Expand);

    setLibcallName(RTLIB::O_F32, "__hexagon_unordsf2");
    setCondCodeAction(ISD::SETO, MVT::f32, Expand);

    setLibcallName(RTLIB::UO_F32, "__hexagon_unordsf2");
    setCondCodeAction(ISD::SETUO, MVT::f32, Expand);

    setOperationAction(ISD::FABS, MVT::f32, Expand);
    setOperationAction(ISD::FABS, MVT::f64, Expand);
    setOperationAction(ISD::FNEG, MVT::f32, Expand);
    setOperationAction(ISD::FNEG, MVT::f64, Expand);
  }

  setLibcallName(RTLIB::SREM_I32, "__hexagon_modsi3");
  setOperationAction(ISD::SREM, MVT::i32, Expand);

  setIndexedLoadAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedLoadAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedLoadAction(ISD::POST_INC, MVT::i32, Legal);
  setIndexedLoadAction(ISD::POST_INC, MVT::i64, Legal);

  setIndexedStoreAction(ISD::POST_INC, MVT::i8, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i16, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i32, Legal);
  setIndexedStoreAction(ISD::POST_INC, MVT::i64, Legal);

  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Turn FP extload into load/fextend.
  for (MVT VT : MVT::fp_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
  // Expand sign-extending loads from i1.
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Expand);
  // Turn FP truncstore into trunc + store.
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);

  // Custom legalize GlobalAddress nodes into CONST32.
  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::GlobalAddress, MVT::i8, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  setOperationAction(ISD::TRUNCATE, MVT::i64, Expand);

  // Hexagon doesn't have sext_inreg, replace them with shl/sra.
  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);

  // Hexagon has no REM or DIVREM operations.
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);

  setOperationAction(ISD::BSWAP, MVT::i64, Expand);

  // Lower SELECT_CC to SETCC and SELECT.
  setOperationAction(ISD::SELECT_CC, MVT::i1, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Expand);

  if (Subtarget.hasV5TOps()) {

    // We need to make the operation type of the SELECT node Custom so that we
    // do not go into the infinite select -> setcc -> select_cc -> select loop.
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);

    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);

  } else {

    // Hexagon has no select or setcc: expand to SELECT_CC.
    setOperationAction(ISD::SELECT, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Expand);
  }

  if (EmitJumpTables) {
    setOperationAction(ISD::BR_JT, MVT::Other, Custom);
  } else {
    setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  }
  // Increase jump tables cutover to 5, was 4.
  setMinimumJumpTableEntries(5);

  setOperationAction(ISD::BR_CC, MVT::f32, Expand);
  setOperationAction(ISD::BR_CC, MVT::f64, Expand);
  setOperationAction(ISD::BR_CC, MVT::i1, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::BR_CC, MVT::i64, Expand);

  setOperationAction(ISD::ATOMIC_FENCE, MVT::Other, Custom);

  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f32, Expand);

  // In V4, we have double word add/sub with carry. The problem with
  // modelling this instruction is that it produces 2 results - Rdd and Px.
  // To model the update of Px, we will have to use Defs[p0..p3] which will
  // cause any predicate live range to spill. So, we pretend we don't
  // have these instructions.
  setOperationAction(ISD::ADDE, MVT::i8, Expand);
  setOperationAction(ISD::ADDE, MVT::i16, Expand);
  setOperationAction(ISD::ADDE, MVT::i32, Expand);
  setOperationAction(ISD::ADDE, MVT::i64, Expand);
  setOperationAction(ISD::SUBE, MVT::i8, Expand);
  setOperationAction(ISD::SUBE, MVT::i16, Expand);
  setOperationAction(ISD::SUBE, MVT::i32, Expand);
  setOperationAction(ISD::SUBE, MVT::i64, Expand);
  setOperationAction(ISD::ADDC, MVT::i8, Expand);
  setOperationAction(ISD::ADDC, MVT::i16, Expand);
  setOperationAction(ISD::ADDC, MVT::i32, Expand);
  setOperationAction(ISD::ADDC, MVT::i64, Expand);
  setOperationAction(ISD::SUBC, MVT::i8, Expand);
  setOperationAction(ISD::SUBC, MVT::i16, Expand);
  setOperationAction(ISD::SUBC, MVT::i32, Expand);
  setOperationAction(ISD::SUBC, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::CTPOP, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ, MVT::i64, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ, MVT::i64, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i64, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::BSWAP, MVT::i32, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);

  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Expand);

  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);

  setOperationAction(ISD::EH_RETURN, MVT::Other, Custom);

  if (Subtarget.isSubtargetV2()) {
    setExceptionPointerRegister(Hexagon::R20);
    setExceptionSelectorRegister(Hexagon::R21);
  } else {
    setExceptionPointerRegister(Hexagon::R0);
    setExceptionSelectorRegister(Hexagon::R1);
  }

  // VASTART needs to be custom lowered to use the VarArgsFrameIndex.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Use the default implementation.
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Custom);
  setOperationAction(ISD::INLINEASM, MVT::Other, Custom);

  setMinFunctionAlignment(2);

  // Needed for DYNAMIC_STACKALLOC expansion.
  const HexagonRegisterInfo *QRI = static_cast<const HexagonRegisterInfo *>(
      TM.getSubtargetImpl()->getRegisterInfo());
  setStackPointerRegisterToSaveRestore(QRI->getStackRegister());
  setSchedulingPreference(Sched::VLIW);
}

const char*
HexagonTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch (Opcode) {
    default: return nullptr;
    case HexagonISD::CONST32:          return "HexagonISD::CONST32";
    case HexagonISD::CONST32_GP:       return "HexagonISD::CONST32_GP";
    case HexagonISD::CONST32_Int_Real: return "HexagonISD::CONST32_Int_Real";
    case HexagonISD::ADJDYNALLOC:      return "HexagonISD::ADJDYNALLOC";
    case HexagonISD::CMPICC:           return "HexagonISD::CMPICC";
    case HexagonISD::CMPFCC:           return "HexagonISD::CMPFCC";
    case HexagonISD::BRICC:            return "HexagonISD::BRICC";
    case HexagonISD::BRFCC:            return "HexagonISD::BRFCC";
    case HexagonISD::SELECT_ICC:       return "HexagonISD::SELECT_ICC";
    case HexagonISD::SELECT_FCC:       return "HexagonISD::SELECT_FCC";
    case HexagonISD::Hi:               return "HexagonISD::Hi";
    case HexagonISD::Lo:               return "HexagonISD::Lo";
    case HexagonISD::FTOI:             return "HexagonISD::FTOI";
    case HexagonISD::ITOF:             return "HexagonISD::ITOF";
    case HexagonISD::CALL:             return "HexagonISD::CALL";
    case HexagonISD::RET_FLAG:         return "HexagonISD::RET_FLAG";
    case HexagonISD::BR_JT:            return "HexagonISD::BR_JT";
    case HexagonISD::TC_RETURN:        return "HexagonISD::TC_RETURN";
    case HexagonISD::EH_RETURN:        return "HexagonISD::EH_RETURN";
  }
}

bool
HexagonTargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
  EVT MTy1 = EVT::getEVT(Ty1);
  EVT MTy2 = EVT::getEVT(Ty2);
  if (!MTy1.isSimple() || !MTy2.isSimple()) {
    return false;
  }
  return ((MTy1.getSimpleVT() == MVT::i64) && (MTy2.getSimpleVT() == MVT::i32));
}

bool HexagonTargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
  if (!VT1.isSimple() || !VT2.isSimple()) {
    return false;
  }
  return ((VT1.getSimpleVT() == MVT::i64) && (VT2.getSimpleVT() == MVT::i32));
}

bool
HexagonTargetLowering::allowTruncateForTailCall(Type *Ty1, Type *Ty2) const {
  // Assuming the caller does not have either a signext or zeroext modifier, and
  // only one value is accepted, any reasonable truncation is allowed.
  if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
    return false;

  // FIXME: in principle up to 64-bit could be made safe, but it would be very
  // fragile at the moment: any support for multiple value returns would be
  // liable to disallow tail calls involving i64 -> iN truncation in many cases.
  return Ty1->getPrimitiveSizeInBits() <= 32;
}

SDValue
HexagonTargetLowering::LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain   = Op.getOperand(0);
  SDValue Offset  = Op.getOperand(1);
  SDValue Handler = Op.getOperand(2);
  SDLoc dl(Op);

  // Mark function as containing a call to EH_RETURN.
  HexagonMachineFunctionInfo *FuncInfo =
      DAG.getMachineFunction().getInfo<HexagonMachineFunctionInfo>();
  FuncInfo->setHasEHReturn();

  unsigned OffsetReg = Hexagon::R28;

  SDValue StoreAddr = DAG.getNode(ISD::ADD, dl, getPointerTy(),
                                  DAG.getRegister(Hexagon::R30, getPointerTy()),
                                  DAG.getIntPtrConstant(4));
  Chain = DAG.getStore(Chain, dl, Handler, StoreAddr, MachinePointerInfo(),
                       false, false, 0);
  Chain = DAG.getCopyToReg(Chain, dl, OffsetReg, Offset);

  // Not needed; we already use OffsetReg as an explicit input to EH_RETURN.
  // MF.getRegInfo().addLiveOut(OffsetReg);

  return DAG.getNode(HexagonISD::EH_RETURN, dl, MVT::Other, Chain);
}

SDValue
HexagonTargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
    default: llvm_unreachable("Should not custom lower this!");
    case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
    case ISD::EH_RETURN:          return LowerEH_RETURN(Op, DAG);
      // Frame & Return address. Currently unimplemented.
    case ISD::RETURNADDR:         return LowerRETURNADDR(Op, DAG);
    case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
    case ISD::GlobalTLSAddress:
      llvm_unreachable("TLS not implemented for Hexagon.");
    case ISD::ATOMIC_FENCE:       return LowerATOMIC_FENCE(Op, DAG);
    case ISD::GlobalAddress:      return LowerGLOBALADDRESS(Op, DAG);
    case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
    case ISD::VASTART:            return LowerVASTART(Op, DAG);
    case ISD::BR_JT:              return LowerBR_JT(Op, DAG);

    case ISD::DYNAMIC_STACKALLOC: return LowerDYNAMIC_STACKALLOC(Op, DAG);
    case ISD::SELECT:             return Op;
    case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
    case ISD::INLINEASM:          return LowerINLINEASM(Op, DAG);
  }
}

//===----------------------------------------------------------------------===//
// Hexagon Scheduler Hooks
//===----------------------------------------------------------------------===//
MachineBasicBlock *
HexagonTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                   MachineBasicBlock *BB)
const {
  switch (MI->getOpcode()) {
    case Hexagon::ADJDYNALLOC: {
      MachineFunction *MF = BB->getParent();
      HexagonMachineFunctionInfo *FuncInfo =
          MF->getInfo<HexagonMachineFunctionInfo>();
      FuncInfo->addAllocaAdjustInst(MI);
      return BB;
    }
    default: llvm_unreachable("Unexpected instr type to insert");
  } // switch
}

//===----------------------------------------------------------------------===//
// Inline Assembly Support
//===----------------------------------------------------------------------===//
std::pair<unsigned, const TargetRegisterClass*>
HexagonTargetLowering::getRegForInlineAsmConstraint(const
                                                    std::string &Constraint,
                                                    MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':   // R0-R31
      switch (VT.SimpleTy) {
      default:
        llvm_unreachable("getRegForInlineAsmConstraint Unhandled data type");
      case MVT::i32:
      case MVT::i16:
      case MVT::i8:
      case MVT::f32:
        return std::make_pair(0U, &Hexagon::IntRegsRegClass);
      case MVT::i64:
      case MVT::f64:
        return std::make_pair(0U, &Hexagon::DoubleRegsRegClass);
      }
    default:
      llvm_unreachable("Unknown asm register class");
    }
  }

  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}

/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
bool HexagonTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  return TM.getSubtarget<HexagonSubtarget>().hasV5TOps();
}

/// isLegalAddressingMode - Return true if the addressing mode represented by
/// AM is legal for this target, for a load/store of the specified type.
bool HexagonTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                                  Type *Ty) const {
  // The base offset must fit in the signed immediate field of a load/store.
  if (AM.BaseOffs <= -(1LL << 13) || AM.BaseOffs >= (1LL << 13)-1) {
    return false;
  }

  // No global is ever allowed as a base.
  if (AM.BaseGV) {
    return false;
  }

  int Scale = AM.Scale;
  if (Scale < 0) Scale = -Scale;
  switch (Scale) {
  case 0:  // No scale reg, "r+i", "r", or just "i".
    break;
  default: // No scaled addressing mode.
    return false;
  }
  return true;
}
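// Illustrative examples for the hook above (not from the source):
//   { BaseGV = null, BaseOffs = 4092, Scale = 0 }  -> legal
//   { BaseGV = null, BaseOffs = 0,    Scale = 2 }  -> rejected (scaled mode)
//   { BaseGV = @g,   BaseOffs = 0,    Scale = 0 }  -> rejected (global base)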

/// isLegalICmpImmediate - Return true if the specified immediate is a legal
/// icmp immediate, that is the target has icmp instructions which can compare
/// a register against the immediate without having to materialize the
/// immediate into a register.
bool HexagonTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return Imm >= -512 && Imm <= 511;
}
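// Per the hook above, an integer compare against 511 can keep the constant in
// the instruction, while a compare against 512 would first need the constant
// materialized into a register (illustrative reading, not from the source).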

/// IsEligibleForTailCallOptimization - Check whether the call is eligible
/// for tail call optimization. Targets which want to do tail call
/// optimization should implement this function.
bool HexagonTargetLowering::IsEligibleForTailCallOptimization(
                                 SDValue Callee,
                                 CallingConv::ID CalleeCC,
                                 bool isVarArg,
                                 bool isCalleeStructRet,
                                 bool isCallerStructRet,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SmallVectorImpl<ISD::InputArg> &Ins,
                                 SelectionDAG& DAG) const {
  const Function *CallerF = DAG.getMachineFunction().getFunction();
  CallingConv::ID CallerCC = CallerF->getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // ***************************************************************************
  //  Look for obvious safe cases to perform tail call optimization that do not
  //  require ABI changes.
  // ***************************************************************************

  // If this is a tail call via a function pointer, then don't do it!
  if (!(dyn_cast<GlobalAddressSDNode>(Callee))
      && !(dyn_cast<ExternalSymbolSDNode>(Callee))) {
    return false;
  }

  // Do not optimize if the calling conventions do not match.
  if (!CCMatch)
    return false;

  // Do not tail call optimize vararg calls.
  if (isVarArg)
    return false;

  // Also avoid tail call optimization if either caller or callee uses struct
  // return semantics.
  if (isCalleeStructRet || isCallerStructRet)
    return false;

  // In addition to the cases above, we also disable tail call optimization if
  // the calling convention decides that at least one outgoing argument needs
  // to go on the stack. We cannot check that here because at this point that
  // information is not available.
  return true;
}

// Return true when the given node fits in a positive half word.
bool llvm::isPositiveHalfWord(SDNode *N) {
  ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N);
  if (CN && CN->getSExtValue() > 0 && isInt<16>(CN->getSExtValue()))
    return true;

  switch (N->getOpcode()) {
  default:
    return false;
  case ISD::SIGN_EXTEND_INREG: