//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "xcore-lower"

#include "XCoreISelLowering.h"
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
#include "XCoreTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>

using namespace llvm;

const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch (Opcode)
  {
    case XCoreISD::BL                : return "XCoreISD::BL";
    case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
    case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
    case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
    case XCoreISD::STWSP             : return "XCoreISD::STWSP";
    case XCoreISD::RETSP             : return "XCoreISD::RETSP";
    case XCoreISD::LADD              : return "XCoreISD::LADD";
    case XCoreISD::LSUB              : return "XCoreISD::LSUB";
    case XCoreISD::LMUL              : return "XCoreISD::LMUL";
    case XCoreISD::MACCU             : return "XCoreISD::MACCU";
    case XCoreISD::MACCS             : return "XCoreISD::MACCS";
    case XCoreISD::CRC8              : return "XCoreISD::CRC8";
    case XCoreISD::BR_JT             : return "XCoreISD::BR_JT";
    case XCoreISD::BR_JT32           : return "XCoreISD::BR_JT32";
    default                          : return NULL;
  }
}

XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
  : TargetLowering(XTM, new XCoreTargetObjectFile()),
    TM(XTM),
    Subtarget(*XTM.getSubtargetImpl()) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties();

  // Division is expensive
  setIntDivIsCheap(false);

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::Source);

  // Use i32 for setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
  setOperationAction(ISD::BR_CC, MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::ADDC, MVT::i32, Expand);
  setOperationAction(ISD::ADDE, MVT::i32, Expand);
  setOperationAction(ISD::SUBC, MVT::i32, Expand);
  setOperationAction(ISD::SUBE, MVT::i32, Expand);

  // Stop the combiner recombining select and set_cc
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);

  // 64bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTR, MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constantpool nodes
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize
    = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);

  setMinFunctionAlignment(1);
}

bool XCoreTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  if (Val.getOpcode() != ISD::LOAD)
    return false;

  EVT VT1 = Val.getValueType();
  if (!VT1.isSimple() || !VT1.isInteger() ||
      !VT2.isSimple() || !VT2.isInteger())
    return false;

  switch (VT1.getSimpleVT().SimpleTy) {
  default: break;
  case MVT::i8:
    return true;
  }

  return false;
}

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:          return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:          return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:                return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::
LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2),
                             Op.getOperand(3), Op.getOperand(4));
  return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0),
                     Op.getOperand(1));
}

SDValue XCoreTargetLowering::
getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
                        SelectionDAG &DAG) const
{
  // FIXME there is no actual debug info here
  SDLoc dl(GA);
  const GlobalValue *UnderlyingGV = GV;
  // If GV is an alias then use the aliasee to determine the wrapper type
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
    UnderlyingGV = GA->resolveAliasedGlobal();
  if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(UnderlyingGV)) {
    if (GVar->isConstant())
      return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
    return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
  }
  return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc DL(Op);
  const GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  int64_t Offset = GN->getOffset();
  // We can only fold positive offsets that are a multiple of the word size.
  int64_t FoldedOffset = std::max(Offset & ~3, (int64_t)0);
  SDValue GA = DAG.getTargetGlobalAddress(GV, DL, MVT::i32, FoldedOffset);
  GA = getGlobalAddressWrapper(GA, GV, DAG);
  // Handle the rest of the offset.
  if (Offset != FoldedOffset) {
    SDValue Remaining = DAG.getConstant(Offset - FoldedOffset, MVT::i32);
    GA = DAG.getNode(ISD::ADD, DL, MVT::i32, GA, Remaining);
  }
  return GA;
}

SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc DL(Op);

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy());

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, getPointerTy(), Result);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  SDLoc dl(Op);
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}

unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  SDLoc dl(Op);
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}

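// lowerLoadWordFromAlignedBasePlusOffset rebuilds a word load from Base +
// Offset, where Base is known to be word aligned but Offset need not be.
// Worked example for Offset = 2: LowOffset = 0, HighOffset = 4, and the
// result is (load(Base) >> 16) | (load(Base + 4) << 16).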
SDValue XCoreTargetLowering::
lowerLoadWordFromAlignedBasePlusOffset(SDLoc DL, SDValue Chain, SDValue Base,
                                       int64_t Offset, SelectionDAG &DAG) const
{
  if ((Offset & 0x3) == 0) {
    return DAG.getLoad(getPointerTy(), DL, Chain, Base, MachinePointerInfo(),
                       false, false, false, 0);
  }
  // Lower to pair of consecutive word aligned loads plus some bit shifting.
  int32_t HighOffset = RoundUpToAlignment(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  SDValue LowAddr, HighAddr;
  if (GlobalAddressSDNode *GASD =
        dyn_cast<GlobalAddressSDNode>(Base.getNode())) {
    LowAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                   LowOffset);
    HighAddr = DAG.getGlobalAddress(GASD->getGlobal(), DL, Base.getValueType(),
                                    HighOffset);
  } else {
    LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                          DAG.getConstant(LowOffset, MVT::i32));
    HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                           DAG.getConstant(HighOffset, MVT::i32));
  }
  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, MVT::i32);
  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, MVT::i32);

  SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
                            LowAddr, MachinePointerInfo(),
                            false, false, false, 0);
  SDValue High = DAG.getLoad(getPointerTy(), DL, Chain,
                             HighAddr, MachinePointerInfo(),
                             false, false, false, 0);
  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                      High.getValue(1));
  SDValue Ops[] = { Result, Chain };
  return DAG.getMergeValues(Ops, 2, DL);
}

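// isWordAligned - Return true if Value is known to have its two least
// significant bits clear, i.e. it is a multiple of 4.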
static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
  APInt KnownZero, KnownOne;
  DAG.ComputeMaskedBits(Value, KnownZero, KnownOne);
  return KnownZero.countTrailingOnes() >= 2;
}

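// Misaligned i32 loads are lowered in order of preference: a load from a
// provably word-aligned base plus constant offset is rebuilt from aligned
// word loads; a load with alignment 2 becomes a pair of halfword loads;
// anything else falls back to a call to the __misaligned_load helper.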
SDValue XCoreTargetLowering::
LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsUnalignedMemoryAccesses(LD->getMemoryVT()))
    return SDValue();

  unsigned ABIAlignment = getDataLayout()->
    getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned load alone.
  if (LD->getAlignment() >= ABIAlignment)
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  SDLoc DL(Op);

  if (!LD->isVolatile()) {
    const GlobalValue *GV;
    int64_t Offset = 0;
    if (DAG.isBaseWithConstantOffset(BasePtr) &&
        isWordAligned(BasePtr->getOperand(0), DAG)) {
      SDValue NewBasePtr = BasePtr->getOperand(0);
      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
        MinAlign(GV->getAlignment(), 4) == 4) {
      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
                                                BasePtr->getValueType(0));
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
  }

  if (LD->getAlignment() == 2) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
                                 BasePtr, LD->getPointerInfo(), MVT::i16,
                                 LD->isVolatile(), LD->isNonTemporal(), 2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                  HighAddr,
                                  LD->getPointerInfo().getWithOffset(2),
                                  MVT::i16, LD->isVolatile(),
                                  LD->isNonTemporal(), 2);
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(Chain, IntPtrTy, false, false,
                    false, false, 0, CallingConv::C, /*isTailCall=*/false,
                    /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
                    Args, DAG, DL);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  SDValue Ops[] =
    { CallResult.first, CallResult.second };

  return DAG.getMergeValues(Ops, 2, DL);
}

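// Misaligned i32 stores follow the same scheme as LowerLOAD above: a store
// with alignment 2 is split into two truncating halfword stores; anything
// else is lowered to a call to the __misaligned_store helper.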
SDValue XCoreTargetLowering::
LowerSTORE(SDValue Op, SelectionDAG &DAG) const
{
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
    return SDValue();
  }
  unsigned ABIAlignment = getDataLayout()->
    getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned store alone.
  if (ST->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  SDLoc dl(Op);

  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
                                         ST->getPointerInfo(), MVT::i16,
                                         ST->isVolatile(), ST->isNonTemporal(),
                                         2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
                                          ST->getPointerInfo().getWithOffset(2),
                                          MVT::i16, ST->isVolatile(),
                                          ST->isNonTemporal(), 2);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(Chain,
                    Type::getVoidTy(*DAG.getContext()), false, false,
                    false, false, 0, CallingConv::C, /*isTailCall=*/false,
                    /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
                    Args, DAG, dl);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  return CallResult.second;
}

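// SMUL_LOHI is lowered with a single multiply-accumulate: with both
// accumulator operands zeroed, maccs(0, 0, lhs, rhs) yields the full 64-bit
// signed product as a hi/lo pair. LowerUMUL_LOHI below uses LMUL the same way
// for the unsigned product.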
SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  SDLoc dl(Op);
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}

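// TryExpandADDWithMul - Try to expand an i64 add(mul(x, y), addend) using
// the 32x32->64 multiply-accumulate nodes. Writing x = xl + 2^32 * xh and
// y = yl + 2^32 * yh gives, modulo 2^64,
//   x * y = xl * yl + 2^32 * (xl * yh + xh * yl)
// so a single MACCU/MACCS suffices when both inputs are zero- or
// sign-extended from 32 bits, and otherwise the two cross products are
// added into the high word.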
SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  SDLoc dl(N);
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

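// ExpandADDSUB - Expand an i64 add or sub into a pair of 32-bit LADD/LSUB
// nodes, threading the carry (or borrow) from the low half into the high
// half.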
SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD) {
    SDValue Result = TryExpandADDWithMul(N, DAG);
    if (Result.getNode() != 0)
      return Result;
  }

  SDLoc dl(N);

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(0, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(1, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(0, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(1, MVT::i32));

  // Expand
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSL, RHSL, Zero);
  SDValue Carry(Lo.getNode(), 1);

  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSH, RHSH, Carry);
  SDValue Ignored(Hi.getNode(), 1);
  // Merge the pieces
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  // While LLVM does not support aggregate varargs we can ignore
  // the possibility of the ValueType being an implicit byVal vararg.
  SDNode *Node = Op.getNode();
  EVT VT = Node->getValueType(0); // not an aggregate
  SDValue InChain = Node->getOperand(0);
  SDValue VAListPtr = Node->getOperand(1);
  EVT PtrVT = VAListPtr.getValueType();
  const Value *SV = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  SDLoc dl(Node);
  SDValue VAList = DAG.getLoad(PtrVT, dl, InChain,
                               VAListPtr, MachinePointerInfo(SV),
                               false, false, false, 0);
  // Increment the pointer, VAList, to the next vararg
  SDValue nextPtr = DAG.getNode(ISD::ADD, dl, PtrVT, VAList,
                                DAG.getIntPtrConstant(VT.getSizeInBits() / 8));
  // Store the incremented VAList to the legalized pointer
  InChain = DAG.getStore(VAList.getValue(1), dl, nextPtr, VAListPtr,
                         MachinePointerInfo(SV), false, false, 0);
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, dl, InChain, VAList, MachinePointerInfo(),
                     false, false, false, 0);
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  SDLoc dl(Op);
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo(), false, false, 0);
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  SDLoc dl(Op);
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

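// The trampoline emitted below is three words of code (the six 16-bit
// instructions listed in the comment inside) followed by two data words at
// offsets 12 and 16 holding the 'nest' value and the address of the nested
// function: it loads the nest value, spills it to sp[0], then loads and
// branches to the real function.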
SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // .align 4
  // LDAPF_u10 r11, nest
  // LDW_2rus r11, r11[0]
  // STWSP_ru6 r11, sp[0]
  // LDAPF_u10 r11, fptr
  // LDW_2rus r11, r11[0]
  // BAU_1r r11
  // nest:
  // .word nest
  // fptr:
  // .word fptr
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  SDLoc dl(Op);
  OutChains[0] = DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, MVT::i32));
  OutChains[1] = DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 4), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, MVT::i32));
  OutChains[2] = DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 8), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, MVT::i32));
  OutChains[3] = DAG.getStore(Chain, dl, Nest, Addr,
                              MachinePointerInfo(TrmpAddr, 12), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, MVT::i32));
  OutChains[4] = DAG.getStore(Chain, dl, FPtr, Addr,
                              MachinePointerInfo(TrmpAddr, 16), false, false,
                              0);

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 5);
}

SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
  SDLoc DL(Op);
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
  case Intrinsic::xcore_crc8:
    EVT VT = Op.getValueType();
    SDValue Data =
      DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
                  Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
    SDValue Crc(Data.getNode(), 1);
    SDValue Results[] = { Crc, Data };
    return DAG.getMergeValues(Results, 2, DL);
  }
  return SDValue();
}

//===----------------------------------------------------------------------===//
//  Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &dl = CLI.DL;
  SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
  SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
  SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C is implemented.
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::Fast:
    case CallingConv::C:
      return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                            Outs, OutVals, Ins, dl, DAG, InVals);
  }
}

/// LowerCCCCallTo - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
SDValue
XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    SDLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
                                                      getPointerTy(), true), dl);

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    // Arguments that can be passed on register must be kept at
    // RegsToPass vector
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //             = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, getPointerTy(), true),
                             DAG.getConstant(0, getPointerTy(), true),
                             InFlag, dl);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                     CallingConv::ID CallConv, bool isVarArg,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     SDLoc dl, SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  CCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//  Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

namespace {
  struct ArgDataPair { SDValue SDV; ISD::ArgFlagsTy Flags; };
}

/// XCore formal arguments implementation
SDValue
XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg> &Ins,
                                          SDLoc dl,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals)
                                            const {
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::Fast:
      return LowerCCCArguments(Chain, CallConv, isVarArg,
                               Ins, dl, DAG, InVals);
  }
}

/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
SDValue
XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
                                       CallingConv::ID CallConv,
                                       bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg>
                                         &Ins,
                                       SDLoc dl,
                                       SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  // All getCopyFromReg ops must precede any getMemcpys to prevent the
  // scheduler clobbering a register before it has been copied.
  // The stages are:
  // 1. CopyFromReg (and load) arg & vararg registers.
  // 2. Chain CopyFromReg nodes into a TokenFactor.
  // 3. Memcpy 'byVal' args & push final InVals.
  // 4. Chain mem ops nodes into a TokenFactor.
  SmallVector<SDValue, 4> CFRegNode;
  SmallVector<ArgDataPair, 4> ArgData;
  SmallVector<SDValue, 4> MemOps;

  // 1a. CopyFromReg (and load) arg registers.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];
    SDValue ArgIn;

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getSimpleVT().SimpleTy << "\n";
#endif
          llvm_unreachable(0);
        }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        ArgIn = DAG.getCopyFromReg(Chain, dl, VReg, RegVT);
        CFRegNode.push_back(ArgIn.getValue(ArgIn->getNumValues() - 1));
      }
    } else {
      // sanity check
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize,
                                      LRSaveSize + VA.getLocMemOffset(),
                                      true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      ArgIn = DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                          MachinePointerInfo::getFixedStack(FI),
                          false, false, false, 0);
    }
    const ArgDataPair ADP = { ArgIn, Ins[i].Flags };
    ArgData.push_back(ADP);
  }

  // 1b. CopyFromReg vararg registers.
  if (isVarArg) {
    // Argument registers
    static const uint16_t ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
                                                     array_lengthof(ArgRegs));
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot
        int FI = MFI->CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        CFRegNode.push_back(Val.getValue(Val->getNumValues() - 1));
        // Move argument from virt reg -> stack
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                     MachinePointerInfo(), false, false, 0);
        MemOps.push_back(Store);
      }
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
        MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                               true));
    }
  }

  // 2. Chain CopyFromReg nodes into a TokenFactor.
  if (!CFRegNode.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &CFRegNode[0],
                        CFRegNode.size());

  // 3. Memcpy 'byVal' args & push final InVals.
  // Aggregates passed "byVal" need to be copied by the callee.
  // The callee will use a pointer to this copy, rather than the original
  // argument.
  for (SmallVectorImpl<ArgDataPair>::const_iterator ArgDI = ArgData.begin(),
                                                    ArgDE = ArgData.end();
       ArgDI != ArgDE; ++ArgDI) {
    if (ArgDI->Flags.isByVal() && ArgDI->Flags.getByValSize()) {
      unsigned Size = ArgDI->Flags.getByValSize();
      unsigned Align = ArgDI->Flags.getByValAlign();
      // Create a new object on the stack and copy the pointee into it.
      int FI = MFI->CreateStackObject(Size, Align, false, false);
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(FIN);
      MemOps.push_back(DAG.getMemcpy(Chain, dl, FIN, ArgDI->SDV,
                                     DAG.getConstant(Size, MVT::i32),
                                     Align, false, false,
                                     MachinePointerInfo(),
                                     MachinePointerInfo()));
    } else {
      InVals.push_back(ArgDI->SDV);
    }
  }

  // 4. Chain mem ops nodes into a TokenFactor.
  if (!MemOps.empty()) {
    MemOps.push_back(Chain);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, &MemOps[0],
                        MemOps.size());
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//  Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_XCore);
}

SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 SDLoc dl, SelectionDAG &DAG) const {

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Return on XCore is always a "retsp 0"
  RetOps.push_back(DAG.getConstant(0, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             OutVals[i], Flag);

    // guarantee that all emitted copies are
    // stuck together, avoiding something bad
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                     &RetOps[0], RetOps.size());
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  assert((MI->getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl,
          TII.get(XCore::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent(); // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
//  Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  SDLoc dl(N);
  switch (N->getOpcode()) {
  default: break;
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // canonicalize constant to RHS
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, VT));
      SDValue Ops[] = { Result, Carry };
      return DAG.getMergeValues(Ops, 2, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Carry = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Carry };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, VT), N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS. If both multiplicative
    // operands are constant canonicalize smallest to RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused fold to add(a, b)
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops[] = { Lo, Lo };
        return DAG.getMergeValues(Ops, 2, dl);
      }
      // Otherwise fold to ladd(a, b, 0)
      SDValue Result =
        DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
      SDValue Carry(Result.getNode(), 1);
      SDValue Ops[] = { Carry, Result };
      return DAG.getMergeValues(Ops, 2, dl);
    }
  }
  break;
  case ISD::ADD: {
    // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
    // before type legalization as it is messy to match the operands after
    // that.
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
  case ISD::STORE: {
    // Replace unaligned store of unaligned load with memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    if (!DCI.isBeforeLegalize() ||
        allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    if (StoreBits % 8) {
      break;
    }
    unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(
        ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
    unsigned Alignment = ST->getAlignment();
    if (Alignment >= ABIAlignment) {
      break;
    }

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits/8, MVT::i32),
                              Alignment, false, ST->getPointerInfo(),
                              LD->getPointerInfo());
      }
    }
    break;
  }
  }
  return SDValue();
}

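// The second result of LADD / LSUB is the carry or borrow out, so all bits
// above bit 0 are known to be zero.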
void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                         APInt &KnownZero,
                                                         APInt &KnownOne,
                                                         const SelectionDAG &DAG,
                                                         unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 1) {
      // Top bits of carry / borrow are clear.
      KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                        KnownZero.getBitWidth() - 1);
    }
    break;
  }
}

//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//

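// Predicates for the scaled unsigned immediates accepted by the XCore
// load/store addressing modes: an offset in the range [0, 11], optionally
// pre-scaled by the 2- or 4-byte access size.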
static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val)
{
  return (val%2 == 0 && isImmUs(val/2));
}

static inline bool isImmUs4(int64_t val)
{
  return (val%4 == 0 && isImmUs(val/4));
}

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool
XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           Type *Ty) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  const DataLayout *TD = TM.getDataLayout();
  unsigned Size = TD->getTypeAllocSize(Ty);
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
           AM.BaseOffs%4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }
}

//===----------------------------------------------------------------------===//
//  XCore Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass*>
XCoreTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default : break;
    case 'r':
      return std::make_pair(0U, &XCore::GRRegsRegClass);
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}