//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "xcore-lower"

#include "XCoreISelLowering.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCore.h"
#include "XCoreTargetObjectFile.h"
#include "XCoreTargetMachine.h"
#include "XCoreSubtarget.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/CallingConv.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/VectorExtras.h"

using namespace llvm;

const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch (Opcode)
  {
    case XCoreISD::BL                : return "XCoreISD::BL";
    case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
    case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
    case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
    case XCoreISD::STWSP             : return "XCoreISD::STWSP";
    case XCoreISD::RETSP             : return "XCoreISD::RETSP";
    case XCoreISD::LADD              : return "XCoreISD::LADD";
    case XCoreISD::LSUB              : return "XCoreISD::LSUB";
    case XCoreISD::LMUL              : return "XCoreISD::LMUL";
    case XCoreISD::MACCU             : return "XCoreISD::MACCU";
    case XCoreISD::MACCS             : return "XCoreISD::MACCS";
    case XCoreISD::BR_JT             : return "XCoreISD::BR_JT";
    case XCoreISD::BR_JT32           : return "XCoreISD::BR_JT32";
    default                          : return NULL;
  }
}

XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
  : TargetLowering(XTM, new XCoreTargetObjectFile()),
    TM(XTM),
    Subtarget(*XTM.getSubtargetImpl()) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, XCore::GRRegsRegisterClass);

  // Compute derived properties from the register classes
  computeRegisterProperties();

  // Division is expensive
  setIntDivIsCheap(false);

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::RegPressure);

  // Use i32 for setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);

  // XCore does not have the NodeTypes below.
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::ADDC, MVT::i32, Expand);
  setOperationAction(ISD::ADDE, MVT::i32, Expand);
  setOperationAction(ISD::SUBC, MVT::i32, Expand);
  setOperationAction(ISD::SUBE, MVT::i32, Expand);

  // Stop the combiner recombining select and set_cc
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);

  // 64bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL , MVT::i32, Expand);
  setOperationAction(ISD::ROTR , MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32 , Custom);

  // Thread Local Storage
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constantpool nodes
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  maxStoresPerMemset = maxStoresPerMemsetOptSize = 4;
  maxStoresPerMemmove = maxStoresPerMemmoveOptSize
    = maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);

  setMinFunctionAlignment(1);
}

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::GlobalAddress:    return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::BlockAddress:     return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:     return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:            return LowerBR_JT(Op, DAG);
  case ISD::LOAD:             return LowerLOAD(Op, DAG);
  case ISD::STORE:            return LowerSTORE(Op, DAG);
  case ISD::SELECT_CC:        return LowerSELECT_CC(Op, DAG);
  case ISD::VAARG:            return LowerVAARG(Op, DAG);
  case ISD::VASTART:          return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:        return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:        return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:              return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:        return LowerFRAMEADDR(Op, DAG);
  case ISD::INIT_TRAMPOLINE:  return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
    return SDValue();
  }
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>&Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
    return;
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::
LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
{
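  // Decompose select_cc into a separate setcc and select. The select is
  // subsequently matched as the SELECT_CC pseudo instruction, which
  // EmitInstrWithCustomInserter below expands into a control-flow diamond.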
  DebugLoc dl = Op.getDebugLoc();
  SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2),
                             Op.getOperand(3), Op.getOperand(4));
  return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0),
                     Op.getOperand(1));
}

SDValue XCoreTargetLowering::
getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
                        SelectionDAG &DAG) const
{
  // FIXME there is no actual debug info here
  DebugLoc dl = GA.getDebugLoc();
  if (isa<Function>(GV)) {
    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
  }
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee to determine constness
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal());
  }
  bool isConst = GVar && GVar->isConstant();
  if (isConst) {
    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
  }
  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(), MVT::i32);
  return getGlobalAddressWrapper(GA, GV, DAG);
}

static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) {
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
                     DAG.getConstant(Intrinsic::xcore_getid, MVT::i32));
}

static inline bool isZeroLengthArray(Type *Ty) {
  ArrayType *AT = dyn_cast_or_null<ArrayType>(Ty);
  return AT && (AT->getNumElements() == 0);
}

SDValue XCoreTargetLowering::
LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
{
  // FIXME there isn't really debug info here
  DebugLoc dl = Op.getDebugLoc();
  // transform to label + getid() * size
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32);
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee to determine size
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal());
  }
  if (!GVar) {
    llvm_unreachable("Thread local object not a GlobalVariable?");
    return SDValue();
  }
  Type *Ty = cast<PointerType>(GV->getType())->getElementType();
  if (!Ty->isSized() || isZeroLengthArray(Ty)) {
    errs() << "Size of thread local object " << GVar->getName()
           << " is unknown\n";
    llvm_unreachable(0);
  }
  SDValue base = getGlobalAddressWrapper(GA, GV, DAG);
  const TargetData *TD = TM.getTargetData();
  unsigned Size = TD->getTypeAllocSize(Ty);
  SDValue offset = DAG.getNode(ISD::MUL, dl, MVT::i32, BuildGetId(DAG, dl),
                               DAG.getConstant(Size, MVT::i32));
  return DAG.getNode(ISD::ADD, dl, MVT::i32, base, offset);
}

SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  DebugLoc DL = Op.getDebugLoc();

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), /*isTarget=*/true);

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, getPointerTy(), Result);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  DebugLoc dl = CP->getDebugLoc();
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}

unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}

SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
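  // Larger tables use BR_JT32 with 32-bit entries; the index is pre-scaled
  // by 2 since each 32-bit entry occupies two 16-bit instruction slots.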
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}

static bool
IsWordAlignedBasePlusConstantOffset(SDValue Addr, SDValue &AlignedBase,
                                    int64_t &Offset)
{
  if (Addr.getOpcode() != ISD::ADD) {
    return false;
  }
  ConstantSDNode *CN = 0;
  if (!(CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    return false;
  }
  int64_t off = CN->getSExtValue();
  const SDValue &Base = Addr.getOperand(0);
  const SDValue *Root = &Base;
  if (Base.getOpcode() == ISD::ADD &&
      Base.getOperand(1).getOpcode() == ISD::SHL) {
    ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Base.getOperand(1)
                                                      .getOperand(1));
    if (CN && (CN->getSExtValue() >= 2)) {
      Root = &Base.getOperand(0);
    }
  }
  if (isa<FrameIndexSDNode>(*Root)) {
    // All frame indices are word aligned
    AlignedBase = Base;
    Offset = off;
    return true;
  }
  if (Root->getOpcode() == XCoreISD::DPRelativeWrapper ||
      Root->getOpcode() == XCoreISD::CPRelativeWrapper) {
    // All dp / cp relative addresses are word aligned
    AlignedBase = Base;
    Offset = off;
    return true;
  }
  return false;
}

SDValue XCoreTargetLowering::
LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsUnalignedMemoryAccesses(LD->getMemoryVT()))
    return SDValue();

  unsigned ABIAlignment = getTargetData()->
    getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned load alone.
  if (LD->getAlignment() >= ABIAlignment)
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  DebugLoc DL = Op.getDebugLoc();

  SDValue Base;
  int64_t Offset;
  if (!LD->isVolatile() &&
      IsWordAlignedBasePlusConstantOffset(BasePtr, Base, Offset)) {
    if (Offset % 4 == 0) {
      // We've managed to infer better alignment information than the load
      // already has. Use an aligned load.
      return DAG.getLoad(getPointerTy(), DL, Chain, BasePtr,
                         MachinePointerInfo(),
                         false, false, 0);
    }
    // Lower to
    // ldw low, base[offset >> 2]
    // ldw high, base[(offset >> 2) + 1]
    // shr low_shifted, low, (offset & 0x3) * 8
    // shl high_shifted, high, 32 - (offset & 0x3) * 8
    // or result, low_shifted, high_shifted
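    // For example, Offset == 6 reads the aligned words at base[4] and base[8],
    // shifts low right by (6 & 0x3) * 8 == 16 and high left by 32 - 16 == 16,
    // and then ors the two halves together.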
    SDValue LowOffset = DAG.getConstant(Offset & ~0x3, MVT::i32);
    SDValue HighOffset = DAG.getConstant((Offset & ~0x3) + 4, MVT::i32);
    SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, MVT::i32);
    SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, MVT::i32);

    SDValue LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, LowOffset);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, HighOffset);

    SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
                              LowAddr, MachinePointerInfo(), false, false, 0);
    SDValue High = DAG.getLoad(getPointerTy(), DL, Chain,
                               HighAddr, MachinePointerInfo(), false, false, 0);
    SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, DL);
  }
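
  // A load with alignment 2 can instead be done as two aligned 16-bit loads;
  // the target is little endian, so result = low | (high << 16).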
  if (LD->getAlignment() == 2) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
                                 BasePtr, LD->getPointerInfo(), MVT::i16,
                                 LD->isVolatile(), LD->isNonTemporal(), 2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                  HighAddr,
                                  LD->getPointerInfo().getWithOffset(2),
                                  MVT::i16, LD->isVolatile(),
                                  LD->isNonTemporal(), 2);
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  std::pair<SDValue, SDValue> CallResult =
        LowerCallTo(Chain, IntPtrTy, false, false,
                    false, false, 0, CallingConv::C, false,
                    /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
                    Args, DAG, DL);

  SDValue Ops[] =
    { CallResult.first, CallResult.second };

  return DAG.getMergeValues(Ops, 2, DL);
}

SDValue XCoreTargetLowering::
LowerSTORE(SDValue Op, SelectionDAG &DAG) const
{
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
    return SDValue();
  }
  unsigned ABIAlignment = getTargetData()->
    getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned store alone.
  if (ST->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  DebugLoc dl = Op.getDebugLoc();

  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
                                         ST->getPointerInfo(), MVT::i16,
                                         ST->isVolatile(), ST->isNonTemporal(),
                                         2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
                                          ST->getPointerInfo().getWithOffset(2),
                                          MVT::i16, ST->isVolatile(),
                                          ST->isNonTemporal(), 2);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  std::pair<SDValue, SDValue> CallResult =
        LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()), false, false,
                    false, false, 0, CallingConv::C, false,
                    /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
                    Args, DAG, dl);

  return CallResult.second;
}
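
// smul_lohi is lowered as a multiply-accumulate with zero addends:
// maccs(0, 0, x, y) yields the full 64-bit signed product of x and y.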
SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  DebugLoc dl = Op.getDebugLoc();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}
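
// Likewise, umul_lohi is an lmul with zero addends, giving the full 64-bit
// unsigned product.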
SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  DebugLoc dl = Op.getDebugLoc();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}

SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  DebugLoc dl = N->getDebugLoc();
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
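  // General case: write the product as (LL + LH*2^32) * (RL + RH*2^32). Only
  // the low 64 bits are required, i.e. LL*RL + (LL*RH + LH*RL)*2^32, so one
  // maccu produces LL*RL plus the addend and the two 32-bit cross products
  // are folded into the high word.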
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD) {
    SDValue Result = TryExpandADDWithMul(N, DAG);
    if (Result.getNode() != 0)
      return Result;
  }

  DebugLoc dl = N->getDebugLoc();

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(0, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(1, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(0, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(1, MVT::i32));
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Carry = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                              LHSL, RHSL, Zero);
  SDValue Lo(Carry.getNode(), 1);

  SDValue Ignored = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                                LHSH, RHSH, Carry);
  SDValue Hi(Ignored.getNode(), 1);

  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  llvm_unreachable("unimplemented");
  // FIXME: Arguments passed by reference need an extra dereference.
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0),
                               Node->getOperand(1), MachinePointerInfo(V),
                               false, false, 0);
  // Increment the pointer, VAList, to the next vararg
  SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
                             DAG.getConstant(VT.getSizeInBits(),
                                             getPointerTy()));
  // Store the incremented VAList to the legalized pointer
  Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1),
                      MachinePointerInfo(V), false, false, 0);
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
                     false, false, 0);
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  DebugLoc dl = Op.getDebugLoc();
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo(), false, false, 0);
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}

SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // .align 4
  // LDAPF_u10 r11, nest
  // LDW_2rus r11, r11[0]
  // STWSP_ru6 r11, sp[0]
  // LDAPF_u10 r11, fptr
  // LDW_2rus r11, r11[0]
  // BAU_1r r11
  // nest:
  // .word nest
  // fptr:
  // .word fptr
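  //
  // The first three words written below hold the machine code for this stub;
  // the words at offsets 12 and 16 hold the nest parameter value and the
  // address of the nested function, which the stub loads via the LDAPF/LDW
  // pairs above.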
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  DebugLoc dl = Op.getDebugLoc();
  OutChains[0] = DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, MVT::i32));
  OutChains[1] = DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 4), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, MVT::i32));
  OutChains[2] = DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 8), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, MVT::i32));
  OutChains[3] = DAG.getStore(Chain, dl, Nest, Addr,
                              MachinePointerInfo(TrmpAddr, 12), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, MVT::i32));
  OutChains[4] = DAG.getStore(Chain, dl, FPtr, Addr,
                              MachinePointerInfo(TrmpAddr, 16), false, false,
                              0);

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 5);
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//                  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                               CallingConv::ID CallConv, bool isVarArg,
                               bool &isTailCall,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<SDValue> &OutVals,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               DebugLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const {
  // XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C implemented
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::Fast:
    case CallingConv::C:
      return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                            Outs, OutVals, Ins, dl, DAG, InVals);
  }
}

/// LowerCCCCallTo - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
SDValue
XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
                                                      getPointerTy(), true));

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    // Arguments that can be passed in a register must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();
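
      // STWSP stores a word at a word offset from sp, so the byte offset is
      // scaled down by 4.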
      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //             = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, getPointerTy(), true),
                             DAG.getConstant(0, getPointerTy(), true),
                             InFlag);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                     CallingConv::ID CallConv, bool isVarArg,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     DebugLoc dl, SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  CCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore formal arguments implementation
SDValue
XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg> &Ins,
                                          DebugLoc dl,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals)
                                            const {
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::Fast:
      return LowerCCCArguments(Chain, CallConv, isVarArg,
                               Ins, dl, DAG, InVals);
  }
}

/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
SDValue
XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
                                       CallingConv::ID CallConv,
                                       bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg>
                                         &Ins,
                                       DebugLoc dl,
                                       SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getSimpleVT().SimpleTy << "\n";
#endif
          llvm_unreachable(0);
        }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(
                          XCore::GRRegsRegisterClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      }
    } else {
      // sanity check
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize,
                                      LRSaveSize + VA.getLocMemOffset(),
                                      true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                                   MachinePointerInfo::getFixedStack(FI),
                                   false, false, 0));
    }
  }

  if (isVarArg) {
    /* Argument registers */
    static const unsigned ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
                                                     array_lengthof(ArgRegs));
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      SmallVector<SDValue, 4> MemOps;
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address
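      // (For example, with one fixed argument R3 spills to offset 0, R2 to
      // -4 and R1 to -8; the vararg frame index then refers to R1's slot,
      // the lowest address.)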
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot
        int FI = MFI->CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg
        unsigned VReg = RegInfo.createVirtualRegister(
                          XCore::GRRegsRegisterClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        // Move argument from virt reg -> stack
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                     MachinePointerInfo(), false, false, 0);
        MemOps.push_back(Store);
      }
      if (!MemOps.empty())
        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            &MemOps[0], MemOps.size());
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
        MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                               true));
    }
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_XCore);
}

SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 DebugLoc dl, SelectionDAG &DAG) const {

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  // If this is the first return lowered for this function, add
  // the regs to the liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             OutVals[i], Flag);

    // Guarantee that all emitted copies are glued together,
    // so they are not scheduled apart.
    Flag = Chain.getValue(1);
  }

  // Return on XCore is always a "retsp 0"
  if (Flag.getNode())
    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                       Chain, DAG.getConstant(0, MVT::i32), Flag);
  else // Return Void
    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                       Chain, DAG.getConstant(0, MVT::i32));
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  assert((MI->getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl,
          TII.get(XCore::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  DebugLoc dl = N->getDebugLoc();
  switch (N->getOpcode()) {
  default: break;
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // canonicalize constant to RHS
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, VT));
      SDValue Ops [] = { Carry, Result };
      return DAG.getMergeValues(Ops, 2, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
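      // y must be provably 0 or 1, i.e. behave exactly like a carry-in; only
      // then is ladd(x, 0, y) the same as an ordinary add of x and y.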
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
      if (KnownZero == Mask) {
        SDValue Carry = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops [] = { Carry, Result };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
      if (KnownZero == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, VT), N2);
        SDValue Ops [] = { Borrow, Result };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
      if (KnownZero == Mask) {
        SDValue Borrow = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops [] = { Borrow, Result };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS. If both multiplicative
    // operands are constant canonicalize smallest to RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused fold to add(a, b)
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops [] = { Lo, Lo };
        return DAG.getMergeValues(Ops, 2, dl);
      }
      // Otherwise fold to ladd(a, b, 0)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
    }
  }
  break;
  case ISD::ADD: {
    // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
    // before type legalization as it is messy to match the operands after
    // that.
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
  case ISD::STORE: {
    // Replace unaligned store of unaligned load with memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    if (!DCI.isBeforeLegalize() ||
        allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    if (StoreBits % 8) {
      break;
    }
    unsigned ABIAlignment = getTargetData()->getABITypeAlignment(
        ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
    unsigned Alignment = ST->getAlignment();
    if (Alignment >= ABIAlignment) {
      break;
    }

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
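        // memmove (rather than memcpy) is used because nothing here proves
        // that the source and destination do not overlap.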
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits/8, MVT::i32),
                              Alignment, false, ST->getPointerInfo(),
                              LD->getPointerInfo());
      }
    }
    break;
  }
  }
  return SDValue();
}

void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                         const APInt &Mask,
                                                         APInt &KnownZero,
                                                         APInt &KnownOne,
                                                         const SelectionDAG &DAG,
                                                         unsigned Depth) const {
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 0) {
      // Top bits of carry / borrow are clear.
      KnownZero = APInt::getHighBitsSet(Mask.getBitWidth(),
                                        Mask.getBitWidth() - 1);
      KnownZero &= Mask;
    }
    break;
  }
}

//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//

static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val)
{
  return (val%2 == 0 && isImmUs(val/2));
}

static inline bool isImmUs4(int64_t val)
{
  return (val%4 == 0 && isImmUs(val/4));
}
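
// The isImmUs* helpers above describe the immediate offsets the target can
// fold into a load/store: an unsigned value in 0..11, scaled by the access
// size (so 0..22 for 2-byte and 0..44 for 4-byte accesses).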

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool
XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           Type *Ty) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  const TargetData *TD = TM.getTargetData();
  unsigned Size = TD->getTypeAllocSize(Ty);
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
                 AM.BaseOffs%4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }

  return false;
}

//===----------------------------------------------------------------------===//
//                           XCore Inline Assembly Support
//===----------------------------------------------------------------------===//

std::pair<unsigned, const TargetRegisterClass*>
XCoreTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             EVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default : break;
    case 'r':
      return std::make_pair(0U, XCore::GRRegsRegisterClass);
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}