//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "xcore-lower"

#include "XCoreISelLowering.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreTargetObjectFile.h"
#include "XCoreTargetMachine.h"
#include "XCoreSubtarget.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/CallingConv.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch (Opcode)
  {
    case XCoreISD::BL                : return "XCoreISD::BL";
    case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
    case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
    case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
    case XCoreISD::STWSP             : return "XCoreISD::STWSP";
    case XCoreISD::RETSP             : return "XCoreISD::RETSP";
    case XCoreISD::LADD              : return "XCoreISD::LADD";
    case XCoreISD::LSUB              : return "XCoreISD::LSUB";
    case XCoreISD::LMUL              : return "XCoreISD::LMUL";
    case XCoreISD::MACCU             : return "XCoreISD::MACCU";
    case XCoreISD::MACCS             : return "XCoreISD::MACCS";
    case XCoreISD::BR_JT             : return "XCoreISD::BR_JT";
    case XCoreISD::BR_JT32           : return "XCoreISD::BR_JT32";
    default                          : return NULL;
  }
}
XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
  : TargetLowering(XTM, new XCoreTargetObjectFile()),
    TM(XTM),
    Subtarget(*XTM.getSubtargetImpl()) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties();

  // Division is expensive
  setIntDivIsCheap(false);

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::RegPressure);

  // Use i32 for setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
  setOperationAction(ISD::BR_CC, MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::ADDC, MVT::i32, Expand);
  setOperationAction(ISD::ADDE, MVT::i32, Expand);
  setOperationAction(ISD::SUBC, MVT::i32, Expand);
  setOperationAction(ISD::SUBE, MVT::i32, Expand);

  // Stop the combiner recombining select and set_cc
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);

  // 64bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL , MVT::i32, Expand);
  setOperationAction(ISD::ROTR , MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32 , Custom);

  // Thread Local Storage
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constantpool nodes
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  maxStoresPerMemset = maxStoresPerMemsetOptSize = 4;
  maxStoresPerMemmove = maxStoresPerMemmoveOptSize
    = maxStoresPerMemcpy = maxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);

  setMinFunctionAlignment(1);
}
SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::GlobalAddress:     return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress:  return LowerGlobalTLSAddress(Op, DAG);
  case ISD::BlockAddress:      return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:      return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:             return LowerBR_JT(Op, DAG);
  case ISD::LOAD:              return LowerLOAD(Op, DAG);
  case ISD::STORE:             return LowerSTORE(Op, DAG);
  case ISD::SELECT_CC:         return LowerSELECT_CC(Op, DAG);
  case ISD::VAARG:             return LowerVAARG(Op, DAG);
  case ISD::VASTART:           return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:         return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:         return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:               return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:         return LowerFRAMEADDR(Op, DAG);
  case ISD::INIT_TRAMPOLINE:   return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE: return LowerADJUST_TRAMPOLINE(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>&Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}
//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//
SDValue XCoreTargetLowering::
LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
{
  DebugLoc dl = Op.getDebugLoc();
  SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2),
                             Op.getOperand(3), Op.getOperand(4));
  return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0),
                     Op.getOperand(1));
}
SDValue XCoreTargetLowering::
getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
                        SelectionDAG &DAG) const
{
  // FIXME there is no actual debug info here
  DebugLoc dl = GA.getDebugLoc();
  if (isa<Function>(GV)) {
    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
  }
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee to determine constness
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal());
  }
  bool isConst = GVar && GVar->isConstant();
  if (isConst) {
    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
  }
  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}
SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(), MVT::i32);
  return getGlobalAddressWrapper(GA, GV, DAG);
}
static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) {
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
                     DAG.getConstant(Intrinsic::xcore_getid, MVT::i32));
}

static inline bool isZeroLengthArray(Type *Ty) {
  ArrayType *AT = dyn_cast_or_null<ArrayType>(Ty);
  return AT && (AT->getNumElements() == 0);
}
SDValue XCoreTargetLowering::
LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const
{
  // FIXME there isn't really debug info here
  DebugLoc dl = Op.getDebugLoc();
  // transform to label + getid() * size
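  // Illustrative example (descriptive comment, not from the original source):
  // for a 12-byte thread-local object, the thread id returned by getid()
  // selects that thread's copy, so thread N computes  label + N * 12.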
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, dl, MVT::i32);
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee to determine size
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal());
  }
  if (!GVar) {
    llvm_unreachable("Thread local object not a GlobalVariable?");
  }
  Type *Ty = cast<PointerType>(GV->getType())->getElementType();
  if (!Ty->isSized() || isZeroLengthArray(Ty)) {
#ifndef NDEBUG
    errs() << "Size of thread local object " << GVar->getName()
           << " is unknown\n";
#endif
    llvm_unreachable(0);
  }
  SDValue base = getGlobalAddressWrapper(GA, GV, DAG);
  const TargetData *TD = TM.getTargetData();
  unsigned Size = TD->getTypeAllocSize(Ty);
  SDValue offset = DAG.getNode(ISD::MUL, dl, MVT::i32, BuildGetId(DAG, dl),
                               DAG.getConstant(Size, MVT::i32));
  return DAG.getNode(ISD::ADD, dl, MVT::i32, base, offset);
}
SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  DebugLoc DL = Op.getDebugLoc();

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), /*isTarget=*/true);

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, getPointerTy(), Result);
}
SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  DebugLoc dl = CP->getDebugLoc();
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}
unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}
SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
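  // Descriptive note (not from the original source): tables of up to 32
  // entries use the short BR_JT form; larger tables fall through to BR_JT32,
  // and the index is doubled below, presumably because each BR_JT32 entry
  // occupies two slots.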
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}
static bool
IsWordAlignedBasePlusConstantOffset(SDValue Addr, SDValue &AlignedBase,
                                    int64_t &Offset)
{
  if (Addr.getOpcode() != ISD::ADD) {
    return false;
  }
  ConstantSDNode *CN = 0;
  if (!(CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    return false;
  }
  int64_t off = CN->getSExtValue();
  const SDValue &Base = Addr.getOperand(0);
  const SDValue *Root = &Base;
  if (Base.getOpcode() == ISD::ADD &&
      Base.getOperand(1).getOpcode() == ISD::SHL) {
    ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Base.getOperand(1)
                                                      .getOperand(1));
    if (CN && (CN->getSExtValue() >= 2)) {
      Root = &Base.getOperand(0);
    }
  }
  if (isa<FrameIndexSDNode>(*Root)) {
    // All frame indices are word aligned
    AlignedBase = Base;
    Offset = off;
    return true;
  }
  if (Root->getOpcode() == XCoreISD::DPRelativeWrapper ||
      Root->getOpcode() == XCoreISD::CPRelativeWrapper) {
    // All dp / cp relative addresses are word aligned
    AlignedBase = Base;
    Offset = off;
    return true;
  }
  // Check for an aligned global variable.
  if (GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(*Root)) {
    const GlobalValue *GV = GA->getGlobal();
    if (GA->getOffset() == 0 && GV->getAlignment() >= 4) {
      AlignedBase = Base;
      Offset = off;
      return true;
    }
  }
  return false;
}
SDValue XCoreTargetLowering::
LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsUnalignedMemoryAccesses(LD->getMemoryVT()))
    return SDValue();

  unsigned ABIAlignment = getTargetData()->
    getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned load alone.
  if (LD->getAlignment() >= ABIAlignment)
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  DebugLoc DL = Op.getDebugLoc();

  SDValue Base;
  int64_t Offset;
  if (!LD->isVolatile() &&
      IsWordAlignedBasePlusConstantOffset(BasePtr, Base, Offset)) {
    if (Offset % 4 == 0) {
      // We've managed to infer better alignment information than the load
      // already has. Use an aligned load.
      return DAG.getLoad(getPointerTy(), DL, Chain, BasePtr,
                         MachinePointerInfo(),
                         false, false, false, 0);
    }
    // Lower to
    // ldw low, base[offset >> 2]
    // ldw high, base[(offset >> 2) + 1]
    // shr low_shifted, low, (offset & 0x3) * 8
    // shl high_shifted, high, 32 - (offset & 0x3) * 8
    // or result, low_shifted, high_shifted
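    // Worked example (illustrative, not from the original source): when
    // offset % 4 == 2, the low word is shifted right by 16 so its upper two
    // bytes land in the bottom of the result, and the next word is shifted
    // left by 16 so its lower two bytes fill the top; OR-ing the two
    // reassembles the misaligned 32-bit value.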
    SDValue LowOffset = DAG.getConstant(Offset & ~0x3, MVT::i32);
    SDValue HighOffset = DAG.getConstant((Offset & ~0x3) + 4, MVT::i32);
    SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, MVT::i32);
    SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, MVT::i32);

    SDValue LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, LowOffset);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base, HighOffset);

    SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
                              LowAddr, MachinePointerInfo(),
                              false, false, false, 0);
    SDValue High = DAG.getLoad(getPointerTy(), DL, Chain,
                               HighAddr, MachinePointerInfo(),
                               false, false, false, 0);
    SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, DL);
  }
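  // Descriptive note (not in the original): a load known to be only 2-byte
  // aligned is split into two halfword loads; the upper half is shifted into
  // place and OR-ed with the zero-extended lower half.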
  if (LD->getAlignment() == 2) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
                                 BasePtr, LD->getPointerInfo(), MVT::i16,
                                 LD->isVolatile(), LD->isNonTemporal(), 2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                  HighAddr,
                                  LD->getPointerInfo().getWithOffset(2),
                                  MVT::i16, LD->isVolatile(),
                                  LD->isNonTemporal(), 2);
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, DL);
  }
  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(Chain, IntPtrTy, false, false,
                    false, false, 0, CallingConv::C, /*isTailCall=*/false,
                    /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
                    Args, DAG, DL);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  SDValue Ops[] =
    { CallResult.first, CallResult.second };

  return DAG.getMergeValues(Ops, 2, DL);
}
SDValue XCoreTargetLowering::
LowerSTORE(SDValue Op, SelectionDAG &DAG) const
{
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
    return SDValue();
  }
  unsigned ABIAlignment = getTargetData()->
    getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned store alone.
  if (ST->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  DebugLoc dl = Op.getDebugLoc();
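  // Descriptive note (not in the original): a store known to be only 2-byte
  // aligned is emitted as two truncating halfword stores, the low half at
  // BasePtr and the high half at BasePtr + 2.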
  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
                                         ST->getPointerInfo(), MVT::i16,
                                         ST->isVolatile(), ST->isNonTemporal(),
                                         2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
                                          ST->getPointerInfo().getWithOffset(2),
                                          MVT::i16, ST->isVolatile(),
                                          ST->isNonTemporal(), 2);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }
  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(Chain,
                    Type::getVoidTy(*DAG.getContext()), false, false,
                    false, false, 0, CallingConv::C, /*isTailCall=*/false,
                    /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
                    Args, DAG, dl);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  return CallResult.second;
}
SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  DebugLoc dl = Op.getDebugLoc();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}
SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  DebugLoc dl = Op.getDebugLoc();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}
/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
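//
// For example (illustrative): the DAG  (add (add (mul x, y), a), b)  matches
// with Mul0 = x, Mul1 = y, Addend0 = a and Addend1 = b; the commuted forms
// handled below bind the same four parts in a different order.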
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}
SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  DebugLoc dl = N->getDebugLoc();
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
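  // Descriptive note (not in the original): in the general case the 64-bit
  // result is rebuilt from 32-bit halves. MACCU computes addend + LL*RL,
  // yielding the low word and an initial high word; the cross products LL*RH
  // and LH*RL only affect the high word and are added to it afterwards.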
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD) {
    SDValue Result = TryExpandADDWithMul(N, DAG);
    if (Result.getNode() != 0)
      return Result;
  }

  DebugLoc dl = N->getDebugLoc();

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(0, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(1, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(0, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(1, MVT::i32));

  // Expand
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Carry = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                              LHSL, RHSL, Zero);
  SDValue Lo(Carry.getNode(), 1);

  SDValue Ignored = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                                LHSH, RHSH, Carry);
  SDValue Hi(Ignored.getNode(), 1);

  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  llvm_unreachable("unimplemented");
  // FIXME Arguments passed by reference need an extra dereference.
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0),
                               Node->getOperand(1), MachinePointerInfo(V),
                               false, false, false, 0);
  // Increment the pointer, VAList, to the next vararg
  SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
                             DAG.getConstant(VT.getSizeInBits(),
                                             getPointerTy()));
  // Store the incremented VAList to the legalized pointer
  Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1),
                      MachinePointerInfo(V), false, false, 0);
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
                     false, false, false, 0);
}
SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  DebugLoc dl = Op.getDebugLoc();
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo(), false, false, 0);
}
SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                            RegInfo->getFrameRegister(MF), MVT::i32);
}
SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}
SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // .align 4
  // LDAPF_u10 r11, nest
  // LDW_2rus r11, r11[0]
  // STWSP_ru6 r11, sp[0]
  // LDAPF_u10 r11, fptr
  // LDW_2rus r11, r11[0]
  // BAU_1r r11
  // nest:
  // .word nest
  // fptr:
  // .word fptr
  SDValue OutChains[5];
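  // Layout note (descriptive, not from the original source): the five stores
  // below fill a 20-byte trampoline. Words 0-2 hold the pre-encoded
  // instruction sequence sketched above, word 3 holds the 'nest' value and
  // word 4 holds the target function pointer; the code loads both words and
  // branches to the target with the nest value placed at sp[0].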
  SDValue Addr = Trmp;

  DebugLoc dl = Op.getDebugLoc();
  OutChains[0] = DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, MVT::i32));
  OutChains[1] = DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 4), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, MVT::i32));
  OutChains[2] = DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 8), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, MVT::i32));
  OutChains[3] = DAG.getStore(Chain, dl, Nest, Addr,
                              MachinePointerInfo(TrmpAddr, 12), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, MVT::i32));
  OutChains[4] = DAG.getStore(Chain, dl, FPtr, Addr,
                              MachinePointerInfo(TrmpAddr, 16), false, false,
                              0);

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 5);
}
//===----------------------------------------------------------------------===//
//             Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//             Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  DebugLoc &dl = CLI.DL;
  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &isTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool isVarArg = CLI.IsVarArg;

  // XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C implemented
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::Fast:
    case CallingConv::C:
      return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                            Outs, OutVals, Ins, dl, DAG, InVals);
  }
}
/// LowerCCCCallTo - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
SDValue
XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
                                                      getPointerTy(), true));

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    // Arguments that can be passed on register must be kept in the
    // RegsToPass vector
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();
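      // Descriptive note (not in the original): STWSP stores relative to the
      // stack pointer using a word offset, so the byte offset produced by the
      // calling-convention analysis is divided by 4 below.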
      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //             = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, getPointerTy(), true),
                             DAG.getConstant(0, getPointerTy(), true),
                             InFlag);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                     CallingConv::ID CallConv, bool isVarArg,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     DebugLoc dl, SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  CCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}
//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//
/// XCore formal arguments implementation
SDValue
XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg> &Ins,
                                          DebugLoc dl,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals)
                                            const {
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::Fast:
      return LowerCCCArguments(Chain, CallConv, isVarArg,
                               Ins, dl, DAG, InVals);
  }
}
/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
/// TODO: sret
SDValue
XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
                                       CallingConv::ID CallConv,
                                       bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg>
                                         &Ins,
                                       DebugLoc dl,
                                       SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getSimpleVT().SimpleTy << "\n";
#endif
          llvm_unreachable(0);
        }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      }
    } else {
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize,
                                      LRSaveSize + VA.getLocMemOffset(),
                                      true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                                   MachinePointerInfo::getFixedStack(FI),
                                   false, false, false, 0));
    }
  }
  if (isVarArg) {
    /* Argument registers */
    static const uint16_t ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
                                                     array_lengthof(ArgRegs));
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      SmallVector<SDValue, 4> MemOps;
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot
        int FI = MFI->CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        // Move argument from virt reg -> stack
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                     MachinePointerInfo(), false, false, 0);
        MemOps.push_back(Store);
      }
      if (!MemOps.empty())
        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            &MemOps[0], MemOps.size());
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
        MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                               true));
    }
  }

  return Chain;
}
//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//
bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_XCore);
}
SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 DebugLoc dl, SelectionDAG &DAG) const {

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  // If this is the first return lowered for this function, add
  // the regs to the liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             OutVals[i], Flag);

    // guarantee that all emitted copies are
    // stuck together, avoiding something bad
    Flag = Chain.getValue(1);
  }

  // Return on XCore is always a "retsp 0"
  if (Flag.getNode())
    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                       Chain, DAG.getConstant(0, MVT::i32), Flag);
  else // Return Void
    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                       Chain, DAG.getConstant(0, MVT::i32));
}
//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//
MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  assert((MI->getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern. The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl,
          TII.get(XCore::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}
//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//
SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  DebugLoc dl = N->getDebugLoc();
  switch (N->getOpcode()) {
  default: break;
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // canonicalize constant to RHS
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, VT));
      SDValue Ops [] = { Carry, Result };
      return DAG.getMergeValues(Ops, 2, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Carry = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops [] = { Carry, Result };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, VT), N2);
        SDValue Ops [] = { Borrow, Result };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops [] = { Borrow, Result };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS. If both multiplicative
    // operands are constant canonicalize smallest to RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused fold to add(a, b)
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops [] = { Lo, Lo };
        return DAG.getMergeValues(Ops, 2, dl);
      }
      // Otherwise fold to ladd(a, b, 0)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
    }
  }
  break;
  case ISD::ADD: {
    // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
    // before type legalization as it is messy to match the operands after
    // that.
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
1489 // Replace unaligned store of unaligned load with memmove.
1490 StoreSDNode *ST = cast<StoreSDNode>(N);
1491 if (!DCI.isBeforeLegalize() ||
1492 allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
1493 ST->isVolatile() || ST->isIndexed()) {
1496 SDValue Chain = ST->getChain();
1498 unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
1499 if (StoreBits % 8) {
1502 unsigned ABIAlignment = getTargetData()->getABITypeAlignment(
1503 ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
1504 unsigned Alignment = ST->getAlignment();
1505 if (Alignment >= ABIAlignment) {
1509 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
1510 if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
1511 LD->getAlignment() == Alignment &&
1512 !LD->isVolatile() && !LD->isIndexed() &&
1513 Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
1514 return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
1516 DAG.getConstant(StoreBits/8, MVT::i32),
1517 Alignment, false, ST->getPointerInfo(),
1518 LD->getPointerInfo());
void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                         APInt &KnownZero,
                                                         APInt &KnownOne,
                                                         const SelectionDAG &DAG,
                                                         unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 0) {
      // Top bits of carry / borrow are clear.
      KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                        KnownZero.getBitWidth() - 1);
    }
    break;
  }
}
//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//
static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val)
{
  return (val%2 == 0 && isImmUs(val/2));
}

static inline bool isImmUs4(int64_t val)
{
  return (val%4 == 0 && isImmUs(val/4));
}
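// Descriptive note (not in the original): isImmUs matches the unsigned short
// immediate range 0..11, so isImmUs2 accepts even byte offsets up to 22 and
// isImmUs4 accepts word-multiple offsets up to 44. These correspond to the
// scaled offset forms checked per access size in isLegalAddressingMode below.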
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool
XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           Type *Ty) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  const TargetData *TD = TM.getTargetData();
  unsigned Size = TD->getTypeAllocSize(Ty);
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
           AM.BaseOffs%4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }
}
//===----------------------------------------------------------------------===//
//  XCore Inline Assembly Support
//===----------------------------------------------------------------------===//
std::pair<unsigned, const TargetRegisterClass*>
XCoreTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             EVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default : break;
    case 'r':
      return std::make_pair(0U, &XCore::GRRegsRegClass);
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}