//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "xcore-lower"

#include "XCoreISelLowering.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCore.h"
#include "XCoreTargetObjectFile.h"
#include "XCoreTargetMachine.h"
#include "XCoreSubtarget.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Intrinsics.h"
#include "llvm/CallingConv.h"
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/VectorExtras.h"
#include <queue>
#include <set>

using namespace llvm;

const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch (Opcode)
  {
    case XCoreISD::BL                : return "XCoreISD::BL";
    case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
    case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
    case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
    case XCoreISD::STWSP             : return "XCoreISD::STWSP";
    case XCoreISD::RETSP             : return "XCoreISD::RETSP";
    case XCoreISD::LADD              : return "XCoreISD::LADD";
    case XCoreISD::LSUB              : return "XCoreISD::LSUB";
    case XCoreISD::LMUL              : return "XCoreISD::LMUL";
    case XCoreISD::MACCU             : return "XCoreISD::MACCU";
    case XCoreISD::MACCS             : return "XCoreISD::MACCS";
    case XCoreISD::BR_JT             : return "XCoreISD::BR_JT";
    case XCoreISD::BR_JT32           : return "XCoreISD::BR_JT32";
    default                          : return NULL;
  }
}

XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
  : TargetLowering(XTM, new XCoreTargetObjectFile()),
    TM(XTM),
    Subtarget(*XTM.getSubtargetImpl()) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, XCore::GRRegsRegisterClass);

  // Compute derived properties from the register classes
  computeRegisterProperties();

  // Division is expensive
  setIntDivIsCheap(false);

  setShiftAmountType(MVT::i32);
  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(SchedulingForRegPressure);

  // Use i32 for setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);

  // XCore does not have the NodeTypes below.
  setOperationAction(ISD::BR_CC,     MVT::Other, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32,   Custom);
  setOperationAction(ISD::ADDC, MVT::i32, Expand);
  setOperationAction(ISD::ADDE, MVT::i32, Expand);
  setOperationAction(ISD::SUBC, MVT::i32, Expand);
  setOperationAction(ISD::SUBE, MVT::i32, Expand);

  // Stop the combiner recombining select and set_cc
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);

  // 64bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL , MVT::i32, Expand);
  setOperationAction(ISD::ROTR , MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32 , Custom);

  // Thread Local Storage
  setOperationAction(ISD::GlobalTLSAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constantpool nodes
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);
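  // XCore's byte load (ld8u) zero-extends and its halfword load (ld16s)
  // sign-extends, so the opposite extensions above presumably have no
  // single-instruction form and are expanded into a load plus an extend.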

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  maxStoresPerMemset = 4;
  maxStoresPerMemmove = maxStoresPerMemcpy = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);
}

SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) {
  switch (Op.getOpcode())
  {
  case ISD::GlobalAddress:    return LowerGlobalAddress(Op, DAG);
  case ISD::GlobalTLSAddress: return LowerGlobalTLSAddress(Op, DAG);
  case ISD::BlockAddress:     return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:     return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:            return LowerBR_JT(Op, DAG);
  case ISD::LOAD:             return LowerLOAD(Op, DAG);
  case ISD::STORE:            return LowerSTORE(Op, DAG);
  case ISD::SELECT_CC:        return LowerSELECT_CC(Op, DAG);
  case ISD::VAARG:            return LowerVAARG(Op, DAG);
  case ISD::VASTART:          return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:        return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:        return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:              return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:        return LowerFRAMEADDR(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
    return SDValue();
  }
}

/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>&Results,
                                             SelectionDAG &DAG) {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
    return;
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}

/// getFunctionAlignment - Return the Log2 alignment of this function.
unsigned XCoreTargetLowering::
getFunctionAlignment(const Function *) const {
  return 1;
}

//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::
LowerSELECT_CC(SDValue Op, SelectionDAG &DAG)
{
  DebugLoc dl = Op.getDebugLoc();
  SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2),
                             Op.getOperand(3), Op.getOperand(4));
  return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0),
                     Op.getOperand(1));
}

SDValue XCoreTargetLowering::
getGlobalAddressWrapper(SDValue GA, GlobalValue *GV, SelectionDAG &DAG)
{
  // FIXME there is no actual debug info here
  DebugLoc dl = GA.getDebugLoc();
  if (isa<Function>(GV)) {
    return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
  }
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee to determine constness
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal());
  }
  bool isConst = GVar && GVar->isConstant();
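  // Constant data is placed in the constant-pool section and addressed
  // relative to the cp register; mutable globals live in the data region
  // and are addressed relative to dp, per the XCore ABI's use of those
  // base-pointer registers.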
  if (isConst) {
    return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
  }
  return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
}

SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG)
{
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
  // If it's a debug information descriptor, don't mess with it.
  if (DAG.isVerifiedDebugInfoDesc(Op))
    return GA;
  return getGlobalAddressWrapper(GA, GV, DAG);
}

static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) {
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
                     DAG.getConstant(Intrinsic::xcore_getid, MVT::i32));
}

static inline bool isZeroLengthArray(const Type *Ty) {
  const ArrayType *AT = dyn_cast_or_null<ArrayType>(Ty);
  return AT && (AT->getNumElements() == 0);
}

SDValue XCoreTargetLowering::
LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG)
{
  // FIXME there isn't really debug info here
  DebugLoc dl = Op.getDebugLoc();
  // transform to label + getid() * size
  GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, MVT::i32);
  const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV);
  if (!GVar) {
    // If GV is an alias then use the aliasee to determine size
    if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
      GVar = dyn_cast_or_null<GlobalVariable>(GA->resolveAliasedGlobal());
  }
  if (!GVar) {
    llvm_unreachable("Thread local object not a GlobalVariable?");
    return SDValue();
  }
  const Type *Ty = cast<PointerType>(GV->getType())->getElementType();
  if (!Ty->isSized() || isZeroLengthArray(Ty)) {
#ifndef NDEBUG
    errs() << "Size of thread local object " << GVar->getName()
           << " is unknown\n";
#endif
    llvm_unreachable(0);
  }
  SDValue base = getGlobalAddressWrapper(GA, GV, DAG);
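  // Each hardware thread gets its own copy of the object: the final address
  // is base + getid() * size, indexing the per-thread copies by thread id.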
  const TargetData *TD = TM.getTargetData();
  unsigned Size = TD->getTypeAllocSize(Ty);
  SDValue offset = DAG.getNode(ISD::MUL, dl, MVT::i32, BuildGetId(DAG, dl),
                               DAG.getConstant(Size, MVT::i32));
  return DAG.getNode(ISD::ADD, dl, MVT::i32, base, offset);
}

SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG)
{
  DebugLoc DL = Op.getDebugLoc();

  BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getBlockAddress(BA, getPointerTy(), /*isTarget=*/true);

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, getPointerTy(), Result);
}

SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG)
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  DebugLoc dl = CP->getDebugLoc();
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}

SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG)
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
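  // Small tables use BR_JT, whose entries are single-word branches; larger
  // tables use BR_JT32 with two-word entries, hence the index is scaled by
  // 2 below. The 32-entry cutoff presumably matches the reach of the short
  // branch encoding.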
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}

static bool
IsWordAlignedBasePlusConstantOffset(SDValue Addr, SDValue &AlignedBase,
                                    int64_t &Offset)
{
  if (Addr.getOpcode() != ISD::ADD) {
    return false;
  }
  ConstantSDNode *CN = 0;
  if (!(CN = dyn_cast<ConstantSDNode>(Addr.getOperand(1)))) {
    return false;
  }
  int64_t off = CN->getSExtValue();
  const SDValue &Base = Addr.getOperand(0);
  const SDValue *Root = &Base;
  if (Base.getOpcode() == ISD::ADD &&
      Base.getOperand(1).getOpcode() == ISD::SHL) {
    ConstantSDNode *CN = dyn_cast<ConstantSDNode>(Base.getOperand(1)
                                                      .getOperand(1));
    if (CN && (CN->getSExtValue() >= 2)) {
      Root = &Base.getOperand(0);
    }
  }
  if (isa<FrameIndexSDNode>(*Root)) {
    // All frame indices are word aligned
    AlignedBase = Base;
    Offset = off;
    return true;
  }
  if (Root->getOpcode() == XCoreISD::DPRelativeWrapper ||
      Root->getOpcode() == XCoreISD::CPRelativeWrapper) {
    // All dp / cp relative addresses are word aligned
    AlignedBase = Base;
    Offset = off;
    return true;
  }
  return false;
}

SDValue XCoreTargetLowering::
LowerLOAD(SDValue Op, SelectionDAG &DAG)
{
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
    return SDValue();
  }
  unsigned ABIAlignment = getTargetData()->
    getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned load alone.
  if (LD->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  DebugLoc dl = Op.getDebugLoc();

  SDValue Base;
  int64_t Offset;
  if (!LD->isVolatile() &&
      IsWordAlignedBasePlusConstantOffset(BasePtr, Base, Offset)) {
    if (Offset % 4 == 0) {
      // We've managed to infer better alignment information than the load
      // already has. Use an aligned load.
      //
      // FIXME: No new alignment information is actually passed here.
      // Should the offset really be 4?
      //
      return DAG.getLoad(getPointerTy(), dl, Chain, BasePtr, NULL, 4,
                         false, false, 0);
    }
    // Lower to
    // ldw low, base[offset >> 2]
    // ldw high, base[(offset >> 2) + 1]
    // shr low_shifted, low, (offset & 0x3) * 8
    // shl high_shifted, high, 32 - (offset & 0x3) * 8
    // or result, low_shifted, high_shifted
    SDValue LowOffset = DAG.getConstant(Offset & ~0x3, MVT::i32);
    SDValue HighOffset = DAG.getConstant((Offset & ~0x3) + 4, MVT::i32);
    SDValue LowShift = DAG.getConstant((Offset & 0x3) * 8, MVT::i32);
    SDValue HighShift = DAG.getConstant(32 - (Offset & 0x3) * 8, MVT::i32);

    SDValue LowAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Base, LowOffset);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, Base, HighOffset);

    SDValue Low = DAG.getLoad(getPointerTy(), dl, Chain,
                              LowAddr, NULL, 4, false, false, 0);
    SDValue High = DAG.getLoad(getPointerTy(), dl, Chain,
                               HighAddr, NULL, 4, false, false, 0);
    SDValue LowShifted = DAG.getNode(ISD::SRL, dl, MVT::i32, Low, LowShift);
    SDValue HighShifted = DAG.getNode(ISD::SHL, dl, MVT::i32, High, HighShift);
    SDValue Result = DAG.getNode(ISD::OR, dl, MVT::i32, LowShifted, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, dl);
  }

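  // 2-byte aligned: load the two halfwords separately (low zero-extended,
  // high any-extended and shifted into the top half) and OR them together.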
  if (LD->getAlignment() == 2) {
    int SVOffset = LD->getSrcValueOffset();
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, dl, MVT::i32, Chain,
                                 BasePtr, LD->getSrcValue(), SVOffset, MVT::i16,
                                 LD->isVolatile(), LD->isNonTemporal(), 2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue High = DAG.getExtLoad(ISD::EXTLOAD, dl, MVT::i32, Chain,
                                  HighAddr, LD->getSrcValue(), SVOffset + 2,
                                  MVT::i16, LD->isVolatile(),
                                  LD->isNonTemporal(), 2);
    SDValue HighShifted = DAG.getNode(ISD::SHL, dl, MVT::i32, High,
                                      DAG.getConstant(16, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, dl, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, dl);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  std::pair<SDValue, SDValue> CallResult =
        LowerCallTo(Chain, IntPtrTy, false, false,
                    false, false, 0, CallingConv::C, false,
                    /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
                    Args, DAG, dl);

  SDValue Ops[] =
    { CallResult.first, CallResult.second };
  return DAG.getMergeValues(Ops, 2, dl);
}

SDValue XCoreTargetLowering::
LowerSTORE(SDValue Op, SelectionDAG &DAG)
{
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
    return SDValue();
  }
  unsigned ABIAlignment = getTargetData()->
    getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned store alone.
  if (ST->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  DebugLoc dl = Op.getDebugLoc();
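
  // 2-byte aligned: split the word into two halfwords and emit a pair of
  // truncating i16 stores, mirroring the halfword expansion in LowerLOAD.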
  if (ST->getAlignment() == 2) {
    int SVOffset = ST->getSrcValueOffset();
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
                                         ST->getSrcValue(), SVOffset, MVT::i16,
                                         ST->isVolatile(), ST->isNonTemporal(),
                                         2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
                                          ST->getSrcValue(), SVOffset + 2,
                                          MVT::i16, ST->isVolatile(),
                                          ST->isNonTemporal(), 2);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  const Type *IntPtrTy = getTargetData()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  std::pair<SDValue, SDValue> CallResult =
        LowerCallTo(Chain, Type::getVoidTy(*DAG.getContext()), false, false,
                    false, false, 0, CallingConv::C, false,
                    /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
                    Args, DAG, dl);

  return CallResult.second;
}

SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG)
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  DebugLoc dl = Op.getDebugLoc();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
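  // MACCS is a signed multiply-accumulate; with both addends zero it yields
  // the full 64-bit signed product. Result 0 is the high word, result 1 the
  // low word.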
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG)
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  DebugLoc dl = Op.getDebugLoc();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
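  // LMUL computes the unsigned long product plus two addends; zero addends
  // again give the plain 64-bit unsigned product.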
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}

SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG)
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  DebugLoc dl = N->getDebugLoc();
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
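  // If both multiplicands are really 32-bit values (zero- or sign-extended
  // to i64), a single MACCU/MACCS computes the full 64-bit product plus the
  // 64-bit addend.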
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
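  // General 64 x 64 -> 64 multiply-add: the low 64 bits of the product are
  // LL*RL plus (LL*RH + LH*RL) << 32, so the two cross terms are folded
  // into the high word after the MACCU.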
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG)
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD) {
    SDValue Result = TryExpandADDWithMul(N, DAG);
    if (Result.getNode() != 0)
      return Result;
  }

  DebugLoc dl = N->getDebugLoc();

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(0, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(1, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(0, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(1, MVT::i32));
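
  // Expand: chain two LADD/LSUB nodes so the carry (or borrow) produced by
  // the low words feeds into the high words. Result 0 of each node is the
  // carry out; result 1 is the 32-bit sum.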
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Carry = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                              LHSL, RHSL, Zero);
  SDValue Lo(Carry.getNode(), 1);

  SDValue Ignored = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                                LHSH, RHSH, Carry);
  SDValue Hi(Ignored.getNode(), 1);

  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}

SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG)
{
  llvm_unreachable("unimplemented");
  // FIX Arguments passed by reference need an extra dereference.
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0),
                               Node->getOperand(1), V, 0, false, false, 0);
  // Increment the pointer, VAList, to the next vararg
  SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
                             DAG.getConstant(VT.getSizeInBits(),
                                             getPointerTy()));
  // Store the incremented VAList to the legalized pointer
  Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1), V, 0,
                      false, false, 0);
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, dl, Tmp3, VAList, NULL, 0, false, false, 0);
}

SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG)
{
  DebugLoc dl = Op.getDebugLoc();
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1), SV, 0,
                      false, false, 0);
}

SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) {
  DebugLoc dl = Op.getDebugLoc();
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                            RegInfo->getFrameRegister(MF), MVT::i32);
}

//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//                  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(SDValue Chain, SDValue Callee,
                               CallingConv::ID CallConv, bool isVarArg,
                               bool &isTailCall,
                               const SmallVectorImpl<ISD::OutputArg> &Outs,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               DebugLoc dl, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) {
  // XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C implemented
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::Fast:
    case CallingConv::C:
      return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                            Outs, Ins, dl, DAG, InVals);
  }
}

/// LowerCCCCallTo - function arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
SDValue
XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 ArgLocs, *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
                                 getPointerTy(), true));

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = Outs[i].Val;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    // Arguments that can be passed in registers must be kept in the
    // RegsToPass vector.
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

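      // STWSP stores relative to the stack pointer with a word-scaled
      // offset, which appears to be why the byte offset is divided by 4.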
      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //             = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Flag);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain = DAG.getNode(XCoreISD::BL, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, getPointerTy(), true),
                             DAG.getConstant(0, getPointerTy(), true),
                             InFlag);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}

/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                     CallingConv::ID CallConv, bool isVarArg,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     DebugLoc dl, SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &InVals) {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());

  CCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
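    // getCopyFromReg yields (value, chain, glue); consuming the glue keeps
    // the result copies ordered immediately after the call.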
    Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//

/// XCore formal arguments implementation
SDValue
XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg>
                                            &Ins,
                                          DebugLoc dl,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals) {
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::Fast:
      return LowerCCCArguments(Chain, CallConv, isVarArg,
                               Ins, dl, DAG, InVals);
  }
}

/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
SDValue
XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
                                       CallingConv::ID CallConv,
                                       bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg>
                                         &Ins,
                                       DebugLoc dl,
                                       SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameInfo::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getSimpleVT().SimpleTy << "\n";
#endif
          llvm_unreachable(0);
        }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(
                          XCore::GRRegsRegisterClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      }
    } else {
      // sanity check
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << (unsigned)VA.getLocVT().getSimpleVT().SimpleTy
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize,
                                      LRSaveSize + VA.getLocMemOffset(),
                                      true, false);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN, NULL, 0,
                                   false, false, 0));
    }
  }

  if (isVarArg) {
    /* Argument registers */
    static const unsigned ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
                                                     array_lengthof(ArgRegs));
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      SmallVector<SDValue, 4> MemOps;
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address
      for (unsigned i = array_lengthof(ArgRegs) - 1; i >= FirstVAReg; --i) {
        // Create a stack slot
        int FI = MFI->CreateFixedObject(4, offset, true, false);
        if (i == FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg
        unsigned VReg = RegInfo.createVirtualRegister(
                          XCore::GRRegsRegisterClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        // Move argument from virt reg -> stack
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN, NULL, 0,
                                     false, false, 0);
        MemOps.push_back(Store);
      }
      if (!MemOps.empty())
        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            &MemOps[0], MemOps.size());
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
        MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                               true, false));
    }
  }

  return Chain;
}

//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//

bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, bool isVarArg,
               const SmallVectorImpl<EVT> &OutTys,
               const SmallVectorImpl<ISD::ArgFlagsTy> &ArgsFlags,
               SelectionDAG &DAG) {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());
  return CCInfo.CheckReturn(OutTys, ArgsFlags, RetCC_XCore);
}

SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 DebugLoc dl, SelectionDAG &DAG) {

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, getTargetMachine(),
                 RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  // If this is the first return lowered for this function, add
  // the regs to the liveout set for the function.
  if (DAG.getMachineFunction().getRegInfo().liveout_empty()) {
    for (unsigned i = 0; i != RVLocs.size(); ++i)
      if (RVLocs[i].isRegLoc())
        DAG.getMachineFunction().getRegInfo().addLiveOut(RVLocs[i].getLocReg());
  }

  SDValue Flag;

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             Outs[i].Val, Flag);

    // Guarantee that all emitted copies are stuck together,
    // avoiding something bad.
    Flag = Chain.getValue(1);
  }

  // Return on XCore is always a "retsp 0"
  if (Flag.getNode())
    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                       Chain, DAG.getConstant(0, MVT::i32), Flag);
  else // Return Void
    return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                       Chain, DAG.getConstant(0, MVT::i32));
}

//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//

MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB,
                   DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  assert((MI->getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);
  // Update machine-CFG edges by first adding all successors of the current
  // block to the new block which will contain the Phi node for the select.
  // Also inform sdisel of the edge changes.
  for (MachineBasicBlock::succ_iterator I = BB->succ_begin(),
         E = BB->succ_end(); I != E; ++I) {
    EM->insert(std::make_pair(*I, sinkMBB));
    sinkMBB->addSuccessor(*I);
  }
  // Next, remove all successors of the current block, and add the true
  // and fallthrough blocks as its successors.
  while (!BB->succ_empty())
    BB->removeSuccessor(BB->succ_begin());
  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(BB, dl, TII.get(XCore::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  F->DeleteMachineInstr(MI);   // The pseudo instruction is gone now.
  return BB;
}

//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//

SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  DebugLoc dl = N->getDebugLoc();
  switch (N->getOpcode()) {
  default: break;
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // canonicalize constant to RHS
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, VT));
      SDValue Ops [] = { Carry, Result };
      return DAG.getMergeValues(Ops, 2, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
      if (KnownZero == Mask) {
        SDValue Carry = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops [] = { Carry, Result };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
      if (KnownZero == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, VT), N2);
        SDValue Ops [] = { Borrow, Result };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 0)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, Mask, KnownZero, KnownOne);
      if (KnownZero == Mask) {
        SDValue Borrow = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops [] = { Borrow, Result };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case ISD::ADD: {
    // Fold expressions such as add(add(mul(x,y),a),b) -> lmul(x, y, a, b).
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue AddOp;
    SDValue OtherOp;
    if (N0.getOpcode() == ISD::ADD) {
      AddOp = N0;
      OtherOp = N1;
    } else if (N1.getOpcode() == ISD::ADD) {
      AddOp = N1;
      OtherOp = N0;
    } else {
      break;
    }
    SDValue Addend0, Addend1;
    SDValue Mul0, Mul1;
    if (OtherOp.getOpcode() == ISD::MUL) {
      // add(add(a,b),mul(x,y))
      if (!OtherOp.hasOneUse() || !AddOp.hasOneUse())
        break;
      Mul0 = OtherOp.getOperand(0);
      Mul1 = OtherOp.getOperand(1);
      Addend0 = AddOp.getOperand(0);
      Addend1 = AddOp.getOperand(1);
    } else if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
      // add(add(mul(x,y),a),b)
      if (!AddOp.getOperand(0).hasOneUse())
        break;
      Mul0 = AddOp.getOperand(0).getOperand(0);
      Mul1 = AddOp.getOperand(0).getOperand(1);
      Addend0 = AddOp.getOperand(1);
      Addend1 = OtherOp;
    } else if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
      // add(add(a,mul(x,y)),b)
      if (!AddOp.getOperand(1).hasOneUse())
        break;
      Mul0 = AddOp.getOperand(1).getOperand(0);
      Mul1 = AddOp.getOperand(1).getOperand(1);
      Addend0 = AddOp.getOperand(0);
      Addend1 = OtherOp;
    } else {
      break;
    }
    SDValue Zero = DAG.getConstant(0, MVT::i32);
    SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                  DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                  Mul1, Addend0, Addend1);
    SDValue Result(Ignored.getNode(), 1);
    return Result;
  }
  case ISD::STORE: {
    // Replace unaligned store of unaligned load with memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    if (!DCI.isBeforeLegalize() ||
        allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    if (StoreBits % 8) {
      break;
    }
    unsigned ABIAlignment = getTargetData()->getABITypeAlignment(
        ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
    unsigned Alignment = ST->getAlignment();
    if (Alignment >= ABIAlignment) {
      break;
    }

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits/8, MVT::i32),
                              Alignment, ST->getSrcValue(),
                              ST->getSrcValueOffset(), LD->getSrcValue(),
                              LD->getSrcValueOffset());
      }
    }
    break;
  }
  }
  return SDValue();
}

void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                         const APInt &Mask,
                                                         APInt &KnownZero,
                                                         APInt &KnownOne,
                                                         const SelectionDAG &DAG,
                                                         unsigned Depth) const {
  KnownZero = KnownOne = APInt(Mask.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 0) {
      // Top bits of carry / borrow are clear.
      KnownZero = APInt::getHighBitsSet(Mask.getBitWidth(),
                                        Mask.getBitWidth() - 1);
    }
    break;
  }
}

//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//

static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val)
{
  return (val%2 == 0 && isImmUs(val/2));
}

static inline bool isImmUs4(int64_t val)
{
  return (val%4 == 0 && isImmUs(val/4));
}
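
// The helpers above appear to correspond to the XCore short ("us") immediate
// range 0..11, scaled by the access size for halfword and word accesses.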

/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool
XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           const Type *Ty) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  const TargetData *TD = TM.getTargetData();
  unsigned Size = TD->getTypeAllocSize(Ty);
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
                 AM.BaseOffs%4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }

  return false;
}

//===----------------------------------------------------------------------===//
//                           XCore Inline Assembly Support
//===----------------------------------------------------------------------===//

std::vector<unsigned> XCoreTargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
                                  EVT VT) const
{
  if (Constraint.size() != 1)
    return std::vector<unsigned>();

  switch (Constraint[0]) {
    default : break;
    case 'r':
      return make_vector<unsigned>(XCore::R0, XCore::R1,  XCore::R2,
                                   XCore::R3, XCore::R4,  XCore::R5,
                                   XCore::R6, XCore::R7,  XCore::R8,
                                   XCore::R9, XCore::R10, XCore::R11, 0);
  }
  return std::vector<unsigned>();
}