//===-- XCoreISelLowering.cpp - XCore DAG Lowering Implementation ---------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the XCoreTargetLowering class.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "xcore-lower"

#include "XCoreISelLowering.h"
#include "XCore.h"
#include "XCoreMachineFunctionInfo.h"
#include "XCoreSubtarget.h"
#include "XCoreTargetMachine.h"
#include "XCoreTargetObjectFile.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
const char *XCoreTargetLowering::
getTargetNodeName(unsigned Opcode) const
{
  switch (Opcode)
  {
    case XCoreISD::BL                : return "XCoreISD::BL";
    case XCoreISD::PCRelativeWrapper : return "XCoreISD::PCRelativeWrapper";
    case XCoreISD::DPRelativeWrapper : return "XCoreISD::DPRelativeWrapper";
    case XCoreISD::CPRelativeWrapper : return "XCoreISD::CPRelativeWrapper";
    case XCoreISD::STWSP             : return "XCoreISD::STWSP";
    case XCoreISD::RETSP             : return "XCoreISD::RETSP";
    case XCoreISD::LADD              : return "XCoreISD::LADD";
    case XCoreISD::LSUB              : return "XCoreISD::LSUB";
    case XCoreISD::LMUL              : return "XCoreISD::LMUL";
    case XCoreISD::MACCU             : return "XCoreISD::MACCU";
    case XCoreISD::MACCS             : return "XCoreISD::MACCS";
    case XCoreISD::CRC8              : return "XCoreISD::CRC8";
    case XCoreISD::BR_JT             : return "XCoreISD::BR_JT";
    case XCoreISD::BR_JT32           : return "XCoreISD::BR_JT32";
    default                          : return NULL;
  }
}
XCoreTargetLowering::XCoreTargetLowering(XCoreTargetMachine &XTM)
  : TargetLowering(XTM, new XCoreTargetObjectFile()),
    TM(XTM),
    Subtarget(*XTM.getSubtargetImpl()) {

  // Set up the register classes.
  addRegisterClass(MVT::i32, &XCore::GRRegsRegClass);

  // Compute derived properties from the register classes
  computeRegisterProperties();

  // Division is expensive
  setIntDivIsCheap(false);

  setStackPointerRegisterToSaveRestore(XCore::SP);

  setSchedulingPreference(Sched::RegPressure);

  // Use i32 for setcc operations results (slt, sgt, ...).
  setBooleanContents(ZeroOrOneBooleanContent);
  setBooleanVectorContents(ZeroOrOneBooleanContent); // FIXME: Is this correct?

  // XCore does not have the NodeTypes below.
  setOperationAction(ISD::BR_CC,     MVT::i32, Expand);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::ADDC, MVT::i32, Expand);
  setOperationAction(ISD::ADDE, MVT::i32, Expand);
  setOperationAction(ISD::SUBC, MVT::i32, Expand);
  setOperationAction(ISD::SUBE, MVT::i32, Expand);

  // Stop the combiner recombining select and set_cc
  setOperationAction(ISD::SELECT_CC, MVT::Other, Expand);

  // 64bit
  setOperationAction(ISD::ADD, MVT::i64, Custom);
  setOperationAction(ISD::SUB, MVT::i64, Custom);
  setOperationAction(ISD::SMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::UMUL_LOHI, MVT::i32, Custom);
  setOperationAction(ISD::MULHS, MVT::i32, Expand);
  setOperationAction(ISD::MULHU, MVT::i32, Expand);
  setOperationAction(ISD::SHL_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRA_PARTS, MVT::i32, Expand);
  setOperationAction(ISD::SRL_PARTS, MVT::i32, Expand);

  // Bit Manipulation
  setOperationAction(ISD::CTPOP, MVT::i32, Expand);
  setOperationAction(ISD::ROTL , MVT::i32, Expand);
  setOperationAction(ISD::ROTR , MVT::i32, Expand);
  setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Expand);
  setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Expand);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // Jump tables.
  setOperationAction(ISD::BR_JT, MVT::Other, Custom);

  setOperationAction(ISD::GlobalAddress, MVT::i32, Custom);
  setOperationAction(ISD::BlockAddress, MVT::i32, Custom);

  // Conversion of i64 -> double produces constantpool nodes
  setOperationAction(ISD::ConstantPool, MVT::i32, Custom);

  // Loads
  setLoadExtAction(ISD::EXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i1, Promote);
  setLoadExtAction(ISD::SEXTLOAD, MVT::i1, Promote);

  setLoadExtAction(ISD::SEXTLOAD, MVT::i8, Expand);
  setLoadExtAction(ISD::ZEXTLOAD, MVT::i16, Expand);

  // Custom expand misaligned loads / stores.
  setOperationAction(ISD::LOAD, MVT::i32, Custom);
  setOperationAction(ISD::STORE, MVT::i32, Custom);

  // Varargs
  setOperationAction(ISD::VAEND, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VASTART, MVT::Other, Custom);

  // Dynamic stack
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);
  setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);

  // TRAMPOLINE is custom lowered.
  setOperationAction(ISD::INIT_TRAMPOLINE, MVT::Other, Custom);
  setOperationAction(ISD::ADJUST_TRAMPOLINE, MVT::Other, Custom);

  // We want to custom lower some of our intrinsics.
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  MaxStoresPerMemset = MaxStoresPerMemsetOptSize = 4;
  MaxStoresPerMemmove = MaxStoresPerMemmoveOptSize
    = MaxStoresPerMemcpy = MaxStoresPerMemcpyOptSize = 2;

  // We have target-specific dag combine patterns for the following nodes:
  setTargetDAGCombine(ISD::STORE);
  setTargetDAGCombine(ISD::ADD);

  setMinFunctionAlignment(1);
}
SDValue XCoreTargetLowering::
LowerOperation(SDValue Op, SelectionDAG &DAG) const {
  switch (Op.getOpcode())
  {
  case ISD::GlobalAddress:      return LowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:       return LowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:       return LowerConstantPool(Op, DAG);
  case ISD::BR_JT:              return LowerBR_JT(Op, DAG);
  case ISD::LOAD:               return LowerLOAD(Op, DAG);
  case ISD::STORE:              return LowerSTORE(Op, DAG);
  case ISD::SELECT_CC:          return LowerSELECT_CC(Op, DAG);
  case ISD::VAARG:              return LowerVAARG(Op, DAG);
  case ISD::VASTART:            return LowerVASTART(Op, DAG);
  case ISD::SMUL_LOHI:          return LowerSMUL_LOHI(Op, DAG);
  case ISD::UMUL_LOHI:          return LowerUMUL_LOHI(Op, DAG);
  // FIXME: Remove these when LegalizeDAGTypes lands.
  case ISD::ADD:
  case ISD::SUB:                return ExpandADDSUB(Op.getNode(), DAG);
  case ISD::FRAMEADDR:          return LowerFRAMEADDR(Op, DAG);
  case ISD::INIT_TRAMPOLINE:    return LowerINIT_TRAMPOLINE(Op, DAG);
  case ISD::ADJUST_TRAMPOLINE:  return LowerADJUST_TRAMPOLINE(Op, DAG);
  case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  default:
    llvm_unreachable("unimplemented operand");
  }
}
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
void XCoreTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue>&Results,
                                             SelectionDAG &DAG) const {
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom expand this!");
  case ISD::ADD:
  case ISD::SUB:
    Results.push_back(ExpandADDSUB(N, DAG));
    return;
  }
}
//===----------------------------------------------------------------------===//
//  Misc Lower Operation implementation
//===----------------------------------------------------------------------===//
SDValue XCoreTargetLowering::
LowerSELECT_CC(SDValue Op, SelectionDAG &DAG) const
{
  DebugLoc dl = Op.getDebugLoc();
  SDValue Cond = DAG.getNode(ISD::SETCC, dl, MVT::i32, Op.getOperand(2),
                             Op.getOperand(3), Op.getOperand(4));
  return DAG.getNode(ISD::SELECT, dl, MVT::i32, Cond, Op.getOperand(0),
                     Op.getOperand(1));
}
SDValue XCoreTargetLowering::
getGlobalAddressWrapper(SDValue GA, const GlobalValue *GV,
                        SelectionDAG &DAG) const
{
  // FIXME there is no actual debug info here
  DebugLoc dl = GA.getDebugLoc();
  const GlobalValue *UnderlyingGV = GV;
  // If GV is an alias then use the aliasee to determine the wrapper type
  if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(GV))
    UnderlyingGV = GA->resolveAliasedGlobal();
  if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(UnderlyingGV)) {
    if (GVar->isConstant())
      return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, GA);
    return DAG.getNode(XCoreISD::DPRelativeWrapper, dl, MVT::i32, GA);
  }
  return DAG.getNode(XCoreISD::PCRelativeWrapper, dl, MVT::i32, GA);
}
SDValue XCoreTargetLowering::
LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const
{
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
  SDValue GA = DAG.getTargetGlobalAddress(GV, Op.getDebugLoc(), MVT::i32);
  return getGlobalAddressWrapper(GA, GV, DAG);
}
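
// Helper that materializes the llvm.xcore.getid intrinsic, which reads the
// identifier of the current thread.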
static inline SDValue BuildGetId(SelectionDAG &DAG, DebugLoc dl) {
  return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i32,
                     DAG.getConstant(Intrinsic::xcore_getid, MVT::i32));
}
SDValue XCoreTargetLowering::
LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const
{
  DebugLoc DL = Op.getDebugLoc();

  const BlockAddress *BA = cast<BlockAddressSDNode>(Op)->getBlockAddress();
  SDValue Result = DAG.getTargetBlockAddress(BA, getPointerTy());

  return DAG.getNode(XCoreISD::PCRelativeWrapper, DL, getPointerTy(), Result);
}
SDValue XCoreTargetLowering::
LowerConstantPool(SDValue Op, SelectionDAG &DAG) const
{
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);
  // FIXME there isn't really debug info here
  DebugLoc dl = CP->getDebugLoc();
  EVT PtrVT = Op.getValueType();
  SDValue Res;
  if (CP->isMachineConstantPoolEntry()) {
    Res = DAG.getTargetConstantPool(CP->getMachineCPVal(), PtrVT,
                                    CP->getAlignment());
  } else {
    Res = DAG.getTargetConstantPool(CP->getConstVal(), PtrVT,
                                    CP->getAlignment());
  }
  return DAG.getNode(XCoreISD::CPRelativeWrapper, dl, MVT::i32, Res);
}
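
// Jump table entries are emitted inline, immediately after the branch that
// indexes them, rather than in a separate jump-table section.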
unsigned XCoreTargetLowering::getJumpTableEncoding() const {
  return MachineJumpTableInfo::EK_Inline;
}
SDValue XCoreTargetLowering::
LowerBR_JT(SDValue Op, SelectionDAG &DAG) const
{
  SDValue Chain = Op.getOperand(0);
  SDValue Table = Op.getOperand(1);
  SDValue Index = Op.getOperand(2);
  DebugLoc dl = Op.getDebugLoc();
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Table);
  unsigned JTI = JT->getIndex();
  MachineFunction &MF = DAG.getMachineFunction();
  const MachineJumpTableInfo *MJTI = MF.getJumpTableInfo();
  SDValue TargetJT = DAG.getTargetJumpTable(JT->getIndex(), MVT::i32);

  unsigned NumEntries = MJTI->getJumpTables()[JTI].MBBs.size();
  if (NumEntries <= 32) {
    return DAG.getNode(XCoreISD::BR_JT, dl, MVT::Other, Chain, TargetJT, Index);
  }
  assert((NumEntries >> 31) == 0);
  SDValue ScaledIndex = DAG.getNode(ISD::SHL, dl, MVT::i32, Index,
                                    DAG.getConstant(1, MVT::i32));
  return DAG.getNode(XCoreISD::BR_JT32, dl, MVT::Other, Chain, TargetJT,
                     ScaledIndex);
}
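
/// lowerLoadWordFromAlignedBasePlusOffset - Lower a word load from a known
/// word-aligned base plus a constant offset. If the offset itself is word
/// aligned a single load suffices; otherwise the result is assembled from the
/// two aligned words that straddle the address, using shifts and an OR.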
SDValue XCoreTargetLowering::
lowerLoadWordFromAlignedBasePlusOffset(DebugLoc DL, SDValue Chain, SDValue Base,
                                       int64_t Offset, SelectionDAG &DAG) const
{
  if ((Offset & 0x3) == 0) {
    return DAG.getLoad(getPointerTy(), DL, Chain, Base, MachinePointerInfo(),
                       false, false, false, 0);
  }
  // Lower to pair of consecutive word aligned loads plus some bit shifting.
  int32_t HighOffset = RoundUpToAlignment(Offset, 4);
  int32_t LowOffset = HighOffset - 4;
  SDValue LowAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                                DAG.getConstant(LowOffset, MVT::i32));
  SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, Base,
                                 DAG.getConstant(HighOffset, MVT::i32));
  SDValue LowShift = DAG.getConstant((Offset - LowOffset) * 8, MVT::i32);
  SDValue HighShift = DAG.getConstant((HighOffset - Offset) * 8, MVT::i32);

  SDValue Low = DAG.getLoad(getPointerTy(), DL, Chain,
                            LowAddr, MachinePointerInfo(),
                            false, false, false, 0);
  SDValue High = DAG.getLoad(getPointerTy(), DL, Chain,
                             HighAddr, MachinePointerInfo(),
                             false, false, false, 0);
  SDValue LowShifted = DAG.getNode(ISD::SRL, DL, MVT::i32, Low, LowShift);
  SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High, HighShift);
  SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, LowShifted, HighShifted);
  Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                      High.getValue(1));
  SDValue Ops[] = { Result, Chain };
  return DAG.getMergeValues(Ops, 2, DL);
}
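
/// isWordAligned - Return true if the known-zero bits of Value show that its
/// low two bits are clear, i.e. the value is a multiple of 4.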
static bool isWordAligned(SDValue Value, SelectionDAG &DAG)
{
  APInt KnownZero, KnownOne;
  DAG.ComputeMaskedBits(Value, KnownZero, KnownOne);
  return KnownZero.countTrailingOnes() >= 2;
}
SDValue XCoreTargetLowering::
LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  LoadSDNode *LD = cast<LoadSDNode>(Op);
  assert(LD->getExtensionType() == ISD::NON_EXTLOAD &&
         "Unexpected extension type");
  assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
  if (allowsUnalignedMemoryAccesses(LD->getMemoryVT()))
    return SDValue();

  unsigned ABIAlignment = getDataLayout()->
    getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned load alone.
  if (LD->getAlignment() >= ABIAlignment)
    return SDValue();

  SDValue Chain = LD->getChain();
  SDValue BasePtr = LD->getBasePtr();
  DebugLoc DL = Op.getDebugLoc();

  if (!LD->isVolatile()) {
    const GlobalValue *GV;
    int64_t Offset = 0;
    if (DAG.isBaseWithConstantOffset(BasePtr) &&
        isWordAligned(BasePtr->getOperand(0), DAG)) {
      SDValue NewBasePtr = BasePtr->getOperand(0);
      Offset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
    if (TLI.isGAPlusOffset(BasePtr.getNode(), GV, Offset) &&
        MinAlign(GV->getAlignment(), 4) == 4) {
      SDValue NewBasePtr = DAG.getGlobalAddress(GV, DL,
                                                BasePtr->getValueType(0));
      return lowerLoadWordFromAlignedBasePlusOffset(DL, Chain, NewBasePtr,
                                                    Offset, DAG);
    }
  }

  if (LD->getAlignment() == 2) {
    SDValue Low = DAG.getExtLoad(ISD::ZEXTLOAD, DL, MVT::i32, Chain,
                                 BasePtr, LD->getPointerInfo(), MVT::i16,
                                 LD->isVolatile(), LD->isNonTemporal(), 2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, DL, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue High = DAG.getExtLoad(ISD::EXTLOAD, DL, MVT::i32, Chain,
                                  HighAddr,
                                  LD->getPointerInfo().getWithOffset(2),
                                  MVT::i16, LD->isVolatile(),
                                  LD->isNonTemporal(), 2);
    SDValue HighShifted = DAG.getNode(ISD::SHL, DL, MVT::i32, High,
                                      DAG.getConstant(16, MVT::i32));
    SDValue Result = DAG.getNode(ISD::OR, DL, MVT::i32, Low, HighShifted);
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, Low.getValue(1),
                        High.getValue(1));
    SDValue Ops[] = { Result, Chain };
    return DAG.getMergeValues(Ops, 2, DL);
  }

  // Lower to a call to __misaligned_load(BasePtr).
  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(Chain, IntPtrTy, false, false,
                    false, false, 0, CallingConv::C, /*isTailCall=*/false,
                    /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_load", getPointerTy()),
                    Args, DAG, DL);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  SDValue Ops[] =
    { CallResult.first, CallResult.second };

  return DAG.getMergeValues(Ops, 2, DL);
}
SDValue XCoreTargetLowering::
LowerSTORE(SDValue Op, SelectionDAG &DAG) const
{
  StoreSDNode *ST = cast<StoreSDNode>(Op);
  assert(!ST->isTruncatingStore() && "Unexpected store type");
  assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
  if (allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
    return SDValue();
  }
  unsigned ABIAlignment = getDataLayout()->
    getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
  // Leave aligned store alone.
  if (ST->getAlignment() >= ABIAlignment) {
    return SDValue();
  }
  SDValue Chain = ST->getChain();
  SDValue BasePtr = ST->getBasePtr();
  SDValue Value = ST->getValue();
  DebugLoc dl = Op.getDebugLoc();

  if (ST->getAlignment() == 2) {
    SDValue Low = Value;
    SDValue High = DAG.getNode(ISD::SRL, dl, MVT::i32, Value,
                               DAG.getConstant(16, MVT::i32));
    SDValue StoreLow = DAG.getTruncStore(Chain, dl, Low, BasePtr,
                                         ST->getPointerInfo(), MVT::i16,
                                         ST->isVolatile(), ST->isNonTemporal(),
                                         2);
    SDValue HighAddr = DAG.getNode(ISD::ADD, dl, MVT::i32, BasePtr,
                                   DAG.getConstant(2, MVT::i32));
    SDValue StoreHigh = DAG.getTruncStore(Chain, dl, High, HighAddr,
                                          ST->getPointerInfo().getWithOffset(2),
                                          MVT::i16, ST->isVolatile(),
                                          ST->isNonTemporal(), 2);
    return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, StoreLow, StoreHigh);
  }

  // Lower to a call to __misaligned_store(BasePtr, Value).
  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
  TargetLowering::ArgListTy Args;
  TargetLowering::ArgListEntry Entry;

  Entry.Ty = IntPtrTy;
  Entry.Node = BasePtr;
  Args.push_back(Entry);

  Entry.Node = Value;
  Args.push_back(Entry);

  TargetLowering::CallLoweringInfo CLI(Chain,
                    Type::getVoidTy(*DAG.getContext()), false, false,
                    false, false, 0, CallingConv::C, /*isTailCall=*/false,
                    /*doesNotRet=*/false, /*isReturnValueUsed=*/true,
                    DAG.getExternalSymbol("__misaligned_store", getPointerTy()),
                    Args, DAG, dl);
  std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI);

  return CallResult.second;
}
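
/// LowerSMUL_LOHI - Lower a 32x32->64 signed multiply via MACCS with zero
/// accumulator operands; result 0 of the node is the high word and result 1
/// the low word.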
SDValue XCoreTargetLowering::
LowerSMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::SMUL_LOHI &&
         "Unexpected operand to lower!");
  DebugLoc dl = Op.getDebugLoc();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), Zero, Zero,
                           LHS, RHS);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}
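
/// LowerUMUL_LOHI - Lower a 32x32->64 unsigned multiply via LMUL with zero
/// addend operands, mirroring the signed case above.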
SDValue XCoreTargetLowering::
LowerUMUL_LOHI(SDValue Op, SelectionDAG &DAG) const
{
  assert(Op.getValueType() == MVT::i32 && Op.getOpcode() == ISD::UMUL_LOHI &&
         "Unexpected operand to lower!");
  DebugLoc dl = Op.getDebugLoc();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), LHS, RHS,
                           Zero, Zero);
  SDValue Lo(Hi.getNode(), 1);
  SDValue Ops[] = { Lo, Hi };
  return DAG.getMergeValues(Ops, 2, dl);
}
/// isADDADDMUL - Return whether Op is in a form that is equivalent to
/// add(add(mul(x,y),a),b). If requireIntermediatesHaveOneUse is true then
/// each intermediate result in the calculation must also have a single use.
/// If the Op is in the correct form the constituent parts are written to Mul0,
/// Mul1, Addend0 and Addend1.
static bool
isADDADDMUL(SDValue Op, SDValue &Mul0, SDValue &Mul1, SDValue &Addend0,
            SDValue &Addend1, bool requireIntermediatesHaveOneUse)
{
  if (Op.getOpcode() != ISD::ADD)
    return false;
  SDValue N0 = Op.getOperand(0);
  SDValue N1 = Op.getOperand(1);
  SDValue AddOp;
  SDValue OtherOp;
  if (N0.getOpcode() == ISD::ADD) {
    AddOp = N0;
    OtherOp = N1;
  } else if (N1.getOpcode() == ISD::ADD) {
    AddOp = N1;
    OtherOp = N0;
  } else {
    return false;
  }
  if (requireIntermediatesHaveOneUse && !AddOp.hasOneUse())
    return false;
  if (OtherOp.getOpcode() == ISD::MUL) {
    // add(add(a,b),mul(x,y))
    if (requireIntermediatesHaveOneUse && !OtherOp.hasOneUse())
      return false;
    Mul0 = OtherOp.getOperand(0);
    Mul1 = OtherOp.getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = AddOp.getOperand(1);
    return true;
  }
  if (AddOp.getOperand(0).getOpcode() == ISD::MUL) {
    // add(add(mul(x,y),a),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(0).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(0).getOperand(0);
    Mul1 = AddOp.getOperand(0).getOperand(1);
    Addend0 = AddOp.getOperand(1);
    Addend1 = OtherOp;
    return true;
  }
  if (AddOp.getOperand(1).getOpcode() == ISD::MUL) {
    // add(add(a,mul(x,y)),b)
    if (requireIntermediatesHaveOneUse && !AddOp.getOperand(1).hasOneUse())
      return false;
    Mul0 = AddOp.getOperand(1).getOperand(0);
    Mul1 = AddOp.getOperand(1).getOperand(1);
    Addend0 = AddOp.getOperand(0);
    Addend1 = OtherOp;
    return true;
  }
  return false;
}
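
/// TryExpandADDWithMul - Try to expand an i64 add of an i64 multiply into a
/// single MACCU/MACCS node when the multiply inputs are known to be zero- or
/// sign-extended 32-bit values, falling back to a MACCU of the low halves
/// plus cross-product fixups of the high word otherwise.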
SDValue XCoreTargetLowering::
TryExpandADDWithMul(SDNode *N, SelectionDAG &DAG) const
{
  SDValue Mul;
  SDValue Other;
  if (N->getOperand(0).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(0);
    Other = N->getOperand(1);
  } else if (N->getOperand(1).getOpcode() == ISD::MUL) {
    Mul = N->getOperand(1);
    Other = N->getOperand(0);
  } else {
    return SDValue();
  }
  DebugLoc dl = N->getDebugLoc();
  SDValue LL, RL, AddendL, AddendH;
  LL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(0, MVT::i32));
  RL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(0, MVT::i32));
  AddendL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(0, MVT::i32));
  AddendH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                        Other, DAG.getConstant(1, MVT::i32));
  APInt HighMask = APInt::getHighBitsSet(64, 32);
  unsigned LHSSB = DAG.ComputeNumSignBits(Mul.getOperand(0));
  unsigned RHSSB = DAG.ComputeNumSignBits(Mul.getOperand(1));
  if (DAG.MaskedValueIsZero(Mul.getOperand(0), HighMask) &&
      DAG.MaskedValueIsZero(Mul.getOperand(1), HighMask)) {
    // The inputs are both zero-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  if (LHSSB > 32 && RHSSB > 32) {
    // The inputs are both sign-extended.
    SDValue Hi = DAG.getNode(XCoreISD::MACCS, dl,
                             DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                             AddendL, LL, RL);
    SDValue Lo(Hi.getNode(), 1);
    return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
  }
  SDValue LH, RH;
  LH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(0), DAG.getConstant(1, MVT::i32));
  RH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                   Mul.getOperand(1), DAG.getConstant(1, MVT::i32));
  SDValue Hi = DAG.getNode(XCoreISD::MACCU, dl,
                           DAG.getVTList(MVT::i32, MVT::i32), AddendH,
                           AddendL, LL, RL);
  SDValue Lo(Hi.getNode(), 1);
  RH = DAG.getNode(ISD::MUL, dl, MVT::i32, LL, RH);
  LH = DAG.getNode(ISD::MUL, dl, MVT::i32, LH, RL);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, RH);
  Hi = DAG.getNode(ISD::ADD, dl, MVT::i32, Hi, LH);
  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
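
/// ExpandADDSUB - Expand a 64-bit add or subtract into an LADD/LSUB pair,
/// threading the carry (or borrow) from the low words into the high words.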
SDValue XCoreTargetLowering::
ExpandADDSUB(SDNode *N, SelectionDAG &DAG) const
{
  assert(N->getValueType(0) == MVT::i64 &&
         (N->getOpcode() == ISD::ADD || N->getOpcode() == ISD::SUB) &&
         "Unknown operand to lower!");

  if (N->getOpcode() == ISD::ADD) {
    SDValue Result = TryExpandADDWithMul(N, DAG);
    if (Result.getNode() != 0)
      return Result;
  }

  DebugLoc dl = N->getDebugLoc();

  // Extract components
  SDValue LHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(0, MVT::i32));
  SDValue LHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(0), DAG.getConstant(1, MVT::i32));
  SDValue RHSL = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(0, MVT::i32));
  SDValue RHSH = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                             N->getOperand(1), DAG.getConstant(1, MVT::i32));

  // Expand
  unsigned Opcode = (N->getOpcode() == ISD::ADD) ? XCoreISD::LADD :
                                                   XCoreISD::LSUB;
  SDValue Zero = DAG.getConstant(0, MVT::i32);
  SDValue Lo = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSL, RHSL, Zero);
  SDValue Carry(Lo.getNode(), 1);

  SDValue Hi = DAG.getNode(Opcode, dl, DAG.getVTList(MVT::i32, MVT::i32),
                           LHSH, RHSH, Carry);
  SDValue Ignored(Hi.getNode(), 1);

  return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
}
SDValue XCoreTargetLowering::
LowerVAARG(SDValue Op, SelectionDAG &DAG) const
{
  llvm_unreachable("unimplemented");
  // FIXME Arguments passed by reference need an extra dereference.
  SDNode *Node = Op.getNode();
  DebugLoc dl = Node->getDebugLoc();
  const Value *V = cast<SrcValueSDNode>(Node->getOperand(2))->getValue();
  EVT VT = Node->getValueType(0);
  SDValue VAList = DAG.getLoad(getPointerTy(), dl, Node->getOperand(0),
                               Node->getOperand(1), MachinePointerInfo(V),
                               false, false, false, 0);
  // Increment the pointer, VAList, to the next vararg
  SDValue Tmp3 = DAG.getNode(ISD::ADD, dl, getPointerTy(), VAList,
                             DAG.getConstant(VT.getSizeInBits(),
                                             getPointerTy()));
  // Store the incremented VAList to the legalized pointer
  Tmp3 = DAG.getStore(VAList.getValue(1), dl, Tmp3, Node->getOperand(1),
                      MachinePointerInfo(V), false, false, 0);
  // Load the actual argument out of the pointer VAList
  return DAG.getLoad(VT, dl, Tmp3, VAList, MachinePointerInfo(),
                     false, false, false, 0);
}
SDValue XCoreTargetLowering::
LowerVASTART(SDValue Op, SelectionDAG &DAG) const
{
  DebugLoc dl = Op.getDebugLoc();
  // vastart stores the address of the VarArgsFrameIndex slot into the
  // memory location argument
  MachineFunction &MF = DAG.getMachineFunction();
  XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
  SDValue Addr = DAG.getFrameIndex(XFI->getVarArgsFrameIndex(), MVT::i32);
  return DAG.getStore(Op.getOperand(0), dl, Addr, Op.getOperand(1),
                      MachinePointerInfo(), false, false, 0);
}
SDValue XCoreTargetLowering::LowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  DebugLoc dl = Op.getDebugLoc();
  // Depths > 0 not supported yet!
  if (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue() > 0)
    return SDValue();

  MachineFunction &MF = DAG.getMachineFunction();
  const TargetRegisterInfo *RegInfo = getTargetMachine().getRegisterInfo();
  return DAG.getCopyFromReg(DAG.getEntryNode(), dl,
                            RegInfo->getFrameRegister(MF), MVT::i32);
}
SDValue XCoreTargetLowering::
LowerADJUST_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  return Op.getOperand(0);
}
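
// The trampoline built below is five words long: three words of code (the
// encodings stored at offsets 0, 4 and 8) followed by the nest parameter and
// the address of the nested function.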
SDValue XCoreTargetLowering::
LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  SDValue Trmp = Op.getOperand(1); // trampoline
  SDValue FPtr = Op.getOperand(2); // nested function
  SDValue Nest = Op.getOperand(3); // 'nest' parameter value

  const Value *TrmpAddr = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  // .align 4
  // LDAPF_u10 r11, nest
  // LDW_2rus r11, r11[0]
  // STWSP_ru6 r11, sp[0]
  // LDAPF_u10 r11, fptr
  // LDW_2rus r11, r11[0]
  // BAU_1r r11
  // nest:
  // .word nest
  // fptr:
  // .word fptr
  SDValue OutChains[5];

  SDValue Addr = Trmp;

  DebugLoc dl = Op.getDebugLoc();
  OutChains[0] = DAG.getStore(Chain, dl, DAG.getConstant(0x0a3cd805, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(4, MVT::i32));
  OutChains[1] = DAG.getStore(Chain, dl, DAG.getConstant(0xd80456c0, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 4), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(8, MVT::i32));
  OutChains[2] = DAG.getStore(Chain, dl, DAG.getConstant(0x27fb0a3c, MVT::i32),
                              Addr, MachinePointerInfo(TrmpAddr, 8), false,
                              false, 0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(12, MVT::i32));
  OutChains[3] = DAG.getStore(Chain, dl, Nest, Addr,
                              MachinePointerInfo(TrmpAddr, 12), false, false,
                              0);

  Addr = DAG.getNode(ISD::ADD, dl, MVT::i32, Trmp,
                     DAG.getConstant(16, MVT::i32));
  OutChains[4] = DAG.getStore(Chain, dl, FPtr, Addr,
                              MachinePointerInfo(TrmpAddr, 16), false, false,
                              0);

  return DAG.getNode(ISD::TokenFactor, dl, MVT::Other, OutChains, 5);
}
SDValue XCoreTargetLowering::
LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const {
  DebugLoc DL = Op.getDebugLoc();
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  switch (IntNo) {
    case Intrinsic::xcore_crc8:
      EVT VT = Op.getValueType();
      SDValue Data =
        DAG.getNode(XCoreISD::CRC8, DL, DAG.getVTList(VT, VT),
                    Op.getOperand(1), Op.getOperand(2), Op.getOperand(3));
      SDValue Crc(Data.getNode(), 1);
      SDValue Results[] = { Crc, Data };
      return DAG.getMergeValues(Results, 2, DL);
  }
  return SDValue();
}
//===----------------------------------------------------------------------===//
//                      Calling Convention Implementation
//===----------------------------------------------------------------------===//

#include "XCoreGenCallingConv.inc"

//===----------------------------------------------------------------------===//
//                  Call Calling Convention Implementation
//===----------------------------------------------------------------------===//
/// XCore call implementation
SDValue
XCoreTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
                               SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG                     = CLI.DAG;
  DebugLoc &dl                          = CLI.DL;
  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
  SmallVector<SDValue, 32> &OutVals     = CLI.OutVals;
  SmallVector<ISD::InputArg, 32> &Ins   = CLI.Ins;
  SDValue Chain                         = CLI.Chain;
  SDValue Callee                        = CLI.Callee;
  bool &isTailCall                      = CLI.IsTailCall;
  CallingConv::ID CallConv              = CLI.CallConv;
  bool isVarArg                         = CLI.IsVarArg;

  // XCore target does not yet support tail call optimization.
  isTailCall = false;

  // For now, only CallingConv::C implemented
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::Fast:
    case CallingConv::C:
      return LowerCCCCallTo(Chain, Callee, CallConv, isVarArg, isTailCall,
                            Outs, OutVals, Ins, dl, DAG, InVals);
  }
}
/// LowerCCCCallTo - functions arguments are copied from virtual
/// regs to (physical regs)/(stack frame), CALLSEQ_START and
/// CALLSEQ_END are emitted.
/// TODO: isTailCall, sret.
SDValue
XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee,
                                    CallingConv::ID CallConv, bool isVarArg,
                                    bool isTailCall,
                                    const SmallVectorImpl<ISD::OutputArg> &Outs,
                                    const SmallVectorImpl<SDValue> &OutVals,
                                    const SmallVectorImpl<ISD::InputArg> &Ins,
                                    DebugLoc dl, SelectionDAG &DAG,
                                    SmallVectorImpl<SDValue> &InVals) const {

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  // The ABI dictates there should be one stack slot available to the callee
  // on function entry (for saving lr).
  CCInfo.AllocateStack(4, 4);

  CCInfo.AnalyzeCallOperands(Outs, CC_XCore);

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  Chain = DAG.getCALLSEQ_START(Chain, DAG.getConstant(NumBytes,
                                                      getPointerTy(), true));

  SmallVector<std::pair<unsigned, SDValue>, 4> RegsToPass;
  SmallVector<SDValue, 12> MemOpChains;

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[i];

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
      default: llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full: break;
      case CCValAssign::SExt:
        Arg = DAG.getNode(ISD::SIGN_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::ZExt:
        Arg = DAG.getNode(ISD::ZERO_EXTEND, dl, VA.getLocVT(), Arg);
        break;
      case CCValAssign::AExt:
        Arg = DAG.getNode(ISD::ANY_EXTEND, dl, VA.getLocVT(), Arg);
        break;
    }

    // Arguments that can be passed on register must be kept at
    // RegsToPass vector
    if (VA.isRegLoc()) {
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      int Offset = VA.getLocMemOffset();

      MemOpChains.push_back(DAG.getNode(XCoreISD::STWSP, dl, MVT::Other,
                                        Chain, Arg,
                                        DAG.getConstant(Offset/4, MVT::i32)));
    }
  }

  // Transform all store nodes into one single node because
  // all store nodes are independent of each other.
  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                        &MemOpChains[0], MemOpChains.size());

  // Build a sequence of copy-to-reg nodes chained together with token
  // chain and flag operands which copy the outgoing args into registers.
  // The InFlag is necessary since all emitted instructions must be
  // stuck together.
  SDValue InFlag;
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress node (quite common, every direct call is)
  // turn it into a TargetGlobalAddress node so that legalize doesn't hack it.
  // Likewise ExternalSymbol -> TargetExternalSymbol.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee))
    Callee = DAG.getTargetGlobalAddress(G->getGlobal(), dl, MVT::i32);
  else if (ExternalSymbolSDNode *E = dyn_cast<ExternalSymbolSDNode>(Callee))
    Callee = DAG.getTargetExternalSymbol(E->getSymbol(), MVT::i32);

  // XCoreBranchLink = #chain, #target_address, #opt_in_flags...
  //             = Chain, Callee, Reg#1, Reg#2, ...
  //
  // Returns a chain & a flag for retval copy to use.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i)
    Ops.push_back(DAG.getRegister(RegsToPass[i].first,
                                  RegsToPass[i].second.getValueType()));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  Chain  = DAG.getNode(XCoreISD::BL, dl, NodeTys, &Ops[0], Ops.size());
  InFlag = Chain.getValue(1);

  // Create the CALLSEQ_END node.
  Chain = DAG.getCALLSEQ_END(Chain,
                             DAG.getConstant(NumBytes, getPointerTy(), true),
                             DAG.getConstant(0, getPointerTy(), true),
                             InFlag);
  InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, isVarArg,
                         Ins, dl, DAG, InVals);
}
/// LowerCallResult - Lower the result values of a call into the
/// appropriate copies out of appropriate physical registers.
SDValue
XCoreTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
                                     CallingConv::ID CallConv, bool isVarArg,
                                     const SmallVectorImpl<ISD::InputArg> &Ins,
                                     DebugLoc dl, SelectionDAG &DAG,
                                     SmallVectorImpl<SDValue> &InVals) const {

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  CCInfo.AnalyzeCallResult(Ins, RetCC_XCore);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    Chain = DAG.getCopyFromReg(Chain, dl, RVLocs[i].getLocReg(),
                               RVLocs[i].getValVT(), InFlag).getValue(1);
    InFlag = Chain.getValue(2);
    InVals.push_back(Chain.getValue(0));
  }

  return Chain;
}
//===----------------------------------------------------------------------===//
//             Formal Arguments Calling Convention Implementation
//===----------------------------------------------------------------------===//
/// XCore formal arguments implementation
SDValue
XCoreTargetLowering::LowerFormalArguments(SDValue Chain,
                                          CallingConv::ID CallConv,
                                          bool isVarArg,
                                          const SmallVectorImpl<ISD::InputArg> &Ins,
                                          DebugLoc dl,
                                          SelectionDAG &DAG,
                                          SmallVectorImpl<SDValue> &InVals)
                                            const {
  switch (CallConv)
  {
    default:
      llvm_unreachable("Unsupported calling convention");
    case CallingConv::C:
    case CallingConv::Fast:
      return LowerCCCArguments(Chain, CallConv, isVarArg,
                               Ins, dl, DAG, InVals);
  }
}
/// LowerCCCArguments - transform physical registers into
/// virtual registers and generate load operations for
/// arguments placed on the stack.
SDValue
XCoreTargetLowering::LowerCCCArguments(SDValue Chain,
                                       CallingConv::ID CallConv,
                                       bool isVarArg,
                                       const SmallVectorImpl<ISD::InputArg>
                                         &Ins,
                                       DebugLoc dl,
                                       SelectionDAG &DAG,
                                       SmallVectorImpl<SDValue> &InVals) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineRegisterInfo &RegInfo = MF.getRegInfo();

  // Assign locations to all of the incoming arguments.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), ArgLocs, *DAG.getContext());

  CCInfo.AnalyzeFormalArguments(Ins, CC_XCore);

  unsigned StackSlotSize = XCoreFrameLowering::stackSlotSize();

  unsigned LRSaveSize = StackSlotSize;

  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {

    CCValAssign &VA = ArgLocs[i];

    if (VA.isRegLoc()) {
      // Arguments passed in registers
      EVT RegVT = VA.getLocVT();
      switch (RegVT.getSimpleVT().SimpleTy) {
      default:
        {
#ifndef NDEBUG
          errs() << "LowerFormalArguments Unhandled argument type: "
                 << RegVT.getSimpleVT().SimpleTy << "\n";
#endif
          llvm_unreachable(0);
        }
      case MVT::i32:
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(VA.getLocReg(), VReg);
        InVals.push_back(DAG.getCopyFromReg(Chain, dl, VReg, RegVT));
      }
    } else {
      // sanity check
      assert(VA.isMemLoc());
      // Load the argument to a virtual register
      unsigned ObjSize = VA.getLocVT().getSizeInBits()/8;
      if (ObjSize > StackSlotSize) {
        errs() << "LowerFormalArguments Unhandled argument type: "
               << EVT(VA.getLocVT()).getEVTString()
               << "\n";
      }
      // Create the frame index object for this incoming parameter...
      int FI = MFI->CreateFixedObject(ObjSize,
                                      LRSaveSize + VA.getLocMemOffset(),
                                      true);

      // Create the SelectionDAG nodes corresponding to a load
      // from this parameter
      SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
      InVals.push_back(DAG.getLoad(VA.getLocVT(), dl, Chain, FIN,
                                   MachinePointerInfo::getFixedStack(FI),
                                   false, false, false, 0));
    }
  }

  if (isVarArg) {
    /* Argument registers */
    static const uint16_t ArgRegs[] = {
      XCore::R0, XCore::R1, XCore::R2, XCore::R3
    };
    XCoreFunctionInfo *XFI = MF.getInfo<XCoreFunctionInfo>();
    unsigned FirstVAReg = CCInfo.getFirstUnallocated(ArgRegs,
                                                     array_lengthof(ArgRegs));
    if (FirstVAReg < array_lengthof(ArgRegs)) {
      SmallVector<SDValue, 4> MemOps;
      int offset = 0;
      // Save remaining registers, storing higher register numbers at a higher
      // address
      for (int i = array_lengthof(ArgRegs) - 1; i >= (int)FirstVAReg; --i) {
        // Create a stack slot
        int FI = MFI->CreateFixedObject(4, offset, true);
        if (i == (int)FirstVAReg) {
          XFI->setVarArgsFrameIndex(FI);
        }
        offset -= StackSlotSize;
        SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
        // Move argument from phys reg -> virt reg
        unsigned VReg = RegInfo.createVirtualRegister(&XCore::GRRegsRegClass);
        RegInfo.addLiveIn(ArgRegs[i], VReg);
        SDValue Val = DAG.getCopyFromReg(Chain, dl, VReg, MVT::i32);
        // Move argument from virt reg -> stack
        SDValue Store = DAG.getStore(Val.getValue(1), dl, Val, FIN,
                                     MachinePointerInfo(), false, false, 0);
        MemOps.push_back(Store);
      }
      if (!MemOps.empty())
        Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                            &MemOps[0], MemOps.size());
    } else {
      // This will point to the next argument passed via stack.
      XFI->setVarArgsFrameIndex(
        MFI->CreateFixedObject(4, LRSaveSize + CCInfo.getNextStackOffset(),
                               true));
    }
  }

  return Chain;
}
//===----------------------------------------------------------------------===//
//               Return Value Calling Convention Implementation
//===----------------------------------------------------------------------===//
bool XCoreTargetLowering::
CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
               bool isVarArg,
               const SmallVectorImpl<ISD::OutputArg> &Outs,
               LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, getTargetMachine(), RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC_XCore);
}
SDValue
XCoreTargetLowering::LowerReturn(SDValue Chain,
                                 CallingConv::ID CallConv, bool isVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 DebugLoc dl, SelectionDAG &DAG) const {

  // CCValAssign - represent the assignment of
  // the return value to a location
  SmallVector<CCValAssign, 16> RVLocs;

  // CCState - Info about the registers and stack slot.
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(),
                 getTargetMachine(), RVLocs, *DAG.getContext());

  // Analyze return values.
  CCInfo.AnalyzeReturn(Outs, RetCC_XCore);

  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Return on XCore is always a "retsp 0"
  RetOps.push_back(DAG.getConstant(0, MVT::i32));

  // Copy the result values into the output registers.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    Chain = DAG.getCopyToReg(Chain, dl, VA.getLocReg(),
                             OutVals[i], Flag);

    // guarantee that all emitted copies are
    // stuck together, avoiding something bad
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }

  RetOps[0] = Chain;  // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(XCoreISD::RETSP, dl, MVT::Other,
                     &RetOps[0], RetOps.size());
}
//===----------------------------------------------------------------------===//
//  Other Lowering Code
//===----------------------------------------------------------------------===//
MachineBasicBlock *
XCoreTargetLowering::EmitInstrWithCustomInserter(MachineInstr *MI,
                                                 MachineBasicBlock *BB) const {
  const TargetInstrInfo &TII = *getTargetMachine().getInstrInfo();
  DebugLoc dl = MI->getDebugLoc();
  assert((MI->getOpcode() == XCore::SELECT_CC) &&
         "Unexpected instr type to insert");

  // To "insert" a SELECT_CC instruction, we actually have to insert the diamond
  // control-flow pattern.  The incoming instruction knows the destination vreg
  // to set, the condition code register to branch on, the true/false values to
  // select between, and a branch opcode to use.
  const BasicBlock *LLVM_BB = BB->getBasicBlock();
  MachineFunction::iterator It = BB;
  ++It;

  //  thisMBB:
  //  ...
  //   TrueVal = ...
  //   cmpTY ccX, r1, r2
  //   bCC copy1MBB
  //   fallthrough --> copy0MBB
  MachineBasicBlock *thisMBB = BB;
  MachineFunction *F = BB->getParent();
  MachineBasicBlock *copy0MBB = F->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *sinkMBB = F->CreateMachineBasicBlock(LLVM_BB);
  F->insert(It, copy0MBB);
  F->insert(It, sinkMBB);

  // Transfer the remainder of BB and its successor edges to sinkMBB.
  sinkMBB->splice(sinkMBB->begin(), BB,
                  llvm::next(MachineBasicBlock::iterator(MI)),
                  BB->end());
  sinkMBB->transferSuccessorsAndUpdatePHIs(BB);

  // Next, add the true and fallthrough blocks as its successors.
  BB->addSuccessor(copy0MBB);
  BB->addSuccessor(sinkMBB);

  BuildMI(BB, dl, TII.get(XCore::BRFT_lru6))
    .addReg(MI->getOperand(1).getReg()).addMBB(sinkMBB);

  //  copy0MBB:
  //   %FalseValue = ...
  //   # fallthrough to sinkMBB
  BB = copy0MBB;

  // Update machine-CFG edges
  BB->addSuccessor(sinkMBB);

  //  sinkMBB:
  //   %Result = phi [ %FalseValue, copy0MBB ], [ %TrueValue, thisMBB ]
  //  ...
  BB = sinkMBB;
  BuildMI(*BB, BB->begin(), dl,
          TII.get(XCore::PHI), MI->getOperand(0).getReg())
    .addReg(MI->getOperand(3).getReg()).addMBB(copy0MBB)
    .addReg(MI->getOperand(2).getReg()).addMBB(thisMBB);

  MI->eraseFromParent();   // The pseudo instruction is gone now.
  return BB;
}
//===----------------------------------------------------------------------===//
// Target Optimization Hooks
//===----------------------------------------------------------------------===//
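
// Target-specific DAG combines: canonicalize and fold LADD/LSUB/LMUL nodes
// with constant or known-narrow operands, merge add-of-multiply trees into
// LMUL, and rewrite an unaligned store of an unaligned load as a memmove.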
SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;
  DebugLoc dl = N->getDebugLoc();
  switch (N->getOpcode()) {
  default: break;
  case XCoreISD::LADD: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // canonicalize constant to RHS
    if (N0C && !N1C)
      return DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N1, N0, N2);

    // fold (ladd 0, 0, x) -> 0, x & 1
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      SDValue Carry = DAG.getConstant(0, VT);
      SDValue Result = DAG.getNode(ISD::AND, dl, VT, N2,
                                   DAG.getConstant(1, VT));
      SDValue Ops[] = { Result, Carry };
      return DAG.getMergeValues(Ops, 2, dl);
    }

    // fold (ladd x, 0, y) -> 0, add x, y iff carry is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Carry = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::ADD, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Carry };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LSUB: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();

    // fold (lsub 0, 0, x) -> x, -x iff x has only the low bit set
    if (N0C && N0C->isNullValue() && N1C && N1C->isNullValue()) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = N2;
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT,
                                     DAG.getConstant(0, VT), N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }

    // fold (lsub x, 0, y) -> 0, sub x, y iff borrow is unused and y has only the
    // low bit set
    if (N1C && N1C->isNullValue() && N->hasNUsesOfValue(0, 1)) {
      APInt KnownZero, KnownOne;
      APInt Mask = APInt::getHighBitsSet(VT.getSizeInBits(),
                                         VT.getSizeInBits() - 1);
      DAG.ComputeMaskedBits(N2, KnownZero, KnownOne);
      if ((KnownZero & Mask) == Mask) {
        SDValue Borrow = DAG.getConstant(0, VT);
        SDValue Result = DAG.getNode(ISD::SUB, dl, VT, N0, N2);
        SDValue Ops[] = { Result, Borrow };
        return DAG.getMergeValues(Ops, 2, dl);
      }
    }
  }
  break;
  case XCoreISD::LMUL: {
    SDValue N0 = N->getOperand(0);
    SDValue N1 = N->getOperand(1);
    SDValue N2 = N->getOperand(2);
    SDValue N3 = N->getOperand(3);
    ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
    ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
    EVT VT = N0.getValueType();
    // Canonicalize multiplicative constant to RHS. If both multiplicative
    // operands are constant canonicalize smallest to RHS.
    if ((N0C && !N1C) ||
        (N0C && N1C && N0C->getZExtValue() < N1C->getZExtValue()))
      return DAG.getNode(XCoreISD::LMUL, dl, DAG.getVTList(VT, VT),
                         N1, N0, N2, N3);

    // lmul(x, 0, a, b)
    if (N1C && N1C->isNullValue()) {
      // If the high result is unused fold to add(a, b)
      if (N->hasNUsesOfValue(0, 0)) {
        SDValue Lo = DAG.getNode(ISD::ADD, dl, VT, N2, N3);
        SDValue Ops[] = { Lo, Lo };
        return DAG.getMergeValues(Ops, 2, dl);
      }
      // Otherwise fold to ladd(a, b, 0)
      SDValue Result =
        DAG.getNode(XCoreISD::LADD, dl, DAG.getVTList(VT, VT), N2, N3, N1);
      SDValue Carry(Result.getNode(), 1);
      SDValue Ops[] = { Carry, Result };
      return DAG.getMergeValues(Ops, 2, dl);
    }
  }
  break;
  case ISD::ADD: {
    // Fold 32 bit expressions such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b). The high result of lmul will be ignored.
    // This is only profitable if the intermediate results are unused
    // elsewhere.
    SDValue Mul0, Mul1, Addend0, Addend1;
    if (N->getValueType(0) == MVT::i32 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, true)) {
      SDValue Ignored = DAG.getNode(XCoreISD::LMUL, dl,
                                    DAG.getVTList(MVT::i32, MVT::i32), Mul0,
                                    Mul1, Addend0, Addend1);
      SDValue Result(Ignored.getNode(), 1);
      return Result;
    }
    APInt HighMask = APInt::getHighBitsSet(64, 32);
    // Fold 64 bit expression such as add(add(mul(x,y),a),b) ->
    // lmul(x, y, a, b) if all operands are zero-extended. We do this
    // before type legalization as it is messy to match the operands after
    // that.
    if (N->getValueType(0) == MVT::i64 &&
        isADDADDMUL(SDValue(N, 0), Mul0, Mul1, Addend0, Addend1, false) &&
        DAG.MaskedValueIsZero(Mul0, HighMask) &&
        DAG.MaskedValueIsZero(Mul1, HighMask) &&
        DAG.MaskedValueIsZero(Addend0, HighMask) &&
        DAG.MaskedValueIsZero(Addend1, HighMask)) {
      SDValue Mul0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul0, DAG.getConstant(0, MVT::i32));
      SDValue Mul1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                  Mul1, DAG.getConstant(0, MVT::i32));
      SDValue Addend0L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend0, DAG.getConstant(0, MVT::i32));
      SDValue Addend1L = DAG.getNode(ISD::EXTRACT_ELEMENT, dl, MVT::i32,
                                     Addend1, DAG.getConstant(0, MVT::i32));
      SDValue Hi = DAG.getNode(XCoreISD::LMUL, dl,
                               DAG.getVTList(MVT::i32, MVT::i32), Mul0L, Mul1L,
                               Addend0L, Addend1L);
      SDValue Lo(Hi.getNode(), 1);
      return DAG.getNode(ISD::BUILD_PAIR, dl, MVT::i64, Lo, Hi);
    }
  }
  break;
  case ISD::STORE: {
    // Replace unaligned store of unaligned load with memmove.
    StoreSDNode *ST = cast<StoreSDNode>(N);
    if (!DCI.isBeforeLegalize() ||
        allowsUnalignedMemoryAccesses(ST->getMemoryVT()) ||
        ST->isVolatile() || ST->isIndexed()) {
      break;
    }
    SDValue Chain = ST->getChain();

    unsigned StoreBits = ST->getMemoryVT().getStoreSizeInBits();
    if (StoreBits % 8) {
      break;
    }
    unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(
        ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
    unsigned Alignment = ST->getAlignment();
    if (Alignment >= ABIAlignment) {
      break;
    }

    if (LoadSDNode *LD = dyn_cast<LoadSDNode>(ST->getValue())) {
      if (LD->hasNUsesOfValue(1, 0) && ST->getMemoryVT() == LD->getMemoryVT() &&
          LD->getAlignment() == Alignment &&
          !LD->isVolatile() && !LD->isIndexed() &&
          Chain.reachesChainWithoutSideEffects(SDValue(LD, 1))) {
        return DAG.getMemmove(Chain, dl, ST->getBasePtr(),
                              LD->getBasePtr(),
                              DAG.getConstant(StoreBits/8, MVT::i32),
                              Alignment, false, ST->getPointerInfo(),
                              LD->getPointerInfo());
      }
    }
    break;
  }
  }
  return SDValue();
}
void XCoreTargetLowering::computeMaskedBitsForTargetNode(const SDValue Op,
                                                         APInt &KnownZero,
                                                         APInt &KnownOne,
                                                         const SelectionDAG &DAG,
                                                         unsigned Depth) const {
  KnownZero = KnownOne = APInt(KnownZero.getBitWidth(), 0);
  switch (Op.getOpcode()) {
  default: break;
  case XCoreISD::LADD:
  case XCoreISD::LSUB:
    if (Op.getResNo() == 1) {
      // Top bits of carry / borrow are clear.
      KnownZero = APInt::getHighBitsSet(KnownZero.getBitWidth(),
                                        KnownZero.getBitWidth() - 1);
    }
    break;
  }
}
//===----------------------------------------------------------------------===//
//  Addressing mode description hooks
//===----------------------------------------------------------------------===//
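
// Predicates for the XCore scaled unsigned immediate operand forms: isImmUs
// accepts 0..11, and the Us2/Us4 variants accept that range scaled by the
// two- or four-byte access size.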
static inline bool isImmUs(int64_t val)
{
  return (val >= 0 && val <= 11);
}

static inline bool isImmUs2(int64_t val)
{
  return (val%2 == 0 && isImmUs(val/2));
}

static inline bool isImmUs4(int64_t val)
{
  return (val%4 == 0 && isImmUs(val/4));
}
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
bool
XCoreTargetLowering::isLegalAddressingMode(const AddrMode &AM,
                                           Type *Ty) const {
  if (Ty->getTypeID() == Type::VoidTyID)
    return AM.Scale == 0 && isImmUs(AM.BaseOffs) && isImmUs4(AM.BaseOffs);

  const DataLayout *TD = TM.getDataLayout();
  unsigned Size = TD->getTypeAllocSize(Ty);
  if (AM.BaseGV) {
    return Size >= 4 && !AM.HasBaseReg && AM.Scale == 0 &&
                 AM.BaseOffs%4 == 0;
  }

  switch (Size) {
  case 1:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs(AM.BaseOffs);
    }
    // reg + reg
    return AM.Scale == 1 && AM.BaseOffs == 0;
  case 2:
  case 3:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs2(AM.BaseOffs);
    }
    // reg + reg<<1
    return AM.Scale == 2 && AM.BaseOffs == 0;
  default:
    // reg + imm
    if (AM.Scale == 0) {
      return isImmUs4(AM.BaseOffs);
    }
    // reg + reg<<2
    return AM.Scale == 4 && AM.BaseOffs == 0;
  }
}
bool XCoreTargetLowering::
isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const {
  // The XCore target isn't yet aware of offsets.
  return false;
}
//===----------------------------------------------------------------------===//
//  XCore Inline Assembly Support
//===----------------------------------------------------------------------===//
std::pair<unsigned, const TargetRegisterClass*>
XCoreTargetLowering::
getRegForInlineAsmConstraint(const std::string &Constraint,
                             EVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default : break;
    case 'r':
      return std::make_pair(0U, &XCore::GRRegsRegClass);
    }
  }
  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT);
}