//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

42 #include "llvm/CodeGen/Analysis.h"
43 #include "llvm/CodeGen/FastISel.h"
44 #include "llvm/ADT/Optional.h"
45 #include "llvm/ADT/Statistic.h"
46 #include "llvm/Analysis/BranchProbabilityInfo.h"
47 #include "llvm/Analysis/Loads.h"
48 #include "llvm/CodeGen/Analysis.h"
49 #include "llvm/CodeGen/FunctionLoweringInfo.h"
50 #include "llvm/CodeGen/MachineFrameInfo.h"
51 #include "llvm/CodeGen/MachineInstrBuilder.h"
52 #include "llvm/CodeGen/MachineModuleInfo.h"
53 #include "llvm/CodeGen/MachineRegisterInfo.h"
54 #include "llvm/CodeGen/StackMaps.h"
55 #include "llvm/IR/DataLayout.h"
56 #include "llvm/IR/DebugInfo.h"
57 #include "llvm/IR/Function.h"
58 #include "llvm/IR/GlobalVariable.h"
59 #include "llvm/IR/Instructions.h"
60 #include "llvm/IR/IntrinsicInst.h"
61 #include "llvm/IR/Operator.h"
62 #include "llvm/Support/Debug.h"
63 #include "llvm/Support/ErrorHandling.h"
64 #include "llvm/Target/TargetInstrInfo.h"
65 #include "llvm/Target/TargetLibraryInfo.h"
66 #include "llvm/Target/TargetLowering.h"
67 #include "llvm/Target/TargetMachine.h"
68 #include "llvm/Target/TargetSubtargetInfo.h"
71 #define DEBUG_TYPE "isel"
STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// \brief Set CallLoweringInfo attribute flags based on a call instruction
/// and called function attributes.
void FastISel::ArgListEntry::setAttributes(ImmutableCallSite *CS,
                                           unsigned AttrIdx) {
  isSExt     = CS->paramHasAttr(AttrIdx, Attribute::SExt);
  isZExt     = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
  isInReg    = CS->paramHasAttr(AttrIdx, Attribute::InReg);
  isSRet     = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
  isNest     = CS->paramHasAttr(AttrIdx, Attribute::Nest);
  isByVal    = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
  isInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
  isReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
  Alignment  = CS->getParamAlignment(AttrIdx);
}

/// startNewBlock - Set the current block to which generated machine
/// instructions will be appended, and clear the local CSE map.
///
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

bool FastISel::LowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!FastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
         E = FuncInfo.Fn->arg_end(); I != E; ++I) {
    DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[I] = VI->second;
  }
  return true;
}

void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
}

bool FastISel::hasTrivialKill(const Value *V) {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const CastInst *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Even if the value has only one use in the LLVM IR, it is possible that
  // FastISel might fold the use into another instruction and now there is
  // more than one use at the Machine Instruction level.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg && !MRI.use_empty(Reg))
    return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}

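// Example for hasTrivialKill(): in IR such as
//   %x = add i32 %a, %b
//   %y = mul i32 %x, %c
// %x has a single use, in the same block, and is not a bitcast or
// pointer/integer conversion, so the register holding %x can be marked
// killed at its use in %y.
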
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg != 0)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

unsigned FastISel::MaterializeConstant(const Value *V, MVT VT) {
  unsigned Reg = 0;
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = FastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = TargetMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
      getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getContext())));
  else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = TargetMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void) Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                  APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, x);

        unsigned IntegerReg =
          getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const Operator *Op = dyn_cast<Operator>(V)) {
    if (!SelectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !TargetSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

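// Illustrative note for the ConstantFP fallback above: a constant like
// 'double 42.0' that the target cannot emit directly may be materialized as
// the integer 42 followed by a SINT_TO_FP, which is only done when the
// conversion to integer was exact.
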
/// materializeRegForValue - Helper for getRegForValue. This function is
/// called when the value isn't already available in a register and must
/// be materialized with new instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = TargetMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = MaterializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg != 0) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

/// UpdateValueMap - Update the value map to include the new mapping for this
/// instruction, or insert an extra copy to get the result in a previously
/// determined register.
/// NOTE: This is only necessary because we might select a block that uses
/// a value before we select the block that defines the value. It might be
/// possible to fix this by selecting blocks in reverse postorder.
void FastISel::UpdateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg+i] = Reg+i;

    AssignedReg = Reg;
  }
}

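// Example for UpdateValueMap(): if a use of %v was selected before %v's
// defining block and was given vreg %A, and the definition is later selected
// into vreg %B, the RegFixups entry A -> B lets a later pass rewrite the uses
// instead of emitting an extra copy here.
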
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = FastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE,
                      IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

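// For example, on a target with 64-bit pointers an i32 GEP index returned
// here is first sign-extended to the pointer width so that it can be scaled
// and added to the base address in SelectGetElementPtr.
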
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I && E && std::distance(I, E) > 0 && "Invalid iterator!");
  while (I != E) {
    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DbgLoc;
  recomputeInsertPt();
  DbgLoc = DebugLoc();
  SavePoint SP = { OldInsertPt, OldDL };
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DbgLoc = OldInsertPt.DL;
}

/// SelectBinaryOp - Select and emit code for a binary operator instruction,
/// which has an opcode which directly corresponds to the given ISD opcode.
///
bool FastISel::SelectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 &&
        (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
         ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (Op1 == 0) return false;

      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1,
                                        Op1IsKill, CI->getZExtValue(),
                                        VT.getSimpleVT());
      if (ResultReg == 0) return false;

      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getZExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() &&
        isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    unsigned ResultReg = FastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (ResultReg == 0) return false;

    // We successfully emitted code for the given LLVM Instruction.
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (ConstantFP *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = FastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg != 0) {
      // We successfully emitted code for the given LLVM Instruction.
      UpdateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (Op1 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = FastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode,
                                   Op0, Op0IsKill,
                                   Op1, Op1IsKill);
  if (ResultReg == 0)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, ResultReg);
  return true;
}

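// Note for the constant-operand paths above: FastEmit_ri_ can strength-reduce
// further, e.g. a MUL or UDIV by a power of two becomes a shift; see
// FastEmit_ri_ below.
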
bool FastISel::SelectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (N == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin()+1,
       E = I->op_end(); OI != E; ++OI) {
    const Value *Idx = *OI;
    if (StructType *StTy = dyn_cast<StructType>(Ty)) {
      unsigned Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (N == 0)
            // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const ConstantInt *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero()) continue;
        // N = N + Offset
        TotalOffs +=
          DL.getTypeAllocSize(Ty)*cast<ConstantInt>(CI)->getSExtValue();
        if (TotalOffs >= MaxOffs) {
          N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (N == 0)
            // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (N == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (IdxN == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = FastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (IdxN == 0)
          // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = FastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (N == 0)
        // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = FastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (N == 0)
      // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  UpdateValueMap(I, N);
  return true;
}

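// Illustrative example: for 'getelementptr [10 x i32]* %p, i32 0, i32 3' both
// indices are constants, so they fold into TotalOffs == 12 and a single ADD
// of 12 is emitted instead of one add per index.
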
/// \brief Add a stackmap or patchpoint intrinsic call's live variable operands
/// to a stackmap or patchpoint machine instruction.
bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      unsigned Reg = getRegForValue(Val);
      if (Reg == 0)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }

  return true;
}

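// For example, live variables (i64 42, i8* null, <static alloca>) are encoded
// as [ConstantOp, 42, ConstantOp, 0, <frame index>]; any other value must
// already be available in a register.
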
bool FastISel::SelectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
    cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap doesn't
  // clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
    .addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.addOperand(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
    .addImm(0).addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo()->setHasStackMap();

  return true;
}

/// \brief Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  ImmutableCallSite CS(CI);
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
       ArgI != ArgE; ++ArgI, ++AttrI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, AttrI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return LowerCallTo(CLI);
}

bool FastISel::SelectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee = I->getOperand(PatchPointOpers::TargetPos);

  // Get the real number of arguments participating in the call <numArgs>.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
    cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>.
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
    cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Assume that the callee is a constant address or null pointer.
  // FIXME: handle function symbols in the future.
  uint64_t CalleeAddr;
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee))
    CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue();
  else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr)
      CalleeAddr = cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (isa<ConstantPointerNull>(Callee))
    CalleeAddr = 0;
  else
    llvm_unreachable("Unsupported callee address.");

  Ops.push_back(MachineOperand::CreateImm(CalleeAddr));

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention.
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC)
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      unsigned Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(TRI.getCallPreservedMask(CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
                                            /*IsImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.addOperand(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo()->setHasPatchPoint();

  if (CLI.NumResultRegs)
    UpdateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

/// Returns an AttributeSet representing the attributes applied to the return
/// value of the given call.
static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
                           Attrs);
}

bool FastISel::LowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  ImmutableCallSite CS(CI);

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  Type *RetTy = FTy->getReturnType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgI + 1);
    Args.push_back(Entry);
  }

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, SymName, std::move(Args), CS, NumArgs);

  return LowerCallTo(CLI);
}

bool FastISel::LowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI);

  bool CanLowerReturn = TLI.CanLowerReturn(CLI.CallConv, *FuncInfo.MF,
                                           CLI.IsVarArg, Outs,
                                           CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.isByVal)
      FinalType = cast<PointerType>(Arg.Ty)->getElementType();
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);

    ISD::ArgFlagsTy Flags;
    if (Arg.isZExt)
      Flags.setZExt();
    if (Arg.isSExt)
      Flags.setSExt();
    if (Arg.isInReg)
      Flags.setInReg();
    if (Arg.isSRet)
      Flags.setSRet();
    if (Arg.isByVal)
      Flags.setByVal();
    if (Arg.isInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.isByVal || Arg.isInAlloca) {
      PointerType *Ty = cast<PointerType>(Arg.Ty);
      Type *ElementTy = Ty->getElementType();
      unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      unsigned FrameAlign = Arg.Alignment;
      if (!FrameAlign)
        FrameAlign = TLI.getByValTypeAlignment(ElementTy);
      Flags.setByValSize(FrameSize);
      Flags.setByValAlign(FrameAlign);
    }
    if (Arg.isNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
    Flags.setOrigAlign(OriginalAlignment);

    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!FastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CS)
    UpdateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);

  return true;
}

bool FastISel::LowerCall(const CallInst *CI) {
  ImmutableCallSite CS(CI);

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FuncTy = cast<FunctionType>(PT->getElementType());
  Type *RetTy = FuncTy->getReturnType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CS.arg_size());

  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    Value *V = *i;

    // Skip empty types.
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within FastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(CS, TM))
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
    .setTailCall(IsTailCall);

  return LowerCallTo(CLI);
}

bool FastISel::SelectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // If the inline asm has side effects, then make sure that no local value
    // lives across by flushing the local value map.
    if (IA->hasSideEffects())
      flushLocalValueMap();

    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::INLINEASM))
      .addExternalSymbol(IA->getAsmString().c_str())
      .addImm(ExtraInfo);
    return true;
  }

  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
  ComputeUsesVAFloatArgument(*Call, &MMI);

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return SelectIntrinsicCall(II);

  // Usually, it does not make sense to initialize a value,
  // make an unrelated function call and use the value, because
  // it tends to be spilled on the stack. So, we move the pointer
  // to the last local value to the beginning of the block, so that
  // all the values which have already been materialized,
  // appear after the call. It also makes sense to skip intrinsics
  // since they tend to be inlined.
  flushLocalValueMap();

  return LowerCall(Call);
}

bool FastISel::SelectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default: break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    DIVariable DIVar(DI->getVariable());
    assert((!DIVar || DIVar.isVariable()) &&
           "Variable in DbgDeclareInst should be either null or a DIVariable.");
    if (!DIVar || !FuncInfo.MF->getMMI().hasDebugInfo()) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    unsigned Offset = 0;
    Optional<MachineOperand> Op;
    if (const Argument *Arg = dyn_cast<Argument>(Address))
      // Some arguments' frame index is recorded during argument lowering.
      Offset = FuncInfo.getArgumentFrameIndex(Arg);
    if (Offset)
      Op = MachineOperand::CreateFI(Offset);
    if (!Op)
      if (unsigned Reg = lookUpRegForValue(Address))
        Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      if (Op->isReg()) {
        Op->setIsDebug(true);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0,
                DI->getVariable());
      } else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE))
            .addOperand(*Op)
            .addImm(0)
            .addMetadata(DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(0U).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addCImm(CI).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addImm(CI->getZExtValue()).addImm(DI->getOffset())
          .addMetadata(DI->getVariable());
    } else if (const ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addFPImm(CF).addImm(DI->getOffset())
        .addMetadata(DI->getVariable());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = DI->getOffset() != 0;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect,
              Reg, DI->getOffset(), DI->getVariable());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(II->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (ResultReg == 0)
      return false;
    UpdateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::expect: {
    unsigned ResultReg = getRegForValue(II->getArgOperand(0));
    if (ResultReg == 0)
      return false;
    UpdateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return SelectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return SelectPatchpoint(II);
  }

  return FastLowerIntrinsicCall(II);
}

bool FastISel::SelectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() ||
      DstVT == MVT::Other || !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = FastEmit_r(SrcVT.getSimpleVT(),
                                  DstVT.getSimpleVT(),
                                  Opcode,
                                  InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

bool FastISel::SelectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0)
      return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (Op0 == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT == DstVT) {
    const TargetRegisterClass* SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass* DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = FastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

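// For example, a bitcast between two types that map to the same MVT and
// register class is a machine-level no-op and becomes a single COPY, whereas
// an int<->float bitcast typically crosses register classes and needs a
// target BITCAST pattern.
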
bool
FastISel::SelectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!HandlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DbgLoc = I->getDebugLoc();

  MachineBasicBlock::iterator SavedInsertPt = FuncInfo.InsertPt;

  if (const CallInst *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc::Func Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        !TM.Options.getTrapFunctionName().empty())
      return false;
  }

  // First, try doing target-independent selection.
  if (SelectOperator(I, I->getOpcode())) {
    ++NumFastIselSuccessIndependent;
    DbgLoc = DebugLoc();
    return true;
  }
  // Remove dead code. However, ignore call instructions since we've flushed
  // the local value map and recomputed the insert point.
  if (!isa<CallInst>(I)) {
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
  }

  // Next, try calling the target to attempt to handle the instruction.
  SavedInsertPt = FuncInfo.InsertPt;
  if (TargetSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DbgLoc = DebugLoc();
    return true;
  }
  // Check for dead code and remove as necessary.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DbgLoc = DebugLoc();
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (isa<TerminatorInst>(I))
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  return false;
}

/// FastEmitBranch - Emit an unconditional branch to the given block,
/// unless it is the immediate (fall-through) successor, and update
/// the CFG.
void
FastISel::FastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // For more accurate line information if this is the only instruction
    // in the block then emit it, otherwise we have the unconditional
    // fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  uint32_t BranchWeight = 0;
  if (FuncInfo.BPI)
    BranchWeight = FuncInfo.BPI->getEdgeWeight(FuncInfo.MBB->getBasicBlock(),
                                               MSucc->getBasicBlock());
  FuncInfo.MBB->addSuccessor(MSucc, BranchWeight);
}

/// SelectFNeg - Emit an FNeg operation.
///
bool
FastISel::SelectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (OpReg == 0) return false;

  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = FastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(),
                                  ISD::FNEG, OpReg, OpRegIsKill);
  if (ResultReg != 0) {
    UpdateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64) return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = FastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (IntReg == 0)
    return false;

  unsigned IntResultReg = FastEmit_ri_(IntVT.getSimpleVT(), ISD::XOR,
                                       IntReg, /*Kill=*/true,
                                       UINT64_C(1) << (VT.getSizeInBits()-1),
                                       IntVT.getSimpleVT());
  if (IntResultReg == 0)
    return false;

  ResultReg = FastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(),
                         ISD::BITCAST, IntResultReg, /*Kill=*/true);
  if (ResultReg == 0)
    return false;

  UpdateValueMap(I, ResultReg);
  return true;
}

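// Example of the fallback above: fneg of a double on a target without
// ISD::FNEG becomes a bitcast to i64, an XOR with 1 << 63 (the sign bit), and
// a bitcast back to double.
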
bool
FastISel::SelectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  UpdateValueMap(EVI, ResultReg);
  return true;
}

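// Illustrative example: if %agg = {i32, i64} from a call occupies consecutive
// vregs starting at %r, then 'extractvalue %agg, 1' maps to %r plus the
// number of registers used by the i32 member; no instructions are emitted.
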
bool
FastISel::SelectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return SelectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return SelectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return SelectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return SelectFNeg(I);
    return SelectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return SelectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return SelectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return SelectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return SelectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return SelectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return SelectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return SelectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return SelectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return SelectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return SelectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return SelectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return SelectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return SelectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return SelectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return SelectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      FastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return FastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return SelectCall(I);

  case Instruction::BitCast:
    return SelectBitCast(I);

  case Instruction::FPToSI:
    return SelectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return SelectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return SelectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return SelectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return SelectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return SelectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return SelectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (Reg == 0) return false;
    UpdateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return SelectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &funcInfo,
                   const TargetLibraryInfo *libInfo)
  : FuncInfo(funcInfo), MF(funcInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
    MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
    TM(FuncInfo.MF->getTarget()), DL(*TM.getSubtargetImpl()->getDataLayout()),
    TII(*TM.getSubtargetImpl()->getInstrInfo()),
    TLI(*TM.getSubtargetImpl()->getTargetLowering()),
    TRI(*TM.getSubtargetImpl()->getRegisterInfo()), LibInfo(libInfo) {}

FastISel::~FastISel() {}

bool FastISel::FastLowerArguments() {
  return false;
}

bool FastISel::FastLowerCall(CallLoweringInfo &/*CLI*/) {
  return false;
}

bool FastISel::FastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

unsigned FastISel::FastEmit_(MVT, MVT,
                             unsigned) {
  return 0;
}

unsigned FastISel::FastEmit_r(MVT, MVT,
                              unsigned,
                              unsigned /*Op0*/, bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_rr(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               unsigned /*Op1*/, bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::FastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_f(MVT, MVT,
                              unsigned, const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_ri(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rf(MVT, MVT,
                               unsigned,
                               unsigned /*Op0*/, bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::FastEmit_rri(MVT, MVT,
                                unsigned,
                                unsigned /*Op0*/, bool /*Op0IsKill*/,
                                unsigned /*Op1*/, bool /*Op1IsKill*/,
                                uint64_t /*Imm*/) {
  return 0;
}

/// FastEmit_ri_ - This method is a wrapper of FastEmit_ri. It first tries
/// to emit an instruction with an immediate operand using FastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// FastEmit_rr instead.
unsigned FastISel::FastEmit_ri_(MVT VT, unsigned Opcode,
                                unsigned Op0, bool Op0IsKill,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = FastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg != 0)
    return ResultReg;
  unsigned MaterialReg = FastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (MaterialReg == 0) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy = IntegerType::get(FuncInfo.Fn->getContext(),
                                        VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (MaterialReg == 0) return 0;
  }
  return FastEmit_rr(VT, VT, Opcode,
                     Op0, Op0IsKill,
                     MaterialReg, /*Kill=*/true);
}

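// Example: FastEmit_ri_(MVT::i32, ISD::MUL, Op0, Op0IsKill, /*Imm=*/8, ...)
// is rewritten to an ISD::SHL by 3; if the target has no ri pattern for the
// shift, the amount 3 is materialized into a register and the rr form is
// tried instead.
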
unsigned FastISel::createResultReg(const TargetRegisterClass* RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II,
                                            unsigned Op, unsigned OpNum) {
  if (TargetRegisterInfo::isVirtualRegister(Op)) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      unsigned NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}

unsigned FastISel::FastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass* RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::FastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  unsigned Op0, bool Op0IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::FastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   unsigned Op1, bool Op1IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}


unsigned FastISel::FastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    unsigned Op2, bool Op2IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addReg(Op2, Op2IsKill * RegState::Kill);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    uint64_t Imm1, uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addImm(Imm1)
      .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   unsigned Op0, bool Op0IsKill,
                                   const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC,
                                    unsigned Op0, bool Op0IsKill,
                                    unsigned Op1, bool Op1IsKill,
                                    uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_rrii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill,
                                     unsigned Op1, bool Op1IsKill,
                                     uint64_t Imm1, uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addReg(Op0, Op0IsKill * RegState::Kill)
      .addReg(Op1, Op1IsKill * RegState::Kill)
      .addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC,
                                   uint64_t Imm1, uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
      .addImm(Imm1).addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
      .addImm(Imm1).addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::FastEmitInst_extractsubreg(MVT RetVT,
                                              unsigned Op0, bool Op0IsKill,
                                              uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt,
          DbgLoc, TII.get(TargetOpcode::COPY), ResultReg)
    .addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}
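
// For example (illustrative), extracting the low 32 bits of a 64-bit value on
// x86-64 reduces to a plain subregister COPY:
//
//   %result<def> = COPY %op0:sub_32bit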

/// FastEmitZExtFromI1 - Emit MachineInstrs to compute the value of Op
/// with all but the least significant bit set to zero.
unsigned FastISel::FastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return FastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
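
// I.e. for VT == MVT::i32 this emits the equivalent of "and i32 %op, 1",
// clearing any garbage the producer may have left in the upper 31 bits.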

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin())) continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB)) continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const PHINode *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty()) continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
          VT = TLI.getTypeToTransformTo(LLVMBB->getContext(), VT);
        else {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DbgLoc = PN->getDebugLoc();
      if (const Instruction *Inst = dyn_cast<Instruction>(PHIOp))
        DbgLoc = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (Reg == 0) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DbgLoc = DebugLoc();
    }
  }

  return true;
}
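
// Illustrative example (not from this file): given IR like
//
//   entry:  br label %merge
//   merge:  %p = phi i32 [ 7, %entry ], [ %x, %other ]
//
// selecting "entry" materializes the constant 7 into a vreg and records the
// (machine PHI, vreg) pair in FuncInfo.PHINodesToUpdate; the PHI operands are
// filled in later, once all predecessor MBBs exist.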

bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");

  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  unsigned LoadReg = getRegForValue(LI);
  if (LoadReg == 0)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}
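
// Illustrative example (not from this file): on x86 a single-use load feeding
// a compare,
//
//   %v = load i32* %p
//   %c = icmp eq i32 %v, 0
//
// can fold into a memory-operand form such as CMP32mi instead of a separate
// MOV32rm followed by CMP32ri.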

bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}
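
// Illustrative example (not from this file): with
//
//   %sum = add i64 %idx, 16
//   %gep = getelementptr i8* %base, i64 %sum
//
// all four conditions hold, so the add's constant 16 can become a displacement
// in the GEP's addressing mode rather than a separately materialized value.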

MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  unsigned Alignment;
  unsigned Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlignment();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlignment();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else {
    return nullptr;
  }

  bool IsNonTemporal = I->getMetadata("nontemporal") != nullptr;
  bool IsInvariant = I->getMetadata("invariant.load") != nullptr;
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo;
  I->getAAMetadata(AAInfo);

  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlignment(ValTy);

  unsigned Size =
      TM.getSubtargetImpl()->getDataLayout()->getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           Alignment, AAInfo, Ranges);
}
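
// Usage sketch (illustrative; LoadOpc and BaseReg are placeholders): a target
// selector attaches the operand to the instruction it builds so later passes
// see the access's size, alignment, and aliasing metadata:
//
//   if (MachineMemOperand *MMO = createMachineMemOperandFor(LI))
//     BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(LoadOpc),
//             ResultReg).addReg(BaseReg).addMemOperand(MMO);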