//===-- FastISel.cpp - Implementation of the FastISel class --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetSubtargetInfo.h"
using namespace llvm;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

void FastISel::ArgListEntry::setAttributes(ImmutableCallSite *CS,
                                           unsigned AttrIdx) {
  IsSExt = CS->paramHasAttr(AttrIdx, Attribute::SExt);
  IsZExt = CS->paramHasAttr(AttrIdx, Attribute::ZExt);
  IsInReg = CS->paramHasAttr(AttrIdx, Attribute::InReg);
  IsSRet = CS->paramHasAttr(AttrIdx, Attribute::StructRet);
  IsNest = CS->paramHasAttr(AttrIdx, Attribute::Nest);
  IsByVal = CS->paramHasAttr(AttrIdx, Attribute::ByVal);
  IsInAlloca = CS->paramHasAttr(AttrIdx, Attribute::InAlloca);
  IsReturned = CS->paramHasAttr(AttrIdx, Attribute::Returned);
  Alignment = CS->getParamAlignment(AttrIdx);
}

/// Set the current block to which generated machine instructions will be
/// appended, and clear the local CSE map.
void FastISel::startNewBlock() {
  LocalValueMap.clear();

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[I] = VI->second;
  }
  return true;
}

void FastISel::flushLocalValueMap() {
  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}

bool FastISel::hasTrivialKill(const Value *V) {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const auto *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(DL.getIntPtrType(Cast->getContext())) &&
        !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Even if the value has only one use in the LLVM IR, it is possible that
  // FastISel might fold the use into another instruction and now there is more
  // than one use at the Machine Instruction level.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg && !MRI.use_empty(Reg))
    return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}

unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
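    // For example, on X86 an illegal i1 value is promoted to i8 here, so the
    // value simply lives in the wider register.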
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
  unsigned Reg = 0;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
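    // (On a 64-bit target, for example, a null pointer then shares a register
    // with a literal i64 0 materialized in the same block.)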
    Reg = getRegForValue(
        Constant::getNullValue(DL.getIntPtrType(V->getContext())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy();

      uint64_t x[2];
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      bool isExact;
      (void)Flt.convertToInteger(x, IntBitWidth, /*isSigned=*/true,
                                 APFloat::rmTowardZero, &isExact);
      if (isExact) {
        APInt IntVal(IntBitWidth, x);

        unsigned IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), IntVal));
        if (IntegerReg != 0)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg, /*Kill=*/false);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++)
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;

    AssignedReg = Reg;
  }
}

std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (!IdxN)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
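  // For example, on a 64-bit target an i32 index is sign-extended to i64
  // here before it feeds the pointer arithmetic.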
  MVT PtrVT = TLI.getPointerTy();
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
                      IdxNIsKill);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN =
        fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }

  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I && E && std::distance(I, E) > 0 && "Invalid iterator!");
  while (I != E) {
    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DbgLoc;
  recomputeInsertPt();
  DbgLoc = DebugLoc();
  SavePoint SP = {OldInsertPt, OldDL};
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DbgLoc = OldInsertPt.DL;
}

bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
                          ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
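  // For example, "add i32 5, %x" arrives here with the constant first; since
  // add is commutative, it can still be emitted as %x plus the immediate 5.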
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;
      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
                       CI->getZExtValue(), VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  // Check if the second operand is a constant float.
  if (const auto *CF = dyn_cast<ConstantFP>(I->getOperand(1))) {
    unsigned ResultReg = fastEmit_rf(VT.getSimpleVT(), VT.getSimpleVT(),
                                     ISDOpcode, Op0, Op0IsKill, CF);
    if (ResultReg) {
      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
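  // For example, a GEP with several constant struct-field and array indices
  // sums all of their byte offsets here and emits a single ADD at the end,
  // rather than one ADD per index.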
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  Type *Ty = I->getOperand(0)->getType();
  MVT VT = TLI.getPointerTy();
  for (GetElementPtrInst::const_op_iterator OI = I->op_begin() + 1,
                                            E = I->op_end();
       OI != E; ++OI) {
    const Value *Idx = *OI;
    if (auto *StTy = dyn_cast<StructType>(Ty)) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
      Ty = StTy->getElementType(Field);
    } else {
      Ty = cast<SequentialType>(Ty)->getElementType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      unsigned Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Issue CALLSEQ_START.
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown))
      .addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.addOperand(MO);

  // Issue CALLSEQ_END.
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo()->setHasStackMap();

  return true;
}

/// \brief Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  ImmutableCallSite CS(CI);
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs, AttrI = ArgIdx + 1;
       ArgI != ArgE; ++ArgI, ++AttrI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, AttrI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee =
      I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Get the real number of arguments participating in the call <numArgs>.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>.
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
          cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention.
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC)
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      unsigned Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
                                            /*IsImpl=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.addOperand(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo()->setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

/// Returns an AttributeSet representing the attributes applied to the return
/// value of the given call.
static AttributeSet getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeSet::get(CLI.RetTy->getContext(), AttributeSet::ReturnIndex,
                           Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  ImmutableCallSite CS(CI);

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FTy = cast<FunctionType>(PT->getElementType());
  Type *RetTy = FTy->getReturnType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgI + 1);
    Args.push_back(Entry);
  }

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, SymName, std::move(Args), CS, NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.RetTy, getReturnAttrs(CLI), Outs, TLI);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = cast<PointerType>(Arg.Ty)->getElementType();
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsByVal || Arg.IsInAlloca) {
      PointerType *Ty = cast<PointerType>(Arg.Ty);
      Type *ElementTy = Ty->getElementType();
      unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      unsigned FrameAlign = Arg.Alignment;
      if (!FrameAlign)
        FrameAlign = TLI.getByValTypeAlignment(ElementTy);
      Flags.setByValSize(FrameSize);
      Flags.setByValAlign(FrameAlign);
    }
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
    Flags.setOrigAlign(OriginalAlignment);

    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CS)
    updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  ImmutableCallSite CS(CI);

  PointerType *PT = cast<PointerType>(CS.getCalledValue()->getType());
  FunctionType *FuncTy = cast<FunctionType>(PT->getElementType());
  Type *RetTy = FuncTy->getReturnType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CS.arg_size());

  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    Value *V = *i;

    // Skip empty types.
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(&CS, i - CS.arg_begin() + 1);
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(CS, TM))
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
      .setTailCall(IsTailCall);

  return lowerCallTo(CLI);
}

bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // If the inline asm has side effects, then make sure that no local value
    // lives across by flushing the local value map.
    if (IA->hasSideEffects())
      flushLocalValueMap();

    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::INLINEASM))
        .addExternalSymbol(IA->getAsmString().c_str())
        .addImm(ExtraInfo);
    return true;
  }

  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
  ComputeUsesVAFloatArgument(*Call, &MMI);

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  // Usually, it does not make sense to initialize a value, make an unrelated
  // function call and then use the value, because the value tends to be
  // spilled on the stack. So, we move the pointer to the last local value to
  // the beginning of the block, so that all of the values which have already
  // been materialized appear after the call. It also makes sense to skip
  // intrinsics since they tend to be inlined.
  flushLocalValueMap();

  return lowerCall(Call);
}

bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
    return true;
  case Intrinsic::eh_actions: {
    unsigned ResultReg = getRegForValue(UndefValue::get(II->getType()));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    assert(DI->getVariable() && "Missing variable");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    unsigned Offset = 0;
    Optional<MachineOperand> Op;
    if (const auto *Arg = dyn_cast<Argument>(Address))
      // Some arguments' frame index is recorded during argument lowering.
      Offset = FuncInfo.getArgumentFrameIndex(Arg);
    if (Offset)
      Op = MachineOperand::CreateFI(Offset);
    if (!Op)
      if (unsigned Reg = lookUpRegForValue(Address))
        Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
             "Expected inlined-at fields to agree");
      if (Op->isReg()) {
        Op->setIsDebug(true);
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE), false, Op->getReg(), 0,
                DI->getVariable(), DI->getExpression());
      } else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                TII.get(TargetOpcode::DBG_VALUE))
            .addOperand(*Op)
            .addImm(0)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addReg(0U)
          .addImm(DI->getOffset())
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addCImm(CI)
            .addImm(DI->getOffset())
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addImm(CI->getZExtValue())
            .addImm(DI->getOffset())
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addFPImm(CF)
          .addImm(DI->getOffset())
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = DI->getOffset() != 0;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
              DI->getOffset(), DI->getVariable(), DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
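    // The object size is unknown at this point, so fold the intrinsic to its
    // conservative answer: -1 when the 'min' argument is false, 0 when true.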
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(II->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::expect: {
    unsigned ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return selectPatchpoint(II);
  }

  return fastLowerIntrinsicCall(II);
}

bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
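  // This only works when source and destination live in the same register
  // class; on X86, for example, an i32<->float bitcast crosses GR32/FR32 and
  // falls through to the ISD::BITCAST path below.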
  unsigned ResultReg = 0;
  if (SrcVT == DstVT) {
    const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectInstruction(const Instruction *I) {
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (isa<TerminatorInst>(I))
    if (!handlePHINodesInSuccessorBlocks(I->getParent()))
      return false;

  DbgLoc = I->getDebugLoc();

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc::Func Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
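    // (Functions such as fabs or sqrt, for instance, can often be selected
    // to a single target instruction by SelectionDAG.)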
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        !TM.Options.getTrapFunctionName().empty())
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      DbgLoc = DebugLoc();
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DbgLoc = DebugLoc();
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DbgLoc = DebugLoc();
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (isa<TerminatorInst>(I))
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc, DebugLoc DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // For more accurate line information if this is the only instruction
    // in the block then emit it, otherwise we have the unconditional
    // fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.InsertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  uint32_t BranchWeight = 0;
  if (FuncInfo.BPI)
    BranchWeight = FuncInfo.BPI->getEdgeWeight(FuncInfo.MBB->getBasicBlock(),
                                               MSucc->getBasicBlock());
  FuncInfo.MBB->addSuccessor(MSucc, BranchWeight);
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I) {
  unsigned OpReg = getRegForValue(BinaryOperator::getFNegArgument(I));
  if (!OpReg)
    return false;
  bool OpRegIsKill = hasTrivialKill(I);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(I->getType());
  unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg, OpRegIsKill);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
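  // For an f64 this XORs the i64 bit pattern with 1 << 63; e.g. 1.0
  // (0x3FF0000000000000) becomes -1.0 (0xBFF0000000000000).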
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (!IntReg)
    return false;

  unsigned IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg, /*IsKill=*/true);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
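  // For example, extracting field 1 of a { i32, i64 } skips however many
  // registers field 0 occupies (one register on a typical 64-bit target),
  // so the result lives at ResultReg + 1.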
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    if (BinaryOperator::isFNeg(I))
      return selectFNeg(I);
    return selectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(*FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(*TM.getDataLayout()),
      TII(*MF->getSubtarget().getInstrInfo()),
      TLI(*MF->getSubtarget().getTargetLowering()),
      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel) {}

FastISel::~FastISel() {}

bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }

unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
                              bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, unsigned /*Op1*/,
                               bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_rf(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/,
                               const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_rri(MVT, MVT, unsigned, unsigned /*Op0*/,
                                bool /*Op0IsKill*/, unsigned /*Op1*/,
                                bool /*Op1IsKill*/, uint64_t /*Imm*/) {
  return 0;
}

/// This method is a wrapper around fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
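/// For example, a request for "mul x, 8" is rewritten below as "shl x, 3",
/// and an immediate the target cannot encode is loaded into a register so
/// the operation can be retried in register-register form.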
unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
                                bool Op0IsKill, uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg)
    return ResultReg;
  unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  bool IsImmKill = true;
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return 0;
    // FIXME: If the materialized register here has no uses yet then this
    // will be the first use and we should be able to mark it as killed.
    // However, the local value area for materialising constant expressions
    // grows down, not up, which means that any constant expressions we generate
    // later which also use 'Imm' could be after this instruction and therefore
    // after this kill.
    IsImmKill = false;
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
}

unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
                                            unsigned OpNum) {
  if (TargetRegisterInfo::isVirtualRegister(Op)) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      unsigned NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}

unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
  return ResultReg;
}

unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, unsigned Op0,
                                  bool Op0IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
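
  // If the instruction produces its result in a fixed physical register (no
  // explicit def operand), emit it as-is and then COPY the implicit def into
  // the virtual ResultReg.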
  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, unsigned Op1,
                                   bool Op1IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, unsigned Op2,
                                    bool Op2IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, uint64_t Imm1,
                                    uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::fastEmitInst_rf(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::fastEmitInst_rrii(unsigned MachineInstOpcode,
                                     const TargetRegisterClass *RC,
                                     unsigned Op0, bool Op0IsKill, unsigned Op1,
                                     bool Op1IsKill, uint64_t Imm1,
                                     uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::fastEmitInst_ii(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, uint64_t Imm1,
                                   uint64_t Imm2) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
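
/// Emit a COPY from subregister \p Idx of \p Op0 into a register of the class
/// the target uses for \p RetVT, first constraining \p Op0 to a register
/// class that actually has that subregister.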
unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
                                              bool Op0IsKill, uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}
/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
/// Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const TerminatorInst *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (BasicBlock::const_iterator I = SuccBB->begin();
         const auto *PN = dyn_cast<PHINode>(I); ++I) {

      // Ignore dead phi's.
      if (PN->use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN->getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DbgLoc = PN->getDebugLoc();
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        DbgLoc = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(MBBI++, Reg));
      DbgLoc = DebugLoc();
    }
  }

  return true;
}
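
/// Check whether the load \p LI can be folded into its user \p FoldInst.
/// Several IR instructions may end up folded into one machine instruction,
/// so this walks the single-use chain starting at \p LI to make sure it
/// actually reaches \p FoldInst before asking the target to do the fold.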
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  unsigned LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}
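
/// Return true if \p Add can be folded into the GEP \p GEP as an extra
/// offset: it must really be an add with a constant operand, its type size
/// must match the GEP's, and it must be defined in the current block.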
bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}
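
/// Build a MachineMemOperand describing the memory access performed by \p I,
/// or return nullptr if \p I is neither a load nor a store. Alignment,
/// volatility, and the relevant metadata (nontemporal, invariant.load, range,
/// AA info) are carried over from the IR instruction.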
MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  unsigned Alignment;
  unsigned Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlignment();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlignment();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
  bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo;
  I->getAAMetadata(AAInfo);

  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlignment(ValTy);

  unsigned Size = DL.getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           Alignment, AAInfo, Ranges);
}
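
/// Fold a compare whose two operands are identical. For such a compare only
/// the ordering of the operand with respect to NaN can matter, so e.g.
/// 'fcmp oeq x, x' reduces to checking that x is ordered (FCMP_ORD), while
/// 'icmp eq x, x' is always true. FCMP_TRUE and FCMP_FALSE serve as the
/// "always true"/"always false" sentinels for integer compares as well.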
CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;

  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;

  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;