1 //===-- X86FastISel.cpp - X86 FastISel implementation ---------------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file defines the X86-specific support for the FastISel class. Much
11 // of the target-specific code is generated by tablegen in the file
12 // X86GenFastISel.inc, which is #included here.
14 //===----------------------------------------------------------------------===//
17 #include "X86InstrBuilder.h"
18 #include "X86ISelLowering.h"
19 #include "X86RegisterInfo.h"
20 #include "X86Subtarget.h"
21 #include "X86TargetMachine.h"
22 #include "llvm/InstrTypes.h"
23 #include "llvm/DerivedTypes.h"
24 #include "llvm/CodeGen/FastISel.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
// X86FastISel: the X86 hook into LLVM's "fast" instruction selector.
// It lowers simple IR instructions (load/store/cmp here) straight to
// MachineInstrs; when a TargetSelect* method returns false, selection
// of that instruction falls back to the normal (SelectionDAG) path.
// NOTE(review): this excerpt elides the access specifiers and the
// class's closing brace; members are reproduced as-is.
29 class X86FastISel : public FastISel {
30 /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
31 /// make the right decision when generating code for different targets.
32 const X86Subtarget *Subtarget;
// Constructor: forwards the value map / block map to the FastISel base
// and caches the subtarget off the owning TargetMachine (TM is a base
// member — presumably set up by FastISel's ctor; confirm in FastISel.h).
35 explicit X86FastISel(MachineFunction &mf,
36 DenseMap<const Value *, unsigned> &vm,
37 DenseMap<const BasicBlock *, MachineBasicBlock *> &bm)
38 : FastISel(mf, vm, bm) {
39 Subtarget = &TM.getSubtarget<X86Subtarget>();
// Entry point called by the generic FastISel driver for each IR
// instruction; returns true if this target handled it.
42 virtual bool TargetSelectInstruction(Instruction *I);
// Tablegen-generated per-opcode selection helpers are spliced in here.
44 #include "X86GenFastISel.inc"
// Materializes a constant address (currently globals only) into Op0.
47 bool X86SelectConstAddr(Value *V, unsigned &Op0);
49 bool X86SelectLoad(Instruction *I);
51 bool X86SelectStore(Instruction *I);
53 bool X86SelectCmp(Instruction *I);
56 /// X86SelectConstAddr - Select and emit code to materialize constant address.
// NOTE(review): excerpt — the second parameter (`unsigned &Op0` per the
// class declaration), the null-GV bail-out, the load-opcode selection,
// and the returns are elided from this view.
58 bool X86FastISel::X86SelectConstAddr(Value *V,
60 // FIXME: Only GlobalAddress for now.
61 GlobalValue *GV = dyn_cast<GlobalValue>(V);
// Some globals (e.g. via GOT/stubs in PIC code — see GVRequiresExtraLoad)
// cannot be addressed directly; an extra pointer load is emitted.
65 if (Subtarget->GVRequiresExtraLoad(GV, TM, false)) {
66 // Issue load from stub if necessary.
// Register class for the loaded pointer: 32- or 64-bit GPR depending on
// the target's pointer width.
68 const TargetRegisterClass *RC = NULL;
69 if (TLI.getPointerTy() == MVT::i32) {
71 RC = X86::GR32RegisterClass;
74 RC = X86::GR64RegisterClass;
76 Op0 = createResultReg(RC);
// Emit the stub load; `Opc` is chosen in elided lines above.
79 addFullAddress(BuildMI(MBB, TII.get(Opc), Op0), AM);
80 // Prevent loading GV stub multiple times in same MBB.
81 LocalValueMap[V] = Op0;
86 /// X86SelectStore - Select and emit code to implement store instructions.
// Returns false (bail to SelectionDAG) on any type/operand it can't handle.
// NOTE(review): excerpt — `return false;` lines, the `case MVT::…:` labels,
// the MOV-opcode assignments paired with each RC, and the closing braces
// are elided from this view.
87 bool X86FastISel::X86SelectStore(Instruction* I) {
// Type of the value being stored (operand 0 of a StoreInst).
88 MVT VT = MVT::getMVT(I->getOperand(0)->getType());
89 if (VT == MVT::Other || !VT.isSimple())
90 // Unhandled type. Halt "fast" selection and bail.
// (Elided guard above) pointer-typed values are stored as the native
// pointer-sized integer.
94 VT = TLI.getPointerTy();
95 // We only handle legal types. For example, on x86-32 the instruction
96 // selector contains all of the 64-bit instructions from x86-64,
97 // under the assumption that i64 won't be used if the target doesn't
99 if (!TLI.isTypeLegal(VT))
// Materialize the value to be stored.
101 unsigned Op0 = getRegForValue(I->getOperand(0));
103 // Unhandled operand. Halt "fast" selection and bail.
// Operand 1 is the store address; try a plain register first, then a
// constant (global) address via X86SelectConstAddr.
106 Value *V = I->getOperand(1);
107 unsigned Op1 = getRegForValue(V);
109 // Handle constant load address.
110 if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op1))
111 // Unhandled operand. Halt "fast" selection and bail.
115 // Get opcode and regclass of the output for the given load instruction.
117 const TargetRegisterClass *RC = NULL;
118 switch (VT.getSimpleVT()) {
119 default: return false;
122 RC = X86::GR8RegisterClass;
126 RC = X86::GR16RegisterClass;
130 RC = X86::GR32RegisterClass;
133 // Must be in x86-64 mode.
135 RC = X86::GR64RegisterClass;
// f32: SSE scalar register if SSE1 is available, else x87 stack register.
138 if (Subtarget->hasSSE1()) {
140 RC = X86::FR32RegisterClass;
143 RC = X86::RFP32RegisterClass;
// f64: SSE2 scalar register if available, else x87.
147 if (Subtarget->hasSSE2()) {
149 RC = X86::FR64RegisterClass;
152 RC = X86::RFP64RegisterClass;
// f80 is always handled on the x87 stack.
157 RC = X86::RFP80RegisterClass;
163 // Address is in register.
// (Elided) constant-address path fills AM with the global instead.
166 AM.GV = cast<GlobalValue>(V);
// Emit the store: full address operands plus the value register.
167 addFullAddress(BuildMI(MBB, TII.get(Opc)), AM).addReg(Op0);
171 /// X86SelectLoad - Select and emit code to implement load instructions.
// Mirrors X86SelectStore: bail (return false) on unhandled types/operands,
// otherwise emit a single MOV-family load into a fresh virtual register.
// NOTE(review): excerpt — `return false;` lines, the `case MVT::…:` labels,
// the load-opcode assignments, and the closing braces are elided.
173 bool X86FastISel::X86SelectLoad(Instruction *I) {
// Result type of the load itself.
174 MVT VT = MVT::getMVT(I->getType(), /*HandleUnknown=*/true);
175 if (VT == MVT::Other || !VT.isSimple())
176 // Unhandled type. Halt "fast" selection and bail.
// Pointer results are loaded as the native pointer-sized integer.
180 VT = TLI.getPointerTy();
181 // We only handle legal types. For example, on x86-32 the instruction
182 // selector contains all of the 64-bit instructions from x86-64,
183 // under the assumption that i64 won't be used if the target doesn't
185 if (!TLI.isTypeLegal(VT))
// Operand 0 is the load address; register form first, then constant
// (global) address via X86SelectConstAddr.
188 Value *V = I->getOperand(0);
189 unsigned Op0 = getRegForValue(V);
191 // Handle constant load address.
192 if (!isa<Constant>(V) || !X86SelectConstAddr(V, Op0))
193 // Unhandled operand. Halt "fast" selection and bail.
197 // Get opcode and regclass of the output for the given load instruction.
199 const TargetRegisterClass *RC = NULL;
200 switch (VT.getSimpleVT()) {
201 default: return false;
204 RC = X86::GR8RegisterClass;
208 RC = X86::GR16RegisterClass;
212 RC = X86::GR32RegisterClass;
215 // Must be in x86-64 mode.
217 RC = X86::GR64RegisterClass;
// f32: SSE scalar register when SSE1 exists, else x87 stack register.
220 if (Subtarget->hasSSE1()) {
222 RC = X86::FR32RegisterClass;
225 RC = X86::RFP32RegisterClass;
// f64: SSE2 scalar register when available, else x87.
229 if (Subtarget->hasSSE2()) {
231 RC = X86::FR64RegisterClass;
234 RC = X86::RFP64RegisterClass;
// f80 is always on the x87 stack.
239 RC = X86::RFP80RegisterClass;
// Destination virtual register of the proper class.
243 unsigned ResultReg = createResultReg(RC);
246 // Address is in register.
// (Elided) constant-address path fills AM with the global instead.
249 AM.GV = cast<GlobalValue>(V);
250 addFullAddress(BuildMI(MBB, TII.get(Opc), ResultReg), AM);
// Record the register holding this IR value for later uses.
251 UpdateValueMap(I, ResultReg);
// X86SelectCmp - Lower an icmp/fcmp to a CMP/UCOMIS compare followed by a
// SETcc into an 8-bit result register (the i1 result lives in a GR8).
// NOTE(review): excerpt — `unsigned Opc;`, the per-case `break;` statements,
// the `default:` of the predicate switch, closing braces, and the final
// `return true;` are elided from this view.
255 bool X86FastISel::X86SelectCmp(Instruction *I) {
256 CmpInst *CI = cast<CmpInst>(I);
// Materialize both compare operands into registers.
258 unsigned Op0Reg = getRegForValue(CI->getOperand(0));
259 unsigned Op1Reg = getRegForValue(CI->getOperand(1));
// Pick the register-register compare opcode from the operand type:
// integer CMP for i8..i64, unordered scalar compare for f32/f64.
262 switch (TLI.getValueType(I->getOperand(0)->getType()).getSimpleVT()) {
263 case MVT::i8: Opc = X86::CMP8rr; break;
264 case MVT::i16: Opc = X86::CMP16rr; break;
265 case MVT::i32: Opc = X86::CMP32rr; break;
266 case MVT::i64: Opc = X86::CMP64rr; break;
267 case MVT::f32: Opc = X86::UCOMISSrr; break;
268 case MVT::f64: Opc = X86::UCOMISDrr; break;
269 default: return false;
272 unsigned ResultReg = createResultReg(&X86::GR8RegClass);
273 switch (CI->getPredicate()) {
// Ordered-equal needs two flags: UCOMIS sets ZF for equal *or* unordered
// and PF for unordered, so OEQ = SETE & SETNP (see Intel SDM, UCOMISS).
274 case CmpInst::FCMP_OEQ: {
275 unsigned EReg = createResultReg(&X86::GR8RegClass);
276 unsigned NPReg = createResultReg(&X86::GR8RegClass);
277 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
278 BuildMI(MBB, TII.get(X86::SETEr), EReg);
279 BuildMI(MBB, TII.get(X86::SETNPr), NPReg);
280 BuildMI(MBB, TII.get(X86::AND8rr), ResultReg).addReg(NPReg).addReg(EReg);
// Unordered-not-equal is the complement: UNE = SETNE | SETP.
283 case CmpInst::FCMP_UNE: {
284 unsigned NEReg = createResultReg(&X86::GR8RegClass);
285 unsigned PReg = createResultReg(&X86::GR8RegClass);
286 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
287 BuildMI(MBB, TII.get(X86::SETNEr), NEReg);
288 BuildMI(MBB, TII.get(X86::SETPr), PReg);
289 BuildMI(MBB, TII.get(X86::OR8rr), ResultReg).addReg(PReg).addReg(NEReg);
// Remaining FP predicates map to one SETcc; OLT/OLE/UGT/UGE swap the
// compare operands so an above/below condition expresses the relation.
292 case CmpInst::FCMP_OGT:
293 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
294 BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
296 case CmpInst::FCMP_OGE:
297 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
298 BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
300 case CmpInst::FCMP_OLT:
301 BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
302 BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
304 case CmpInst::FCMP_OLE:
305 BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
306 BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
308 case CmpInst::FCMP_ONE:
309 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
310 BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
312 case CmpInst::FCMP_ORD:
313 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
314 BuildMI(MBB, TII.get(X86::SETNPr), ResultReg);
316 case CmpInst::FCMP_UNO:
317 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
318 BuildMI(MBB, TII.get(X86::SETPr), ResultReg);
320 case CmpInst::FCMP_UEQ:
321 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
322 BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
324 case CmpInst::FCMP_UGT:
325 BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
326 BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
328 case CmpInst::FCMP_UGE:
329 BuildMI(MBB, TII.get(Opc)).addReg(Op1Reg).addReg(Op0Reg);
330 BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
332 case CmpInst::FCMP_ULT:
333 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
334 BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
336 case CmpInst::FCMP_ULE:
337 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
338 BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
// Integer predicates: unsigned forms use A/AE/B/BE (CF-based), signed
// forms use G/GE/L/LE (SF/OF-based); no operand swap is needed.
340 case CmpInst::ICMP_EQ:
341 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
342 BuildMI(MBB, TII.get(X86::SETEr), ResultReg);
344 case CmpInst::ICMP_NE:
345 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
346 BuildMI(MBB, TII.get(X86::SETNEr), ResultReg);
348 case CmpInst::ICMP_UGT:
349 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
350 BuildMI(MBB, TII.get(X86::SETAr), ResultReg);
352 case CmpInst::ICMP_UGE:
353 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
354 BuildMI(MBB, TII.get(X86::SETAEr), ResultReg);
356 case CmpInst::ICMP_ULT:
357 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
358 BuildMI(MBB, TII.get(X86::SETBr), ResultReg);
360 case CmpInst::ICMP_ULE:
361 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
362 BuildMI(MBB, TII.get(X86::SETBEr), ResultReg);
364 case CmpInst::ICMP_SGT:
365 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
366 BuildMI(MBB, TII.get(X86::SETGr), ResultReg);
368 case CmpInst::ICMP_SGE:
369 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
370 BuildMI(MBB, TII.get(X86::SETGEr), ResultReg);
372 case CmpInst::ICMP_SLT:
373 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
374 BuildMI(MBB, TII.get(X86::SETLr), ResultReg);
376 case CmpInst::ICMP_SLE:
377 BuildMI(MBB, TII.get(Opc)).addReg(Op0Reg).addReg(Op1Reg);
378 BuildMI(MBB, TII.get(X86::SETLEr), ResultReg);
// Publish the GR8 result as the value of the compare instruction.
384 UpdateValueMap(I, ResultReg);
// TargetSelectInstruction - Top-level dispatch called by the generic
// FastISel driver; routes each supported IR opcode to its X86Select*
// helper and returns whether selection succeeded.
// NOTE(review): excerpt — the `bool` return-type line, the switch's
// default case (presumably `return false`), and closing braces are elided.
389 X86FastISel::TargetSelectInstruction(Instruction *I) {
390 switch (I->getOpcode()) {
392 case Instruction::Load:
393 return X86SelectLoad(I);
394 case Instruction::Store:
395 return X86SelectStore(I);
396 case Instruction::ICmp:
397 case Instruction::FCmp:
398 return X86SelectCmp(I);
// X86::createFastISel - Factory entry point: hands ownership of a new
// X86FastISel to the caller (the target-independent selection machinery).
// NOTE(review): excerpt — the function's closing brace is elided.
405 llvm::FastISel *X86::createFastISel(MachineFunction &mf,
406 DenseMap<const Value *, unsigned> &vm,
407 DenseMap<const BasicBlock *, MachineBasicBlock *> &bm) {
408 return new X86FastISel(mf, vm, bm);