//===- X86InstrInfo.cpp - X86 Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//
#include "X86InstrInfo.h"
#include "X86GenInstrInfo.inc"
#include "X86InstrBuilder.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;
X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : TargetInstrInfoImpl(X86Insts, array_lengthof(X86Insts)),
    TM(tm), RI(tm, *this) {
}
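/// isMoveInstr - Return true if MI is a register-to-register move, and set
/// sourceReg and destReg to the moved registers.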
bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
                               unsigned& sourceReg,
                               unsigned& destReg) const {
  MachineOpCode oc = MI.getOpcode();
  if (oc == X86::MOV8rr || oc == X86::MOV16rr ||
      oc == X86::MOV32rr || oc == X86::MOV64rr ||
      oc == X86::MOV16to16_ || oc == X86::MOV32to32_ ||
      oc == X86::MOV_Fp3232 || oc == X86::MOVSSrr || oc == X86::MOVSDrr ||
      oc == X86::MOV_Fp3264 || oc == X86::MOV_Fp6432 || oc == X86::MOV_Fp6464 ||
      oc == X86::FsMOVAPSrr || oc == X86::FsMOVAPDrr ||
      oc == X86::MOVAPSrr || oc == X86::MOVAPDrr ||
      oc == X86::MOVSS2PSrr || oc == X86::MOVSD2PDrr ||
      oc == X86::MOVPS2SSrr || oc == X86::MOVPD2SDrr ||
      oc == X86::MMX_MOVD64rr || oc == X86::MMX_MOVQ64rr) {
    assert(MI.getNumOperands() >= 2 &&
           MI.getOperand(0).isRegister() &&
           MI.getOperand(1).isRegister() &&
           "invalid register-register move instruction");
    sourceReg = MI.getOperand(1).getReg();
    destReg = MI.getOperand(0).getReg();
    return true;
  }
  return false;
}
unsigned X86InstrInfo::isLoadFromStackSlot(MachineInstr *MI,
                                           int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
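    // A load from a stack slot has the canonical x86 address pattern:
    // base = frame index, scale = 1, index register = 0, displacement = 0.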
    if (MI->getOperand(1).isFI() && MI->getOperand(2).isImm() &&
        MI->getOperand(3).isReg() && MI->getOperand(4).isImm() &&
        MI->getOperand(2).getImm() == 1 &&
        MI->getOperand(3).getReg() == 0 &&
        MI->getOperand(4).getImm() == 0) {
      FrameIndex = MI->getOperand(1).getIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}
unsigned X86InstrInfo::isStoreToStackSlot(MachineInstr *MI,
                                          int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8mr:
  case X86::MOV16mr:
  case X86::MOV16_mr:
  case X86::MOV32mr:
  case X86::MOV32_mr:
  case X86::MOV64mr:
  case X86::ST_FpP64m:
  case X86::MOVSSmr:
  case X86::MOVSDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
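    // For a store, operands 0-3 form the address and operand 4 is the value
    // being stored.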
    if (MI->getOperand(0).isFI() && MI->getOperand(1).isImm() &&
        MI->getOperand(2).isReg() && MI->getOperand(3).isImm() &&
        MI->getOperand(1).getImm() == 1 &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImm() == 0) {
      FrameIndex = MI->getOperand(0).getIndex();
      return MI->getOperand(4).getReg();
    }
    break;
  }
  return 0;
}
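/// isReallyTriviallyReMaterializable - Return true if this load can safely
/// be rematerialized instead of spilled, i.e. it reads memory that is known
/// never to change, such as a constant pool entry.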
bool X86InstrInfo::isReallyTriviallyReMaterializable(MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    // Loads from constant pools are trivially rematerializable.
    return MI->getOperand(1).isRegister() &&
           MI->getOperand(2).isImmediate() &&
           MI->getOperand(3).isRegister() &&
           MI->getOperand(4).isConstantPoolIndex() &&
           MI->getOperand(1).getReg() == 0 &&
           MI->getOperand(2).getImm() == 1 &&
           MI->getOperand(3).getReg() == 0;
  }
  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}
/// isReallySideEffectFree - If the M_MAY_HAVE_SIDE_EFFECTS flag is set, this
/// method is called to determine whether the specific instance of the
/// instruction has side effects. This is useful for instructions, such as
/// loads, that generally do have side effects: a load from a constant pool
/// has none, so it must be distinguished from the general case.
bool X86InstrInfo::isReallySideEffectFree(MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV32rm:
    if (MI->getOperand(1).isRegister()) {
      unsigned Reg = MI->getOperand(1).getReg();

      // Loads from global addresses which aren't redefined in the function
      // are side effect free.
      if (Reg != 0 && MRegisterInfo::isVirtualRegister(Reg) &&
          MI->getOperand(2).isImmediate() &&
          MI->getOperand(3).isRegister() &&
          MI->getOperand(4).isGlobalAddress() &&
          MI->getOperand(2).getImm() == 1 &&
          MI->getOperand(3).getReg() == 0)
        return true;
    }
    // FALLTHROUGH
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    // Loads from constant pools have no side effects.
    return MI->getOperand(1).isRegister() &&
           MI->getOperand(2).isImmediate() &&
           MI->getOperand(3).isRegister() &&
           MI->getOperand(4).isConstantPoolIndex() &&
           MI->getOperand(1).getReg() == 0 &&
           MI->getOperand(2).getImm() == 1 &&
           MI->getOperand(3).getReg() == 0;
  }
  // All other instances of these instructions are presumed to have side
  // effects.
  return true;
}
/// hasLiveCondCodeDef - True if MI has a condition code def, e.g. EFLAGS, that
/// is not marked dead.
static bool hasLiveCondCodeDef(MachineInstr *MI) {
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (MO.isRegister() && MO.isDef() &&
        MO.getReg() == X86::EFLAGS && !MO.isDead()) {
      return true;
    }
  }
  return false;
}
/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand. This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineBasicBlock::iterator &MBBI,
                                    LiveVariables &LV) const {
  MachineInstr *MI = MBBI;
  // All instructions input are two-addr instructions. Get the known operands.
  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();

  MachineInstr *NewMI = NULL;
  // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When
  // we have better subtarget support, enable the 16-bit LEA generation here.
  bool DisableLEA16 = true;

  unsigned MIOpc = MI->getOpcode();
  switch (MIOpc) {
  case X86::SHUFPSrri: {
    assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
    if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;

    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    unsigned M = MI->getOperand(3).getImm();
    if (B != C) return 0;
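    // When both sources are the same register, the shuffle reads a single
    // vector, so it can be replaced by PSHUFD, which does not tie its
    // destination to a source operand.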
    NewMI = BuildMI(get(X86::PSHUFDri), A).addReg(B).addImm(M);
    break;
  }
  case X86::SHL64ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;
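    // LEA can only encode scales of 1, 2, 4 and 8, so a left shift by 1-3
    // becomes "lea dest, [src * (1 << ShAmt)]"; larger shifts stay as shifts.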
    NewMI = BuildMI(get(X86::LEA64r), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  case X86::SHL32ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit() ?
      X86::LEA64_32r : X86::LEA32r;
    NewMI = BuildMI(get(Opc), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  case X86::SHL16ri: {
    assert(MI->getNumOperands() >= 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    if (DisableLEA16) {
      // If 16-bit LEA is disabled, use 32-bit LEA via subregisters.
      MachineRegisterInfo &RegInfo = MFI->getParent()->getRegInfo();
      unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit()
        ? X86::LEA64_32r : X86::LEA32r;
      unsigned leaInReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
      unsigned leaOutReg = RegInfo.createVirtualRegister(&X86::GR32RegClass);
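      // Insert the 16-bit source into subregister index 2 (the low 16 bits)
      // of a fresh 32-bit register; the upper bits are don't-care because
      // only the low 16 bits of the result are extracted below.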
      MachineInstr *Ins =
        BuildMI(get(X86::INSERT_SUBREG), leaInReg).addReg(Src).addImm(2);
      Ins->copyKillDeadInfo(MI);

      NewMI = BuildMI(get(Opc), leaOutReg)
        .addReg(0).addImm(1 << ShAmt).addReg(leaInReg).addImm(0);

      MachineInstr *Ext =
        BuildMI(get(X86::EXTRACT_SUBREG), Dest).addReg(leaOutReg).addImm(2);
      Ext->copyKillDeadInfo(MI);

      MFI->insert(MBBI, Ins);            // Insert the insert_subreg
      LV.instructionChanged(MI, NewMI);  // Update live variables
      LV.addVirtualRegisterKilled(leaInReg, NewMI);
      MFI->insert(MBBI, NewMI);          // Insert the new inst
      LV.addVirtualRegisterKilled(leaOutReg, Ext);
      MFI->insert(MBBI, Ext);            // Insert the extract_subreg
      return Ext;
    } else {
      NewMI = BuildMI(get(X86::LEA16r), Dest)
        .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    }
    break;
  }
  default: {
    // The following opcodes also set the condition code register(s). Only
    // convert them to an equivalent LEA if the condition code register def
    // is dead!
    if (hasLiveCondCodeDef(MI))
      return 0;

    bool is64Bit = TM.getSubtarget<X86Subtarget>().is64Bit();

    switch (MIOpc) {
    default: return 0;
    case X86::INC64r:
    case X86::INC32r: {
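      // INC ties its source and destination; LEA dest, [src + 1] computes
      // the same value into a fresh register and leaves EFLAGS untouched.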
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      unsigned Opc = MIOpc == X86::INC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegOffset(BuildMI(get(Opc), Dest), Src, 1);
      break;
    }
    case X86::INC16r:
    case X86::INC64_16r:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 2 && "Unknown inc instruction!");
      NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, 1);
      break;
    case X86::DEC64r:
    case X86::DEC32r: {
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      unsigned Opc = MIOpc == X86::DEC64r ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegOffset(BuildMI(get(Opc), Dest), Src, -1);
      break;
    }
    case X86::DEC16r:
    case X86::DEC64_16r:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 2 && "Unknown dec instruction!");
      NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, -1);
      break;
    case X86::ADD64rr:
    case X86::ADD32rr: {
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      unsigned Opc = MIOpc == X86::ADD64rr ? X86::LEA64r
        : (is64Bit ? X86::LEA64_32r : X86::LEA32r);
      NewMI = addRegReg(BuildMI(get(Opc), Dest), Src,
                        MI->getOperand(2).getReg());
      break;
    }
    case X86::ADD16rr:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      NewMI = addRegReg(BuildMI(get(X86::LEA16r), Dest), Src,
                        MI->getOperand(2).getReg());
      break;
    case X86::ADD64ri32:
    case X86::ADD64ri8:
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImmediate())
        NewMI = addRegOffset(BuildMI(get(X86::LEA64r), Dest), Src,
                             MI->getOperand(2).getImm());
      break;
    case X86::ADD32ri:
    case X86::ADD32ri8:
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImmediate()) {
        unsigned Opc = is64Bit ? X86::LEA64_32r : X86::LEA32r;
        NewMI = addRegOffset(BuildMI(get(Opc), Dest), Src,
                             MI->getOperand(2).getImm());
      }
      break;
    case X86::ADD16ri:
    case X86::ADD16ri8:
      if (DisableLEA16) return 0;
      assert(MI->getNumOperands() >= 3 && "Unknown add instruction!");
      if (MI->getOperand(2).isImmediate())
        NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src,
                             MI->getOperand(2).getImm());
      break;
    case X86::SHL16ri:
      if (DisableLEA16) return 0;
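      // FALL THROUGH: the 16-bit shift shares the LEA lowering below.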
    case X86::SHL32ri:
    case X86::SHL64ri: {
      assert(MI->getNumOperands() >= 3 && MI->getOperand(2).isImmediate() &&
             "Unknown shl instruction!");
      unsigned ShAmt = MI->getOperand(2).getImm();
      if (ShAmt == 1 || ShAmt == 2 || ShAmt == 3) {
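        // Express the shift as an addressing mode: no base register, index =
        // Src scaled by 1 << ShAmt, zero displacement.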
        X86AddressMode AM;
        AM.Scale = 1 << ShAmt;
        AM.IndexReg = Src;
        unsigned Opc = MIOpc == X86::SHL64ri ? X86::LEA64r
          : (MIOpc == X86::SHL32ri
             ? (is64Bit ? X86::LEA64_32r : X86::LEA32r) : X86::LEA16r);
        NewMI = addFullAddress(BuildMI(get(Opc), Dest), AM);
      }
      break;
    }
    }
  }
  }

  if (!NewMI) return 0;
  NewMI->copyKillDeadInfo(MI);
  LV.instructionChanged(MI, NewMI);  // Update live variables
  MFI->insert(MBBI, NewMI);          // Insert the new inst
  return NewMI;
}
/// commuteInstruction - We have a few instructions that must be hacked on to
/// commute them.
///
MachineInstr *X86InstrInfo::commuteInstruction(MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
  case X86::SHLD32rri8: // A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
  case X86::SHRD64rri8: // A = SHRD64rri8 B, C, I -> A = SHLD64rri8 C, B, (64-I)
  case X86::SHLD64rri8:{// A = SHLD64rri8 B, C, I -> A = SHRD64rri8 C, B, (64-I)
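    // SHRD B, C, I shifts the concatenation C:B right by I bits; swapping the
    // sources and shifting left by Size-I selects the same bits, so the
    // double-shift pair commutes this way.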
    unsigned Opc;
    unsigned Size;
    switch (MI->getOpcode()) {
    default: assert(0 && "Unreachable!");
    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
    case X86::SHRD64rri8: Size = 64; Opc = X86::SHLD64rri8; break;
    case X86::SHLD64rri8: Size = 64; Opc = X86::SHRD64rri8; break;
    }
    unsigned Amt = MI->getOperand(3).getImm();
    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    bool BisKill = MI->getOperand(1).isKill();
    bool CisKill = MI->getOperand(2).isKill();
    return BuildMI(get(Opc), A).addReg(C, false, false, CisKill)
      .addReg(B, false, false, BisKill).addImm(Size-Amt);
  }
  case X86::CMOVB16rr:
  case X86::CMOVB32rr:
  case X86::CMOVB64rr:
  case X86::CMOVAE16rr:
  case X86::CMOVAE32rr:
  case X86::CMOVAE64rr:
  case X86::CMOVE16rr:
  case X86::CMOVE32rr:
  case X86::CMOVE64rr:
  case X86::CMOVNE16rr:
  case X86::CMOVNE32rr:
  case X86::CMOVNE64rr:
  case X86::CMOVBE16rr:
  case X86::CMOVBE32rr:
  case X86::CMOVBE64rr:
  case X86::CMOVA16rr:
  case X86::CMOVA32rr:
  case X86::CMOVA64rr:
  case X86::CMOVL16rr:
  case X86::CMOVL32rr:
  case X86::CMOVL64rr:
  case X86::CMOVGE16rr:
  case X86::CMOVGE32rr:
  case X86::CMOVGE64rr:
  case X86::CMOVLE16rr:
  case X86::CMOVLE32rr:
  case X86::CMOVLE64rr:
  case X86::CMOVG16rr:
  case X86::CMOVG32rr:
  case X86::CMOVG64rr:
  case X86::CMOVS16rr:
  case X86::CMOVS32rr:
  case X86::CMOVS64rr:
  case X86::CMOVNS16rr:
  case X86::CMOVNS32rr:
  case X86::CMOVNS64rr:
  case X86::CMOVP16rr:
  case X86::CMOVP32rr:
  case X86::CMOVP64rr:
  case X86::CMOVNP16rr:
  case X86::CMOVNP32rr:
  case X86::CMOVNP64rr: {
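    // CMOVcc A, B copies B into A when cc holds; to swap the two register
    // operands the condition must also be inverted.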
    unsigned Opc = 0;
    switch (MI->getOpcode()) {
    default: break;
    case X86::CMOVB16rr: Opc = X86::CMOVAE16rr; break;
    case X86::CMOVB32rr: Opc = X86::CMOVAE32rr; break;
    case X86::CMOVB64rr: Opc = X86::CMOVAE64rr; break;
    case X86::CMOVAE16rr: Opc = X86::CMOVB16rr; break;
    case X86::CMOVAE32rr: Opc = X86::CMOVB32rr; break;
    case X86::CMOVAE64rr: Opc = X86::CMOVB64rr; break;
    case X86::CMOVE16rr: Opc = X86::CMOVNE16rr; break;
    case X86::CMOVE32rr: Opc = X86::CMOVNE32rr; break;
    case X86::CMOVE64rr: Opc = X86::CMOVNE64rr; break;
    case X86::CMOVNE16rr: Opc = X86::CMOVE16rr; break;
    case X86::CMOVNE32rr: Opc = X86::CMOVE32rr; break;
    case X86::CMOVNE64rr: Opc = X86::CMOVE64rr; break;
    case X86::CMOVBE16rr: Opc = X86::CMOVA16rr; break;
    case X86::CMOVBE32rr: Opc = X86::CMOVA32rr; break;
    case X86::CMOVBE64rr: Opc = X86::CMOVA64rr; break;
    case X86::CMOVA16rr: Opc = X86::CMOVBE16rr; break;
    case X86::CMOVA32rr: Opc = X86::CMOVBE32rr; break;
    case X86::CMOVA64rr: Opc = X86::CMOVBE64rr; break;
    case X86::CMOVL16rr: Opc = X86::CMOVGE16rr; break;
    case X86::CMOVL32rr: Opc = X86::CMOVGE32rr; break;
    case X86::CMOVL64rr: Opc = X86::CMOVGE64rr; break;
    case X86::CMOVGE16rr: Opc = X86::CMOVL16rr; break;
    case X86::CMOVGE32rr: Opc = X86::CMOVL32rr; break;
    case X86::CMOVGE64rr: Opc = X86::CMOVL64rr; break;
    case X86::CMOVLE16rr: Opc = X86::CMOVG16rr; break;
    case X86::CMOVLE32rr: Opc = X86::CMOVG32rr; break;
    case X86::CMOVLE64rr: Opc = X86::CMOVG64rr; break;
    case X86::CMOVG16rr: Opc = X86::CMOVLE16rr; break;
    case X86::CMOVG32rr: Opc = X86::CMOVLE32rr; break;
    case X86::CMOVG64rr: Opc = X86::CMOVLE64rr; break;
    case X86::CMOVS16rr: Opc = X86::CMOVNS16rr; break;
    case X86::CMOVS32rr: Opc = X86::CMOVNS32rr; break;
    case X86::CMOVS64rr: Opc = X86::CMOVNS64rr; break;
    case X86::CMOVNS16rr: Opc = X86::CMOVS16rr; break;
    case X86::CMOVNS32rr: Opc = X86::CMOVS32rr; break;
    case X86::CMOVNS64rr: Opc = X86::CMOVS64rr; break;
    case X86::CMOVP16rr: Opc = X86::CMOVNP16rr; break;
    case X86::CMOVP32rr: Opc = X86::CMOVNP32rr; break;
    case X86::CMOVP64rr: Opc = X86::CMOVNP64rr; break;
    case X86::CMOVNP16rr: Opc = X86::CMOVP16rr; break;
    case X86::CMOVNP32rr: Opc = X86::CMOVP32rr; break;
    case X86::CMOVNP64rr: Opc = X86::CMOVP64rr; break;
    }

    MI->setInstrDescriptor(get(Opc));
    // Fallthrough intended.
  }
  default:
    return TargetInstrInfoImpl::commuteInstruction(MI);
  }
}
static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
  switch (BrOpc) {
  default: return X86::COND_INVALID;
  case X86::JE: return X86::COND_E;
  case X86::JNE: return X86::COND_NE;
  case X86::JL: return X86::COND_L;
  case X86::JLE: return X86::COND_LE;
  case X86::JG: return X86::COND_G;
  case X86::JGE: return X86::COND_GE;
  case X86::JB: return X86::COND_B;
  case X86::JBE: return X86::COND_BE;
  case X86::JA: return X86::COND_A;
  case X86::JAE: return X86::COND_AE;
  case X86::JS: return X86::COND_S;
  case X86::JNS: return X86::COND_NS;
  case X86::JP: return X86::COND_P;
  case X86::JNP: return X86::COND_NP;
  case X86::JO: return X86::COND_O;
  case X86::JNO: return X86::COND_NO;
  }
}
unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E: return X86::JE;
  case X86::COND_NE: return X86::JNE;
  case X86::COND_L: return X86::JL;
  case X86::COND_LE: return X86::JLE;
  case X86::COND_G: return X86::JG;
  case X86::COND_GE: return X86::JGE;
  case X86::COND_B: return X86::JB;
  case X86::COND_BE: return X86::JBE;
  case X86::COND_A: return X86::JA;
  case X86::COND_AE: return X86::JAE;
  case X86::COND_S: return X86::JS;
  case X86::COND_NS: return X86::JNS;
  case X86::COND_P: return X86::JP;
  case X86::COND_NP: return X86::JNP;
  case X86::COND_O: return X86::JO;
  case X86::COND_NO: return X86::JNO;
  }
}
/// GetOppositeBranchCondition - Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E: return X86::COND_NE;
  case X86::COND_NE: return X86::COND_E;
  case X86::COND_L: return X86::COND_GE;
  case X86::COND_LE: return X86::COND_G;
  case X86::COND_G: return X86::COND_LE;
  case X86::COND_GE: return X86::COND_L;
  case X86::COND_B: return X86::COND_AE;
  case X86::COND_BE: return X86::COND_A;
  case X86::COND_A: return X86::COND_BE;
  case X86::COND_AE: return X86::COND_B;
  case X86::COND_S: return X86::COND_NS;
  case X86::COND_NS: return X86::COND_S;
  case X86::COND_P: return X86::COND_NP;
  case X86::COND_NP: return X86::COND_P;
  case X86::COND_O: return X86::COND_NO;
  case X86::COND_NO: return X86::COND_O;
  }
}
bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
  if (TID->Flags & M_TERMINATOR_FLAG) {
    // Conditional branch is a special case.
    if ((TID->Flags & M_BRANCH_FLAG) != 0 && (TID->Flags & M_BARRIER_FLAG) == 0)
      return true;
    if ((TID->Flags & M_PREDICABLE) == 0)
      return true;
    return !isPredicated(MI);
  }
  return false;
}
// For purposes of branch analysis do not count FP_REG_KILL as a terminator.
static bool isBrAnalysisUnpredicatedTerminator(const MachineInstr *MI,
                                               const X86InstrInfo &TII) {
  if (MI->getOpcode() == X86::FP_REG_KILL)
    return false;
  return TII.isUnpredicatedTerminator(MI);
}
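/// AnalyzeBranch - Walk backwards through the terminators of MBB. Returns
/// false and fills in TBB, FBB and Cond when the branch structure is
/// understood; returns true when the block cannot be analyzed.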
bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 std::vector<MachineOperand> &Cond) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isBrAnalysisUnpredicatedTerminator(--I, *this)) {
    if (!isBranch(LastInst->getOpcode()))
      return true;

    // If the block ends with a branch there are 3 possibilities:
    // it's an unconditional, conditional, or indirect branch.
    if (LastInst->getOpcode() == X86::JMP) {
      TBB = LastInst->getOperand(0).getMBB();
      return false;
    }
    X86::CondCode BranchCode = GetCondFromBranchOpc(LastInst->getOpcode());
    if (BranchCode == X86::COND_INVALID)
      return true;  // Can't handle indirect branch.

    // Otherwise, block ends with fall-through condbranch.
    TBB = LastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    return false;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() &&
      isBrAnalysisUnpredicatedTerminator(--I, *this))
    return true;

  // If the block ends with X86::JMP and a conditional branch, handle it.
  X86::CondCode BranchCode = GetCondFromBranchOpc(SecondLastInst->getOpcode());
  if (BranchCode != X86::COND_INVALID && LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    FBB = LastInst->getOperand(0).getMBB();
    return false;
  }

  // If the block ends with two X86::JMPs, handle it. The second one is not
  // executed, so remove it.
  if (SecondLastInst->getOpcode() == X86::JMP &&
      LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMBB();
    I = LastInst;
    I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}
unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (I->getOpcode() != X86::JMP &&
      GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();

  if (I == MBB.begin()) return 1;
  --I;
  if (GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}
static const MachineInstrBuilder &X86InstrAddOperand(MachineInstrBuilder &MIB,
                                                     MachineOperand &MO) {
  if (MO.isRegister())
    MIB = MIB.addReg(MO.getReg(), MO.isDef(), MO.isImplicit(),
                     false, false, MO.getSubReg());
  else if (MO.isImmediate())
    MIB = MIB.addImm(MO.getImm());
  else if (MO.isFrameIndex())
    MIB = MIB.addFrameIndex(MO.getIndex());
  else if (MO.isGlobalAddress())
    MIB = MIB.addGlobalAddress(MO.getGlobal(), MO.getOffset());
  else if (MO.isConstantPoolIndex())
    MIB = MIB.addConstantPoolIndex(MO.getIndex(), MO.getOffset());
  else if (MO.isJumpTableIndex())
    MIB = MIB.addJumpTableIndex(MO.getIndex());
  else if (MO.isExternalSymbol())
    MIB = MIB.addExternalSymbol(MO.getSymbolName());
  else
    assert(0 && "Unknown operand for X86InstrAddOperand!");

  return MIB;
}
unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const std::vector<MachineOperand> &Cond) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "X86 branch conditions have one component!");

  if (FBB == 0) { // One-way branch.
    if (Cond.empty()) {
      // Unconditional branch?
      BuildMI(&MBB, get(X86::JMP)).addMBB(TBB);
    } else {
      // Conditional branch.
      unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
      BuildMI(&MBB, get(Opc)).addMBB(TBB);
    }
    return 1;
  }

  // Two-way conditional branch.
  unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
  BuildMI(&MBB, get(Opc)).addMBB(TBB);
  BuildMI(&MBB, get(X86::JMP)).addMBB(FBB);
  return 2;
}
void X86InstrInfo::copyRegToReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                unsigned DestReg, unsigned SrcReg,
                                const TargetRegisterClass *DestRC,
                                const TargetRegisterClass *SrcRC) const {
  if (DestRC != SrcRC) {
    // Moving EFLAGS to / from another register requires a push and a pop.
    if (SrcRC == &X86::CCRRegClass) {
      assert(SrcReg == X86::EFLAGS);
      if (DestRC == &X86::GR64RegClass) {
        BuildMI(MBB, MI, get(X86::PUSHFQ));
        BuildMI(MBB, MI, get(X86::POP64r), DestReg);
        return;
      } else if (DestRC == &X86::GR32RegClass) {
        BuildMI(MBB, MI, get(X86::PUSHFD));
        BuildMI(MBB, MI, get(X86::POP32r), DestReg);
        return;
      }
    } else if (DestRC == &X86::CCRRegClass) {
      assert(DestReg == X86::EFLAGS);
      if (SrcRC == &X86::GR64RegClass) {
        BuildMI(MBB, MI, get(X86::PUSH64r)).addReg(SrcReg);
        BuildMI(MBB, MI, get(X86::POPFQ));
        return;
      } else if (SrcRC == &X86::GR32RegClass) {
        BuildMI(MBB, MI, get(X86::PUSH32r)).addReg(SrcReg);
        BuildMI(MBB, MI, get(X86::POPFD));
        return;
      }
    }
    cerr << "Not yet supported!";
    abort();
  }

  unsigned Opc;
  if (DestRC == &X86::GR64RegClass) {
    Opc = X86::MOV64rr;
  } else if (DestRC == &X86::GR32RegClass) {
    Opc = X86::MOV32rr;
  } else if (DestRC == &X86::GR16RegClass) {
    Opc = X86::MOV16rr;
  } else if (DestRC == &X86::GR8RegClass) {
    Opc = X86::MOV8rr;
  } else if (DestRC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_rr;
  } else if (DestRC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_rr;
  } else if (DestRC == &X86::RFP32RegClass) {
    Opc = X86::MOV_Fp3232;
  } else if (DestRC == &X86::RFP64RegClass || DestRC == &X86::RSTRegClass) {
    Opc = X86::MOV_Fp6464;
  } else if (DestRC == &X86::RFP80RegClass) {
    Opc = X86::MOV_Fp8080;
  } else if (DestRC == &X86::FR32RegClass) {
    Opc = X86::FsMOVAPSrr;
  } else if (DestRC == &X86::FR64RegClass) {
    Opc = X86::FsMOVAPDrr;
  } else if (DestRC == &X86::VR128RegClass) {
    Opc = X86::MOVAPSrr;
  } else if (DestRC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64rr;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }
  BuildMI(MBB, MI, get(Opc), DestReg).addReg(SrcReg);
}
static unsigned getStoreRegOpcode(const TargetRegisterClass *RC,
                                  unsigned StackAlign) {
  unsigned Opc;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64mr;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32mr;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16mr;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8mr;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_mr;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_mr;
  } else if (RC == &X86::RFP80RegClass) {
    Opc = X86::ST_FpP80m; // pops
  } else if (RC == &X86::RFP64RegClass) {
    Opc = X86::ST_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::ST_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSmr;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDmr;
  } else if (RC == &X86::VR128RegClass) {
    // FIXME: Use movaps once we are capable of selectively
    // aligning functions that spill SSE registers on 16-byte boundaries.
    Opc = StackAlign >= 16 ? X86::MOVAPSmr : X86::MOVUPSmr;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64mr;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }

  return Opc;
}
void X86InstrInfo::storeRegToStackSlot(MachineBasicBlock &MBB,
                                       MachineBasicBlock::iterator MI,
                                       unsigned SrcReg, bool isKill, int FrameIdx,
                                       const TargetRegisterClass *RC) const {
  unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
  addFrameReference(BuildMI(MBB, MI, get(Opc)), FrameIdx)
    .addReg(SrcReg, false, false, isKill);
}
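/// storeRegToAddr - Like storeRegToStackSlot, but the store is built against
/// an arbitrary address and the resulting instructions are appended to NewMIs
/// instead of being inserted into a basic block.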
void X86InstrInfo::storeRegToAddr(MachineFunction &MF, unsigned SrcReg,
                                  bool isKill,
                                  SmallVectorImpl<MachineOperand> &Addr,
                                  const TargetRegisterClass *RC,
                                  SmallVectorImpl<MachineInstr*> &NewMIs) const {
  unsigned Opc = getStoreRegOpcode(RC, RI.getStackAlignment());
  MachineInstrBuilder MIB = BuildMI(get(Opc));
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, Addr[i]);
  MIB.addReg(SrcReg, false, false, isKill);
  NewMIs.push_back(MIB);
}
static unsigned getLoadRegOpcode(const TargetRegisterClass *RC,
                                 unsigned StackAlign) {
  unsigned Opc;
  if (RC == &X86::GR64RegClass) {
    Opc = X86::MOV64rm;
  } else if (RC == &X86::GR32RegClass) {
    Opc = X86::MOV32rm;
  } else if (RC == &X86::GR16RegClass) {
    Opc = X86::MOV16rm;
  } else if (RC == &X86::GR8RegClass) {
    Opc = X86::MOV8rm;
  } else if (RC == &X86::GR32_RegClass) {
    Opc = X86::MOV32_rm;
  } else if (RC == &X86::GR16_RegClass) {
    Opc = X86::MOV16_rm;
  } else if (RC == &X86::RFP80RegClass) {
    Opc = X86::LD_Fp80m;
  } else if (RC == &X86::RFP64RegClass) {
    Opc = X86::LD_Fp64m;
  } else if (RC == &X86::RFP32RegClass) {
    Opc = X86::LD_Fp32m;
  } else if (RC == &X86::FR32RegClass) {
    Opc = X86::MOVSSrm;
  } else if (RC == &X86::FR64RegClass) {
    Opc = X86::MOVSDrm;
  } else if (RC == &X86::VR128RegClass) {
    // FIXME: Use movaps once we are capable of selectively
    // aligning functions that spill SSE registers on 16-byte boundaries.
    Opc = StackAlign >= 16 ? X86::MOVAPSrm : X86::MOVUPSrm;
  } else if (RC == &X86::VR64RegClass) {
    Opc = X86::MMX_MOVQ64rm;
  } else {
    assert(0 && "Unknown regclass");
    abort();
  }

  return Opc;
}
void X86InstrInfo::loadRegFromStackSlot(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MI,
                                        unsigned DestReg, int FrameIdx,
                                        const TargetRegisterClass *RC) const {
  unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
  addFrameReference(BuildMI(MBB, MI, get(Opc), DestReg), FrameIdx);
}
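/// loadRegFromAddr - Like loadRegFromStackSlot, but the load is built against
/// an arbitrary address and the resulting instructions are appended to NewMIs
/// instead of being inserted into a basic block.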
void X86InstrInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
                                   SmallVectorImpl<MachineOperand> &Addr,
                                   const TargetRegisterClass *RC,
                                   SmallVectorImpl<MachineInstr*> &NewMIs) const {
  unsigned Opc = getLoadRegOpcode(RC, RI.getStackAlignment());
  MachineInstrBuilder MIB = BuildMI(get(Opc), DestReg);
  for (unsigned i = 0, e = Addr.size(); i != e; ++i)
    MIB = X86InstrAddOperand(MIB, Addr[i]);
  NewMIs.push_back(MIB);
}
bool X86InstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
  if (MBB.empty()) return false;

  switch (MBB.back().getOpcode()) {
  case X86::TCRETURNri:
  case X86::TCRETURNdi:
  case X86::RET:     // Return.
  case X86::RETI:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm:
  case X86::JMP:     // Uncond branch.
  case X86::JMP32r:  // Indirect branch.
  case X86::JMP64r:  // Indirect branch (64-bit).
  case X86::JMP32m:  // Indirect branch through mem.
  case X86::JMP64m:  // Indirect branch through mem (64-bit).
    return true;
  default: return false;
  }
}
bool X86InstrInfo::
ReverseBranchCondition(std::vector<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid X86 branch condition!");
  Cond[0].setImm(GetOppositeBranchCondition((X86::CondCode)Cond[0].getImm()));
  return false;
}
const TargetRegisterClass *X86InstrInfo::getPointerRegClass() const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  if (Subtarget->is64Bit())
    return &X86::GR64RegClass;
  else
    return &X86::GR32RegClass;
}