//===- X86InstrInfo.cpp - X86 Instruction Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "X86InstrInfo.h"
#include "X86GenInstrInfo.inc"
#include "X86InstrBuilder.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/LiveVariables.h"
using namespace llvm;

X86InstrInfo::X86InstrInfo(X86TargetMachine &tm)
  : TargetInstrInfo(X86Insts, sizeof(X86Insts)/sizeof(X86Insts[0])),
    TM(tm), RI(tm, *this) {
}

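/// isMoveInstr - Return true if the instruction is a register-to-register
/// move, reporting the source and destination registers through the passed
/// out-parameters.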
bool X86InstrInfo::isMoveInstr(const MachineInstr& MI,
                               unsigned& sourceReg,
                               unsigned& destReg) const {
  MachineOpCode oc = MI.getOpcode();
  if (oc == X86::MOV8rr || oc == X86::MOV16rr ||
      oc == X86::MOV32rr || oc == X86::MOV64rr ||
      oc == X86::MOV16to16_ || oc == X86::MOV32to32_ ||
      oc == X86::MOV_Fp3232 || oc == X86::MOVSSrr || oc == X86::MOVSDrr ||
      oc == X86::MOV_Fp3264 || oc == X86::MOV_Fp6432 || oc == X86::MOV_Fp6464 ||
      oc == X86::FsMOVAPSrr || oc == X86::FsMOVAPDrr ||
      oc == X86::MOVAPSrr || oc == X86::MOVAPDrr ||
      oc == X86::MOVSS2PSrr || oc == X86::MOVSD2PDrr ||
      oc == X86::MOVPS2SSrr || oc == X86::MOVPD2SDrr ||
      oc == X86::MMX_MOVD64rr || oc == X86::MMX_MOVQ64rr) {
    assert(MI.getNumOperands() >= 2 &&
           MI.getOperand(0).isRegister() &&
           MI.getOperand(1).isRegister() &&
           "invalid register-register move instruction");
    sourceReg = MI.getOperand(1).getReg();
    destReg = MI.getOperand(0).getReg();
    return true;
  }
  return false;
}

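/// isLoadFromStackSlot - If the specified instruction is a direct load from a
/// stack slot, return the register it loads into and set FrameIndex to the
/// slot's index; otherwise return 0.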
unsigned X86InstrInfo::isLoadFromStackSlot(MachineInstr *MI,
                                           int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
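    // The load must address the stack slot directly: the base is the frame
    // index, the scale is 1, there is no index register, and the
    // displacement is zero.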
    if (MI->getOperand(1).isFrameIndex() && MI->getOperand(2).isImmediate() &&
        MI->getOperand(3).isRegister() && MI->getOperand(4).isImmediate() &&
        MI->getOperand(2).getImmedValue() == 1 &&
        MI->getOperand(3).getReg() == 0 &&
        MI->getOperand(4).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(1).getFrameIndex();
      return MI->getOperand(0).getReg();
    }
    break;
  }
  return 0;
}

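/// isStoreToStackSlot - If the specified instruction is a direct store to a
/// stack slot, return the register it stores and set FrameIndex to the
/// slot's index; otherwise return 0.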
unsigned X86InstrInfo::isStoreToStackSlot(MachineInstr *MI,
                                          int &FrameIndex) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8mr:
  case X86::MOV16mr:
  case X86::MOV16_mr:
  case X86::MOV32mr:
  case X86::MOV32_mr:
  case X86::MOV64mr:
  case X86::ST_FpP64m:
  case X86::MOVSSmr:
  case X86::MOVSDmr:
  case X86::MOVAPSmr:
  case X86::MOVAPDmr:
  case X86::MMX_MOVD64mr:
  case X86::MMX_MOVQ64mr:
  case X86::MMX_MOVNTQmr:
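    // Operands 0-3 form the address; operand 4 is the register being stored.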
    if (MI->getOperand(0).isFrameIndex() && MI->getOperand(1).isImmediate() &&
        MI->getOperand(2).isRegister() && MI->getOperand(3).isImmediate() &&
        MI->getOperand(1).getImmedValue() == 1 &&
        MI->getOperand(2).getReg() == 0 &&
        MI->getOperand(3).getImmedValue() == 0) {
      FrameIndex = MI->getOperand(0).getFrameIndex();
      return MI->getOperand(4).getReg();
    }
    break;
  }
  return 0;
}

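/// isReallyTriviallyReMaterializable - A load is trivially rematerializable
/// only if it reads from the constant pool with no base or index register,
/// so the loaded value can never change.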
bool X86InstrInfo::isReallyTriviallyReMaterializable(MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  default: break;
  case X86::MOV8rm:
  case X86::MOV16rm:
  case X86::MOV16_rm:
  case X86::MOV32rm:
  case X86::MOV32_rm:
  case X86::MOV64rm:
  case X86::LD_Fp64m:
  case X86::MOVSSrm:
  case X86::MOVSDrm:
  case X86::MOVAPSrm:
  case X86::MOVAPDrm:
  case X86::MMX_MOVD64rm:
  case X86::MMX_MOVQ64rm:
    // Loads from constant pools are trivially rematerializable.
    return MI->getOperand(1).isRegister() && MI->getOperand(2).isImmediate() &&
           MI->getOperand(3).isRegister() &&
           MI->getOperand(4).isConstantPoolIndex() &&
           MI->getOperand(1).getReg() == 0 &&
           MI->getOperand(2).getImmedValue() == 1 &&
           MI->getOperand(3).getReg() == 0;
  }
  // All other instructions marked M_REMATERIALIZABLE are always trivially
  // rematerializable.
  return true;
}

/// convertToThreeAddress - This method must be implemented by targets that
/// set the M_CONVERTIBLE_TO_3_ADDR flag. When this flag is set, the target
/// may be able to convert a two-address instruction into a true
/// three-address instruction on demand. This allows the X86 target (for
/// example) to convert ADD and SHL instructions into LEA instructions if they
/// would require register copies due to two-addressness.
///
/// This method returns a null pointer if the transformation cannot be
/// performed, otherwise it returns the new instruction.
///
MachineInstr *
X86InstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
                                    MachineBasicBlock::iterator &MBBI,
                                    LiveVariables &LV) const {
  MachineInstr *MI = MBBI;
  // All instructions input are two-addr instructions. Get the known operands.
  unsigned Dest = MI->getOperand(0).getReg();
  unsigned Src = MI->getOperand(1).getReg();

  MachineInstr *NewMI = NULL;
  // FIXME: 16-bit LEA's are really slow on Athlons, but not bad on P4's. When
  // we have better subtarget support, enable the 16-bit LEA generation here.
  bool DisableLEA16 = true;

  switch (MI->getOpcode()) {
  default: break;
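  // shufps with both sources tied to the same register is a one-input
  // swizzle, which pshufd (SSE2) can express as a true three-address shuffle.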
  case X86::SHUFPSrri: {
    assert(MI->getNumOperands() == 4 && "Unknown shufps instruction!");
    if (!TM.getSubtarget<X86Subtarget>().hasSSE2()) return 0;

    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    unsigned M = MI->getOperand(3).getImm();
    if (B != C) return 0;
    NewMI = BuildMI(get(X86::PSHUFDri), A).addReg(B).addImm(M);
    break;
  }
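  // A left shift by 1, 2, or 3 can instead be expressed as an LEA whose
  // index register is scaled by 2, 4, or 8, which yields a three-address
  // form and leaves the source register unchanged.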
  case X86::SHL64ri: {
    assert(MI->getNumOperands() == 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    NewMI = BuildMI(get(X86::LEA64r), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
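  // For the 32-bit variant in 64-bit mode, use LEA64_32r: the address is
  // computed with 64-bit registers but only a 32-bit result is written.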
  case X86::SHL32ri: {
    assert(MI->getNumOperands() == 3 && "Unknown shift instruction!");
    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    unsigned Opc = TM.getSubtarget<X86Subtarget>().is64Bit() ?
      X86::LEA64_32r : X86::LEA32r;
    NewMI = BuildMI(get(Opc), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  case X86::SHL16ri: {
    assert(MI->getNumOperands() == 3 && "Unknown shift instruction!");
    if (DisableLEA16) return 0;

    // NOTE: LEA doesn't produce flags like shift does, but LLVM never uses
    // the flags produced by a shift yet, so this is safe.
    unsigned Dest = MI->getOperand(0).getReg();
    unsigned Src = MI->getOperand(1).getReg();
    unsigned ShAmt = MI->getOperand(2).getImm();
    if (ShAmt == 0 || ShAmt >= 4) return 0;

    NewMI = BuildMI(get(X86::LEA16r), Dest)
      .addReg(0).addImm(1 << ShAmt).addReg(Src).addImm(0);
    break;
  }
  }

  // FIXME: None of these instructions are promotable to LEAs without
  // additional information. In particular, LEA doesn't set the flags that
  // add and inc do. :(
  if (0)
  switch (MI->getOpcode()) {
  case X86::INC32r:
  case X86::INC64_32r:
    assert(MI->getNumOperands() == 2 && "Unknown inc instruction!");
    NewMI = addRegOffset(BuildMI(get(X86::LEA32r), Dest), Src, 1);
    break;
  case X86::INC16r:
  case X86::INC64_16r:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 2 && "Unknown inc instruction!");
    NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, 1);
    break;
  case X86::DEC32r:
  case X86::DEC64_32r:
    assert(MI->getNumOperands() == 2 && "Unknown dec instruction!");
    NewMI = addRegOffset(BuildMI(get(X86::LEA32r), Dest), Src, -1);
    break;
  case X86::DEC16r:
  case X86::DEC64_16r:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 2 && "Unknown dec instruction!");
    NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src, -1);
    break;
  case X86::ADD32rr:
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    NewMI = addRegReg(BuildMI(get(X86::LEA32r), Dest), Src,
                      MI->getOperand(2).getReg());
    break;
  case X86::ADD16rr:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    NewMI = addRegReg(BuildMI(get(X86::LEA16r), Dest), Src,
                      MI->getOperand(2).getReg());
    break;
  case X86::ADD32ri:
  case X86::ADD32ri8:
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    if (MI->getOperand(2).isImmediate())
      NewMI = addRegOffset(BuildMI(get(X86::LEA32r), Dest), Src,
                           MI->getOperand(2).getImmedValue());
    break;
  case X86::ADD16ri:
  case X86::ADD16ri8:
    if (DisableLEA16) return 0;
    assert(MI->getNumOperands() == 3 && "Unknown add instruction!");
    if (MI->getOperand(2).isImmediate())
      NewMI = addRegOffset(BuildMI(get(X86::LEA16r), Dest), Src,
                           MI->getOperand(2).getImmedValue());
    break;
  case X86::SHL16ri:
    if (DisableLEA16) return 0;
  case X86::SHL32ri: {
    assert(MI->getNumOperands() == 3 && MI->getOperand(2).isImmediate() &&
           "Unknown shl instruction!");
    unsigned ShAmt = MI->getOperand(2).getImmedValue();
    if (ShAmt == 1 || ShAmt == 2 || ShAmt == 3) {
      X86AddressMode AM;
      AM.Scale = 1 << ShAmt;
      AM.IndexReg = Src;
      unsigned Opc = MI->getOpcode() == X86::SHL32ri ? X86::LEA32r
                                                     : X86::LEA16r;
      NewMI = addFullAddress(BuildMI(get(Opc), Dest), AM);
    }
    break;
  }
  }

  if (NewMI) {
    NewMI->copyKillDeadInfo(MI);
    LV.instructionChanged(MI, NewMI);  // Update live variables
    MFI->insert(MBBI, NewMI);          // Insert the new inst
  }
  return NewMI;
}

/// commuteInstruction - We have a few instructions that must be hacked on to
/// commute them.
///
MachineInstr *X86InstrInfo::commuteInstruction(MachineInstr *MI) const {
  // FIXME: Can commute cmoves by changing the condition!
  switch (MI->getOpcode()) {
  case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
  case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
  case X86::SHRD32rri8: // A = SHRD32rri8 B, C, I -> A = SHLD32rri8 C, B, (32-I)
  case X86::SHLD32rri8:{// A = SHLD32rri8 B, C, I -> A = SHRD32rri8 C, B, (32-I)
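    // SHRD B, C, I computes (B >> I) | (C << (Size-I)); SHLD with the
    // operands swapped and the amount replaced by Size-I selects exactly the
    // same bits, so the two forms are interchangeable.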
    unsigned Opc;
    unsigned Size;
    switch (MI->getOpcode()) {
    default: assert(0 && "Unreachable!");
    case X86::SHRD16rri8: Size = 16; Opc = X86::SHLD16rri8; break;
    case X86::SHLD16rri8: Size = 16; Opc = X86::SHRD16rri8; break;
    case X86::SHRD32rri8: Size = 32; Opc = X86::SHLD32rri8; break;
    case X86::SHLD32rri8: Size = 32; Opc = X86::SHRD32rri8; break;
    }
    unsigned Amt = MI->getOperand(3).getImmedValue();
    unsigned A = MI->getOperand(0).getReg();
    unsigned B = MI->getOperand(1).getReg();
    unsigned C = MI->getOperand(2).getReg();
    bool BisKill = MI->getOperand(1).isKill();
    bool CisKill = MI->getOperand(2).isKill();
    return BuildMI(get(Opc), A).addReg(C, false, false, CisKill)
      .addReg(B, false, false, BisKill).addImm(Size-Amt);
  }
  default:
    return TargetInstrInfo::commuteInstruction(MI);
  }
}

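/// GetCondFromBranchOpc - Return the X86 condition code tested by the given
/// conditional-branch opcode, or COND_INVALID if it is not a conditional
/// branch.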
static X86::CondCode GetCondFromBranchOpc(unsigned BrOpc) {
  switch (BrOpc) {
  default: return X86::COND_INVALID;
  case X86::JE: return X86::COND_E;
  case X86::JNE: return X86::COND_NE;
  case X86::JL: return X86::COND_L;
  case X86::JLE: return X86::COND_LE;
  case X86::JG: return X86::COND_G;
  case X86::JGE: return X86::COND_GE;
  case X86::JB: return X86::COND_B;
  case X86::JBE: return X86::COND_BE;
  case X86::JA: return X86::COND_A;
  case X86::JAE: return X86::COND_AE;
  case X86::JS: return X86::COND_S;
  case X86::JNS: return X86::COND_NS;
  case X86::JP: return X86::COND_P;
  case X86::JNP: return X86::COND_NP;
  case X86::JO: return X86::COND_O;
  case X86::JNO: return X86::COND_NO;
  }
}

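/// GetCondBranchFromCond - Return the conditional-branch opcode that tests
/// the given X86 condition code; the inverse of GetCondFromBranchOpc.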
unsigned X86::GetCondBranchFromCond(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E: return X86::JE;
  case X86::COND_NE: return X86::JNE;
  case X86::COND_L: return X86::JL;
  case X86::COND_LE: return X86::JLE;
  case X86::COND_G: return X86::JG;
  case X86::COND_GE: return X86::JGE;
  case X86::COND_B: return X86::JB;
  case X86::COND_BE: return X86::JBE;
  case X86::COND_A: return X86::JA;
  case X86::COND_AE: return X86::JAE;
  case X86::COND_S: return X86::JS;
  case X86::COND_NS: return X86::JNS;
  case X86::COND_P: return X86::JP;
  case X86::COND_NP: return X86::JNP;
  case X86::COND_O: return X86::JO;
  case X86::COND_NO: return X86::JNO;
  }
}

/// GetOppositeBranchCondition - Return the inverse of the specified condition,
/// e.g. turning COND_E to COND_NE.
X86::CondCode X86::GetOppositeBranchCondition(X86::CondCode CC) {
  switch (CC) {
  default: assert(0 && "Illegal condition code!");
  case X86::COND_E: return X86::COND_NE;
  case X86::COND_NE: return X86::COND_E;
  case X86::COND_L: return X86::COND_GE;
  case X86::COND_LE: return X86::COND_G;
  case X86::COND_G: return X86::COND_LE;
  case X86::COND_GE: return X86::COND_L;
  case X86::COND_B: return X86::COND_AE;
  case X86::COND_BE: return X86::COND_A;
  case X86::COND_A: return X86::COND_BE;
  case X86::COND_AE: return X86::COND_B;
  case X86::COND_S: return X86::COND_NS;
  case X86::COND_NS: return X86::COND_S;
  case X86::COND_P: return X86::COND_NP;
  case X86::COND_NP: return X86::COND_P;
  case X86::COND_O: return X86::COND_NO;
  case X86::COND_NO: return X86::COND_O;
  }
}

// For purposes of branch analysis do not count FP_REG_KILL as a terminator.
bool X86InstrInfo::isUnpredicatedTerminator(const MachineInstr *MI) const {
  if (MI->getOpcode() == X86::FP_REG_KILL)
    return false;

  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
  if (TID->Flags & M_TERMINATOR_FLAG) {
    // Conditional branch is a special case.
    if ((TID->Flags & M_BRANCH_FLAG) != 0 && (TID->Flags & M_BARRIER_FLAG) == 0)
      return true;
    if ((TID->Flags & M_PREDICABLE) == 0)
      return true;
    return !isPredicated(MI);
  }
  return false;
}

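/// AnalyzeBranch - Analyze the branching code at the end of MBB. On success,
/// fill in the destination blocks and condition and return false; return
/// true if the terminators cannot be understood.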
bool X86InstrInfo::AnalyzeBranch(MachineBasicBlock &MBB,
                                 MachineBasicBlock *&TBB,
                                 MachineBasicBlock *&FBB,
                                 std::vector<MachineOperand> &Cond) const {
  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I))
    return false;

  // Get the last instruction in the block.
  MachineInstr *LastInst = I;

  // If there is only one terminator instruction, process it.
  if (I == MBB.begin() || !isUnpredicatedTerminator(--I)) {
    if (!isBranch(LastInst->getOpcode()))
      return true;

    // If the block ends with a branch there are 3 possibilities:
    // it's an unconditional, conditional, or indirect branch.

    if (LastInst->getOpcode() == X86::JMP) {
      TBB = LastInst->getOperand(0).getMachineBasicBlock();
      return false;
    }
    X86::CondCode BranchCode = GetCondFromBranchOpc(LastInst->getOpcode());
    if (BranchCode == X86::COND_INVALID)
      return true;  // Can't handle indirect branch.

    // Otherwise, block ends with fall-through condbranch.
    TBB = LastInst->getOperand(0).getMachineBasicBlock();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    return false;
  }

  // Get the instruction before it if it's a terminator.
  MachineInstr *SecondLastInst = I;

  // If there are three terminators, we don't know what sort of block this is.
  if (SecondLastInst && I != MBB.begin() && isUnpredicatedTerminator(--I))
    return true;

  // If the block ends with X86::JMP and a conditional branch, handle it.
  X86::CondCode BranchCode = GetCondFromBranchOpc(SecondLastInst->getOpcode());
  if (BranchCode != X86::COND_INVALID && LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMachineBasicBlock();
    Cond.push_back(MachineOperand::CreateImm(BranchCode));
    FBB = LastInst->getOperand(0).getMachineBasicBlock();
    return false;
  }

  // If the block ends with two X86::JMPs, handle it. The second one is not
  // executed, so remove it.
  if (SecondLastInst->getOpcode() == X86::JMP &&
      LastInst->getOpcode() == X86::JMP) {
    TBB = SecondLastInst->getOperand(0).getMachineBasicBlock();
    I = LastInst;
    I->eraseFromParent();
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

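/// RemoveBranch - Remove the branch instructions at the end of MBB and
/// return how many were removed (zero, one, or two).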
unsigned X86InstrInfo::RemoveBranch(MachineBasicBlock &MBB) const {
  MachineBasicBlock::iterator I = MBB.end();
  if (I == MBB.begin()) return 0;
  --I;
  if (I->getOpcode() != X86::JMP &&
      GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 0;

  // Remove the branch.
  I->eraseFromParent();

  I = MBB.end();
  if (I == MBB.begin()) return 1;
  --I;
  if (GetCondFromBranchOpc(I->getOpcode()) == X86::COND_INVALID)
    return 1;

  // Remove the branch.
  I->eraseFromParent();
  return 2;
}

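/// InsertBranch - Insert an unconditional or a conditional (optionally
/// two-way) branch at the end of MBB and return the number of instructions
/// inserted.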
unsigned
X86InstrInfo::InsertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB,
                           MachineBasicBlock *FBB,
                           const std::vector<MachineOperand> &Cond) const {
  // Shouldn't be a fall through.
  assert(TBB && "InsertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 1 || Cond.size() == 0) &&
         "X86 branch conditions have one component!");

  if (FBB == 0) { // One-way branch.
    if (Cond.empty()) {
      // Unconditional branch?
      BuildMI(&MBB, get(X86::JMP)).addMBB(TBB);
    } else {
      // Conditional branch.
      unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
      BuildMI(&MBB, get(Opc)).addMBB(TBB);
    }
    return 1;
  }

  // Two-way conditional branch.
  unsigned Opc = GetCondBranchFromCond((X86::CondCode)Cond[0].getImm());
  BuildMI(&MBB, get(Opc)).addMBB(TBB);
  BuildMI(&MBB, get(X86::JMP)).addMBB(FBB);
  return 2;
}

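/// BlockHasNoFallThrough - Return true if the block ends in an instruction
/// that never falls through to the next block.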
bool X86InstrInfo::BlockHasNoFallThrough(MachineBasicBlock &MBB) const {
  if (MBB.empty()) return false;

  switch (MBB.back().getOpcode()) {
  case X86::RET:     // Return.
  case X86::RETI:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm:
  case X86::JMP:     // Uncond branch.
  case X86::JMP32r:  // Indirect branch.
  case X86::JMP32m:  // Indirect branch through mem.
    return true;
  default: return false;
  }
}

bool X86InstrInfo::
ReverseBranchCondition(std::vector<MachineOperand> &Cond) const {
  assert(Cond.size() == 1 && "Invalid X86 branch condition!");
  Cond[0].setImm(GetOppositeBranchCondition((X86::CondCode)Cond[0].getImm()));
  return false;
}

const TargetRegisterClass *X86InstrInfo::getPointerRegClass() const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  if (Subtarget->is64Bit())
    return &X86::GR64RegClass;
  else
    return &X86::GR32RegClass;
}