//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;

cl::opt<bool>
ForceStackAlign("force-align-stack",
                cl::desc("Force align the stack to the minimum alignment"
                         " needed for the function."),
                cl::init(false), cl::Hidden);

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameLowering()->getStackAlignment();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
}

/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
/// specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;

  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Unsupported for now, just a quick fallback.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}

/// getX86RegNum - This function maps LLVM register identifiers to their X86
/// specific numbering, which is used in various places encoding instructions.
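/// The value returned is the 3-bit register number placed in the ModRM reg/rm
/// and SIB fields; for R8-R15 the fourth bit is supplied by a REX prefix.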
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch(RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8:
  case X86::YMM0: case X86::YMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9:
  case X86::YMM1: case X86::YMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10:
  case X86::YMM2: case X86::YMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11:
  case X86::YMM3: case X86::YMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12:
  case X86::YMM4: case X86::YMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13:
  case X86::YMM5: case X86::YMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14:
  case X86::YMM6: case X86::YMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15:
  case X86::YMM7: case X86::YMM15: case X86::MM7:
    return 7;

  case X86::ES: return 0;
  case X86::CS: return 1;
  case X86::SS: return 2;
  case X86::DS: return 3;
  case X86::FS: return 4;
  case X86::GS: return 5;

  case X86::CR0: case X86::CR8 : case X86::DR0: return 0;
  case X86::CR1: case X86::CR9 : case X86::DR1: return 1;
  case X86::CR2: case X86::CR10: case X86::DR2: return 2;
  case X86::CR3: case X86::CR11: case X86::DR3: return 3;
  case X86::CR4: case X86::CR12: case X86::DR4: return 4;
  case X86::CR5: case X86::CR13: case X86::DR5: return 5;
  case X86::CR6: case X86::CR14: case X86::DR6: return 6;
  case X86::CR7: case X86::CR15: case X86::DR7: return 7;

  // Pseudo index registers are equivalent to a "none"
  // scaled index (See Intel Manual 2A, table 2-3)
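  // (i.e. the SIB index field value 4, 0b100, which encodes "no index").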
  case X86::EIZ:
  case X86::RIZ:
    return 4;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case X86::sub_8bit:
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_8bit_hi:
    if (B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_16bit:
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
    }
    break;
  case X86::sub_32bit:
    if (B == &X86::GR32RegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_NOSPRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOSPRegClass)
        return &X86::GR64_NOSPRegClass;
      if (A->getSize() == 8)
        return getCommonSubClass(A, &X86::GR64_NOSPRegClass);
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  case X86::sub_ss:
    if (B == &X86::FR32RegClass)
      return A;
    break;
  case X86::sub_sd:
    if (B == &X86::FR64RegClass)
      return A;
    break;
  case X86::sub_xmm:
    if (B == &X86::VR128RegClass)
      return A;
    break;
  }
  return 0;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
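    // (For instance, ESP/RSP can never be encoded as the index register of a
    // memory operand, so operands used that way need a class without the
    // stack pointer.)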
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (TM.getSubtarget<X86Subtarget>().isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_TCRegClass;
    return &X86::GR32_TCRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
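  // When a frame pointer is in use, EBP/RBP is not available for allocation,
  // so the general-purpose limits below are reduced by FPDiff.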
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
  }

  static const unsigned GhcCalleeSavedRegs[] = {
    0
  };

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX, X86::RBP, X86::RDI, X86::RSI,
    X86::R12, X86::R13, X86::R14, X86::R15,
    X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (ghcCall) {
    return GhcCalleeSavedRegs;
  } else if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave normally
  // with respect to liveness. We don't fully model the effects of x87 stack
  // pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);
  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  return (RealignStack &&
          !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
                               F->hasFnAttr(Attribute::StackAlignment));

  // FIXME: Currently we don't support stack realignment for functions with
  // variable-sized allocas.
  // FIXME: It's more complicated than this...
  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
    report_fatal_error(
      "Stack realignment in presence of dynamic allocas is not supported");

  // If we've requested that we force-align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}

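// Note on the two helpers above: an adjustment that fits in a signed 8-bit
// immediate (e.g. 8 bytes) selects the ri8 opcodes, while an adjustment of
// 128 bytes or more (or less than -128) requires the 32-bit-immediate forms.
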
void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  bool reserveCallFrame = TFI->hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add.
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
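    // For example, with StackAlign == 16 a 20-byte argument area is rounded
    // up to 32 bytes.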

    MachineInstr *New = 0;
    if (Opcode == getCallFrameSetupOpcode()) {
      New = BuildMI(MF, DL, TII.get(getSUBriOpcode(Is64Bit, Amount)),
                    StackPtr)
        .addReg(StackPtr)
        .addImm(Amount);
    } else {
      assert(Opcode == getCallFrameDestroyOpcode());

      // Factor out the amount the callee already popped.
      Amount -= CalleeAmt;

      if (Amount) {
        unsigned Opc = getADDriOpcode(Is64Bit, Amount);
        New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      }
    }

    if (New) {
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      // Replace the pseudo instruction with a new instruction.
      MBB.insert(I, New);
    }

    return;
  }

  if (Opcode == getCallFrameDestroyOpcode() && CalleeAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
    MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr).addImm(CalleeAmt);

    // The EFLAGS implicit def is dead.
    New->getOperand(3).setIsDead();
    MBB.insert(I, New);
  }
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register. Add an offset to the offset.
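  // (X86 memory operands are laid out as base register, scale, index register,
  // displacement, and segment; operand i is the base and i+3 the displacement.)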
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = FIOffset + (int)(MI.getOperand(i + 3).getImm());
    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  return Is64Bit ? X86::RIP     // Should have dwarf #16.
                 : X86::EIP;    // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

namespace llvm {
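// For example, getX86SubSuperRegister(X86::EAX, MVT::i8, /*High=*/true)
// returns X86::AH, and getX86SubSuperRegister(X86::AL, MVT::i64, false)
// returns X86::RAX.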
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}
}

#include "X86GenRegisterInfo.inc"

namespace {
  struct MSAH : public MachineFunctionPass {
    static char ID;
    MSAH() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
      const X86RegisterInfo *X86RI = TM->getRegisterInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      unsigned StackAlignment = X86RI->getStackAlignment();

      // Be over-conservative: scan over all vreg defs and find whether vector
      // registers are used. If yes, there is a possibility that a vector
      // register will be spilled and thus require dynamic stack realignment.
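      // (For example, a spilled VR128 value needs 16-byte alignment, which can
      // exceed the default stack alignment on 32-bit targets.)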
      for (unsigned i = 0, e = RI.getNumVirtRegs(); i != e; ++i) {
        unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
        if (RI.getRegClass(Reg)->getAlignment() > StackAlignment) {
          FuncInfo->setReserveFP(true);
          return true;
        }
      }
      // Nothing to do.
      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Check";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };

  char MSAH::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); }