//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/CommandLine.h"

using namespace llvm;

static cl::opt<bool>
ForceStackAlign("force-align-stack",
                 cl::desc("Force align the stack to the minimum alignment"
                          " needed for the function."),
                 cl::init(false), cl::Hidden);
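// The flag is an ordinary cl::opt, so it can be passed directly to llc
// (e.g. "llc -force-align-stack foo.ll") or via "-mllvm -force-align-stack"
// from the clang driver; needsStackRealignment() below is where it takes
// effect.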

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameLowering()->getStackAlignment();
  StackPtr = Is64Bit ? X86::RSP : X86::ESP;
  FramePtr = Is64Bit ? X86::RBP : X86::EBP;
}

static unsigned getFlavour(const X86Subtarget *Subtarget, bool isEH) {
  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        return DWARFFlavour::X86_32_DarwinEH;
      else
        return DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Unsupported for now; fall back to the generic flavour.
      return DWARFFlavour::X86_32_Generic;
    } else {
      return DWARFFlavour::X86_32_Generic;
    }
  }
  return DWARFFlavour::X86_64;
}

/// getDwarfRegNum - This function maps LLVM register identifiers to the DWARF
/// specific numbering, used in debug info and exception tables.
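/// For example, on x86-64 the returned numbers follow the SysV psABI DWARF
/// mapping (RBP -> 6, RSP -> 7, return address/RIP -> 16); the separate 32-bit
/// Darwin EH flavour exists because Darwin's exception tables historically
/// numbered EBP/ESP differently from the generic x86-32 mapping.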
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = getFlavour(Subtarget, isEH);

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}

/// getLLVMRegNum - This function maps DWARF register numbers to LLVM registers.
int X86RegisterInfo::getLLVMRegNum(unsigned DwarfRegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = getFlavour(Subtarget, isEH);

  return X86GenRegisterInfo::getLLVMRegNumFull(DwarfRegNo, Flavour);
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  int reg = getX86RegNum(i);
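  // The extended registers get ordinals 8-15 below, e.g. X86::R10 -> 10 and
  // X86::XMM12 -> 12, matching the register numbering used in Win64 SEH
  // unwind information.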
  switch (i) {
  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
  case X86::XMM8: case X86::XMM9: case X86::XMM10: case X86::XMM11:
  case X86::XMM12: case X86::XMM13: case X86::XMM14: case X86::XMM15:
  case X86::YMM8: case X86::YMM9: case X86::YMM10: case X86::YMM11:
  case X86::YMM12: case X86::YMM13: case X86::YMM14: case X86::YMM15:
    reg += 8;
  }
  return reg;
}

/// getX86RegNum - This function maps LLVM register identifiers to their X86
/// specific numbering, which is used in various places encoding instructions.
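/// For example, getX86RegNum(X86::R9D) == N86::ECX == 1: only the low three
/// bits of the hardware register number are returned, and the instruction
/// encoder supplies the extra REX.B/REX.R/REX.X bit separately when one of
/// the extended registers is used.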
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch (RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8:
  case X86::YMM0: case X86::YMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9:
  case X86::YMM1: case X86::YMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10:
  case X86::YMM2: case X86::YMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11:
  case X86::YMM3: case X86::YMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12:
  case X86::YMM4: case X86::YMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13:
  case X86::YMM5: case X86::YMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14:
  case X86::YMM6: case X86::YMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15:
  case X86::YMM7: case X86::YMM15: case X86::MM7:
    return 7;

  case X86::ES: return 0;
  case X86::CS: return 1;
  case X86::SS: return 2;
  case X86::DS: return 3;
  case X86::FS: return 4;
  case X86::GS: return 5;

  case X86::CR0: case X86::CR8 : case X86::DR0: return 0;
  case X86::CR1: case X86::CR9 : case X86::DR1: return 1;
  case X86::CR2: case X86::CR10: case X86::DR2: return 2;
  case X86::CR3: case X86::CR11: case X86::DR3: return 3;
  case X86::CR4: case X86::CR12: case X86::DR4: return 4;
  case X86::CR5: case X86::CR13: case X86::DR5: return 5;
  case X86::CR6: case X86::CR14: case X86::DR6: return 6;
  case X86::CR7: case X86::CR15: case X86::DR7: return 7;

  // Pseudo index registers are equivalent to a "none"
  // scaled index (see Intel Manual 2A, table 2-3).
  case X86::EIZ:
  case X86::RIZ:
    return 4;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}

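/// getMatchingSuperRegClass - A sketch of the contract, inferred from the
/// cases below: given register classes A and B and a sub-register index
/// SubIdx, return the largest sub-class of A whose SubIdx sub-registers all
/// lie in B. For instance, A = GR32 with B = GR8_ABCD_L and SubIdx = sub_8bit
/// yields GR32_ABCD, since only EAX/EBX/ECX/EDX have an addressable low byte
/// in that class.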
const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case X86::sub_8bit:
    if (B == &X86::GR8RegClass) {
      if (A->getSize() == 2 || A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass ||
               A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
      else if (A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case X86::sub_8bit_hi:
    if (B->hasSubClassEq(&X86::GR8_ABCD_HRegClass))
      switch (A->getSize()) {
      case 2: return getCommonSubClass(A, &X86::GR16_ABCDRegClass);
      case 4: return getCommonSubClass(A, &X86::GR32_ABCDRegClass);
      case 8: return getCommonSubClass(A, &X86::GR64_ABCDRegClass);
      default: return 0;
      }
    break;
  case X86::sub_16bit:
    if (B == &X86::GR16RegClass) {
      if (A->getSize() == 4 || A->getSize() == 8)
        return A;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass ||
               A == &X86::GR32_NOREXRegClass || A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass ||
               A == &X86::GR32_NOSPRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
    }
    break;
  case X86::sub_32bit:
    if (B == &X86::GR32RegClass) {
      if (A->getSize() == 8)
        return A;
    } else if (B == &X86::GR32_NOSPRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOSPRegClass)
        return &X86::GR64_NOSPRegClass;
      if (A->getSize() == 8)
        return getCommonSubClass(A, &X86::GR64_NOSPRegClass);
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass ||
          A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass ||
          A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREX_NOSPRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREX_NOSPRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass ||
          A == &X86::GR64_NOSPRegClass || A == &X86::GR64_NOREX_NOSPRegClass)
        return &X86::GR64_NOREX_NOSPRegClass;
      else if (A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    }
    break;
  case X86::sub_ss:
    if (B == &X86::FR32RegClass)
      return A;
    break;
  case X86::sub_sd:
    if (B == &X86::FR64RegClass)
      return A;
    break;
  case X86::sub_xmm:
    if (B == &X86::VR128RegClass)
      return A;
    break;
  }
  return 0;
}

const TargetRegisterClass*
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const {
  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->superclasses_begin();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (TM.getSubtarget<X86Subtarget>().isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_TCRegClass;
    return &X86::GR32_TCRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

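// EFLAGS (the only member of the CCR class) cannot be copied directly, so
// cross-class copies are funneled through a general-purpose register; the
// actual copy sequence (e.g. PUSHF/POPF) is emitted elsewhere, in
// X86InstrInfo::copyPhysReg.
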
unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
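  // FPDiff accounts for the frame pointer: when hasFP() is true, EBP/RBP is
  // not available to the allocator, so the GPR budgets below shrink
  // accordingly. These limits only steer register-pressure heuristics; they
  // are not hard caps on allocation.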
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
  }

  static const unsigned GhcCalleeSavedRegs[] = {
    0
  };

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };

  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };

  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX, X86::RBP, X86::RDI, X86::RSI,
    X86::R12, X86::R13, X86::R14, X86::R15,
    X86::XMM6, X86::XMM7, X86::XMM8, X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };
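  // Each list is terminated by a 0 sentinel. Net effect: a plain 64-bit SysV
  // function saves RBX, R12-R15 and RBP, while Win64 additionally treats RSI,
  // RDI and XMM6-XMM15 as callee-saved, matching the Microsoft x64 ABI.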

  if (ghcCall) {
    return GhcCalleeSavedRegs;
  } else if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  Reserved.set(X86::EIP);
  Reserved.set(X86::IP);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't behave normally
  // with respect to liveness. We don't fully model the effects of x87 stack
  // pushes and pops after stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though their
    // super-registers are ordinary 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      const unsigned GPR64[] = {
        X86::R8,  X86::R9,  X86::R10, X86::R11,
        X86::R12, X86::R13, X86::R14, X86::R15
      };
      for (const unsigned *AI = getOverlaps(GPR64[n]); unsigned Reg = *AI; ++AI)
        Reserved.set(Reg);

      // XMM8, XMM9, ...
      assert(X86::XMM15 == X86::XMM8+7);
      for (const unsigned *AI = getOverlaps(X86::XMM8 + n); unsigned Reg = *AI;
           ++AI)
        Reserved.set(Reg);
    }
  }

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  return (RealignStack &&
          !MFI->hasVarSizedObjects());
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
                              F->hasFnAttr(Attribute::StackAlignment));
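  // Typical trigger: the function holds an object whose alignment exceeds the
  // ABI stack alignment, e.g. a 32-byte aligned AVX value or an over-aligned
  // alloca while StackAlign is only 16 (or less on older 32-bit ABIs).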

  // FIXME: Currently we don't support stack realignment for functions with
  //        variable-sized allocas.
  // FIXME: It's more complicated than this...
  if (0 && requiresRealignment && MFI->hasVarSizedObjects())
    report_fatal_error(
      "Stack realignment in presence of dynamic allocas is not supported");

  // If we've requested that we force-align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

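// Both helpers below prefer the sign-extended 8-bit immediate form whenever
// the adjustment fits in [-128, 127]; the ri8 encodings are three bytes
// shorter than their ri32 counterparts, which matters for the very common
// small call-frame adjustments.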
static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit)
    return isInt<8>(Imm) ? X86::SUB64ri8 : X86::SUB64ri32;
  return isInt<8>(Imm) ? X86::SUB32ri8 : X86::SUB32ri;
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit)
    return isInt<8>(Imm) ? X86::ADD64ri8 : X86::ADD64ri32;
  return isInt<8>(Imm) ? X86::ADD32ri8 : X86::ADD32ri;
}

void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  bool reserveCallFrame = TFI->hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into an 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;
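    // For example, with StackAlign == 16 an outgoing-argument area of 20
    // bytes is rounded up to 32: (20 + 15) / 16 * 16 == 32.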
    MachineInstr *New = 0;
    if (Opcode == getCallFrameSetupOpcode()) {
      New = BuildMI(MF, DL, TII.get(getSUBriOpcode(Is64Bit, Amount)),
                    StackPtr)
        .addReg(StackPtr)
        .addImm(Amount);
    } else {
      assert(Opcode == getCallFrameDestroyOpcode());
      // Factor out the amount the callee already popped.
      Amount -= CalleeAmt;
      if (Amount) {
        unsigned Opc = getADDriOpcode(Is64Bit, Amount);
        New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      }
    }

    if (New) {
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      // Replace the pseudo instruction with a new instruction.
      MBB.insert(I, New);
    }

    return;
  }

  if (Opcode == getCallFrameDestroyOpcode() && CalleeAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
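    // (CalleeAmt is non-zero for callee-pops conventions such as stdcall and
    // thiscall on x86-32, where the callee returns with 'ret imm16'.)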
    unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
    MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr).addImm(CalleeAmt);

    // The EFLAGS implicit def is dead.
    New->getOperand(3).setIsDead();
    MBB.insert(I, New);
  }
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a memory reference: replace the FrameIndex with the
  // chosen base register and fold the frame offset into the displacement.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);
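  // Hypothetical illustration (operand values invented for this example): a
  // load such as
  //   MOV32rm %reg, <fi#1>, 1, %noreg, 4, %noreg
  // becomes, with EBP chosen as the base register and a frame-index offset of
  // -12,
  //   MOV32rm %reg, %EBP, 1, %noreg, -8, %noreg
  // once the displacement is updated below.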

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = FIOffset + (int)(MI.getOperand(i + 3).getImm());
    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  return Is64Bit ? X86::RIP  // Should have dwarf #16.
                 : X86::EIP; // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

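/// getX86SubSuperRegister - X86 utility function: return the sub- or
/// super-register of Reg with the width requested by VT (e.g. the 16-bit
/// variant for MVT::i16). For MVT::i8 with High set, the AH/BH/CH/DH
/// high-byte registers are returned.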
unsigned getX86SubSuperRegister(unsigned Reg, EVT VT, bool High) {
  switch (VT.getSimpleVT().SimpleTy) {
  case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
  case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
  case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
  case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
  case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
  case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
  case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
  case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
  case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
  case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
  case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
  case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
  case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
  case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
  case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
  case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
  case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
  case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
  case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
  case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
  case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
  case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
  case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
  case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
  case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
  case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
  case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
  case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
  case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
  case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
  case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
  case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
  case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
  case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
  case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
  case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
  case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
  case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
  case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
  case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
  case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
  case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
  case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
  case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
  case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
  case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
  case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
  case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
  case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
  case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
  case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
  case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
  case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
  case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
  case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
  case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
  case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
  case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
  case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
  case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
  case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
  case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
  case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
  case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
  case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
  case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
  case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
  case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:

#include "X86GenRegisterInfo.inc"

namespace {
  struct MSAH : public MachineFunctionPass {
    static char ID;
    MSAH() : MachineFunctionPass(ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      const X86TargetMachine *TM =
        static_cast<const X86TargetMachine *>(&MF.getTarget());
      const X86RegisterInfo *X86RI = TM->getRegisterInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();
      X86MachineFunctionInfo *FuncInfo = MF.getInfo<X86MachineFunctionInfo>();
      unsigned StackAlignment = X86RI->getStackAlignment();

      // Be over-conservative: scan over all vreg defs and find whether vector
      // registers are used. If yes, there is a possibility that vector
      // registers will be spilled and thus require dynamic stack realignment.
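      // For example, a function that merely defines a <4 x i32> virtual
      // register (VR128, 16-byte alignment) while the target stack alignment
      // is smaller will have ReserveFP set, signalling that dynamic stack
      // realignment may be needed later.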
      for (unsigned i = 0, e = RI.getNumVirtRegs(); i != e; ++i) {
        unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
        if (RI.getRegClass(Reg)->getAlignment() > StackAlignment) {
          FuncInfo->setReserveFP(true);
          return true;
        }
      }

      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Check";
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
  };

  char MSAH::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentHeuristicPass() { return new MSAH(); }