//===- X86RegisterInfo.cpp - X86 Register Information -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/Type.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineLocation.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetAsmInfo.h"
#include "llvm/Target/TargetFrameInfo.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo(tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKDOWN64 :
                         X86::ADJCALLSTACKDOWN32,
                       tm.getSubtarget<X86Subtarget>().is64Bit() ?
                         X86::ADJCALLSTACKUP64 :
                         X86::ADJCALLSTACKUP32),
    TM(tm), TII(tii) {
  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();
  StackAlign = TM.getFrameInfo()->getStackAlignment();
  SlotSize = Is64Bit ? 8 : 4;
  FramePtr = Is64Bit ? X86::RBP : X86::EBP;
  StackPtr = Is64Bit ? X86::RSP : X86::ESP;
}

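// The cached values above are fixed for the lifetime of the target: on
// x86-64, SlotSize is 8 and the frame/stack pointers are RBP/RSP; on 32-bit
// x86 they are 4 and EBP/ESP.
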
// getDwarfRegNum - This function maps LLVM register identifiers to the
// Dwarf specific numbering, used in debug info and exception tables.
int X86RegisterInfo::getDwarfRegNum(unsigned RegNo, bool isEH) const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  unsigned Flavour = DWARFFlavour::X86_64;

  if (!Subtarget->is64Bit()) {
    if (Subtarget->isTargetDarwin()) {
      if (isEH)
        Flavour = DWARFFlavour::X86_32_DarwinEH;
      else
        Flavour = DWARFFlavour::X86_32_Generic;
    } else if (Subtarget->isTargetCygMing()) {
      // Not supported yet; fall back to the generic flavour.
      Flavour = DWARFFlavour::X86_32_Generic;
    } else {
      Flavour = DWARFFlavour::X86_32_Generic;
    }
  }

  return X86GenRegisterInfo::getDwarfRegNumFull(RegNo, Flavour);
}

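// 32-bit Darwin uses one register numbering in its exception-handling tables
// and another in .debug_frame, which is why the flavour must be chosen per
// subtarget before the TableGen-generated table lookup.
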
// getX86RegNum - This function maps LLVM register identifiers to their X86
// specific numbering, which is used in various places encoding instructions.
unsigned X86RegisterInfo::getX86RegNum(unsigned RegNo) {
  switch (RegNo) {
  case X86::RAX: case X86::EAX: case X86::AX: case X86::AL: return N86::EAX;
  case X86::RCX: case X86::ECX: case X86::CX: case X86::CL: return N86::ECX;
  case X86::RDX: case X86::EDX: case X86::DX: case X86::DL: return N86::EDX;
  case X86::RBX: case X86::EBX: case X86::BX: case X86::BL: return N86::EBX;
  case X86::RSP: case X86::ESP: case X86::SP: case X86::SPL: case X86::AH:
    return N86::ESP;
  case X86::RBP: case X86::EBP: case X86::BP: case X86::BPL: case X86::CH:
    return N86::EBP;
  case X86::RSI: case X86::ESI: case X86::SI: case X86::SIL: case X86::DH:
    return N86::ESI;
  case X86::RDI: case X86::EDI: case X86::DI: case X86::DIL: case X86::BH:
    return N86::EDI;

  case X86::R8:  case X86::R8D:  case X86::R8W:  case X86::R8B:
    return N86::EAX;
  case X86::R9:  case X86::R9D:  case X86::R9W:  case X86::R9B:
    return N86::ECX;
  case X86::R10: case X86::R10D: case X86::R10W: case X86::R10B:
    return N86::EDX;
  case X86::R11: case X86::R11D: case X86::R11W: case X86::R11B:
    return N86::EBX;
  case X86::R12: case X86::R12D: case X86::R12W: case X86::R12B:
    return N86::ESP;
  case X86::R13: case X86::R13D: case X86::R13W: case X86::R13B:
    return N86::EBP;
  case X86::R14: case X86::R14D: case X86::R14W: case X86::R14B:
    return N86::ESI;
  case X86::R15: case X86::R15D: case X86::R15W: case X86::R15B:
    return N86::EDI;

  case X86::ST0: case X86::ST1: case X86::ST2: case X86::ST3:
  case X86::ST4: case X86::ST5: case X86::ST6: case X86::ST7:
    return RegNo-X86::ST0;

  case X86::XMM0: case X86::XMM8: case X86::MM0:
    return 0;
  case X86::XMM1: case X86::XMM9: case X86::MM1:
    return 1;
  case X86::XMM2: case X86::XMM10: case X86::MM2:
    return 2;
  case X86::XMM3: case X86::XMM11: case X86::MM3:
    return 3;
  case X86::XMM4: case X86::XMM12: case X86::MM4:
    return 4;
  case X86::XMM5: case X86::XMM13: case X86::MM5:
    return 5;
  case X86::XMM6: case X86::XMM14: case X86::MM6:
    return 6;
  case X86::XMM7: case X86::XMM15: case X86::MM7:
    return 7;

  default:
    assert(isVirtualRegister(RegNo) && "Unknown physical register!");
    llvm_unreachable("Register allocator hasn't allocated reg correctly yet!");
    return 0;
  }
}

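// Only the low three encoding bits are returned here; for example, R9 and
// RCX both map to N86::ECX (1). The REX prefix bit that distinguishes the
// extended registers is emitted separately by the code emitter.
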
const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  switch (SubIdx) {
  default: return 0;
  case 1:
    // 8-bit
    if (B == &X86::GR8RegClass) {
      if (A == &X86::GR64RegClass)
        return &X86::GR64RegClass;
      else if (A == &X86::GR32RegClass)
        return &X86::GR32RegClass;
      else if (A == &X86::GR16RegClass)
        return &X86::GR16RegClass;
    } else if (B == &X86::GR8_ABCD_LRegClass || B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    } else if (B == &X86::GR8_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass)
        return &X86::GR32_NOREXRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_NOREXRegClass)
        return &X86::GR16_NOREXRegClass;
    }
    break;
  case 2:
    // 8-bit hi
    if (B == &X86::GR8_ABCD_HRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
      else if (A == &X86::GR16RegClass || A == &X86::GR16_ABCDRegClass)
        return &X86::GR16_ABCDRegClass;
    }
    break;
  case 3:
    // 16-bit
    if (B == &X86::GR16RegClass) {
      if (A == &X86::GR64RegClass)
        return &X86::GR64RegClass;
      else if (A == &X86::GR32RegClass)
        return &X86::GR32RegClass;
    } else if (B == &X86::GR16_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_ABCDRegClass)
        return &X86::GR32_ABCDRegClass;
    } else if (B == &X86::GR16_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass)
        return &X86::GR64_NOREXRegClass;
      else if (A == &X86::GR32RegClass || A == &X86::GR32_NOREXRegClass)
        return &X86::GR32_NOREXRegClass;
    }
    break;
  case 4:
    // 32-bit
    if (B == &X86::GR32RegClass) {
      if (A == &X86::GR64RegClass)
        return &X86::GR64RegClass;
    } else if (B == &X86::GR32_ABCDRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_ABCDRegClass)
        return &X86::GR64_ABCDRegClass;
    } else if (B == &X86::GR32_NOREXRegClass) {
      if (A == &X86::GR64RegClass || A == &X86::GR64_NOREXRegClass)
        return &X86::GR64_NOREXRegClass;
    }
    break;
  }

  return 0;
}

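// For example, when coalescing a copy into the 16-bit subregister (SubIdx 3)
// of a GR32 register where the 16-bit side must stay in GR16_ABCD, the
// super-register is constrained to GR32_ABCD so the value remains in one of
// EAX, ECX, EDX or EBX.
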
const TargetRegisterClass *X86RegisterInfo::getPointerRegClass() const {
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  if (Subtarget->is64Bit())
    return &X86::GR64RegClass;
  else
    return &X86::GR32RegClass;
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

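// EFLAGS (the CCR class) cannot be copied directly between arbitrary
// locations, so cross-register-class copies of it are staged through a
// general-purpose register of the native width.
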
const unsigned *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;

  if (MF) {
    const MachineFrameInfo *MFI = MF->getFrameInfo();
    const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
    callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
  }

  static const unsigned CalleeSavedRegs32Bit[] = {
    X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };
  static const unsigned CalleeSavedRegs32EHRet[] = {
    X86::EAX, X86::EDX, X86::ESI, X86::EDI, X86::EBX, X86::EBP, 0
  };
  static const unsigned CalleeSavedRegs64Bit[] = {
    X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };
  static const unsigned CalleeSavedRegs64EHRet[] = {
    X86::RAX, X86::RDX, X86::RBX, X86::R12,
    X86::R13, X86::R14, X86::R15, X86::RBP, 0
  };
  static const unsigned CalleeSavedRegsWin64[] = {
    X86::RBX,   X86::RBP,   X86::RDI,   X86::RSI,
    X86::R12,   X86::R13,   X86::R14,   X86::R15,
    X86::XMM6,  X86::XMM7,  X86::XMM8,  X86::XMM9,
    X86::XMM10, X86::XMM11, X86::XMM12, X86::XMM13,
    X86::XMM14, X86::XMM15, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegsWin64;
    else
      return (callsEHReturn ? CalleeSavedRegs64EHRet : CalleeSavedRegs64Bit);
  } else {
    return (callsEHReturn ? CalleeSavedRegs32EHRet : CalleeSavedRegs32Bit);
  }
}

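// The EHRet variants additionally treat EAX/EDX (RAX/RDX) as callee-saved,
// since llvm.eh.return passes the exception pointer and selector through
// those registers.
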
const TargetRegisterClass* const*
X86RegisterInfo::getCalleeSavedRegClasses(const MachineFunction *MF) const {
  bool callsEHReturn = false;

  if (MF) {
    const MachineFrameInfo *MFI = MF->getFrameInfo();
    const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
    callsEHReturn = (MMI ? MMI->callsEHReturn() : false);
  }

  static const TargetRegisterClass * const CalleeSavedRegClasses32Bit[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses32EHRet[] = {
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass,
    &X86::GR32RegClass, &X86::GR32RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64Bit[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClasses64EHRet[] = {
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass,
    &X86::GR64RegClass, &X86::GR64RegClass, 0
  };
  static const TargetRegisterClass * const CalleeSavedRegClassesWin64[] = {
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::GR64RegClass,  &X86::GR64RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass,
    &X86::VR128RegClass, &X86::VR128RegClass, 0
  };

  if (Is64Bit) {
    if (IsWin64)
      return CalleeSavedRegClassesWin64;
    else
      return (callsEHReturn ?
              CalleeSavedRegClasses64EHRet : CalleeSavedRegClasses64Bit);
  } else {
    return (callsEHReturn ?
            CalleeSavedRegClasses32EHRet : CalleeSavedRegClasses32Bit);
  }
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  Reserved.set(X86::ESP);
  Reserved.set(X86::SP);
  Reserved.set(X86::SPL);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (hasFP(MF)) {
    Reserved.set(X86::RBP);
    Reserved.set(X86::EBP);
    Reserved.set(X86::BP);
    Reserved.set(X86::BPL);
  }

  // Mark the x87 stack registers as reserved, since they don't
  // behave normally with respect to liveness. We don't fully
  // model the effects of x87 stack pushes and pops after
  // stackification.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

static unsigned calculateMaxStackAlignment(const MachineFrameInfo *FFI) {
  unsigned MaxAlign = 0;

  for (int i = FFI->getObjectIndexBegin(),
         e = FFI->getObjectIndexEnd(); i != e; ++i) {
    if (FFI->isDeadObjectIndex(i))
      continue;

    unsigned Align = FFI->getObjectAlignment(i);
    MaxAlign = std::max(MaxAlign, Align);
  }

  return MaxAlign;
}

// hasFP - Return true if the specified function should have a dedicated frame
// pointer register. This is true if the function has variable sized allocas
// or if frame pointer elimination is disabled.
bool X86RegisterInfo::hasFP(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineModuleInfo *MMI = MFI->getMachineModuleInfo();

  return (NoFramePointerElim ||
          needsStackRealignment(MF) ||
          MFI->hasVarSizedObjects() ||
          MFI->isFrameAddressTaken() ||
          MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
          (MMI && MMI->callsUnwindInit()));
}

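// Note that getReservedRegs depends on this predicate: when it returns true,
// EBP/RBP and its aliases are reserved and unavailable to the register
// allocator.
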
bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  // FIXME: Currently we don't support stack realignment for functions with
  //        variable-sized allocas.
  return (RealignStack &&
          (MFI->getMaxAlignment() > StackAlign &&
           !MFI->hasVarSizedObjects()));
}

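// Realignment is requested only when some stack object needs more alignment
// than the ABI guarantees, e.g. a 32-byte-aligned vector spill with a
// 16-byte StackAlign, and (for now) only when no dynamic allocas exist.
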
bool X86RegisterInfo::hasReservedCallFrame(MachineFunction &MF) const {
  return !MF.getFrameInfo()->hasVarSizedObjects();
}

bool X86RegisterInfo::hasReservedSpillSlot(MachineFunction &MF, unsigned Reg,
                                           int &FrameIdx) const {
  if (Reg == FramePtr && hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

int
X86RegisterInfo::getFrameIndexOffset(MachineFunction &MF, int FI) const {
  int Offset = MF.getFrameInfo()->getObjectOffset(FI) + SlotSize;
  uint64_t StackSize = MF.getFrameInfo()->getStackSize();

  if (needsStackRealignment(MF)) {
    if (FI < 0)
      // Skip the saved EBP
      Offset += SlotSize;
    else {
      unsigned Align = MF.getFrameInfo()->getObjectAlignment(FI);
      assert( (-(Offset + StackSize)) % Align == 0);
      Align = 0;  // Silence an unused-variable warning in release builds.
      return Offset + StackSize;
    }

    // FIXME: Support tail calls
  } else {
    if (!hasFP(MF))
      return Offset + StackSize;

    // Skip the saved EBP
    Offset += SlotSize;

    // Skip the RETADDR move area
    X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
    int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
    if (TailCallReturnAddrDelta < 0) Offset -= TailCallReturnAddrDelta;
  }

  return Offset;
}

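// The SlotSize adjustments account for the pushed return address and the
// saved EBP/RBP when converting a frame-index offset into a frame-pointer
// relative displacement.
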
void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  if (!hasReservedCallFrame(MF)) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'
    // TODO: consider using push / pop instead of sub + store / add
    MachineInstr *Old = I;
    uint64_t Amount = Old->getOperand(0).getImm();
    if (Amount != 0) {
      // We need to keep the stack aligned properly. To do this, we round the
      // amount of space needed for the outgoing arguments up to the next
      // alignment boundary.
      Amount = (Amount+StackAlign-1)/StackAlign*StackAlign;

      MachineInstr *New = 0;
      if (Old->getOpcode() == getCallFrameSetupOpcode()) {
        New = BuildMI(MF, Old->getDebugLoc(),
                      TII.get(Is64Bit ? X86::SUB64ri32 : X86::SUB32ri),
                      StackPtr).addReg(StackPtr).addImm(Amount);
      } else {
        assert(Old->getOpcode() == getCallFrameDestroyOpcode());
        // Factor out the amount the callee already popped.
        uint64_t CalleeAmt = Old->getOperand(1).getImm();
        Amount -= CalleeAmt;
        if (Amount) {
          unsigned Opc = (Amount < 128) ?
            (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
            (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri);
          New = BuildMI(MF, Old->getDebugLoc(), TII.get(Opc), StackPtr)
            .addReg(StackPtr).addImm(Amount);
        }
      }

      if (New) {
        // The EFLAGS implicit def is dead.
        New->getOperand(3).setIsDead();

        // Replace the pseudo instruction with a new instruction...
        MBB.insert(I, New);
      }
    }
  } else if (I->getOpcode() == getCallFrameDestroyOpcode()) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    if (uint64_t CalleeAmt = I->getOperand(1).getImm()) {
      unsigned Opc = (CalleeAmt < 128) ?
        (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
        (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri);
      MachineInstr *Old = I;
      MachineInstr *New =
        BuildMI(MF, Old->getDebugLoc(), TII.get(Opc),
                StackPtr).addReg(StackPtr).addImm(CalleeAmt);
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();
      MBB.insert(I, New);
    }
  }

  MBB.erase(I);
}

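// For example, a call that needs 20 bytes of outgoing arguments under a
// 16-byte StackAlign is lowered to "sub esp, 32" before the call and
// "add esp, 32" after it once the ADJCALLSTACK pseudos reach this point.
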
void X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                          int SPAdj, RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();

  unsigned BasePtr;
  if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else
    BasePtr = (hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register. Add the frame offset to the
  // displacement.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Offset = getFrameIndexOffset(MF, FrameIndex) +
      (int)(MI.getOperand(i+3).getImm());

    MI.getOperand(i+3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = getFrameIndexOffset(MF, FrameIndex) +
      (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
}

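// In an X86 memory reference the operands are laid out as base, scale, index
// and displacement, so operand i (the frame index) becomes the base register
// and the displacement at operand i+3 absorbs the computed frame offset.
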
void
X86RegisterInfo::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
                                                      RegScavenger *RS) const {
  MachineFrameInfo *FFI = MF.getFrameInfo();

  // Calculate and set max stack object alignment early, so we can decide
  // whether we will need stack realignment (and thus FP).
  unsigned MaxAlign = std::max(FFI->getMaxAlignment(),
                               calculateMaxStackAlignment(FFI));

  FFI->setMaxAlignment(MaxAlign);

  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0) {
    // Create the RETURNADDR area:
    //   arg
    //   arg
    //   RETADDR
    //   { ...
    //     RETADDR area
    //     ...
    //   }
    //   [EBP]
    MF.getFrameInfo()->
      CreateFixedObject(-TailCallReturnAddrDelta,
                        (-1*SlotSize)+TailCallReturnAddrDelta);
  }

  if (hasFP(MF)) {
    assert((TailCallReturnAddrDelta <= 0) &&
           "The Delta should always be zero or negative");
    // Create a frame entry for the EBP register that must be saved.
    int FrameIdx = MF.getFrameInfo()->CreateFixedObject(SlotSize,
                                                        -2 * (int)SlotSize +
                                                        TailCallReturnAddrDelta);
    assert(FrameIdx == MF.getFrameInfo()->getObjectIndexBegin() &&
           "Slot for EBP register must be last in order to be found!");
  }
}

/// emitSPUpdate - Emit a series of instructions to increment / decrement the
/// stack pointer by a constant value.
static
void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                  unsigned StackPtr, int64_t NumBytes, bool Is64Bit,
                  const TargetInstrInfo &TII) {
  bool isSub = NumBytes < 0;
  uint64_t Offset = isSub ? -NumBytes : NumBytes;
  unsigned Opc = isSub
    ? ((Offset < 128) ?
       (Is64Bit ? X86::SUB64ri8 : X86::SUB32ri8) :
       (Is64Bit ? X86::SUB64ri32 : X86::SUB32ri))
    : ((Offset < 128) ?
       (Is64Bit ? X86::ADD64ri8 : X86::ADD32ri8) :
       (Is64Bit ? X86::ADD64ri32 : X86::ADD32ri));
  uint64_t Chunk = (1LL << 31) - 1;
  DebugLoc DL = (MBBI != MBB.end() ? MBBI->getDebugLoc() :
                 DebugLoc::getUnknownLoc());

  while (Offset) {
    uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
        .addReg(StackPtr).addImm(ThisVal);
    // The EFLAGS implicit def is dead.
    MI->getOperand(3).setIsDead();
    Offset -= ThisVal;
  }
}

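// Adjustments larger than 2^31 - 1 bytes are split into several ADD/SUB
// instructions because the immediate forms used here accept at most a signed
// 32-bit operand.
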
// mergeSPUpdatesUp - Merge a stack adjustment into the instruction just
// above the passed iterator, deleting that instruction if it matches.
static
void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
                      unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.begin()) return;

  MachineBasicBlock::iterator PI = prior(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += PI->getOperand(2).getImm();
    MBB.erase(PI);
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= PI->getOperand(2).getImm();
    MBB.erase(PI);
  }
}

// mergeSPUpdatesDown - Merge a stack adjustment into the instruction just
// below the passed iterator, deleting that instruction if it matches.
static
void mergeSPUpdatesDown(MachineBasicBlock &MBB,
                        MachineBasicBlock::iterator &MBBI,
                        unsigned StackPtr, uint64_t *NumBytes = NULL) {
  if (MBBI == MBB.end()) return;

  MachineBasicBlock::iterator NI = next(MBBI);
  if (NI == MBB.end()) return;

  unsigned Opc = NI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes -= NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             NI->getOperand(0).getReg() == StackPtr) {
    if (NumBytes)
      *NumBytes += NI->getOperand(2).getImm();
    MBB.erase(NI);
    MBBI = NI;
  }
}

/// mergeSPUpdates - Checks the instruction before/after the passed
/// instruction. If it is an ADD/SUB instruction it is deleted and the stack
/// adjustment is returned as a positive value for ADD and a negative one
/// for SUB.
static int mergeSPUpdates(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MBBI,
                          unsigned StackPtr,
                          bool doMergeWithPrevious) {
  if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
      (!doMergeWithPrevious && MBBI == MBB.end()))
    return 0;

  int Offset = 0;

  MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
  MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : next(MBBI);
  unsigned Opc = PI->getOpcode();
  if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
       Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
      PI->getOperand(0).getReg() == StackPtr) {
    Offset += PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
              Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
             PI->getOperand(0).getReg() == StackPtr) {
    Offset -= PI->getOperand(2).getImm();
    MBB.erase(PI);
    if (!doMergeWithPrevious) MBBI = NI;
  }

  return Offset;
}

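// For example, if the instruction before MBBI is "sub esp, 16", that sub is
// erased and -16 is returned, letting the caller fold the adjustment into
// the stack update it is about to emit.
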
void X86RegisterInfo::emitCalleeSavedFrameMoves(MachineFunction &MF,
                                                unsigned LabelId,
                                                unsigned FramePtr) const {
  MachineFrameInfo *MFI = MF.getFrameInfo();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  if (!MMI) return;

  // Add callee saved registers to move list.
  const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
  if (CSI.empty()) return;

  std::vector<MachineMove> &Moves = MMI->getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  bool HasFP = hasFP(MF);

  // Calculate the amount of bytes used for return address storing.
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  // FIXME: This is a dirty hack. The code itself is a mess right now. It
  // should be rewritten from scratch and generalized at some point.

  // Determine the maximum offset (minimum due to stack growth).
  int64_t MaxOffset = 0;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I)
    MaxOffset = std::min(MaxOffset,
                         MFI->getObjectOffset(I->getFrameIdx()));

  // Calculate offsets.
  int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
  for (std::vector<CalleeSavedInfo>::const_iterator
         I = CSI.begin(), E = CSI.end(); I != E; ++I) {
    int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
    unsigned Reg = I->getReg();
    Offset = MaxOffset - Offset + saveAreaOffset;

    MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
    MachineLocation CSSrc(Reg);
    Moves.push_back(MachineMove(LabelId, CSDst, CSSrc));
  }
}

void X86RegisterInfo::emitPrologue(MachineFunction &MF) const {
  MachineBasicBlock &MBB = MF.front();   // Prologue goes in entry BB.
  MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *Fn = MF.getFunction();
  const X86Subtarget *Subtarget = &MF.getTarget().getSubtarget<X86Subtarget>();
  MachineModuleInfo *MMI = MFI->getMachineModuleInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = MBB.begin();
  bool needsFrameMoves = (MMI && MMI->hasDebugInfo()) ||
                          !Fn->doesNotThrow() ||
                          UnwindTablesMandatory;
  bool HasFP = hasFP(MF);
  DebugLoc DL;

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();

  // Get desired stack alignment.
  uint64_t MaxAlign = MFI->getMaxAlignment();

  // Add RETADDR move area to callee saved frame size.
  int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
  if (TailCallReturnAddrDelta < 0)
    X86FI->setCalleeSavedFrameSize(
      X86FI->getCalleeSavedFrameSize() + (-TailCallReturnAddrDelta));

  // If this is x86-64 and the Red Zone is not disabled, if we are a leaf
  // function, and use up to 128 bytes of stack space, don't have a frame
  // pointer, calls, or dynamic alloca then we do not need to adjust the
  // stack pointer (we fit in the Red Zone).
  bool DisableRedZone = Fn->hasFnAttr(Attribute::NoRedZone);
  if (Is64Bit && !DisableRedZone &&
      !needsStackRealignment(MF) &&
      !MFI->hasVarSizedObjects() &&                // No dynamic alloca.
      !MFI->hasCalls() &&                          // No calls.
      !Subtarget->isTargetWin64()) {               // Win64 has no Red Zone.
    uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
    if (HasFP) MinSize += SlotSize;
    StackSize = std::max(MinSize,
                         StackSize > 128 ? StackSize - 128 : 0);
    MFI->setStackSize(StackSize);
  }

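  // The red zone is the 128 bytes below RSP that the x86-64 System V ABI
  // guarantees will not be clobbered asynchronously (e.g. by signal
  // handlers), so a leaf function can use it without moving RSP.
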
  // Insert stack pointer adjustment for later moving of return addr. Only
  // applies to tail call optimized functions where the callee argument stack
  // size is bigger than the caller's.
  if (TailCallReturnAddrDelta < 0) {
    MachineInstr *MI =
      BuildMI(MBB, MBBI, DL, TII.get(Is64Bit? X86::SUB64ri32 : X86::SUB32ri),
              StackPtr).addReg(StackPtr).addImm(-TailCallReturnAddrDelta);
    // The EFLAGS implicit def is dead.
    MI->getOperand(3).setIsDead();
  }

  std::vector<MachineMove> &Moves = MMI->getFrameMoves();
  const TargetData *TD = MF.getTarget().getTargetData();
  int stackGrowth =
    (MF.getTarget().getFrameInfo()->getStackGrowthDirection() ==
     TargetFrameInfo::StackGrowsUp ?
     TD->getPointerSize() : -TD->getPointerSize());

  uint64_t NumBytes = 0;
  if (HasFP) {
    // Calculate the required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();

    // Get the offset of the stack slot for the EBP register, which is
    // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
    // Update the frame offset adjustment.
    MFI->setOffsetAdjustment(-NumBytes);

    // Save EBP/RBP into the appropriate stack slot...
    BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
      .addReg(FramePtr, RegState::Kill);

    if (needsFrameMoves) {
      // Mark the effective beginning of when the frame pointer becomes valid.
      unsigned FrameLabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);

      // Define the current CFA rule to use the provided offset.
      if (StackSize) {
        MachineLocation SPDst(MachineLocation::VirtualFP);
        MachineLocation SPSrc(MachineLocation::VirtualFP,
                              HasFP ? 2 * stackGrowth :
                                      -StackSize + stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      } else {
        // FIXME: Verify & implement for FP
        MachineLocation SPDst(StackPtr);
        MachineLocation SPSrc(StackPtr, stackGrowth);
        Moves.push_back(MachineMove(FrameLabelId, SPDst, SPSrc));
      }

      // Change the rule for the FramePtr to be an "offset" rule.
      MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
      MachineLocation FPSrc(FramePtr);
      Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
    }

    // Update EBP with the new base value...
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
      .addReg(StackPtr);

    if (needsFrameMoves) {
      unsigned FrameLabelId = MMI->NextLabelID();
      BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(FrameLabelId);

      // Define the current CFA to use the EBP/RBP register.
      MachineLocation FPDst(FramePtr);
      MachineLocation FPSrc(MachineLocation::VirtualFP);
      Moves.push_back(MachineMove(FrameLabelId, FPDst, FPSrc));
    }

    // Mark the FramePtr as live-in in every block except the entry.
    for (MachineFunction::iterator I = next(MF.begin()), E = MF.end();
         I != E; ++I)
      I->addLiveIn(FramePtr);

    // Realign the stack, if needed.
    if (needsStackRealignment(MF)) {
      MachineInstr *MI =
        BuildMI(MBB, MBBI, DL,
                TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri),
                StackPtr).addReg(StackPtr).addImm(-MaxAlign);

      // The EFLAGS implicit def is dead.
      MI->getOperand(3).setIsDead();
    }
  } else {
    NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
  }

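  // Realignment happens after the frame pointer is established: incoming
  // arguments (fixed, negative frame indices) are then addressed relative to
  // EBP/RBP while locals use the realigned ESP/RSP (see eliminateFrameIndex).
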
  // Skip the callee-saved push instructions.
  bool RegsSaved = false;
  while (MBBI != MBB.end() &&
         (MBBI->getOpcode() == X86::PUSH32r ||
          MBBI->getOpcode() == X86::PUSH64r)) {
    RegsSaved = true;
    ++MBBI;
  }

  if (RegsSaved && needsFrameMoves) {
    // Mark the end of the callee-saved push instructions.
    unsigned LabelId = MMI->NextLabelID();
    BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(LabelId);

    // Emit DWARF info specifying the offsets of the callee-saved registers.
    emitCalleeSavedFrameMoves(MF, LabelId, HasFP ? FramePtr : StackPtr);
  }

  if (MBBI != MBB.end())
    DL = MBBI->getDebugLoc();

  // Adjust stack pointer: ESP -= numbytes.
  if (NumBytes >= 4096 && Subtarget->isTargetCygMing()) {
    // Check whether EAX is live-in for this function.
    bool isEAXAlive = false;
    for (MachineRegisterInfo::livein_iterator
           II = MF.getRegInfo().livein_begin(),
           EE = MF.getRegInfo().livein_end(); (II != EE) && !isEAXAlive; ++II) {
      unsigned Reg = II->first;
      isEAXAlive = (Reg == X86::EAX || Reg == X86::AX ||
                    Reg == X86::AH || Reg == X86::AL);
    }

    // The function prologue calls _alloca to probe the stack when allocating
    // more than 4k bytes in one go. Touching the stack at 4K increments is
    // necessary to ensure that the guard pages used by the OS virtual memory
    // manager are allocated in the correct sequence.
    if (!isEAXAlive) {
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca");
    } else {
      // Save EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
        .addReg(X86::EAX, RegState::Kill);

      // Allocate NumBytes-4 bytes on the stack. We'll also use 4 already
      // allocated bytes for EAX.
      BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
        .addImm(NumBytes - 4);
      BuildMI(MBB, MBBI, DL, TII.get(X86::CALLpcrel32))
        .addExternalSymbol("_alloca");

      // Restore EAX.
      MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
                                              X86::EAX),
                                      StackPtr, false, NumBytes - 4);
      MBB.insert(MBBI, MI);
    }
  } else if (NumBytes) {
    // If there is an SUB32ri of ESP immediately before this instruction,
    // merge the two. This can be the case when tail call elimination is
    // enabled and the callee has more arguments than the caller.
    NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);

    // If there is an ADD32ri or SUB32ri of ESP immediately after this
    // instruction, merge the two instructions.
    mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);

    if (NumBytes)
      emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit, TII);
  }

  if (!HasFP && needsFrameMoves && NumBytes) {
    // Mark the end of the stack pointer adjustment.
    unsigned LabelId = MMI->NextLabelID();
    BuildMI(MBB, MBBI, DL, TII.get(X86::DBG_LABEL)).addImm(LabelId);

    // Define the current CFA rule to use the provided offset.
    if (StackSize) {
      MachineLocation SPDst(MachineLocation::VirtualFP);
      MachineLocation SPSrc(MachineLocation::VirtualFP,
                            -StackSize + stackGrowth);
      Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
    } else {
      // FIXME: Verify & implement for FP
      MachineLocation SPDst(StackPtr);
      MachineLocation SPSrc(StackPtr, stackGrowth);
      Moves.push_back(MachineMove(LabelId, SPDst, SPSrc));
    }
  }
}

void X86RegisterInfo::emitEpilogue(MachineFunction &MF,
                                   MachineBasicBlock &MBB) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
  MachineBasicBlock::iterator MBBI = prior(MBB.end());
  unsigned RetOpcode = MBBI->getOpcode();
  DebugLoc DL = MBBI->getDebugLoc();

  switch (RetOpcode) {
  case X86::RET:
  case X86::RETI:
  case X86::TCRETURNdi:
  case X86::TCRETURNri:
  case X86::TCRETURNri64:
  case X86::TCRETURNdi64:
  case X86::EH_RETURN:
  case X86::EH_RETURN64:
  case X86::TAILJMPd:
  case X86::TAILJMPr:
  case X86::TAILJMPm: break;  // These are ok.
  default:
    llvm_unreachable("Can only insert epilog into returning blocks");
  }

  // Get the number of bytes to allocate from the FrameInfo.
  uint64_t StackSize = MFI->getStackSize();
  uint64_t MaxAlign = MFI->getMaxAlignment();
  unsigned CSSize = X86FI->getCalleeSavedFrameSize();
  uint64_t NumBytes = 0;

  if (hasFP(MF)) {
    // Calculate the required stack adjustment.
    uint64_t FrameSize = StackSize - SlotSize;
    if (needsStackRealignment(MF))
      FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;

    NumBytes = FrameSize - CSSize;

    // Pop EBP.
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
  } else {
    NumBytes = StackSize - CSSize;
  }

  // Skip the callee-saved pop instructions.
  MachineBasicBlock::iterator LastCSPop = MBBI;
  while (MBBI != MBB.begin()) {
    MachineBasicBlock::iterator PI = prior(MBBI);
    unsigned Opc = PI->getOpcode();
    if (Opc != X86::POP32r && Opc != X86::POP64r &&
        !PI->getDesc().isTerminator())
      break;
    --MBBI;
  }

  DL = MBBI->getDebugLoc();

  // If there is an ADD32ri or SUB32ri of ESP immediately before this
  // instruction, merge the two instructions.
  if (NumBytes || MFI->hasVarSizedObjects())
    mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);

  // If dynamic alloca is used, then reset ESP to point to the last
  // callee-saved slot before popping them off! The same applies when the
  // stack was realigned.
  if (needsStackRealignment(MF)) {
    // We cannot use LEA here, because the stack pointer was realigned. We
    // need to deallocate the local frame first.
    if (CSSize) {
      emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);
      MBBI = prior(LastCSPop);
    }

    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(FramePtr);
  } else if (MFI->hasVarSizedObjects()) {
    if (CSSize) {
      unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
      MachineInstr *MI = addLeaRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
                                         FramePtr, false, -CSSize);
      MBB.insert(MBBI, MI);
    } else
      BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
              StackPtr).addReg(FramePtr);
  }

  // Adjust stack pointer back: ESP += numbytes.
  if (NumBytes)
    emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, TII);

  // We're returning from function via eh_return.
  if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
    MBBI = prior(MBB.end());
    MachineOperand &DestAddr = MBBI->getOperand(0);
    assert(DestAddr.isReg() && "Offset should be in register!");
    BuildMI(MBB, MBBI, DL,
            TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
            StackPtr).addReg(DestAddr.getReg());
  // Tail call return: adjust the stack pointer and jump to callee.
  } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
             RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64) {
    MBBI = prior(MBB.end());
    MachineOperand &JumpTarget = MBBI->getOperand(0);
    MachineOperand &StackAdjust = MBBI->getOperand(1);
    assert(StackAdjust.isImm() && "Expecting immediate value.");

    // Adjust stack pointer.
    int StackAdj = StackAdjust.getImm();
    int MaxTCDelta = X86FI->getTCReturnAddrDelta();
    int Offset = 0;
    assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
    // Incorporate the retaddr area.
    Offset = StackAdj-MaxTCDelta;
    assert(Offset >= 0 && "Offset should never be negative");

    if (Offset) {
      // Check for possible merge with preceding ADD instruction.
      Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
      emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII);
    }

    // Jump to label or value in register.
    if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64)
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPd)).
        addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset());
    else if (RetOpcode == X86::TCRETURNri64)
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64), JumpTarget.getReg());
    else
      BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr), JumpTarget.getReg());

    // Delete the pseudo instruction TCRETURN.
    MBB.erase(MBBI);
  } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
             (X86FI->getTCReturnAddrDelta() < 0)) {
    // Add the return addr area delta back since we are not tail calling.
    int delta = -1*X86FI->getTCReturnAddrDelta();
    MBBI = prior(MBB.end());

    // Check for possible merge with preceding ADD instruction.
    delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
    emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII);
  }
}

unsigned X86RegisterInfo::getRARegister() const {
  if (Is64Bit)
    return X86::RIP;  // Should have dwarf #16.
  else
    return X86::EIP;  // Should have dwarf #8.
}

unsigned X86RegisterInfo::getFrameRegister(MachineFunction &MF) const {
  return hasFP(MF) ? FramePtr : StackPtr;
}

void X86RegisterInfo::getInitialFrameState(std::vector<MachineMove> &Moves)
                                                                         const {
  // Calculate the amount of bytes used for return address storing.
  int stackGrowth = (Is64Bit ? -8 : -4);

  // Initial state of the frame pointer is esp + stackGrowth.
  MachineLocation Dst(MachineLocation::VirtualFP);
  MachineLocation Src(StackPtr, stackGrowth);
  Moves.push_back(MachineMove(0, Dst, Src));

  // Add the return address to the move list.
  MachineLocation CSDst(StackPtr, stackGrowth);
  MachineLocation CSSrc(getRARegister());
  Moves.push_back(MachineMove(0, CSDst, CSSrc));
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
  return 0;
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
  return 0;
}

namespace llvm {

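// getX86SubSuperRegister - X86 utility function. It returns the sub or super
// register of a specific X86 register of the requested width: for example,
// getX86SubSuperRegister(X86::EAX, MVT::i16) returns X86::AX, and
// getX86SubSuperRegister(X86::EAX, MVT::i8, true) returns X86::AH.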
unsigned getX86SubSuperRegister(unsigned Reg, MVT VT, bool High) {
  switch (VT.getSimpleVT()) {
  default: return Reg;
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: return 0;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: return Reg;
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }

  return Reg;
}

} // end namespace llvm

#include "X86GenRegisterInfo.inc"

namespace {
  struct VISIBILITY_HIDDEN MSAC : public MachineFunctionPass {
    static char ID;
    MSAC() : MachineFunctionPass(&ID) {}

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      MachineFrameInfo *FFI = MF.getFrameInfo();
      MachineRegisterInfo &RI = MF.getRegInfo();

      // Calculate max stack alignment of all already allocated stack objects.
      unsigned MaxAlign = calculateMaxStackAlignment(FFI);

      // Be over-conservative: scan all virtual register definitions and check
      // whether vector registers are used. If so, some of them may be spilled,
      // and the stack then needs to be aligned properly for them.
      for (unsigned RegNum = TargetRegisterInfo::FirstVirtualRegister;
           RegNum < RI.getLastVirtReg(); ++RegNum)
        MaxAlign = std::max(MaxAlign, RI.getRegClass(RegNum)->getAlignment());

      FFI->setMaxAlignment(MaxAlign);

      return false;
    }

    virtual const char *getPassName() const {
      return "X86 Maximal Stack Alignment Calculator";
    }
  };

  char MSAC::ID = 0;
}

FunctionPass*
llvm::createX86MaxStackAlignmentCalculatorPass() { return new MSAC(); }
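
// The pass conservatively raises MaxAlignment before register allocation so
// that later frame layout queries (e.g. needsStackRealignment) already see
// the worst-case spill alignment.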