//===-- X86RegisterInfo.cpp - X86 Register Information -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//
16 #include "X86RegisterInfo.h"
18 #include "X86InstrBuilder.h"
19 #include "X86MachineFunctionInfo.h"
20 #include "X86Subtarget.h"
21 #include "X86TargetMachine.h"
22 #include "llvm/ADT/BitVector.h"
23 #include "llvm/ADT/STLExtras.h"
24 #include "llvm/CodeGen/MachineFrameInfo.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineFunctionPass.h"
27 #include "llvm/CodeGen/MachineInstrBuilder.h"
28 #include "llvm/CodeGen/MachineModuleInfo.h"
29 #include "llvm/CodeGen/MachineRegisterInfo.h"
30 #include "llvm/CodeGen/ValueTypes.h"
31 #include "llvm/Constants.h"
32 #include "llvm/Function.h"
33 #include "llvm/MC/MCAsmInfo.h"
34 #include "llvm/Support/CommandLine.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include "llvm/Target/TargetFrameLowering.h"
37 #include "llvm/Target/TargetInstrInfo.h"
38 #include "llvm/Target/TargetMachine.h"
39 #include "llvm/Target/TargetOptions.h"
40 #include "llvm/Type.h"

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

using namespace llvm;

cl::opt<bool>
ForceStackAlign("force-align-stack",
                 cl::desc("Force align the stack to the minimum alignment"
                          " needed for the function."),
                 cl::init(false), cl::Hidden);
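
// Note: this is a hidden flag, but for illustration it can still be passed
// explicitly to the backend (e.g. "llc -force-align-stack foo.ll") to force
// stack realignment even when a function would not otherwise require it.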

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));
57 X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
58 const TargetInstrInfo &tii)
59 : X86GenRegisterInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
60 ? X86::RIP : X86::EIP),
61 X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), false),
62 X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), true),
63 (tm.getSubtarget<X86Subtarget>().is64Bit()
64 ? X86::RIP : X86::EIP)),
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires the GOT address to be in EBX before function calls via the PLT.
  BasePtr = Is64Bit ? X86::RBX : X86::ESI;
}

/// getCompactUnwindRegNum - This function maps the register to the number for
/// compact unwind encoding. Return -1 if the register isn't valid.
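/// For example, both EBP and RBP map to compact-unwind register number 6,
/// while any register not listed below gets -1.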
int X86RegisterInfo::getCompactUnwindRegNum(unsigned RegNum, bool isEH) const {
  switch (getLLVMRegNum(RegNum, isEH)) {
  case X86::EBX: case X86::RBX: return 1;
  case X86::ECX: case X86::R12: return 2;
  case X86::EDX: case X86::R13: return 3;
  case X86::EDI: case X86::R14: return 4;
  case X86::ESI: case X86::R15: return 5;
  case X86::EBP: case X86::RBP: return 6;
  }

  return -1;
}

bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // Only enable when post-RA scheduling is enabled and this is needed.
  return TM.getSubtargetImpl()->postRAScheduler();
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return 0;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass*
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const{
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
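  // For example, a value that is live in AH cannot be copied into SIL: the
  // REX prefix required to encode SIL makes the high-byte registers
  // unaddressable in the same instruction.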
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
                                                                         const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (TM.getSubtarget<X86Subtarget>().isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    if (TM.getSubtarget<X86Subtarget>().is64Bit())
      return &X86::GR64_TCRegClass;

    const Function *F = MF.getFunction();
    bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
    if (hasHipeCC)
      return &X86::GR32RegClass;
    return &X86::GR32_TCRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const uint16_t *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;
  bool oclBiCall = false;
  bool hipeCall = false;
  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
    oclBiCall = (F ? F->getCallingConv() == CallingConv::Intel_OCL_BI : false);
    hipeCall = (F ? F->getCallingConv() == CallingConv::HiPE : false);
  }

  if (ghcCall || hipeCall)
    return CSR_NoRegs_SaveList;
  if (oclBiCall) {
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
  }
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (callsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (callsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}

const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();

  if (CC == CallingConv::Intel_OCL_BI) {
    if (IsWin64 && HasAVX)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (Is64Bit && HasAVX)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
  }
  if (CC == CallingConv::GHC || CC == CallingConv::HiPE)
    return CSR_NoRegs_RegMask;
  if (!Is64Bit)
    return CSR_32_RegMask;
  if (IsWin64)
    return CSR_Win64_RegMask;
  return CSR_64_RegMask;
}

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  for (MCSubRegIterator I(X86::RSP, this); I.isValid(); ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  for (MCSubRegIterator I(X86::RIP, this); I.isValid(); ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    Reserved.set(X86::RBP);
    for (MCSubRegIterator I(X86::RBP, this); I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t* RegMask = getCallPreservedMask(CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
        "Stack realignment in presence of dynamic allocas is not supported "
        "with this calling convention.");

    Reserved.set(getBaseRegister());
    for (MCSubRegIterator I(getBaseRegister(), this); I.isValid(); ++I)
      Reserved.set(*I);
  }
334 // Mark the segment registers as reserved.
335 Reserved.set(X86::CS);
336 Reserved.set(X86::SS);
337 Reserved.set(X86::DS);
338 Reserved.set(X86::ES);
339 Reserved.set(X86::FS);
340 Reserved.set(X86::GS);
342 // Mark the floating point stack registers as reserved.
343 Reserved.set(X86::ST0);
344 Reserved.set(X86::ST1);
345 Reserved.set(X86::ST2);
346 Reserved.set(X86::ST3);
347 Reserved.set(X86::ST4);
348 Reserved.set(X86::ST5);
349 Reserved.set(X86::ST6);
350 Reserved.set(X86::ST7);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though their
    // super-registers are the old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      static const uint16_t GPR64[] = {
        X86::R8,  X86::R9,  X86::R10, X86::R11,
        X86::R12, X86::R13, X86::R14, X86::R15
      };
      for (MCRegAliasIterator AI(GPR64[n], this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      assert(X86::XMM15 == X86::XMM8+7);
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment and there are dynamic allocas, we can't
  // reference off of the stack pointer, so we reserve a base pointer.
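  // For example, a function that calls alloca() and also needs 32-byte
  // alignment for AVX spills must realign its frame; the variable-sized
  // alloca makes SP-relative offsets unreliable, so local objects are
  // addressed off the base pointer instead.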
  if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
    return true;

  return false;
}

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  if (!MF.getTarget().Options.RealignStack)
    return false;

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (MFI->hasVarSizedObjects())
    return MRI->canReserveReg(BasePtr);

  return true;
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
  bool requiresRealignment =
    ((MFI->getMaxAlignment() > StackAlign) ||
     F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                     Attribute::StackAlignment));

  // If we've requested that we force align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}
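
// Both helpers above pick the 8-bit-immediate form when the adjustment fits
// in a signed byte: e.g. a 24-byte adjustment selects SUB64ri8/ADD64ri8 in
// 64-bit mode, while a 4096-byte adjustment needs the ri32 forms.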

void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  bool reserveCallFrame = TFI->hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after prologue, turn the
    // adjcallstackdown instruction into a 'sub ESP, <amt>' and the
    // adjcallstackup instruction into an 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
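    // For example, with a 16-byte stack alignment, a call site that needs 20
    // bytes of outgoing argument space gets its adjustment rounded up to 32.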
    unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

    MachineInstr *New = 0;
    if (Opcode == TII.getCallFrameSetupOpcode()) {
      New = BuildMI(MF, DL, TII.get(getSUBriOpcode(Is64Bit, Amount)),
                    StackPtr)
        .addReg(StackPtr)
        .addImm(Amount);
    } else {
      assert(Opcode == TII.getCallFrameDestroyOpcode());

      // Factor out the amount the callee already popped.
      Amount -= CalleeAmt;

      if (Amount) {
        unsigned Opc = getADDriOpcode(Is64Bit, Amount);
        New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      }
    }

    if (New) {
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      // Replace the pseudo instruction with a new instruction.
      MBB.insert(I, New);
    }

    return;
  }

523 if (Opcode == TII.getCallFrameDestroyOpcode() && CalleeAmt) {
524 // If we are performing frame pointer elimination and if the callee pops
525 // something off the stack pointer, add it back. We do this until we have
526 // more advanced stack pointer tracking ability.
527 unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
528 MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
529 .addReg(StackPtr).addImm(CalleeAmt);
    // The EFLAGS implicit def is dead.
    New->getOperand(3).setIsDead();

    // We are not tracking the stack pointer adjustment by the callee, so make
    // sure we restore the stack pointer immediately after the call; there may
    // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
    MachineBasicBlock::iterator B = MBB.begin();
    while (I != B && !llvm::prior(I)->isCall())
      --I;
    MBB.insert(I, New);
  }
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  unsigned i = 0;
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }

  int FrameIndex = MI.getOperand(i).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);
  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register (e.g. EBP) and add the frame object's
  // offset to the instruction's existing displacement.
  MI.getOperand(i).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(i+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(i + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(i + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset + (uint64_t)MI.getOperand(i+3).getOffset();
    MI.getOperand(i+3).setOffset(Offset);
  }
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
                                bool High) {
  switch (VT) {
  default: llvm_unreachable("Unexpected VT");
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegister(Reg, MVT::i64, High);
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: llvm_unreachable("Unexpected register");
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
670 default: llvm_unreachable("Unexpected register");
671 case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
673 case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
675 case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
677 case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
679 case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
681 case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
683 case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
685 case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
687 case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
689 case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
691 case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
693 case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
695 case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
697 case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
699 case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
701 case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
706 default: llvm_unreachable("Unexpected register");
707 case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
709 case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
711 case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
713 case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
715 case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
717 case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
719 case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
721 case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
723 case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
725 case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
727 case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
729 case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
731 case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
733 case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
735 case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
737 case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
    // For 64-bit mode, if we've requested a "high" register and the Q or r
    // constraints, we want one of these high registers; otherwise we just
    // want the register name for the requested size.
    if (High) {
      switch (Reg) {
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      // Fallthrough.
      }
    }
    switch (Reg) {
758 default: llvm_unreachable("Unexpected register");
759 case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
761 case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
763 case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
765 case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
767 case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
769 case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
771 case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
773 case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
775 case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
777 case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
779 case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
781 case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
783 case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
785 case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
787 case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
789 case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15: