//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

using namespace llvm;

cl::opt<bool>
ForceStackAlign("force-align-stack",
                 cl::desc("Force align the stack to the minimum alignment"
                          " needed for the function."),
                 cl::init(false), cl::Hidden);

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
                         ? X86::RIP : X86::EIP),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), false),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), true),
                       (tm.getSubtarget<X86Subtarget>().is64Bit()
                         ? X86::RIP : X86::EIP)),
    TM(tm), TII(tii) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }

  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires the GOT pointer to be in EBX before calls through the PLT.
  BasePtr = Is64Bit ? X86::RBX : X86::ESI;
}

/// getCompactUnwindRegNum - This function maps the register to the number for
/// compact unwind encoding. Return -1 if the register isn't valid.
int X86RegisterInfo::getCompactUnwindRegNum(unsigned RegNum, bool isEH) const {
  switch (getLLVMRegNum(RegNum, isEH)) {
  case X86::EBX: case X86::RBX: return 1;
  case X86::ECX: case X86::R12: return 2;
  case X86::EDX: case X86::R13: return 3;
  case X86::EDI: case X86::R14: return 4;
  case X86::ESI: case X86::R15: return 5;
  case X86::EBP: case X86::RBP: return 6;
  }

  return -1;
}
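
// Note: Darwin's compact unwind encoding identifies each saved callee-saved
// register with a small fixed number (0 meaning "no register"), which is why
// the mapping above starts at 1 and everything else reports -1.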

bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // Only enable when post-RA scheduling is enabled and this is needed.
  return TM.getSubtargetImpl()->postRAScheduler();
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;
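  // (In 32-bit mode only EAX, EBX, ECX and EDX have addressable 8-bit
  // sub-registers; SIL, DIL, BPL and SPL require a REX prefix. Constraining
  // through sub_8bit_hi therefore limits the result to the GR*_ABCD classes.)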

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return 0;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass*
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const {
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
                                                                         const {
  const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (Subtarget.isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    else if (Subtarget.is64Bit())
      return &X86::GR64_TCRegClass;

    const Function *F = MF.getFunction();
    bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
    if (hasHipeCC)
      return &X86::GR32RegClass;
    return &X86::GR32_TCRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
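  // (When a frame pointer is in use, EBP/RBP is unavailable to the allocator,
  // so the GPR limits below drop by one.)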
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const uint16_t *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool callsEHReturn = false;
  bool ghcCall = false;
  bool oclBiCall = false;
  bool hipeCall = false;
  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();

  if (MF) {
    callsEHReturn = MF->getMMI().callsEHReturn();
    const Function *F = MF->getFunction();
    ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
    oclBiCall = (F ? F->getCallingConv() == CallingConv::Intel_OCL_BI : false);
    hipeCall = (F ? F->getCallingConv() == CallingConv::HiPE : false);
  }

  if (ghcCall || hipeCall)
    return CSR_NoRegs_SaveList;
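
  // Note: the GHC and HiPE conventions (handled above) treat every register
  // as caller-saved, so their callee-saved list is empty.
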
  if (oclBiCall) {
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
  }

  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (callsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }

  if (callsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}

const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();

  if (CC == CallingConv::Intel_OCL_BI) {
    if (IsWin64 && HasAVX)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (Is64Bit && HasAVX)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
  }
  if (CC == CallingConv::GHC || CC == CallingConv::HiPE)
    return CSR_NoRegs_RegMask;
  if (!Is64Bit)
    return CSR_32_RegMask;
  if (IsWin64)
    return CSR_Win64_RegMask;
  return CSR_64_RegMask;
}

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  for (MCSubRegIterator I(X86::RSP, this); I.isValid(); ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  for (MCSubRegIterator I(X86::RIP, this); I.isValid(); ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    Reserved.set(X86::RBP);
    for (MCSubRegIterator I(X86::RBP, this); I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t *RegMask = getCallPreservedMask(CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
        "Stack realignment in presence of dynamic allocas is not supported "
        "with this calling convention.");

    Reserved.set(getBaseRegister());
    for (MCSubRegIterator I(getBaseRegister(), this); I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);
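
  // (The x87 stack registers cannot be individually allocated in any case:
  // they form a register stack that is managed by the FP stackifier pass.)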

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though
    // their super-registers are the old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      static const uint16_t GPR64[] = {
        X86::R8,  X86::R9,  X86::R10, X86::R11,
        X86::R12, X86::R13, X86::R14, X86::R15
      };
      for (MCRegAliasIterator AI(GPR64[n], this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      assert(X86::XMM15 == X86::XMM8+7);
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment and there are dynamic allocas, we can't
  // reference off of the stack pointer, so we reserve a base pointer.
  if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
    return true;

  return false;
}
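
// When the stack is realigned, locals are addressed relative to the realigned
// stack pointer; a dynamic alloca then moves that stack pointer at run time,
// so hasBasePointer() reserves a base register to carry the post-realignment
// stack address instead.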

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  if (!MF.getTarget().Options.RealignStack)
    return false;

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (MFI->hasVarSizedObjects())
    return MRI->canReserveReg(BasePtr);
  return true;
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
  bool requiresRealignment =
    ((MFI->getMaxAlignment() > StackAlign) ||
     F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                     Attribute::StackAlignment));
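
  // (For example, a local requiring 32-byte alignment, such as an AVX spill
  // slot, raises getMaxAlignment() above the usual 4- or 16-byte ABI stack
  // alignment and forces realignment.)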

  // If we've requested that we force align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::SUB64ri8;
    return X86::SUB64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::SUB32ri8;
    return X86::SUB32ri;
  }
}

static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
  if (is64Bit) {
    if (isInt<8>(Imm))
      return X86::ADD64ri8;
    return X86::ADD64ri32;
  } else {
    if (isInt<8>(Imm))
      return X86::ADD32ri8;
    return X86::ADD32ri;
  }
}
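
// The ri8 forms take a sign-extended 8-bit immediate and encode a few bytes
// shorter than the ri/ri32 forms, so small stack adjustments prefer them.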

void X86RegisterInfo::
eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
                              MachineBasicBlock::iterator I) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  bool reserveCallFrame = TFI->hasReservedCallFrame(MF);
  int Opcode = I->getOpcode();
  bool isDestroy = Opcode == TII.getCallFrameDestroyOpcode();
  DebugLoc DL = I->getDebugLoc();
  uint64_t Amount = !reserveCallFrame ? I->getOperand(0).getImm() : 0;
  uint64_t CalleeAmt = isDestroy ? I->getOperand(1).getImm() : 0;
  I = MBB.erase(I);

  if (!reserveCallFrame) {
    // If the stack pointer can be changed after the prologue, turn the
    // adjcallstackup instruction into a 'sub ESP, <amt>' and the
    // adjcallstackdown instruction into 'add ESP, <amt>'.
    // TODO: consider using push / pop instead of sub + store / add
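    // For example (sketch): with a 16-byte aligned stack, a call site that
    // needs 20 bytes of outgoing argument space becomes
    //   sub esp, 32    ; ADJCALLSTACKDOWN, 20 rounded up to the alignment
    //   ...
    //   add esp, 32    ; ADJCALLSTACKUP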
    if (Amount == 0)
      return;

    // We need to keep the stack aligned properly. To do this, we round the
    // amount of space needed for the outgoing arguments up to the next
    // alignment boundary.
    unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
    Amount = (Amount + StackAlign - 1) / StackAlign * StackAlign;

    MachineInstr *New = 0;
    if (Opcode == TII.getCallFrameSetupOpcode()) {
      New = BuildMI(MF, DL, TII.get(getSUBriOpcode(Is64Bit, Amount)),
                    StackPtr)
        .addReg(StackPtr)
        .addImm(Amount);
    } else {
      assert(Opcode == TII.getCallFrameDestroyOpcode());

      // Factor out the amount the callee already popped.
      Amount -= CalleeAmt;

      if (Amount) {
        unsigned Opc = getADDriOpcode(Is64Bit, Amount);
        New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
          .addReg(StackPtr).addImm(Amount);
      }
    }

    if (New) {
      // The EFLAGS implicit def is dead.
      New->getOperand(3).setIsDead();

      // Replace the pseudo instruction with a new instruction.
      MBB.insert(I, New);
    }

    return;
  }

  if (Opcode == TII.getCallFrameDestroyOpcode() && CalleeAmt) {
    // If we are performing frame pointer elimination and if the callee pops
    // something off the stack pointer, add it back. We do this until we have
    // more advanced stack pointer tracking ability.
    unsigned Opc = getSUBriOpcode(Is64Bit, CalleeAmt);
    MachineInstr *New = BuildMI(MF, DL, TII.get(Opc), StackPtr)
      .addReg(StackPtr).addImm(CalleeAmt);

    // The EFLAGS implicit def is dead.
    New->getOperand(3).setIsDead();

    // We are not tracking the stack pointer adjustment by the callee, so make
    // sure we restore the stack pointer immediately after the call; there may
    // be spill code inserted between the CALL and ADJCALLSTACKUP instructions.
    MachineBasicBlock::iterator B = MBB.begin();
    while (I != B && !llvm::prior(I)->isCall())
      --I;
    MBB.insert(I, New);
  }
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
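  // (Tail-call jumps execute after the epilogue has popped the frame pointer,
  // so FP-relative addressing is no longer valid for them.)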
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the base register and fold the frame object's offset
  // into the displacement.
  MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
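
  // E.g. (sketch): a four-operand memory reference such as [<fi#0> + 4]
  // becomes [EBP - 8] once the frame object's offset is folded into the
  // displacement below.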

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(FIOperandNum+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
      (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
}
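
// getX86SubSuperRegister - Return the sub- or super-register of Reg with the
// width given by VT; e.g. EAX with MVT::i16 yields AX, and with MVT::i8 and
// High == true yields AH.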

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
                                bool High) {
  switch (VT) {
  default: llvm_unreachable("Unexpected VT");
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegister(Reg, MVT::i64, High);
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: llvm_unreachable("Unexpected register");
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    // In 64-bit mode, if we've requested a "high" register via the Q or r
    // constraints, return one of these 16-bit registers; otherwise return
    // the full 64-bit register.
    if (High) {
      switch (Reg) {
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      }
    }

    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}
} // End llvm namespace