//===-- X86RegisterInfo.cpp - X86 Register Information -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

using namespace llvm;

cl::opt<bool>
ForceStackAlign("force-align-stack",
                 cl::desc("Force align the stack to the minimum alignment"
                          " needed for the function."),
                 cl::init(false), cl::Hidden);

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm,
                                 const TargetInstrInfo &tii)
  : X86GenRegisterInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
                         ? X86::RIP : X86::EIP),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), false),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), true),
                       (tm.getSubtarget<X86Subtarget>().is64Bit()
                         ? X86::RIP : X86::EIP)),
    TM(tm), TII(tii) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }
  // Use a callee-saved register as the base pointer.  These registers must
  // not conflict with any ABI requirements.  For example, in 32-bit mode PIC
  // requires GOT in the EBX register before function calls via PLT GOT pointer.
  BasePtr = Is64Bit ? X86::RBX : X86::ESI;
}

/// getCompactUnwindRegNum - This function maps the register to the number for
/// compact unwind encoding. Return -1 if the register isn't valid.
int X86RegisterInfo::getCompactUnwindRegNum(unsigned RegNum, bool isEH) const {
  switch (getLLVMRegNum(RegNum, isEH)) {
  case X86::EBX: case X86::RBX: return 1;
  case X86::ECX: case X86::R12: return 2;
  case X86::EDX: case X86::R13: return 3;
  case X86::EDI: case X86::R14: return 4;
  case X86::ESI: case X86::R15: return 5;
  case X86::EBP: case X86::RBP: return 6;
  }

  return -1;
}
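
/// trackLivenessAfterRegAlloc - Return true if register liveness should be
/// tracked after register allocation; only needed here when the post-RA
/// scheduler is enabled for this subtarget.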
bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // Only enable when post-RA scheduling is enabled and this is needed.
  return TM.getSubtargetImpl()->postRAScheduler();
}
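
/// getSEHRegNum - Map a register to its Win64 SEH register number, which is
/// simply the register's hardware encoding.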
int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}
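
/// getSubClassWithSubReg - Return the largest legal sub-class of RC whose
/// registers all have an Idx sub-register.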
const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}
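
/// getMatchingSuperRegClass - Return a subclass of A such that every register
/// in it has a SubIdx sub-register that is in class B.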
const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return 0;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}
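
/// getLargestLegalSuperClass - Return the largest super-class of RC that is
/// legal to use on this sub-target and has the same spill size.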
const TargetRegisterClass*
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const{
  // Don't allow super-classes of GR8_NOREX.  This class is only used after
  // extracting sub_8bit_hi sub-registers.  The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}
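
/// getPointerRegClass - Return a register class that can be used for pointer
/// values; Kind selects normal GPRs, GPRs excluding the stack pointer, or the
/// tail-call register set.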
const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
                                                                         const {
  const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (Subtarget.isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    else if (Subtarget.is64Bit())
      return &X86::GR64_TCRegClass;

    const Function *F = MF.getFunction();
    bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
    if (hasHipeCC)
      return &X86::GR32RegClass;
    return &X86::GR32_TCRegClass;
  }
}
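
/// getCrossCopyRegClass - Returns a legal register class to copy a register
/// in the specified class to or from. The x86 flags register (the CCR class)
/// is copied via a GPR of the native word size.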
const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}
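
/// getRegPressureLimit - Return the register pressure "high water mark" for
/// the specific register class. The scheduler is in high register pressure
/// mode (for the specific register class) if it goes over the limit.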
unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}
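
/// getCalleeSavedRegs - Return a null-terminated list of the callee-saved
/// registers for the function, chosen by calling convention and subtarget.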
const uint16_t *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  switch (MF->getFunction()->getCallingConv()) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;

  case CallingConv::Intel_OCL_BI: {
    bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }

  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_MostRegs_64_SaveList;
    break;

  default:
    break;
  }

  bool CallsEHReturn = MF->getMMI().callsEHReturn();
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (CallsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}
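
/// getCallPreservedMask - Return a mask of the call-preserved registers for a
/// call with the given calling convention.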
const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();

  if (CC == CallingConv::Intel_OCL_BI) {
    if (IsWin64 && HasAVX)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (Is64Bit && HasAVX)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
  }
  if (CC == CallingConv::GHC || CC == CallingConv::HiPE)
    return CSR_NoRegs_RegMask;
  if (!Is64Bit)
    return CSR_32_RegMask;
  if (CC == CallingConv::Cold)
    return CSR_MostRegs_64_RegMask;
  if (IsWin64)
    return CSR_Win64_RegMask;
  return CSR_64_RegMask;
}
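
/// getNoPreservedMask - Return a register mask that preserves no registers.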
const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}
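
/// getReservedRegs - Return a bitset indexed by physical register number
/// indicating which registers the register allocator must not use in this
/// function.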
BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  Reserved.set(X86::RSP);
  for (MCSubRegIterator I(X86::RSP, this); I.isValid(); ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  Reserved.set(X86::RIP);
  for (MCSubRegIterator I(X86::RIP, this); I.isValid(); ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    Reserved.set(X86::RBP);
    for (MCSubRegIterator I(X86::RBP, this); I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t* RegMask = getCallPreservedMask(CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
        "Stack realignment in presence of dynamic allocas is not supported"
        " with this calling convention.");

    Reserved.set(getBaseRegister());
    for (MCSubRegIterator I(getBaseRegister(), this); I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  Reserved.set(X86::ST0);
  Reserved.set(X86::ST1);
  Reserved.set(X86::ST2);
  Reserved.set(X86::ST3);
  Reserved.set(X86::ST4);
  Reserved.set(X86::ST5);
  Reserved.set(X86::ST6);
  Reserved.set(X86::ST7);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though
    // their super-registers are the old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      static const uint16_t GPR64[] = {
        X86::R8,  X86::R9,  X86::R10, X86::R11,
        X86::R12, X86::R13, X86::R14, X86::R15
      };
      for (MCRegAliasIterator AI(GPR64[n], this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      assert(X86::XMM15 == X86::XMM8+7);
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//
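
/// hasBasePointer - Return true if this function needs a dedicated base
/// pointer register to reference stack objects.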
bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment and there are dynamic allocas, we can't
  // reference off of the stack pointer, so we reserve a base pointer.
  //
  // This is also true if the function contains MS-style inline assembly. We
  // do this because if any stack changes occur in the inline assembly, e.g.,
  // "pusha", then any C local variable or C argument references in the
  // inline assembly will be wrong because the SP is not properly tracked.
  if ((needsStackRealignment(MF) && MFI->hasVarSizedObjects()) ||
      MF.hasMSInlineAsm())
    return true;

  return false;
}
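
/// canRealignStack - Return true if stack realignment is still possible, i.e.
/// the frame pointer (and the base pointer, if needed) can still be reserved.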
bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  if (!MF.getTarget().Options.RealignStack)
    return false;

  // Stack realignment requires a frame pointer.  If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (MFI->hasVarSizedObjects())
    return MRI->canReserveReg(BasePtr);
  return true;
}
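
/// needsStackRealignment - Return true if the function's required stack
/// alignment exceeds the default stack alignment, or realignment was forced.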
bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
  bool requiresRealignment =
    ((MFI->getMaxAlignment() > StackAlign) ||
     F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                     Attribute::StackAlignment));

  // If we've requested that we force-align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}
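
/// hasReservedSpillSlot - Return true if Reg has a fixed spill slot; on x86
/// the pushed frame pointer occupies the first fixed stack object.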
bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}
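
/// eliminateFrameIndex - Rewrite a frame-index operand in MI into a concrete
/// [base register + offset] memory reference once the frame layout is known.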
void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference.  Replace the
  // FrameIndex with the base register.  Add an offset to the offset.
  MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from EBP.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (MI.getOperand(FIOperandNum+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
      (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}
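
/// getFrameRegister - Return the register used to address stack objects: the
/// frame pointer if this function has one, otherwise the stack pointer.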
unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned X86RegisterInfo::getEHExceptionRegister() const {
  llvm_unreachable("What is the exception register");
}

unsigned X86RegisterInfo::getEHHandlerRegister() const {
  llvm_unreachable("What is the exception handler register");
}
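
/// getX86SubSuperRegister - X86 utility function; returns the sub- or
/// super-register of Reg with the width given by VT, e.g. RAX with MVT::i32
/// yields EAX. For 8-bit registers, High selects the high byte (e.g. AH).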
namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
                                bool High) {
  switch (VT) {
  default: llvm_unreachable("Unexpected VT");
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegister(Reg, MVT::i64);
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: llvm_unreachable("Unexpected register");
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}
} // End llvm namespace