//===-- X86RegisterInfo.cpp - X86 Register Information --------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//

#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

using namespace llvm;

static cl::opt<bool>
ForceStackAlign("force-align-stack",
                 cl::desc("Force align the stack to the minimum alignment"
                          " needed for the function."),
                 cl::init(false), cl::Hidden);

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));

X86RegisterInfo::X86RegisterInfo(X86TargetMachine &tm)
  : X86GenRegisterInfo((tm.getSubtarget<X86Subtarget>().is64Bit()
                         ? X86::RIP : X86::EIP),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), false),
                       X86_MC::getDwarfRegFlavour(tm.getTargetTriple(), true),
                       (tm.getSubtarget<X86Subtarget>().is64Bit()
                         ? X86::RIP : X86::EIP)),
    TM(tm) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  const X86Subtarget *Subtarget = &TM.getSubtarget<X86Subtarget>();
  Is64Bit = Subtarget->is64Bit();
  IsWin64 = Subtarget->isTargetWin64();

  if (Is64Bit) {
    SlotSize = 8;
    StackPtr = X86::RSP;
    FramePtr = X86::RBP;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
  }

  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires GOT in the EBX register before function calls via PLT GOT pointer.
  BasePtr = Is64Bit ? X86::RBX : X86::ESI;
}

/// getCompactUnwindRegNum - This function maps the register to the number for
/// compact unwind encoding. Return -1 if the register isn't valid.
int X86RegisterInfo::getCompactUnwindRegNum(unsigned RegNum, bool isEH) const {
  switch (getLLVMRegNum(RegNum, isEH)) {
  case X86::EBX: case X86::RBX: return 1;
  case X86::ECX: case X86::R12: return 2;
  case X86::EDX: case X86::R13: return 3;
  case X86::EDI: case X86::R14: return 4;
  case X86::ESI: case X86::R15: return 5;
  case X86::EBP: case X86::RBP: return 6;
  }

  return -1;
}

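// For example, getCompactUnwindRegNum encodes a saved RBX (or EBX in 32-bit
// mode) as 1, while a register with no compact unwind slot, such as RAX,
// falls through the switch and yields -1.
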
bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // ExeDepsFixer and PostRAScheduler require liveness.
  return true;
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}

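// For example, in 32-bit mode getSubClassWithSubReg(GR32, sub_8bit) remaps the
// index to sub_8bit_hi and therefore returns GR32_ABCD, since only
// EAX/ECX/EDX/EBX have addressable 8-bit sub-registers there.
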
const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return 0;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass*
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const{
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF, unsigned Kind)
                                                                         const {
  const X86Subtarget &Subtarget = TM.getSubtarget<X86Subtarget>();
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (Subtarget.isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    else if (Subtarget.is64Bit())
      return &X86::GR64_TCRegClass;

    const Function *F = MF.getFunction();
    bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
    if (hasHipeCC)
      return &X86::GR32RegClass;
    return &X86::GR32_TCRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return TM.getSubtarget<X86Subtarget>().is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}

const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
  bool HasAVX512 = TM.getSubtarget<X86Subtarget>().hasAVX512();

  assert(MF && "MachineFunction required");
  switch (MF->getFunction()->getCallingConv()) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_SaveList;
    return CSR_64_AllRegs_SaveList;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_SaveList;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_SaveList;
    return CSR_64_RT_AllRegs_SaveList;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_SaveList;
    break;
  default:
    break;
  }

  bool CallsEHReturn = MF->getMMI().callsEHReturn();
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (CallsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}

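// In the common case (a 64-bit SysV target, C calling convention, no
// eh.return), getCalleeSavedRegs returns CSR_64_SaveList, i.e. RBX, R12-R15
// and RBP; the save lists themselves are generated from X86CallingConv.td.
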
const uint32_t*
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
  bool HasAVX = TM.getSubtarget<X86Subtarget>().hasAVX();
  bool HasAVX512 = TM.getSubtarget<X86Subtarget>().hasAVX512();

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_RegMask;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_RegMask;
    return CSR_64_AllRegs_RegMask;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_RegMask;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_RegMask;
    return CSR_64_RT_AllRegs_RegMask;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
    break;
  }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_RegMask;
    break;
  default:
    break;
  }

  // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
  // callsEHReturn().
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_RegMask;
    return CSR_64_RegMask;
  }
  return CSR_32_RegMask;
}

const uint32_t*
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
         ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t* RegMask = getCallPreservedMask(CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
        "Stack realignment in presence of dynamic allocas is not supported "
        "with this calling convention.");

    for (MCSubRegIterator I(getBaseRegister(), this, /*IncludeSelf=*/true);
         I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  for (unsigned n = 0; n != 8; ++n)
    Reserved.set(X86::ST0 + n);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though their
    // super-registers are the old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }
  if (!Is64Bit || !TM.getSubtarget<X86Subtarget>().hasAVX512()) {
    for (unsigned n = 16; n != 32; ++n) {
      for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment, we can't address the stack from the frame
  // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
  // can't address variables from the stack pointer. MS inline asm can
  // reference locals while also adjusting the stack pointer. When we can't
  // use both the SP and the FP, we need a separate base pointer register.
  bool CantUseFP = needsStackRealignment(MF);
  bool CantUseSP =
      MFI->hasVarSizedObjects() || MFI->hasInlineAsmWithSPAdjust();
  return CantUseFP && CantUseSP;
}

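// A typical function that hasBasePointer() accepts is one that both requires
// stack realignment (e.g. an over-aligned local, so the frame pointer cannot
// address locals) and contains a variable-sized alloca (so the stack pointer
// cannot address them either); the base register then anchors fixed-offset
// accesses.
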
bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  if (MF.getFunction()->hasFnAttribute("no-realign-stack"))
    return false;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (MFI->hasVarSizedObjects())
    return MRI->canReserveReg(BasePtr);

  return true;
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign = TM.getFrameLowering()->getStackAlignment();
  bool requiresRealignment =
    ((MFI->getMaxAlignment() > StackAlign) ||
     F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                     Attribute::StackAlignment));

  // If we've requested that we force align the stack, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}

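// For example, a function whose largest local is a 32-byte-aligned AVX spill
// slot needs realignment when the ABI only guarantees a 16-byte-aligned
// incoming stack, as does any function carrying an explicit alignstack
// attribute.
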
bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();

  if (Reg == FramePtr && TFI->hasFP(MF)) {
    FrameIdx = MF.getFrameInfo()->getObjectIndexBegin();
    return true;
  }
  return false;
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  assert(SPAdj == 0 && "Unexpected");

  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // This must be part of a four operand memory reference. Replace the
  // FrameIndex with the chosen base register. Add an offset to the offset.
  MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from the base register.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  // The frame index format for stackmaps and patchpoints is different from the
  // X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  if (MI.getOperand(FIOperandNum+3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
      (uint64_t)MI.getOperand(FIOperandNum+3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}

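// As a hypothetical illustration of the rewrite in eliminateFrameIndex: for a
// load such as
//   %eax = MOV32rm <fi#2>, 1, %noreg, 4, %noreg
// where fi#2 resolves to an offset of, say, -16 from the chosen base register,
// the frame-index operand becomes %ebp and the displacement operand becomes
// -16 + 4 = -12, giving:
//   %eax = MOV32rm %ebp, 1, %noreg, -12, %noreg
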
unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
                                bool High) {
  switch (VT) {
  default: llvm_unreachable("Unexpected VT");
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegister(Reg, MVT::i64);
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: llvm_unreachable("Unexpected register");
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}

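// For example, getX86SubSuperRegister(X86::AL, MVT::i32) returns X86::EAX,
// and with High == true, getX86SubSuperRegister(X86::EAX, MVT::i8) returns
// X86::AH.
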
unsigned get512BitSuperRegister(unsigned Reg) {
  if (Reg >= X86::XMM0 && Reg <= X86::XMM31)
    return X86::ZMM0 + (Reg - X86::XMM0);
  if (Reg >= X86::YMM0 && Reg <= X86::YMM31)
    return X86::ZMM0 + (Reg - X86::YMM0);
  if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31)
    return Reg;
  llvm_unreachable("Unexpected SIMD register");
}

} // End llvm namespace