//===-- X86RegisterInfo.cpp - X86 Register Information -------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the X86 implementation of the TargetRegisterInfo class.
// This file is responsible for the frame pointer elimination optimization
// on X86.
//
//===----------------------------------------------------------------------===//
#include "X86RegisterInfo.h"
#include "X86InstrBuilder.h"
#include "X86MachineFunctionInfo.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineValueType.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;

#define GET_REGINFO_TARGET_DESC
#include "X86GenRegisterInfo.inc"

cl::opt<bool>
ForceStackAlign("force-align-stack",
                cl::desc("Force align the stack to the minimum alignment"
                         " needed for the function."),
                cl::init(false), cl::Hidden);

static cl::opt<bool>
EnableBasePointer("x86-use-base-pointer", cl::Hidden, cl::init(true),
          cl::desc("Enable use of a base pointer for complex stack frames"));
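
// For reference, these options can be toggled from the llc command line, e.g.
// "llc -force-align-stack" or "llc -x86-use-base-pointer=false" (illustrative
// invocations; the flag names come from the cl::opt declarations above).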

X86RegisterInfo::X86RegisterInfo(const X86Subtarget &STI)
    : X86GenRegisterInfo(
          (STI.is64Bit() ? X86::RIP : X86::EIP),
          X86_MC::getDwarfRegFlavour(STI.getTargetTriple(), false),
          X86_MC::getDwarfRegFlavour(STI.getTargetTriple(), true),
          (STI.is64Bit() ? X86::RIP : X86::EIP)),
      Subtarget(STI) {
  X86_MC::InitLLVM2SEHRegisterMapping(this);

  // Cache some information.
  Is64Bit = Subtarget.is64Bit();
  IsWin64 = Subtarget.isTargetWin64();

  // Use a callee-saved register as the base pointer. These registers must
  // not conflict with any ABI requirements. For example, in 32-bit mode PIC
  // requires GOT in the EBX register before function calls via PLT GOT pointer.
  if (Is64Bit) {
    SlotSize = 8;
    bool Use64BitReg =
        Subtarget.isTarget64BitLP64() || Subtarget.isTargetNaCl64();
    StackPtr = Use64BitReg ? X86::RSP : X86::ESP;
    FramePtr = Use64BitReg ? X86::RBP : X86::EBP;
    BasePtr = Use64BitReg ? X86::RBX : X86::EBX;
  } else {
    SlotSize = 4;
    StackPtr = X86::ESP;
    FramePtr = X86::EBP;
    BasePtr = X86::ESI;
  }
}
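
// Note: LP64 targets end up with RSP/RBP/RBX here, while 32-bit targets use
// ESP/EBP with ESI as the base pointer, so that EBX stays free for the PIC
// GOT pointer mentioned above.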

bool
X86RegisterInfo::trackLivenessAfterRegAlloc(const MachineFunction &MF) const {
  // ExeDepsFixer and PostRAScheduler require liveness.
  return true;
}

int
X86RegisterInfo::getSEHRegNum(unsigned i) const {
  return getEncodingValue(i);
}
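
// For example, getSEHRegNum(X86::RBX) yields 3, RBX's hardware encoding,
// which is the numbering that Win64 SEH unwind info expects.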

const TargetRegisterClass *
X86RegisterInfo::getSubClassWithSubReg(const TargetRegisterClass *RC,
                                       unsigned Idx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  // It behaves just like the sub_8bit_hi index.
  if (!Is64Bit && Idx == X86::sub_8bit)
    Idx = X86::sub_8bit_hi;

  // Forward to TableGen's default version.
  return X86GenRegisterInfo::getSubClassWithSubReg(RC, Idx);
}
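
// Rationale: without a REX prefix only AL, CL, DL and BL (plus AH, CH, DH,
// BH) are encodable byte registers, and 32-bit mode has no REX, so the set of
// registers with a sub_8bit sub-register is exactly the set with a
// sub_8bit_hi one.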

const TargetRegisterClass *
X86RegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
                                          const TargetRegisterClass *B,
                                          unsigned SubIdx) const {
  // The sub_8bit sub-register index is more constrained in 32-bit mode.
  if (!Is64Bit && SubIdx == X86::sub_8bit) {
    A = X86GenRegisterInfo::getSubClassWithSubReg(A, X86::sub_8bit_hi);
    if (!A)
      return nullptr;
  }
  return X86GenRegisterInfo::getMatchingSuperRegClass(A, B, SubIdx);
}

const TargetRegisterClass *
X86RegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC) const {
  // Don't allow super-classes of GR8_NOREX. This class is only used after
  // extracting sub_8bit_hi sub-registers. The H sub-registers cannot be copied
  // to the full GR8 register class in 64-bit mode, so we cannot allow the
  // register class inflation.
  //
  // The GR8_NOREX class is always used in a way that won't be constrained to a
  // sub-class, so sub-classes like GR8_ABCD_L are allowed to expand to the
  // full GR8 class.
  if (RC == &X86::GR8_NOREXRegClass)
    return RC;

  const TargetRegisterClass *Super = RC;
  TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
  do {
    switch (Super->getID()) {
    case X86::GR8RegClassID:
    case X86::GR16RegClassID:
    case X86::GR32RegClassID:
    case X86::GR64RegClassID:
    case X86::FR32RegClassID:
    case X86::FR64RegClassID:
    case X86::RFP32RegClassID:
    case X86::RFP64RegClassID:
    case X86::RFP80RegClassID:
    case X86::VR128RegClassID:
    case X86::VR256RegClassID:
      // Don't return a super-class that would shrink the spill size.
      // That can happen with the vector and float classes.
      if (Super->getSize() == RC->getSize())
        return Super;
    }
    Super = *I++;
  } while (Super);
  return RC;
}
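
// For instance, GR32_ABCD can safely be inflated to GR32 since both spill 4
// bytes, whereas the float and vector classes (FR32/FR64 vs. VR128/VR256)
// have differing spill sizes, which is what the getSize() check above guards
// against (illustrative examples, not an exhaustive list).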

const TargetRegisterClass *
X86RegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                    unsigned Kind) const {
  switch (Kind) {
  default: llvm_unreachable("Unexpected Kind in getPointerRegClass!");
  case 0: // Normal GPRs.
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64RegClass;
    return &X86::GR32RegClass;
  case 1: // Normal GPRs except the stack pointer (for encoding reasons).
    if (Subtarget.isTarget64BitLP64())
      return &X86::GR64_NOSPRegClass;
    return &X86::GR32_NOSPRegClass;
  case 2: // Available for tailcall (not callee-saved GPRs).
    if (Subtarget.isTargetWin64())
      return &X86::GR64_TCW64RegClass;
    else if (Subtarget.is64Bit())
      return &X86::GR64_TCRegClass;

    const Function *F = MF.getFunction();
    bool hasHipeCC = (F ? F->getCallingConv() == CallingConv::HiPE : false);
    if (hasHipeCC)
      return &X86::GR32RegClass;
    return &X86::GR32_TCRegClass;
  }
}

const TargetRegisterClass *
X86RegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
  if (RC == &X86::CCRRegClass) {
    if (Is64Bit)
      return &X86::GR64RegClass;
    else
      return &X86::GR32RegClass;
  }
  return RC;
}
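
// In other words, EFLAGS (the CCR class) cannot be copied directly between
// arbitrary classes, so cross-class copies of condition codes are staged
// through a general-purpose register of the native width.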

unsigned
X86RegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
                                     MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  unsigned FPDiff = TFI->hasFP(MF) ? 1 : 0;
  switch (RC->getID()) {
  default:
    return 0;
  case X86::GR32RegClassID:
    return 4 - FPDiff;
  case X86::GR64RegClassID:
    return 12 - FPDiff;
  case X86::VR128RegClassID:
    return Subtarget.is64Bit() ? 10 : 4;
  case X86::VR64RegClassID:
    return 4;
  }
}
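
// Given the limits above, e.g. x86-64 code compiled with a frame pointer gets
// a GR64 pressure limit of 12 - 1 = 11 registers before the scheduler treats
// register pressure as excessive.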

const MCPhysReg *
X86RegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
  assert(MF && "MachineFunction required");

  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();
  bool CallsEHReturn = MF->getMMI().callsEHReturn();

  switch (MF->getFunction()->getCallingConv()) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_SaveList;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_SaveList;
    return CSR_64_AllRegs_SaveList;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_SaveList;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_SaveList;
    return CSR_64_RT_AllRegs_SaveList;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_SaveList;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_SaveList;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_SaveList;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_SaveList;
    break;
  }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_SaveList;
    break;
  case CallingConv::X86_64_Win64:
    return CSR_Win64_SaveList;
  case CallingConv::X86_64_SysV:
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  default:
    break;
  }

  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_SaveList;
    if (CallsEHReturn)
      return CSR_64EHRet_SaveList;
    return CSR_64_SaveList;
  }
  if (CallsEHReturn)
    return CSR_32EHRet_SaveList;
  return CSR_32_SaveList;
}
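
// The CSR_*_SaveList tables (and the CSR_*_RegMask tables below) are emitted
// by TableGen into X86GenRegisterInfo.inc from the CalleeSavedRegs
// definitions in X86CallingConv.td.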

const uint32_t *
X86RegisterInfo::getCallPreservedMask(CallingConv::ID CC) const {
  bool HasAVX = Subtarget.hasAVX();
  bool HasAVX512 = Subtarget.hasAVX512();

  switch (CC) {
  case CallingConv::GHC:
  case CallingConv::HiPE:
    return CSR_NoRegs_RegMask;
  case CallingConv::AnyReg:
    if (HasAVX)
      return CSR_64_AllRegs_AVX_RegMask;
    return CSR_64_AllRegs_RegMask;
  case CallingConv::PreserveMost:
    return CSR_64_RT_MostRegs_RegMask;
  case CallingConv::PreserveAll:
    if (HasAVX)
      return CSR_64_RT_AllRegs_AVX_RegMask;
    return CSR_64_RT_AllRegs_RegMask;
  case CallingConv::Intel_OCL_BI: {
    if (HasAVX512 && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX512 && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX512_RegMask;
    if (HasAVX && IsWin64)
      return CSR_Win64_Intel_OCL_BI_AVX_RegMask;
    if (HasAVX && Is64Bit)
      return CSR_64_Intel_OCL_BI_AVX_RegMask;
    if (!HasAVX && !IsWin64 && Is64Bit)
      return CSR_64_Intel_OCL_BI_RegMask;
    break;
  }
  case CallingConv::Cold:
    if (Is64Bit)
      return CSR_64_MostRegs_RegMask;
    break;
  case CallingConv::X86_64_Win64:
    return CSR_Win64_RegMask;
  case CallingConv::X86_64_SysV:
    return CSR_64_RegMask;
  default:
    break;
  }

  // Unlike getCalleeSavedRegs(), we don't have MMI so we can't check
  // callsEHReturn().
  if (Is64Bit) {
    if (IsWin64)
      return CSR_Win64_RegMask;
    return CSR_64_RegMask;
  }
  return CSR_32_RegMask;
}

const uint32_t *
X86RegisterInfo::getNoPreservedMask() const {
  return CSR_NoRegs_RegMask;
}

BitVector X86RegisterInfo::getReservedRegs(const MachineFunction &MF) const {
  BitVector Reserved(getNumRegs());
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();

  // Set the stack-pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RSP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the instruction pointer register and its aliases as reserved.
  for (MCSubRegIterator I(X86::RIP, this, /*IncludeSelf=*/true); I.isValid();
       ++I)
    Reserved.set(*I);

  // Set the frame-pointer register and its aliases as reserved if needed.
  if (TFI->hasFP(MF)) {
    for (MCSubRegIterator I(X86::RBP, this, /*IncludeSelf=*/true); I.isValid();
         ++I)
      Reserved.set(*I);
  }

  // Set the base-pointer register and its aliases as reserved if needed.
  if (hasBasePointer(MF)) {
    CallingConv::ID CC = MF.getFunction()->getCallingConv();
    const uint32_t *RegMask = getCallPreservedMask(CC);
    if (MachineOperand::clobbersPhysReg(RegMask, getBaseRegister()))
      report_fatal_error(
        "Stack realignment in presence of dynamic allocas is not supported"
        " with this calling convention.");

    unsigned BasePtr = getX86SubSuperRegister(getBaseRegister(), MVT::i64,
                                              false);
    for (MCSubRegIterator I(BasePtr, this, /*IncludeSelf=*/true);
         I.isValid(); ++I)
      Reserved.set(*I);
  }

  // Mark the segment registers as reserved.
  Reserved.set(X86::CS);
  Reserved.set(X86::SS);
  Reserved.set(X86::DS);
  Reserved.set(X86::ES);
  Reserved.set(X86::FS);
  Reserved.set(X86::GS);

  // Mark the floating point stack registers as reserved.
  for (unsigned n = 0; n != 8; ++n)
    Reserved.set(X86::ST0 + n);

  // Reserve the registers that only exist in 64-bit mode.
  if (!Is64Bit) {
    // These 8-bit registers are part of the x86-64 extension even though
    // their super-registers are the old 32-bit registers.
    Reserved.set(X86::SIL);
    Reserved.set(X86::DIL);
    Reserved.set(X86::BPL);
    Reserved.set(X86::SPL);

    for (unsigned n = 0; n != 8; ++n) {
      // R8, R9, ...
      for (MCRegAliasIterator AI(X86::R8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);

      // XMM8, XMM9, ...
      for (MCRegAliasIterator AI(X86::XMM8 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }
  if (!Is64Bit || !Subtarget.hasAVX512()) {
    for (unsigned n = 16; n != 32; ++n) {
      for (MCRegAliasIterator AI(X86::XMM0 + n, this, true); AI.isValid(); ++AI)
        Reserved.set(*AI);
    }
  }

  return Reserved;
}
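
// Net effect in 32-bit mode: R8-R15, XMM8-XMM31 and the REX-only 8-bit
// registers are all reserved, so the register allocator never hands them out;
// without AVX-512, XMM16-XMM31 stay reserved even in 64-bit mode.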

//===----------------------------------------------------------------------===//
// Stack Frame Processing methods
//===----------------------------------------------------------------------===//

bool X86RegisterInfo::hasBasePointer(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();

  if (!EnableBasePointer)
    return false;

  // When we need stack realignment, we can't address the stack from the frame
  // pointer. When we have dynamic allocas or stack-adjusting inline asm, we
  // can't address variables from the stack pointer. MS inline asm can
  // reference locals while also adjusting the stack pointer. When we can't
  // use both the SP and the FP, we need a separate base pointer register.
  bool CantUseFP = needsStackRealignment(MF);
  bool CantUseSP =
      MFI->hasVarSizedObjects() || MFI->hasInlineAsmWithSPAdjust();
  return CantUseFP && CantUseSP;
}
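
// Typical trigger: a function that both over-aligns its frame (forcing stack
// realignment, so the frame pointer can't address locals) and contains a
// variable-length alloca (so the stack pointer keeps moving); only then does
// it need the dedicated base pointer.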

bool X86RegisterInfo::canRealignStack(const MachineFunction &MF) const {
  if (MF.getFunction()->hasFnAttribute("no-realign-stack"))
    return false;

  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const MachineRegisterInfo *MRI = &MF.getRegInfo();

  // Stack realignment requires a frame pointer. If we already started
  // register allocation with frame pointer elimination, it is too late now.
  if (!MRI->canReserveReg(FramePtr))
    return false;

  // If a base pointer is necessary, check that it isn't too late to reserve
  // it.
  if (MFI->hasVarSizedObjects())
    return MRI->canReserveReg(BasePtr);

  return true;
}

bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  const Function *F = MF.getFunction();
  unsigned StackAlign =
      MF.getSubtarget().getFrameLowering()->getStackAlignment();
  bool requiresRealignment = ((MFI->getMaxAlignment() > StackAlign) ||
                              F->hasFnAttribute(Attribute::StackAlignment));

  // If we've been asked to force stack alignment, do so now.
  if (ForceStackAlign)
    return canRealignStack(MF);

  return requiresRealignment && canRealignStack(MF);
}
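
// Example: a function holding a 32-byte aligned AVX spill slot on an ABI with
// 16-byte stack alignment has getMaxAlignment() == 32 > 16, so the prologue
// must realign the stack (provided canRealignStack() allows it).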

bool X86RegisterInfo::hasReservedSpillSlot(const MachineFunction &MF,
                                           unsigned Reg, int &FrameIdx) const {
  // Since X86 defines assignCalleeSavedSpillSlots, which always returns true,
  // this function is neither used nor tested.
  llvm_unreachable("Unused function on X86. Otherwise need a test case.");
}

void
X86RegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                     int SPAdj, unsigned FIOperandNum,
                                     RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();

  unsigned BasePtr;

  unsigned Opc = MI.getOpcode();
  bool AfterFPPop = Opc == X86::TAILJMPm64 || Opc == X86::TAILJMPm;
  if (hasBasePointer(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : getBaseRegister());
  else if (needsStackRealignment(MF))
    BasePtr = (FrameIndex < 0 ? FramePtr : StackPtr);
  else if (AfterFPPop)
    BasePtr = StackPtr;
  else
    BasePtr = (TFI->hasFP(MF) ? FramePtr : StackPtr);

  // For LEA64_32r, when BasePtr is 32 bits (X32) we can use the full-size
  // 64-bit register as the source operand: the semantics are the same and the
  // destination is still 32 bits. This saves one byte per LEA since the 0x67
  // address-size prefix is avoided.
  if (Opc == X86::LEA64_32r && X86::GR32RegClass.contains(BasePtr))
    BasePtr = getX86SubSuperRegister(BasePtr, MVT::i64, false);

  // This must be part of a five-operand memory reference (base, scale amount,
  // index, displacement, segment). Replace the FrameIndex operand with the
  // chosen base register and fold the frame object's offset into the
  // displacement.
  MI.getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);

  // Now add the frame object offset to the offset from the base register.
  int FIOffset;
  if (AfterFPPop) {
    // Tail call jmp happens after FP is popped.
    const MachineFrameInfo *MFI = MF.getFrameInfo();
    FIOffset = MFI->getObjectOffset(FrameIndex) - TFI->getOffsetOfLocalArea();
  } else
    FIOffset = TFI->getFrameIndexOffset(MF, FrameIndex);

  if (BasePtr == StackPtr)
    FIOffset += SPAdj;

  // The frame index format for stackmaps and patchpoints is different from the
  // X86 format. It only has a FI and an offset.
  if (Opc == TargetOpcode::STACKMAP || Opc == TargetOpcode::PATCHPOINT) {
    assert(BasePtr == FramePtr && "Expected the FP as base register");
    int64_t Offset = MI.getOperand(FIOperandNum + 1).getImm() + FIOffset;
    MI.getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  if (MI.getOperand(FIOperandNum + 3).isImm()) {
    // Offset is a 32-bit integer.
    int Imm = (int)(MI.getOperand(FIOperandNum + 3).getImm());
    int Offset = FIOffset + Imm;
    assert((!Is64Bit || isInt<32>((long long)FIOffset + Imm)) &&
           "Requesting 64-bit offset in 32-bit immediate!");
    MI.getOperand(FIOperandNum + 3).ChangeToImmediate(Offset);
  } else {
    // Offset is symbolic. This is extremely rare.
    uint64_t Offset = FIOffset +
      (uint64_t)MI.getOperand(FIOperandNum + 3).getOffset();
    MI.getOperand(FIOperandNum + 3).setOffset(Offset);
  }
}
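
// Illustrative (hypothetical) rewrite: a reload
//   %eax = MOV32rm %stack.0, 1, %noreg, 0, %noreg
// whose frame object sits at offset -8 from EBP becomes
//   %eax = MOV32rm %ebp, 1, %noreg, -8, %noreg
// once the frame index and displacement operands are rewritten above.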

unsigned X86RegisterInfo::getFrameRegister(const MachineFunction &MF) const {
  const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
  return TFI->hasFP(MF) ? FramePtr : StackPtr;
}

unsigned X86RegisterInfo::getPtrSizedFrameRegister(
    const MachineFunction &MF) const {
  unsigned FrameReg = getFrameRegister(MF);
  if (Subtarget.isTarget64BitILP32())
    FrameReg = getX86SubSuperRegister(FrameReg, MVT::i32, false);
  return FrameReg;
}
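
// On an x32 (ILP32-on-64-bit) target the frame register is RBP or RSP, and
// this returns the matching 32-bit alias (EBP or ESP), the width that
// pointers actually have there.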

namespace llvm {
unsigned getX86SubSuperRegister(unsigned Reg, MVT::SimpleValueType VT,
                                bool High) {
  switch (VT) {
  default: llvm_unreachable("Unexpected VT");
  case MVT::i8:
    if (High) {
      switch (Reg) {
      default: return getX86SubSuperRegister(Reg, MVT::i64);
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SI;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DI;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BP;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SP;
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AH;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DH;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CH;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BH;
      }
    } else {
      switch (Reg) {
      default: llvm_unreachable("Unexpected register");
      case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
        return X86::AL;
      case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
        return X86::DL;
      case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
        return X86::CL;
      case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
        return X86::BL;
      case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
        return X86::SIL;
      case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
        return X86::DIL;
      case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
        return X86::BPL;
      case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
        return X86::SPL;
      case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
        return X86::R8B;
      case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
        return X86::R9B;
      case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
        return X86::R10B;
      case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
        return X86::R11B;
      case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
        return X86::R12B;
      case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
        return X86::R13B;
      case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
        return X86::R14B;
      case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
        return X86::R15B;
      }
    }
  case MVT::i16:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::AX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::DX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::CX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::BX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::SI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::DI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::BP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::SP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8W;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9W;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10W;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11W;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12W;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13W;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14W;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15W;
    }
  case MVT::i32:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::EAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::EDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::ECX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::EBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::ESI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::EDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::EBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::ESP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8D;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9D;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10D;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11D;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12D;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13D;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14D;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15D;
    }
  case MVT::i64:
    switch (Reg) {
    default: llvm_unreachable("Unexpected register");
    case X86::AH: case X86::AL: case X86::AX: case X86::EAX: case X86::RAX:
      return X86::RAX;
    case X86::DH: case X86::DL: case X86::DX: case X86::EDX: case X86::RDX:
      return X86::RDX;
    case X86::CH: case X86::CL: case X86::CX: case X86::ECX: case X86::RCX:
      return X86::RCX;
    case X86::BH: case X86::BL: case X86::BX: case X86::EBX: case X86::RBX:
      return X86::RBX;
    case X86::SIL: case X86::SI: case X86::ESI: case X86::RSI:
      return X86::RSI;
    case X86::DIL: case X86::DI: case X86::EDI: case X86::RDI:
      return X86::RDI;
    case X86::BPL: case X86::BP: case X86::EBP: case X86::RBP:
      return X86::RBP;
    case X86::SPL: case X86::SP: case X86::ESP: case X86::RSP:
      return X86::RSP;
    case X86::R8B: case X86::R8W: case X86::R8D: case X86::R8:
      return X86::R8;
    case X86::R9B: case X86::R9W: case X86::R9D: case X86::R9:
      return X86::R9;
    case X86::R10B: case X86::R10W: case X86::R10D: case X86::R10:
      return X86::R10;
    case X86::R11B: case X86::R11W: case X86::R11D: case X86::R11:
      return X86::R11;
    case X86::R12B: case X86::R12W: case X86::R12D: case X86::R12:
      return X86::R12;
    case X86::R13B: case X86::R13W: case X86::R13D: case X86::R13:
      return X86::R13;
    case X86::R14B: case X86::R14W: case X86::R14D: case X86::R14:
      return X86::R14;
    case X86::R15B: case X86::R15W: case X86::R15D: case X86::R15:
      return X86::R15;
    }
  }
}
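
// For example, getX86SubSuperRegister(X86::EAX, MVT::i8, /*High=*/true)
// returns X86::AH, and getX86SubSuperRegister(X86::R10B, MVT::i64) returns
// X86::R10.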

unsigned get512BitSuperRegister(unsigned Reg) {
  if (Reg >= X86::XMM0 && Reg <= X86::XMM31)
    return X86::ZMM0 + (Reg - X86::XMM0);
  if (Reg >= X86::YMM0 && Reg <= X86::YMM31)
    return X86::ZMM0 + (Reg - X86::YMM0);
  if (Reg >= X86::ZMM0 && Reg <= X86::ZMM31)
    return Reg;
  llvm_unreachable("Unexpected SIMD register");
}

} // End llvm namespace