#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Constants.h"
else {
Reserved.set(Mips::FP);
Reserved.set(Mips::FP_64);
+
+ // Reserve the base register if we need to both realign the stack and
+ // allocate variable-sized objects at runtime. This should test the
+ // same conditions as MipsFrameLowering::hasBP().
+ if (needsStackRealignment(MF) &&
+ MF.getFrameInfo()->hasVarSizedObjects()) {
+ Reserved.set(Mips::S7);
+ Reserved.set(Mips::S7_64);
+ }
}
}
else
return TFI->hasFP(MF) ? (IsN64 ? Mips::FP_64 : Mips::FP) :
(IsN64 ? Mips::SP_64 : Mips::SP);
+}
+// Returns true when dynamically realigning this function's stack in the
+// prologue is legal for the target. Requires a standard-encoding subtarget,
+// a reservable frame pointer, and — when the call frame is not reserved
+// (i.e. a base pointer will be needed) — a reservable base pointer ($s7).
+// NOTE(review): keep these conditions in sync with the register-reservation
+// logic in getReservedRegs(); they must agree on when $fp/$s7 are taken.
+bool MipsRegisterInfo::canRealignStack(const MachineFunction &MF) const {
+ const MipsSubtarget &Subtarget = MF.getSubtarget<MipsSubtarget>();
+ // Pick the 32- or 64-bit views of the frame/base pointer registers.
+ unsigned FP = Subtarget.isGP32bit() ? Mips::FP : Mips::FP_64;
+ unsigned BP = Subtarget.isGP32bit() ? Mips::S7 : Mips::S7_64;
+
+ // Support dynamic stack realignment only for targets with standard encoding.
+ if (!Subtarget.hasStandardEncoding())
+ return false;
+
+ // We can't perform dynamic stack realignment if we can't reserve the
+ // frame pointer register.
+ if (!MF.getRegInfo().canReserveReg(FP))
+ return false;
+
+ // We can realign the stack if we know the maximum call frame size and we
+ // don't have variable sized objects.
+ if (Subtarget.getFrameLowering()->hasReservedCallFrame(MF))
+ return true;
+
+ // We have to reserve the base pointer register in the presence of variable
+ // sized objects.
+ return MF.getRegInfo().canReserveReg(BP);
+}
+// Returns true when this function's stack must be dynamically realigned:
+// either the function carries an explicit StackAlignment attribute, or some
+// stack object's alignment exceeds the default stack alignment. In both
+// cases the answer is gated on canRealignStack(MF).
+bool MipsRegisterInfo::needsStackRealignment(const MachineFunction &MF) const {
+ const MipsSubtarget &Subtarget = MF.getSubtarget<MipsSubtarget>();
+ const MachineFrameInfo *MFI = MF.getFrameInfo();
+
+ bool CanRealign = canRealignStack(MF);
+
+ // Avoid realigning functions that explicitly do not want to be realigned.
+ // Normally, we should report an error when a function should be dynamically
+ // realigned but also has the attribute no-realign-stack. Unfortunately,
+ // with this attribute, MachineFrameInfo clamps each new object's alignment
+ // to that of the stack's alignment as specified by the ABI. As a result,
+ // the information of whether we have objects with larger alignment
+ // requirement than the stack's alignment is already lost at this point.
+ const Function *F = MF.getFunction();
+ if (F->hasFnAttribute("no-realign-stack"))
+ return false;
+
+ if (F->hasFnAttribute(Attribute::StackAlignment)) {
+ // DEBUG() already compiles to nothing in NDEBUG builds; the previous
+ // "#ifdef DEBUG" guard tested a macro LLVM never defines, which made
+ // this diagnostic unreachable in every build configuration.
+ if (!CanRealign)
+ DEBUG(dbgs() << "It's not possible to realign the stack of the function: "
+ << F->getName() << "\n");
+ return CanRealign;
+ }
+
+ unsigned StackAlignment = Subtarget.getFrameLowering()->getStackAlignment();
+ if (MFI->getMaxAlignment() > StackAlignment) {
+ if (!CanRealign)
+ DEBUG(dbgs() << "It's not possible to realign the stack of the function: "
+ << F->getName() << "\n");
+ return CanRealign;
+ }
+
+ return false;
+}
unsigned FP = ABI.GetFramePtr();
unsigned ZERO = ABI.GetNullPtr();
unsigned ADDu = ABI.GetPtrAdduOp();
+ unsigned ADDiu = ABI.GetPtrAddiuOp();
+ unsigned AND = ABI.IsN64() ? Mips::AND64 : Mips::AND;
+
+ const TargetRegisterClass *RC = ABI.ArePtrs64bit() ?
+ &Mips::GPR64RegClass : &Mips::GPR32RegClass;
// First, compute final stack size.
uint64_t StackSize = MFI->getStackSize();
}
if (MipsFI->callsEhReturn()) {
- const TargetRegisterClass *PtrRC =
- ABI.ArePtrs64bit() ? &Mips::GPR64RegClass : &Mips::GPR32RegClass;
-
// Insert instructions that spill eh data registers.
for (int I = 0; I < 4; ++I) {
if (!MBB.isLiveIn(ABI.GetEhDataReg(I)))
MBB.addLiveIn(ABI.GetEhDataReg(I));
TII.storeRegToStackSlot(MBB, MBBI, ABI.GetEhDataReg(I), false,
- MipsFI->getEhDataRegFI(I), PtrRC, &RegInfo);
+ MipsFI->getEhDataRegFI(I), RC, &RegInfo);
}
// Emit .cfi_offset directives for eh data registers.
nullptr, MRI->getDwarfRegNum(FP, true)));
BuildMI(MBB, MBBI, dl, TII.get(TargetOpcode::CFI_INSTRUCTION))
.addCFIIndex(CFIIndex);
+
+ if (RegInfo.needsStackRealignment(MF)) {
+ // addiu $Reg, $zero, -MaxAlignment
+ // andi $sp, $sp, $Reg
+ unsigned VR = MF.getRegInfo().createVirtualRegister(RC);
+ assert(isInt<16>(MFI->getMaxAlignment()) &&
+ "Function's alignment size requirement is not supported.");
+ int MaxAlign = - (signed) MFI->getMaxAlignment();
+
+ BuildMI(MBB, MBBI, dl, TII.get(ADDiu), VR).addReg(ZERO) .addImm(MaxAlign);
+ BuildMI(MBB, MBBI, dl, TII.get(AND), SP).addReg(SP).addReg(VR);
+
+ if (hasBP(MF)) {
+ // move $s7, $sp
+ unsigned BP = STI.isABI_N64() ? Mips::S7_64 : Mips::S7;
+ BuildMI(MBB, MBBI, dl, TII.get(ADDu), BP)
+ .addReg(SP)
+ .addReg(ZERO);
+ }
+ }
}
}
MipsFunctionInfo *MipsFI = MF.getInfo<MipsFunctionInfo>();
MipsABIInfo ABI = STI.getABI();
unsigned FP = ABI.GetFramePtr();
+ unsigned BP = ABI.IsN64() ? Mips::S7_64 : Mips::S7;
// Mark $fp as used if function has dedicated frame pointer.
if (hasFP(MF))
MRI.setPhysRegUsed(FP);
+ // Mark $s7 as used if function has dedicated base pointer.
+ if (hasBP(MF))
+ MRI.setPhysRegUsed(BP);
// Create spill slots for eh data registers if function calls eh_return.
if (MipsFI->callsEhReturn())
--- /dev/null
+; RUN: llc < %s -march=mips -mcpu=mips2 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP32
+; RUN: llc < %s -march=mips -mcpu=mips32r6 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP32
+; RUN: llc < %s -march=mips64 -mcpu=mips3 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP64 -check-prefix=N64
+; RUN: llc < %s -march=mips64 -mcpu=mips64 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP64 -check-prefix=N64
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP64 -check-prefix=N64
+; RUN: llc < %s -march=mips64 -mcpu=mips3 -target-abi n32 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP64 -check-prefix=N32
+; RUN: llc < %s -march=mips64 -mcpu=mips64 -target-abi n32 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP64 -check-prefix=N32
+; RUN: llc < %s -march=mips64 -mcpu=mips64r6 -target-abi n32 | FileCheck %s \
+; RUN: --check-prefix=ALL --check-prefix=GP64 -check-prefix=N32
+
+; Check dynamic stack realignment in functions without variable-sized objects.
+
+declare void @helper_01(i32, i32, i32, i32, i32*)
+
+; O32 ABI
+define void @func_01() {
+entry:
+; GP32-LABEL: func_01:
+; The 512-byte alloca exceeds the ABI stack alignment, so the O32 prologue
+; must realign $sp and restore it from $fp in the epilogue.
+
+ ; prologue
+ ; FIXME: We are currently over-allocating stack space. This particular case
+ ; needs a frame of up to between 16 and 512-bytes but currently
+ ; allocates between 1024 and 1536 bytes
+ ; GP32: addiu $sp, $sp, -1024
+ ; GP32: sw $ra, 1020($sp)
+ ; GP32: sw $fp, 1016($sp)
+ ;
+ ; GP32: move $fp, $sp
+ ; GP32: addiu $[[T0:[0-9]+|ra|gp]], $zero, -512
+ ; GP32-NEXT: and $sp, $sp, $[[T0]]
+
+ ; body
+ ; GP32: addiu $[[T1:[0-9]+]], $sp, 512
+ ; GP32: sw $[[T1]], 16($sp)
+
+ ; epilogue
+ ; GP32: move $sp, $fp
+ ; GP32: lw $fp, 1016($sp)
+ ; GP32: lw $ra, 1020($sp)
+ ; GP32: addiu $sp, $sp, 1024
+
+ %a = alloca i32, align 512
+ call void @helper_01(i32 0, i32 0, i32 0, i32 0, i32* %a)
+ ret void
+}
+
+declare void @helper_02(i32, i32, i32, i32,
+ i32, i32, i32, i32, i32*)
+
+; N32/N64 ABIs
+define void @func_02() {
+entry:
+; GP64-LABEL: func_02:
+; N32/N64 variant of func_01: same realignment sequence, with daddiu on N64
+; and an extra $gp spill/reload on N32.
+
+ ; prologue
+ ; FIXME: We are currently over-allocating stack space. This particular case
+ ; needs a frame of up to between 16 and 512-bytes but currently
+ ; allocates between 1024 and 1536 bytes
+ ; N32: addiu $sp, $sp, -1024
+ ; N64: daddiu $sp, $sp, -1024
+ ; GP64: sd $ra, 1016($sp)
+ ; GP64: sd $fp, 1008($sp)
+ ; N32: sd $gp, 1000($sp)
+ ;
+ ; GP64: move $fp, $sp
+ ; N32: addiu $[[T0:[0-9]+|ra]], $zero, -512
+ ; N64: daddiu $[[T0:[0-9]+|ra]], $zero, -512
+ ; GP64-NEXT: and $sp, $sp, $[[T0]]
+
+ ; body
+ ; N32: addiu $[[T1:[0-9]+]], $sp, 512
+ ; N64: daddiu $[[T1:[0-9]+]], $sp, 512
+ ; GP64: sd $[[T1]], 0($sp)
+
+ ; epilogue
+ ; GP64: move $sp, $fp
+ ; N32: ld $gp, 1000($sp)
+ ; GP64: ld $fp, 1008($sp)
+ ; GP64: ld $ra, 1016($sp)
+ ; N32: addiu $sp, $sp, 1024
+ ; N64: daddiu $sp, $sp, 1024
+
+ %a = alloca i32, align 512
+ call void @helper_02(i32 0, i32 0, i32 0, i32 0,
+ i32 0, i32 0, i32 0, i32 0, i32* %a)
+ ret void
+}
+
+; Verify that we use $fp for referencing incoming arguments.
+
+declare void @helper_03(i32, i32, i32, i32, i32*, i32*)
+
+; O32 ABI
+define void @func_03(i32 %p0, i32 %p1, i32 %p2, i32 %p3, i32* %b) {
+entry:
+; GP32-LABEL: func_03:
+; The stack-passed argument %b is loaded through $fp (lw ... ($fp)), while
+; outgoing arguments are stored through the realigned $sp.
+
+ ; body
+ ; FIXME: We are currently over-allocating stack space.
+ ; GP32-DAG: addiu $[[T0:[0-9]+]], $sp, 512
+ ; GP32-DAG: sw $[[T0]], 16($sp)
+ ; GP32-DAG: lw $[[T1:[0-9]+]], 1040($fp)
+ ; GP32-DAG: sw $[[T1]], 20($sp)
+
+ %a = alloca i32, align 512
+ call void @helper_03(i32 0, i32 0, i32 0, i32 0, i32* %a, i32* %b)
+ ret void
+}
+
+declare void @helper_04(i32, i32, i32, i32,
+ i32, i32, i32, i32, i32*, i32*)
+
+; N32/N64 ABIs
+define void @func_04(i32 %p0, i32 %p1, i32 %p2, i32 %p3,
+ i32 %p4, i32 %p5, i32 %p6, i32 %p7,
+ i32* %b) {
+entry:
+; GP64-LABEL: func_04:
+; N32/N64 variant of func_03: the ninth argument %b spills to the stack and
+; is reloaded via $fp; outgoing stack arguments go through $sp.
+
+ ; body
+ ; FIXME: We are currently over-allocating stack space.
+ ; N32-DAG: addiu $[[T0:[0-9]+]], $sp, 512
+ ; N64-DAG: daddiu $[[T0:[0-9]+]], $sp, 512
+ ; GP64-DAG: sd $[[T0]], 0($sp)
+ ; GP64-DAG: ld $[[T1:[0-9]+]], 1024($fp)
+ ; GP64-DAG: sd $[[T1]], 8($sp)
+
+ %a = alloca i32, align 512
+ call void @helper_04(i32 0, i32 0, i32 0, i32 0,
+ i32 0, i32 0, i32 0, i32 0, i32* %a, i32* %b)
+ ret void
+}
+
+; Check dynamic stack realignment in functions with variable-sized objects.
+
+; O32 ABI
+define void @func_05(i32 %sz) {
+entry:
+; GP32-LABEL: func_05:
+; Realignment plus a variable-sized alloca: fixed locals are addressed via
+; the base pointer $23 ($s7), which is set from $sp right after the 'and'.
+
+ ; prologue
+ ; FIXME: We are currently over-allocating stack space.
+ ; GP32: addiu $sp, $sp, -1024
+ ; GP32: sw $fp, 1020($sp)
+ ; GP32: sw $23, 1016($sp)
+ ;
+ ; GP32: move $fp, $sp
+ ; GP32: addiu $[[T0:[0-9]+|gp]], $zero, -512
+ ; GP32-NEXT: and $sp, $sp, $[[T0]]
+ ; GP32-NEXT: move $23, $sp
+
+ ; body
+ ; GP32: addiu $[[T1:[0-9]+]], $zero, 222
+ ; GP32: sw $[[T1]], 508($23)
+
+ ; epilogue
+ ; GP32: move $sp, $fp
+ ; GP32: lw $23, 1016($sp)
+ ; GP32: lw $fp, 1020($sp)
+ ; GP32: addiu $sp, $sp, 1024
+
+ %a0 = alloca i32, i32 %sz, align 512
+ %a1 = alloca i32, align 4
+
+ store volatile i32 111, i32* %a0, align 512
+ store volatile i32 222, i32* %a1, align 4
+
+ ret void
+}
+
+; N32/N64 ABIs
+define void @func_06(i32 %sz) {
+entry:
+; GP64-LABEL: func_06:
+; N32/N64 variant of func_05: realignment with a variable-sized alloca, so
+; fixed locals are addressed through the base pointer $23 ($s7).
+
+ ; prologue
+ ; FIXME: We are currently over-allocating stack space.
+ ; N32: addiu $sp, $sp, -1024
+ ; N64: daddiu $sp, $sp, -1024
+ ; GP64: sd $fp, 1016($sp)
+ ; GP64: sd $23, 1008($sp)
+ ;
+ ; GP64: move $fp, $sp
+ ; GP64: addiu $[[T0:[0-9]+|gp]], $zero, -512
+ ; GP64-NEXT: and $sp, $sp, $[[T0]]
+ ; GP64-NEXT: move $23, $sp
+
+ ; body
+ ; GP64: addiu $[[T1:[0-9]+]], $zero, 222
+ ; GP64: sw $[[T1]], 508($23)
+
+ ; epilogue
+ ; GP64: move $sp, $fp
+ ; GP64: ld $23, 1008($sp)
+ ; GP64: ld $fp, 1016($sp)
+ ; N32: addiu $sp, $sp, 1024
+ ; N64: daddiu $sp, $sp, 1024
+
+ %a0 = alloca i32, i32 %sz, align 512
+ %a1 = alloca i32, align 4
+
+ store volatile i32 111, i32* %a0, align 512
+ store volatile i32 222, i32* %a1, align 4
+
+ ret void
+}
+
+; Verify that we use $fp for referencing incoming arguments and $sp for
+; building outbound arguments for nested function calls.
+
+; O32 ABI
+define void @func_07(i32 %p0, i32 %p1, i32 %p2, i32 %p3, i32 %sz) {
+entry:
+; GP32-LABEL: func_07:
+; All three base registers in one function: incoming stack argument read via
+; $fp, fixed local stored via $23, outgoing call argument stored via $sp.
+
+ ; body
+ ; FIXME: We are currently over-allocating stack space.
+ ; GP32-DAG: lw $[[T0:[0-9]+]], 1040($fp)
+ ;
+ ; GP32-DAG: addiu $[[T1:[0-9]+]], $zero, 222
+ ; GP32-DAG: sw $[[T1]], 508($23)
+ ;
+ ; GP32-DAG: sw $[[T2:[0-9]+]], 16($sp)
+
+ %a0 = alloca i32, i32 %sz, align 512
+ %a1 = alloca i32, align 4
+
+ store volatile i32 111, i32* %a0, align 512
+ store volatile i32 222, i32* %a1, align 4
+
+ call void @helper_01(i32 0, i32 0, i32 0, i32 0, i32* %a1)
+
+ ret void
+}
+
+; N32/N64 ABIs
+define void @func_08(i32 %p0, i32 %p1, i32 %p2, i32 %p3,
+ i32 %p4, i32 %p5, i32 %p6, i32 %p7,
+ i32 %sz) {
+entry:
+; GP64-LABEL: func_08:
+; N32/N64 variant of func_07: incoming stack argument via $fp (lw on N32,
+; lwu on N64), fixed local via $23, outgoing argument via $sp.
+
+ ; body
+ ; FIXME: We are currently over-allocating stack space.
+ ; N32-DAG: lw $[[T0:[0-9]+]], 1028($fp)
+ ; N64-DAG: lwu $[[T0:[0-9]+]], 1028($fp)
+ ;
+ ; GP64-DAG: addiu $[[T1:[0-9]+]], $zero, 222
+ ; GP64-DAG: sw $[[T1]], 508($23)
+ ;
+ ; GP64-DAG: sd $[[T2:[0-9]+]], 0($sp)
+
+ %a0 = alloca i32, i32 %sz, align 512
+ %a1 = alloca i32, align 4
+
+ store volatile i32 111, i32* %a0, align 512
+ store volatile i32 222, i32* %a1, align 4
+
+ call void @helper_02(i32 0, i32 0, i32 0, i32 0,
+ i32 0, i32 0, i32 0, i32 0, i32* %a1)
+ ret void
+}
+
+; Check that we do not perform dynamic stack realignment in the presence of
+; the "no-realign-stack" function attribute.
+define void @func_09() "no-realign-stack" {
+entry:
+; ALL-LABEL: func_09:
+; The attribute must suppress the realigning 'and $sp, $sp, ...' even though
+; the alloca requests 512-byte alignment.
+
+ ; ALL-NOT: and $sp, $sp, $[[T0:[0-9]+|ra|gp]]
+
+ %a = alloca i32, align 512
+ call void @helper_01(i32 0, i32 0, i32 0, i32 0, i32* %a)
+ ret void
+}
+
+define void @func_10(i32 %sz) "no-realign-stack" {
+entry:
+; ALL-LABEL: func_10:
+; Same as func_09 but with a variable-sized alloca in play: still no
+; realigning 'and' may be emitted.
+
+ ; ALL-NOT: and $sp, $sp, $[[T0:[0-9]+|ra|gp]]
+
+ %a0 = alloca i32, i32 %sz, align 512
+ %a1 = alloca i32, align 4
+
+ store volatile i32 111, i32* %a0, align 512
+ store volatile i32 222, i32* %a1, align 4
+
+ ret void
+}