Temporary: make R12 available in ARM mode if RegScavenger is being used.
author: Evan Cheng <evan.cheng@apple.com>
Wed, 28 Feb 2007 00:22:44 +0000 (00:22 +0000)
committer: Evan Cheng <evan.cheng@apple.com>
Wed, 28 Feb 2007 00:22:44 +0000 (00:22 +0000)
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@34709 91177308-0d34-0410-b5e6-96231b3b80d8

lib/Target/ARM/ARMRegisterInfo.td

index 2f51144e473a890a1da402395fa53fe11a277180..625e3d3afb53a4e00be7c52ebfa4cdf1c3dcd804 100644 (file)
@@ -123,6 +123,32 @@ def GPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
       ARM::R4, ARM::R5, ARM::R6, ARM::R8,
       ARM::R10,ARM::R11,
       ARM::LR, ARM::R7 };
+
+    // FP is R11, R9 is available, R12 is available.
+    static const unsigned ARM_GPR_AO_5[] = {
+      ARM::R3, ARM::R2, ARM::R1, ARM::R0,
+      ARM::R4, ARM::R5, ARM::R6, ARM::R7,
+      ARM::R8, ARM::R9, ARM::R10,ARM::R12,
+      ARM::LR, ARM::R11 };
+    // FP is R11, R9 is not available, R12 is available.
+    static const unsigned ARM_GPR_AO_6[] = {
+      ARM::R3, ARM::R2, ARM::R1, ARM::R0,
+      ARM::R4, ARM::R5, ARM::R6, ARM::R7,
+      ARM::R8, ARM::R10,ARM::R12,
+      ARM::LR, ARM::R11 };
+    // FP is R7, R9 is available, R12 is available.
+    static const unsigned ARM_GPR_AO_7[] = {
+      ARM::R3, ARM::R2, ARM::R1, ARM::R0,
+      ARM::R4, ARM::R5, ARM::R6, ARM::R8,
+      ARM::R9, ARM::R10,ARM::R11,ARM::R12,
+      ARM::LR, ARM::R7 };
+    // FP is R7, R9 is not available, R12 is available.
+    static const unsigned ARM_GPR_AO_8[] = {
+      ARM::R3, ARM::R2, ARM::R1, ARM::R0,
+      ARM::R4, ARM::R5, ARM::R6, ARM::R8,
+      ARM::R10,ARM::R11,ARM::R12,
+      ARM::LR, ARM::R7 };
+
     // FP is R7, only low registers available.
     static const unsigned THUMB_GPR_AO[] = {
       ARM::R2, ARM::R1, ARM::R0,
@@ -131,19 +157,20 @@ def GPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
     GPRClass::iterator
     GPRClass::allocation_order_begin(const MachineFunction &MF) const {
       const TargetMachine &TM = MF.getTarget();
+      const MRegisterInfo *RI = TM.getRegisterInfo();
       const ARMSubtarget &Subtarget = TM.getSubtarget<ARMSubtarget>();
       if (Subtarget.isThumb())
         return THUMB_GPR_AO;
       if (Subtarget.useThumbBacktraces()) {
         if (Subtarget.isR9Reserved())
-          return ARM_GPR_AO_4;
+          return RI->requiresRegisterScavenging() ? ARM_GPR_AO_8 : ARM_GPR_AO_4;
         else
-          return ARM_GPR_AO_3;
+          return RI->requiresRegisterScavenging() ? ARM_GPR_AO_7 : ARM_GPR_AO_3;
       } else {
         if (Subtarget.isR9Reserved())
-          return ARM_GPR_AO_2;
+          return RI->requiresRegisterScavenging() ? ARM_GPR_AO_6 : ARM_GPR_AO_2;
         else
-          return ARM_GPR_AO_1;
+          return RI->requiresRegisterScavenging() ? ARM_GPR_AO_5 : ARM_GPR_AO_1;
       }
     }
 
@@ -156,15 +183,29 @@ def GPR : RegisterClass<"ARM", [i32], 32, [R0, R1, R2, R3, R4, R5, R6,
       if (Subtarget.isThumb())
         I = THUMB_GPR_AO + (sizeof(THUMB_GPR_AO)/sizeof(unsigned));
       else if (Subtarget.useThumbBacktraces()) {
-        if (Subtarget.isR9Reserved())
-          I = ARM_GPR_AO_4 + (sizeof(ARM_GPR_AO_4)/sizeof(unsigned));
-        else
-          I = ARM_GPR_AO_3 + (sizeof(ARM_GPR_AO_3)/sizeof(unsigned));
+        if (Subtarget.isR9Reserved()) {
+          if (RI->requiresRegisterScavenging())
+            I = ARM_GPR_AO_8 + (sizeof(ARM_GPR_AO_8)/sizeof(unsigned));
+          else
+            I = ARM_GPR_AO_4 + (sizeof(ARM_GPR_AO_4)/sizeof(unsigned));
+        } else {
+          if (RI->requiresRegisterScavenging())
+            I = ARM_GPR_AO_7 + (sizeof(ARM_GPR_AO_7)/sizeof(unsigned));
+          else
+            I = ARM_GPR_AO_3 + (sizeof(ARM_GPR_AO_3)/sizeof(unsigned));
+        }
       } else {
-        if (Subtarget.isR9Reserved())
-          I = ARM_GPR_AO_2 + (sizeof(ARM_GPR_AO_2)/sizeof(unsigned));
-        else
-          I = ARM_GPR_AO_1 + (sizeof(ARM_GPR_AO_1)/sizeof(unsigned));
+        if (Subtarget.isR9Reserved()) {
+          if (RI->requiresRegisterScavenging())
+            I = ARM_GPR_AO_6 + (sizeof(ARM_GPR_AO_6)/sizeof(unsigned));
+          else
+            I = ARM_GPR_AO_2 + (sizeof(ARM_GPR_AO_2)/sizeof(unsigned));
+        } else {
+          if (RI->requiresRegisterScavenging())
+            I = ARM_GPR_AO_5 + (sizeof(ARM_GPR_AO_5)/sizeof(unsigned));
+          else
+            I = ARM_GPR_AO_1 + (sizeof(ARM_GPR_AO_1)/sizeof(unsigned));
+        }
       }
 
       // Mac OS X requires FP not to be clobbered for backtracing purpose.