//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/VirtRegMap.h"
void llvm::calculateSpillWeightsAndHints(LiveIntervals &LIS,
MachineFunction &MF,
+ VirtRegMap *VRM,
const MachineLoopInfo &MLI,
const MachineBlockFrequencyInfo &MBFI,
VirtRegAuxInfo::NormalizingFn norm) {
<< "********** Function: " << MF.getName() << '\n');
MachineRegisterInfo &MRI = MF.getRegInfo();
- VirtRegAuxInfo VRAI(MF, LIS, MLI, MBFI, norm);
+ VirtRegAuxInfo VRAI(MF, LIS, VRM, MLI, MBFI, norm);
for (unsigned i = 0, e = MRI.getNumVirtRegs(); i != e; ++i) {
unsigned Reg = TargetRegisterInfo::index2VirtReg(i);
if (MRI.reg_nodbg_empty(Reg))
// Check if all values in LI are rematerializable
static bool isRematerializable(const LiveInterval &LI,
const LiveIntervals &LIS,
+ VirtRegMap *VRM,
const TargetInstrInfo &TII) {
+ unsigned Reg = LI.reg;
+ unsigned Original = VRM ? VRM->getOriginal(Reg) : 0;
for (LiveInterval::const_vni_iterator I = LI.vni_begin(), E = LI.vni_end();
I != E; ++I) {
const VNInfo *VNI = *I;
MachineInstr *MI = LIS.getInstructionFromIndex(VNI->def);
assert(MI && "Dead valno in interval");
+ // Trace copies introduced by live range splitting. The inline
+ // spiller can rematerialize through these copies, so the spill
+ // weight must reflect this.
+ if (VRM) {
+ while (MI->isFullCopy()) {
+ // The copy destination must match the interval register.
+ if (MI->getOperand(0).getReg() != Reg)
+ return false;
+
+ // Get the source register.
+ Reg = MI->getOperand(1).getReg();
+
+      // If the copy source is not a virtual register, or its original
+      // (pre-splitting) register differs from this interval's, the copy
+      // did not come from splitting this live range; conservatively
+      // treat the value as not rematerializable.
+ if (!TargetRegisterInfo::isVirtualRegister(Reg) ||
+ VRM->getOriginal(Reg) != Original)
+ return false;
+
+ // Follow the copy live-in value.
+ const LiveInterval &SrcLI = LIS.getInterval(Reg);
+ LiveQueryResult SrcQ = SrcLI.Query(VNI->def);
+ VNI = SrcQ.valueIn();
+ assert(VNI && "Copy from non-existing value");
+ if (VNI->isPHIDef())
+ return false;
+ MI = LIS.getInstructionFromIndex(VNI->def);
+ assert(MI && "Dead valno in interval");
+ }
+ }
+
if (!TII.isTriviallyReMaterializable(MI, LIS.getAliasAnalysis()))
return false;
}
// it is a preferred candidate for spilling.
// FIXME: this gets much more complicated once we support non-trivial
// re-materialization.
- if (isRematerializable(li, LIS, *MF.getSubtarget().getInstrInfo()))
+ if (isRematerializable(li, LIS, VRM, *MF.getSubtarget().getInstrInfo()))
totalWeight *= 0.5F;
li.weight = normalize(totalWeight, li.getSize(), numInstr);
--- /dev/null
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s\r
+\r
+; Check that we do not get excessive spilling from splitting of constant live ranges.\r
+\r
+; CHECK-LABEL: PR24139:\r
+; CHECK: # 16-byte Spill\r
+; CHECK-NOT: # 16-byte Spill\r
+; CHECK: retq\r
+\r
+define <2 x double> @PR24139(<2 x double> %arg, <2 x double> %arg1, <2 x double> %arg2) {\r
+; Three structurally identical polynomial evaluations, one per argument,\r
+; each repeating the same wide vector constants; those constant live\r
+; ranges are expected to be rematerialized rather than spilled.\r
+  %tmp = bitcast <2 x double> %arg to <4 x float>\r
+  %tmp3 = fmul <4 x float> %tmp, <float 0x3FE45F3060000000, float 0x3FE45F3060000000, float 0x3FE45F3060000000, float 0x3FE45F3060000000>\r
+  %tmp4 = bitcast <2 x double> %arg to <4 x i32>\r
+  %tmp5 = and <4 x i32> %tmp4, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>\r
+  %tmp6 = or <4 x i32> %tmp5, <i32 1056964608, i32 1056964608, i32 1056964608, i32 1056964608>\r
+  %tmp7 = bitcast <4 x i32> %tmp6 to <4 x float>\r
+  %tmp8 = fadd <4 x float> %tmp3, %tmp7\r
+  %tmp9 = tail call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %tmp8) #2\r
+  %tmp10 = bitcast <4 x i32> %tmp9 to <2 x i64>\r
+  %tmp11 = tail call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %tmp9) #2\r
+  %tmp12 = fmul <4 x float> %tmp11, <float 0x3FF921FB40000000, float 0x3FF921FB40000000, float 0x3FF921FB40000000, float 0x3FF921FB40000000>\r
+  %tmp13 = fsub <4 x float> %tmp, %tmp12\r
+  %tmp14 = fmul <4 x float> %tmp11, <float 0x3E74442D00000000, float 0x3E74442D00000000, float 0x3E74442D00000000, float 0x3E74442D00000000>\r
+  %tmp15 = fsub <4 x float> %tmp13, %tmp14\r
+  %tmp16 = fmul <4 x float> %tmp15, %tmp15\r
+  %tmp17 = fmul <4 x float> %tmp15, %tmp16\r
+  %tmp18 = fmul <4 x float> %tmp16, <float 0xBF56493260000000, float 0xBF56493260000000, float 0xBF56493260000000, float 0xBF56493260000000>\r
+  %tmp19 = fadd <4 x float> %tmp18, <float 0x3FA55406C0000000, float 0x3FA55406C0000000, float 0x3FA55406C0000000, float 0x3FA55406C0000000>\r
+  %tmp20 = fmul <4 x float> %tmp16, <float 0xBF29918DC0000000, float 0xBF29918DC0000000, float 0xBF29918DC0000000, float 0xBF29918DC0000000>\r
+  %tmp21 = fadd <4 x float> %tmp20, <float 0x3F81106840000000, float 0x3F81106840000000, float 0x3F81106840000000, float 0x3F81106840000000>\r
+  %tmp22 = fmul <4 x float> %tmp16, %tmp19\r
+  %tmp23 = fadd <4 x float> %tmp22, <float 0xBFDFFFFBE0000000, float 0xBFDFFFFBE0000000, float 0xBFDFFFFBE0000000, float 0xBFDFFFFBE0000000>\r
+  %tmp24 = fmul <4 x float> %tmp16, %tmp21\r
+  %tmp25 = fadd <4 x float> %tmp24, <float 0xBFC5555420000000, float 0xBFC5555420000000, float 0xBFC5555420000000, float 0xBFC5555420000000>\r
+  %tmp26 = fmul <4 x float> %tmp16, %tmp23\r
+  %tmp27 = fadd <4 x float> %tmp26, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>\r
+  %tmp28 = fmul <4 x float> %tmp17, %tmp25\r
+  %tmp29 = fadd <4 x float> %tmp15, %tmp28\r
+  %tmp30 = and <2 x i64> %tmp10, <i64 4294967297, i64 4294967297>\r
+  %tmp31 = bitcast <2 x i64> %tmp30 to <4 x i32>\r
+  %tmp32 = icmp eq <4 x i32> %tmp31, zeroinitializer\r
+  %tmp33 = sext <4 x i1> %tmp32 to <4 x i32>\r
+  %tmp34 = bitcast <4 x i32> %tmp33 to <4 x float>\r
+  %tmp35 = tail call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %tmp27, <4 x float> %tmp29, <4 x float> %tmp34) #2\r
+  %tmp36 = and <2 x i64> %tmp10, <i64 8589934594, i64 8589934594>\r
+  %tmp37 = bitcast <2 x i64> %tmp36 to <4 x i32>\r
+  %tmp38 = icmp eq <4 x i32> %tmp37, zeroinitializer\r
+  %tmp39 = sext <4 x i1> %tmp38 to <4 x i32>\r
+  %tmp40 = bitcast <4 x float> %tmp35 to <4 x i32>\r
+  %tmp41 = xor <4 x i32> %tmp40, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>\r
+  %tmp42 = bitcast <4 x i32> %tmp41 to <4 x float>\r
+  %tmp43 = bitcast <4 x i32> %tmp39 to <4 x float>\r
+  %tmp44 = tail call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %tmp42, <4 x float> %tmp35, <4 x float> %tmp43) #2\r
+; Same computation applied to %arg1.\r
+  %tmp45 = bitcast <2 x double> %arg1 to <4 x float>\r
+  %tmp46 = fmul <4 x float> %tmp45, <float 0x3FE45F3060000000, float 0x3FE45F3060000000, float 0x3FE45F3060000000, float 0x3FE45F3060000000>\r
+  %tmp47 = bitcast <2 x double> %arg1 to <4 x i32>\r
+  %tmp48 = and <4 x i32> %tmp47, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>\r
+  %tmp49 = or <4 x i32> %tmp48, <i32 1056964608, i32 1056964608, i32 1056964608, i32 1056964608>\r
+  %tmp50 = bitcast <4 x i32> %tmp49 to <4 x float>\r
+  %tmp51 = fadd <4 x float> %tmp46, %tmp50\r
+  %tmp52 = tail call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %tmp51) #2\r
+  %tmp53 = bitcast <4 x i32> %tmp52 to <2 x i64>\r
+  %tmp54 = tail call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %tmp52) #2\r
+  %tmp55 = fmul <4 x float> %tmp54, <float 0x3FF921FB40000000, float 0x3FF921FB40000000, float 0x3FF921FB40000000, float 0x3FF921FB40000000>\r
+  %tmp56 = fsub <4 x float> %tmp45, %tmp55\r
+  %tmp57 = fmul <4 x float> %tmp54, <float 0x3E74442D00000000, float 0x3E74442D00000000, float 0x3E74442D00000000, float 0x3E74442D00000000>\r
+  %tmp58 = fsub <4 x float> %tmp56, %tmp57\r
+  %tmp59 = fmul <4 x float> %tmp58, %tmp58\r
+  %tmp60 = fmul <4 x float> %tmp58, %tmp59\r
+  %tmp61 = fmul <4 x float> %tmp59, <float 0xBF56493260000000, float 0xBF56493260000000, float 0xBF56493260000000, float 0xBF56493260000000>\r
+  %tmp62 = fadd <4 x float> %tmp61, <float 0x3FA55406C0000000, float 0x3FA55406C0000000, float 0x3FA55406C0000000, float 0x3FA55406C0000000>\r
+  %tmp63 = fmul <4 x float> %tmp59, <float 0xBF29918DC0000000, float 0xBF29918DC0000000, float 0xBF29918DC0000000, float 0xBF29918DC0000000>\r
+  %tmp64 = fadd <4 x float> %tmp63, <float 0x3F81106840000000, float 0x3F81106840000000, float 0x3F81106840000000, float 0x3F81106840000000>\r
+  %tmp65 = fmul <4 x float> %tmp59, %tmp62\r
+  %tmp66 = fadd <4 x float> %tmp65, <float 0xBFDFFFFBE0000000, float 0xBFDFFFFBE0000000, float 0xBFDFFFFBE0000000, float 0xBFDFFFFBE0000000>\r
+  %tmp67 = fmul <4 x float> %tmp59, %tmp64\r
+  %tmp68 = fadd <4 x float> %tmp67, <float 0xBFC5555420000000, float 0xBFC5555420000000, float 0xBFC5555420000000, float 0xBFC5555420000000>\r
+  %tmp69 = fmul <4 x float> %tmp59, %tmp66\r
+  %tmp70 = fadd <4 x float> %tmp69, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>\r
+  %tmp71 = fmul <4 x float> %tmp60, %tmp68\r
+  %tmp72 = fadd <4 x float> %tmp58, %tmp71\r
+  %tmp73 = and <2 x i64> %tmp53, <i64 4294967297, i64 4294967297>\r
+  %tmp74 = bitcast <2 x i64> %tmp73 to <4 x i32>\r
+  %tmp75 = icmp eq <4 x i32> %tmp74, zeroinitializer\r
+  %tmp76 = sext <4 x i1> %tmp75 to <4 x i32>\r
+  %tmp77 = bitcast <4 x i32> %tmp76 to <4 x float>\r
+  %tmp78 = tail call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %tmp70, <4 x float> %tmp72, <4 x float> %tmp77) #2\r
+  %tmp79 = and <2 x i64> %tmp53, <i64 8589934594, i64 8589934594>\r
+  %tmp80 = bitcast <2 x i64> %tmp79 to <4 x i32>\r
+  %tmp81 = icmp eq <4 x i32> %tmp80, zeroinitializer\r
+  %tmp82 = sext <4 x i1> %tmp81 to <4 x i32>\r
+  %tmp83 = bitcast <4 x float> %tmp78 to <4 x i32>\r
+  %tmp84 = xor <4 x i32> %tmp83, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>\r
+  %tmp85 = bitcast <4 x i32> %tmp84 to <4 x float>\r
+  %tmp86 = bitcast <4 x i32> %tmp82 to <4 x float>\r
+  %tmp87 = tail call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %tmp85, <4 x float> %tmp78, <4 x float> %tmp86) #2\r
+  %tmp88 = fadd <4 x float> %tmp44, %tmp87\r
+; Same computation applied to %arg2.\r
+  %tmp89 = bitcast <2 x double> %arg2 to <4 x float>\r
+  %tmp90 = fmul <4 x float> %tmp89, <float 0x3FE45F3060000000, float 0x3FE45F3060000000, float 0x3FE45F3060000000, float 0x3FE45F3060000000>\r
+  %tmp91 = bitcast <2 x double> %arg2 to <4 x i32>\r
+  %tmp92 = and <4 x i32> %tmp91, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>\r
+  %tmp93 = or <4 x i32> %tmp92, <i32 1056964608, i32 1056964608, i32 1056964608, i32 1056964608>\r
+  %tmp94 = bitcast <4 x i32> %tmp93 to <4 x float>\r
+  %tmp95 = fadd <4 x float> %tmp90, %tmp94\r
+  %tmp96 = tail call <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float> %tmp95) #2\r
+  %tmp97 = bitcast <4 x i32> %tmp96 to <2 x i64>\r
+  %tmp98 = tail call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %tmp96) #2\r
+  %tmp99 = fmul <4 x float> %tmp98, <float 0x3FF921FB40000000, float 0x3FF921FB40000000, float 0x3FF921FB40000000, float 0x3FF921FB40000000>\r
+  %tmp100 = fsub <4 x float> %tmp89, %tmp99\r
+  %tmp101 = fmul <4 x float> %tmp98, <float 0x3E74442D00000000, float 0x3E74442D00000000, float 0x3E74442D00000000, float 0x3E74442D00000000>\r
+  %tmp102 = fsub <4 x float> %tmp100, %tmp101\r
+  %tmp103 = fmul <4 x float> %tmp102, %tmp102\r
+  %tmp104 = fmul <4 x float> %tmp102, %tmp103\r
+  %tmp105 = fmul <4 x float> %tmp103, <float 0xBF56493260000000, float 0xBF56493260000000, float 0xBF56493260000000, float 0xBF56493260000000>\r
+  %tmp106 = fadd <4 x float> %tmp105, <float 0x3FA55406C0000000, float 0x3FA55406C0000000, float 0x3FA55406C0000000, float 0x3FA55406C0000000>\r
+  %tmp107 = fmul <4 x float> %tmp103, <float 0xBF29918DC0000000, float 0xBF29918DC0000000, float 0xBF29918DC0000000, float 0xBF29918DC0000000>\r
+  %tmp108 = fadd <4 x float> %tmp107, <float 0x3F81106840000000, float 0x3F81106840000000, float 0x3F81106840000000, float 0x3F81106840000000>\r
+  %tmp109 = fmul <4 x float> %tmp103, %tmp106\r
+  %tmp110 = fadd <4 x float> %tmp109, <float 0xBFDFFFFBE0000000, float 0xBFDFFFFBE0000000, float 0xBFDFFFFBE0000000, float 0xBFDFFFFBE0000000>\r
+  %tmp111 = fmul <4 x float> %tmp103, %tmp108\r
+  %tmp112 = fadd <4 x float> %tmp111, <float 0xBFC5555420000000, float 0xBFC5555420000000, float 0xBFC5555420000000, float 0xBFC5555420000000>\r
+  %tmp113 = fmul <4 x float> %tmp103, %tmp110\r
+  %tmp114 = fadd <4 x float> %tmp113, <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>\r
+  %tmp115 = fmul <4 x float> %tmp104, %tmp112\r
+  %tmp116 = fadd <4 x float> %tmp102, %tmp115\r
+  %tmp117 = and <2 x i64> %tmp97, <i64 4294967297, i64 4294967297>\r
+  %tmp118 = bitcast <2 x i64> %tmp117 to <4 x i32>\r
+  %tmp119 = icmp eq <4 x i32> %tmp118, zeroinitializer\r
+  %tmp120 = sext <4 x i1> %tmp119 to <4 x i32>\r
+  %tmp121 = bitcast <4 x i32> %tmp120 to <4 x float>\r
+  %tmp122 = tail call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %tmp114, <4 x float> %tmp116, <4 x float> %tmp121) #2\r
+  %tmp123 = and <2 x i64> %tmp97, <i64 8589934594, i64 8589934594>\r
+  %tmp124 = bitcast <2 x i64> %tmp123 to <4 x i32>\r
+  %tmp125 = icmp eq <4 x i32> %tmp124, zeroinitializer\r
+  %tmp126 = sext <4 x i1> %tmp125 to <4 x i32>\r
+  %tmp127 = bitcast <4 x float> %tmp122 to <4 x i32>\r
+  %tmp128 = xor <4 x i32> %tmp127, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648>\r
+  %tmp129 = bitcast <4 x i32> %tmp128 to <4 x float>\r
+  %tmp130 = bitcast <4 x i32> %tmp126 to <4 x float>\r
+  %tmp131 = tail call <4 x float> @llvm.x86.sse41.blendvps(<4 x float> %tmp129, <4 x float> %tmp122, <4 x float> %tmp130) #2\r
+; Sum the three partial results and return.\r
+  %tmp132 = fadd <4 x float> %tmp88, %tmp131\r
+  %tmp133 = bitcast <4 x float> %tmp132 to <2 x double>\r
+  ret <2 x double> %tmp133\r
+}\r
+\r
+declare <4 x i32> @llvm.x86.sse2.cvttps2dq(<4 x float>)\r
+declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>)\r
+declare <4 x float> @llvm.x86.sse41.blendvps(<4 x float>, <4 x float>, <4 x float>)\r