From: Richard Sandiford
Date: Wed, 3 Jul 2013 10:10:02 +0000 (+0000)
Subject: [SystemZ] Fold more spills
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=fa487e83a83c260d6a50f3df00a0eb012553a912;p=oota-llvm.git

[SystemZ] Fold more spills

Add a mapping from register-based <INSN>R instructions to the
corresponding memory-based <INSN>.  Use it to cut down on the number
of spill loads.

Some instructions extend their operands from smaller fields, so this
required a new TSFlags field to say how big the unextended operand is.

This optimisation doesn't trigger for C(G)R and CL(G)R because in
practice we always combine those instructions with a branch.  Adding a
test for every other case probably seems excessive, but it did catch a
missed optimisation for DSGF (fixed in r185435).

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@185529 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/SystemZ/SystemZInstrFP.td b/lib/Target/SystemZ/SystemZInstrFP.td
index 7499d2fb8d9..4317306999b 100644
--- a/lib/Target/SystemZ/SystemZInstrFP.td
+++ b/lib/Target/SystemZ/SystemZInstrFP.td
@@ -27,26 +27,26 @@ defm CondStoreF64 : CondStores<FP64, nonvolatile_store,
-  def LZER : InherentRRE<"lzer", 0xB374, FP32, (fpimm0)>;
-  def LZDR : InherentRRE<"lzdr", 0xB375, FP64, (fpimm0)>;
-  def LZXR : InherentRRE<"lzxr", 0xB376, FP128, (fpimm0)>;
+  def LZER : InherentRRE<"lze", 0xB374, FP32, (fpimm0)>;
+  def LZDR : InherentRRE<"lzd", 0xB375, FP64, (fpimm0)>;
+  def LZXR : InherentRRE<"lzx", 0xB376, FP128, (fpimm0)>;
 }
 
 // Moves between two floating-point registers.
 let neverHasSideEffects = 1 in {
-  def LER : UnaryRR <"ler", 0x38, null_frag, FP32, FP32>;
-  def LDR : UnaryRR <"ldr", 0x28, null_frag, FP64, FP64>;
-  def LXR : UnaryRRE<"lxr", 0xB365, null_frag, FP128, FP128>;
+  def LER : UnaryRR <"le", 0x38, null_frag, FP32, FP32>;
+  def LDR : UnaryRR <"ld", 0x28, null_frag, FP64, FP64>;
+  def LXR : UnaryRRE<"lx", 0xB365, null_frag, FP128, FP128>;
 }
 
 // Moves between 64-bit integer and floating-point registers.
-def LGDR : UnaryRRE<"lgdr", 0xB3CD, bitconvert, GR64, FP64>;
-def LDGR : UnaryRRE<"ldgr", 0xB3C1, bitconvert, FP64, GR64>;
+def LGDR : UnaryRRE<"lgd", 0xB3CD, bitconvert, GR64, FP64>;
+def LDGR : UnaryRRE<"ldg", 0xB3C1, bitconvert, FP64, GR64>;
 
 // fcopysign with an FP32 result.
 let isCodeGenOnly = 1 in {
-  def CPSDRss : BinaryRRF<"cpsdr", 0xB372, fcopysign, FP32, FP32>;
-  def CPSDRsd : BinaryRRF<"cpsdr", 0xB372, fcopysign, FP32, FP64>;
+  def CPSDRss : BinaryRRF<"cpsd", 0xB372, fcopysign, FP32, FP32>;
+  def CPSDRsd : BinaryRRF<"cpsd", 0xB372, fcopysign, FP32, FP64>;
 }
 
 // The sign of an FP128 is in the high register.
@@ -55,8 +55,8 @@ def : Pat<(fcopysign FP32:$src1, FP128:$src2),
 
 // fcopysign with an FP64 result.
 let isCodeGenOnly = 1 in
-  def CPSDRds : BinaryRRF<"cpsdr", 0xB372, fcopysign, FP64, FP32>;
-def CPSDRdd : BinaryRRF<"cpsdr", 0xB372, fcopysign, FP64, FP64>;
+  def CPSDRds : BinaryRRF<"cpsd", 0xB372, fcopysign, FP64, FP32>;
+def CPSDRdd : BinaryRRF<"cpsd", 0xB372, fcopysign, FP64, FP64>;
 
 // The sign of an FP128 is in the high register.
 def : Pat<(fcopysign FP64:$src1, FP128:$src2),
@@ -80,8 +80,8 @@ def : CopySign128<FP64,
-  defm LE : UnaryRXPair<"le", 0x78, 0xED64, load, FP32>;
-  defm LD : UnaryRXPair<"ld", 0x68, 0xED65, load, FP64>;
+  defm LE : UnaryRXPair<"le", 0x78, 0xED64, load, FP32, 4>;
+  defm LD : UnaryRXPair<"ld", 0x68, 0xED65, load, FP64, 8>;
 
   // These instructions are split after register allocation, so we don't
   // want a custom inserter.
@@ -96,8 +96,8 @@ let canFoldAsLoad = 1, SimpleBDXLoad = 1 in {
 //===----------------------------------------------------------------------===//
 
 let SimpleBDXStore = 1 in {
-  defm STE : StoreRXPair<"ste", 0x70, 0xED66, store, FP32>;
-  defm STD : StoreRXPair<"std", 0x60, 0xED67, store, FP64>;
+  defm STE : StoreRXPair<"ste", 0x70, 0xED66, store, FP32, 4>;
+  defm STD : StoreRXPair<"std", 0x60, 0xED67, store, FP64, 8>;
 
   // These instructions are split after register allocation, so we don't
   // want a custom inserter.
@@ -114,9 +114,9 @@ let SimpleBDXStore = 1 in {
 // Convert floating-point values to narrower representations, rounding
 // according to the current mode.  The destination of LEXBR and LDXBR
 // is a 128-bit value, but only the first register of the pair is used.
-def LEDBR : UnaryRRE<"ledbr", 0xB344, fround, FP32, FP64>;
-def LEXBR : UnaryRRE<"lexbr", 0xB346, null_frag, FP128, FP128>;
-def LDXBR : UnaryRRE<"ldxbr", 0xB345, null_frag, FP128, FP128>;
+def LEDBR : UnaryRRE<"ledb", 0xB344, fround, FP32, FP64>;
+def LEXBR : UnaryRRE<"lexb", 0xB346, null_frag, FP128, FP128>;
+def LDXBR : UnaryRRE<"ldxb", 0xB345, null_frag, FP128, FP128>;
 
 def : Pat<(f32 (fround FP128:$src)),
           (EXTRACT_SUBREG (LEXBR FP128:$src), subreg_32bit)>;
@@ -124,36 +124,36 @@ def : Pat<(f64 (fround FP128:$src)),
           (EXTRACT_SUBREG (LDXBR FP128:$src), subreg_high)>;
 
 // Extend register floating-point values to wider representations.
-def LDEBR : UnaryRRE<"ldebr", 0xB304, fextend, FP64, FP32>;
-def LXEBR : UnaryRRE<"lxebr", 0xB306, fextend, FP128, FP32>;
-def LXDBR : UnaryRRE<"lxdbr", 0xB305, fextend, FP128, FP64>;
+def LDEBR : UnaryRRE<"ldeb", 0xB304, fextend, FP64, FP32>;
+def LXEBR : UnaryRRE<"lxeb", 0xB306, fextend, FP128, FP32>;
+def LXDBR : UnaryRRE<"lxdb", 0xB305, fextend, FP128, FP64>;
 
 // Extend memory floating-point values to wider representations.
-def LDEB : UnaryRXE<"ldeb", 0xED04, extloadf32, FP64>;
-def LXEB : UnaryRXE<"lxeb", 0xED06, extloadf32, FP128>;
-def LXDB : UnaryRXE<"lxdb", 0xED05, extloadf64, FP128>;
+def LDEB : UnaryRXE<"ldeb", 0xED04, extloadf32, FP64, 4>;
+def LXEB : UnaryRXE<"lxeb", 0xED06, extloadf32, FP128, 4>;
+def LXDB : UnaryRXE<"lxdb", 0xED05, extloadf64, FP128, 8>;
 
 // Convert a signed integer register value to a floating-point one.
 let Defs = [CC] in {
-  def CEFBR : UnaryRRE<"cefbr", 0xB394, sint_to_fp, FP32, GR32>;
-  def CDFBR : UnaryRRE<"cdfbr", 0xB395, sint_to_fp, FP64, GR32>;
-  def CXFBR : UnaryRRE<"cxfbr", 0xB396, sint_to_fp, FP128, GR32>;
+  def CEFBR : UnaryRRE<"cefb", 0xB394, sint_to_fp, FP32, GR32>;
+  def CDFBR : UnaryRRE<"cdfb", 0xB395, sint_to_fp, FP64, GR32>;
+  def CXFBR : UnaryRRE<"cxfb", 0xB396, sint_to_fp, FP128, GR32>;
 
-  def CEGBR : UnaryRRE<"cegbr", 0xB3A4, sint_to_fp, FP32, GR64>;
-  def CDGBR : UnaryRRE<"cdgbr", 0xB3A5, sint_to_fp, FP64, GR64>;
-  def CXGBR : UnaryRRE<"cxgbr", 0xB3A6, sint_to_fp, FP128, GR64>;
+  def CEGBR : UnaryRRE<"cegb", 0xB3A4, sint_to_fp, FP32, GR64>;
+  def CDGBR : UnaryRRE<"cdgb", 0xB3A5, sint_to_fp, FP64, GR64>;
+  def CXGBR : UnaryRRE<"cxgb", 0xB3A6, sint_to_fp, FP128, GR64>;
 }
 
 // Convert a floating-point register value to a signed integer value,
 // with the second operand (modifier M3) specifying the rounding mode.
 let Defs = [CC] in {
-  def CFEBR : UnaryRRF<"cfebr", 0xB398, GR32, FP32>;
-  def CFDBR : UnaryRRF<"cfdbr", 0xB399, GR32, FP64>;
-  def CFXBR : UnaryRRF<"cfxbr", 0xB39A, GR32, FP128>;
+  def CFEBR : UnaryRRF<"cfeb", 0xB398, GR32, FP32>;
+  def CFDBR : UnaryRRF<"cfdb", 0xB399, GR32, FP64>;
+  def CFXBR : UnaryRRF<"cfxb", 0xB39A, GR32, FP128>;
 
-  def CGEBR : UnaryRRF<"cgebr", 0xB3A8, GR64, FP32>;
-  def CGDBR : UnaryRRF<"cgdbr", 0xB3A9, GR64, FP64>;
-  def CGXBR : UnaryRRF<"cgxbr", 0xB3AA, GR64, FP128>;
+  def CGEBR : UnaryRRF<"cgeb", 0xB3A8, GR64, FP32>;
+  def CGDBR : UnaryRRF<"cgdb", 0xB3A9, GR64, FP64>;
+  def CGXBR : UnaryRRF<"cgxb", 0xB3AA, GR64, FP128>;
 }
 
 // fp_to_sint always rounds towards zero, which is modifier value 5.
@@ -171,32 +171,32 @@ def : Pat<(i64 (fp_to_sint FP128:$src)), (CGXBR 5, FP128:$src)>;
 
 // Negation (Load Complement).
 let Defs = [CC] in {
-  def LCEBR : UnaryRRE<"lcebr", 0xB303, fneg, FP32, FP32>;
-  def LCDBR : UnaryRRE<"lcdbr", 0xB313, fneg, FP64, FP64>;
-  def LCXBR : UnaryRRE<"lcxbr", 0xB343, fneg, FP128, FP128>;
+  def LCEBR : UnaryRRE<"lceb", 0xB303, fneg, FP32, FP32>;
+  def LCDBR : UnaryRRE<"lcdb", 0xB313, fneg, FP64, FP64>;
+  def LCXBR : UnaryRRE<"lcxb", 0xB343, fneg, FP128, FP128>;
 }
 
 // Absolute value (Load Positive).
 let Defs = [CC] in {
-  def LPEBR : UnaryRRE<"lpebr", 0xB300, fabs, FP32, FP32>;
-  def LPDBR : UnaryRRE<"lpdbr", 0xB310, fabs, FP64, FP64>;
-  def LPXBR : UnaryRRE<"lpxbr", 0xB340, fabs, FP128, FP128>;
+  def LPEBR : UnaryRRE<"lpeb", 0xB300, fabs, FP32, FP32>;
+  def LPDBR : UnaryRRE<"lpdb", 0xB310, fabs, FP64, FP64>;
+  def LPXBR : UnaryRRE<"lpxb", 0xB340, fabs, FP128, FP128>;
 }
 
 // Negative absolute value (Load Negative).
 let Defs = [CC] in {
-  def LNEBR : UnaryRRE<"lnebr", 0xB301, fnabs, FP32, FP32>;
-  def LNDBR : UnaryRRE<"lndbr", 0xB311, fnabs, FP64, FP64>;
-  def LNXBR : UnaryRRE<"lnxbr", 0xB341, fnabs, FP128, FP128>;
+  def LNEBR : UnaryRRE<"lneb", 0xB301, fnabs, FP32, FP32>;
+  def LNDBR : UnaryRRE<"lndb", 0xB311, fnabs, FP64, FP64>;
+  def LNXBR : UnaryRRE<"lnxb", 0xB341, fnabs, FP128, FP128>;
 }
 
 // Square root.
-def SQEBR : UnaryRRE<"sqebr", 0xB314, fsqrt, FP32, FP32>;
-def SQDBR : UnaryRRE<"sqdbr", 0xB315, fsqrt, FP64, FP64>;
-def SQXBR : UnaryRRE<"sqxbr", 0xB316, fsqrt, FP128, FP128>;
+def SQEBR : UnaryRRE<"sqeb", 0xB314, fsqrt, FP32, FP32>;
+def SQDBR : UnaryRRE<"sqdb", 0xB315, fsqrt, FP64, FP64>;
+def SQXBR : UnaryRRE<"sqxb", 0xB316, fsqrt, FP128, FP128>;
 
-def SQEB : UnaryRXE<"sqeb", 0xED14, loadu, FP32>;
-def SQDB : UnaryRXE<"sqdb", 0xED15, loadu, FP64>;
+def SQEB : UnaryRXE<"sqeb", 0xED14, loadu, FP32, 4>;
+def SQDB : UnaryRXE<"sqdb", 0xED15, loadu, FP64, 8>;
 
 // Round to an integer, with the second operand (modifier M3) specifying
 // the rounding mode.
@@ -205,9 +205,9 @@ def SQDB : UnaryRXE<"sqdb", 0xED15, loadu, FP64>;
 // that allow this to be suppressed (as for fnearbyint), but we don't yet
 // support -march=z196.
 let Defs = [CC] in {
-  def FIEBR : UnaryRRF<"fiebr", 0xB357, FP32, FP32>;
-  def FIDBR : UnaryRRF<"fidbr", 0xB35F, FP64, FP64>;
-  def FIXBR : UnaryRRF<"fixbr", 0xB347, FP128, FP128>;
+  def FIEBR : UnaryRRF<"fieb", 0xB357, FP32, FP32>;
+  def FIDBR : UnaryRRF<"fidb", 0xB35F, FP64, FP64>;
+  def FIXBR : UnaryRRF<"fixb", 0xB347, FP128, FP128>;
 }
 
 // frint rounds according to the current mode (modifier 0) and detects
@@ -223,92 +223,92 @@ def : Pat<(frint FP128:$src), (FIXBR 0, FP128:$src)>;
 
 // Addition.
 let Defs = [CC] in {
   let isCommutable = 1 in {
-    def AEBR : BinaryRRE<"aebr", 0xB30A, fadd, FP32, FP32>;
-    def ADBR : BinaryRRE<"adbr", 0xB31A, fadd, FP64, FP64>;
-    def AXBR : BinaryRRE<"axbr", 0xB34A, fadd, FP128, FP128>;
+    def AEBR : BinaryRRE<"aeb", 0xB30A, fadd, FP32, FP32>;
+    def ADBR : BinaryRRE<"adb", 0xB31A, fadd, FP64, FP64>;
+    def AXBR : BinaryRRE<"axb", 0xB34A, fadd, FP128, FP128>;
   }
-  def AEB : BinaryRXE<"aeb", 0xED0A, fadd, FP32, load>;
-  def ADB : BinaryRXE<"adb", 0xED1A, fadd, FP64, load>;
+  def AEB : BinaryRXE<"aeb", 0xED0A, fadd, FP32, load, 4>;
+  def ADB : BinaryRXE<"adb", 0xED1A, fadd, FP64, load, 8>;
 }
 
 // Subtraction.
 let Defs = [CC] in {
-  def SEBR : BinaryRRE<"sebr", 0xB30B, fsub, FP32, FP32>;
-  def SDBR : BinaryRRE<"sdbr", 0xB31B, fsub, FP64, FP64>;
-  def SXBR : BinaryRRE<"sxbr", 0xB34B, fsub, FP128, FP128>;
+  def SEBR : BinaryRRE<"seb", 0xB30B, fsub, FP32, FP32>;
+  def SDBR : BinaryRRE<"sdb", 0xB31B, fsub, FP64, FP64>;
+  def SXBR : BinaryRRE<"sxb", 0xB34B, fsub, FP128, FP128>;
 
-  def SEB : BinaryRXE<"seb", 0xED0B, fsub, FP32, load>;
-  def SDB : BinaryRXE<"sdb", 0xED1B, fsub, FP64, load>;
+  def SEB : BinaryRXE<"seb", 0xED0B, fsub, FP32, load, 4>;
+  def SDB : BinaryRXE<"sdb", 0xED1B, fsub, FP64, load, 8>;
 }
 
 // Multiplication.
 let isCommutable = 1 in {
-  def MEEBR : BinaryRRE<"meebr", 0xB317, fmul, FP32, FP32>;
-  def MDBR : BinaryRRE<"mdbr", 0xB31C, fmul, FP64, FP64>;
-  def MXBR : BinaryRRE<"mxbr", 0xB34C, fmul, FP128, FP128>;
+  def MEEBR : BinaryRRE<"meeb", 0xB317, fmul, FP32, FP32>;
+  def MDBR : BinaryRRE<"mdb", 0xB31C, fmul, FP64, FP64>;
+  def MXBR : BinaryRRE<"mxb", 0xB34C, fmul, FP128, FP128>;
 }
-def MEEB : BinaryRXE<"meeb", 0xED17, fmul, FP32, load>;
-def MDB : BinaryRXE<"mdb", 0xED1C, fmul, FP64, load>;
+def MEEB : BinaryRXE<"meeb", 0xED17, fmul, FP32, load, 4>;
+def MDB : BinaryRXE<"mdb", 0xED1C, fmul, FP64, load, 8>;
 
 // f64 multiplication of two FP32 registers.
-def MDEBR : BinaryRRE<"mdebr", 0xB30C, null_frag, FP64, FP32>;
+def MDEBR : BinaryRRE<"mdeb", 0xB30C, null_frag, FP64, FP32>;
 def : Pat<(fmul (f64 (fextend FP32:$src1)),
                 (f64 (fextend FP32:$src2))),
           (MDEBR (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                 FP32:$src1, subreg_32bit), FP32:$src2)>;
 
 // f64 multiplication of an FP32 register and an f32 memory.
-def MDEB : BinaryRXE<"mdeb", 0xED0C, null_frag, FP64, load>;
+def MDEB : BinaryRXE<"mdeb", 0xED0C, null_frag, FP64, load, 4>;
 def : Pat<(fmul (f64 (fextend FP32:$src1)),
                 (f64 (extloadf32 bdxaddr12only:$addr))),
           (MDEB (INSERT_SUBREG (f64 (IMPLICIT_DEF)),
                                FP32:$src1, subreg_32bit), bdxaddr12only:$addr)>;
 
 // f128 multiplication of two FP64 registers.
-def MXDBR : BinaryRRE<"mxdbr", 0xB307, null_frag, FP128, FP64>;
+def MXDBR : BinaryRRE<"mxdb", 0xB307, null_frag, FP128, FP64>;
 def : Pat<(fmul (f128 (fextend FP64:$src1)),
                 (f128 (fextend FP64:$src2))),
           (MXDBR (INSERT_SUBREG (f128 (IMPLICIT_DEF)),
                                 FP64:$src1, subreg_high), FP64:$src2)>;
 
 // f128 multiplication of an FP64 register and an f64 memory.
-def MXDB : BinaryRXE<"mxdb", 0xED07, null_frag, FP128, load>;
+def MXDB : BinaryRXE<"mxdb", 0xED07, null_frag, FP128, load, 8>;
 def : Pat<(fmul (f128 (fextend FP64:$src1)),
                 (f128 (extloadf64 bdxaddr12only:$addr))),
           (MXDB (INSERT_SUBREG (f128 (IMPLICIT_DEF)),
                                FP64:$src1, subreg_high), bdxaddr12only:$addr)>;
 
 // Fused multiply-add.
-def MAEBR : TernaryRRD<"maebr", 0xB30E, z_fma, FP32>;
-def MADBR : TernaryRRD<"madbr", 0xB31E, z_fma, FP64>;
+def MAEBR : TernaryRRD<"maeb", 0xB30E, z_fma, FP32>;
+def MADBR : TernaryRRD<"madb", 0xB31E, z_fma, FP64>;
 
-def MAEB : TernaryRXF<"maeb", 0xED0E, z_fma, FP32, load>;
-def MADB : TernaryRXF<"madb", 0xED1E, z_fma, FP64, load>;
+def MAEB : TernaryRXF<"maeb", 0xED0E, z_fma, FP32, load, 4>;
+def MADB : TernaryRXF<"madb", 0xED1E, z_fma, FP64, load, 8>;
 
 // Fused multiply-subtract.
-def MSEBR : TernaryRRD<"msebr", 0xB30F, z_fms, FP32>;
-def MSDBR : TernaryRRD<"msdbr", 0xB31F, z_fms, FP64>;
+def MSEBR : TernaryRRD<"mseb", 0xB30F, z_fms, FP32>;
+def MSDBR : TernaryRRD<"msdb", 0xB31F, z_fms, FP64>;
 
-def MSEB : TernaryRXF<"mseb", 0xED0F, z_fms, FP32, load>;
-def MSDB : TernaryRXF<"msdb", 0xED1F, z_fms, FP64, load>;
+def MSEB : TernaryRXF<"mseb", 0xED0F, z_fms, FP32, load, 4>;
+def MSDB : TernaryRXF<"msdb", 0xED1F, z_fms, FP64, load, 8>;
 
 // Division.
-def DEBR : BinaryRRE<"debr", 0xB30D, fdiv, FP32, FP32>;
-def DDBR : BinaryRRE<"ddbr", 0xB31D, fdiv, FP64, FP64>;
-def DXBR : BinaryRRE<"dxbr", 0xB34D, fdiv, FP128, FP128>;
+def DEBR : BinaryRRE<"deb", 0xB30D, fdiv, FP32, FP32>;
+def DDBR : BinaryRRE<"ddb", 0xB31D, fdiv, FP64, FP64>;
+def DXBR : BinaryRRE<"dxb", 0xB34D, fdiv, FP128, FP128>;
 
-def DEB : BinaryRXE<"deb", 0xED0D, fdiv, FP32, load>;
-def DDB : BinaryRXE<"ddb", 0xED1D, fdiv, FP64, load>;
+def DEB : BinaryRXE<"deb", 0xED0D, fdiv, FP32, load, 4>;
+def DDB : BinaryRXE<"ddb", 0xED1D, fdiv, FP64, load, 8>;
 
 //===----------------------------------------------------------------------===//
 // Comparisons
 //===----------------------------------------------------------------------===//
 
 let Defs = [CC] in {
-  def CEBR : CompareRRE<"cebr", 0xB309, z_cmp, FP32, FP32>;
-  def CDBR : CompareRRE<"cdbr", 0xB319, z_cmp, FP64, FP64>;
-  def CXBR : CompareRRE<"cxbr", 0xB349, z_cmp, FP128, FP128>;
+  def CEBR : CompareRRE<"ceb", 0xB309, z_cmp, FP32, FP32>;
+  def CDBR : CompareRRE<"cdb", 0xB319, z_cmp, FP64, FP64>;
+  def CXBR : CompareRRE<"cxb", 0xB349, z_cmp, FP128, FP128>;
 
-  def CEB : CompareRXE<"ceb", 0xED09, z_cmp, FP32, load>;
-  def CDB : CompareRXE<"cdb", 0xED19, z_cmp, FP64, load>;
+  def CEB : CompareRXE<"ceb", 0xED09, z_cmp, FP32, load, 4>;
+  def CDB : CompareRXE<"cdb", 0xED19, z_cmp, FP64, load, 8>;
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/lib/Target/SystemZ/SystemZInstrFormats.td b/lib/Target/SystemZ/SystemZInstrFormats.td
index d720feefd3b..fb530cc5332 100644
--- a/lib/Target/SystemZ/SystemZInstrFormats.td
+++ b/lib/Target/SystemZ/SystemZInstrFormats.td
@@ -28,6 +28,12 @@ class InstSystemZ<int size, dag outs, dag ins, string asmstr,
 
+  // Many register-based <INSN>R instructions have a memory-based <INSN>
+  // counterpart.  OpKey uniquely identifies <INSN>, while OpType is
+  // "reg" for <INSN>R and "mem" for <INSN>.
+  string OpKey = "";
+  string OpType = "none";
+
   // True if this instruction is a simple D(X,B) load of a register
   // (with no sign or zero extension).
   bit SimpleBDXLoad = 0;
@@ -46,11 +52,15 @@ class InstSystemZ<int size, dag outs, dag ins, string asmstr,
+  bits<5> AccessBytes = 0;
+
   let TSFlags{0} = SimpleBDXLoad;
   let TSFlags{1} = SimpleBDXStore;
   let TSFlags{2} = Has20BitOffset;
   let TSFlags{3} = HasIndex;
   let TSFlags{4} = Is128Bit;
+  let TSFlags{9-5} = AccessBytes;
 }
 
 //===----------------------------------------------------------------------===//
@@ -76,6 +86,14 @@ def getDisp20Opcode : InstrMapping {
   let ValueCols = [["20"]];
 }
 
+def getMemOpcode : InstrMapping {
+  let FilterClass = "InstSystemZ";
+  let RowFields = ["OpKey"];
+  let ColFields = ["OpType"];
+  let KeyCol = ["reg"];
+  let ValueCols = [["mem"]];
+}
+
 //===----------------------------------------------------------------------===//
 // Instruction formats
 //===----------------------------------------------------------------------===//
@@ -468,7 +486,7 @@ class InstSS<bits<8> op, dag outs, dag ins, string asmstr, list<dag> pattern>
 class InherentRRE<string mnemonic, bits<16> opcode, RegisterOperand cls,
                   dag src>
   : InstRRE<opcode, (outs cls:$R1), (ins),
-            mnemonic#"\t$R1",
+            mnemonic#"r\t$R1",
             [(set cls:$R1, src)]> {
   let R2 = 0;
 }
@@ -492,28 +510,38 @@ class StoreRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator,
 }
 
 class StoreRX<string mnemonic, bits<8> opcode, SDPatternOperator operator,
-              RegisterOperand cls, AddressingMode mode = bdxaddr12only>
+              RegisterOperand cls, bits<5> bytes,
+              AddressingMode mode = bdxaddr12only>
   : InstRX<opcode, (outs), (ins cls:$R1, mode:$XBD2),
            mnemonic#"\t$R1, $XBD2",
           [(operator cls:$R1, mode:$XBD2)]> {
+  let OpKey = mnemonic ## cls;
+  let OpType = "mem";
   let mayStore = 1;
+  let AccessBytes = bytes;
 }
 
 class StoreRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
-               RegisterOperand cls, AddressingMode mode = bdxaddr20only>
+               RegisterOperand cls, bits<5> bytes,
+               AddressingMode mode = bdxaddr20only>
   : InstRXY<opcode, (outs), (ins cls:$R1, mode:$XBD2),
             mnemonic#"\t$R1, $XBD2",
            [(operator cls:$R1, mode:$XBD2)]> {
+  let OpKey = mnemonic ## cls;
+  let OpType = "mem";
   let mayStore = 1;
+  let AccessBytes = bytes;
 }
 
 multiclass StoreRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode,
-                       SDPatternOperator operator, RegisterOperand cls> {
+                       SDPatternOperator operator, RegisterOperand cls,
+                       bits<5> bytes> {
   let DispKey = mnemonic ## #cls in {
     let DispSize = "12" in
-      def "" : StoreRX<mnemonic, rxOpcode, operator, cls, bdxaddr12pair>;
+      def "" : StoreRX<mnemonic, rxOpcode, operator, cls, bytes, bdxaddr12pair>;
     let DispSize = "20" in
-      def Y : StoreRXY<mnemonic#"y", rxyOpcode, operator, cls, bdxaddr20pair>;
+      def Y : StoreRXY<mnemonic#"y", rxyOpcode, operator, cls, bytes,
+                       bdxaddr20pair>;
   }
 }
 
@@ -560,19 +588,28 @@ multiclass StoreSIPair<string mnemonic, bits<8> siOpcode, bits<16> siyOpcode,
 
 class UnaryRR<string mnemonic, bits<8> opcode, SDPatternOperator operator,
               RegisterOperand cls1, RegisterOperand cls2>
   : InstRR<opcode, (outs cls1:$R1), (ins cls2:$R2),
-           mnemonic#"\t$R1, $R2",
-           [(set cls1:$R1, (operator cls2:$R2))]>;
+           mnemonic#"r\t$R1, $R2",
+           [(set cls1:$R1, (operator cls2:$R2))]> {
+  let OpKey = mnemonic ## cls1;
+  let OpType = "reg";
+}
 
 class UnaryRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
               RegisterOperand cls1, RegisterOperand cls2>
   : InstRRE<opcode, (outs cls1:$R1), (ins cls2:$R2),
-            mnemonic#"\t$R1, $R2",
-            [(set cls1:$R1, (operator cls2:$R2))]>;
+            mnemonic#"r\t$R1, $R2",
+            [(set cls1:$R1, (operator cls2:$R2))]> {
+  let OpKey = mnemonic ## cls1;
+  let OpType = "reg";
+}
 
 class UnaryRRF<string mnemonic, bits<16> opcode, RegisterOperand cls1,
                RegisterOperand cls2>
   : InstRRF<opcode, (outs cls1:$R1), (ins uimm8zx4:$R3, cls2:$R2),
-            mnemonic#"\t$R1, $R3, $R2", []>;
+            mnemonic#"r\t$R1, $R3, $R2", []> {
+  let OpKey = mnemonic ## cls1;
+  let OpType = "reg";
+}
 
 class UnaryRI<string mnemonic, bits<12> opcode, SDPatternOperator operator,
               RegisterOperand cls, Immediate imm>
@@ -599,44 +636,59 @@ class UnaryRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator,
 }
 
 class UnaryRX<string mnemonic, bits<8> opcode, SDPatternOperator operator,
-              RegisterOperand cls, AddressingMode mode = bdxaddr12only>
+              RegisterOperand cls, bits<5> bytes,
+              AddressingMode mode = bdxaddr12only>
   : InstRX<opcode, (outs cls:$R1), (ins mode:$XBD2),
            mnemonic#"\t$R1, $XBD2",
           [(set cls:$R1, (operator mode:$XBD2))]> {
+  let OpKey = mnemonic ## cls;
+  let OpType = "mem";
   let mayLoad = 1;
+  let AccessBytes = bytes;
 }
 
 class UnaryRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
-               RegisterOperand cls>
+               RegisterOperand cls, bits<5> bytes>
   : InstRXE<opcode, (outs cls:$R1), (ins bdxaddr12only:$XBD2),
             mnemonic#"\t$R1, $XBD2",
            [(set cls:$R1, (operator bdxaddr12only:$XBD2))]> {
+  let OpKey = mnemonic ## cls;
+  let OpType = "mem";
   let mayLoad = 1;
+  let AccessBytes = bytes;
 }
 
 class UnaryRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
-               RegisterOperand cls, AddressingMode mode = bdxaddr20only>
+               RegisterOperand cls, bits<5> bytes,
+               AddressingMode mode = bdxaddr20only>
   : InstRXY<opcode, (outs cls:$R1), (ins mode:$XBD2),
             mnemonic#"\t$R1, $XBD2",
            [(set cls:$R1, (operator mode:$XBD2))]> {
+  let OpKey = mnemonic ## cls;
+  let OpType = "mem";
   let mayLoad = 1;
+  let AccessBytes = bytes;
 }
 
 multiclass UnaryRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode,
-                       SDPatternOperator operator, RegisterOperand cls> {
+                       SDPatternOperator operator, RegisterOperand cls,
+                       bits<5> bytes> {
   let DispKey = mnemonic ## #cls in {
     let DispSize = "12" in
-      def "" : UnaryRX<mnemonic, rxOpcode, operator, cls, bdxaddr12pair>;
+      def "" : UnaryRX<mnemonic, rxOpcode, operator, cls, bytes, bdxaddr12pair>;
    let DispSize = "20" in
-      def Y : UnaryRXY<mnemonic#"y", rxyOpcode, operator, cls, bdxaddr20pair>;
+      def Y : UnaryRXY<mnemonic#"y", rxyOpcode, operator, cls, bytes,
+                       bdxaddr20pair>;
  }
 }
 
 class BinaryRR<string mnemonic, bits<8> opcode, SDPatternOperator operator,
               RegisterOperand cls1, RegisterOperand cls2>
   : InstRR<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2),
-           mnemonic#"\t$R1, $R2",
+           mnemonic#"r\t$R1, $R2",
            [(set cls1:$R1, (operator cls1:$R1src, cls2:$R2))]> {
+  let OpKey = mnemonic ## cls1;
+  let OpType = "reg";
   let Constraints = "$R1 = $R1src";
   let DisableEncoding = "$R1src";
 }
@@ -644,8 +696,10 @@ class BinaryRR<string mnemonic, bits<8> opcode, SDPatternOperator operator,
 class BinaryRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
                 RegisterOperand cls1, RegisterOperand cls2>
   : InstRRE<opcode, (outs cls1:$R1), (ins cls1:$R1src, cls2:$R2),
-            mnemonic#"\t$R1, $R2",
+            mnemonic#"r\t$R1, $R2",
             [(set cls1:$R1, (operator cls1:$R1src, cls2:$R2))]> {
+  let OpKey = mnemonic ## cls1;
+  let OpType = "reg";
   let Constraints = "$R1 = $R1src";
   let DisableEncoding = "$R1src";
 }
@@ -653,8 +707,11 @@ class BinaryRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
 class BinaryRRF<string mnemonic, bits<16> opcode, SDPatternOperator operator,
                 RegisterOperand cls1, RegisterOperand cls2>
   : InstRRF<opcode, (outs cls1:$R1), (ins cls1:$R3, cls2:$R2),
-            mnemonic#"\t$R1, $R3, $R2",
-            [(set cls1:$R1, (operator cls1:$R3, cls2:$R2))]>;
+            mnemonic#"r\t$R1, $R3, $R2",
+            [(set cls1:$R1, (operator cls1:$R3, cls2:$R2))]> {
+  let OpKey = mnemonic ## cls1;
+  let OpType = "reg";
+}
 
 class BinaryRI<string mnemonic, bits<12> opcode, SDPatternOperator operator,
                RegisterOperand cls, Immediate imm>
@@ -675,46 +732,56 @@ class BinaryRIL<string mnemonic, bits<12> opcode, SDPatternOperator operator,
 }
 
 class BinaryRX<string mnemonic, bits<8> opcode, SDPatternOperator operator,
-               RegisterOperand cls, SDPatternOperator load,
+               RegisterOperand cls, SDPatternOperator load, bits<5> bytes,
                AddressingMode mode = bdxaddr12only>
   : InstRX<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$XBD2),
            mnemonic#"\t$R1, $XBD2",
          [(set cls:$R1, (operator cls:$R1src, (load mode:$XBD2)))]> {
+  let OpKey = mnemonic ## cls;
+  let OpType = "mem";
   let Constraints = "$R1 = $R1src";
   let DisableEncoding = "$R1src";
   let mayLoad = 1;
+  let AccessBytes = bytes;
 }
 
 class BinaryRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
-                RegisterOperand cls, SDPatternOperator load>
+                RegisterOperand cls, SDPatternOperator load, bits<5> bytes>
   : InstRXE<opcode, (outs cls:$R1), (ins cls:$R1src, bdxaddr12only:$XBD2),
             mnemonic#"\t$R1, $XBD2",
            [(set cls:$R1, (operator cls:$R1src,
                                     (load bdxaddr12only:$XBD2)))]> {
+  let OpKey = mnemonic ## cls;
+  let OpType = "mem";
   let Constraints = "$R1 = $R1src";
   let DisableEncoding = "$R1src";
   let mayLoad = 1;
+  let AccessBytes = bytes;
 }
 
 class BinaryRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
-                RegisterOperand cls, SDPatternOperator load,
+                RegisterOperand cls, SDPatternOperator load, bits<5> bytes,
                 AddressingMode mode = bdxaddr20only>
   : InstRXY<opcode, (outs cls:$R1), (ins cls:$R1src, mode:$XBD2),
             mnemonic#"\t$R1, $XBD2",
           [(set cls:$R1, (operator cls:$R1src, (load mode:$XBD2)))]> {
+  let OpKey = mnemonic ## cls;
+  let OpType = "mem";
   let Constraints = "$R1 = $R1src";
   let DisableEncoding = "$R1src";
   let mayLoad = 1;
+  let AccessBytes = bytes;
 }
 
 multiclass BinaryRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode,
                         SDPatternOperator operator, RegisterOperand cls,
-                        SDPatternOperator load> {
+                        SDPatternOperator load, bits<5> bytes> {
   let DispKey = mnemonic ## #cls in {
     let DispSize = "12" in
-      def "" : BinaryRX<mnemonic, rxOpcode, operator, cls, load, bdxaddr12pair>;
+      def "" : BinaryRX<mnemonic, rxOpcode, operator, cls, load, bytes,
+                        bdxaddr12pair>;
    let DispSize = "20" in
-      def Y : BinaryRXY<mnemonic#"y", rxyOpcode, operator, cls, load,
-                        bdxaddr20pair>;
+      def Y : BinaryRXY<mnemonic#"y", rxyOpcode, operator, cls, load, bytes,
+                        bdxaddr20pair>;
  }
 }
 
@@ -767,14 +834,20 @@ class ShiftRSY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
 
 class CompareRR<string mnemonic, bits<8> opcode, SDPatternOperator operator,
                 RegisterOperand cls1, RegisterOperand cls2>
   : InstRR<opcode, (outs), (ins cls1:$R1, cls2:$R2),
-           mnemonic#"\t$R1, $R2",
-           [(operator cls1:$R1, cls2:$R2)]>;
+           mnemonic#"r\t$R1, $R2",
+           [(operator cls1:$R1, cls2:$R2)]> {
+  let OpKey = mnemonic ## cls1;
+  let OpType = "reg";
+}
 
 class CompareRRE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
                  RegisterOperand cls1, RegisterOperand cls2>
   : InstRRE<opcode, (outs), (ins cls1:$R1, cls2:$R2),
-            mnemonic#"\t$R1, $R2",
-            [(operator cls1:$R1, cls2:$R2)]>;
+            mnemonic#"r\t$R1, $R2",
+            [(operator cls1:$R1, cls2:$R2)]> {
+  let OpKey = mnemonic ## cls1;
+  let OpType = "reg";
+}
 
 class CompareRI<string mnemonic, bits<12> opcode, SDPatternOperator operator,
                 RegisterOperand cls, Immediate imm>
@@ -801,41 +874,50 @@ class CompareRILPC<string mnemonic, bits<12> opcode, SDPatternOperator operator,
 }
 
 class CompareRX<string mnemonic, bits<8> opcode, SDPatternOperator operator,
-                RegisterOperand cls, SDPatternOperator load,
+                RegisterOperand cls, SDPatternOperator load, bits<5> bytes,
                 AddressingMode mode = bdxaddr12only>
   : InstRX<opcode, (outs), (ins cls:$R1, mode:$XBD2),
            mnemonic#"\t$R1, $XBD2",
          [(operator cls:$R1, (load mode:$XBD2))]> {
+  let OpKey = mnemonic ## cls;
+  let OpType = "mem";
   let mayLoad = 1;
+  let AccessBytes = bytes;
 }
 
 class CompareRXE<string mnemonic, bits<16> opcode, SDPatternOperator operator,
-                 RegisterOperand cls, SDPatternOperator load>
+                 RegisterOperand cls, SDPatternOperator load, bits<5> bytes>
   : InstRXE<opcode, (outs), (ins cls:$R1, bdxaddr12only:$XBD2),
             mnemonic#"\t$R1, $XBD2",
            [(operator cls:$R1, (load bdxaddr12only:$XBD2))]> {
+  let OpKey = mnemonic ## cls;
+  let OpType = "mem";
   let mayLoad = 1;
+  let AccessBytes = bytes;
 }
 
 class CompareRXY<string mnemonic, bits<16> opcode, SDPatternOperator operator,
-                 RegisterOperand cls, SDPatternOperator load,
+                 RegisterOperand cls, SDPatternOperator load, bits<5> bytes,
                  AddressingMode mode = bdxaddr20only>
   : InstRXY<opcode, (outs), (ins cls:$R1, mode:$XBD2),
             mnemonic#"\t$R1, $XBD2",
           [(operator cls:$R1, (load mode:$XBD2))]> {
+  let OpKey = mnemonic ## cls;
+  let OpType = "mem";
   let mayLoad = 1;
+  let AccessBytes = bytes;
 }
 
 multiclass CompareRXPair<string mnemonic, bits<8> rxOpcode, bits<16> rxyOpcode,
                          SDPatternOperator operator, RegisterOperand cls,
-                         SDPatternOperator load> {
+                         SDPatternOperator load, bits<5> bytes> {
   let DispKey = mnemonic ## #cls in {
     let DispSize = "12" in
       def "" : CompareRX<mnemonic, rxOpcode, operator, cls,
-                         load, bdxaddr12pair>;
+                         load, bytes, bdxaddr12pair>;
    let DispSize = "20" in
      def Y : CompareRXY<mnemonic#"y", rxyOpcode, operator, cls,
-                         load, bdxaddr20pair>;
+                         load, bytes, bdxaddr20pair>;
  }
 }
 
@@ -880,22 +962,27 @@ multiclass CompareSIPair<string mnemonic, bits<8> siOpcode, bits<16> siyOpcode,
 
 class TernaryRRD<string mnemonic, bits<16> opcode,
                  SDPatternOperator operator, RegisterOperand cls>
   : InstRRD<opcode, (outs cls:$R1), (ins cls:$R1src, cls:$R3, cls:$R2),
-            mnemonic#"\t$R1, $R3, $R2",
+            mnemonic#"r\t$R1, $R3, $R2",
             [(set cls:$R1, (operator cls:$R1src, cls:$R3, cls:$R2))]> {
+  let OpKey = mnemonic ## cls;
+  let OpType = "reg";
   let Constraints = "$R1 = $R1src";
   let DisableEncoding = "$R1src";
 }
 
 class TernaryRXF<string mnemonic, bits<16> opcode, SDPatternOperator operator,
-                 RegisterOperand cls, SDPatternOperator load>
+                 RegisterOperand cls, SDPatternOperator load, bits<5> bytes>
   : InstRXF<opcode, (outs cls:$R1),
            (ins cls:$R1src, cls:$R3, bdxaddr12only:$XBD2),
            mnemonic#"\t$R1, $R3, $XBD2",
          [(set cls:$R1, (operator cls:$R1src, cls:$R3,
                                   (load bdxaddr12only:$XBD2)))]> {
+  let OpKey = mnemonic ## cls;
+  let OpType = "mem";
   let Constraints = "$R1 = $R1src";
   let DisableEncoding = "$R1src";
   let mayLoad = 1;
+  let AccessBytes = bytes;
 }
 
 class CmpSwapRS<string mnemonic, bits<8> opcode, SDPatternOperator operator,
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.cpp b/lib/Target/SystemZ/SystemZInstrInfo.cpp
index e9829d59a68..16207b33b67 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -397,6 +397,30 @@ SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
     }
   }
 
+  // If the spilled operand is the final one, try to change <INSN>R
+  // into <INSN>.
+  int MemOpcode = SystemZ::getMemOpcode(MI->getOpcode());
+  if (MemOpcode >= 0) {
+    unsigned NumOps = MI->getNumExplicitOperands();
+    if (OpNum == NumOps - 1) {
+      const MCInstrDesc &MemDesc = get(MemOpcode);
+      uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
+      assert(AccessBytes != 0 && "Size of access should be known");
+      assert(AccessBytes <= Size && "Access outside the frame index");
+      uint64_t Offset = Size - AccessBytes;
+      MachineMemOperand *FrameMMO = getFrameMMO(MF, FrameIndex, Offset,
+                                                MachineMemOperand::MOLoad);
+      MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode));
+      for (unsigned I = 0; I < OpNum; ++I)
+        MIB.addOperand(MI->getOperand(I));
+      MIB.addFrameIndex(FrameIndex).addImm(Offset);
+      if (MemDesc.TSFlags & SystemZII::HasIndex)
+        MIB.addReg(0);
+      MIB.addMemOperand(FrameMMO);
+      return MIB;
+    }
+  }
+
   return 0;
 }
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.h b/lib/Target/SystemZ/SystemZInstrInfo.h
index 8d9a3eaacff..11d486cafba 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.h
+++ b/lib/Target/SystemZ/SystemZInstrInfo.h
@@ -32,8 +32,14 @@ namespace SystemZII {
     SimpleBDXStore = (1 << 1),
     Has20BitOffset = (1 << 2),
     HasIndex = (1 << 3),
-    Is128Bit = (1 << 4)
+    Is128Bit = (1 << 4),
+    AccessSizeMask = (31 << 5),
+    AccessSizeShift = 5
   };
+  static inline unsigned getAccessSize(unsigned int Flags) {
+    return (Flags & AccessSizeMask) >> AccessSizeShift;
+  }
+
   // SystemZ MachineOperand target flags.
   enum {
     // Masks out the bits for the access model.
diff --git a/lib/Target/SystemZ/SystemZInstrInfo.td b/lib/Target/SystemZ/SystemZInstrInfo.td
index 44b28fd0b60..6b74220a6ef 100644
--- a/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -217,8 +217,8 @@ def AsmBASR : InstRR<0x0D, (outs), (ins GR64:$R1, ADDR64:$R2),
 
 // Register moves.
 let neverHasSideEffects = 1 in {
-  def LR : UnaryRR <"lr", 0x18, null_frag, GR32, GR32>;
-  def LGR : UnaryRRE<"lgr", 0xB904, null_frag, GR64, GR64>;
+  def LR : UnaryRR <"l", 0x18, null_frag, GR32, GR32>;
+  def LGR : UnaryRRE<"lg", 0xB904, null_frag, GR64, GR64>;
 }
 
 // Immediate moves.
@@ -242,8 +242,8 @@ let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1,
 
 // Register loads.
 let canFoldAsLoad = 1, SimpleBDXLoad = 1 in {
-  defm L : UnaryRXPair<"l", 0x58, 0xE358, load, GR32>;
-  def LG : UnaryRXY<"lg", 0xE304, load, GR64>;
+  defm L : UnaryRXPair<"l", 0x58, 0xE358, load, GR32, 4>;
+  def LG : UnaryRXY<"lg", 0xE304, load, GR64, 8>;
 
   // These instructions are split after register allocation, so we don't
   // want a custom inserter.
@@ -260,8 +260,8 @@ let canFoldAsLoad = 1 in {
 
 // Register stores.
 let SimpleBDXStore = 1 in {
   let isCodeGenOnly = 1 in
-    defm ST32 : StoreRXPair<"st", 0x50, 0xE350, store, GR32>;
-  def STG : StoreRXY<"stg", 0xE324, store, GR64>;
+    defm ST32 : StoreRXPair<"st", 0x50, 0xE350, store, GR32, 4>;
+  def STG : StoreRXY<"stg", 0xE324, store, GR64, 8>;
 
   // These instructions are split after register allocation, so we don't
   // want a custom inserter.
@@ -294,15 +294,15 @@ let mayLoad = 1, mayStore = 1 in
 
 // 32-bit extensions from registers.
 let neverHasSideEffects = 1 in {
-  def LBR : UnaryRRE<"lbr", 0xB926, sext8, GR32, GR32>;
-  def LHR : UnaryRRE<"lhr", 0xB927, sext16, GR32, GR32>;
+  def LBR : UnaryRRE<"lb", 0xB926, sext8, GR32, GR32>;
+  def LHR : UnaryRRE<"lh", 0xB927, sext16, GR32, GR32>;
 }
 
 // 64-bit extensions from registers.
 let neverHasSideEffects = 1 in {
-  def LGBR : UnaryRRE<"lgbr", 0xB906, sext8, GR64, GR64>;
-  def LGHR : UnaryRRE<"lghr", 0xB907, sext16, GR64, GR64>;
-  def LGFR : UnaryRRE<"lgfr", 0xB914, sext32, GR64, GR32>;
+  def LGBR : UnaryRRE<"lgb", 0xB906, sext8, GR64, GR64>;
+  def LGHR : UnaryRRE<"lgh", 0xB907, sext16, GR64, GR64>;
+  def LGFR : UnaryRRE<"lgf", 0xB914, sext32, GR64, GR32>;
 }
 
 // Match 32-to-64-bit sign extensions in which the source is already
@@ -311,14 +311,14 @@ def : Pat<(sext_inreg GR64:$src, i32),
           (LGFR (EXTRACT_SUBREG GR64:$src, subreg_32bit))>;
 
 // 32-bit extensions from memory.
-def LB : UnaryRXY<"lb", 0xE376, sextloadi8, GR32>;
-defm LH : UnaryRXPair<"lh", 0x48, 0xE378, sextloadi16, GR32>;
+def LB : UnaryRXY<"lb", 0xE376, sextloadi8, GR32, 1>;
+defm LH : UnaryRXPair<"lh", 0x48, 0xE378, sextloadi16, GR32, 2>;
 def LHRL : UnaryRILPC<"lhrl", 0xC45, aligned_sextloadi16, GR32>;
 
 // 64-bit extensions from memory.
-def LGB : UnaryRXY<"lgb", 0xE377, sextloadi8, GR64>;
-def LGH : UnaryRXY<"lgh", 0xE315, sextloadi16, GR64>;
-def LGF : UnaryRXY<"lgf", 0xE314, sextloadi32, GR64>;
+def LGB : UnaryRXY<"lgb", 0xE377, sextloadi8, GR64, 1>;
+def LGH : UnaryRXY<"lgh", 0xE315, sextloadi16, GR64, 2>;
+def LGF : UnaryRXY<"lgf", 0xE314, sextloadi32, GR64, 4>;
 def LGHRL : UnaryRILPC<"lghrl", 0xC44, aligned_sextloadi16, GR64>;
 def LGFRL : UnaryRILPC<"lgfrl", 0xC4C, aligned_sextloadi32, GR64>;
 
@@ -339,15 +339,15 @@ def : Pat<(i64 (extloadi32 bdxaddr20only:$src)), (LGF bdxaddr20only:$src)>;
 
 // 32-bit extensions from registers.
 let neverHasSideEffects = 1 in {
-  def LLCR : UnaryRRE<"llcr", 0xB994, zext8, GR32, GR32>;
-  def LLHR : UnaryRRE<"llhr", 0xB995, zext16, GR32, GR32>;
+  def LLCR : UnaryRRE<"llc", 0xB994, zext8, GR32, GR32>;
+  def LLHR : UnaryRRE<"llh", 0xB995, zext16, GR32, GR32>;
 }
 
 // 64-bit extensions from registers.
 let neverHasSideEffects = 1 in {
-  def LLGCR : UnaryRRE<"llgcr", 0xB984, zext8, GR64, GR64>;
-  def LLGHR : UnaryRRE<"llghr", 0xB985, zext16, GR64, GR64>;
-  def LLGFR : UnaryRRE<"llgfr", 0xB916, zext32, GR64, GR32>;
+  def LLGCR : UnaryRRE<"llgc", 0xB984, zext8, GR64, GR64>;
+  def LLGHR : UnaryRRE<"llgh", 0xB985, zext16, GR64, GR64>;
+  def LLGFR : UnaryRRE<"llgf", 0xB916, zext32, GR64, GR32>;
 }
 
 // Match 32-to-64-bit zero extensions in which the source is already
@@ -356,14 +356,14 @@ def : Pat<(and GR64:$src, 0xffffffff),
           (LLGFR (EXTRACT_SUBREG GR64:$src, subreg_32bit))>;
 
 // 32-bit extensions from memory.
-def LLC : UnaryRXY<"llc", 0xE394, zextloadi8, GR32>;
-def LLH : UnaryRXY<"llh", 0xE395, zextloadi16, GR32>;
+def LLC : UnaryRXY<"llc", 0xE394, zextloadi8, GR32, 1>;
+def LLH : UnaryRXY<"llh", 0xE395, zextloadi16, GR32, 2>;
 def LLHRL : UnaryRILPC<"llhrl", 0xC42, aligned_zextloadi16, GR32>;
 
 // 64-bit extensions from memory.
-def LLGC : UnaryRXY<"llgc", 0xE390, zextloadi8, GR64>;
-def LLGH : UnaryRXY<"llgh", 0xE391, zextloadi16, GR64>;
-def LLGF : UnaryRXY<"llgf", 0xE316, zextloadi32, GR64>;
+def LLGC : UnaryRXY<"llgc", 0xE390, zextloadi8, GR64, 1>;
+def LLGH : UnaryRXY<"llgh", 0xE391, zextloadi16, GR64, 2>;
+def LLGF : UnaryRXY<"llgf", 0xE316, zextloadi32, GR64, 4>;
 def LLGHRL : UnaryRILPC<"llghrl", 0xC46, aligned_zextloadi16, GR64>;
 def LLGFRL : UnaryRILPC<"llgfrl", 0xC4E, aligned_zextloadi32, GR64>;
 
@@ -377,16 +377,16 @@ def : Pat<(i32 (trunc GR64:$src)),
 
 // Truncations of 32-bit registers to memory.
 let isCodeGenOnly = 1 in {
-  defm STC32 : StoreRXPair<"stc", 0x42, 0xE372, truncstorei8, GR32>;
-  defm STH32 : StoreRXPair<"sth", 0x40, 0xE370, truncstorei16, GR32>;
+  defm STC32 : StoreRXPair<"stc", 0x42, 0xE372, truncstorei8, GR32, 1>;
+  defm STH32 : StoreRXPair<"sth", 0x40, 0xE370, truncstorei16, GR32, 2>;
   def STHRL32 : StoreRILPC<"sthrl", 0xC47, aligned_truncstorei16, GR32>;
 }
 
 // Truncations of 64-bit registers to memory.
-defm STC : StoreRXPair<"stc", 0x42, 0xE372, truncstorei8, GR64>;
-defm STH : StoreRXPair<"sth", 0x40, 0xE370, truncstorei16, GR64>;
+defm STC : StoreRXPair<"stc", 0x42, 0xE372, truncstorei8, GR64, 1>;
+defm STH : StoreRXPair<"sth", 0x40, 0xE370, truncstorei16, GR64, 2>;
 def STHRL : StoreRILPC<"sthrl", 0xC47, aligned_truncstorei16, GR64>;
-defm ST : StoreRXPair<"st", 0x50, 0xE350, truncstorei32, GR64>;
+defm ST : StoreRXPair<"st", 0x50, 0xE350, truncstorei32, GR64, 4>;
 def STRL : StoreRILPC<"strl", 0xC4F, aligned_truncstorei32, GR64>;
 
 //===----------------------------------------------------------------------===//
@@ -405,18 +405,19 @@ def STMG : StoreMultipleRSY<"stmg", 0xEB24, GR64>;
 
 // Byte-swapping register moves.
 let neverHasSideEffects = 1 in {
-  def LRVR : UnaryRRE<"lrvr", 0xB91F, bswap, GR32, GR32>;
-  def LRVGR : UnaryRRE<"lrvgr", 0xB90F, bswap, GR64, GR64>;
+  def LRVR : UnaryRRE<"lrv", 0xB91F, bswap, GR32, GR32>;
+  def LRVGR : UnaryRRE<"lrvg", 0xB90F, bswap, GR64, GR64>;
 }
 
 // Byte-swapping loads.  Unlike normal loads, these instructions are
 // allowed to access storage more than once.
-def LRV : UnaryRXY<"lrv", 0xE31E, loadu, GR32>;
-def LRVG : UnaryRXY<"lrvg", 0xE30F, loadu, GR64>;
+def LRV : UnaryRXY<"lrv", 0xE31E, loadu, GR32, 4>;
+def LRVG : UnaryRXY<"lrvg", 0xE30F, loadu, GR64, 8>;
 
 // Likewise byte-swapping stores.
-def STRV : StoreRXY<"strv", 0xE33E, storeu, GR32>;
-def STRVG : StoreRXY<"strvg", 0xE32F, storeu, GR64>;
+def STRV : StoreRXY<"strv", 0xE33E, storeu, GR32, 4>;
+def STRVG : StoreRXY<"strvg", 0xE32F, storeu,
+                     GR64, 8>;
 
 //===----------------------------------------------------------------------===//
 // Load address instructions
 //===----------------------------------------------------------------------===//
@@ -449,9 +450,9 @@ let neverHasSideEffects = 1, isAsCheapAsAMove = 1, isMoveImm = 1,
 //===----------------------------------------------------------------------===//
 
 let Defs = [CC] in {
-  def LCR : UnaryRR <"lcr", 0x13, ineg, GR32, GR32>;
-  def LCGR : UnaryRRE<"lcgr", 0xB903, ineg, GR64, GR64>;
-  def LCGFR : UnaryRRE<"lcgfr", 0xB913, null_frag, GR64, GR32>;
+  def LCR : UnaryRR <"lc", 0x13, ineg, GR32, GR32>;
+  def LCGR : UnaryRRE<"lcg", 0xB903, ineg, GR64, GR64>;
+  def LCGFR : UnaryRRE<"lcgf", 0xB913, null_frag, GR64, GR32>;
 }
 defm : SXU<ineg, LCGFR>;
 
@@ -460,8 +461,8 @@ defm : SXU<ineg, LCGFR>;
 //===----------------------------------------------------------------------===//
 
 let isCodeGenOnly = 1 in
-  defm IC32 : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR32, zextloadi8>;
-defm IC : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR64, zextloadi8>;
+  defm IC32 : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR32, zextloadi8, 1>;
+defm IC : BinaryRXPair<"ic", 0x43, 0xE373, inserti8, GR64, zextloadi8, 1>;
 
 defm : InsertMem<"inserti8", IC32, GR32, zextloadi8, bdxaddr12pair>;
 defm : InsertMem<"inserti8", IC32Y, GR32, zextloadi8, bdxaddr20pair>;
 
@@ -506,10 +507,10 @@ def : Pat<(or (zext32 GR32:$src), imm64hf32:$imm),
 
 let Defs = [CC] in {
   // Addition of a register.
   let isCommutable = 1 in {
-    def AR : BinaryRR <"ar", 0x1A, add, GR32, GR32>;
-    def AGR : BinaryRRE<"agr", 0xB908, add, GR64, GR64>;
+    def AR : BinaryRR <"a", 0x1A, add, GR32, GR32>;
+    def AGR : BinaryRRE<"ag", 0xB908, add, GR64, GR64>;
   }
-  def AGFR : BinaryRRE<"agfr", 0xB918, null_frag, GR64, GR32>;
+  def AGFR : BinaryRRE<"agf", 0xB918, null_frag, GR64, GR32>;
 
   // Addition of signed 16-bit immediates.
   def AHI : BinaryRI<"ahi", 0xA7A, add, GR32, imm32sx16>;
@@ -520,10 +521,10 @@ let Defs = [CC] in {
   def AGFI : BinaryRIL<"agfi", 0xC28, add, GR64, imm64sx32>;
 
   // Addition of memory.
-  defm AH : BinaryRXPair<"ah", 0x4A, 0xE37A, add, GR32, sextloadi16>;
-  defm A : BinaryRXPair<"a", 0x5A, 0xE35A, add, GR32, load>;
-  def AGF : BinaryRXY<"agf", 0xE318, add, GR64, sextloadi32>;
-  def AG : BinaryRXY<"ag", 0xE308, add, GR64, load>;
+  defm AH : BinaryRXPair<"ah", 0x4A, 0xE37A, add, GR32, sextloadi16, 2>;
+  defm A : BinaryRXPair<"a", 0x5A, 0xE35A, add, GR32, load, 4>;
+  def AGF : BinaryRXY<"agf", 0xE318, add, GR64, sextloadi32, 4>;
+  def AG : BinaryRXY<"ag", 0xE308, add, GR64, load, 8>;
 
   // Addition to memory.
   def ASI : BinarySIY<"asi", 0xEB6A, add, imm32sx8>;
@@ -535,31 +536,31 @@ defm : SXB<add, GR64, AGFR>;
 let Defs = [CC] in {
   // Addition of a register.
   let isCommutable = 1 in {
-    def ALR : BinaryRR <"alr", 0x1E, addc, GR32, GR32>;
-    def ALGR : BinaryRRE<"algr", 0xB90A, addc, GR64, GR64>;
+    def ALR : BinaryRR <"al", 0x1E, addc, GR32, GR32>;
+    def ALGR : BinaryRRE<"alg", 0xB90A, addc, GR64, GR64>;
   }
-  def ALGFR : BinaryRRE<"algfr", 0xB91A, null_frag, GR64, GR32>;
+  def ALGFR : BinaryRRE<"algf", 0xB91A, null_frag, GR64, GR32>;
 
   // Addition of unsigned 32-bit immediates.
   def ALFI : BinaryRIL<"alfi", 0xC2B, addc, GR32, uimm32>;
   def ALGFI : BinaryRIL<"algfi", 0xC2A, addc, GR64, imm64zx32>;
 
   // Addition of memory.
-  defm AL : BinaryRXPair<"al", 0x5E, 0xE35E, addc, GR32, load>;
-  def ALGF : BinaryRXY<"algf", 0xE31A, addc, GR64, zextloadi32>;
-  def ALG : BinaryRXY<"alg", 0xE30A, addc, GR64, load>;
+  defm AL : BinaryRXPair<"al", 0x5E, 0xE35E, addc, GR32, load, 4>;
+  def ALGF : BinaryRXY<"algf", 0xE31A, addc, GR64, zextloadi32, 4>;
+  def ALG : BinaryRXY<"alg", 0xE30A, addc, GR64, load, 8>;
 }
 defm : ZXB<addc, GR64, ALGFR>;
 
 // Addition producing and using a carry.
 let Defs = [CC], Uses = [CC] in {
   // Addition of a register.
-  def ALCR : BinaryRRE<"alcr", 0xB998, adde, GR32, GR32>;
-  def ALCGR : BinaryRRE<"alcgr", 0xB988, adde, GR64, GR64>;
+  def ALCR : BinaryRRE<"alc", 0xB998, adde, GR32, GR32>;
+  def ALCGR : BinaryRRE<"alcg", 0xB988, adde, GR64, GR64>;
 
   // Addition of memory.
-  def ALC : BinaryRXY<"alc", 0xE398, adde, GR32, load>;
-  def ALCG : BinaryRXY<"alcg", 0xE388, adde, GR64, load>;
+  def ALC : BinaryRXY<"alc", 0xE398, adde, GR32, load, 4>;
+  def ALCG : BinaryRXY<"alcg", 0xE388, adde, GR64, load, 8>;
 }
 
 //===----------------------------------------------------------------------===//
@@ -570,24 +571,24 @@ let Defs = [CC], Uses = [CC] in {
 // add-immediate instruction instead.
 let Defs = [CC] in {
   // Subtraction of a register.
-  def SR : BinaryRR <"sr", 0x1B, sub, GR32, GR32>;
-  def SGFR : BinaryRRE<"sgfr", 0xB919, null_frag, GR64, GR32>;
-  def SGR : BinaryRRE<"sgr", 0xB909, sub, GR64, GR64>;
+  def SR : BinaryRR <"s", 0x1B, sub, GR32, GR32>;
+  def SGFR : BinaryRRE<"sgf", 0xB919, null_frag, GR64, GR32>;
+  def SGR : BinaryRRE<"sg", 0xB909, sub, GR64, GR64>;
 
   // Subtraction of memory.
-  defm SH : BinaryRXPair<"sh", 0x4B, 0xE37B, sub, GR32, sextloadi16>;
-  defm S : BinaryRXPair<"s", 0x5B, 0xE35B, sub, GR32, load>;
-  def SGF : BinaryRXY<"sgf", 0xE319, sub, GR64, sextloadi32>;
-  def SG : BinaryRXY<"sg", 0xE309, sub, GR64, load>;
+  defm SH : BinaryRXPair<"sh", 0x4B, 0xE37B, sub, GR32, sextloadi16, 2>;
+  defm S : BinaryRXPair<"s", 0x5B, 0xE35B, sub, GR32, load, 4>;
+  def SGF : BinaryRXY<"sgf", 0xE319, sub, GR64, sextloadi32, 4>;
+  def SG : BinaryRXY<"sg", 0xE309, sub, GR64, load, 8>;
 }
 defm : SXB<sub, GR64, SGFR>;
 
 // Subtraction producing a carry.
 let Defs = [CC] in {
   // Subtraction of a register.
-  def SLR : BinaryRR <"slr", 0x1F, subc, GR32, GR32>;
-  def SLGFR : BinaryRRE<"slgfr", 0xB91B, null_frag, GR64, GR32>;
-  def SLGR : BinaryRRE<"slgr", 0xB90B, subc, GR64, GR64>;
+  def SLR : BinaryRR <"sl", 0x1F, subc, GR32, GR32>;
+  def SLGFR : BinaryRRE<"slgf", 0xB91B, null_frag, GR64, GR32>;
+  def SLGR : BinaryRRE<"slg", 0xB90B, subc, GR64, GR64>;
 
   // Subtraction of unsigned 32-bit immediates.  These don't match
   // subc because we prefer addc for constants.
@@ -595,21 +596,21 @@ let Defs = [CC] in {
   def SLGFI : BinaryRIL<"slgfi", 0xC24, null_frag, GR64, imm64zx32>;
 
   // Subtraction of memory.
-  defm SL : BinaryRXPair<"sl", 0x5F, 0xE35F, subc, GR32, load>;
-  def SLGF : BinaryRXY<"slgf", 0xE31B, subc, GR64, zextloadi32>;
-  def SLG : BinaryRXY<"slg", 0xE30B, subc, GR64, load>;
+  defm SL : BinaryRXPair<"sl", 0x5F, 0xE35F, subc, GR32, load, 4>;
+  def SLGF : BinaryRXY<"slgf", 0xE31B, subc, GR64, zextloadi32, 4>;
+  def SLG : BinaryRXY<"slg", 0xE30B, subc, GR64, load, 8>;
 }
 defm : ZXB<subc, GR64, SLGFR>;
 
 // Subtraction producing and using a carry.
 let Defs = [CC], Uses = [CC] in {
   // Subtraction of a register.
-  def SLBR : BinaryRRE<"slbr", 0xB999, sube, GR32, GR32>;
-  def SLGBR : BinaryRRE<"slbgr", 0xB989, sube, GR64, GR64>;
+  def SLBR : BinaryRRE<"slb", 0xB999, sube, GR32, GR32>;
+  def SLGBR : BinaryRRE<"slbg", 0xB989, sube, GR64, GR64>;
 
   // Subtraction of memory.
-  def SLB : BinaryRXY<"slb", 0xE399, sube, GR32, load>;
-  def SLBG : BinaryRXY<"slbg", 0xE389, sube, GR64, load>;
+  def SLB : BinaryRXY<"slb", 0xE399, sube, GR32, load, 4>;
+  def SLBG : BinaryRXY<"slbg", 0xE389, sube, GR64, load, 8>;
 }
 
 //===----------------------------------------------------------------------===//
@@ -619,8 +620,8 @@ let Defs = [CC], Uses = [CC] in {
 let Defs = [CC] in {
   // ANDs of a register.
   let isCommutable = 1 in {
-    def NR : BinaryRR <"nr", 0x14, and, GR32, GR32>;
-    def NGR : BinaryRRE<"ngr", 0xB980, and, GR64, GR64>;
+    def NR : BinaryRR <"n", 0x14, and, GR32, GR32>;
+    def NGR : BinaryRRE<"ng", 0xB980, and, GR64, GR64>;
   }
 
   // ANDs of a 16-bit immediate, leaving other bits unaffected.
@@ -640,8 +641,8 @@ let Defs = [CC] in {
   def NIHF : BinaryRIL<"nihf", 0xC0A, and, GR64, imm64hf32c>;
 
   // ANDs of memory.
-  defm N : BinaryRXPair<"n", 0x54, 0xE354, and, GR32, load>;
-  def NG : BinaryRXY<"ng", 0xE380, and, GR64, load>;
+  defm N : BinaryRXPair<"n", 0x54, 0xE354, and, GR32, load, 4>;
+  def NG : BinaryRXY<"ng", 0xE380, and, GR64, load, 8>;
 
   // AND to memory
   defm NI : BinarySIPair<"ni", 0x94, 0xEB54, null_frag, uimm8>;
@@ -656,8 +657,8 @@ defm : RMWIByte<and, bdaddr20pair, NIY>;
 let Defs = [CC] in {
   // ORs of a register.
   let isCommutable = 1 in {
-    def OR : BinaryRR <"or", 0x16, or, GR32, GR32>;
-    def OGR : BinaryRRE<"ogr", 0xB981, or, GR64, GR64>;
+    def OR : BinaryRR <"o", 0x16, or, GR32, GR32>;
+    def OGR : BinaryRRE<"og", 0xB981, or, GR64, GR64>;
   }
 
   // ORs of a 16-bit immediate, leaving other bits unaffected.
@@ -677,8 +678,8 @@ let Defs = [CC] in {
   def OIHF : BinaryRIL<"oihf", 0xC0C, or, GR64, imm64hf32>;
 
   // ORs of memory.
-  defm O : BinaryRXPair<"o", 0x56, 0xE356, or, GR32, load>;
-  def OG : BinaryRXY<"og", 0xE381, or, GR64, load>;
+  defm O : BinaryRXPair<"o", 0x56, 0xE356, or, GR32, load, 4>;
+  def OG : BinaryRXY<"og", 0xE381, or, GR64, load, 8>;
 
   // OR to memory
   defm OI : BinarySIPair<"oi", 0x96, 0xEB56, null_frag, uimm8>;
@@ -693,8 +694,8 @@ defm : RMWIByte<or, bdaddr20pair, OIY>;
 let Defs = [CC] in {
   // XORs of a register.
   let isCommutable = 1 in {
-    def XR : BinaryRR <"xr", 0x17, xor, GR32, GR32>;
-    def XGR : BinaryRRE<"xgr", 0xB982, xor, GR64, GR64>;
+    def XR : BinaryRR <"x", 0x17, xor, GR32, GR32>;
+    def XGR : BinaryRRE<"xg", 0xB982, xor, GR64, GR64>;
  }
 
   // XORs of a 32-bit immediate, leaving other bits unaffected.
@@ -704,8 +705,8 @@ let Defs = [CC] in {
   def XIHF : BinaryRIL<"xihf", 0xC06, xor, GR64, imm64hf32>;
 
   // XORs of memory.
-  defm X : BinaryRXPair<"x",0x57, 0xE357, xor, GR32, load>;
-  def XG : BinaryRXY<"xg", 0xE382, xor, GR64, load>;
+  defm X : BinaryRXPair<"x",0x57, 0xE357, xor, GR32, load, 4>;
+  def XG : BinaryRXY<"xg", 0xE382, xor, GR64, load, 8>;
 
   // XOR to memory
   defm XI : BinarySIPair<"xi", 0x97, 0xEB57, null_frag, uimm8>;
@@ -719,10 +720,10 @@ defm : RMWIByte<xor, bdaddr20pair, XIY>;
 
 // Multiplication of a register.
 let isCommutable = 1 in {
-  def MSR : BinaryRRE<"msr", 0xB252, mul, GR32, GR32>;
-  def MSGR : BinaryRRE<"msgr", 0xB90C, mul, GR64, GR64>;
+  def MSR : BinaryRRE<"ms", 0xB252, mul, GR32, GR32>;
+  def MSGR : BinaryRRE<"msg", 0xB90C, mul, GR64, GR64>;
 }
-def MSGFR : BinaryRRE<"msgfr", 0xB91C, null_frag, GR64, GR32>;
+def MSGFR : BinaryRRE<"msgf", 0xB91C, null_frag, GR64, GR32>;
 defm : SXB<mul, GR64, MSGFR>;
 
 // Multiplication of a signed 16-bit immediate.
@@ -734,32 +735,32 @@ def MSGFI : BinaryRIL<"msgfi", 0xC20, mul, GR64, imm64sx32>;
 
 // Multiplication of memory.
-defm MH : BinaryRXPair<"mh", 0x4C, 0xE37C, mul, GR32, sextloadi16>;
-defm MS : BinaryRXPair<"ms", 0x71, 0xE351, mul, GR32, load>;
-def MSGF : BinaryRXY<"msgf", 0xE31C, mul, GR64, sextloadi32>;
-def MSG : BinaryRXY<"msg", 0xE30C, mul, GR64, load>;
+defm MH : BinaryRXPair<"mh", 0x4C, 0xE37C, mul, GR32, sextloadi16, 2>;
+defm MS : BinaryRXPair<"ms", 0x71, 0xE351, mul, GR32, load, 4>;
+def MSGF : BinaryRXY<"msgf", 0xE31C, mul, GR64, sextloadi32, 4>;
+def MSG : BinaryRXY<"msg", 0xE30C, mul, GR64, load, 8>;
 
 // Multiplication of a register, producing two results.
-def MLGR : BinaryRRE<"mlgr", 0xB986, z_umul_lohi64, GR128, GR64>;
+def MLGR : BinaryRRE<"mlg", 0xB986, z_umul_lohi64, GR128, GR64>;
 
 // Multiplication of memory, producing two results.
-def MLG : BinaryRXY<"mlg", 0xE386, z_umul_lohi64, GR128, load>;
+def MLG : BinaryRXY<"mlg", 0xE386, z_umul_lohi64, GR128, load, 8>;
 
 //===----------------------------------------------------------------------===//
 // Division and remainder
 //===----------------------------------------------------------------------===//
 
 // Division and remainder, from registers.
-def DSGFR : BinaryRRE<"dsgfr", 0xB91D, z_sdivrem32, GR128, GR32>;
-def DSGR : BinaryRRE<"dsgr", 0xB90D, z_sdivrem64, GR128, GR64>;
-def DLR : BinaryRRE<"dlr", 0xB997, z_udivrem32, GR128, GR32>;
-def DLGR : BinaryRRE<"dlgr", 0xB987, z_udivrem64, GR128, GR64>;
+def DSGFR : BinaryRRE<"dsgf", 0xB91D, z_sdivrem32, GR128, GR32>;
+def DSGR : BinaryRRE<"dsg", 0xB90D, z_sdivrem64, GR128, GR64>;
+def DLR : BinaryRRE<"dl", 0xB997, z_udivrem32, GR128, GR32>;
+def DLGR : BinaryRRE<"dlg", 0xB987, z_udivrem64, GR128, GR64>;
 
 // Division and remainder, from memory.
-def DSGF : BinaryRXY<"dsgf", 0xE31D, z_sdivrem32, GR128, load>;
-def DSG : BinaryRXY<"dsg", 0xE30D, z_sdivrem64, GR128, load>;
-def DL : BinaryRXY<"dl", 0xE397, z_udivrem32, GR128, load>;
-def DLG : BinaryRXY<"dlg", 0xE387, z_udivrem64, GR128, load>;
+def DSGF : BinaryRXY<"dsgf", 0xE31D, z_sdivrem32, GR128, load, 4>;
+def DSG : BinaryRXY<"dsg", 0xE30D, z_sdivrem64, GR128, load, 8>;
+def DL : BinaryRXY<"dl", 0xE397, z_udivrem32, GR128, load, 4>;
+def DLG : BinaryRXY<"dlg", 0xE387, z_udivrem64, GR128, load, 8>;
 
 //===----------------------------------------------------------------------===//
 // Shifts
@@ -805,9 +806,9 @@ let Defs = [CC] in {
 
 // Signed comparisons.
 let Defs = [CC] in {
   // Comparison with a register.
-  def CR : CompareRR <"cr", 0x19, z_cmp, GR32, GR32>;
-  def CGFR : CompareRRE<"cgfr", 0xB930, null_frag, GR64, GR32>;
-  def CGR : CompareRRE<"cgr", 0xB920, z_cmp, GR64, GR64>;
+  def CR : CompareRR <"c", 0x19, z_cmp, GR32, GR32>;
+  def CGFR : CompareRRE<"cgf", 0xB930, null_frag, GR64, GR32>;
+  def CGR : CompareRRE<"cg", 0xB920, z_cmp, GR64, GR64>;
 
   // Comparison with a signed 16-bit immediate.
   def CHI : CompareRI<"chi", 0xA7E, z_cmp, GR32, imm32sx16>;
@@ -818,11 +819,11 @@ let Defs = [CC] in {
   def CGFI : CompareRIL<"cgfi", 0xC2C, z_cmp, GR64, imm64sx32>;
 
   // Comparison with memory.
-  defm CH : CompareRXPair<"ch", 0x49, 0xE379, z_cmp, GR32, sextloadi16>;
-  defm C : CompareRXPair<"c", 0x59, 0xE359, z_cmp, GR32, load>;
-  def CGH : CompareRXY<"cgh", 0xE334, z_cmp, GR64, sextloadi16>;
-  def CGF : CompareRXY<"cgf", 0xE330, z_cmp, GR64, sextloadi32>;
-  def CG : CompareRXY<"cg", 0xE320, z_cmp, GR64, load>;
+  defm CH : CompareRXPair<"ch", 0x49, 0xE379, z_cmp, GR32, sextloadi16, 2>;
+  defm C : CompareRXPair<"c", 0x59, 0xE359, z_cmp, GR32, load, 4>;
+  def CGH : CompareRXY<"cgh", 0xE334, z_cmp, GR64, sextloadi16, 2>;
+  def CGF : CompareRXY<"cgf", 0xE330, z_cmp, GR64, sextloadi32, 4>;
+  def CG : CompareRXY<"cg", 0xE320, z_cmp, GR64, load, 8>;
   def CHRL : CompareRILPC<"chrl", 0xC65, z_cmp, GR32, aligned_sextloadi16>;
   def CRL : CompareRILPC<"crl", 0xC6D, z_cmp, GR32, aligned_load>;
   def CGHRL : CompareRILPC<"cghrl", 0xC64, z_cmp, GR64, aligned_sextloadi16>;
@@ -839,18 +840,18 @@ defm : SXB<z_cmp, GR64, CGFR>;
 
 // Unsigned comparisons.
 let Defs = [CC] in {
   // Comparison with a register.
-  def CLR : CompareRR <"clr", 0x15, z_ucmp, GR32, GR32>;
-  def CLGFR : CompareRRE<"clgfr", 0xB931, null_frag, GR64, GR32>;
-  def CLGR : CompareRRE<"clgr", 0xB921, z_ucmp, GR64, GR64>;
+  def CLR : CompareRR <"cl", 0x15, z_ucmp, GR32, GR32>;
+  def CLGFR : CompareRRE<"clgf", 0xB931, null_frag, GR64, GR32>;
+  def CLGR : CompareRRE<"clg", 0xB921, z_ucmp, GR64, GR64>;
 
   // Comparison with a signed 32-bit immediate.
   def CLFI : CompareRIL<"clfi", 0xC2F, z_ucmp, GR32, uimm32>;
   def CLGFI : CompareRIL<"clgfi", 0xC2E, z_ucmp, GR64, imm64zx32>;
 
   // Comparison with memory.
- defm CL : CompareRXPair<"cl", 0x55, 0xE355, z_ucmp, GR32, load>; - def CLGF : CompareRXY<"clgf", 0xE331, z_ucmp, GR64, zextloadi32>; - def CLG : CompareRXY<"clg", 0xE321, z_ucmp, GR64, load>; + defm CL : CompareRXPair<"cl", 0x55, 0xE355, z_ucmp, GR32, load, 4>; + def CLGF : CompareRXY<"clgf", 0xE331, z_ucmp, GR64, zextloadi32, 4>; + def CLG : CompareRXY<"clg", 0xE321, z_ucmp, GR64, load, 8>; def CLHRL : CompareRILPC<"clhrl", 0xC67, z_ucmp, GR32, aligned_zextloadi16>; def CLRL : CompareRILPC<"clrl", 0xC6F, z_ucmp, GR32, @@ -1003,7 +1004,7 @@ def EAR : InstRRE<0xB24F, (outs GR32:$R1), (ins access_reg:$R2), // and the second giving a copy of the source with the leftmost one bit // cleared. We only use the first result here. let Defs = [CC] in { - def FLOGR : UnaryRRE<"flogr", 0xB983, null_frag, GR128, GR64>; + def FLOGR : UnaryRRE<"flog", 0xB983, null_frag, GR128, GR64>; } def : Pat<(ctlz GR64:$src), (EXTRACT_SUBREG (FLOGR GR64:$src), subreg_high)>; diff --git a/test/CodeGen/SystemZ/and-01.ll b/test/CodeGen/SystemZ/and-01.ll index 8dd106b7c01..0da13f9dd7c 100644 --- a/test/CodeGen/SystemZ/and-01.ll +++ b/test/CodeGen/SystemZ/and-01.ll @@ -2,6 +2,8 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s +declare i32 @foo() + ; Check NR. define i32 @f1(i32 %a, i32 %b) { ; CHECK: f1: @@ -127,3 +129,46 @@ define i32 @f11(i32 %a, i64 %src, i64 %index) { %and = and i32 %a, %b ret i32 %and } + +; Check that ANDs of spilled values can use N rather than NR. +define i32 @f12(i32 *%ptr0) { +; CHECK: f12: +; CHECK: brasl %r14, foo@PLT +; CHECK: n %r2, 16{{[04]}}(%r15) +; CHECK: br %r14 + %ptr1 = getelementptr i32 *%ptr0, i64 2 + %ptr2 = getelementptr i32 *%ptr0, i64 4 + %ptr3 = getelementptr i32 *%ptr0, i64 6 + %ptr4 = getelementptr i32 *%ptr0, i64 8 + %ptr5 = getelementptr i32 *%ptr0, i64 10 + %ptr6 = getelementptr i32 *%ptr0, i64 12 + %ptr7 = getelementptr i32 *%ptr0, i64 14 + %ptr8 = getelementptr i32 *%ptr0, i64 16 + %ptr9 = getelementptr i32 *%ptr0, i64 18 + + %val0 = load i32 *%ptr0 + %val1 = load i32 *%ptr1 + %val2 = load i32 *%ptr2 + %val3 = load i32 *%ptr3 + %val4 = load i32 *%ptr4 + %val5 = load i32 *%ptr5 + %val6 = load i32 *%ptr6 + %val7 = load i32 *%ptr7 + %val8 = load i32 *%ptr8 + %val9 = load i32 *%ptr9 + + %ret = call i32 @foo() + + %and0 = and i32 %ret, %val0 + %and1 = and i32 %and0, %val1 + %and2 = and i32 %and1, %val2 + %and3 = and i32 %and2, %val3 + %and4 = and i32 %and3, %val4 + %and5 = and i32 %and4, %val5 + %and6 = and i32 %and5, %val6 + %and7 = and i32 %and6, %val7 + %and8 = and i32 %and7, %val8 + %and9 = and i32 %and8, %val9 + + ret i32 %and9 +} diff --git a/test/CodeGen/SystemZ/and-03.ll b/test/CodeGen/SystemZ/and-03.ll index 3fe8d3cf3bf..172098befb9 100644 --- a/test/CodeGen/SystemZ/and-03.ll +++ b/test/CodeGen/SystemZ/and-03.ll @@ -2,6 +2,8 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s +declare i64 @foo() + ; Check NGR. define i64 @f1(i64 %a, i64 %b) { ; CHECK: f1: @@ -92,3 +94,46 @@ define i64 @f8(i64 %a, i64 %src, i64 %index) { %and = and i64 %a, %b ret i64 %and } + +; Check that ANDs of spilled values can use NG rather than NGR. 
+define i64 @f9(i64 *%ptr0) { +; CHECK: f9: +; CHECK: brasl %r14, foo@PLT +; CHECK: ng %r2, 160(%r15) +; CHECK: br %r14 + %ptr1 = getelementptr i64 *%ptr0, i64 2 + %ptr2 = getelementptr i64 *%ptr0, i64 4 + %ptr3 = getelementptr i64 *%ptr0, i64 6 + %ptr4 = getelementptr i64 *%ptr0, i64 8 + %ptr5 = getelementptr i64 *%ptr0, i64 10 + %ptr6 = getelementptr i64 *%ptr0, i64 12 + %ptr7 = getelementptr i64 *%ptr0, i64 14 + %ptr8 = getelementptr i64 *%ptr0, i64 16 + %ptr9 = getelementptr i64 *%ptr0, i64 18 + + %val0 = load i64 *%ptr0 + %val1 = load i64 *%ptr1 + %val2 = load i64 *%ptr2 + %val3 = load i64 *%ptr3 + %val4 = load i64 *%ptr4 + %val5 = load i64 *%ptr5 + %val6 = load i64 *%ptr6 + %val7 = load i64 *%ptr7 + %val8 = load i64 *%ptr8 + %val9 = load i64 *%ptr9 + + %ret = call i64 @foo() + + %and0 = and i64 %ret, %val0 + %and1 = and i64 %and0, %val1 + %and2 = and i64 %and1, %val2 + %and3 = and i64 %and2, %val3 + %and4 = and i64 %and3, %val4 + %and5 = and i64 %and4, %val5 + %and6 = and i64 %and5, %val6 + %and7 = and i64 %and6, %val7 + %and8 = and i64 %and7, %val8 + %and9 = and i64 %and8, %val9 + + ret i64 %and9 +} diff --git a/test/CodeGen/SystemZ/bswap-02.ll b/test/CodeGen/SystemZ/bswap-02.ll index 8b99077f4c1..e2ae011f173 100644 --- a/test/CodeGen/SystemZ/bswap-02.ll +++ b/test/CodeGen/SystemZ/bswap-02.ll @@ -97,3 +97,80 @@ define i32 @f8(i32 *%src) { %swapped = call i32 @llvm.bswap.i32(i32 %a) ret i32 %swapped } + +; Test a case where we spill the source of at least one LRVR. We want +; to use LRV if possible. +define void @f9(i32 *%ptr) { +; CHECK: f9: +; CHECK: lrv {{%r[0-9]+}}, 16{{[04]}}(%r15) +; CHECK: br %r14 + %val0 = load volatile i32 *%ptr + %val1 = load volatile i32 *%ptr + %val2 = load volatile i32 *%ptr + %val3 = load volatile i32 *%ptr + %val4 = load volatile i32 *%ptr + %val5 = load volatile i32 *%ptr + %val6 = load volatile i32 *%ptr + %val7 = load volatile i32 *%ptr + %val8 = load volatile i32 *%ptr + %val9 = load volatile i32 *%ptr + %val10 = load volatile i32 *%ptr + %val11 = load volatile i32 *%ptr + %val12 = load volatile i32 *%ptr + %val13 = load volatile i32 *%ptr + %val14 = load volatile i32 *%ptr + %val15 = load volatile i32 *%ptr + + %swapped0 = call i32 @llvm.bswap.i32(i32 %val0) + %swapped1 = call i32 @llvm.bswap.i32(i32 %val1) + %swapped2 = call i32 @llvm.bswap.i32(i32 %val2) + %swapped3 = call i32 @llvm.bswap.i32(i32 %val3) + %swapped4 = call i32 @llvm.bswap.i32(i32 %val4) + %swapped5 = call i32 @llvm.bswap.i32(i32 %val5) + %swapped6 = call i32 @llvm.bswap.i32(i32 %val6) + %swapped7 = call i32 @llvm.bswap.i32(i32 %val7) + %swapped8 = call i32 @llvm.bswap.i32(i32 %val8) + %swapped9 = call i32 @llvm.bswap.i32(i32 %val9) + %swapped10 = call i32 @llvm.bswap.i32(i32 %val10) + %swapped11 = call i32 @llvm.bswap.i32(i32 %val11) + %swapped12 = call i32 @llvm.bswap.i32(i32 %val12) + %swapped13 = call i32 @llvm.bswap.i32(i32 %val13) + %swapped14 = call i32 @llvm.bswap.i32(i32 %val14) + %swapped15 = call i32 @llvm.bswap.i32(i32 %val15) + + store volatile i32 %val0, i32 *%ptr + store volatile i32 %val1, i32 *%ptr + store volatile i32 %val2, i32 *%ptr + store volatile i32 %val3, i32 *%ptr + store volatile i32 %val4, i32 *%ptr + store volatile i32 %val5, i32 *%ptr + store volatile i32 %val6, i32 *%ptr + store volatile i32 %val7, i32 *%ptr + store volatile i32 %val8, i32 *%ptr + store volatile i32 %val9, i32 *%ptr + store volatile i32 %val10, i32 *%ptr + store volatile i32 %val11, i32 *%ptr + store volatile i32 %val12, i32 *%ptr + store volatile i32 %val13, i32 *%ptr + store 
volatile i32 %val14, i32 *%ptr + store volatile i32 %val15, i32 *%ptr + + store volatile i32 %swapped0, i32 *%ptr + store volatile i32 %swapped1, i32 *%ptr + store volatile i32 %swapped2, i32 *%ptr + store volatile i32 %swapped3, i32 *%ptr + store volatile i32 %swapped4, i32 *%ptr + store volatile i32 %swapped5, i32 *%ptr + store volatile i32 %swapped6, i32 *%ptr + store volatile i32 %swapped7, i32 *%ptr + store volatile i32 %swapped8, i32 *%ptr + store volatile i32 %swapped9, i32 *%ptr + store volatile i32 %swapped10, i32 *%ptr + store volatile i32 %swapped11, i32 *%ptr + store volatile i32 %swapped12, i32 *%ptr + store volatile i32 %swapped13, i32 *%ptr + store volatile i32 %swapped14, i32 *%ptr + store volatile i32 %swapped15, i32 *%ptr + + ret void +} diff --git a/test/CodeGen/SystemZ/bswap-03.ll b/test/CodeGen/SystemZ/bswap-03.ll index df6624e5b46..e3ccc3841e7 100644 --- a/test/CodeGen/SystemZ/bswap-03.ll +++ b/test/CodeGen/SystemZ/bswap-03.ll @@ -97,3 +97,80 @@ define i64 @f8(i64 *%src) { %swapped = call i64 @llvm.bswap.i64(i64 %a) ret i64 %swapped } + +; Test a case where we spill the source of at least one LRVGR. We want +; to use LRVG if possible. +define void @f9(i64 *%ptr) { +; CHECK: f9: +; CHECK: lrvg {{%r[0-9]+}}, 160(%r15) +; CHECK: br %r14 + %val0 = load volatile i64 *%ptr + %val1 = load volatile i64 *%ptr + %val2 = load volatile i64 *%ptr + %val3 = load volatile i64 *%ptr + %val4 = load volatile i64 *%ptr + %val5 = load volatile i64 *%ptr + %val6 = load volatile i64 *%ptr + %val7 = load volatile i64 *%ptr + %val8 = load volatile i64 *%ptr + %val9 = load volatile i64 *%ptr + %val10 = load volatile i64 *%ptr + %val11 = load volatile i64 *%ptr + %val12 = load volatile i64 *%ptr + %val13 = load volatile i64 *%ptr + %val14 = load volatile i64 *%ptr + %val15 = load volatile i64 *%ptr + + %swapped0 = call i64 @llvm.bswap.i64(i64 %val0) + %swapped1 = call i64 @llvm.bswap.i64(i64 %val1) + %swapped2 = call i64 @llvm.bswap.i64(i64 %val2) + %swapped3 = call i64 @llvm.bswap.i64(i64 %val3) + %swapped4 = call i64 @llvm.bswap.i64(i64 %val4) + %swapped5 = call i64 @llvm.bswap.i64(i64 %val5) + %swapped6 = call i64 @llvm.bswap.i64(i64 %val6) + %swapped7 = call i64 @llvm.bswap.i64(i64 %val7) + %swapped8 = call i64 @llvm.bswap.i64(i64 %val8) + %swapped9 = call i64 @llvm.bswap.i64(i64 %val9) + %swapped10 = call i64 @llvm.bswap.i64(i64 %val10) + %swapped11 = call i64 @llvm.bswap.i64(i64 %val11) + %swapped12 = call i64 @llvm.bswap.i64(i64 %val12) + %swapped13 = call i64 @llvm.bswap.i64(i64 %val13) + %swapped14 = call i64 @llvm.bswap.i64(i64 %val14) + %swapped15 = call i64 @llvm.bswap.i64(i64 %val15) + + store volatile i64 %val0, i64 *%ptr + store volatile i64 %val1, i64 *%ptr + store volatile i64 %val2, i64 *%ptr + store volatile i64 %val3, i64 *%ptr + store volatile i64 %val4, i64 *%ptr + store volatile i64 %val5, i64 *%ptr + store volatile i64 %val6, i64 *%ptr + store volatile i64 %val7, i64 *%ptr + store volatile i64 %val8, i64 *%ptr + store volatile i64 %val9, i64 *%ptr + store volatile i64 %val10, i64 *%ptr + store volatile i64 %val11, i64 *%ptr + store volatile i64 %val12, i64 *%ptr + store volatile i64 %val13, i64 *%ptr + store volatile i64 %val14, i64 *%ptr + store volatile i64 %val15, i64 *%ptr + + store volatile i64 %swapped0, i64 *%ptr + store volatile i64 %swapped1, i64 *%ptr + store volatile i64 %swapped2, i64 *%ptr + store volatile i64 %swapped3, i64 *%ptr + store volatile i64 %swapped4, i64 *%ptr + store volatile i64 %swapped5, i64 *%ptr + store volatile i64 %swapped6, i64 *%ptr + 
+  store volatile i64 %swapped7, i64 *%ptr
+  store volatile i64 %swapped8, i64 *%ptr
+  store volatile i64 %swapped9, i64 *%ptr
+  store volatile i64 %swapped10, i64 *%ptr
+  store volatile i64 %swapped11, i64 *%ptr
+  store volatile i64 %swapped12, i64 *%ptr
+  store volatile i64 %swapped13, i64 *%ptr
+  store volatile i64 %swapped14, i64 *%ptr
+  store volatile i64 %swapped15, i64 *%ptr
+
+  ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-add-01.ll b/test/CodeGen/SystemZ/fp-add-01.ll
index 7ce0777b887..c25820a4d85 100644
--- a/test/CodeGen/SystemZ/fp-add-01.ll
+++ b/test/CodeGen/SystemZ/fp-add-01.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare float @foo()
+
 ; Check register addition.
 define float @f1(float %f1, float %f2) {
 ; CHECK: f1:
@@ -69,3 +71,49 @@ define float @f6(float %f1, float *%base, i64 %index) {
   %res = fadd float %f1, %f2
   ret float %res
 }
+
+; Check that additions of spilled values can use AEB rather than AEBR.
+define float @f7(float *%ptr0) {
+; CHECK: f7:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: aeb %f0, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr float *%ptr0, i64 2
+  %ptr2 = getelementptr float *%ptr0, i64 4
+  %ptr3 = getelementptr float *%ptr0, i64 6
+  %ptr4 = getelementptr float *%ptr0, i64 8
+  %ptr5 = getelementptr float *%ptr0, i64 10
+  %ptr6 = getelementptr float *%ptr0, i64 12
+  %ptr7 = getelementptr float *%ptr0, i64 14
+  %ptr8 = getelementptr float *%ptr0, i64 16
+  %ptr9 = getelementptr float *%ptr0, i64 18
+  %ptr10 = getelementptr float *%ptr0, i64 20
+
+  %val0 = load float *%ptr0
+  %val1 = load float *%ptr1
+  %val2 = load float *%ptr2
+  %val3 = load float *%ptr3
+  %val4 = load float *%ptr4
+  %val5 = load float *%ptr5
+  %val6 = load float *%ptr6
+  %val7 = load float *%ptr7
+  %val8 = load float *%ptr8
+  %val9 = load float *%ptr9
+  %val10 = load float *%ptr10
+
+  %ret = call float @foo()
+
+  %add0 = fadd float %ret, %val0
+  %add1 = fadd float %add0, %val1
+  %add2 = fadd float %add1, %val2
+  %add3 = fadd float %add2, %val3
+  %add4 = fadd float %add3, %val4
+  %add5 = fadd float %add4, %val5
+  %add6 = fadd float %add5, %val6
+  %add7 = fadd float %add6, %val7
+  %add8 = fadd float %add7, %val8
+  %add9 = fadd float %add8, %val9
+  %add10 = fadd float %add9, %val10
+
+  ret float %add10
+}
diff --git a/test/CodeGen/SystemZ/fp-add-02.ll b/test/CodeGen/SystemZ/fp-add-02.ll
index 08eb90efbfa..58afc1323a4 100644
--- a/test/CodeGen/SystemZ/fp-add-02.ll
+++ b/test/CodeGen/SystemZ/fp-add-02.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare double @foo()
+
 ; Check register addition.
 define double @f1(double %f1, double %f2) {
 ; CHECK: f1:
@@ -69,3 +71,49 @@ define double @f6(double %f1, double *%base, i64 %index) {
   %res = fadd double %f1, %f2
   ret double %res
 }
+
+; Check that additions of spilled values can use ADB rather than ADBR.
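+; (A sketch of what the fold buys, assuming the reload comes from a spill
+; slot at 160(%r15): without folding, llc needs a separate load before the
+; register-register add,
+;     ld %f1, 160(%r15)
+;     adbr %f0, %f1
+; while the folded form is a single memory-operand instruction,
+;     adb %f0, 160(%r15)
+; The f7 tests force the spill by keeping eleven values live across the
+; call to @foo, more than the eight call-saved FPRs %f8-%f15.)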
+define double @f7(double *%ptr0) {
+; CHECK: f7:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: adb %f0, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr double *%ptr0, i64 2
+  %ptr2 = getelementptr double *%ptr0, i64 4
+  %ptr3 = getelementptr double *%ptr0, i64 6
+  %ptr4 = getelementptr double *%ptr0, i64 8
+  %ptr5 = getelementptr double *%ptr0, i64 10
+  %ptr6 = getelementptr double *%ptr0, i64 12
+  %ptr7 = getelementptr double *%ptr0, i64 14
+  %ptr8 = getelementptr double *%ptr0, i64 16
+  %ptr9 = getelementptr double *%ptr0, i64 18
+  %ptr10 = getelementptr double *%ptr0, i64 20
+
+  %val0 = load double *%ptr0
+  %val1 = load double *%ptr1
+  %val2 = load double *%ptr2
+  %val3 = load double *%ptr3
+  %val4 = load double *%ptr4
+  %val5 = load double *%ptr5
+  %val6 = load double *%ptr6
+  %val7 = load double *%ptr7
+  %val8 = load double *%ptr8
+  %val9 = load double *%ptr9
+  %val10 = load double *%ptr10
+
+  %ret = call double @foo()
+
+  %add0 = fadd double %ret, %val0
+  %add1 = fadd double %add0, %val1
+  %add2 = fadd double %add1, %val2
+  %add3 = fadd double %add2, %val3
+  %add4 = fadd double %add3, %val4
+  %add5 = fadd double %add4, %val5
+  %add6 = fadd double %add5, %val6
+  %add7 = fadd double %add6, %val7
+  %add8 = fadd double %add7, %val8
+  %add9 = fadd double %add8, %val9
+  %add10 = fadd double %add9, %val10
+
+  ret double %add10
+}
diff --git a/test/CodeGen/SystemZ/fp-cmp-01.ll b/test/CodeGen/SystemZ/fp-cmp-01.ll
index cb2a6be88dd..5aef57f6400 100644
--- a/test/CodeGen/SystemZ/fp-cmp-01.ll
+++ b/test/CodeGen/SystemZ/fp-cmp-01.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare float @foo()
+
 ; Check comparison with registers.
 define i64 @f1(i64 %a, i64 %b, float %f1, float %f2) {
 ; CHECK: f1:
@@ -87,3 +89,61 @@ define i64 @f6(i64 %a, i64 %b, float %f1, float *%base, i64 %index) {
   %res = select i1 %cond, i64 %a, i64 %b
   ret i64 %res
 }
+
+; Check that comparisons of spilled values can use CEB rather than CEBR.
+define float @f7(float *%ptr0) {
+; CHECK: f7:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: ceb {{%f[0-9]+}}, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr float *%ptr0, i64 2
+  %ptr2 = getelementptr float *%ptr0, i64 4
+  %ptr3 = getelementptr float *%ptr0, i64 6
+  %ptr4 = getelementptr float *%ptr0, i64 8
+  %ptr5 = getelementptr float *%ptr0, i64 10
+  %ptr6 = getelementptr float *%ptr0, i64 12
+  %ptr7 = getelementptr float *%ptr0, i64 14
+  %ptr8 = getelementptr float *%ptr0, i64 16
+  %ptr9 = getelementptr float *%ptr0, i64 18
+  %ptr10 = getelementptr float *%ptr0, i64 20
+
+  %val0 = load float *%ptr0
+  %val1 = load float *%ptr1
+  %val2 = load float *%ptr2
+  %val3 = load float *%ptr3
+  %val4 = load float *%ptr4
+  %val5 = load float *%ptr5
+  %val6 = load float *%ptr6
+  %val7 = load float *%ptr7
+  %val8 = load float *%ptr8
+  %val9 = load float *%ptr9
+  %val10 = load float *%ptr10
+
+  %ret = call float @foo()
+
+  %cmp0 = fcmp olt float %ret, %val0
+  %cmp1 = fcmp olt float %ret, %val1
+  %cmp2 = fcmp olt float %ret, %val2
+  %cmp3 = fcmp olt float %ret, %val3
+  %cmp4 = fcmp olt float %ret, %val4
+  %cmp5 = fcmp olt float %ret, %val5
+  %cmp6 = fcmp olt float %ret, %val6
+  %cmp7 = fcmp olt float %ret, %val7
+  %cmp8 = fcmp olt float %ret, %val8
+  %cmp9 = fcmp olt float %ret, %val9
+  %cmp10 = fcmp olt float %ret, %val10
+
+  %sel0 = select i1 %cmp0, float %ret, float 0.0
+  %sel1 = select i1 %cmp1, float %sel0, float 1.0
+  %sel2 = select i1 %cmp2, float %sel1, float 2.0
+  %sel3 = select i1 %cmp3, float %sel2, float 3.0
+  %sel4 = select i1 %cmp4, float %sel3, float 4.0
+  %sel5 = select i1 %cmp5, float %sel4, float 5.0
+  %sel6 = select i1 %cmp6, float %sel5, float 6.0
+  %sel7 = select i1 %cmp7, float %sel6, float 7.0
+  %sel8 = select i1 %cmp8, float %sel7, float 8.0
+  %sel9 = select i1 %cmp9, float %sel8, float 9.0
+  %sel10 = select i1 %cmp10, float %sel9, float 10.0
+
+  ret float %sel10
+}
diff --git a/test/CodeGen/SystemZ/fp-cmp-02.ll b/test/CodeGen/SystemZ/fp-cmp-02.ll
index 2987d501c76..c5bdd56037e 100644
--- a/test/CodeGen/SystemZ/fp-cmp-02.ll
+++ b/test/CodeGen/SystemZ/fp-cmp-02.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare double @foo()
+
 ; Check comparison with registers.
 define i64 @f1(i64 %a, i64 %b, double %f1, double %f2) {
 ; CHECK: f1:
@@ -87,3 +89,61 @@ define i64 @f6(i64 %a, i64 %b, double %f1, double *%base, i64 %index) {
   %res = select i1 %cond, i64 %a, i64 %b
   ret i64 %res
 }
+
+; Check that comparisons of spilled values can use CDB rather than CDBR.
+define double @f7(double *%ptr0) {
+; CHECK: f7:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: cdb {{%f[0-9]+}}, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr double *%ptr0, i64 2
+  %ptr2 = getelementptr double *%ptr0, i64 4
+  %ptr3 = getelementptr double *%ptr0, i64 6
+  %ptr4 = getelementptr double *%ptr0, i64 8
+  %ptr5 = getelementptr double *%ptr0, i64 10
+  %ptr6 = getelementptr double *%ptr0, i64 12
+  %ptr7 = getelementptr double *%ptr0, i64 14
+  %ptr8 = getelementptr double *%ptr0, i64 16
+  %ptr9 = getelementptr double *%ptr0, i64 18
+  %ptr10 = getelementptr double *%ptr0, i64 20
+
+  %val0 = load double *%ptr0
+  %val1 = load double *%ptr1
+  %val2 = load double *%ptr2
+  %val3 = load double *%ptr3
+  %val4 = load double *%ptr4
+  %val5 = load double *%ptr5
+  %val6 = load double *%ptr6
+  %val7 = load double *%ptr7
+  %val8 = load double *%ptr8
+  %val9 = load double *%ptr9
+  %val10 = load double *%ptr10
+
+  %ret = call double @foo()
+
+  %cmp0 = fcmp olt double %ret, %val0
+  %cmp1 = fcmp olt double %ret, %val1
+  %cmp2 = fcmp olt double %ret, %val2
+  %cmp3 = fcmp olt double %ret, %val3
+  %cmp4 = fcmp olt double %ret, %val4
+  %cmp5 = fcmp olt double %ret, %val5
+  %cmp6 = fcmp olt double %ret, %val6
+  %cmp7 = fcmp olt double %ret, %val7
+  %cmp8 = fcmp olt double %ret, %val8
+  %cmp9 = fcmp olt double %ret, %val9
+  %cmp10 = fcmp olt double %ret, %val10
+
+  %sel0 = select i1 %cmp0, double %ret, double 0.0
+  %sel1 = select i1 %cmp1, double %sel0, double 1.0
+  %sel2 = select i1 %cmp2, double %sel1, double 2.0
+  %sel3 = select i1 %cmp3, double %sel2, double 3.0
+  %sel4 = select i1 %cmp4, double %sel3, double 4.0
+  %sel5 = select i1 %cmp5, double %sel4, double 5.0
+  %sel6 = select i1 %cmp6, double %sel5, double 6.0
+  %sel7 = select i1 %cmp7, double %sel6, double 7.0
+  %sel8 = select i1 %cmp8, double %sel7, double 8.0
+  %sel9 = select i1 %cmp9, double %sel8, double 9.0
+  %sel10 = select i1 %cmp10, double %sel9, double 10.0
+
+  ret double %sel10
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-02.ll b/test/CodeGen/SystemZ/fp-conv-02.ll
index f284e1dc2ae..eb405da2dae 100644
--- a/test/CodeGen/SystemZ/fp-conv-02.ll
+++ b/test/CodeGen/SystemZ/fp-conv-02.ll
@@ -69,3 +69,84 @@ define double @f6(float *%base, i64 %index) {
   %res = fpext float %val to double
   ret double %res
 }
+
+; Test a case where we spill the source of at least one LDEBR. We want
+; to use LDEB if possible.
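+; (The recipe below, shared by all the conversion and sqrt tests: seventeen
+; volatile loads keep more values live than the sixteen FPRs, so at least
+; one source is spilled. The CHECK line then proves the reload and the
+; extension were merged into one LDEB from the spill slot, rather than an
+; LE reload followed by LDEBR.)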
+define void @f7(double *%ptr1, float *%ptr2) {
+; CHECK: f7:
+; CHECK: ldeb {{%f[0-9]+}}, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %val0 = load volatile float *%ptr2
+  %val1 = load volatile float *%ptr2
+  %val2 = load volatile float *%ptr2
+  %val3 = load volatile float *%ptr2
+  %val4 = load volatile float *%ptr2
+  %val5 = load volatile float *%ptr2
+  %val6 = load volatile float *%ptr2
+  %val7 = load volatile float *%ptr2
+  %val8 = load volatile float *%ptr2
+  %val9 = load volatile float *%ptr2
+  %val10 = load volatile float *%ptr2
+  %val11 = load volatile float *%ptr2
+  %val12 = load volatile float *%ptr2
+  %val13 = load volatile float *%ptr2
+  %val14 = load volatile float *%ptr2
+  %val15 = load volatile float *%ptr2
+  %val16 = load volatile float *%ptr2
+
+  %ext0 = fpext float %val0 to double
+  %ext1 = fpext float %val1 to double
+  %ext2 = fpext float %val2 to double
+  %ext3 = fpext float %val3 to double
+  %ext4 = fpext float %val4 to double
+  %ext5 = fpext float %val5 to double
+  %ext6 = fpext float %val6 to double
+  %ext7 = fpext float %val7 to double
+  %ext8 = fpext float %val8 to double
+  %ext9 = fpext float %val9 to double
+  %ext10 = fpext float %val10 to double
+  %ext11 = fpext float %val11 to double
+  %ext12 = fpext float %val12 to double
+  %ext13 = fpext float %val13 to double
+  %ext14 = fpext float %val14 to double
+  %ext15 = fpext float %val15 to double
+  %ext16 = fpext float %val16 to double
+
+  store volatile float %val0, float *%ptr2
+  store volatile float %val1, float *%ptr2
+  store volatile float %val2, float *%ptr2
+  store volatile float %val3, float *%ptr2
+  store volatile float %val4, float *%ptr2
+  store volatile float %val5, float *%ptr2
+  store volatile float %val6, float *%ptr2
+  store volatile float %val7, float *%ptr2
+  store volatile float %val8, float *%ptr2
+  store volatile float %val9, float *%ptr2
+  store volatile float %val10, float *%ptr2
+  store volatile float %val11, float *%ptr2
+  store volatile float %val12, float *%ptr2
+  store volatile float %val13, float *%ptr2
+  store volatile float %val14, float *%ptr2
+  store volatile float %val15, float *%ptr2
+  store volatile float %val16, float *%ptr2
+
+  store volatile double %ext0, double *%ptr1
+  store volatile double %ext1, double *%ptr1
+  store volatile double %ext2, double *%ptr1
+  store volatile double %ext3, double *%ptr1
+  store volatile double %ext4, double *%ptr1
+  store volatile double %ext5, double *%ptr1
+  store volatile double %ext6, double *%ptr1
+  store volatile double %ext7, double *%ptr1
+  store volatile double %ext8, double *%ptr1
+  store volatile double %ext9, double *%ptr1
+  store volatile double %ext10, double *%ptr1
+  store volatile double %ext11, double *%ptr1
+  store volatile double %ext12, double *%ptr1
+  store volatile double %ext13, double *%ptr1
+  store volatile double %ext14, double *%ptr1
+  store volatile double %ext15, double *%ptr1
+  store volatile double %ext16, double *%ptr1
+
+  ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-03.ll b/test/CodeGen/SystemZ/fp-conv-03.ll
index 703a141e3e1..963653c8abf 100644
--- a/test/CodeGen/SystemZ/fp-conv-03.ll
+++ b/test/CodeGen/SystemZ/fp-conv-03.ll
@@ -87,3 +87,84 @@ define void @f6(fp128 *%dst, float *%base, i64 %index) {
   store fp128 %res, fp128 *%dst
   ret void
 }
+
+; Test a case where we spill the source of at least one LXEBR. We want
+; to use LXEB if possible.
+define void @f7(fp128 *%ptr1, float *%ptr2) {
+; CHECK: f7:
+; CHECK: lxeb {{%f[0-9]+}}, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %val0 = load volatile float *%ptr2
+  %val1 = load volatile float *%ptr2
+  %val2 = load volatile float *%ptr2
+  %val3 = load volatile float *%ptr2
+  %val4 = load volatile float *%ptr2
+  %val5 = load volatile float *%ptr2
+  %val6 = load volatile float *%ptr2
+  %val7 = load volatile float *%ptr2
+  %val8 = load volatile float *%ptr2
+  %val9 = load volatile float *%ptr2
+  %val10 = load volatile float *%ptr2
+  %val11 = load volatile float *%ptr2
+  %val12 = load volatile float *%ptr2
+  %val13 = load volatile float *%ptr2
+  %val14 = load volatile float *%ptr2
+  %val15 = load volatile float *%ptr2
+  %val16 = load volatile float *%ptr2
+
+  %ext0 = fpext float %val0 to fp128
+  %ext1 = fpext float %val1 to fp128
+  %ext2 = fpext float %val2 to fp128
+  %ext3 = fpext float %val3 to fp128
+  %ext4 = fpext float %val4 to fp128
+  %ext5 = fpext float %val5 to fp128
+  %ext6 = fpext float %val6 to fp128
+  %ext7 = fpext float %val7 to fp128
+  %ext8 = fpext float %val8 to fp128
+  %ext9 = fpext float %val9 to fp128
+  %ext10 = fpext float %val10 to fp128
+  %ext11 = fpext float %val11 to fp128
+  %ext12 = fpext float %val12 to fp128
+  %ext13 = fpext float %val13 to fp128
+  %ext14 = fpext float %val14 to fp128
+  %ext15 = fpext float %val15 to fp128
+  %ext16 = fpext float %val16 to fp128
+
+  store volatile float %val0, float *%ptr2
+  store volatile float %val1, float *%ptr2
+  store volatile float %val2, float *%ptr2
+  store volatile float %val3, float *%ptr2
+  store volatile float %val4, float *%ptr2
+  store volatile float %val5, float *%ptr2
+  store volatile float %val6, float *%ptr2
+  store volatile float %val7, float *%ptr2
+  store volatile float %val8, float *%ptr2
+  store volatile float %val9, float *%ptr2
+  store volatile float %val10, float *%ptr2
+  store volatile float %val11, float *%ptr2
+  store volatile float %val12, float *%ptr2
+  store volatile float %val13, float *%ptr2
+  store volatile float %val14, float *%ptr2
+  store volatile float %val15, float *%ptr2
+  store volatile float %val16, float *%ptr2
+
+  store volatile fp128 %ext0, fp128 *%ptr1
+  store volatile fp128 %ext1, fp128 *%ptr1
+  store volatile fp128 %ext2, fp128 *%ptr1
+  store volatile fp128 %ext3, fp128 *%ptr1
+  store volatile fp128 %ext4, fp128 *%ptr1
+  store volatile fp128 %ext5, fp128 *%ptr1
+  store volatile fp128 %ext6, fp128 *%ptr1
+  store volatile fp128 %ext7, fp128 *%ptr1
+  store volatile fp128 %ext8, fp128 *%ptr1
+  store volatile fp128 %ext9, fp128 *%ptr1
+  store volatile fp128 %ext10, fp128 *%ptr1
+  store volatile fp128 %ext11, fp128 *%ptr1
+  store volatile fp128 %ext12, fp128 *%ptr1
+  store volatile fp128 %ext13, fp128 *%ptr1
+  store volatile fp128 %ext14, fp128 *%ptr1
+  store volatile fp128 %ext15, fp128 *%ptr1
+  store volatile fp128 %ext16, fp128 *%ptr1
+
+  ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-conv-04.ll b/test/CodeGen/SystemZ/fp-conv-04.ll
index b7b51669343..f8a66f8f13a 100644
--- a/test/CodeGen/SystemZ/fp-conv-04.ll
+++ b/test/CodeGen/SystemZ/fp-conv-04.ll
@@ -87,3 +87,84 @@ define void @f6(fp128 *%dst, double *%base, i64 %index) {
   store fp128 %res, fp128 *%dst
   ret void
 }
+
+; Test a case where we spill the source of at least one LXDBR. We want
+; to use LXDB if possible.
+define void @f7(fp128 *%ptr1, double *%ptr2) {
+; CHECK: f7:
+; CHECK: lxdb {{%f[0-9]+}}, 160(%r15)
+; CHECK: br %r14
+  %val0 = load volatile double *%ptr2
+  %val1 = load volatile double *%ptr2
+  %val2 = load volatile double *%ptr2
+  %val3 = load volatile double *%ptr2
+  %val4 = load volatile double *%ptr2
+  %val5 = load volatile double *%ptr2
+  %val6 = load volatile double *%ptr2
+  %val7 = load volatile double *%ptr2
+  %val8 = load volatile double *%ptr2
+  %val9 = load volatile double *%ptr2
+  %val10 = load volatile double *%ptr2
+  %val11 = load volatile double *%ptr2
+  %val12 = load volatile double *%ptr2
+  %val13 = load volatile double *%ptr2
+  %val14 = load volatile double *%ptr2
+  %val15 = load volatile double *%ptr2
+  %val16 = load volatile double *%ptr2
+
+  %ext0 = fpext double %val0 to fp128
+  %ext1 = fpext double %val1 to fp128
+  %ext2 = fpext double %val2 to fp128
+  %ext3 = fpext double %val3 to fp128
+  %ext4 = fpext double %val4 to fp128
+  %ext5 = fpext double %val5 to fp128
+  %ext6 = fpext double %val6 to fp128
+  %ext7 = fpext double %val7 to fp128
+  %ext8 = fpext double %val8 to fp128
+  %ext9 = fpext double %val9 to fp128
+  %ext10 = fpext double %val10 to fp128
+  %ext11 = fpext double %val11 to fp128
+  %ext12 = fpext double %val12 to fp128
+  %ext13 = fpext double %val13 to fp128
+  %ext14 = fpext double %val14 to fp128
+  %ext15 = fpext double %val15 to fp128
+  %ext16 = fpext double %val16 to fp128
+
+  store volatile double %val0, double *%ptr2
+  store volatile double %val1, double *%ptr2
+  store volatile double %val2, double *%ptr2
+  store volatile double %val3, double *%ptr2
+  store volatile double %val4, double *%ptr2
+  store volatile double %val5, double *%ptr2
+  store volatile double %val6, double *%ptr2
+  store volatile double %val7, double *%ptr2
+  store volatile double %val8, double *%ptr2
+  store volatile double %val9, double *%ptr2
+  store volatile double %val10, double *%ptr2
+  store volatile double %val11, double *%ptr2
+  store volatile double %val12, double *%ptr2
+  store volatile double %val13, double *%ptr2
+  store volatile double %val14, double *%ptr2
+  store volatile double %val15, double *%ptr2
+  store volatile double %val16, double *%ptr2
+
+  store volatile fp128 %ext0, fp128 *%ptr1
+  store volatile fp128 %ext1, fp128 *%ptr1
+  store volatile fp128 %ext2, fp128 *%ptr1
+  store volatile fp128 %ext3, fp128 *%ptr1
+  store volatile fp128 %ext4, fp128 *%ptr1
+  store volatile fp128 %ext5, fp128 *%ptr1
+  store volatile fp128 %ext6, fp128 *%ptr1
+  store volatile fp128 %ext7, fp128 *%ptr1
+  store volatile fp128 %ext8, fp128 *%ptr1
+  store volatile fp128 %ext9, fp128 *%ptr1
+  store volatile fp128 %ext10, fp128 *%ptr1
+  store volatile fp128 %ext11, fp128 *%ptr1
+  store volatile fp128 %ext12, fp128 *%ptr1
+  store volatile fp128 %ext13, fp128 *%ptr1
+  store volatile fp128 %ext14, fp128 *%ptr1
+  store volatile fp128 %ext15, fp128 *%ptr1
+  store volatile fp128 %ext16, fp128 *%ptr1
+
+  ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-div-01.ll b/test/CodeGen/SystemZ/fp-div-01.ll
index 080d45eb2bf..3e581235395 100644
--- a/test/CodeGen/SystemZ/fp-div-01.ll
+++ b/test/CodeGen/SystemZ/fp-div-01.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare float @foo()
+
 ; Check register division.
 define float @f1(float %f1, float %f2) {
 ; CHECK: f1:
@@ -69,3 +71,49 @@ define float @f6(float %f1, float *%base, i64 %index) {
   %res = fdiv float %f1, %f2
   ret float %res
 }
+
+; Check that divisions of spilled values can use DEB rather than DEBR.
+define float @f7(float *%ptr0) {
+; CHECK: f7:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: deb %f0, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr float *%ptr0, i64 2
+  %ptr2 = getelementptr float *%ptr0, i64 4
+  %ptr3 = getelementptr float *%ptr0, i64 6
+  %ptr4 = getelementptr float *%ptr0, i64 8
+  %ptr5 = getelementptr float *%ptr0, i64 10
+  %ptr6 = getelementptr float *%ptr0, i64 12
+  %ptr7 = getelementptr float *%ptr0, i64 14
+  %ptr8 = getelementptr float *%ptr0, i64 16
+  %ptr9 = getelementptr float *%ptr0, i64 18
+  %ptr10 = getelementptr float *%ptr0, i64 20
+
+  %val0 = load float *%ptr0
+  %val1 = load float *%ptr1
+  %val2 = load float *%ptr2
+  %val3 = load float *%ptr3
+  %val4 = load float *%ptr4
+  %val5 = load float *%ptr5
+  %val6 = load float *%ptr6
+  %val7 = load float *%ptr7
+  %val8 = load float *%ptr8
+  %val9 = load float *%ptr9
+  %val10 = load float *%ptr10
+
+  %ret = call float @foo()
+
+  %div0 = fdiv float %ret, %val0
+  %div1 = fdiv float %div0, %val1
+  %div2 = fdiv float %div1, %val2
+  %div3 = fdiv float %div2, %val3
+  %div4 = fdiv float %div3, %val4
+  %div5 = fdiv float %div4, %val5
+  %div6 = fdiv float %div5, %val6
+  %div7 = fdiv float %div6, %val7
+  %div8 = fdiv float %div7, %val8
+  %div9 = fdiv float %div8, %val9
+  %div10 = fdiv float %div9, %val10
+
+  ret float %div10
+}
diff --git a/test/CodeGen/SystemZ/fp-div-02.ll b/test/CodeGen/SystemZ/fp-div-02.ll
index c5cae15a824..31b1988b4c5 100644
--- a/test/CodeGen/SystemZ/fp-div-02.ll
+++ b/test/CodeGen/SystemZ/fp-div-02.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare double @foo()
+
 ; Check register division.
 define double @f1(double %f1, double %f2) {
 ; CHECK: f1:
@@ -69,3 +71,49 @@ define double @f6(double %f1, double *%base, i64 %index) {
   %res = fdiv double %f1, %f2
   ret double %res
 }
+
+; Check that divisions of spilled values can use DDB rather than DDBR.
+define double @f7(double *%ptr0) {
+; CHECK: f7:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: ddb %f0, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr double *%ptr0, i64 2
+  %ptr2 = getelementptr double *%ptr0, i64 4
+  %ptr3 = getelementptr double *%ptr0, i64 6
+  %ptr4 = getelementptr double *%ptr0, i64 8
+  %ptr5 = getelementptr double *%ptr0, i64 10
+  %ptr6 = getelementptr double *%ptr0, i64 12
+  %ptr7 = getelementptr double *%ptr0, i64 14
+  %ptr8 = getelementptr double *%ptr0, i64 16
+  %ptr9 = getelementptr double *%ptr0, i64 18
+  %ptr10 = getelementptr double *%ptr0, i64 20
+
+  %val0 = load double *%ptr0
+  %val1 = load double *%ptr1
+  %val2 = load double *%ptr2
+  %val3 = load double *%ptr3
+  %val4 = load double *%ptr4
+  %val5 = load double *%ptr5
+  %val6 = load double *%ptr6
+  %val7 = load double *%ptr7
+  %val8 = load double *%ptr8
+  %val9 = load double *%ptr9
+  %val10 = load double *%ptr10
+
+  %ret = call double @foo()
+
+  %div0 = fdiv double %ret, %val0
+  %div1 = fdiv double %div0, %val1
+  %div2 = fdiv double %div1, %val2
+  %div3 = fdiv double %div2, %val3
+  %div4 = fdiv double %div3, %val4
+  %div5 = fdiv double %div4, %val5
+  %div6 = fdiv double %div5, %val6
+  %div7 = fdiv double %div6, %val7
+  %div8 = fdiv double %div7, %val8
+  %div9 = fdiv double %div8, %val9
+  %div10 = fdiv double %div9, %val10
+
+  ret double %div10
+}
diff --git a/test/CodeGen/SystemZ/fp-mul-01.ll b/test/CodeGen/SystemZ/fp-mul-01.ll
index 68c78ee2da6..3e6428a0a97 100644
--- a/test/CodeGen/SystemZ/fp-mul-01.ll
+++ b/test/CodeGen/SystemZ/fp-mul-01.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare float @foo()
+
 ; Check register multiplication.
 define float @f1(float %f1, float %f2) {
 ; CHECK: f1:
@@ -69,3 +71,49 @@ define float @f6(float %f1, float *%base, i64 %index) {
   %res = fmul float %f1, %f2
   ret float %res
 }
+
+; Check that multiplications of spilled values can use MEEB rather than MEEBR.
+define float @f7(float *%ptr0) {
+; CHECK: f7:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: meeb %f0, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr float *%ptr0, i64 2
+  %ptr2 = getelementptr float *%ptr0, i64 4
+  %ptr3 = getelementptr float *%ptr0, i64 6
+  %ptr4 = getelementptr float *%ptr0, i64 8
+  %ptr5 = getelementptr float *%ptr0, i64 10
+  %ptr6 = getelementptr float *%ptr0, i64 12
+  %ptr7 = getelementptr float *%ptr0, i64 14
+  %ptr8 = getelementptr float *%ptr0, i64 16
+  %ptr9 = getelementptr float *%ptr0, i64 18
+  %ptr10 = getelementptr float *%ptr0, i64 20
+
+  %val0 = load float *%ptr0
+  %val1 = load float *%ptr1
+  %val2 = load float *%ptr2
+  %val3 = load float *%ptr3
+  %val4 = load float *%ptr4
+  %val5 = load float *%ptr5
+  %val6 = load float *%ptr6
+  %val7 = load float *%ptr7
+  %val8 = load float *%ptr8
+  %val9 = load float *%ptr9
+  %val10 = load float *%ptr10
+
+  %ret = call float @foo()
+
+  %mul0 = fmul float %ret, %val0
+  %mul1 = fmul float %mul0, %val1
+  %mul2 = fmul float %mul1, %val2
+  %mul3 = fmul float %mul2, %val3
+  %mul4 = fmul float %mul3, %val4
+  %mul5 = fmul float %mul4, %val5
+  %mul6 = fmul float %mul5, %val6
+  %mul7 = fmul float %mul6, %val7
+  %mul8 = fmul float %mul7, %val8
+  %mul9 = fmul float %mul8, %val9
+  %mul10 = fmul float %mul9, %val10
+
+  ret float %mul10
+}
diff --git a/test/CodeGen/SystemZ/fp-mul-02.ll b/test/CodeGen/SystemZ/fp-mul-02.ll
index ec51a4c1d67..632638958e3 100644
--- a/test/CodeGen/SystemZ/fp-mul-02.ll
+++ b/test/CodeGen/SystemZ/fp-mul-02.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare float @foo()
+
 ; Check register multiplication.
 define double @f1(float %f1, float %f2) {
 ; CHECK: f1:
@@ -81,3 +83,121 @@ define double @f6(float %f1, float *%base, i64 %index) {
   %res = fmul double %f1x, %f2x
   ret double %res
 }
+
+; Check that multiplications of spilled values can use MDEB rather than MDEBR.
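+; (MDEB/MDEBR are the widening multiplies: two 32-bit operands produce a
+; 64-bit product, which LLVM models as an fmul on fpext'd operands. That is
+; why the test below runs the accumulator through fpext/fmul/fptrunc chains
+; rather than plain float multiplies.)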
+define float @f7(float *%ptr0) {
+; CHECK: f7:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: mdeb %f0, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr float *%ptr0, i64 2
+  %ptr2 = getelementptr float *%ptr0, i64 4
+  %ptr3 = getelementptr float *%ptr0, i64 6
+  %ptr4 = getelementptr float *%ptr0, i64 8
+  %ptr5 = getelementptr float *%ptr0, i64 10
+  %ptr6 = getelementptr float *%ptr0, i64 12
+  %ptr7 = getelementptr float *%ptr0, i64 14
+  %ptr8 = getelementptr float *%ptr0, i64 16
+  %ptr9 = getelementptr float *%ptr0, i64 18
+  %ptr10 = getelementptr float *%ptr0, i64 20
+
+  %val0 = load float *%ptr0
+  %val1 = load float *%ptr1
+  %val2 = load float *%ptr2
+  %val3 = load float *%ptr3
+  %val4 = load float *%ptr4
+  %val5 = load float *%ptr5
+  %val6 = load float *%ptr6
+  %val7 = load float *%ptr7
+  %val8 = load float *%ptr8
+  %val9 = load float *%ptr9
+  %val10 = load float *%ptr10
+
+  %frob0 = fadd float %val0, %val0
+  %frob1 = fadd float %val1, %val1
+  %frob2 = fadd float %val2, %val2
+  %frob3 = fadd float %val3, %val3
+  %frob4 = fadd float %val4, %val4
+  %frob5 = fadd float %val5, %val5
+  %frob6 = fadd float %val6, %val6
+  %frob7 = fadd float %val7, %val7
+  %frob8 = fadd float %val8, %val8
+  %frob9 = fadd float %val9, %val9
+  %frob10 = fadd float %val9, %val10
+
+  store float %frob0, float *%ptr0
+  store float %frob1, float *%ptr1
+  store float %frob2, float *%ptr2
+  store float %frob3, float *%ptr3
+  store float %frob4, float *%ptr4
+  store float %frob5, float *%ptr5
+  store float %frob6, float *%ptr6
+  store float %frob7, float *%ptr7
+  store float %frob8, float *%ptr8
+  store float %frob9, float *%ptr9
+  store float %frob10, float *%ptr10
+
+  %ret = call float @foo()
+
+  %accext0 = fpext float %ret to double
+  %ext0 = fpext float %frob0 to double
+  %mul0 = fmul double %accext0, %ext0
+  %extra0 = fmul double %mul0, 1.01
+  %trunc0 = fptrunc double %extra0 to float
+
+  %accext1 = fpext float %trunc0 to double
+  %ext1 = fpext float %frob1 to double
+  %mul1 = fmul double %accext1, %ext1
+  %extra1 = fmul double %mul1, 1.11
+  %trunc1 = fptrunc double %extra1 to float
+
+  %accext2 = fpext float %trunc1 to double
+  %ext2 = fpext float %frob2 to double
+  %mul2 = fmul double %accext2, %ext2
+  %extra2 = fmul double %mul2, 1.21
+  %trunc2 = fptrunc double %extra2 to float
+
+  %accext3 = fpext float %trunc2 to double
+  %ext3 = fpext float %frob3 to double
+  %mul3 = fmul double %accext3, %ext3
+  %extra3 = fmul double %mul3, 1.31
+  %trunc3 = fptrunc double %extra3 to float
+
+  %accext4 = fpext float %trunc3 to double
+  %ext4 = fpext float %frob4 to double
+  %mul4 = fmul double %accext4, %ext4
+  %extra4 = fmul double %mul4, 1.41
+  %trunc4 = fptrunc double %extra4 to float
+
+  %accext5 = fpext float %trunc4 to double
+  %ext5 = fpext float %frob5 to double
+  %mul5 = fmul double %accext5, %ext5
+  %extra5 = fmul double %mul5, 1.51
+  %trunc5 = fptrunc double %extra5 to float
+
+  %accext6 = fpext float %trunc5 to double
+  %ext6 = fpext float %frob6 to double
+  %mul6 = fmul double %accext6, %ext6
+  %extra6 = fmul double %mul6, 1.61
+  %trunc6 = fptrunc double %extra6 to float
+
+  %accext7 = fpext float %trunc6 to double
+  %ext7 = fpext float %frob7 to double
+  %mul7 = fmul double %accext7, %ext7
+  %extra7 = fmul double %mul7, 1.71
+  %trunc7 = fptrunc double %extra7 to float
+
+  %accext8 = fpext float %trunc7 to double
+  %ext8 = fpext float %frob8 to double
+  %mul8 = fmul double %accext8, %ext8
+  %extra8 = fmul double %mul8, 1.81
+  %trunc8 = fptrunc double %extra8 to float
+
+  %accext9 = fpext float %trunc8 to double
+  %ext9 = fpext float %frob9 to double
+  %mul9 = fmul double %accext9, %ext9
+  %extra9 = fmul double %mul9, 1.91
+  %trunc9 = fptrunc double %extra9 to float
+
+  ret float %trunc9
+}
diff --git a/test/CodeGen/SystemZ/fp-mul-03.ll b/test/CodeGen/SystemZ/fp-mul-03.ll
index 9849247decc..5c5d230eb40 100644
--- a/test/CodeGen/SystemZ/fp-mul-03.ll
+++ b/test/CodeGen/SystemZ/fp-mul-03.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare double @foo()
+
 ; Check register multiplication.
 define double @f1(double %f1, double %f2) {
 ; CHECK: f1:
@@ -69,3 +71,49 @@ define double @f6(double %f1, double *%base, i64 %index) {
   %res = fmul double %f1, %f2
   ret double %res
 }
+
+; Check that multiplications of spilled values can use MDB rather than MDBR.
+define double @f7(double *%ptr0) {
+; CHECK: f7:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: mdb %f0, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr double *%ptr0, i64 2
+  %ptr2 = getelementptr double *%ptr0, i64 4
+  %ptr3 = getelementptr double *%ptr0, i64 6
+  %ptr4 = getelementptr double *%ptr0, i64 8
+  %ptr5 = getelementptr double *%ptr0, i64 10
+  %ptr6 = getelementptr double *%ptr0, i64 12
+  %ptr7 = getelementptr double *%ptr0, i64 14
+  %ptr8 = getelementptr double *%ptr0, i64 16
+  %ptr9 = getelementptr double *%ptr0, i64 18
+  %ptr10 = getelementptr double *%ptr0, i64 20
+
+  %val0 = load double *%ptr0
+  %val1 = load double *%ptr1
+  %val2 = load double *%ptr2
+  %val3 = load double *%ptr3
+  %val4 = load double *%ptr4
+  %val5 = load double *%ptr5
+  %val6 = load double *%ptr6
+  %val7 = load double *%ptr7
+  %val8 = load double *%ptr8
+  %val9 = load double *%ptr9
+  %val10 = load double *%ptr10
+
+  %ret = call double @foo()
+
+  %mul0 = fmul double %ret, %val0
+  %mul1 = fmul double %mul0, %val1
+  %mul2 = fmul double %mul1, %val2
+  %mul3 = fmul double %mul2, %val3
+  %mul4 = fmul double %mul3, %val4
+  %mul5 = fmul double %mul4, %val5
+  %mul6 = fmul double %mul5, %val6
+  %mul7 = fmul double %mul6, %val7
+  %mul8 = fmul double %mul7, %val8
+  %mul9 = fmul double %mul8, %val9
+  %mul10 = fmul double %mul9, %val10
+
+  ret double %mul10
+}
diff --git a/test/CodeGen/SystemZ/fp-mul-04.ll b/test/CodeGen/SystemZ/fp-mul-04.ll
index 712ead85cbd..c7f734ff818 100644
--- a/test/CodeGen/SystemZ/fp-mul-04.ll
+++ b/test/CodeGen/SystemZ/fp-mul-04.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare double @foo()
+
 ; Check register multiplication. "mxdbr %f0, %f2" is not valid from LLVM's
 ; point of view, because %f2 is the low register of the FP128 %f0. Pass the
 ; multiplier in %f4 instead.
@@ -101,3 +103,131 @@ define void @f6(double %f1, double *%base, i64 %index, fp128 *%dst) {
   store fp128 %res, fp128 *%dst
   ret void
 }
+
+; Check that multiplications of spilled values can use MXDB rather than MXDBR.
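+; (MXDB is the analogous widening form one level up: two 64-bit operands
+; produce a 128-bit product. Hence the fpext to fp128 of both multiplicands
+; in the test below, with the constant factors also arriving via fpext of a
+; double constant, since the pattern needs extended operands on both sides.)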
+define double @f7(double *%ptr0) {
+; CHECK: f7:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: mxdb %f0, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr double *%ptr0, i64 2
+  %ptr2 = getelementptr double *%ptr0, i64 4
+  %ptr3 = getelementptr double *%ptr0, i64 6
+  %ptr4 = getelementptr double *%ptr0, i64 8
+  %ptr5 = getelementptr double *%ptr0, i64 10
+  %ptr6 = getelementptr double *%ptr0, i64 12
+  %ptr7 = getelementptr double *%ptr0, i64 14
+  %ptr8 = getelementptr double *%ptr0, i64 16
+  %ptr9 = getelementptr double *%ptr0, i64 18
+  %ptr10 = getelementptr double *%ptr0, i64 20
+
+  %val0 = load double *%ptr0
+  %val1 = load double *%ptr1
+  %val2 = load double *%ptr2
+  %val3 = load double *%ptr3
+  %val4 = load double *%ptr4
+  %val5 = load double *%ptr5
+  %val6 = load double *%ptr6
+  %val7 = load double *%ptr7
+  %val8 = load double *%ptr8
+  %val9 = load double *%ptr9
+  %val10 = load double *%ptr10
+
+  %frob0 = fadd double %val0, %val0
+  %frob1 = fadd double %val1, %val1
+  %frob2 = fadd double %val2, %val2
+  %frob3 = fadd double %val3, %val3
+  %frob4 = fadd double %val4, %val4
+  %frob5 = fadd double %val5, %val5
+  %frob6 = fadd double %val6, %val6
+  %frob7 = fadd double %val7, %val7
+  %frob8 = fadd double %val8, %val8
+  %frob9 = fadd double %val9, %val9
+  %frob10 = fadd double %val9, %val10
+
+  store double %frob0, double *%ptr0
+  store double %frob1, double *%ptr1
+  store double %frob2, double *%ptr2
+  store double %frob3, double *%ptr3
+  store double %frob4, double *%ptr4
+  store double %frob5, double *%ptr5
+  store double %frob6, double *%ptr6
+  store double %frob7, double *%ptr7
+  store double %frob8, double *%ptr8
+  store double %frob9, double *%ptr9
+  store double %frob10, double *%ptr10
+
+  %ret = call double @foo()
+
+  %accext0 = fpext double %ret to fp128
+  %ext0 = fpext double %frob0 to fp128
+  %mul0 = fmul fp128 %accext0, %ext0
+  %const0 = fpext double 1.01 to fp128
+  %extra0 = fmul fp128 %mul0, %const0
+  %trunc0 = fptrunc fp128 %extra0 to double
+
+  %accext1 = fpext double %trunc0 to fp128
+  %ext1 = fpext double %frob1 to fp128
+  %mul1 = fmul fp128 %accext1, %ext1
+  %const1 = fpext double 1.11 to fp128
+  %extra1 = fmul fp128 %mul1, %const1
+  %trunc1 = fptrunc fp128 %extra1 to double
+
+  %accext2 = fpext double %trunc1 to fp128
+  %ext2 = fpext double %frob2 to fp128
+  %mul2 = fmul fp128 %accext2, %ext2
+  %const2 = fpext double 1.21 to fp128
+  %extra2 = fmul fp128 %mul2, %const2
+  %trunc2 = fptrunc fp128 %extra2 to double
+
+  %accext3 = fpext double %trunc2 to fp128
+  %ext3 = fpext double %frob3 to fp128
+  %mul3 = fmul fp128 %accext3, %ext3
+  %const3 = fpext double 1.31 to fp128
+  %extra3 = fmul fp128 %mul3, %const3
+  %trunc3 = fptrunc fp128 %extra3 to double
+
+  %accext4 = fpext double %trunc3 to fp128
+  %ext4 = fpext double %frob4 to fp128
+  %mul4 = fmul fp128 %accext4, %ext4
+  %const4 = fpext double 1.41 to fp128
+  %extra4 = fmul fp128 %mul4, %const4
+  %trunc4 = fptrunc fp128 %extra4 to double
+
+  %accext5 = fpext double %trunc4 to fp128
+  %ext5 = fpext double %frob5 to fp128
+  %mul5 = fmul fp128 %accext5, %ext5
+  %const5 = fpext double 1.51 to fp128
+  %extra5 = fmul fp128 %mul5, %const5
+  %trunc5 = fptrunc fp128 %extra5 to double
+
+  %accext6 = fpext double %trunc5 to fp128
+  %ext6 = fpext double %frob6 to fp128
+  %mul6 = fmul fp128 %accext6, %ext6
+  %const6 = fpext double 1.61 to fp128
+  %extra6 = fmul fp128 %mul6, %const6
+  %trunc6 = fptrunc fp128 %extra6 to double
+
+  %accext7 = fpext double %trunc6 to fp128
+  %ext7 = fpext double %frob7 to fp128
+  %mul7 = fmul fp128 %accext7, %ext7
+  %const7 = fpext double 1.71 to fp128
+  %extra7 = fmul fp128 %mul7, %const7
+  %trunc7 = fptrunc fp128 %extra7 to double
+
+  %accext8 = fpext double %trunc7 to fp128
+  %ext8 = fpext double %frob8 to fp128
+  %mul8 = fmul fp128 %accext8, %ext8
+  %const8 = fpext double 1.81 to fp128
+  %extra8 = fmul fp128 %mul8, %const8
+  %trunc8 = fptrunc fp128 %extra8 to double
+
+  %accext9 = fpext double %trunc8 to fp128
+  %ext9 = fpext double %frob9 to fp128
+  %mul9 = fmul fp128 %accext9, %ext9
+  %const9 = fpext double 1.91 to fp128
+  %extra9 = fmul fp128 %mul9, %const9
+  %trunc9 = fptrunc fp128 %extra9 to double
+
+  ret double %trunc9
+}
diff --git a/test/CodeGen/SystemZ/fp-sqrt-01.ll b/test/CodeGen/SystemZ/fp-sqrt-01.ll
index 7ed27f56d0d..faba390ea59 100644
--- a/test/CodeGen/SystemZ/fp-sqrt-01.ll
+++ b/test/CodeGen/SystemZ/fp-sqrt-01.ll
@@ -71,3 +71,84 @@ define float @f6(float *%base, i64 %index) {
   %res = call float @llvm.sqrt.f32(float %val)
   ret float %res
 }
+
+; Test a case where we spill the source of at least one SQEBR. We want
+; to use SQEB if possible.
+define void @f7(float *%ptr) {
+; CHECK: f7:
+; CHECK: sqeb {{%f[0-9]+}}, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %val0 = load volatile float *%ptr
+  %val1 = load volatile float *%ptr
+  %val2 = load volatile float *%ptr
+  %val3 = load volatile float *%ptr
+  %val4 = load volatile float *%ptr
+  %val5 = load volatile float *%ptr
+  %val6 = load volatile float *%ptr
+  %val7 = load volatile float *%ptr
+  %val8 = load volatile float *%ptr
+  %val9 = load volatile float *%ptr
+  %val10 = load volatile float *%ptr
+  %val11 = load volatile float *%ptr
+  %val12 = load volatile float *%ptr
+  %val13 = load volatile float *%ptr
+  %val14 = load volatile float *%ptr
+  %val15 = load volatile float *%ptr
+  %val16 = load volatile float *%ptr
+
+  %sqrt0 = call float @llvm.sqrt.f32(float %val0)
+  %sqrt1 = call float @llvm.sqrt.f32(float %val1)
+  %sqrt2 = call float @llvm.sqrt.f32(float %val2)
+  %sqrt3 = call float @llvm.sqrt.f32(float %val3)
+  %sqrt4 = call float @llvm.sqrt.f32(float %val4)
+  %sqrt5 = call float @llvm.sqrt.f32(float %val5)
+  %sqrt6 = call float @llvm.sqrt.f32(float %val6)
+  %sqrt7 = call float @llvm.sqrt.f32(float %val7)
+  %sqrt8 = call float @llvm.sqrt.f32(float %val8)
+  %sqrt9 = call float @llvm.sqrt.f32(float %val9)
+  %sqrt10 = call float @llvm.sqrt.f32(float %val10)
+  %sqrt11 = call float @llvm.sqrt.f32(float %val11)
+  %sqrt12 = call float @llvm.sqrt.f32(float %val12)
+  %sqrt13 = call float @llvm.sqrt.f32(float %val13)
+  %sqrt14 = call float @llvm.sqrt.f32(float %val14)
+  %sqrt15 = call float @llvm.sqrt.f32(float %val15)
+  %sqrt16 = call float @llvm.sqrt.f32(float %val16)
+
+  store volatile float %val0, float *%ptr
+  store volatile float %val1, float *%ptr
+  store volatile float %val2, float *%ptr
+  store volatile float %val3, float *%ptr
+  store volatile float %val4, float *%ptr
+  store volatile float %val5, float *%ptr
+  store volatile float %val6, float *%ptr
+  store volatile float %val7, float *%ptr
+  store volatile float %val8, float *%ptr
+  store volatile float %val9, float *%ptr
+  store volatile float %val10, float *%ptr
+  store volatile float %val11, float *%ptr
+  store volatile float %val12, float *%ptr
+  store volatile float %val13, float *%ptr
+  store volatile float %val14, float *%ptr
+  store volatile float %val15, float *%ptr
+  store volatile float %val16, float *%ptr
+
+  store volatile float %sqrt0, float *%ptr
+  store volatile float %sqrt1, float *%ptr
+  store volatile float %sqrt2, float *%ptr
+  store volatile float %sqrt3, float *%ptr
+  store volatile float %sqrt4, float *%ptr
+  store volatile float %sqrt5, float *%ptr
+  store volatile float %sqrt6, float *%ptr
+  store volatile float %sqrt7, float *%ptr
+  store volatile float %sqrt8, float *%ptr
+  store volatile float %sqrt9, float *%ptr
+  store volatile float %sqrt10, float *%ptr
+  store volatile float %sqrt11, float *%ptr
+  store volatile float %sqrt12, float *%ptr
+  store volatile float %sqrt13, float *%ptr
+  store volatile float %sqrt14, float *%ptr
+  store volatile float %sqrt15, float *%ptr
+  store volatile float %sqrt16, float *%ptr
+
+  ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-sqrt-02.ll b/test/CodeGen/SystemZ/fp-sqrt-02.ll
index 22a91ad2f4f..e90f7a369e9 100644
--- a/test/CodeGen/SystemZ/fp-sqrt-02.ll
+++ b/test/CodeGen/SystemZ/fp-sqrt-02.ll
@@ -71,3 +71,84 @@ define double @f6(double *%base, i64 %index) {
   %res = call double @llvm.sqrt.f64(double %val)
   ret double %res
 }
+
+; Test a case where we spill the source of at least one SQDBR. We want
+; to use SQDB if possible.
+define void @f7(double *%ptr) {
+; CHECK: f7:
+; CHECK: sqdb {{%f[0-9]+}}, 160(%r15)
+; CHECK: br %r14
+  %val0 = load volatile double *%ptr
+  %val1 = load volatile double *%ptr
+  %val2 = load volatile double *%ptr
+  %val3 = load volatile double *%ptr
+  %val4 = load volatile double *%ptr
+  %val5 = load volatile double *%ptr
+  %val6 = load volatile double *%ptr
+  %val7 = load volatile double *%ptr
+  %val8 = load volatile double *%ptr
+  %val9 = load volatile double *%ptr
+  %val10 = load volatile double *%ptr
+  %val11 = load volatile double *%ptr
+  %val12 = load volatile double *%ptr
+  %val13 = load volatile double *%ptr
+  %val14 = load volatile double *%ptr
+  %val15 = load volatile double *%ptr
+  %val16 = load volatile double *%ptr
+
+  %sqrt0 = call double @llvm.sqrt.f64(double %val0)
+  %sqrt1 = call double @llvm.sqrt.f64(double %val1)
+  %sqrt2 = call double @llvm.sqrt.f64(double %val2)
+  %sqrt3 = call double @llvm.sqrt.f64(double %val3)
+  %sqrt4 = call double @llvm.sqrt.f64(double %val4)
+  %sqrt5 = call double @llvm.sqrt.f64(double %val5)
+  %sqrt6 = call double @llvm.sqrt.f64(double %val6)
+  %sqrt7 = call double @llvm.sqrt.f64(double %val7)
+  %sqrt8 = call double @llvm.sqrt.f64(double %val8)
+  %sqrt9 = call double @llvm.sqrt.f64(double %val9)
+  %sqrt10 = call double @llvm.sqrt.f64(double %val10)
+  %sqrt11 = call double @llvm.sqrt.f64(double %val11)
+  %sqrt12 = call double @llvm.sqrt.f64(double %val12)
+  %sqrt13 = call double @llvm.sqrt.f64(double %val13)
+  %sqrt14 = call double @llvm.sqrt.f64(double %val14)
+  %sqrt15 = call double @llvm.sqrt.f64(double %val15)
+  %sqrt16 = call double @llvm.sqrt.f64(double %val16)
+
+  store volatile double %val0, double *%ptr
+  store volatile double %val1, double *%ptr
+  store volatile double %val2, double *%ptr
+  store volatile double %val3, double *%ptr
+  store volatile double %val4, double *%ptr
+  store volatile double %val5, double *%ptr
+  store volatile double %val6, double *%ptr
+  store volatile double %val7, double *%ptr
+  store volatile double %val8, double *%ptr
+  store volatile double %val9, double *%ptr
+  store volatile double %val10, double *%ptr
+  store volatile double %val11, double *%ptr
+  store volatile double %val12, double *%ptr
+  store volatile double %val13, double *%ptr
+  store volatile double %val14, double *%ptr
+  store volatile double %val15, double *%ptr
+  store volatile double %val16, double *%ptr
+
+  store volatile double %sqrt0, double *%ptr
+  store volatile double %sqrt1, double *%ptr
+  store volatile double %sqrt2, double *%ptr
+  store volatile double %sqrt3, double *%ptr
+  store volatile double %sqrt4, double *%ptr
+  store volatile double %sqrt5, double *%ptr
+  store volatile double %sqrt6, double *%ptr
+  store volatile double %sqrt7, double *%ptr
+  store volatile double %sqrt8, double *%ptr
+  store volatile double %sqrt9, double *%ptr
+  store volatile double %sqrt10, double *%ptr
+  store volatile double %sqrt11, double *%ptr
+  store volatile double %sqrt12, double *%ptr
+  store volatile double %sqrt13, double *%ptr
+  store volatile double %sqrt14, double *%ptr
+  store volatile double %sqrt15, double *%ptr
+  store volatile double %sqrt16, double *%ptr
+
+  ret void
+}
diff --git a/test/CodeGen/SystemZ/fp-sub-01.ll b/test/CodeGen/SystemZ/fp-sub-01.ll
index b03f04bd017..88ce7fb440e 100644
--- a/test/CodeGen/SystemZ/fp-sub-01.ll
+++ b/test/CodeGen/SystemZ/fp-sub-01.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare float @foo()
+
 ; Check register subtraction.
 define float @f1(float %f1, float %f2) {
 ; CHECK: f1:
@@ -69,3 +71,49 @@ define float @f6(float %f1, float *%base, i64 %index) {
   %res = fsub float %f1, %f2
   ret float %res
 }
+
+; Check that subtractions of spilled values can use SEB rather than SEBR.
+define float @f7(float *%ptr0) {
+; CHECK: f7:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: seb %f0, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr float *%ptr0, i64 2
+  %ptr2 = getelementptr float *%ptr0, i64 4
+  %ptr3 = getelementptr float *%ptr0, i64 6
+  %ptr4 = getelementptr float *%ptr0, i64 8
+  %ptr5 = getelementptr float *%ptr0, i64 10
+  %ptr6 = getelementptr float *%ptr0, i64 12
+  %ptr7 = getelementptr float *%ptr0, i64 14
+  %ptr8 = getelementptr float *%ptr0, i64 16
+  %ptr9 = getelementptr float *%ptr0, i64 18
+  %ptr10 = getelementptr float *%ptr0, i64 20
+
+  %val0 = load float *%ptr0
+  %val1 = load float *%ptr1
+  %val2 = load float *%ptr2
+  %val3 = load float *%ptr3
+  %val4 = load float *%ptr4
+  %val5 = load float *%ptr5
+  %val6 = load float *%ptr6
+  %val7 = load float *%ptr7
+  %val8 = load float *%ptr8
+  %val9 = load float *%ptr9
+  %val10 = load float *%ptr10
+
+  %ret = call float @foo()
+
+  %sub0 = fsub float %ret, %val0
+  %sub1 = fsub float %sub0, %val1
+  %sub2 = fsub float %sub1, %val2
+  %sub3 = fsub float %sub2, %val3
+  %sub4 = fsub float %sub3, %val4
+  %sub5 = fsub float %sub4, %val5
+  %sub6 = fsub float %sub5, %val6
+  %sub7 = fsub float %sub6, %val7
+  %sub8 = fsub float %sub7, %val8
+  %sub9 = fsub float %sub8, %val9
+  %sub10 = fsub float %sub9, %val10
+
+  ret float %sub10
+}
diff --git a/test/CodeGen/SystemZ/fp-sub-02.ll b/test/CodeGen/SystemZ/fp-sub-02.ll
index bf9848c2fd5..b6409fcd6ee 100644
--- a/test/CodeGen/SystemZ/fp-sub-02.ll
+++ b/test/CodeGen/SystemZ/fp-sub-02.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare double @foo()
+
 ; Check register subtraction.
 define double @f1(double %f1, double %f2) {
 ; CHECK: f1:
@@ -69,3 +71,49 @@ define double @f6(double %f1, double *%base, i64 %index) {
   %res = fsub double %f1, %f2
   ret double %res
 }
+
+; Check that subtractions of spilled values can use SDB rather than SDBR.
+define double @f7(double *%ptr0) {
+; CHECK: f7:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: sdb %f0, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr double *%ptr0, i64 2
+  %ptr2 = getelementptr double *%ptr0, i64 4
+  %ptr3 = getelementptr double *%ptr0, i64 6
+  %ptr4 = getelementptr double *%ptr0, i64 8
+  %ptr5 = getelementptr double *%ptr0, i64 10
+  %ptr6 = getelementptr double *%ptr0, i64 12
+  %ptr7 = getelementptr double *%ptr0, i64 14
+  %ptr8 = getelementptr double *%ptr0, i64 16
+  %ptr9 = getelementptr double *%ptr0, i64 18
+  %ptr10 = getelementptr double *%ptr0, i64 20
+
+  %val0 = load double *%ptr0
+  %val1 = load double *%ptr1
+  %val2 = load double *%ptr2
+  %val3 = load double *%ptr3
+  %val4 = load double *%ptr4
+  %val5 = load double *%ptr5
+  %val6 = load double *%ptr6
+  %val7 = load double *%ptr7
+  %val8 = load double *%ptr8
+  %val9 = load double *%ptr9
+  %val10 = load double *%ptr10
+
+  %ret = call double @foo()
+
+  %sub0 = fsub double %ret, %val0
+  %sub1 = fsub double %sub0, %val1
+  %sub2 = fsub double %sub1, %val2
+  %sub3 = fsub double %sub2, %val3
+  %sub4 = fsub double %sub3, %val4
+  %sub5 = fsub double %sub4, %val5
+  %sub6 = fsub double %sub5, %val6
+  %sub7 = fsub double %sub6, %val7
+  %sub8 = fsub double %sub7, %val8
+  %sub9 = fsub double %sub8, %val9
+  %sub10 = fsub double %sub9, %val10
+
+  ret double %sub10
+}
diff --git a/test/CodeGen/SystemZ/int-add-02.ll b/test/CodeGen/SystemZ/int-add-02.ll
index 568ad1c4471..bc434a634a4 100644
--- a/test/CodeGen/SystemZ/int-add-02.ll
+++ b/test/CodeGen/SystemZ/int-add-02.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare i32 @foo()
+
 ; Check AR.
 define i32 @f1(i32 %a, i32 %b) {
 ; CHECK: f1:
@@ -127,3 +129,46 @@ define i32 @f11(i32 %a, i64 %src, i64 %index) {
   %add = add i32 %a, %b
   ret i32 %add
 }
+
+; Check that additions of spilled values can use A rather than AR.
+define i32 @f12(i32 *%ptr0) {
+; CHECK: f12:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: a %r2, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i32 *%ptr0, i64 2
+  %ptr2 = getelementptr i32 *%ptr0, i64 4
+  %ptr3 = getelementptr i32 *%ptr0, i64 6
+  %ptr4 = getelementptr i32 *%ptr0, i64 8
+  %ptr5 = getelementptr i32 *%ptr0, i64 10
+  %ptr6 = getelementptr i32 *%ptr0, i64 12
+  %ptr7 = getelementptr i32 *%ptr0, i64 14
+  %ptr8 = getelementptr i32 *%ptr0, i64 16
+  %ptr9 = getelementptr i32 *%ptr0, i64 18
+
+  %val0 = load i32 *%ptr0
+  %val1 = load i32 *%ptr1
+  %val2 = load i32 *%ptr2
+  %val3 = load i32 *%ptr3
+  %val4 = load i32 *%ptr4
+  %val5 = load i32 *%ptr5
+  %val6 = load i32 *%ptr6
+  %val7 = load i32 *%ptr7
+  %val8 = load i32 *%ptr8
+  %val9 = load i32 *%ptr9
+
+  %ret = call i32 @foo()
+
+  %add0 = add i32 %ret, %val0
+  %add1 = add i32 %add0, %val1
+  %add2 = add i32 %add1, %val2
+  %add3 = add i32 %add2, %val3
+  %add4 = add i32 %add3, %val4
+  %add5 = add i32 %add4, %val5
+  %add6 = add i32 %add5, %val6
+  %add7 = add i32 %add6, %val7
+  %add8 = add i32 %add7, %val8
+  %add9 = add i32 %add8, %val9
+
+  ret i32 %add9
+}
diff --git a/test/CodeGen/SystemZ/int-add-03.ll b/test/CodeGen/SystemZ/int-add-03.ll
index 46103575b7b..bfd163db54f 100644
--- a/test/CodeGen/SystemZ/int-add-03.ll
+++ b/test/CodeGen/SystemZ/int-add-03.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare i64 @foo()
+
 ; Check AGFR.
 define i64 @f1(i64 %a, i32 %b) {
 ; CHECK: f1:
@@ -100,3 +102,79 @@ define i64 @f8(i64 %a, i64 %src, i64 %index) {
   %add = add i64 %a, %bext
   ret i64 %add
 }
+
+; Check that additions of spilled values can use AGF rather than AGFR.
+define i64 @f9(i32 *%ptr0) {
+; CHECK: f9:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: agf %r2, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i32 *%ptr0, i64 2
+  %ptr2 = getelementptr i32 *%ptr0, i64 4
+  %ptr3 = getelementptr i32 *%ptr0, i64 6
+  %ptr4 = getelementptr i32 *%ptr0, i64 8
+  %ptr5 = getelementptr i32 *%ptr0, i64 10
+  %ptr6 = getelementptr i32 *%ptr0, i64 12
+  %ptr7 = getelementptr i32 *%ptr0, i64 14
+  %ptr8 = getelementptr i32 *%ptr0, i64 16
+  %ptr9 = getelementptr i32 *%ptr0, i64 18
+
+  %val0 = load i32 *%ptr0
+  %val1 = load i32 *%ptr1
+  %val2 = load i32 *%ptr2
+  %val3 = load i32 *%ptr3
+  %val4 = load i32 *%ptr4
+  %val5 = load i32 *%ptr5
+  %val6 = load i32 *%ptr6
+  %val7 = load i32 *%ptr7
+  %val8 = load i32 *%ptr8
+  %val9 = load i32 *%ptr9
+
+  %frob0 = add i32 %val0, 100
+  %frob1 = add i32 %val1, 100
+  %frob2 = add i32 %val2, 100
+  %frob3 = add i32 %val3, 100
+  %frob4 = add i32 %val4, 100
+  %frob5 = add i32 %val5, 100
+  %frob6 = add i32 %val6, 100
+  %frob7 = add i32 %val7, 100
+  %frob8 = add i32 %val8, 100
+  %frob9 = add i32 %val9, 100
+
+  store i32 %frob0, i32 *%ptr0
+  store i32 %frob1, i32 *%ptr1
+  store i32 %frob2, i32 *%ptr2
+  store i32 %frob3, i32 *%ptr3
+  store i32 %frob4, i32 *%ptr4
+  store i32 %frob5, i32 *%ptr5
+  store i32 %frob6, i32 *%ptr6
+  store i32 %frob7, i32 *%ptr7
+  store i32 %frob8, i32 *%ptr8
+  store i32 %frob9, i32 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %ext0 = sext i32 %frob0 to i64
+  %ext1 = sext i32 %frob1 to i64
+  %ext2 = sext i32 %frob2 to i64
+  %ext3 = sext i32 %frob3 to i64
+  %ext4 = sext i32 %frob4 to i64
+  %ext5 = sext i32 %frob5 to i64
+  %ext6 = sext i32 %frob6 to i64
+  %ext7 = sext i32 %frob7 to i64
+  %ext8 = sext i32 %frob8 to i64
+  %ext9 = sext i32 %frob9 to i64
+
+  %add0 = add i64 %ret, %ext0
+  %add1 = add i64 %add0, %ext1
+  %add2 = add i64 %add1, %ext2
+  %add3 = add i64 %add2, %ext3
+  %add4 = add i64 %add3, %ext4
+  %add5 = add i64 %add4, %ext5
+  %add6 = add i64 %add5, %ext6
+  %add7 = add i64 %add6, %ext7
+  %add8 = add i64 %add7, %ext8
+  %add9 = add i64 %add8, %ext9
+
+  ret i64 %add9
+}
diff --git a/test/CodeGen/SystemZ/int-add-04.ll b/test/CodeGen/SystemZ/int-add-04.ll
index 1c2dc76781c..6c8e5cf2268 100644
--- a/test/CodeGen/SystemZ/int-add-04.ll
+++ b/test/CodeGen/SystemZ/int-add-04.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare i64 @foo()
+
 ; Check ALGFR.
 define i64 @f1(i64 %a, i32 %b) {
 ; CHECK: f1:
@@ -100,3 +102,79 @@ define i64 @f8(i64 %a, i64 %src, i64 %index) {
   %add = add i64 %a, %bext
   ret i64 %add
 }
+
+; Check that additions of spilled values can use ALGF rather than ALGFR.
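+; (This is the unsigned twin of the AGF test in int-add-03.ll: the only
+; difference is zext rather than sext, which is what selects ALGF (add
+; logical, zero-extending the 32-bit memory operand) over AGF (add,
+; sign-extending it).)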
+define i64 @f9(i32 *%ptr0) {
+; CHECK: f9:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: algf %r2, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i32 *%ptr0, i64 2
+  %ptr2 = getelementptr i32 *%ptr0, i64 4
+  %ptr3 = getelementptr i32 *%ptr0, i64 6
+  %ptr4 = getelementptr i32 *%ptr0, i64 8
+  %ptr5 = getelementptr i32 *%ptr0, i64 10
+  %ptr6 = getelementptr i32 *%ptr0, i64 12
+  %ptr7 = getelementptr i32 *%ptr0, i64 14
+  %ptr8 = getelementptr i32 *%ptr0, i64 16
+  %ptr9 = getelementptr i32 *%ptr0, i64 18
+
+  %val0 = load i32 *%ptr0
+  %val1 = load i32 *%ptr1
+  %val2 = load i32 *%ptr2
+  %val3 = load i32 *%ptr3
+  %val4 = load i32 *%ptr4
+  %val5 = load i32 *%ptr5
+  %val6 = load i32 *%ptr6
+  %val7 = load i32 *%ptr7
+  %val8 = load i32 *%ptr8
+  %val9 = load i32 *%ptr9
+
+  %frob0 = add i32 %val0, 100
+  %frob1 = add i32 %val1, 100
+  %frob2 = add i32 %val2, 100
+  %frob3 = add i32 %val3, 100
+  %frob4 = add i32 %val4, 100
+  %frob5 = add i32 %val5, 100
+  %frob6 = add i32 %val6, 100
+  %frob7 = add i32 %val7, 100
+  %frob8 = add i32 %val8, 100
+  %frob9 = add i32 %val9, 100
+
+  store i32 %frob0, i32 *%ptr0
+  store i32 %frob1, i32 *%ptr1
+  store i32 %frob2, i32 *%ptr2
+  store i32 %frob3, i32 *%ptr3
+  store i32 %frob4, i32 *%ptr4
+  store i32 %frob5, i32 *%ptr5
+  store i32 %frob6, i32 *%ptr6
+  store i32 %frob7, i32 *%ptr7
+  store i32 %frob8, i32 *%ptr8
+  store i32 %frob9, i32 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %ext0 = zext i32 %frob0 to i64
+  %ext1 = zext i32 %frob1 to i64
+  %ext2 = zext i32 %frob2 to i64
+  %ext3 = zext i32 %frob3 to i64
+  %ext4 = zext i32 %frob4 to i64
+  %ext5 = zext i32 %frob5 to i64
+  %ext6 = zext i32 %frob6 to i64
+  %ext7 = zext i32 %frob7 to i64
+  %ext8 = zext i32 %frob8 to i64
+  %ext9 = zext i32 %frob9 to i64
+
+  %add0 = add i64 %ret, %ext0
+  %add1 = add i64 %add0, %ext1
+  %add2 = add i64 %add1, %ext2
+  %add3 = add i64 %add2, %ext3
+  %add4 = add i64 %add3, %ext4
+  %add5 = add i64 %add4, %ext5
+  %add6 = add i64 %add5, %ext6
+  %add7 = add i64 %add6, %ext7
+  %add8 = add i64 %add7, %ext8
+  %add9 = add i64 %add8, %ext9
+
+  ret i64 %add9
+}
diff --git a/test/CodeGen/SystemZ/int-add-05.ll b/test/CodeGen/SystemZ/int-add-05.ll
index ae32cc4ad01..ee840ac6478 100644
--- a/test/CodeGen/SystemZ/int-add-05.ll
+++ b/test/CodeGen/SystemZ/int-add-05.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare i64 @foo()
+
 ; Check AGR.
 define i64 @f1(i64 %a, i64 %b) {
 ; CHECK: f1:
@@ -92,3 +94,46 @@ define i64 @f8(i64 %a, i64 %src, i64 %index) {
   %add = add i64 %a, %b
   ret i64 %add
 }
+
+; Check that additions of spilled values can use AG rather than AGR.
+define i64 @f9(i64 *%ptr0) {
+; CHECK: f9:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: ag %r2, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i64 *%ptr0, i64 2
+  %ptr2 = getelementptr i64 *%ptr0, i64 4
+  %ptr3 = getelementptr i64 *%ptr0, i64 6
+  %ptr4 = getelementptr i64 *%ptr0, i64 8
+  %ptr5 = getelementptr i64 *%ptr0, i64 10
+  %ptr6 = getelementptr i64 *%ptr0, i64 12
+  %ptr7 = getelementptr i64 *%ptr0, i64 14
+  %ptr8 = getelementptr i64 *%ptr0, i64 16
+  %ptr9 = getelementptr i64 *%ptr0, i64 18
+
+  %val0 = load i64 *%ptr0
+  %val1 = load i64 *%ptr1
+  %val2 = load i64 *%ptr2
+  %val3 = load i64 *%ptr3
+  %val4 = load i64 *%ptr4
+  %val5 = load i64 *%ptr5
+  %val6 = load i64 *%ptr6
+  %val7 = load i64 *%ptr7
+  %val8 = load i64 *%ptr8
+  %val9 = load i64 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %add0 = add i64 %ret, %val0
+  %add1 = add i64 %add0, %val1
+  %add2 = add i64 %add1, %val2
+  %add3 = add i64 %add2, %val3
+  %add4 = add i64 %add3, %val4
+  %add5 = add i64 %add4, %val5
+  %add6 = add i64 %add5, %val6
+  %add7 = add i64 %add6, %val7
+  %add8 = add i64 %add7, %val8
+  %add9 = add i64 %add8, %val9
+
+  ret i64 %add9
+}
diff --git a/test/CodeGen/SystemZ/int-add-08.ll b/test/CodeGen/SystemZ/int-add-08.ll
index b1f820fe3d8..0b8c01eac96 100644
--- a/test/CodeGen/SystemZ/int-add-08.ll
+++ b/test/CodeGen/SystemZ/int-add-08.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare i128 *@foo()
+
 ; Test register addition.
 define void @f1(i128 *%ptr) {
 ; CHECK: f1:
@@ -108,3 +110,34 @@ define void @f7(i128 *%aptr, i64 %base) {
   ret void
 }
 
+; Check that additions of spilled values can use ALG and ALCG rather than
+; ALGR and ALCGR.
+define void @f8(i128 *%ptr0) {
+; CHECK: f8:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: alg {{%r[0-9]+}}, {{[0-9]+}}(%r15)
+; CHECK: alcg {{%r[0-9]+}}, {{[0-9]+}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i128 *%ptr0, i128 2
+  %ptr2 = getelementptr i128 *%ptr0, i128 4
+  %ptr3 = getelementptr i128 *%ptr0, i128 6
+  %ptr4 = getelementptr i128 *%ptr0, i128 8
+
+  %val0 = load i128 *%ptr0
+  %val1 = load i128 *%ptr1
+  %val2 = load i128 *%ptr2
+  %val3 = load i128 *%ptr3
+  %val4 = load i128 *%ptr4
+
+  %retptr = call i128 *@foo()
+
+  %ret = load i128 *%retptr
+  %add0 = add i128 %ret, %val0
+  %add1 = add i128 %add0, %val1
+  %add2 = add i128 %add1, %val2
+  %add3 = add i128 %add2, %val3
+  %add4 = add i128 %add3, %val4
+  store i128 %add4, i128 *%retptr
+
+  ret void
+}
diff --git a/test/CodeGen/SystemZ/int-cmp-05.ll b/test/CodeGen/SystemZ/int-cmp-05.ll
index 38cd1a5e669..d953ebfc679 100644
--- a/test/CodeGen/SystemZ/int-cmp-05.ll
+++ b/test/CodeGen/SystemZ/int-cmp-05.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare i64 @foo()
+
 ; Check signed register comparison.
 define double @f1(double %a, double %b, i64 %i1, i32 %unext) {
 ; CHECK: f1:
@@ -201,3 +203,90 @@ define double @f14(double %a, double %b, i64 %i1, i64 %base, i64 %index) {
   %res = select i1 %cond, double %a, double %b
   ret double %res
 }
+
+; Check that comparisons of spilled values can use CGF rather than CGFR.
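+; (CGF compares a 64-bit register with a 32-bit memory operand that is
+; sign-extended first, so each icmp below pairs %ret against a sext'd
+; value; the select chain keeps every comparison result live so none of
+; the compares can be deleted as dead.)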
+define i64 @f15(i32 *%ptr0) { +; CHECK: f15: +; CHECK: brasl %r14, foo@PLT +; CHECK: cgf {{%r[0-9]+}}, 16{{[04]}}(%r15) +; CHECK: br %r14 + %ptr1 = getelementptr i32 *%ptr0, i64 2 + %ptr2 = getelementptr i32 *%ptr0, i64 4 + %ptr3 = getelementptr i32 *%ptr0, i64 6 + %ptr4 = getelementptr i32 *%ptr0, i64 8 + %ptr5 = getelementptr i32 *%ptr0, i64 10 + %ptr6 = getelementptr i32 *%ptr0, i64 12 + %ptr7 = getelementptr i32 *%ptr0, i64 14 + %ptr8 = getelementptr i32 *%ptr0, i64 16 + %ptr9 = getelementptr i32 *%ptr0, i64 18 + + %val0 = load i32 *%ptr0 + %val1 = load i32 *%ptr1 + %val2 = load i32 *%ptr2 + %val3 = load i32 *%ptr3 + %val4 = load i32 *%ptr4 + %val5 = load i32 *%ptr5 + %val6 = load i32 *%ptr6 + %val7 = load i32 *%ptr7 + %val8 = load i32 *%ptr8 + %val9 = load i32 *%ptr9 + + %frob0 = add i32 %val0, 100 + %frob1 = add i32 %val1, 100 + %frob2 = add i32 %val2, 100 + %frob3 = add i32 %val3, 100 + %frob4 = add i32 %val4, 100 + %frob5 = add i32 %val5, 100 + %frob6 = add i32 %val6, 100 + %frob7 = add i32 %val7, 100 + %frob8 = add i32 %val8, 100 + %frob9 = add i32 %val9, 100 + + store i32 %frob0, i32 *%ptr0 + store i32 %frob1, i32 *%ptr1 + store i32 %frob2, i32 *%ptr2 + store i32 %frob3, i32 *%ptr3 + store i32 %frob4, i32 *%ptr4 + store i32 %frob5, i32 *%ptr5 + store i32 %frob6, i32 *%ptr6 + store i32 %frob7, i32 *%ptr7 + store i32 %frob8, i32 *%ptr8 + store i32 %frob9, i32 *%ptr9 + + %ret = call i64 @foo() + + %ext0 = sext i32 %frob0 to i64 + %ext1 = sext i32 %frob1 to i64 + %ext2 = sext i32 %frob2 to i64 + %ext3 = sext i32 %frob3 to i64 + %ext4 = sext i32 %frob4 to i64 + %ext5 = sext i32 %frob5 to i64 + %ext6 = sext i32 %frob6 to i64 + %ext7 = sext i32 %frob7 to i64 + %ext8 = sext i32 %frob8 to i64 + %ext9 = sext i32 %frob9 to i64 + + %cmp0 = icmp slt i64 %ret, %ext0 + %cmp1 = icmp slt i64 %ret, %ext1 + %cmp2 = icmp slt i64 %ret, %ext2 + %cmp3 = icmp slt i64 %ret, %ext3 + %cmp4 = icmp slt i64 %ret, %ext4 + %cmp5 = icmp slt i64 %ret, %ext5 + %cmp6 = icmp slt i64 %ret, %ext6 + %cmp7 = icmp slt i64 %ret, %ext7 + %cmp8 = icmp slt i64 %ret, %ext8 + %cmp9 = icmp slt i64 %ret, %ext9 + + %sel0 = select i1 %cmp0, i64 %ret, i64 0 + %sel1 = select i1 %cmp1, i64 %sel0, i64 1 + %sel2 = select i1 %cmp2, i64 %sel1, i64 2 + %sel3 = select i1 %cmp3, i64 %sel2, i64 3 + %sel4 = select i1 %cmp4, i64 %sel3, i64 4 + %sel5 = select i1 %cmp5, i64 %sel4, i64 5 + %sel6 = select i1 %cmp6, i64 %sel5, i64 6 + %sel7 = select i1 %cmp7, i64 %sel6, i64 7 + %sel8 = select i1 %cmp8, i64 %sel7, i64 8 + %sel9 = select i1 %cmp9, i64 %sel8, i64 9 + + ret i64 %sel9 +} diff --git a/test/CodeGen/SystemZ/int-cmp-06.ll b/test/CodeGen/SystemZ/int-cmp-06.ll index efb6ad863c8..f8666316d06 100644 --- a/test/CodeGen/SystemZ/int-cmp-06.ll +++ b/test/CodeGen/SystemZ/int-cmp-06.ll @@ -2,6 +2,8 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s +declare i64 @foo() + ; Check unsigned register comparison. define double @f1(double %a, double %b, i64 %i1, i32 %unext) { ; CHECK: f1: @@ -251,3 +253,90 @@ define double @f18(double %a, double %b, i64 %i1, i64 %base, i64 %index) { %res = select i1 %cond, double %a, double %b ret double %res } + +; Check that comparisons of spilled values can use CLGF rather than CLGFR. 
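CLGF is the unsigned counterpart: it compares against a zero-extended 32-bit
memory operand, matching an icmp ult whose second operand is a zext.  The test
below also shows why all ten extended values stay live at once: each comparison
is against %ret, but every select consumes the previous select's result, so no
comparison can be discarded or recomputed late.  Reduced to two links, the
chain idiom looks like this (hypothetical @sample, illustrative only):

define i64 @sample(i64 %ret, i64 %ext0, i64 %ext1) {
  %cmp0 = icmp ult i64 %ret, %ext0
  %sel0 = select i1 %cmp0, i64 %ret, i64 0
  %cmp1 = icmp ult i64 %ret, %ext1
  %sel1 = select i1 %cmp1, i64 %sel0, i64 1
  ret i64 %sel1
}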
+define i64 @f19(i32 *%ptr0) { +; CHECK: f19: +; CHECK: brasl %r14, foo@PLT +; CHECK: clgf {{%r[0-9]+}}, 16{{[04]}}(%r15) +; CHECK: br %r14 + %ptr1 = getelementptr i32 *%ptr0, i64 2 + %ptr2 = getelementptr i32 *%ptr0, i64 4 + %ptr3 = getelementptr i32 *%ptr0, i64 6 + %ptr4 = getelementptr i32 *%ptr0, i64 8 + %ptr5 = getelementptr i32 *%ptr0, i64 10 + %ptr6 = getelementptr i32 *%ptr0, i64 12 + %ptr7 = getelementptr i32 *%ptr0, i64 14 + %ptr8 = getelementptr i32 *%ptr0, i64 16 + %ptr9 = getelementptr i32 *%ptr0, i64 18 + + %val0 = load i32 *%ptr0 + %val1 = load i32 *%ptr1 + %val2 = load i32 *%ptr2 + %val3 = load i32 *%ptr3 + %val4 = load i32 *%ptr4 + %val5 = load i32 *%ptr5 + %val6 = load i32 *%ptr6 + %val7 = load i32 *%ptr7 + %val8 = load i32 *%ptr8 + %val9 = load i32 *%ptr9 + + %frob0 = add i32 %val0, 100 + %frob1 = add i32 %val1, 100 + %frob2 = add i32 %val2, 100 + %frob3 = add i32 %val3, 100 + %frob4 = add i32 %val4, 100 + %frob5 = add i32 %val5, 100 + %frob6 = add i32 %val6, 100 + %frob7 = add i32 %val7, 100 + %frob8 = add i32 %val8, 100 + %frob9 = add i32 %val9, 100 + + store i32 %frob0, i32 *%ptr0 + store i32 %frob1, i32 *%ptr1 + store i32 %frob2, i32 *%ptr2 + store i32 %frob3, i32 *%ptr3 + store i32 %frob4, i32 *%ptr4 + store i32 %frob5, i32 *%ptr5 + store i32 %frob6, i32 *%ptr6 + store i32 %frob7, i32 *%ptr7 + store i32 %frob8, i32 *%ptr8 + store i32 %frob9, i32 *%ptr9 + + %ret = call i64 @foo() + + %ext0 = zext i32 %frob0 to i64 + %ext1 = zext i32 %frob1 to i64 + %ext2 = zext i32 %frob2 to i64 + %ext3 = zext i32 %frob3 to i64 + %ext4 = zext i32 %frob4 to i64 + %ext5 = zext i32 %frob5 to i64 + %ext6 = zext i32 %frob6 to i64 + %ext7 = zext i32 %frob7 to i64 + %ext8 = zext i32 %frob8 to i64 + %ext9 = zext i32 %frob9 to i64 + + %cmp0 = icmp ult i64 %ret, %ext0 + %cmp1 = icmp ult i64 %ret, %ext1 + %cmp2 = icmp ult i64 %ret, %ext2 + %cmp3 = icmp ult i64 %ret, %ext3 + %cmp4 = icmp ult i64 %ret, %ext4 + %cmp5 = icmp ult i64 %ret, %ext5 + %cmp6 = icmp ult i64 %ret, %ext6 + %cmp7 = icmp ult i64 %ret, %ext7 + %cmp8 = icmp ult i64 %ret, %ext8 + %cmp9 = icmp ult i64 %ret, %ext9 + + %sel0 = select i1 %cmp0, i64 %ret, i64 0 + %sel1 = select i1 %cmp1, i64 %sel0, i64 1 + %sel2 = select i1 %cmp2, i64 %sel1, i64 2 + %sel3 = select i1 %cmp3, i64 %sel2, i64 3 + %sel4 = select i1 %cmp4, i64 %sel3, i64 4 + %sel5 = select i1 %cmp5, i64 %sel4, i64 5 + %sel6 = select i1 %cmp6, i64 %sel5, i64 6 + %sel7 = select i1 %cmp7, i64 %sel6, i64 7 + %sel8 = select i1 %cmp8, i64 %sel7, i64 8 + %sel9 = select i1 %cmp9, i64 %sel8, i64 9 + + ret i64 %sel9 +} diff --git a/test/CodeGen/SystemZ/int-conv-01.ll b/test/CodeGen/SystemZ/int-conv-01.ll index 9724ee38d83..335cf7587df 100644 --- a/test/CodeGen/SystemZ/int-conv-01.ll +++ b/test/CodeGen/SystemZ/int-conv-01.ll @@ -103,3 +103,97 @@ define i32 @f9(i64 %src, i64 %index) { %ext = sext i8 %byte to i32 ret i32 %ext } + +; Test a case where we spill the source of at least one LBR. We want +; to use LB if possible. 
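The conversion tests use a different trick to force a spill without a call:
sixteen volatile loads of the same address.  Volatile loads may not be merged,
so the function holds sixteen independent values at once, one more than the
fifteen general registers available once %r15 is reserved as the stack pointer.
A two-value sketch of the idiom (hypothetical @sample):

define void @sample(i32 *%ptr) {
; Non-volatile loads of the same address would be CSEd into one;
; volatile keeps them distinct and simultaneously live.
  %a = load volatile i32 *%ptr
  %b = load volatile i32 *%ptr
  store volatile i32 %a, i32 *%ptr
  store volatile i32 %b, i32 *%ptr
  ret void
}

With at least one value spilled, truncating and sign-extending that value
should become a single LB from the spill slot instead of a reload followed
by LBR.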
+define void @f10(i32 *%ptr) { +; CHECK: f10: +; CHECK: lb {{%r[0-9]+}}, 16{{[37]}}(%r15) +; CHECK: br %r14 + %val0 = load volatile i32 *%ptr + %val1 = load volatile i32 *%ptr + %val2 = load volatile i32 *%ptr + %val3 = load volatile i32 *%ptr + %val4 = load volatile i32 *%ptr + %val5 = load volatile i32 *%ptr + %val6 = load volatile i32 *%ptr + %val7 = load volatile i32 *%ptr + %val8 = load volatile i32 *%ptr + %val9 = load volatile i32 *%ptr + %val10 = load volatile i32 *%ptr + %val11 = load volatile i32 *%ptr + %val12 = load volatile i32 *%ptr + %val13 = load volatile i32 *%ptr + %val14 = load volatile i32 *%ptr + %val15 = load volatile i32 *%ptr + + %trunc0 = trunc i32 %val0 to i8 + %trunc1 = trunc i32 %val1 to i8 + %trunc2 = trunc i32 %val2 to i8 + %trunc3 = trunc i32 %val3 to i8 + %trunc4 = trunc i32 %val4 to i8 + %trunc5 = trunc i32 %val5 to i8 + %trunc6 = trunc i32 %val6 to i8 + %trunc7 = trunc i32 %val7 to i8 + %trunc8 = trunc i32 %val8 to i8 + %trunc9 = trunc i32 %val9 to i8 + %trunc10 = trunc i32 %val10 to i8 + %trunc11 = trunc i32 %val11 to i8 + %trunc12 = trunc i32 %val12 to i8 + %trunc13 = trunc i32 %val13 to i8 + %trunc14 = trunc i32 %val14 to i8 + %trunc15 = trunc i32 %val15 to i8 + + %ext0 = sext i8 %trunc0 to i32 + %ext1 = sext i8 %trunc1 to i32 + %ext2 = sext i8 %trunc2 to i32 + %ext3 = sext i8 %trunc3 to i32 + %ext4 = sext i8 %trunc4 to i32 + %ext5 = sext i8 %trunc5 to i32 + %ext6 = sext i8 %trunc6 to i32 + %ext7 = sext i8 %trunc7 to i32 + %ext8 = sext i8 %trunc8 to i32 + %ext9 = sext i8 %trunc9 to i32 + %ext10 = sext i8 %trunc10 to i32 + %ext11 = sext i8 %trunc11 to i32 + %ext12 = sext i8 %trunc12 to i32 + %ext13 = sext i8 %trunc13 to i32 + %ext14 = sext i8 %trunc14 to i32 + %ext15 = sext i8 %trunc15 to i32 + + store volatile i32 %val0, i32 *%ptr + store volatile i32 %val1, i32 *%ptr + store volatile i32 %val2, i32 *%ptr + store volatile i32 %val3, i32 *%ptr + store volatile i32 %val4, i32 *%ptr + store volatile i32 %val5, i32 *%ptr + store volatile i32 %val6, i32 *%ptr + store volatile i32 %val7, i32 *%ptr + store volatile i32 %val8, i32 *%ptr + store volatile i32 %val9, i32 *%ptr + store volatile i32 %val10, i32 *%ptr + store volatile i32 %val11, i32 *%ptr + store volatile i32 %val12, i32 *%ptr + store volatile i32 %val13, i32 *%ptr + store volatile i32 %val14, i32 *%ptr + store volatile i32 %val15, i32 *%ptr + + store volatile i32 %ext0, i32 *%ptr + store volatile i32 %ext1, i32 *%ptr + store volatile i32 %ext2, i32 *%ptr + store volatile i32 %ext3, i32 *%ptr + store volatile i32 %ext4, i32 *%ptr + store volatile i32 %ext5, i32 *%ptr + store volatile i32 %ext6, i32 *%ptr + store volatile i32 %ext7, i32 *%ptr + store volatile i32 %ext8, i32 *%ptr + store volatile i32 %ext9, i32 *%ptr + store volatile i32 %ext10, i32 *%ptr + store volatile i32 %ext11, i32 *%ptr + store volatile i32 %ext12, i32 *%ptr + store volatile i32 %ext13, i32 *%ptr + store volatile i32 %ext14, i32 *%ptr + store volatile i32 %ext15, i32 *%ptr + + ret void +} diff --git a/test/CodeGen/SystemZ/int-conv-02.ll b/test/CodeGen/SystemZ/int-conv-02.ll index 27a049fce39..05d1cd9e2a1 100644 --- a/test/CodeGen/SystemZ/int-conv-02.ll +++ b/test/CodeGen/SystemZ/int-conv-02.ll @@ -112,3 +112,97 @@ define i32 @f10(i64 %src, i64 %index) { %ext = zext i8 %byte to i32 ret i32 %ext } + +; Test a case where we spill the source of at least one LLCR. We want +; to use LLC if possible. 
+define void @f11(i32 *%ptr) { +; CHECK: f11: +; CHECK: llc {{%r[0-9]+}}, 16{{[37]}}(%r15) +; CHECK: br %r14 + %val0 = load volatile i32 *%ptr + %val1 = load volatile i32 *%ptr + %val2 = load volatile i32 *%ptr + %val3 = load volatile i32 *%ptr + %val4 = load volatile i32 *%ptr + %val5 = load volatile i32 *%ptr + %val6 = load volatile i32 *%ptr + %val7 = load volatile i32 *%ptr + %val8 = load volatile i32 *%ptr + %val9 = load volatile i32 *%ptr + %val10 = load volatile i32 *%ptr + %val11 = load volatile i32 *%ptr + %val12 = load volatile i32 *%ptr + %val13 = load volatile i32 *%ptr + %val14 = load volatile i32 *%ptr + %val15 = load volatile i32 *%ptr + + %trunc0 = trunc i32 %val0 to i8 + %trunc1 = trunc i32 %val1 to i8 + %trunc2 = trunc i32 %val2 to i8 + %trunc3 = trunc i32 %val3 to i8 + %trunc4 = trunc i32 %val4 to i8 + %trunc5 = trunc i32 %val5 to i8 + %trunc6 = trunc i32 %val6 to i8 + %trunc7 = trunc i32 %val7 to i8 + %trunc8 = trunc i32 %val8 to i8 + %trunc9 = trunc i32 %val9 to i8 + %trunc10 = trunc i32 %val10 to i8 + %trunc11 = trunc i32 %val11 to i8 + %trunc12 = trunc i32 %val12 to i8 + %trunc13 = trunc i32 %val13 to i8 + %trunc14 = trunc i32 %val14 to i8 + %trunc15 = trunc i32 %val15 to i8 + + %ext0 = zext i8 %trunc0 to i32 + %ext1 = zext i8 %trunc1 to i32 + %ext2 = zext i8 %trunc2 to i32 + %ext3 = zext i8 %trunc3 to i32 + %ext4 = zext i8 %trunc4 to i32 + %ext5 = zext i8 %trunc5 to i32 + %ext6 = zext i8 %trunc6 to i32 + %ext7 = zext i8 %trunc7 to i32 + %ext8 = zext i8 %trunc8 to i32 + %ext9 = zext i8 %trunc9 to i32 + %ext10 = zext i8 %trunc10 to i32 + %ext11 = zext i8 %trunc11 to i32 + %ext12 = zext i8 %trunc12 to i32 + %ext13 = zext i8 %trunc13 to i32 + %ext14 = zext i8 %trunc14 to i32 + %ext15 = zext i8 %trunc15 to i32 + + store volatile i32 %val0, i32 *%ptr + store volatile i32 %val1, i32 *%ptr + store volatile i32 %val2, i32 *%ptr + store volatile i32 %val3, i32 *%ptr + store volatile i32 %val4, i32 *%ptr + store volatile i32 %val5, i32 *%ptr + store volatile i32 %val6, i32 *%ptr + store volatile i32 %val7, i32 *%ptr + store volatile i32 %val8, i32 *%ptr + store volatile i32 %val9, i32 *%ptr + store volatile i32 %val10, i32 *%ptr + store volatile i32 %val11, i32 *%ptr + store volatile i32 %val12, i32 *%ptr + store volatile i32 %val13, i32 *%ptr + store volatile i32 %val14, i32 *%ptr + store volatile i32 %val15, i32 *%ptr + + store volatile i32 %ext0, i32 *%ptr + store volatile i32 %ext1, i32 *%ptr + store volatile i32 %ext2, i32 *%ptr + store volatile i32 %ext3, i32 *%ptr + store volatile i32 %ext4, i32 *%ptr + store volatile i32 %ext5, i32 *%ptr + store volatile i32 %ext6, i32 *%ptr + store volatile i32 %ext7, i32 *%ptr + store volatile i32 %ext8, i32 *%ptr + store volatile i32 %ext9, i32 *%ptr + store volatile i32 %ext10, i32 *%ptr + store volatile i32 %ext11, i32 *%ptr + store volatile i32 %ext12, i32 *%ptr + store volatile i32 %ext13, i32 *%ptr + store volatile i32 %ext14, i32 *%ptr + store volatile i32 %ext15, i32 *%ptr + + ret void +} diff --git a/test/CodeGen/SystemZ/int-conv-03.ll b/test/CodeGen/SystemZ/int-conv-03.ll index fae568d7bcc..e3a2cdd7fa7 100644 --- a/test/CodeGen/SystemZ/int-conv-03.ll +++ b/test/CodeGen/SystemZ/int-conv-03.ll @@ -103,3 +103,97 @@ define i64 @f9(i64 %src, i64 %index) { %ext = sext i8 %byte to i64 ret i64 %ext } + +; Test a case where we spill the source of at least one LGBR. We want +; to use LGB if possible. 
+define void @f10(i64 *%ptr) { +; CHECK: f10: +; CHECK: lgb {{%r[0-9]+}}, 167(%r15) +; CHECK: br %r14 + %val0 = load volatile i64 *%ptr + %val1 = load volatile i64 *%ptr + %val2 = load volatile i64 *%ptr + %val3 = load volatile i64 *%ptr + %val4 = load volatile i64 *%ptr + %val5 = load volatile i64 *%ptr + %val6 = load volatile i64 *%ptr + %val7 = load volatile i64 *%ptr + %val8 = load volatile i64 *%ptr + %val9 = load volatile i64 *%ptr + %val10 = load volatile i64 *%ptr + %val11 = load volatile i64 *%ptr + %val12 = load volatile i64 *%ptr + %val13 = load volatile i64 *%ptr + %val14 = load volatile i64 *%ptr + %val15 = load volatile i64 *%ptr + + %trunc0 = trunc i64 %val0 to i8 + %trunc1 = trunc i64 %val1 to i8 + %trunc2 = trunc i64 %val2 to i8 + %trunc3 = trunc i64 %val3 to i8 + %trunc4 = trunc i64 %val4 to i8 + %trunc5 = trunc i64 %val5 to i8 + %trunc6 = trunc i64 %val6 to i8 + %trunc7 = trunc i64 %val7 to i8 + %trunc8 = trunc i64 %val8 to i8 + %trunc9 = trunc i64 %val9 to i8 + %trunc10 = trunc i64 %val10 to i8 + %trunc11 = trunc i64 %val11 to i8 + %trunc12 = trunc i64 %val12 to i8 + %trunc13 = trunc i64 %val13 to i8 + %trunc14 = trunc i64 %val14 to i8 + %trunc15 = trunc i64 %val15 to i8 + + %ext0 = sext i8 %trunc0 to i64 + %ext1 = sext i8 %trunc1 to i64 + %ext2 = sext i8 %trunc2 to i64 + %ext3 = sext i8 %trunc3 to i64 + %ext4 = sext i8 %trunc4 to i64 + %ext5 = sext i8 %trunc5 to i64 + %ext6 = sext i8 %trunc6 to i64 + %ext7 = sext i8 %trunc7 to i64 + %ext8 = sext i8 %trunc8 to i64 + %ext9 = sext i8 %trunc9 to i64 + %ext10 = sext i8 %trunc10 to i64 + %ext11 = sext i8 %trunc11 to i64 + %ext12 = sext i8 %trunc12 to i64 + %ext13 = sext i8 %trunc13 to i64 + %ext14 = sext i8 %trunc14 to i64 + %ext15 = sext i8 %trunc15 to i64 + + store volatile i64 %val0, i64 *%ptr + store volatile i64 %val1, i64 *%ptr + store volatile i64 %val2, i64 *%ptr + store volatile i64 %val3, i64 *%ptr + store volatile i64 %val4, i64 *%ptr + store volatile i64 %val5, i64 *%ptr + store volatile i64 %val6, i64 *%ptr + store volatile i64 %val7, i64 *%ptr + store volatile i64 %val8, i64 *%ptr + store volatile i64 %val9, i64 *%ptr + store volatile i64 %val10, i64 *%ptr + store volatile i64 %val11, i64 *%ptr + store volatile i64 %val12, i64 *%ptr + store volatile i64 %val13, i64 *%ptr + store volatile i64 %val14, i64 *%ptr + store volatile i64 %val15, i64 *%ptr + + store volatile i64 %ext0, i64 *%ptr + store volatile i64 %ext1, i64 *%ptr + store volatile i64 %ext2, i64 *%ptr + store volatile i64 %ext3, i64 *%ptr + store volatile i64 %ext4, i64 *%ptr + store volatile i64 %ext5, i64 *%ptr + store volatile i64 %ext6, i64 *%ptr + store volatile i64 %ext7, i64 *%ptr + store volatile i64 %ext8, i64 *%ptr + store volatile i64 %ext9, i64 *%ptr + store volatile i64 %ext10, i64 *%ptr + store volatile i64 %ext11, i64 *%ptr + store volatile i64 %ext12, i64 *%ptr + store volatile i64 %ext13, i64 *%ptr + store volatile i64 %ext14, i64 *%ptr + store volatile i64 %ext15, i64 *%ptr + + ret void +} diff --git a/test/CodeGen/SystemZ/int-conv-04.ll b/test/CodeGen/SystemZ/int-conv-04.ll index 9ff3f58492c..c3d445a5756 100644 --- a/test/CodeGen/SystemZ/int-conv-04.ll +++ b/test/CodeGen/SystemZ/int-conv-04.ll @@ -112,3 +112,97 @@ define i64 @f10(i64 %src, i64 %index) { %ext = zext i8 %byte to i64 ret i64 %ext } + +; Test a case where we spill the source of at least one LLGCR. We want +; to use LLGC if possible. 
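The exact offsets in these CHECK lines fall out of big-endian layout.  An i64
spilled to the first slot occupies bytes 160..167 of the frame, and its least
significant byte, the only part an 8-bit truncation cares about, is the last
one, so the folded reload reads a single byte at 167 rather than the whole
doubleword at 160.  Sketched in assembly (illustrative only):

;     stg  %r0, 160(%r15)      spill the full doubleword
;     lgb  %r1, 167(%r15)      reload just the low byte, sign-extended

For an i32 value the 4-byte slot starts at 160 or 164, putting the low byte at
163 or 167; that is what the 16{{[37]}} patterns in the 32-bit tests match.
The zero-extending test below uses LLGC instead of LGB at the same address.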
+define void @f11(i64 *%ptr) { +; CHECK: f11: +; CHECK: llgc {{%r[0-9]+}}, 167(%r15) +; CHECK: br %r14 + %val0 = load volatile i64 *%ptr + %val1 = load volatile i64 *%ptr + %val2 = load volatile i64 *%ptr + %val3 = load volatile i64 *%ptr + %val4 = load volatile i64 *%ptr + %val5 = load volatile i64 *%ptr + %val6 = load volatile i64 *%ptr + %val7 = load volatile i64 *%ptr + %val8 = load volatile i64 *%ptr + %val9 = load volatile i64 *%ptr + %val10 = load volatile i64 *%ptr + %val11 = load volatile i64 *%ptr + %val12 = load volatile i64 *%ptr + %val13 = load volatile i64 *%ptr + %val14 = load volatile i64 *%ptr + %val15 = load volatile i64 *%ptr + + %trunc0 = trunc i64 %val0 to i8 + %trunc1 = trunc i64 %val1 to i8 + %trunc2 = trunc i64 %val2 to i8 + %trunc3 = trunc i64 %val3 to i8 + %trunc4 = trunc i64 %val4 to i8 + %trunc5 = trunc i64 %val5 to i8 + %trunc6 = trunc i64 %val6 to i8 + %trunc7 = trunc i64 %val7 to i8 + %trunc8 = trunc i64 %val8 to i8 + %trunc9 = trunc i64 %val9 to i8 + %trunc10 = trunc i64 %val10 to i8 + %trunc11 = trunc i64 %val11 to i8 + %trunc12 = trunc i64 %val12 to i8 + %trunc13 = trunc i64 %val13 to i8 + %trunc14 = trunc i64 %val14 to i8 + %trunc15 = trunc i64 %val15 to i8 + + %ext0 = zext i8 %trunc0 to i64 + %ext1 = zext i8 %trunc1 to i64 + %ext2 = zext i8 %trunc2 to i64 + %ext3 = zext i8 %trunc3 to i64 + %ext4 = zext i8 %trunc4 to i64 + %ext5 = zext i8 %trunc5 to i64 + %ext6 = zext i8 %trunc6 to i64 + %ext7 = zext i8 %trunc7 to i64 + %ext8 = zext i8 %trunc8 to i64 + %ext9 = zext i8 %trunc9 to i64 + %ext10 = zext i8 %trunc10 to i64 + %ext11 = zext i8 %trunc11 to i64 + %ext12 = zext i8 %trunc12 to i64 + %ext13 = zext i8 %trunc13 to i64 + %ext14 = zext i8 %trunc14 to i64 + %ext15 = zext i8 %trunc15 to i64 + + store volatile i64 %val0, i64 *%ptr + store volatile i64 %val1, i64 *%ptr + store volatile i64 %val2, i64 *%ptr + store volatile i64 %val3, i64 *%ptr + store volatile i64 %val4, i64 *%ptr + store volatile i64 %val5, i64 *%ptr + store volatile i64 %val6, i64 *%ptr + store volatile i64 %val7, i64 *%ptr + store volatile i64 %val8, i64 *%ptr + store volatile i64 %val9, i64 *%ptr + store volatile i64 %val10, i64 *%ptr + store volatile i64 %val11, i64 *%ptr + store volatile i64 %val12, i64 *%ptr + store volatile i64 %val13, i64 *%ptr + store volatile i64 %val14, i64 *%ptr + store volatile i64 %val15, i64 *%ptr + + store volatile i64 %ext0, i64 *%ptr + store volatile i64 %ext1, i64 *%ptr + store volatile i64 %ext2, i64 *%ptr + store volatile i64 %ext3, i64 *%ptr + store volatile i64 %ext4, i64 *%ptr + store volatile i64 %ext5, i64 *%ptr + store volatile i64 %ext6, i64 *%ptr + store volatile i64 %ext7, i64 *%ptr + store volatile i64 %ext8, i64 *%ptr + store volatile i64 %ext9, i64 *%ptr + store volatile i64 %ext10, i64 *%ptr + store volatile i64 %ext11, i64 *%ptr + store volatile i64 %ext12, i64 *%ptr + store volatile i64 %ext13, i64 *%ptr + store volatile i64 %ext14, i64 *%ptr + store volatile i64 %ext15, i64 *%ptr + + ret void +} diff --git a/test/CodeGen/SystemZ/int-conv-05.ll b/test/CodeGen/SystemZ/int-conv-05.ll index 99b7a09af43..b5f23af6f9e 100644 --- a/test/CodeGen/SystemZ/int-conv-05.ll +++ b/test/CodeGen/SystemZ/int-conv-05.ll @@ -138,3 +138,97 @@ define i32 @f12(i64 %src, i64 %index) { %ext = sext i16 %half to i32 ret i32 %ext } + +; Test a case where we spill the source of at least one LHR. We want +; to use LH if possible. 
+define void @f13(i32 *%ptr) { +; CHECK: f13: +; CHECK: lh {{%r[0-9]+}}, 16{{[26]}}(%r15) +; CHECK: br %r14 + %val0 = load volatile i32 *%ptr + %val1 = load volatile i32 *%ptr + %val2 = load volatile i32 *%ptr + %val3 = load volatile i32 *%ptr + %val4 = load volatile i32 *%ptr + %val5 = load volatile i32 *%ptr + %val6 = load volatile i32 *%ptr + %val7 = load volatile i32 *%ptr + %val8 = load volatile i32 *%ptr + %val9 = load volatile i32 *%ptr + %val10 = load volatile i32 *%ptr + %val11 = load volatile i32 *%ptr + %val12 = load volatile i32 *%ptr + %val13 = load volatile i32 *%ptr + %val14 = load volatile i32 *%ptr + %val15 = load volatile i32 *%ptr + + %trunc0 = trunc i32 %val0 to i16 + %trunc1 = trunc i32 %val1 to i16 + %trunc2 = trunc i32 %val2 to i16 + %trunc3 = trunc i32 %val3 to i16 + %trunc4 = trunc i32 %val4 to i16 + %trunc5 = trunc i32 %val5 to i16 + %trunc6 = trunc i32 %val6 to i16 + %trunc7 = trunc i32 %val7 to i16 + %trunc8 = trunc i32 %val8 to i16 + %trunc9 = trunc i32 %val9 to i16 + %trunc10 = trunc i32 %val10 to i16 + %trunc11 = trunc i32 %val11 to i16 + %trunc12 = trunc i32 %val12 to i16 + %trunc13 = trunc i32 %val13 to i16 + %trunc14 = trunc i32 %val14 to i16 + %trunc15 = trunc i32 %val15 to i16 + + %ext0 = sext i16 %trunc0 to i32 + %ext1 = sext i16 %trunc1 to i32 + %ext2 = sext i16 %trunc2 to i32 + %ext3 = sext i16 %trunc3 to i32 + %ext4 = sext i16 %trunc4 to i32 + %ext5 = sext i16 %trunc5 to i32 + %ext6 = sext i16 %trunc6 to i32 + %ext7 = sext i16 %trunc7 to i32 + %ext8 = sext i16 %trunc8 to i32 + %ext9 = sext i16 %trunc9 to i32 + %ext10 = sext i16 %trunc10 to i32 + %ext11 = sext i16 %trunc11 to i32 + %ext12 = sext i16 %trunc12 to i32 + %ext13 = sext i16 %trunc13 to i32 + %ext14 = sext i16 %trunc14 to i32 + %ext15 = sext i16 %trunc15 to i32 + + store volatile i32 %val0, i32 *%ptr + store volatile i32 %val1, i32 *%ptr + store volatile i32 %val2, i32 *%ptr + store volatile i32 %val3, i32 *%ptr + store volatile i32 %val4, i32 *%ptr + store volatile i32 %val5, i32 *%ptr + store volatile i32 %val6, i32 *%ptr + store volatile i32 %val7, i32 *%ptr + store volatile i32 %val8, i32 *%ptr + store volatile i32 %val9, i32 *%ptr + store volatile i32 %val10, i32 *%ptr + store volatile i32 %val11, i32 *%ptr + store volatile i32 %val12, i32 *%ptr + store volatile i32 %val13, i32 *%ptr + store volatile i32 %val14, i32 *%ptr + store volatile i32 %val15, i32 *%ptr + + store volatile i32 %ext0, i32 *%ptr + store volatile i32 %ext1, i32 *%ptr + store volatile i32 %ext2, i32 *%ptr + store volatile i32 %ext3, i32 *%ptr + store volatile i32 %ext4, i32 *%ptr + store volatile i32 %ext5, i32 *%ptr + store volatile i32 %ext6, i32 *%ptr + store volatile i32 %ext7, i32 *%ptr + store volatile i32 %ext8, i32 *%ptr + store volatile i32 %ext9, i32 *%ptr + store volatile i32 %ext10, i32 *%ptr + store volatile i32 %ext11, i32 *%ptr + store volatile i32 %ext12, i32 *%ptr + store volatile i32 %ext13, i32 *%ptr + store volatile i32 %ext14, i32 *%ptr + store volatile i32 %ext15, i32 *%ptr + + ret void +} diff --git a/test/CodeGen/SystemZ/int-conv-06.ll b/test/CodeGen/SystemZ/int-conv-06.ll index dc9dcc7eb0e..90a142b7739 100644 --- a/test/CodeGen/SystemZ/int-conv-06.ll +++ b/test/CodeGen/SystemZ/int-conv-06.ll @@ -112,3 +112,97 @@ define i32 @f10(i64 %src, i64 %index) { %ext = zext i16 %half to i32 ret i32 %ext } + +; Test a case where we spill the source of at least one LLHR. We want +; to use LLH if possible. 
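The halfword variants follow the same pattern one size up: the low 16 bits of
an i32 slot sit at offset 162 or 166, matching the 16{{[26]}} pattern above,
and at 166 for an i64 slot.  An illustrative sketch, assuming a slot at 164:

;     st   %r0, 164(%r15)      spill the word
;     lh   %r1, 166(%r15)      reload the low halfword, sign-extended

LLH, checked in the test below, is the zero-extending form at the same
address.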
+define void @f11(i32 *%ptr) { +; CHECK: f11: +; CHECK: llh {{%r[0-9]+}}, 16{{[26]}}(%r15) +; CHECK: br %r14 + %val0 = load volatile i32 *%ptr + %val1 = load volatile i32 *%ptr + %val2 = load volatile i32 *%ptr + %val3 = load volatile i32 *%ptr + %val4 = load volatile i32 *%ptr + %val5 = load volatile i32 *%ptr + %val6 = load volatile i32 *%ptr + %val7 = load volatile i32 *%ptr + %val8 = load volatile i32 *%ptr + %val9 = load volatile i32 *%ptr + %val10 = load volatile i32 *%ptr + %val11 = load volatile i32 *%ptr + %val12 = load volatile i32 *%ptr + %val13 = load volatile i32 *%ptr + %val14 = load volatile i32 *%ptr + %val15 = load volatile i32 *%ptr + + %trunc0 = trunc i32 %val0 to i16 + %trunc1 = trunc i32 %val1 to i16 + %trunc2 = trunc i32 %val2 to i16 + %trunc3 = trunc i32 %val3 to i16 + %trunc4 = trunc i32 %val4 to i16 + %trunc5 = trunc i32 %val5 to i16 + %trunc6 = trunc i32 %val6 to i16 + %trunc7 = trunc i32 %val7 to i16 + %trunc8 = trunc i32 %val8 to i16 + %trunc9 = trunc i32 %val9 to i16 + %trunc10 = trunc i32 %val10 to i16 + %trunc11 = trunc i32 %val11 to i16 + %trunc12 = trunc i32 %val12 to i16 + %trunc13 = trunc i32 %val13 to i16 + %trunc14 = trunc i32 %val14 to i16 + %trunc15 = trunc i32 %val15 to i16 + + %ext0 = zext i16 %trunc0 to i32 + %ext1 = zext i16 %trunc1 to i32 + %ext2 = zext i16 %trunc2 to i32 + %ext3 = zext i16 %trunc3 to i32 + %ext4 = zext i16 %trunc4 to i32 + %ext5 = zext i16 %trunc5 to i32 + %ext6 = zext i16 %trunc6 to i32 + %ext7 = zext i16 %trunc7 to i32 + %ext8 = zext i16 %trunc8 to i32 + %ext9 = zext i16 %trunc9 to i32 + %ext10 = zext i16 %trunc10 to i32 + %ext11 = zext i16 %trunc11 to i32 + %ext12 = zext i16 %trunc12 to i32 + %ext13 = zext i16 %trunc13 to i32 + %ext14 = zext i16 %trunc14 to i32 + %ext15 = zext i16 %trunc15 to i32 + + store volatile i32 %val0, i32 *%ptr + store volatile i32 %val1, i32 *%ptr + store volatile i32 %val2, i32 *%ptr + store volatile i32 %val3, i32 *%ptr + store volatile i32 %val4, i32 *%ptr + store volatile i32 %val5, i32 *%ptr + store volatile i32 %val6, i32 *%ptr + store volatile i32 %val7, i32 *%ptr + store volatile i32 %val8, i32 *%ptr + store volatile i32 %val9, i32 *%ptr + store volatile i32 %val10, i32 *%ptr + store volatile i32 %val11, i32 *%ptr + store volatile i32 %val12, i32 *%ptr + store volatile i32 %val13, i32 *%ptr + store volatile i32 %val14, i32 *%ptr + store volatile i32 %val15, i32 *%ptr + + store volatile i32 %ext0, i32 *%ptr + store volatile i32 %ext1, i32 *%ptr + store volatile i32 %ext2, i32 *%ptr + store volatile i32 %ext3, i32 *%ptr + store volatile i32 %ext4, i32 *%ptr + store volatile i32 %ext5, i32 *%ptr + store volatile i32 %ext6, i32 *%ptr + store volatile i32 %ext7, i32 *%ptr + store volatile i32 %ext8, i32 *%ptr + store volatile i32 %ext9, i32 *%ptr + store volatile i32 %ext10, i32 *%ptr + store volatile i32 %ext11, i32 *%ptr + store volatile i32 %ext12, i32 *%ptr + store volatile i32 %ext13, i32 *%ptr + store volatile i32 %ext14, i32 *%ptr + store volatile i32 %ext15, i32 *%ptr + + ret void +} diff --git a/test/CodeGen/SystemZ/int-conv-07.ll b/test/CodeGen/SystemZ/int-conv-07.ll index bb85fb9f2a2..9b9df46be9c 100644 --- a/test/CodeGen/SystemZ/int-conv-07.ll +++ b/test/CodeGen/SystemZ/int-conv-07.ll @@ -103,3 +103,97 @@ define i64 @f9(i64 %src, i64 %index) { %ext = sext i16 %half to i64 ret i64 %ext } + +; Test a case where we spill the source of at least one LGHR. We want +; to use LGH if possible. 
+define void @f10(i64 *%ptr) { +; CHECK: f10: +; CHECK: lgh {{%r[0-9]+}}, 166(%r15) +; CHECK: br %r14 + %val0 = load volatile i64 *%ptr + %val1 = load volatile i64 *%ptr + %val2 = load volatile i64 *%ptr + %val3 = load volatile i64 *%ptr + %val4 = load volatile i64 *%ptr + %val5 = load volatile i64 *%ptr + %val6 = load volatile i64 *%ptr + %val7 = load volatile i64 *%ptr + %val8 = load volatile i64 *%ptr + %val9 = load volatile i64 *%ptr + %val10 = load volatile i64 *%ptr + %val11 = load volatile i64 *%ptr + %val12 = load volatile i64 *%ptr + %val13 = load volatile i64 *%ptr + %val14 = load volatile i64 *%ptr + %val15 = load volatile i64 *%ptr + + %trunc0 = trunc i64 %val0 to i16 + %trunc1 = trunc i64 %val1 to i16 + %trunc2 = trunc i64 %val2 to i16 + %trunc3 = trunc i64 %val3 to i16 + %trunc4 = trunc i64 %val4 to i16 + %trunc5 = trunc i64 %val5 to i16 + %trunc6 = trunc i64 %val6 to i16 + %trunc7 = trunc i64 %val7 to i16 + %trunc8 = trunc i64 %val8 to i16 + %trunc9 = trunc i64 %val9 to i16 + %trunc10 = trunc i64 %val10 to i16 + %trunc11 = trunc i64 %val11 to i16 + %trunc12 = trunc i64 %val12 to i16 + %trunc13 = trunc i64 %val13 to i16 + %trunc14 = trunc i64 %val14 to i16 + %trunc15 = trunc i64 %val15 to i16 + + %ext0 = sext i16 %trunc0 to i64 + %ext1 = sext i16 %trunc1 to i64 + %ext2 = sext i16 %trunc2 to i64 + %ext3 = sext i16 %trunc3 to i64 + %ext4 = sext i16 %trunc4 to i64 + %ext5 = sext i16 %trunc5 to i64 + %ext6 = sext i16 %trunc6 to i64 + %ext7 = sext i16 %trunc7 to i64 + %ext8 = sext i16 %trunc8 to i64 + %ext9 = sext i16 %trunc9 to i64 + %ext10 = sext i16 %trunc10 to i64 + %ext11 = sext i16 %trunc11 to i64 + %ext12 = sext i16 %trunc12 to i64 + %ext13 = sext i16 %trunc13 to i64 + %ext14 = sext i16 %trunc14 to i64 + %ext15 = sext i16 %trunc15 to i64 + + store volatile i64 %val0, i64 *%ptr + store volatile i64 %val1, i64 *%ptr + store volatile i64 %val2, i64 *%ptr + store volatile i64 %val3, i64 *%ptr + store volatile i64 %val4, i64 *%ptr + store volatile i64 %val5, i64 *%ptr + store volatile i64 %val6, i64 *%ptr + store volatile i64 %val7, i64 *%ptr + store volatile i64 %val8, i64 *%ptr + store volatile i64 %val9, i64 *%ptr + store volatile i64 %val10, i64 *%ptr + store volatile i64 %val11, i64 *%ptr + store volatile i64 %val12, i64 *%ptr + store volatile i64 %val13, i64 *%ptr + store volatile i64 %val14, i64 *%ptr + store volatile i64 %val15, i64 *%ptr + + store volatile i64 %ext0, i64 *%ptr + store volatile i64 %ext1, i64 *%ptr + store volatile i64 %ext2, i64 *%ptr + store volatile i64 %ext3, i64 *%ptr + store volatile i64 %ext4, i64 *%ptr + store volatile i64 %ext5, i64 *%ptr + store volatile i64 %ext6, i64 *%ptr + store volatile i64 %ext7, i64 *%ptr + store volatile i64 %ext8, i64 *%ptr + store volatile i64 %ext9, i64 *%ptr + store volatile i64 %ext10, i64 *%ptr + store volatile i64 %ext11, i64 *%ptr + store volatile i64 %ext12, i64 *%ptr + store volatile i64 %ext13, i64 *%ptr + store volatile i64 %ext14, i64 *%ptr + store volatile i64 %ext15, i64 *%ptr + + ret void +} diff --git a/test/CodeGen/SystemZ/int-conv-08.ll b/test/CodeGen/SystemZ/int-conv-08.ll index 090cc754024..0616f1e4561 100644 --- a/test/CodeGen/SystemZ/int-conv-08.ll +++ b/test/CodeGen/SystemZ/int-conv-08.ll @@ -112,3 +112,97 @@ define i64 @f10(i64 %src, i64 %index) { %ext = zext i16 %half to i64 ret i64 %ext } + +; Test a case where we spill the source of at least one LLGHR. We want +; to use LLGH if possible. 
+define void @f11(i64 *%ptr) { +; CHECK: f11: +; CHECK: llgh {{%r[0-9]+}}, 166(%r15) +; CHECK: br %r14 + %val0 = load volatile i64 *%ptr + %val1 = load volatile i64 *%ptr + %val2 = load volatile i64 *%ptr + %val3 = load volatile i64 *%ptr + %val4 = load volatile i64 *%ptr + %val5 = load volatile i64 *%ptr + %val6 = load volatile i64 *%ptr + %val7 = load volatile i64 *%ptr + %val8 = load volatile i64 *%ptr + %val9 = load volatile i64 *%ptr + %val10 = load volatile i64 *%ptr + %val11 = load volatile i64 *%ptr + %val12 = load volatile i64 *%ptr + %val13 = load volatile i64 *%ptr + %val14 = load volatile i64 *%ptr + %val15 = load volatile i64 *%ptr + + %trunc0 = trunc i64 %val0 to i16 + %trunc1 = trunc i64 %val1 to i16 + %trunc2 = trunc i64 %val2 to i16 + %trunc3 = trunc i64 %val3 to i16 + %trunc4 = trunc i64 %val4 to i16 + %trunc5 = trunc i64 %val5 to i16 + %trunc6 = trunc i64 %val6 to i16 + %trunc7 = trunc i64 %val7 to i16 + %trunc8 = trunc i64 %val8 to i16 + %trunc9 = trunc i64 %val9 to i16 + %trunc10 = trunc i64 %val10 to i16 + %trunc11 = trunc i64 %val11 to i16 + %trunc12 = trunc i64 %val12 to i16 + %trunc13 = trunc i64 %val13 to i16 + %trunc14 = trunc i64 %val14 to i16 + %trunc15 = trunc i64 %val15 to i16 + + %ext0 = zext i16 %trunc0 to i64 + %ext1 = zext i16 %trunc1 to i64 + %ext2 = zext i16 %trunc2 to i64 + %ext3 = zext i16 %trunc3 to i64 + %ext4 = zext i16 %trunc4 to i64 + %ext5 = zext i16 %trunc5 to i64 + %ext6 = zext i16 %trunc6 to i64 + %ext7 = zext i16 %trunc7 to i64 + %ext8 = zext i16 %trunc8 to i64 + %ext9 = zext i16 %trunc9 to i64 + %ext10 = zext i16 %trunc10 to i64 + %ext11 = zext i16 %trunc11 to i64 + %ext12 = zext i16 %trunc12 to i64 + %ext13 = zext i16 %trunc13 to i64 + %ext14 = zext i16 %trunc14 to i64 + %ext15 = zext i16 %trunc15 to i64 + + store volatile i64 %val0, i64 *%ptr + store volatile i64 %val1, i64 *%ptr + store volatile i64 %val2, i64 *%ptr + store volatile i64 %val3, i64 *%ptr + store volatile i64 %val4, i64 *%ptr + store volatile i64 %val5, i64 *%ptr + store volatile i64 %val6, i64 *%ptr + store volatile i64 %val7, i64 *%ptr + store volatile i64 %val8, i64 *%ptr + store volatile i64 %val9, i64 *%ptr + store volatile i64 %val10, i64 *%ptr + store volatile i64 %val11, i64 *%ptr + store volatile i64 %val12, i64 *%ptr + store volatile i64 %val13, i64 *%ptr + store volatile i64 %val14, i64 *%ptr + store volatile i64 %val15, i64 *%ptr + + store volatile i64 %ext0, i64 *%ptr + store volatile i64 %ext1, i64 *%ptr + store volatile i64 %ext2, i64 *%ptr + store volatile i64 %ext3, i64 *%ptr + store volatile i64 %ext4, i64 *%ptr + store volatile i64 %ext5, i64 *%ptr + store volatile i64 %ext6, i64 *%ptr + store volatile i64 %ext7, i64 *%ptr + store volatile i64 %ext8, i64 *%ptr + store volatile i64 %ext9, i64 *%ptr + store volatile i64 %ext10, i64 *%ptr + store volatile i64 %ext11, i64 *%ptr + store volatile i64 %ext12, i64 *%ptr + store volatile i64 %ext13, i64 *%ptr + store volatile i64 %ext14, i64 *%ptr + store volatile i64 %ext15, i64 *%ptr + + ret void +} diff --git a/test/CodeGen/SystemZ/int-conv-09.ll b/test/CodeGen/SystemZ/int-conv-09.ll index f003c059eb3..ab6c463092c 100644 --- a/test/CodeGen/SystemZ/int-conv-09.ll +++ b/test/CodeGen/SystemZ/int-conv-09.ll @@ -102,3 +102,80 @@ define i64 @f9(i64 %src, i64 %index) { %ext = sext i32 %word to i64 ret i64 %ext } + +; Test a case where we spill the source of at least one LGFR. We want +; to use LGF if possible. 
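For word-sized extensions there is no sub-slot offset left: the i32 being
extended fills its 4-byte slot, so LGF and LLGF read the slot base itself,
160 or 164, hence the 16{{[04]}} patterns in the two tests that follow.
Illustrative sketch:

;     st   %r0, 160(%r15)      spill the word
;     lgf  %r1, 160(%r15)      reload it sign-extended to 64 bits

These two tests also route the i64 results through %ptr1 and the i32 sources
through %ptr2, presumably to keep the two live sets independent of each other.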
+define void @f10(i64 *%ptr1, i32 *%ptr2) { +; CHECK: f10: +; CHECK: lgf {{%r[0-9]+}}, 16{{[04]}}(%r15) +; CHECK: br %r14 + %val0 = load volatile i32 *%ptr2 + %val1 = load volatile i32 *%ptr2 + %val2 = load volatile i32 *%ptr2 + %val3 = load volatile i32 *%ptr2 + %val4 = load volatile i32 *%ptr2 + %val5 = load volatile i32 *%ptr2 + %val6 = load volatile i32 *%ptr2 + %val7 = load volatile i32 *%ptr2 + %val8 = load volatile i32 *%ptr2 + %val9 = load volatile i32 *%ptr2 + %val10 = load volatile i32 *%ptr2 + %val11 = load volatile i32 *%ptr2 + %val12 = load volatile i32 *%ptr2 + %val13 = load volatile i32 *%ptr2 + %val14 = load volatile i32 *%ptr2 + %val15 = load volatile i32 *%ptr2 + + %ext0 = sext i32 %val0 to i64 + %ext1 = sext i32 %val1 to i64 + %ext2 = sext i32 %val2 to i64 + %ext3 = sext i32 %val3 to i64 + %ext4 = sext i32 %val4 to i64 + %ext5 = sext i32 %val5 to i64 + %ext6 = sext i32 %val6 to i64 + %ext7 = sext i32 %val7 to i64 + %ext8 = sext i32 %val8 to i64 + %ext9 = sext i32 %val9 to i64 + %ext10 = sext i32 %val10 to i64 + %ext11 = sext i32 %val11 to i64 + %ext12 = sext i32 %val12 to i64 + %ext13 = sext i32 %val13 to i64 + %ext14 = sext i32 %val14 to i64 + %ext15 = sext i32 %val15 to i64 + + store volatile i32 %val0, i32 *%ptr2 + store volatile i32 %val1, i32 *%ptr2 + store volatile i32 %val2, i32 *%ptr2 + store volatile i32 %val3, i32 *%ptr2 + store volatile i32 %val4, i32 *%ptr2 + store volatile i32 %val5, i32 *%ptr2 + store volatile i32 %val6, i32 *%ptr2 + store volatile i32 %val7, i32 *%ptr2 + store volatile i32 %val8, i32 *%ptr2 + store volatile i32 %val9, i32 *%ptr2 + store volatile i32 %val10, i32 *%ptr2 + store volatile i32 %val11, i32 *%ptr2 + store volatile i32 %val12, i32 *%ptr2 + store volatile i32 %val13, i32 *%ptr2 + store volatile i32 %val14, i32 *%ptr2 + store volatile i32 %val15, i32 *%ptr2 + + store volatile i64 %ext0, i64 *%ptr1 + store volatile i64 %ext1, i64 *%ptr1 + store volatile i64 %ext2, i64 *%ptr1 + store volatile i64 %ext3, i64 *%ptr1 + store volatile i64 %ext4, i64 *%ptr1 + store volatile i64 %ext5, i64 *%ptr1 + store volatile i64 %ext6, i64 *%ptr1 + store volatile i64 %ext7, i64 *%ptr1 + store volatile i64 %ext8, i64 *%ptr1 + store volatile i64 %ext9, i64 *%ptr1 + store volatile i64 %ext10, i64 *%ptr1 + store volatile i64 %ext11, i64 *%ptr1 + store volatile i64 %ext12, i64 *%ptr1 + store volatile i64 %ext13, i64 *%ptr1 + store volatile i64 %ext14, i64 *%ptr1 + store volatile i64 %ext15, i64 *%ptr1 + + ret void +} diff --git a/test/CodeGen/SystemZ/int-conv-10.ll b/test/CodeGen/SystemZ/int-conv-10.ll index 2b2a290af6b..4b078f95455 100644 --- a/test/CodeGen/SystemZ/int-conv-10.ll +++ b/test/CodeGen/SystemZ/int-conv-10.ll @@ -111,3 +111,80 @@ define i64 @f10(i64 %src, i64 %index) { %ext = zext i32 %word to i64 ret i64 %ext } + +; Test a case where we spill the source of at least one LLGFR. We want +; to use LLGF if possible. 
+define void @f11(i64 *%ptr1, i32 *%ptr2) { +; CHECK: f11: +; CHECK: llgf {{%r[0-9]+}}, 16{{[04]}}(%r15) +; CHECK: br %r14 + %val0 = load volatile i32 *%ptr2 + %val1 = load volatile i32 *%ptr2 + %val2 = load volatile i32 *%ptr2 + %val3 = load volatile i32 *%ptr2 + %val4 = load volatile i32 *%ptr2 + %val5 = load volatile i32 *%ptr2 + %val6 = load volatile i32 *%ptr2 + %val7 = load volatile i32 *%ptr2 + %val8 = load volatile i32 *%ptr2 + %val9 = load volatile i32 *%ptr2 + %val10 = load volatile i32 *%ptr2 + %val11 = load volatile i32 *%ptr2 + %val12 = load volatile i32 *%ptr2 + %val13 = load volatile i32 *%ptr2 + %val14 = load volatile i32 *%ptr2 + %val15 = load volatile i32 *%ptr2 + + %ext0 = zext i32 %val0 to i64 + %ext1 = zext i32 %val1 to i64 + %ext2 = zext i32 %val2 to i64 + %ext3 = zext i32 %val3 to i64 + %ext4 = zext i32 %val4 to i64 + %ext5 = zext i32 %val5 to i64 + %ext6 = zext i32 %val6 to i64 + %ext7 = zext i32 %val7 to i64 + %ext8 = zext i32 %val8 to i64 + %ext9 = zext i32 %val9 to i64 + %ext10 = zext i32 %val10 to i64 + %ext11 = zext i32 %val11 to i64 + %ext12 = zext i32 %val12 to i64 + %ext13 = zext i32 %val13 to i64 + %ext14 = zext i32 %val14 to i64 + %ext15 = zext i32 %val15 to i64 + + store volatile i32 %val0, i32 *%ptr2 + store volatile i32 %val1, i32 *%ptr2 + store volatile i32 %val2, i32 *%ptr2 + store volatile i32 %val3, i32 *%ptr2 + store volatile i32 %val4, i32 *%ptr2 + store volatile i32 %val5, i32 *%ptr2 + store volatile i32 %val6, i32 *%ptr2 + store volatile i32 %val7, i32 *%ptr2 + store volatile i32 %val8, i32 *%ptr2 + store volatile i32 %val9, i32 *%ptr2 + store volatile i32 %val10, i32 *%ptr2 + store volatile i32 %val11, i32 *%ptr2 + store volatile i32 %val12, i32 *%ptr2 + store volatile i32 %val13, i32 *%ptr2 + store volatile i32 %val14, i32 *%ptr2 + store volatile i32 %val15, i32 *%ptr2 + + store volatile i64 %ext0, i64 *%ptr1 + store volatile i64 %ext1, i64 *%ptr1 + store volatile i64 %ext2, i64 *%ptr1 + store volatile i64 %ext3, i64 *%ptr1 + store volatile i64 %ext4, i64 *%ptr1 + store volatile i64 %ext5, i64 *%ptr1 + store volatile i64 %ext6, i64 *%ptr1 + store volatile i64 %ext7, i64 *%ptr1 + store volatile i64 %ext8, i64 *%ptr1 + store volatile i64 %ext9, i64 *%ptr1 + store volatile i64 %ext10, i64 *%ptr1 + store volatile i64 %ext11, i64 *%ptr1 + store volatile i64 %ext12, i64 *%ptr1 + store volatile i64 %ext13, i64 *%ptr1 + store volatile i64 %ext14, i64 *%ptr1 + store volatile i64 %ext15, i64 *%ptr1 + + ret void +} diff --git a/test/CodeGen/SystemZ/int-div-01.ll b/test/CodeGen/SystemZ/int-div-01.ll index 9a0066f0d0a..9fa019b98c5 100644 --- a/test/CodeGen/SystemZ/int-div-01.ll +++ b/test/CodeGen/SystemZ/int-div-01.ll @@ -206,3 +206,46 @@ define void @f15(i32 *%dest, i32 *%src) { store i32 %div, i32 *%dest ret void } + +; Check that divisions of spilled values can use DSGF rather than DSGFR. 
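SystemZ has no 32-bit signed divide: an i32 sdiv sign-extends the dividend to
64 bits and uses DSGF(R), whose divisor is likewise a sign-extended 32-bit
value.  The divide writes to an even/odd register pair, leaving the remainder
in the even register and the quotient in the odd one.  A minimal sketch of the
pattern, assuming this lowering (hypothetical @sample):

define i32 @sample(i32 %a, i32 *%ptr) {
  %b = load i32 *%ptr
  %div = sdiv i32 %a, %b        ; selected as dsgf with %b in memory
  ret i32 %div
}

In the spill test below the divisor comes from the stack instead, and the
reload should fold into DSGF the same way.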
+define i32 @f16(i32 *%ptr0) { +; CHECK: f16: +; CHECK: brasl %r14, foo@PLT +; CHECK: dsgf {{%r[0-9]+}}, 16{{[04]}}(%r15) +; CHECK: br %r14 + %ptr1 = getelementptr i32 *%ptr0, i64 2 + %ptr2 = getelementptr i32 *%ptr0, i64 4 + %ptr3 = getelementptr i32 *%ptr0, i64 6 + %ptr4 = getelementptr i32 *%ptr0, i64 8 + %ptr5 = getelementptr i32 *%ptr0, i64 10 + %ptr6 = getelementptr i32 *%ptr0, i64 12 + %ptr7 = getelementptr i32 *%ptr0, i64 14 + %ptr8 = getelementptr i32 *%ptr0, i64 16 + %ptr9 = getelementptr i32 *%ptr0, i64 18 + + %val0 = load i32 *%ptr0 + %val1 = load i32 *%ptr1 + %val2 = load i32 *%ptr2 + %val3 = load i32 *%ptr3 + %val4 = load i32 *%ptr4 + %val5 = load i32 *%ptr5 + %val6 = load i32 *%ptr6 + %val7 = load i32 *%ptr7 + %val8 = load i32 *%ptr8 + %val9 = load i32 *%ptr9 + + %ret = call i32 @foo() + + %div0 = sdiv i32 %ret, %val0 + %div1 = sdiv i32 %div0, %val1 + %div2 = sdiv i32 %div1, %val2 + %div3 = sdiv i32 %div2, %val3 + %div4 = sdiv i32 %div3, %val4 + %div5 = sdiv i32 %div4, %val5 + %div6 = sdiv i32 %div5, %val6 + %div7 = sdiv i32 %div6, %val7 + %div8 = sdiv i32 %div7, %val8 + %div9 = sdiv i32 %div8, %val9 + + ret i32 %div9 +} diff --git a/test/CodeGen/SystemZ/int-div-02.ll b/test/CodeGen/SystemZ/int-div-02.ll index 7954384d296..b09172df9da 100644 --- a/test/CodeGen/SystemZ/int-div-02.ll +++ b/test/CodeGen/SystemZ/int-div-02.ll @@ -2,6 +2,8 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s +declare i32 @foo() + ; Test register division. The result is in the second of the two registers. define void @f1(i32 %dummy, i32 %a, i32 %b, i32 *%dest) { ; CHECK: f1: @@ -164,3 +166,46 @@ define i32 @f12(i32 %dummy, i32 %a, i64 %src, i64 %index) { %rem = urem i32 %a, %b ret i32 %rem } + +; Check that divisions of spilled values can use DL rather than DLR. +define i32 @f13(i32 *%ptr0) { +; CHECK: f13: +; CHECK: brasl %r14, foo@PLT +; CHECK: dl {{%r[0-9]+}}, 16{{[04]}}(%r15) +; CHECK: br %r14 + %ptr1 = getelementptr i32 *%ptr0, i64 2 + %ptr2 = getelementptr i32 *%ptr0, i64 4 + %ptr3 = getelementptr i32 *%ptr0, i64 6 + %ptr4 = getelementptr i32 *%ptr0, i64 8 + %ptr5 = getelementptr i32 *%ptr0, i64 10 + %ptr6 = getelementptr i32 *%ptr0, i64 12 + %ptr7 = getelementptr i32 *%ptr0, i64 14 + %ptr8 = getelementptr i32 *%ptr0, i64 16 + %ptr9 = getelementptr i32 *%ptr0, i64 18 + + %val0 = load i32 *%ptr0 + %val1 = load i32 *%ptr1 + %val2 = load i32 *%ptr2 + %val3 = load i32 *%ptr3 + %val4 = load i32 *%ptr4 + %val5 = load i32 *%ptr5 + %val6 = load i32 *%ptr6 + %val7 = load i32 *%ptr7 + %val8 = load i32 *%ptr8 + %val9 = load i32 *%ptr9 + + %ret = call i32 @foo() + + %div0 = udiv i32 %ret, %val0 + %div1 = udiv i32 %div0, %val1 + %div2 = udiv i32 %div1, %val2 + %div3 = udiv i32 %div2, %val3 + %div4 = udiv i32 %div3, %val4 + %div5 = udiv i32 %div4, %val5 + %div6 = udiv i32 %div5, %val6 + %div7 = udiv i32 %div6, %val7 + %div8 = udiv i32 %div7, %val8 + %div9 = udiv i32 %div8, %val9 + + ret i32 %div9 +} diff --git a/test/CodeGen/SystemZ/int-div-04.ll b/test/CodeGen/SystemZ/int-div-04.ll index 3f72be9a47d..b2710a17b1d 100644 --- a/test/CodeGen/SystemZ/int-div-04.ll +++ b/test/CodeGen/SystemZ/int-div-04.ll @@ -2,6 +2,8 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s +declare i64 @foo() + ; Testg register division. The result is in the second of the two registers. 
define void @f1(i64 %dummy, i64 %a, i64 %b, i64 *%dest) { ; CHECK: f1: @@ -152,3 +154,49 @@ define i64 @f12(i64 %dummy, i64 %a, i64 %src, i64 %index) { %rem = srem i64 %a, %b ret i64 %rem } + +; Check that divisions of spilled values can use DSG rather than DSGR. +define i64 @f13(i64 *%ptr0) { +; CHECK: f13: +; CHECK: brasl %r14, foo@PLT +; CHECK: dsg {{%r[0-9]+}}, 160(%r15) +; CHECK: br %r14 + %ptr1 = getelementptr i64 *%ptr0, i64 2 + %ptr2 = getelementptr i64 *%ptr0, i64 4 + %ptr3 = getelementptr i64 *%ptr0, i64 6 + %ptr4 = getelementptr i64 *%ptr0, i64 8 + %ptr5 = getelementptr i64 *%ptr0, i64 10 + %ptr6 = getelementptr i64 *%ptr0, i64 12 + %ptr7 = getelementptr i64 *%ptr0, i64 14 + %ptr8 = getelementptr i64 *%ptr0, i64 16 + %ptr9 = getelementptr i64 *%ptr0, i64 18 + %ptr10 = getelementptr i64 *%ptr0, i64 20 + + %val0 = load i64 *%ptr0 + %val1 = load i64 *%ptr1 + %val2 = load i64 *%ptr2 + %val3 = load i64 *%ptr3 + %val4 = load i64 *%ptr4 + %val5 = load i64 *%ptr5 + %val6 = load i64 *%ptr6 + %val7 = load i64 *%ptr7 + %val8 = load i64 *%ptr8 + %val9 = load i64 *%ptr9 + %val10 = load i64 *%ptr10 + + %ret = call i64 @foo() + + %div0 = sdiv i64 %ret, %val0 + %div1 = sdiv i64 %div0, %val1 + %div2 = sdiv i64 %div1, %val2 + %div3 = sdiv i64 %div2, %val3 + %div4 = sdiv i64 %div3, %val4 + %div5 = sdiv i64 %div4, %val5 + %div6 = sdiv i64 %div5, %val6 + %div7 = sdiv i64 %div6, %val7 + %div8 = sdiv i64 %div7, %val8 + %div9 = sdiv i64 %div8, %val9 + %div10 = sdiv i64 %div9, %val10 + + ret i64 %div10 +} diff --git a/test/CodeGen/SystemZ/int-div-05.ll b/test/CodeGen/SystemZ/int-div-05.ll index 04f622b44e7..31415034986 100644 --- a/test/CodeGen/SystemZ/int-div-05.ll +++ b/test/CodeGen/SystemZ/int-div-05.ll @@ -2,6 +2,8 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s +declare i64 @foo() + ; Testg register division. The result is in the second of the two registers. define void @f1(i64 %dummy, i64 %a, i64 %b, i64 *%dest) { ; CHECK: f1: @@ -164,3 +166,49 @@ define i64 @f12(i64 %dummy, i64 %a, i64 %src, i64 %index) { %rem = urem i64 %a, %b ret i64 %rem } + +; Check that divisions of spilled values can use DLG rather than DLGR. 
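DLG(R) is the unsigned divide: its dividend is the full 128-bit value held in
the even/odd pair, so lowering an i64 udiv first zeroes the even register
before dividing, again leaving the remainder in the even register and the
quotient in the odd one.  A sketch of the pattern, assuming this lowering
(hypothetical @sample):

define i64 @sample(i64 %a, i64 *%ptr) {
  %b = load i64 *%ptr
  %div = udiv i64 %a, %b        ; selected as dlg with %b in memory
  ret i64 %div
}

The folded form in the test below pulls the divisor straight from the spill
slot at 160(%r15).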
+define i64 @f13(i64 *%ptr0) { +; CHECK: f13: +; CHECK: brasl %r14, foo@PLT +; CHECK: dlg {{%r[0-9]+}}, 160(%r15) +; CHECK: br %r14 + %ptr1 = getelementptr i64 *%ptr0, i64 2 + %ptr2 = getelementptr i64 *%ptr0, i64 4 + %ptr3 = getelementptr i64 *%ptr0, i64 6 + %ptr4 = getelementptr i64 *%ptr0, i64 8 + %ptr5 = getelementptr i64 *%ptr0, i64 10 + %ptr6 = getelementptr i64 *%ptr0, i64 12 + %ptr7 = getelementptr i64 *%ptr0, i64 14 + %ptr8 = getelementptr i64 *%ptr0, i64 16 + %ptr9 = getelementptr i64 *%ptr0, i64 18 + %ptr10 = getelementptr i64 *%ptr0, i64 20 + + %val0 = load i64 *%ptr0 + %val1 = load i64 *%ptr1 + %val2 = load i64 *%ptr2 + %val3 = load i64 *%ptr3 + %val4 = load i64 *%ptr4 + %val5 = load i64 *%ptr5 + %val6 = load i64 *%ptr6 + %val7 = load i64 *%ptr7 + %val8 = load i64 *%ptr8 + %val9 = load i64 *%ptr9 + %val10 = load i64 *%ptr10 + + %ret = call i64 @foo() + + %div0 = udiv i64 %ret, %val0 + %div1 = udiv i64 %div0, %val1 + %div2 = udiv i64 %div1, %val2 + %div3 = udiv i64 %div2, %val3 + %div4 = udiv i64 %div3, %val4 + %div5 = udiv i64 %div4, %val5 + %div6 = udiv i64 %div5, %val6 + %div7 = udiv i64 %div6, %val7 + %div8 = udiv i64 %div7, %val8 + %div9 = udiv i64 %div8, %val9 + %div10 = udiv i64 %div9, %val10 + + ret i64 %div10 +} diff --git a/test/CodeGen/SystemZ/int-mul-02.ll b/test/CodeGen/SystemZ/int-mul-02.ll index d39c4dd0961..72990a73b70 100644 --- a/test/CodeGen/SystemZ/int-mul-02.ll +++ b/test/CodeGen/SystemZ/int-mul-02.ll @@ -2,6 +2,8 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s +declare i32 @foo() + ; Check MSR. define i32 @f1(i32 %a, i32 %b) { ; CHECK: f1: @@ -127,3 +129,46 @@ define i32 @f11(i32 %a, i64 %src, i64 %index) { %mul = mul i32 %a, %b ret i32 %mul } + +; Check that multiplications of spilled values can use MS rather than MSR. +define i32 @f12(i32 *%ptr0) { +; CHECK: f12: +; CHECK: brasl %r14, foo@PLT +; CHECK: ms %r2, 16{{[04]}}(%r15) +; CHECK: br %r14 + %ptr1 = getelementptr i32 *%ptr0, i64 2 + %ptr2 = getelementptr i32 *%ptr0, i64 4 + %ptr3 = getelementptr i32 *%ptr0, i64 6 + %ptr4 = getelementptr i32 *%ptr0, i64 8 + %ptr5 = getelementptr i32 *%ptr0, i64 10 + %ptr6 = getelementptr i32 *%ptr0, i64 12 + %ptr7 = getelementptr i32 *%ptr0, i64 14 + %ptr8 = getelementptr i32 *%ptr0, i64 16 + %ptr9 = getelementptr i32 *%ptr0, i64 18 + + %val0 = load i32 *%ptr0 + %val1 = load i32 *%ptr1 + %val2 = load i32 *%ptr2 + %val3 = load i32 *%ptr3 + %val4 = load i32 *%ptr4 + %val5 = load i32 *%ptr5 + %val6 = load i32 *%ptr6 + %val7 = load i32 *%ptr7 + %val8 = load i32 *%ptr8 + %val9 = load i32 *%ptr9 + + %ret = call i32 @foo() + + %mul0 = mul i32 %ret, %val0 + %mul1 = mul i32 %mul0, %val1 + %mul2 = mul i32 %mul1, %val2 + %mul3 = mul i32 %mul2, %val3 + %mul4 = mul i32 %mul3, %val4 + %mul5 = mul i32 %mul4, %val5 + %mul6 = mul i32 %mul5, %val6 + %mul7 = mul i32 %mul6, %val7 + %mul8 = mul i32 %mul7, %val8 + %mul9 = mul i32 %mul8, %val9 + + ret i32 %mul9 +} diff --git a/test/CodeGen/SystemZ/int-mul-03.ll b/test/CodeGen/SystemZ/int-mul-03.ll index ab4ef9edd23..f027bd06065 100644 --- a/test/CodeGen/SystemZ/int-mul-03.ll +++ b/test/CodeGen/SystemZ/int-mul-03.ll @@ -2,6 +2,8 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s +declare i64 @foo() + ; Check MSGFR. define i64 @f1(i64 %a, i32 %b) { ; CHECK: f1: @@ -100,3 +102,79 @@ define i64 @f8(i64 %a, i64 %src, i64 %index) { %mul = mul i64 %a, %bext ret i64 %mul } + +; Check that multiplications of spilled values can use MSGF rather than MSGFR. 
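MSGF multiplies a 64-bit register by a sign-extended 32-bit operand and keeps
only the low 64 bits of the product; unlike the adds and subtracts in these
tests, the multiplies leave the condition code alone.  The IR shape is the
same sext-feeding-the-operation pattern already seen for CGF (hypothetical
@sample, illustrative only):

define i64 @sample(i64 %a, i32 *%ptr) {
  %b = load i32 *%ptr
  %ext = sext i32 %b to i64
  %mul = mul i64 %a, %ext       ; selected as msgf with %b in memory
  ret i64 %mul
}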
+define i64 @f9(i32 *%ptr0) { +; CHECK: f9: +; CHECK: brasl %r14, foo@PLT +; CHECK: msgf %r2, 16{{[04]}}(%r15) +; CHECK: br %r14 + %ptr1 = getelementptr i32 *%ptr0, i64 2 + %ptr2 = getelementptr i32 *%ptr0, i64 4 + %ptr3 = getelementptr i32 *%ptr0, i64 6 + %ptr4 = getelementptr i32 *%ptr0, i64 8 + %ptr5 = getelementptr i32 *%ptr0, i64 10 + %ptr6 = getelementptr i32 *%ptr0, i64 12 + %ptr7 = getelementptr i32 *%ptr0, i64 14 + %ptr8 = getelementptr i32 *%ptr0, i64 16 + %ptr9 = getelementptr i32 *%ptr0, i64 18 + + %val0 = load i32 *%ptr0 + %val1 = load i32 *%ptr1 + %val2 = load i32 *%ptr2 + %val3 = load i32 *%ptr3 + %val4 = load i32 *%ptr4 + %val5 = load i32 *%ptr5 + %val6 = load i32 *%ptr6 + %val7 = load i32 *%ptr7 + %val8 = load i32 *%ptr8 + %val9 = load i32 *%ptr9 + + %frob0 = add i32 %val0, 100 + %frob1 = add i32 %val1, 100 + %frob2 = add i32 %val2, 100 + %frob3 = add i32 %val3, 100 + %frob4 = add i32 %val4, 100 + %frob5 = add i32 %val5, 100 + %frob6 = add i32 %val6, 100 + %frob7 = add i32 %val7, 100 + %frob8 = add i32 %val8, 100 + %frob9 = add i32 %val9, 100 + + store i32 %frob0, i32 *%ptr0 + store i32 %frob1, i32 *%ptr1 + store i32 %frob2, i32 *%ptr2 + store i32 %frob3, i32 *%ptr3 + store i32 %frob4, i32 *%ptr4 + store i32 %frob5, i32 *%ptr5 + store i32 %frob6, i32 *%ptr6 + store i32 %frob7, i32 *%ptr7 + store i32 %frob8, i32 *%ptr8 + store i32 %frob9, i32 *%ptr9 + + %ret = call i64 @foo() + + %ext0 = sext i32 %frob0 to i64 + %ext1 = sext i32 %frob1 to i64 + %ext2 = sext i32 %frob2 to i64 + %ext3 = sext i32 %frob3 to i64 + %ext4 = sext i32 %frob4 to i64 + %ext5 = sext i32 %frob5 to i64 + %ext6 = sext i32 %frob6 to i64 + %ext7 = sext i32 %frob7 to i64 + %ext8 = sext i32 %frob8 to i64 + %ext9 = sext i32 %frob9 to i64 + + %mul0 = mul i64 %ret, %ext0 + %mul1 = mul i64 %mul0, %ext1 + %mul2 = mul i64 %mul1, %ext2 + %mul3 = mul i64 %mul2, %ext3 + %mul4 = mul i64 %mul3, %ext4 + %mul5 = mul i64 %mul4, %ext5 + %mul6 = mul i64 %mul5, %ext6 + %mul7 = mul i64 %mul6, %ext7 + %mul8 = mul i64 %mul7, %ext8 + %mul9 = mul i64 %mul8, %ext9 + + ret i64 %mul9 +} diff --git a/test/CodeGen/SystemZ/int-mul-04.ll b/test/CodeGen/SystemZ/int-mul-04.ll index 94c26397834..b37b043fb6a 100644 --- a/test/CodeGen/SystemZ/int-mul-04.ll +++ b/test/CodeGen/SystemZ/int-mul-04.ll @@ -2,6 +2,8 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s +declare i64 @foo() + ; Check MSGR. define i64 @f1(i64 %a, i64 %b) { ; CHECK: f1: @@ -92,3 +94,46 @@ define i64 @f8(i64 %a, i64 %src, i64 %index) { %mul = mul i64 %a, %b ret i64 %mul } + +; Check that multiplications of spilled values can use MSG rather than MSGR. 
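MSG is the straightforward 64-by-64 low-part multiply.  The file after it,
int-mul-08.ll, also covers MLG, which produces the full 128-bit product in an
even/odd pair with the high half in the even register; at the IR level the
high part is requested with the usual zext-multiply-shift idiom, reduced here
to a single step (hypothetical @sample):

define i64 @sample(i64 %a, i64 %b) {
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %prodx = mul i128 %ax, %bx
  %highx = lshr i128 %prodx, 64   ; high 64 bits of the product
  %high = trunc i128 %highx to i64
  ret i64 %high
}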
+define i64 @f9(i64 *%ptr0) { +; CHECK: f9: +; CHECK: brasl %r14, foo@PLT +; CHECK: msg %r2, 160(%r15) +; CHECK: br %r14 + %ptr1 = getelementptr i64 *%ptr0, i64 2 + %ptr2 = getelementptr i64 *%ptr0, i64 4 + %ptr3 = getelementptr i64 *%ptr0, i64 6 + %ptr4 = getelementptr i64 *%ptr0, i64 8 + %ptr5 = getelementptr i64 *%ptr0, i64 10 + %ptr6 = getelementptr i64 *%ptr0, i64 12 + %ptr7 = getelementptr i64 *%ptr0, i64 14 + %ptr8 = getelementptr i64 *%ptr0, i64 16 + %ptr9 = getelementptr i64 *%ptr0, i64 18 + + %val0 = load i64 *%ptr0 + %val1 = load i64 *%ptr1 + %val2 = load i64 *%ptr2 + %val3 = load i64 *%ptr3 + %val4 = load i64 *%ptr4 + %val5 = load i64 *%ptr5 + %val6 = load i64 *%ptr6 + %val7 = load i64 *%ptr7 + %val8 = load i64 *%ptr8 + %val9 = load i64 *%ptr9 + + %ret = call i64 @foo() + + %mul0 = mul i64 %ret, %val0 + %mul1 = mul i64 %mul0, %val1 + %mul2 = mul i64 %mul1, %val2 + %mul3 = mul i64 %mul2, %val3 + %mul4 = mul i64 %mul3, %val4 + %mul5 = mul i64 %mul4, %val5 + %mul6 = mul i64 %mul5, %val6 + %mul7 = mul i64 %mul6, %val7 + %mul8 = mul i64 %mul7, %val8 + %mul9 = mul i64 %mul8, %val9 + + ret i64 %mul9 +} diff --git a/test/CodeGen/SystemZ/int-mul-08.ll b/test/CodeGen/SystemZ/int-mul-08.ll index 09ebe7a7b48..e06cfc2cc20 100644 --- a/test/CodeGen/SystemZ/int-mul-08.ll +++ b/test/CodeGen/SystemZ/int-mul-08.ll @@ -2,6 +2,8 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s +declare i64 @foo() + ; Check zero-extended multiplication in which only the high part is used. define i64 @f1(i64 %dummy, i64 %a, i64 %b) { ; CHECK: f1: @@ -186,3 +188,77 @@ define i64 @f12(i64 *%dest, i64 %a, i64 %src, i64 %index) { %high = trunc i128 %highx to i64 ret i64 %high } + +; Check that multiplications of spilled values can use MLG rather than MLGR. +define i64 @f13(i64 *%ptr0) { +; CHECK: f13: +; CHECK: brasl %r14, foo@PLT +; CHECK: mlg {{%r[0-9]+}}, 160(%r15) +; CHECK: br %r14 + %ptr1 = getelementptr i64 *%ptr0, i64 2 + %ptr2 = getelementptr i64 *%ptr0, i64 4 + %ptr3 = getelementptr i64 *%ptr0, i64 6 + %ptr4 = getelementptr i64 *%ptr0, i64 8 + %ptr5 = getelementptr i64 *%ptr0, i64 10 + %ptr6 = getelementptr i64 *%ptr0, i64 12 + %ptr7 = getelementptr i64 *%ptr0, i64 14 + %ptr8 = getelementptr i64 *%ptr0, i64 16 + %ptr9 = getelementptr i64 *%ptr0, i64 18 + + %val0 = load i64 *%ptr0 + %val1 = load i64 *%ptr1 + %val2 = load i64 *%ptr2 + %val3 = load i64 *%ptr3 + %val4 = load i64 *%ptr4 + %val5 = load i64 *%ptr5 + %val6 = load i64 *%ptr6 + %val7 = load i64 *%ptr7 + %val8 = load i64 *%ptr8 + %val9 = load i64 *%ptr9 + + %ret = call i64 @foo() + + %retx = zext i64 %ret to i128 + %val0x = zext i64 %val0 to i128 + %mul0d = mul i128 %retx, %val0x + %mul0x = lshr i128 %mul0d, 64 + + %val1x = zext i64 %val1 to i128 + %mul1d = mul i128 %mul0x, %val1x + %mul1x = lshr i128 %mul1d, 64 + + %val2x = zext i64 %val2 to i128 + %mul2d = mul i128 %mul1x, %val2x + %mul2x = lshr i128 %mul2d, 64 + + %val3x = zext i64 %val3 to i128 + %mul3d = mul i128 %mul2x, %val3x + %mul3x = lshr i128 %mul3d, 64 + + %val4x = zext i64 %val4 to i128 + %mul4d = mul i128 %mul3x, %val4x + %mul4x = lshr i128 %mul4d, 64 + + %val5x = zext i64 %val5 to i128 + %mul5d = mul i128 %mul4x, %val5x + %mul5x = lshr i128 %mul5d, 64 + + %val6x = zext i64 %val6 to i128 + %mul6d = mul i128 %mul5x, %val6x + %mul6x = lshr i128 %mul6d, 64 + + %val7x = zext i64 %val7 to i128 + %mul7d = mul i128 %mul6x, %val7x + %mul7x = lshr i128 %mul7d, 64 + + %val8x = zext i64 %val8 to i128 + %mul8d = mul i128 %mul7x, %val8x + %mul8x = lshr i128 %mul8d, 64 + + %val9x = zext i64 
%val9 to i128 + %mul9d = mul i128 %mul8x, %val9x + %mul9x = lshr i128 %mul9d, 64 + + %mul9 = trunc i128 %mul9x to i64 + ret i64 %mul9 +} diff --git a/test/CodeGen/SystemZ/int-sub-01.ll b/test/CodeGen/SystemZ/int-sub-01.ll index 9a738148f7e..96ce36190dc 100644 --- a/test/CodeGen/SystemZ/int-sub-01.ll +++ b/test/CodeGen/SystemZ/int-sub-01.ll @@ -2,6 +2,8 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s +declare i32 @foo() + ; Check SR. define i32 @f1(i32 %a, i32 %b) { ; CHECK: f1: @@ -127,3 +129,46 @@ define i32 @f11(i32 %a, i64 %src, i64 %index) { %sub = sub i32 %a, %b ret i32 %sub } + +; Check that subtractions of spilled values can use S rather than SR. +define i32 @f12(i32 *%ptr0) { +; CHECK: f12: +; CHECK: brasl %r14, foo@PLT +; CHECK: s %r2, 16{{[04]}}(%r15) +; CHECK: br %r14 + %ptr1 = getelementptr i32 *%ptr0, i64 2 + %ptr2 = getelementptr i32 *%ptr0, i64 4 + %ptr3 = getelementptr i32 *%ptr0, i64 6 + %ptr4 = getelementptr i32 *%ptr0, i64 8 + %ptr5 = getelementptr i32 *%ptr0, i64 10 + %ptr6 = getelementptr i32 *%ptr0, i64 12 + %ptr7 = getelementptr i32 *%ptr0, i64 14 + %ptr8 = getelementptr i32 *%ptr0, i64 16 + %ptr9 = getelementptr i32 *%ptr0, i64 18 + + %val0 = load i32 *%ptr0 + %val1 = load i32 *%ptr1 + %val2 = load i32 *%ptr2 + %val3 = load i32 *%ptr3 + %val4 = load i32 *%ptr4 + %val5 = load i32 *%ptr5 + %val6 = load i32 *%ptr6 + %val7 = load i32 *%ptr7 + %val8 = load i32 *%ptr8 + %val9 = load i32 *%ptr9 + + %ret = call i32 @foo() + + %sub0 = sub i32 %ret, %val0 + %sub1 = sub i32 %sub0, %val1 + %sub2 = sub i32 %sub1, %val2 + %sub3 = sub i32 %sub2, %val3 + %sub4 = sub i32 %sub3, %val4 + %sub5 = sub i32 %sub4, %val5 + %sub6 = sub i32 %sub5, %val6 + %sub7 = sub i32 %sub6, %val7 + %sub8 = sub i32 %sub7, %val8 + %sub9 = sub i32 %sub8, %val9 + + ret i32 %sub9 +} diff --git a/test/CodeGen/SystemZ/int-sub-02.ll b/test/CodeGen/SystemZ/int-sub-02.ll index 5150a960a55..99d1c7b7c85 100644 --- a/test/CodeGen/SystemZ/int-sub-02.ll +++ b/test/CodeGen/SystemZ/int-sub-02.ll @@ -2,6 +2,8 @@ ; ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s +declare i64 @foo() + ; Check SGFR. define i64 @f1(i64 %a, i32 %b) { ; CHECK: f1: @@ -100,3 +102,79 @@ define i64 @f8(i64 %a, i64 %src, i64 %index) { %sub = sub i64 %a, %bext ret i64 %sub } + +; Check that subtractions of spilled values can use SGF rather than SGFR. 
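Subtraction is not commutative, so unlike the adds the operands cannot be
swapped to make a fold possible: S, SG and friends subtract the memory operand
from the register, which means only the second IR operand can live in a spill
slot.  SGF itself subtracts a sign-extended 32-bit memory value from a 64-bit
register (hypothetical @sample, illustrative only):

define i64 @sample(i64 %a, i32 *%ptr) {
  %b = load i32 *%ptr
  %ext = sext i32 %b to i64
  %sub = sub i64 %a, %ext       ; selected as sgf with %b in memory
  ret i64 %sub
}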
+define i64 @f9(i32 *%ptr0) {
+; CHECK: f9:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: sgf %r2, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i32 *%ptr0, i64 2
+  %ptr2 = getelementptr i32 *%ptr0, i64 4
+  %ptr3 = getelementptr i32 *%ptr0, i64 6
+  %ptr4 = getelementptr i32 *%ptr0, i64 8
+  %ptr5 = getelementptr i32 *%ptr0, i64 10
+  %ptr6 = getelementptr i32 *%ptr0, i64 12
+  %ptr7 = getelementptr i32 *%ptr0, i64 14
+  %ptr8 = getelementptr i32 *%ptr0, i64 16
+  %ptr9 = getelementptr i32 *%ptr0, i64 18
+
+  %val0 = load i32 *%ptr0
+  %val1 = load i32 *%ptr1
+  %val2 = load i32 *%ptr2
+  %val3 = load i32 *%ptr3
+  %val4 = load i32 *%ptr4
+  %val5 = load i32 *%ptr5
+  %val6 = load i32 *%ptr6
+  %val7 = load i32 *%ptr7
+  %val8 = load i32 *%ptr8
+  %val9 = load i32 *%ptr9
+
+  %frob0 = add i32 %val0, 100
+  %frob1 = add i32 %val1, 100
+  %frob2 = add i32 %val2, 100
+  %frob3 = add i32 %val3, 100
+  %frob4 = add i32 %val4, 100
+  %frob5 = add i32 %val5, 100
+  %frob6 = add i32 %val6, 100
+  %frob7 = add i32 %val7, 100
+  %frob8 = add i32 %val8, 100
+  %frob9 = add i32 %val9, 100
+
+  store i32 %frob0, i32 *%ptr0
+  store i32 %frob1, i32 *%ptr1
+  store i32 %frob2, i32 *%ptr2
+  store i32 %frob3, i32 *%ptr3
+  store i32 %frob4, i32 *%ptr4
+  store i32 %frob5, i32 *%ptr5
+  store i32 %frob6, i32 *%ptr6
+  store i32 %frob7, i32 *%ptr7
+  store i32 %frob8, i32 *%ptr8
+  store i32 %frob9, i32 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %ext0 = sext i32 %frob0 to i64
+  %ext1 = sext i32 %frob1 to i64
+  %ext2 = sext i32 %frob2 to i64
+  %ext3 = sext i32 %frob3 to i64
+  %ext4 = sext i32 %frob4 to i64
+  %ext5 = sext i32 %frob5 to i64
+  %ext6 = sext i32 %frob6 to i64
+  %ext7 = sext i32 %frob7 to i64
+  %ext8 = sext i32 %frob8 to i64
+  %ext9 = sext i32 %frob9 to i64
+
+  %sub0 = sub i64 %ret, %ext0
+  %sub1 = sub i64 %sub0, %ext1
+  %sub2 = sub i64 %sub1, %ext2
+  %sub3 = sub i64 %sub2, %ext3
+  %sub4 = sub i64 %sub3, %ext4
+  %sub5 = sub i64 %sub4, %ext5
+  %sub6 = sub i64 %sub5, %ext6
+  %sub7 = sub i64 %sub6, %ext7
+  %sub8 = sub i64 %sub7, %ext8
+  %sub9 = sub i64 %sub8, %ext9
+
+  ret i64 %sub9
+}
diff --git a/test/CodeGen/SystemZ/int-sub-03.ll b/test/CodeGen/SystemZ/int-sub-03.ll
index 73571b3591f..49040177fde 100644
--- a/test/CodeGen/SystemZ/int-sub-03.ll
+++ b/test/CodeGen/SystemZ/int-sub-03.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare i64 @foo()
+
 ; Check SLGFR.
 define i64 @f1(i64 %a, i32 %b) {
 ; CHECK: f1:
@@ -100,3 +102,79 @@ define i64 @f8(i64 %a, i64 %src, i64 %index) {
   %sub = sub i64 %a, %bext
   ret i64 %sub
 }
+
+; Check that subtractions of spilled values can use SLGF rather than SLGFR.
+define i64 @f9(i32 *%ptr0) {
+; CHECK: f9:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: slgf %r2, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i32 *%ptr0, i64 2
+  %ptr2 = getelementptr i32 *%ptr0, i64 4
+  %ptr3 = getelementptr i32 *%ptr0, i64 6
+  %ptr4 = getelementptr i32 *%ptr0, i64 8
+  %ptr5 = getelementptr i32 *%ptr0, i64 10
+  %ptr6 = getelementptr i32 *%ptr0, i64 12
+  %ptr7 = getelementptr i32 *%ptr0, i64 14
+  %ptr8 = getelementptr i32 *%ptr0, i64 16
+  %ptr9 = getelementptr i32 *%ptr0, i64 18
+
+  %val0 = load i32 *%ptr0
+  %val1 = load i32 *%ptr1
+  %val2 = load i32 *%ptr2
+  %val3 = load i32 *%ptr3
+  %val4 = load i32 *%ptr4
+  %val5 = load i32 *%ptr5
+  %val6 = load i32 *%ptr6
+  %val7 = load i32 *%ptr7
+  %val8 = load i32 *%ptr8
+  %val9 = load i32 *%ptr9
+
+  %frob0 = add i32 %val0, 100
+  %frob1 = add i32 %val1, 100
+  %frob2 = add i32 %val2, 100
+  %frob3 = add i32 %val3, 100
+  %frob4 = add i32 %val4, 100
+  %frob5 = add i32 %val5, 100
+  %frob6 = add i32 %val6, 100
+  %frob7 = add i32 %val7, 100
+  %frob8 = add i32 %val8, 100
+  %frob9 = add i32 %val9, 100
+
+  store i32 %frob0, i32 *%ptr0
+  store i32 %frob1, i32 *%ptr1
+  store i32 %frob2, i32 *%ptr2
+  store i32 %frob3, i32 *%ptr3
+  store i32 %frob4, i32 *%ptr4
+  store i32 %frob5, i32 *%ptr5
+  store i32 %frob6, i32 *%ptr6
+  store i32 %frob7, i32 *%ptr7
+  store i32 %frob8, i32 *%ptr8
+  store i32 %frob9, i32 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %ext0 = zext i32 %frob0 to i64
+  %ext1 = zext i32 %frob1 to i64
+  %ext2 = zext i32 %frob2 to i64
+  %ext3 = zext i32 %frob3 to i64
+  %ext4 = zext i32 %frob4 to i64
+  %ext5 = zext i32 %frob5 to i64
+  %ext6 = zext i32 %frob6 to i64
+  %ext7 = zext i32 %frob7 to i64
+  %ext8 = zext i32 %frob8 to i64
+  %ext9 = zext i32 %frob9 to i64
+
+  %sub0 = sub i64 %ret, %ext0
+  %sub1 = sub i64 %sub0, %ext1
+  %sub2 = sub i64 %sub1, %ext2
+  %sub3 = sub i64 %sub2, %ext3
+  %sub4 = sub i64 %sub3, %ext4
+  %sub5 = sub i64 %sub4, %ext5
+  %sub6 = sub i64 %sub5, %ext6
+  %sub7 = sub i64 %sub6, %ext7
+  %sub8 = sub i64 %sub7, %ext8
+  %sub9 = sub i64 %sub8, %ext9
+
+  ret i64 %sub9
+}
diff --git a/test/CodeGen/SystemZ/int-sub-04.ll b/test/CodeGen/SystemZ/int-sub-04.ll
index 545d3421680..554f7f61bc0 100644
--- a/test/CodeGen/SystemZ/int-sub-04.ll
+++ b/test/CodeGen/SystemZ/int-sub-04.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare i64 @foo()
+
 ; Check SGR.
 define i64 @f1(i64 %a, i64 %b) {
 ; CHECK: f1:
@@ -92,3 +94,46 @@ define i64 @f8(i64 %a, i64 %src, i64 %index) {
   %sub = sub i64 %a, %b
   ret i64 %sub
 }
+
+; Check that subtractions of spilled values can use SG rather than SGR.
+define i64 @f9(i64 *%ptr0) {
+; CHECK: f9:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: sg %r2, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i64 *%ptr0, i64 2
+  %ptr2 = getelementptr i64 *%ptr0, i64 4
+  %ptr3 = getelementptr i64 *%ptr0, i64 6
+  %ptr4 = getelementptr i64 *%ptr0, i64 8
+  %ptr5 = getelementptr i64 *%ptr0, i64 10
+  %ptr6 = getelementptr i64 *%ptr0, i64 12
+  %ptr7 = getelementptr i64 *%ptr0, i64 14
+  %ptr8 = getelementptr i64 *%ptr0, i64 16
+  %ptr9 = getelementptr i64 *%ptr0, i64 18
+
+  %val0 = load i64 *%ptr0
+  %val1 = load i64 *%ptr1
+  %val2 = load i64 *%ptr2
+  %val3 = load i64 *%ptr3
+  %val4 = load i64 *%ptr4
+  %val5 = load i64 *%ptr5
+  %val6 = load i64 *%ptr6
+  %val7 = load i64 *%ptr7
+  %val8 = load i64 *%ptr8
+  %val9 = load i64 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %sub0 = sub i64 %ret, %val0
+  %sub1 = sub i64 %sub0, %val1
+  %sub2 = sub i64 %sub1, %val2
+  %sub3 = sub i64 %sub2, %val3
+  %sub4 = sub i64 %sub3, %val4
+  %sub5 = sub i64 %sub4, %val5
+  %sub6 = sub i64 %sub5, %val6
+  %sub7 = sub i64 %sub6, %val7
+  %sub8 = sub i64 %sub7, %val8
+  %sub9 = sub i64 %sub8, %val9
+
+  ret i64 %sub9
+}
diff --git a/test/CodeGen/SystemZ/int-sub-05.ll b/test/CodeGen/SystemZ/int-sub-05.ll
index 1475b244f67..5d95e79080b 100644
--- a/test/CodeGen/SystemZ/int-sub-05.ll
+++ b/test/CodeGen/SystemZ/int-sub-05.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare i128 *@foo()
+
 ; Test register subtraction.
 define void @f1(i128 *%ptr, i64 %high, i64 %low) {
 ; CHECK: f1:
@@ -116,3 +118,35 @@ define void @f7(i64 %base) {
   store i128 %sub, i128 *%aptr
   ret void
 }
+
+; Check that subtractions of spilled values can use SLG and SLBG rather than
+; SLGR and SLBGR.
+define void @f8(i128 *%ptr0) {
+; CHECK: f8:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: slg {{%r[0-9]+}}, {{[0-9]+}}(%r15)
+; CHECK: slbg {{%r[0-9]+}}, {{[0-9]+}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i128 *%ptr0, i128 2
+  %ptr2 = getelementptr i128 *%ptr0, i128 4
+  %ptr3 = getelementptr i128 *%ptr0, i128 6
+  %ptr4 = getelementptr i128 *%ptr0, i128 8
+
+  %val0 = load i128 *%ptr0
+  %val1 = load i128 *%ptr1
+  %val2 = load i128 *%ptr2
+  %val3 = load i128 *%ptr3
+  %val4 = load i128 *%ptr4
+
+  %retptr = call i128 *@foo()
+
+  %ret = load i128 *%retptr
+  %sub0 = sub i128 %ret, %val0
+  %sub1 = sub i128 %sub0, %val1
+  %sub2 = sub i128 %sub1, %val2
+  %sub3 = sub i128 %sub2, %val3
+  %sub4 = sub i128 %sub3, %val4
+  store i128 %sub4, i128 *%retptr
+
+  ret void
+}
diff --git a/test/CodeGen/SystemZ/or-01.ll b/test/CodeGen/SystemZ/or-01.ll
index 20c93129efc..ab869c76ebe 100644
--- a/test/CodeGen/SystemZ/or-01.ll
+++ b/test/CodeGen/SystemZ/or-01.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare i32 @foo()
+
 ; Check OR.
 define i32 @f1(i32 %a, i32 %b) {
 ; CHECK: f1:
@@ -127,3 +129,46 @@ define i32 @f11(i32 %a, i64 %src, i64 %index) {
   %or = or i32 %a, %b
   ret i32 %or
 }
+
+; Check that ORs of spilled values can use O rather than OR.
+define i32 @f12(i32 *%ptr0) {
+; CHECK: f12:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: o %r2, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i32 *%ptr0, i64 2
+  %ptr2 = getelementptr i32 *%ptr0, i64 4
+  %ptr3 = getelementptr i32 *%ptr0, i64 6
+  %ptr4 = getelementptr i32 *%ptr0, i64 8
+  %ptr5 = getelementptr i32 *%ptr0, i64 10
+  %ptr6 = getelementptr i32 *%ptr0, i64 12
+  %ptr7 = getelementptr i32 *%ptr0, i64 14
+  %ptr8 = getelementptr i32 *%ptr0, i64 16
+  %ptr9 = getelementptr i32 *%ptr0, i64 18
+
+  %val0 = load i32 *%ptr0
+  %val1 = load i32 *%ptr1
+  %val2 = load i32 *%ptr2
+  %val3 = load i32 *%ptr3
+  %val4 = load i32 *%ptr4
+  %val5 = load i32 *%ptr5
+  %val6 = load i32 *%ptr6
+  %val7 = load i32 *%ptr7
+  %val8 = load i32 *%ptr8
+  %val9 = load i32 *%ptr9
+
+  %ret = call i32 @foo()
+
+  %or0 = or i32 %ret, %val0
+  %or1 = or i32 %or0, %val1
+  %or2 = or i32 %or1, %val2
+  %or3 = or i32 %or2, %val3
+  %or4 = or i32 %or3, %val4
+  %or5 = or i32 %or4, %val5
+  %or6 = or i32 %or5, %val6
+  %or7 = or i32 %or6, %val7
+  %or8 = or i32 %or7, %val8
+  %or9 = or i32 %or8, %val9
+
+  ret i32 %or9
+}
diff --git a/test/CodeGen/SystemZ/or-03.ll b/test/CodeGen/SystemZ/or-03.ll
index 16f84f1635a..cf85b0ae7fa 100644
--- a/test/CodeGen/SystemZ/or-03.ll
+++ b/test/CodeGen/SystemZ/or-03.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare i64 @foo()
+
 ; Check OGR.
 define i64 @f1(i64 %a, i64 %b) {
 ; CHECK: f1:
@@ -92,3 +94,46 @@ define i64 @f8(i64 %a, i64 %src, i64 %index) {
   %or = or i64 %a, %b
   ret i64 %or
 }
+
+; Check that ORs of spilled values can use OG rather than OGR.
+define i64 @f9(i64 *%ptr0) {
+; CHECK: f9:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: og %r2, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i64 *%ptr0, i64 2
+  %ptr2 = getelementptr i64 *%ptr0, i64 4
+  %ptr3 = getelementptr i64 *%ptr0, i64 6
+  %ptr4 = getelementptr i64 *%ptr0, i64 8
+  %ptr5 = getelementptr i64 *%ptr0, i64 10
+  %ptr6 = getelementptr i64 *%ptr0, i64 12
+  %ptr7 = getelementptr i64 *%ptr0, i64 14
+  %ptr8 = getelementptr i64 *%ptr0, i64 16
+  %ptr9 = getelementptr i64 *%ptr0, i64 18
+
+  %val0 = load i64 *%ptr0
+  %val1 = load i64 *%ptr1
+  %val2 = load i64 *%ptr2
+  %val3 = load i64 *%ptr3
+  %val4 = load i64 *%ptr4
+  %val5 = load i64 *%ptr5
+  %val6 = load i64 *%ptr6
+  %val7 = load i64 *%ptr7
+  %val8 = load i64 *%ptr8
+  %val9 = load i64 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %or0 = or i64 %ret, %val0
+  %or1 = or i64 %or0, %val1
+  %or2 = or i64 %or1, %val2
+  %or3 = or i64 %or2, %val3
+  %or4 = or i64 %or3, %val4
+  %or5 = or i64 %or4, %val5
+  %or6 = or i64 %or5, %val6
+  %or7 = or i64 %or6, %val7
+  %or8 = or i64 %or7, %val8
+  %or9 = or i64 %or8, %val9
+
+  ret i64 %or9
+}
diff --git a/test/CodeGen/SystemZ/xor-01.ll b/test/CodeGen/SystemZ/xor-01.ll
index 30bdbe7901f..d0c69a0bac4 100644
--- a/test/CodeGen/SystemZ/xor-01.ll
+++ b/test/CodeGen/SystemZ/xor-01.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare i32 @foo()
+
 ; Check XR.
 define i32 @f1(i32 %a, i32 %b) {
 ; CHECK: f1:
@@ -127,3 +129,46 @@ define i32 @f11(i32 %a, i64 %src, i64 %index) {
   %xor = xor i32 %a, %b
   ret i32 %xor
 }
+
+; Check that XORs of spilled values can use X rather than XR.
+define i32 @f12(i32 *%ptr0) {
+; CHECK: f12:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: x %r2, 16{{[04]}}(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i32 *%ptr0, i64 2
+  %ptr2 = getelementptr i32 *%ptr0, i64 4
+  %ptr3 = getelementptr i32 *%ptr0, i64 6
+  %ptr4 = getelementptr i32 *%ptr0, i64 8
+  %ptr5 = getelementptr i32 *%ptr0, i64 10
+  %ptr6 = getelementptr i32 *%ptr0, i64 12
+  %ptr7 = getelementptr i32 *%ptr0, i64 14
+  %ptr8 = getelementptr i32 *%ptr0, i64 16
+  %ptr9 = getelementptr i32 *%ptr0, i64 18
+
+  %val0 = load i32 *%ptr0
+  %val1 = load i32 *%ptr1
+  %val2 = load i32 *%ptr2
+  %val3 = load i32 *%ptr3
+  %val4 = load i32 *%ptr4
+  %val5 = load i32 *%ptr5
+  %val6 = load i32 *%ptr6
+  %val7 = load i32 *%ptr7
+  %val8 = load i32 *%ptr8
+  %val9 = load i32 *%ptr9
+
+  %ret = call i32 @foo()
+
+  %xor0 = xor i32 %ret, %val0
+  %xor1 = xor i32 %xor0, %val1
+  %xor2 = xor i32 %xor1, %val2
+  %xor3 = xor i32 %xor2, %val3
+  %xor4 = xor i32 %xor3, %val4
+  %xor5 = xor i32 %xor4, %val5
+  %xor6 = xor i32 %xor5, %val6
+  %xor7 = xor i32 %xor6, %val7
+  %xor8 = xor i32 %xor7, %val8
+  %xor9 = xor i32 %xor8, %val9
+
+  ret i32 %xor9
+}
diff --git a/test/CodeGen/SystemZ/xor-03.ll b/test/CodeGen/SystemZ/xor-03.ll
index a4851b33090..f4f1b887ef4 100644
--- a/test/CodeGen/SystemZ/xor-03.ll
+++ b/test/CodeGen/SystemZ/xor-03.ll
@@ -2,6 +2,8 @@
 ;
 ; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s
 
+declare i64 @foo()
+
 ; Check XGR.
 define i64 @f1(i64 %a, i64 %b) {
 ; CHECK: f1:
@@ -92,3 +94,46 @@ define i64 @f8(i64 %a, i64 %src, i64 %index) {
   %xor = xor i64 %a, %b
   ret i64 %xor
 }
+
+; Check that XORs of spilled values can use XG rather than XGR.
+define i64 @f9(i64 *%ptr0) {
+; CHECK: f9:
+; CHECK: brasl %r14, foo@PLT
+; CHECK: xg %r2, 160(%r15)
+; CHECK: br %r14
+  %ptr1 = getelementptr i64 *%ptr0, i64 2
+  %ptr2 = getelementptr i64 *%ptr0, i64 4
+  %ptr3 = getelementptr i64 *%ptr0, i64 6
+  %ptr4 = getelementptr i64 *%ptr0, i64 8
+  %ptr5 = getelementptr i64 *%ptr0, i64 10
+  %ptr6 = getelementptr i64 *%ptr0, i64 12
+  %ptr7 = getelementptr i64 *%ptr0, i64 14
+  %ptr8 = getelementptr i64 *%ptr0, i64 16
+  %ptr9 = getelementptr i64 *%ptr0, i64 18
+
+  %val0 = load i64 *%ptr0
+  %val1 = load i64 *%ptr1
+  %val2 = load i64 *%ptr2
+  %val3 = load i64 *%ptr3
+  %val4 = load i64 *%ptr4
+  %val5 = load i64 *%ptr5
+  %val6 = load i64 *%ptr6
+  %val7 = load i64 *%ptr7
+  %val8 = load i64 *%ptr8
+  %val9 = load i64 *%ptr9
+
+  %ret = call i64 @foo()
+
+  %xor0 = xor i64 %ret, %val0
+  %xor1 = xor i64 %xor0, %val1
+  %xor2 = xor i64 %xor1, %val2
+  %xor3 = xor i64 %xor2, %val3
+  %xor4 = xor i64 %xor3, %val4
+  %xor5 = xor i64 %xor4, %val5
+  %xor6 = xor i64 %xor5, %val6
+  %xor7 = xor i64 %xor6, %val7
+  %xor8 = xor i64 %xor7, %val8
+  %xor9 = xor i64 %xor8, %val9
+
+  ret i64 %xor9
+}