X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FAArch64%2FAArch64InstrFormats.td;h=0e457329f73558f8a75a9c1a99c528952821a5a9;hb=1106660066504d163543607b38371c97aef7708e;hp=106f2158909635cfa4aca6d86cdaebff21965cd4;hpb=67397358121a3267681d2ebfc4364bbdb71ec5ce;p=oota-llvm.git

diff --git a/lib/Target/AArch64/AArch64InstrFormats.td b/lib/Target/AArch64/AArch64InstrFormats.td
index 106f2158909..0e457329f73 100644
--- a/lib/Target/AArch64/AArch64InstrFormats.td
+++ b/lib/Target/AArch64/AArch64InstrFormats.td
@@ -843,7 +843,7 @@ def MRSSystemRegisterOperand : AsmOperandClass {
   let ParserMethod = "tryParseSysReg";
   let DiagnosticType = "MRS";
 }
-// concatenation of 1, op0, op1, CRn, CRm, op2. 16-bit immediate.
+// concatenation of op0, op1, CRn, CRm, op2. 16-bit immediate.
 def mrs_sysreg_op : Operand<i32> {
   let ParserMatchClass = MRSSystemRegisterOperand;
   let DecoderMethod = "DecodeMRSSystemRegister";
@@ -863,9 +863,8 @@ def msr_sysreg_op : Operand<i32> {
 
 class MRSI : RtSystemI<1, (outs GPR64:$Rt), (ins mrs_sysreg_op:$systemreg),
                        "mrs", "\t$Rt, $systemreg"> {
-  bits<15> systemreg;
-  let Inst{20} = 1;
-  let Inst{19-5} = systemreg;
+  bits<16> systemreg;
+  let Inst{20-5} = systemreg;
 }
 
 // FIXME: Some of these def NZCV, others don't. Best way to model that?
@@ -873,9 +872,8 @@ class MRSI : RtSystemI<1, (outs GPR64:$Rt), (ins mrs_sysreg_op:$systemreg),
 // would do it, but feels like overkill at this point.
 class MSRI : RtSystemI<0, (outs), (ins msr_sysreg_op:$systemreg, GPR64:$Rt),
                        "msr", "\t$systemreg, $Rt"> {
-  bits<15> systemreg;
-  let Inst{20} = 1;
-  let Inst{19-5} = systemreg;
+  bits<16> systemreg;
+  let Inst{20-5} = systemreg;
 }
 
 def SystemPStateFieldOperand : AsmOperandClass {
@@ -1351,14 +1349,15 @@ class BaseMulAccum<bit isSub, bits<3> opc, RegisterClass multype,
 }
 
 multiclass MulAccum<bit isSub, string asm, SDNode AccNode> {
+  // MADD/MSUB generation is decided by MachineCombiner.cpp
   def Wrrr : BaseMulAccum<isSub, 0b000, GPR32, GPR32, asm,
-                      [(set GPR32:$Rd, (AccNode GPR32:$Ra, (mul GPR32:$Rn, GPR32:$Rm)))]>,
+                      [/*(set GPR32:$Rd, (AccNode GPR32:$Ra, (mul GPR32:$Rn, GPR32:$Rm)))*/]>,
       Sched<[WriteIM32, ReadIM, ReadIM, ReadIMA]> {
     let Inst{31} = 0;
   }
 
   def Xrrr : BaseMulAccum<isSub, 0b000, GPR64, GPR64, asm,
-                      [(set GPR64:$Rd, (AccNode GPR64:$Ra, (mul GPR64:$Rn, GPR64:$Rm)))]>,
+                      [/*(set GPR64:$Rd, (AccNode GPR64:$Ra, (mul GPR64:$Rn, GPR64:$Rm)))*/]>,
       Sched<[WriteIM64, ReadIM, ReadIM, ReadIMA]> {
     let Inst{31} = 1;
   }
@@ -1638,10 +1637,16 @@ multiclass AddSub<bit isSub, string mnemonic, SDPatternOperator OpNode = null_frag> {
   let hasSideEffects = 0, isReMaterializable = 1, isAsCheapAsAMove = 1 in {
   // Add/Subtract immediate
+  // Increase the weight of the immediate variant to try to match it before
+  // the extended register variant.
+  // We used to match the register variant before the immediate when the
+  // register argument could be implicitly zero-extended.
+  let AddedComplexity = 6 in
   def Wri  : BaseAddSubImm<isSub, 0, GPR32sp, GPR32sp, addsub_shifted_imm32,
                            mnemonic, OpNode> {
     let Inst{31} = 0;
   }
 
+  let AddedComplexity = 6 in
   def Xri  : BaseAddSubImm<isSub, 0, GPR64sp, GPR64sp, addsub_shifted_imm64,
                            mnemonic, OpNode> {
     let Inst{31} = 1;
@@ -2997,7 +3002,7 @@ class LoadPreIdx<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
                  string asm>
   : BaseLoadStorePreIdx<sz, V, opc, (outs regtype:$Rt, GPR64sp:$wback),
                         (ins GPR64sp:$Rn, simm9:$offset), asm,
-                        "$Rn = $wback", []>,
+                        "$Rn = $wback,@earlyclobber $wback", []>,
     Sched<[WriteLD, WriteAdr]>;
 
 let mayStore = 1, mayLoad = 0 in
@@ -3006,7 +3011,7 @@ class StorePreIdx<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
                   string asm>
   : BaseLoadStorePreIdx<sz, V, opc, (outs GPR64sp:$wback),
                         (ins regtype:$Rt, GPR64sp:$Rn, simm9:$offset),
-                        asm, "$Rn = $wback", []>,
+                        asm, "$Rn = $wback,@earlyclobber $wback", []>,
     Sched<[WriteAdr, WriteST]>;
 
 //---
@@ -3016,7 +3021,6 @@ class StorePreIdx<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
 // Load/store post-indexed
 //---
 
-// (pre-index) load/stores.
 class BaseLoadStorePostIdx<bits<2> sz, bit V, bits<2> opc, dag oops, dag iops,
                           string asm, string cstr, list<dag> pat>
     : I<oops, iops, asm, "\t$Rt, [$Rn], $idx", cstr, pat> {
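
For reference, the MRS/MSR hunks above widen the encoded system-register operand from 15 to 16 bits: the immediate is the concatenation op0:op1:CRn:CRm:op2 (2+3+4+4+3 bits) placed in Inst{20-5}, where the old form hardwired Inst{20} to 1 because the top bit of op0 is 1 for every register MRS/MSR can name. A minimal C++ sketch of the packing; the helper name and the NZCV example are illustrative, not part of the patch:

#include <cassert>
#include <cstdint>

// Pack the fields of a system register name S<op0>_<op1>_C<CRn>_C<CRm>_<op2>
// into the 16-bit immediate placed in Inst{20-5}:
// op0(2) : op1(3) : CRn(4) : CRm(4) : op2(3).
static uint16_t packSysReg(unsigned op0, unsigned op1, unsigned crn,
                           unsigned crm, unsigned op2) {
  return static_cast<uint16_t>((op0 << 14) | (op1 << 11) | (crn << 7) |
                               (crm << 3) | op2);
}

int main() {
  // NZCV is S3_3_C4_C2_0. op0 = 3 means the bit the old encoding hardwired
  // (Inst{20}) is set, so the 15-bit form could drop it; the 16-bit form
  // simply carries the whole concatenation.
  assert(packSysReg(3, 3, 4, 2, 0) == 0xDA10);
  return 0;
}
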
@@ -3044,7 +3048,7 @@ class LoadPostIdx<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
                   string asm>
   : BaseLoadStorePostIdx<sz, V, opc, (outs regtype:$Rt, GPR64sp:$wback),
                          (ins GPR64sp:$Rn, simm9:$idx),
-                         asm, "$Rn = $wback", []>,
+                         asm, "$Rn = $wback,@earlyclobber $wback", []>,
     Sched<[WriteLD, WriteI]>;
 
 let mayStore = 1, mayLoad = 0 in
@@ -3053,7 +3057,7 @@ class StorePostIdx<bits<2> sz, bit V, bits<2> opc, RegisterClass regtype,
                    string asm>
   : BaseLoadStorePostIdx<sz, V, opc, (outs GPR64sp:$wback),
                          (ins regtype:$Rt, GPR64sp:$Rn, simm9:$idx),
-                         asm, "$Rn = $wback", []>,
+                         asm, "$Rn = $wback,@earlyclobber $wback", []>,
     Sched<[WriteAdr, WriteST, ReadAdrBase]>;
 
 //---
@@ -3117,7 +3121,7 @@ multiclass StorePairOffset<bits<2> opc, bit V, RegisterClass regtype,
 // (pre-indexed)
 class BaseLoadStorePairPreIdx<bits<2> opc, bit V, bit L, dag oops, dag iops,
                               string asm>
-    : I<oops, iops, asm, "\t$Rt, $Rt2, [$Rn, $offset]!", "$Rn = $wback", []> {
+    : I<oops, iops, asm, "\t$Rt, $Rt2, [$Rn, $offset]!", "$Rn = $wback,@earlyclobber $wback", []> {
   bits<5> Rt;
   bits<5> Rt2;
   bits<5> Rn;
@@ -3158,7 +3162,7 @@ class StorePairPreIdx<bits<2> opc, bit V, RegisterClass regtype,
 
 class BaseLoadStorePairPostIdx<bits<2> opc, bit V, bit L, dag oops, dag iops,
                                string asm>
-    : I<oops, iops, asm, "\t$Rt, $Rt2, [$Rn], $idx", "$Rn = $wback", []> {
+    : I<oops, iops, asm, "\t$Rt, $Rt2, [$Rn], $idx", "$Rn = $wback,@earlyclobber $wback", []> {
   bits<5> Rt;
   bits<5> Rt2;
   bits<5> Rn;
@@ -4385,7 +4389,7 @@ class BaseSIMDVectorLShiftLongBySize<bit Q, bits<2> size,
 }
 
 multiclass SIMDVectorLShiftLongBySizeBHS {
-  let neverHasSideEffects = 1 in {
+  let hasSideEffects = 0 in {
   def v8i8  : BaseSIMDVectorLShiftLongBySize<0, 0b00, V64,
                                              "shll", ".8h", ".8b", "8">;
   def v16i8 : BaseSIMDVectorLShiftLongBySize<1, 0b00, V128,
@@ -5262,6 +5266,10 @@ multiclass SIMDZipVector<bits<3> opc, string asm,
   def v2i64  : BaseSIMDZipVector<0b111, opc, V128,
       asm, ".2d", OpNode, v2i64>;
 
+  def : Pat<(v4f16 (OpNode V64:$Rn, V64:$Rm)),
+            (!cast<Instruction>(NAME#"v4i16") V64:$Rn, V64:$Rm)>;
+  def : Pat<(v8f16 (OpNode V128:$Rn, V128:$Rm)),
+            (!cast<Instruction>(NAME#"v8i16") V128:$Rn, V128:$Rm)>;
   def : Pat<(v2f32 (OpNode V64:$Rn, V64:$Rm)),
             (!cast<Instruction>(NAME#"v2i32") V64:$Rn, V64:$Rm)>;
   def : Pat<(v4f32 (OpNode V128:$Rn, V128:$Rm)),
@@ -5296,6 +5304,27 @@ class BaseSIMDThreeScalar<bit U, bits<2> size, bits<5> opcode,
   let Inst{4-0} = Rd;
 }
 
+let mayStore = 0, mayLoad = 0, hasSideEffects = 0 in
+class BaseSIMDThreeScalarTied<bit U, bits<2> size, bit R, bits<5> opcode,
+                              dag oops, dag iops, string asm,
+                              list<dag> pattern>
+  : I<oops, iops, asm, "\t$Rd, $Rn, $Rm", "$Rd = $dst", pattern>,
+    Sched<[WriteV]> {
+  bits<5> Rd;
+  bits<5> Rn;
+  bits<5> Rm;
+  let Inst{31-30} = 0b01;
+  let Inst{29} = U;
+  let Inst{28-24} = 0b11110;
+  let Inst{23-22} = size;
+  let Inst{21} = R;
+  let Inst{20-16} = Rm;
+  let Inst{15-11} = opcode;
+  let Inst{10} = 1;
+  let Inst{9-5} = Rn;
+  let Inst{4-0} = Rd;
+}
+
 multiclass SIMDThreeScalarD<bit U, bits<5> opc, string asm,
                             SDPatternOperator OpNode> {
   def v1i64  : BaseSIMDThreeScalar<U, 0b11, opc, FPR64, asm,
@@ -5308,6 +5337,14 @@ multiclass SIMDThreeScalarHS<bit U, bits<5> opc, string asm,
   def v1i16  : BaseSIMDThreeScalar<U, 0b01, opc, FPR16, asm, []>;
 }
 
+multiclass SIMDThreeScalarHSTied<bit U, bit R, bits<5> opc, string asm,
+                                 SDPatternOperator OpNode = null_frag> {
+  def v1i32: BaseSIMDThreeScalarTied<U, 0b10, R, opc, (outs FPR32:$dst),
+                                     (ins FPR32:$Rd, FPR32:$Rn, FPR32:$Rm), asm, []>;
+  def v1i16: BaseSIMDThreeScalarTied<U, 0b01, R, opc, (outs FPR16:$dst),
+                                     (ins FPR16:$Rd, FPR16:$Rn, FPR16:$Rm), asm, []>;
+}
+
 multiclass SIMDThreeScalarSD<bit U, bit S, bits<5> opc, string asm,
                              SDPatternOperator OpNode = null_frag> {
   let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
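
The v8.1a hunk that follows selects the new rounding doubling multiply accumulate instructions from an Accum (saturating add/subtract) node wrapped around int_aarch64_neon_sqrdmulh. As a rough scalar reference for what one halfword lane of SQRDMLAH computes, under my reading of the ARMv8.1-A pseudocode (the helper name and test values are illustrative, not from the patch):

#include <cassert>
#include <cstdint>

// Rounding doubling multiply-accumulate on one .h lane: accumulate at
// double width, add the rounding constant, take the high half, and only
// then narrow with signed saturation. Equivalent to
// sat16((acc << 16) + 2*a*b + (1 << 15) >> 16).
static int16_t sqrdmlah_h(int16_t acc, int16_t a, int16_t b) {
  int64_t wide = (int64_t{acc} << 16) + 2 * int64_t{a} * int64_t{b} +
                 (int64_t{1} << 15);
  wide >>= 16;  // keep the high half (with rounding already folded in)
  if (wide > INT16_MAX) return INT16_MAX;  // saturate on overflow
  if (wide < INT16_MIN) return INT16_MIN;
  return static_cast<int16_t>(wide);
}

int main() {
  // In Q15 fixed point: 0 + 0.5 * 0.5 == 0.25, i.e. 8192.
  assert(sqrdmlah_h(0, 16384, 16384) == 8192);
  return 0;
}
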
@@ -8514,6 +8553,174 @@ multiclass SIMDLdSt4SingleAliases<string asm> {
   }
 }
 } // end of 'let Predicates = [HasNEON]'
+//----------------------------------------------------------------------------
+// AdvSIMD v8.1 Rounding Double Multiply Add/Subtract
+//----------------------------------------------------------------------------
+
+let Predicates = [HasNEON, HasV8_1a] in {
+
+class BaseSIMDThreeSameVectorTiedR0<bit Q, bit U, bits<2> size, bits<5> opcode,
+                                    RegisterOperand regtype, string asm,
+                                    string kind, list<dag> pattern>
+  : BaseSIMDThreeSameVectorTied<Q, U, size, opcode, regtype, asm, kind,
+                                pattern> {
+  let Inst{21}=0;
+}
+multiclass SIMDThreeSameVectorSQRDMLxHTiedHS<bit U, bits<5> opc, string asm,
+                                             SDPatternOperator Accum> {
+  def v4i16 : BaseSIMDThreeSameVectorTiedR0<0, U, 0b01, opc, V64, asm, ".4h",
+    [(set (v4i16 V64:$dst),
+          (Accum (v4i16 V64:$Rd),
+                 (v4i16 (int_aarch64_neon_sqrdmulh
+                          (v4i16 V64:$Rn),
+                          (v4i16 V64:$Rm)))))]>;
+  def v8i16 : BaseSIMDThreeSameVectorTiedR0<1, U, 0b01, opc, V128, asm, ".8h",
+    [(set (v8i16 V128:$dst),
+          (Accum (v8i16 V128:$Rd),
+                 (v8i16 (int_aarch64_neon_sqrdmulh (v8i16 V128:$Rn),
+                                                   (v8i16 V128:$Rm)))))]>;
+  def v2i32 : BaseSIMDThreeSameVectorTiedR0<0, U, 0b10, opc, V64, asm, ".2s",
+    [(set (v2i32 V64:$dst),
+          (Accum (v2i32 V64:$Rd),
+                 (v2i32 (int_aarch64_neon_sqrdmulh (v2i32 V64:$Rn),
+                                                   (v2i32 V64:$Rm)))))]>;
+  def v4i32 : BaseSIMDThreeSameVectorTiedR0<1, U, 0b10, opc, V128, asm, ".4s",
+    [(set (v4i32 V128:$dst),
+          (Accum (v4i32 V128:$Rd),
+                 (v4i32 (int_aarch64_neon_sqrdmulh (v4i32 V128:$Rn),
+                                                   (v4i32 V128:$Rm)))))]>;
+}
+
+multiclass SIMDIndexedSQRDMLxHSDTied<bit U, bits<4> opc, string asm,
+                                     SDPatternOperator Accum> {
+  def v4i16_indexed : BaseSIMDIndexedTied<0, U, 0, 0b01, opc,
+                                          V64, V64, V128_lo, VectorIndexH,
+                                          asm, ".4h", ".4h", ".4h", ".h",
+    [(set (v4i16 V64:$dst),
+          (Accum (v4i16 V64:$Rd),
+                 (v4i16 (int_aarch64_neon_sqrdmulh
+                          (v4i16 V64:$Rn),
+                          (v4i16 (AArch64duplane16 (v8i16 V128_lo:$Rm),
+                                                    VectorIndexH:$idx))))))]> {
+    bits<3> idx;
+    let Inst{11} = idx{2};
+    let Inst{21} = idx{1};
+    let Inst{20} = idx{0};
+  }
+
+  def v8i16_indexed : BaseSIMDIndexedTied<1, U, 0, 0b01, opc,
+                                          V128, V128, V128_lo, VectorIndexH,
+                                          asm, ".8h", ".8h", ".8h", ".h",
+    [(set (v8i16 V128:$dst),
+          (Accum (v8i16 V128:$Rd),
+                 (v8i16 (int_aarch64_neon_sqrdmulh
+                          (v8i16 V128:$Rn),
+                          (v8i16 (AArch64duplane16 (v8i16 V128_lo:$Rm),
+                                                    VectorIndexH:$idx))))))]> {
+    bits<3> idx;
+    let Inst{11} = idx{2};
+    let Inst{21} = idx{1};
+    let Inst{20} = idx{0};
+  }
+
+  def v2i32_indexed : BaseSIMDIndexedTied<0, U, 0, 0b10, opc,
+                                          V64, V64, V128, VectorIndexS,
+                                          asm, ".2s", ".2s", ".2s", ".s",
+    [(set (v2i32 V64:$dst),
+          (Accum (v2i32 V64:$Rd),
+                 (v2i32 (int_aarch64_neon_sqrdmulh
+                          (v2i32 V64:$Rn),
+                          (v2i32 (AArch64duplane32 (v4i32 V128:$Rm),
+                                                   VectorIndexS:$idx))))))]> {
+    bits<2> idx;
+    let Inst{11} = idx{1};
+    let Inst{21} = idx{0};
+  }
+
+  // FIXME: it would be nice to use the scalar (v1i32) instruction here, but
+  // an intermediate EXTRACT_SUBREG would be untyped.
+  // FIXME: direct EXTRACT_SUBREG from v2i32 to i32 is illegal, that's why we
+  // got it lowered here as (i32 vector_extract (v4i32 insert_subvector(..)))
+  def : Pat<(i32 (Accum (i32 FPR32Op:$Rd),
+                        (i32 (vector_extract
+                               (v4i32 (insert_subvector
+                                        (undef),
+                                        (v2i32 (int_aarch64_neon_sqrdmulh
+                                                 (v2i32 V64:$Rn),
+                                                 (v2i32 (AArch64duplane32
+                                                          (v4i32 V128:$Rm),
+                                                          VectorIndexS:$idx)))),
+                                        (i32 0))),
+                               (i64 0))))),
+            (EXTRACT_SUBREG
+                (v2i32 (!cast<Instruction>(NAME # v2i32_indexed)
+                          (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
+                                                FPR32Op:$Rd,
+                                                ssub)),
+                          V64:$Rn,
+                          V128:$Rm,
+                          VectorIndexS:$idx)),
+                ssub)>;
+
+  def v4i32_indexed : BaseSIMDIndexedTied<1, U, 0, 0b10, opc,
+                                          V128, V128, V128, VectorIndexS,
+                                          asm, ".4s", ".4s", ".4s", ".s",
+    [(set (v4i32 V128:$dst),
+          (Accum (v4i32 V128:$Rd),
+                 (v4i32 (int_aarch64_neon_sqrdmulh
+                          (v4i32 V128:$Rn),
+                          (v4i32 (AArch64duplane32 (v4i32 V128:$Rm),
+                                                   VectorIndexS:$idx))))))]> {
+    bits<2> idx;
+    let Inst{11} = idx{1};
+    let Inst{21} = idx{0};
+  }
+
+  // FIXME: it would be nice to use the scalar (v1i32) instruction here, but
+  // an intermediate EXTRACT_SUBREG would be untyped.
+  def : Pat<(i32 (Accum (i32 FPR32Op:$Rd),
+                        (i32 (vector_extract
+                               (v4i32 (int_aarch64_neon_sqrdmulh
+                                        (v4i32 V128:$Rn),
+                                        (v4i32 (AArch64duplane32
+                                                 (v4i32 V128:$Rm),
+                                                 VectorIndexS:$idx)))),
+                               (i64 0))))),
+            (EXTRACT_SUBREG
+                (v4i32 (!cast<Instruction>(NAME # v4i32_indexed)
+                          (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
+                                                FPR32Op:$Rd,
+                                                ssub)),
+                          V128:$Rn,
+                          V128:$Rm,
+                          VectorIndexS:$idx)),
+                ssub)>;
+
+  def i16_indexed : BaseSIMDIndexedTied<1, U, 1, 0b01, opc,
+                                        FPR16Op, FPR16Op, V128_lo,
+                                        VectorIndexH, asm, ".h", "", "", ".h",
+                                        []> {
+    bits<3> idx;
+    let Inst{11} = idx{2};
+    let Inst{21} = idx{1};
+    let Inst{20} = idx{0};
+  }
+
+  def i32_indexed : BaseSIMDIndexedTied<1, U, 1, 0b10, opc,
+                                        FPR32Op, FPR32Op, V128, VectorIndexS,
+                                        asm, ".s", "", "", ".s",
+    [(set (i32 FPR32Op:$dst),
+          (Accum (i32 FPR32Op:$Rd),
+                 (i32 (int_aarch64_neon_sqrdmulh
+                        (i32 FPR32Op:$Rn),
+                        (i32 (vector_extract (v4i32 V128:$Rm),
+                                             VectorIndexS:$idx))))))]> {
+    bits<2> idx;
+    let Inst{11} = idx{1};
+    let Inst{21} = idx{0};
+  }
+}
+} // let Predicates = [HasNeon, HasV8_1a]
+
 //----------------------------------------------------------------------------
 // Crypto extensions
 //----------------------------------------------------------------------------
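
If the patterns above behave as intended, the ACLE intrinsics for these operations should select straight to the fused instructions instead of an SQRDMULH/SQADD pair. A small end-to-end check, assuming a toolchain that accepts -march=armv8.1-a and the usual ACLE intrinsic names (the function name is illustrative; none of this is part of the patch):

#include <arm_neon.h>

// Expected to compile to a single sqrdmlah v0.8h, v1.8h, v2.8h when
// targeting ARMv8.1-A with the rounding doubling multiply extension.
int16x8_t rounding_mla(int16x8_t acc, int16x8_t a, int16x8_t b) {
  return vqrdmlahq_s16(acc, a, b);
}
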