diff --git a/lib/Target/PowerPC/PPCInstrAltivec.td b/lib/Target/PowerPC/PPCInstrAltivec.td
index 35fed0f5310..256370fa5f5 100644
--- a/lib/Target/PowerPC/PPCInstrAltivec.td
+++ b/lib/Target/PowerPC/PPCInstrAltivec.td
@@ -2,8 +2,8 @@
 //
 //                     The LLVM Compiler Infrastructure
 //
-// This file was developed by Chris Lattner and is distributed under
-// the University of Illinois Open Source License. See LICENSE.TXT for details.
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
 //
 //===----------------------------------------------------------------------===//
 //
@@ -15,228 +15,378 @@
 // Altivec transformation functions and pattern fragments.
 //
 
-// VSPLT_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
-def VSPLT_get_imm : SDNodeXForm<build_vector, [{
-  return getI32Imm(PPC::getVSPLTImmediate(N));
-}]>;
+
+def vpkuhum_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                              (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), false);
+}]>;
+def vpkuwum_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                              (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), false);
+}]>;
+def vpkuhum_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                                    (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVPKUHUMShuffleMask(cast<ShuffleVectorSDNode>(N), true);
+}]>;
+def vpkuwum_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                                    (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVPKUWUMShuffleMask(cast<ShuffleVectorSDNode>(N), true);
+}]>;
+
+
+def vmrglb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
+  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, false);
+}]>;
+def vmrglh_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
+  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, false);
+}]>;
+def vmrglw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
+  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, false);
+}]>;
+def vmrghb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
+  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, false);
+}]>;
+def vmrghh_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
+  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, false);
+}]>;
+def vmrghw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                             (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
+  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, false);
+}]>;
+
+
+def vmrglb_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                                   (vector_shuffle (v16i8 node:$lhs), node:$rhs), [{
+  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 1, true);
+}]>;
+def vmrglh_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                                   (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 2, true);
+}]>;
+def vmrglw_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                                   (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVMRGLShuffleMask(cast<ShuffleVectorSDNode>(N), 4, true);
+}]>;
+def vmrghb_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                                   (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 1, true);
+}]>;
+def vmrghh_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                                   (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 2, true);
+}]>;
+def vmrghw_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                                   (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVMRGHShuffleMask(cast<ShuffleVectorSDNode>(N), 4, true);
+}]>;
+
+
+def VSLDOI_get_imm : SDNodeXForm<vector_shuffle, [{
+  return getI32Imm(PPC::isVSLDOIShuffleMask(N, false));
+}]>;
+def vsldoi_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                             (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVSLDOIShuffleMask(N, false) != -1;
+}], VSLDOI_get_imm>;
 
-def VSPLT_shuffle_mask : PatLeaf<(build_vector), [{
-  return PPC::isSplatShuffleMask(N);
-}], VSPLT_get_imm>;
+
+/// VSLDOI_unary* - These are used to match vsldoi(X,X), which is turned into
+/// vector_shuffle(X,undef,mask) by the dag combiner.
+def VSLDOI_unary_get_imm : SDNodeXForm<vector_shuffle, [{
+  return getI32Imm(PPC::isVSLDOIShuffleMask(N, true));
+}]>;
+def vsldoi_unary_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                                   (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isVSLDOIShuffleMask(N, true) != -1;
+}], VSLDOI_unary_get_imm>;
+
+
+// VSPLT*_get_imm xform function: convert vector_shuffle mask to VSPLT* imm.
+def VSPLTB_get_imm : SDNodeXForm<vector_shuffle, [{
+  return getI32Imm(PPC::getVSPLTImmediate(N, 1));
+}]>;
+def vspltb_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                             (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 1);
+}], VSPLTB_get_imm>;
+def VSPLTH_get_imm : SDNodeXForm<vector_shuffle, [{
+  return getI32Imm(PPC::getVSPLTImmediate(N, 2));
+}]>;
+def vsplth_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                             (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 2);
+}], VSPLTH_get_imm>;
+def VSPLTW_get_imm : SDNodeXForm<vector_shuffle, [{
+  return getI32Imm(PPC::getVSPLTImmediate(N, 4));
+}]>;
+def vspltw_shuffle : PatFrag<(ops node:$lhs, node:$rhs),
+                             (vector_shuffle node:$lhs, node:$rhs), [{
+  return PPC::isSplatShuffleMask(cast<ShuffleVectorSDNode>(N), 4);
+}], VSPLTW_get_imm>;
 
 // VSPLTISB_get_imm xform function: convert build_vector to VSPLTISB imm.
 def VSPLTISB_get_imm : SDNodeXForm<build_vector, [{
   return PPC::get_VSPLTI_elt(N, 1, *CurDAG);
 }]>;
 def vecspltisb : PatLeaf<(build_vector), [{
-  return PPC::isVecSplatImm(N, 1);
+  return PPC::get_VSPLTI_elt(N, 1, *CurDAG).getNode() != 0;
 }], VSPLTISB_get_imm>;
 
 // VSPLTISH_get_imm xform function: convert build_vector to VSPLTISH imm.
 def VSPLTISH_get_imm : SDNodeXForm<build_vector, [{
   return PPC::get_VSPLTI_elt(N, 2, *CurDAG);
 }]>;
 def vecspltish : PatLeaf<(build_vector), [{
-  return PPC::isVecSplatImm(N, 2);
+  return PPC::get_VSPLTI_elt(N, 2, *CurDAG).getNode() != 0;
 }], VSPLTISH_get_imm>;
 
 // VSPLTISW_get_imm xform function: convert build_vector to VSPLTISW imm.
 def VSPLTISW_get_imm : SDNodeXForm<build_vector, [{
   return PPC::get_VSPLTI_elt(N, 4, *CurDAG);
 }]>;
 def vecspltisw : PatLeaf<(build_vector), [{
-  return PPC::isVecSplatImm(N, 4);
+  return PPC::get_VSPLTI_elt(N, 4, *CurDAG).getNode() != 0;
 }], VSPLTISW_get_imm>;
 
-class isVDOT {   // vector dot instruction.
-  list<Register> Defs = [CR6];
-  bit RC = 1;
-}
+def V_immneg0 : PatLeaf<(build_vector), [{
+  return PPC::isAllNegativeZeroVector(N);
+}]>;
 
 //===----------------------------------------------------------------------===//
 // Helpers for defining instructions that directly correspond to intrinsics.
 
 // VA1a_Int - A VAForm_1a intrinsic definition.
 class VA1a_Int<bits<6> xo, string opc, Intrinsic IntID>
-  : VAForm_1a<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
+  : VAForm_1a<xo, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB, VRRC:$vC),
               !strconcat(opc, " $vD, $vA, $vB, $vC"), VecFP,
               [(set VRRC:$vD, (IntID VRRC:$vA, VRRC:$vB, VRRC:$vC))]>;
 
 // VX1_Int - A VXForm_1 intrinsic definition.
 class VX1_Int<bits<11> xo, string opc, Intrinsic IntID>
-  : VXForm_1<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+  : VXForm_1<xo, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
              !strconcat(opc, " $vD, $vA, $vB"), VecFP,
             [(set VRRC:$vD, (IntID VRRC:$vA, VRRC:$vB))]>;
 
 // VX2_Int - A VXForm_2 intrinsic definition.
 class VX2_Int<bits<11> xo, string opc, Intrinsic IntID>
-  : VXForm_2<xo, (ops VRRC:$vD, VRRC:$vB),
+  : VXForm_2<xo, (outs VRRC:$vD), (ins VRRC:$vB),
             !strconcat(opc, " $vD, $vB"), VecFP,
             [(set VRRC:$vD, (IntID VRRC:$vB))]>;
 
 //===----------------------------------------------------------------------===//
 // Instruction Definitions.
 
-def IMPLICIT_DEF_VRRC : Pseudo<(ops VRRC:$rD), "; $rD = IMPLICIT_DEF_VRRC",
-                               [(set VRRC:$rD, (v4f32 (undef)))]>;
-
-let isLoad = 1, PPC970_Unit = 2 in {  // Loads.
-def LVEBX: XForm_1<31, 7, (ops VRRC:$vD, memrr:$src),
+def DSS    : DSS_Form<822, (outs),
+                      (ins u5imm:$ZERO0, u5imm:$STRM, u5imm:$ZERO1, u5imm:$ZERO2),
+                      "dss $STRM", LdStGeneral /*FIXME*/, []>;
+def DSSALL : DSS_Form<822, (outs),
+                      (ins u5imm:$ONE, u5imm:$ZERO0, u5imm:$ZERO1, u5imm:$ZERO2),
+                      "dssall", LdStGeneral /*FIXME*/, []>;
+def DST    : DSS_Form<342, (outs),
+                      (ins u5imm:$ZERO, u5imm:$STRM, GPRC:$rA, GPRC:$rB),
+                      "dst $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+def DSTT   : DSS_Form<342, (outs),
+                      (ins u5imm:$ONE, u5imm:$STRM, GPRC:$rA, GPRC:$rB),
+                      "dstt $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+def DSTST  : DSS_Form<374, (outs),
+                      (ins u5imm:$ZERO, u5imm:$STRM, GPRC:$rA, GPRC:$rB),
+                      "dstst $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+def DSTSTT : DSS_Form<374, (outs),
+                      (ins u5imm:$ONE, u5imm:$STRM, GPRC:$rA, GPRC:$rB),
+                      "dststt $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+
+def DST64    : DSS_Form<342, (outs),
+                      (ins u5imm:$ZERO, u5imm:$STRM, G8RC:$rA, GPRC:$rB),
+                      "dst $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+def DSTT64   : DSS_Form<342, (outs),
+                      (ins u5imm:$ONE, u5imm:$STRM, G8RC:$rA, GPRC:$rB),
+                      "dstt $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+def DSTST64  : DSS_Form<374, (outs),
+                      (ins u5imm:$ZERO, u5imm:$STRM, G8RC:$rA, GPRC:$rB),
+                      "dstst $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+def DSTSTT64 : DSS_Form<374, (outs),
+                      (ins u5imm:$ONE, u5imm:$STRM, G8RC:$rA, GPRC:$rB),
+                      "dststt $rA, $rB, $STRM", LdStGeneral /*FIXME*/, []>;
+
+def MFVSCR : VXForm_4<1540, (outs VRRC:$vD), (ins),
+                      "mfvscr $vD", LdStGeneral,
+                      [(set VRRC:$vD, (int_ppc_altivec_mfvscr))]>;
+def MTVSCR : VXForm_5<1604, (outs), (ins VRRC:$vB),
+                      "mtvscr $vB", LdStGeneral,
+                      [(int_ppc_altivec_mtvscr VRRC:$vB)]>;
+
+let canFoldAsLoad = 1, PPC970_Unit = 2 in {  // Loads.
+def LVEBX: XForm_1<31, 7, (outs VRRC:$vD), (ins memrr:$src),
                    "lvebx $vD, $src", LdStGeneral,
                    [(set VRRC:$vD, (int_ppc_altivec_lvebx xoaddr:$src))]>;
-def LVEHX: XForm_1<31, 39, (ops VRRC:$vD, memrr:$src),
+def LVEHX: XForm_1<31, 39, (outs VRRC:$vD), (ins memrr:$src),
                    "lvehx $vD, $src", LdStGeneral,
                    [(set VRRC:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>;
-def LVEWX: XForm_1<31, 71, (ops VRRC:$vD, memrr:$src),
+def LVEWX: XForm_1<31, 71, (outs VRRC:$vD), (ins memrr:$src),
                    "lvewx $vD, $src", LdStGeneral,
                    [(set VRRC:$vD, (int_ppc_altivec_lvewx xoaddr:$src))]>;
-def LVX  : XForm_1<31, 103, (ops VRRC:$vD, memrr:$src),
+def LVX  : XForm_1<31, 103, (outs VRRC:$vD), (ins memrr:$src),
                    "lvx $vD, $src", LdStGeneral,
                    [(set VRRC:$vD, (int_ppc_altivec_lvx xoaddr:$src))]>;
-def LVXL : XForm_1<31, 359, (ops VRRC:$vD, memrr:$src),
+def LVXL : XForm_1<31, 359, (outs VRRC:$vD), (ins memrr:$src),
                    "lvxl $vD, $src", LdStGeneral,
                    [(set VRRC:$vD, (int_ppc_altivec_lvxl xoaddr:$src))]>;
 }
 
-def LVSL : XForm_1<31, 6, (ops VRRC:$vD, memrr:$src),
+def LVSL : XForm_1<31, 6, (outs VRRC:$vD), (ins memrr:$src),
                    "lvsl $vD, $src", LdStGeneral,
                    [(set VRRC:$vD, (int_ppc_altivec_lvsl xoaddr:$src))]>,
                    PPC970_Unit_LSU;
-def LVSR : XForm_1<31, 38, (ops VRRC:$vD, memrr:$src),
-                   "lvsl $vD, $src", LdStGeneral,
+def LVSR : XForm_1<31, 38, (outs VRRC:$vD), (ins memrr:$src),
+                   "lvsr $vD, $src", LdStGeneral,
                    [(set VRRC:$vD, (int_ppc_altivec_lvsr xoaddr:$src))]>,
                    PPC970_Unit_LSU;
 
-let isStore = 1, noResults = 1, PPC970_Unit = 2 in {   // Stores.
+let PPC970_Unit = 2 in {   // Stores.
+def STVEBX: XForm_8<31, 135, (outs), (ins VRRC:$rS, memrr:$dst),
                    "stvebx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvebx VRRC:$rS, xoaddr:$dst)]>;
-def STVEHX: XForm_8<31, 167, (ops VRRC:$rS, memrr:$dst),
+def STVEHX: XForm_8<31, 167, (outs), (ins VRRC:$rS, memrr:$dst),
                    "stvehx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvehx VRRC:$rS, xoaddr:$dst)]>;
-def STVEWX: XForm_8<31, 199, (ops VRRC:$rS, memrr:$dst),
+def STVEWX: XForm_8<31, 199, (outs), (ins VRRC:$rS, memrr:$dst),
                    "stvewx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvewx VRRC:$rS, xoaddr:$dst)]>;
-def STVX  : XForm_8<31, 231, (ops VRRC:$rS, memrr:$dst),
+def STVX  : XForm_8<31, 231, (outs), (ins VRRC:$rS, memrr:$dst),
                    "stvx $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvx VRRC:$rS, xoaddr:$dst)]>;
-def STVXL : XForm_8<31, 487, (ops VRRC:$rS, memrr:$dst),
+def STVXL : XForm_8<31, 487, (outs), (ins VRRC:$rS, memrr:$dst),
                    "stvxl $rS, $dst", LdStGeneral,
                    [(int_ppc_altivec_stvxl VRRC:$rS, xoaddr:$dst)]>;
 }
 
 let PPC970_Unit = 5 in {  // VALU Operations.
 // VA-Form instructions.  3-input AltiVec ops.
-def VMADDFP : VAForm_1<46, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
+def VMADDFP : VAForm_1<46, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vC, VRRC:$vB),
                        "vmaddfp $vD, $vA, $vC, $vB", VecFP,
                        [(set VRRC:$vD, (fadd (fmul VRRC:$vA, VRRC:$vC),
                                              VRRC:$vB))]>,
                        Requires<[FPContractions]>;
-def VNMSUBFP: VAForm_1<47, (ops VRRC:$vD, VRRC:$vA, VRRC:$vC, VRRC:$vB),
+def VNMSUBFP: VAForm_1<47, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vC, VRRC:$vB),
                        "vnmsubfp $vD, $vA, $vC, $vB", VecFP,
-                       [(set VRRC:$vD, (fneg (fsub (fmul VRRC:$vA, VRRC:$vC),
+                       [(set VRRC:$vD, (fsub V_immneg0,
+                                             (fsub (fmul VRRC:$vA, VRRC:$vC),
                                                    VRRC:$vB)))]>,
                        Requires<[FPContractions]>;
+
 def VMHADDSHS  : VA1a_Int<32, "vmhaddshs", int_ppc_altivec_vmhaddshs>;
 def VMHRADDSHS : VA1a_Int<33, "vmhraddshs", int_ppc_altivec_vmhraddshs>;
+def VMLADDUHM  : VA1a_Int<34, "vmladduhm", int_ppc_altivec_vmladduhm>;
 def VPERM      : VA1a_Int<43, "vperm", int_ppc_altivec_vperm>;
 def VSEL       : VA1a_Int<42, "vsel",  int_ppc_altivec_vsel>;
 
-def VSLDOI  : VAForm_2<44, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, u5imm:$SH),
+// Shuffles.
+def VSLDOI  : VAForm_2<44, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB, u5imm:$SH),
                        "vsldoi $vD, $vA, $vB, $SH", VecFP,
-                       [(set VRRC:$vD,
-                         (int_ppc_altivec_vsldoi VRRC:$vA, VRRC:$vB,
-                                                 imm:$SH))]>;
+                       [(set VRRC:$vD,
+                         (vsldoi_shuffle:$SH (v16i8 VRRC:$vA), VRRC:$vB))]>;
 
 // VX-Form instructions.  AltiVec arithmetic ops.
-def VADDCUW : VXForm_1<384, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
-                       "vaddcuw $vD, $vA, $vB", VecFP,
-                       [(set VRRC:$vD,
-                         (int_ppc_altivec_vaddcuw VRRC:$vA, VRRC:$vB))]>;
-def VADDFP : VXForm_1<10, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+def VADDFP : VXForm_1<10, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
                       "vaddfp $vD, $vA, $vB", VecFP,
                       [(set VRRC:$vD, (fadd VRRC:$vA, VRRC:$vB))]>;
 
-def VADDUBM : VXForm_1<0, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+def VADDUBM : VXForm_1<0, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
                        "vaddubm $vD, $vA, $vB", VecGeneral,
                        [(set VRRC:$vD, (add (v16i8 VRRC:$vA), VRRC:$vB))]>;
-def VADDUHM : VXForm_1<64, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+def VADDUHM : VXForm_1<64, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
                        "vadduhm $vD, $vA, $vB", VecGeneral,
                        [(set VRRC:$vD, (add (v8i16 VRRC:$vA), VRRC:$vB))]>;
-def VADDUWM : VXForm_1<128, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+def VADDUWM : VXForm_1<128, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
                        "vadduwm $vD, $vA, $vB", VecGeneral,
                        [(set VRRC:$vD, (add (v4i32 VRRC:$vA), VRRC:$vB))]>;
 
-def VADDSBS : VXForm_1<768, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
-                       "vaddsbs $vD, $vA, $vB", VecFP,
-                       [(set VRRC:$vD,
-                         (int_ppc_altivec_vaddsbs VRRC:$vA, VRRC:$vB))]>;
-def VADDSHS : VXForm_1<832, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
-                       "vaddshs $vD, $vA, $vB", VecFP,
-                       [(set VRRC:$vD,
-                         (int_ppc_altivec_vaddshs VRRC:$vA, VRRC:$vB))]>;
-def VADDSWS : VXForm_1<896, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
-                       "vaddsws $vD, $vA, $vB", VecFP,
-                       [(set VRRC:$vD,
-                         (int_ppc_altivec_vaddsws VRRC:$vA, VRRC:$vB))]>;
+def VADDCUW : VX1_Int<384, "vaddcuw", int_ppc_altivec_vaddcuw>;
+def VADDSBS : VX1_Int<768, "vaddsbs", int_ppc_altivec_vaddsbs>;
+def VADDSHS : VX1_Int<832, "vaddshs", int_ppc_altivec_vaddshs>;
+def VADDSWS : VX1_Int<896, "vaddsws", int_ppc_altivec_vaddsws>;
+def VADDUBS : VX1_Int<512, "vaddubs", int_ppc_altivec_vaddubs>;
+def VADDUHS : VX1_Int<576, "vadduhs", int_ppc_altivec_vadduhs>;
+def VADDUWS : VX1_Int<640, "vadduws", int_ppc_altivec_vadduws>;
 
-def VADDUBS : VXForm_1<512, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
-                       "vaddubs $vD, $vA, $vB", VecFP,
-                       [(set VRRC:$vD,
-                         (int_ppc_altivec_vaddubs VRRC:$vA, VRRC:$vB))]>;
-def VADDUHS : VXForm_1<576, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
-                       "vadduhs $vD, $vA, $vB", VecFP,
-                       [(set VRRC:$vD,
-                         (int_ppc_altivec_vadduhs VRRC:$vA, VRRC:$vB))]>;
-def VADDUWS : VXForm_1<640, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
-                       "vadduws $vD, $vA, $vB", VecFP,
-                       [(set VRRC:$vD,
-                         (int_ppc_altivec_vadduws VRRC:$vA, VRRC:$vB))]>;
-def VAND : VXForm_1<1028, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+
+
+def VAND : VXForm_1<1028, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
                     "vand $vD, $vA, $vB", VecFP,
                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA), VRRC:$vB))]>;
-def VANDC : VXForm_1<1092, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+def VANDC : VXForm_1<1092, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
                      "vandc $vD, $vA, $vB", VecFP,
-                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA), (vnot VRRC:$vB)))]>;
+                     [(set VRRC:$vD, (and (v4i32 VRRC:$vA),
+                                          (vnot_ppc VRRC:$vB)))]>;
 
-def VCFSX  : VXForm_1<842, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
+def VCFSX  : VXForm_1<842, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB),
                       "vcfsx $vD, $vB, $UIMM", VecFP,
                       [(set VRRC:$vD,
                              (int_ppc_altivec_vcfsx VRRC:$vB, imm:$UIMM))]>;
-def VCFUX  : VXForm_1<778, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
+def VCFUX  : VXForm_1<778, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB),
                       "vcfux $vD, $vB, $UIMM", VecFP,
                       [(set VRRC:$vD,
                              (int_ppc_altivec_vcfux VRRC:$vB, imm:$UIMM))]>;
-def VCTSXS : VXForm_1<970, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
+def VCTSXS : VXForm_1<970, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB),
                       "vctsxs $vD, $vB, $UIMM", VecFP,
-                      []>;
-def VCTUXS : VXForm_1<906, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
+                      [(set VRRC:$vD,
+                             (int_ppc_altivec_vctsxs VRRC:$vB, imm:$UIMM))]>;
+def VCTUXS : VXForm_1<906, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB),
                       "vctuxs $vD, $vB, $UIMM", VecFP,
-                      []>;
-def VEXPTEFP : VXForm_2<394, (ops VRRC:$vD, VRRC:$vB),
-                        "vexptefp $vD, $vB", VecFP,
-                        [(set VRRC:$vD, (int_ppc_altivec_vexptefp VRRC:$vB))]>;
-def VLOGEFP : VXForm_2<458, (ops VRRC:$vD, VRRC:$vB),
-                       "vlogefp $vD, $vB", VecFP,
-                       [(set VRRC:$vD, (int_ppc_altivec_vlogefp VRRC:$vB))]>;
-def VMAXFP : VXForm_1<1034, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
-                      "vmaxfp $vD, $vA, $vB", VecFP,
-                      []>;
-def VMINFP : VXForm_1<1098, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
-                      "vminfp $vD, $vA, $vB", VecFP,
-                      []>;
-
-def VMRGHH : VX1_Int<76 , "vmrghh", int_ppc_altivec_vmrghh>;
-def VMRGHW : VX1_Int<140, "vmrghw", int_ppc_altivec_vmrghw>;
-def VMRGLH : VX1_Int<332, "vmrglh", int_ppc_altivec_vmrglh>;
-def VMRGLW : VX1_Int<396, "vmrglw", int_ppc_altivec_vmrglw>;
+                      [(set VRRC:$vD,
+                             (int_ppc_altivec_vctuxs VRRC:$vB, imm:$UIMM))]>;
+def VEXPTEFP : VX2_Int<394, "vexptefp", int_ppc_altivec_vexptefp>;
+def VLOGEFP  : VX2_Int<458, "vlogefp",  int_ppc_altivec_vlogefp>;
+
+def VAVGSB : VX1_Int<1282, "vavgsb", int_ppc_altivec_vavgsb>;
+def VAVGSH : VX1_Int<1346, "vavgsh", int_ppc_altivec_vavgsh>;
+def VAVGSW : VX1_Int<1410, "vavgsw", int_ppc_altivec_vavgsw>;
+def VAVGUB : VX1_Int<1026, "vavgub", int_ppc_altivec_vavgub>;
+def VAVGUH : VX1_Int<1090, "vavguh", int_ppc_altivec_vavguh>;
+def VAVGUW : VX1_Int<1154, "vavguw", int_ppc_altivec_vavguw>;
+
+def VMAXFP : VX1_Int<1034, "vmaxfp", int_ppc_altivec_vmaxfp>;
+def VMAXSB : VX1_Int< 258, "vmaxsb", int_ppc_altivec_vmaxsb>;
+def VMAXSH : VX1_Int< 322, "vmaxsh", int_ppc_altivec_vmaxsh>;
+def VMAXSW : VX1_Int< 386, "vmaxsw", int_ppc_altivec_vmaxsw>;
+def VMAXUB : VX1_Int<   2, "vmaxub", int_ppc_altivec_vmaxub>;
+def VMAXUH : VX1_Int<  66, "vmaxuh", int_ppc_altivec_vmaxuh>;
+def VMAXUW : VX1_Int< 130, "vmaxuw", int_ppc_altivec_vmaxuw>;
+def VMINFP : VX1_Int<1098, "vminfp", int_ppc_altivec_vminfp>;
+def VMINSB : VX1_Int< 770, "vminsb", int_ppc_altivec_vminsb>;
+def VMINSH : VX1_Int< 834, "vminsh", int_ppc_altivec_vminsh>;
+def VMINSW : VX1_Int< 898, "vminsw", int_ppc_altivec_vminsw>;
+def VMINUB : VX1_Int< 514, "vminub", int_ppc_altivec_vminub>;
+def VMINUH : VX1_Int< 578, "vminuh", int_ppc_altivec_vminuh>;
+def VMINUW : VX1_Int< 642, "vminuw", int_ppc_altivec_vminuw>;
+
+def VMRGHB : VXForm_1< 12, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
+                      "vmrghb $vD, $vA, $vB", VecFP,
+                      [(set VRRC:$vD, (vmrghb_shuffle VRRC:$vA, VRRC:$vB))]>;
+def VMRGHH : VXForm_1< 76, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
+                      "vmrghh $vD, $vA, $vB", VecFP,
+                      [(set VRRC:$vD, (vmrghh_shuffle VRRC:$vA, VRRC:$vB))]>;
+def VMRGHW : VXForm_1<140, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
+                      "vmrghw $vD, $vA, $vB", VecFP,
+                      [(set VRRC:$vD, (vmrghw_shuffle VRRC:$vA, VRRC:$vB))]>;
+def VMRGLB : VXForm_1<268, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
+                      "vmrglb $vD, $vA, $vB", VecFP,
+                      [(set VRRC:$vD, (vmrglb_shuffle VRRC:$vA, VRRC:$vB))]>;
+def VMRGLH : VXForm_1<332, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
+                      "vmrglh $vD, $vA, $vB", VecFP,
+                      [(set VRRC:$vD, (vmrglh_shuffle VRRC:$vA, VRRC:$vB))]>;
+def VMRGLW : VXForm_1<396, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
+                      "vmrglw $vD, $vA, $vB", VecFP,
+                      [(set VRRC:$vD, (vmrglw_shuffle VRRC:$vA, VRRC:$vB))]>;
 
 def VMSUMMBM : VA1a_Int<37, "vmsummbm", int_ppc_altivec_vmsummbm>;
 def VMSUMSHM : VA1a_Int<40, "vmsumshm", int_ppc_altivec_vmsumshm>;
@@ -263,16 +413,16 @@ def VRSQRTEFP : VX2_Int<330, "vrsqrtefp", int_ppc_altivec_vrsqrtefp>;
 
 def VSUBCUW : VX1_Int<74, "vsubcuw", int_ppc_altivec_vsubcuw>;
 
-def VSUBFP  : VXForm_1<74, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+def VSUBFP  : VXForm_1<74, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
                        "vsubfp $vD, $vA, $vB", VecGeneral,
                        [(set VRRC:$vD, (fsub VRRC:$vA, VRRC:$vB))]>;
-def VSUBUBM : VXForm_1<1024, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+def VSUBUBM : VXForm_1<1024, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
                        "vsububm $vD, $vA, $vB", VecGeneral,
                        [(set VRRC:$vD, (sub (v16i8 VRRC:$vA), VRRC:$vB))]>;
-def VSUBUHM : VXForm_1<1088, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+def VSUBUHM : VXForm_1<1088, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
                        "vsubuhm $vD, $vA, $vB", VecGeneral,
                        [(set VRRC:$vD, (sub (v8i16 VRRC:$vA), VRRC:$vB))]>;
-def VSUBUWM : VXForm_1<1152, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+def VSUBUWM : VXForm_1<1152, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
                        "vsubuwm $vD, $vA, $vB", VecGeneral,
                        [(set VRRC:$vD, (sub (v4i32 VRRC:$vA), VRRC:$vB))]>;
@@ -288,34 +438,39 @@ def VSUM4SBS: VX1_Int<1672, "vsum4sbs", int_ppc_altivec_vsum4sbs>;
 def VSUM4SHS: VX1_Int<1608, "vsum4shs", int_ppc_altivec_vsum4shs>;
 def VSUM4UBS: VX1_Int<1544, "vsum4ubs", int_ppc_altivec_vsum4ubs>;
 
-def VNOR : VXForm_1<1284, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+def VNOR : VXForm_1<1284, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
                     "vnor $vD, $vA, $vB", VecFP,
-                    [(set VRRC:$vD, (vnot (or (v4i32 VRRC:$vA), VRRC:$vB)))]>;
-def VOR : VXForm_1<1156, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+                    [(set VRRC:$vD, (vnot_ppc (or (v4i32 VRRC:$vA),
+                                                  VRRC:$vB)))]>;
+def VOR : VXForm_1<1156, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
                    "vor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (or (v4i32 VRRC:$vA), VRRC:$vB))]>;
-def VXOR : VXForm_1<1220, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+def VXOR : VXForm_1<1220, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
                    "vxor $vD, $vA, $vB", VecFP,
                    [(set VRRC:$vD, (xor (v4i32 VRRC:$vA), VRRC:$vB))]>;
 
 def VRLB   : VX1_Int<   4, "vrlb", int_ppc_altivec_vrlb>;
 def VRLH   : VX1_Int<  68, "vrlh", int_ppc_altivec_vrlh>;
 def VRLW   : VX1_Int< 132, "vrlw", int_ppc_altivec_vrlw>;
+
+def VSL   : VX1_Int< 452, "vsl" , int_ppc_altivec_vsl >;
 def VSLO  : VX1_Int<1036, "vslo", int_ppc_altivec_vslo>;
 def VSLB  : VX1_Int< 260, "vslb", int_ppc_altivec_vslb>;
 def VSLH  : VX1_Int< 324, "vslh", int_ppc_altivec_vslh>;
 def VSLW  : VX1_Int< 388, "vslw", int_ppc_altivec_vslw>;
 
-def VSPLTB : VXForm_1<524, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
+def VSPLTB : VXForm_1<524, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB),
                       "vspltb $vD, $vB, $UIMM", VecPerm,
-                      []>;
-def VSPLTH : VXForm_1<588, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
+                      [(set VRRC:$vD,
+                        (vspltb_shuffle:$UIMM (v16i8 VRRC:$vB), (undef)))]>;
+def VSPLTH : VXForm_1<588, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB),
                       "vsplth $vD, $vB, $UIMM", VecPerm,
-                      []>;
-def VSPLTW : VXForm_1<652, (ops VRRC:$vD, u5imm:$UIMM, VRRC:$vB),
+                      [(set VRRC:$vD,
+                        (vsplth_shuffle:$UIMM (v16i8 VRRC:$vB), (undef)))]>;
+def VSPLTW : VXForm_1<652, (outs VRRC:$vD), (ins u5imm:$UIMM, VRRC:$vB),
                       "vspltw $vD, $vB, $UIMM", VecPerm,
-                      [(set VRRC:$vD, (vector_shuffle (v4f32 VRRC:$vB), (undef),
-                                      VSPLT_shuffle_mask:$UIMM))]>;
+                      [(set VRRC:$vD,
+                        (vspltw_shuffle:$UIMM (v16i8 VRRC:$vB), (undef)))]>;
 
 def VSR    : VX1_Int< 708, "vsr"  , int_ppc_altivec_vsr>;
 def VSRO   : VX1_Int<1100, "vsro" , int_ppc_altivec_vsro>;
@@ -327,15 +482,15 @@ def VSRH   : VX1_Int< 580, "vsrh" , int_ppc_altivec_vsrh>;
 def VSRW   : VX1_Int< 644, "vsrw" , int_ppc_altivec_vsrw>;
 
-def VSPLTISB : VXForm_3<780, (ops VRRC:$vD, s5imm:$SIMM),
+def VSPLTISB : VXForm_3<780, (outs VRRC:$vD), (ins s5imm:$SIMM),
                        "vspltisb $vD, $SIMM", VecPerm,
-                       [(set VRRC:$vD, (v4f32 vecspltisb:$SIMM))]>;
-def VSPLTISH : VXForm_3<844, (ops VRRC:$vD, s5imm:$SIMM),
+                       [(set VRRC:$vD, (v16i8 vecspltisb:$SIMM))]>;
+def VSPLTISH : VXForm_3<844, (outs VRRC:$vD), (ins s5imm:$SIMM),
                        "vspltish $vD, $SIMM", VecPerm,
-                       [(set VRRC:$vD, (v4f32 vecspltish:$SIMM))]>;
-def VSPLTISW : VXForm_3<908, (ops VRRC:$vD, s5imm:$SIMM),
+                       [(set VRRC:$vD, (v8i16 vecspltish:$SIMM))]>;
+def VSPLTISW : VXForm_3<908, (outs VRRC:$vD), (ins s5imm:$SIMM),
                        "vspltisw $vD, $SIMM", VecPerm,
-                       [(set VRRC:$vD, (v4f32 vecspltisw:$SIMM))]>;
+                       [(set VRRC:$vD, (v4i32 vecspltisw:$SIMM))]>;
 
 // Vector Pack.
 def VPKPX   : VX1_Int<782, "vpkpx", int_ppc_altivec_vpkpx>;
@@ -343,44 +498,37 @@ def VPKSHSS : VX1_Int<398, "vpkshss", int_ppc_altivec_vpkshss>;
 def VPKSHUS : VX1_Int<270, "vpkshus", int_ppc_altivec_vpkshus>;
 def VPKSWSS : VX1_Int<462, "vpkswss", int_ppc_altivec_vpkswss>;
 def VPKSWUS : VX1_Int<334, "vpkswus", int_ppc_altivec_vpkswus>;
-def VPKUHUM : VXForm_1<14, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+def VPKUHUM : VXForm_1<14, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
                        "vpkuhum $vD, $vA, $vB", VecFP,
-                       [/*TODO*/]>;
+                       [(set VRRC:$vD,
+                         (vpkuhum_shuffle (v16i8 VRRC:$vA), VRRC:$vB))]>;
 def VPKUHUS : VX1_Int<142, "vpkuhus", int_ppc_altivec_vpkuhus>;
-def VPKUWUM : VXForm_1<78, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+def VPKUWUM : VXForm_1<78, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB),
                        "vpkuwum $vD, $vA, $vB", VecFP,
-                       [/*TODO*/]>;
+                       [(set VRRC:$vD,
+                         (vpkuwum_shuffle (v16i8 VRRC:$vA), VRRC:$vB))]>;
 def VPKUWUS : VX1_Int<206, "vpkuwus", int_ppc_altivec_vpkuwus>;
 
 // Vector Unpack.
-def VUPKHPX : VXForm_2<846, (ops VRRC:$vD, VRRC:$vB),
-                       "vupkhpx $vD, $vB", VecFP,
-                       [(set VRRC:$vD, (int_ppc_altivec_vupkhpx VRRC:$vB))]>;
-def VUPKHSB : VXForm_2<526, (ops VRRC:$vD, VRRC:$vB),
-                       "vupkhsb $vD, $vB", VecFP,
-                       [(set VRRC:$vD, (int_ppc_altivec_vupkhsb VRRC:$vB))]>;
-def VUPKHSH : VXForm_2<590, (ops VRRC:$vD, VRRC:$vB),
-                       "vupkhsh $vD, $vB", VecFP,
-                       [(set VRRC:$vD, (int_ppc_altivec_vupkhsh VRRC:$vB))]>;
-def VUPKLPX : VXForm_2<974, (ops VRRC:$vD, VRRC:$vB),
-                       "vupklpx $vD, $vB", VecFP,
-                       [(set VRRC:$vD, (int_ppc_altivec_vupklpx VRRC:$vB))]>;
-def VUPKLSB : VXForm_2<654, (ops VRRC:$vD, VRRC:$vB),
-                       "vupklsb $vD, $vB", VecFP,
-                       [(set VRRC:$vD, (int_ppc_altivec_vupklsb VRRC:$vB))]>;
-def VUPKLSH : VXForm_2<718, (ops VRRC:$vD, VRRC:$vB),
-                       "vupklsh $vD, $vB", VecFP,
-                       [(set VRRC:$vD, (int_ppc_altivec_vupklsh VRRC:$vB))]>;
+def VUPKHPX : VX2_Int<846, "vupkhpx", int_ppc_altivec_vupkhpx>;
+def VUPKHSB : VX2_Int<526, "vupkhsb", int_ppc_altivec_vupkhsb>;
+def VUPKHSH : VX2_Int<590, "vupkhsh", int_ppc_altivec_vupkhsh>;
+def VUPKLPX : VX2_Int<974, "vupklpx", int_ppc_altivec_vupklpx>;
+def VUPKLSB : VX2_Int<654, "vupklsb", int_ppc_altivec_vupklsb>;
+def VUPKLSH : VX2_Int<718, "vupklsh", int_ppc_altivec_vupklsh>;
 
 
 // Altivec Comparisons.
 
 class VCMP<bits<10> xo, string asmstr, ValueType Ty>
   : VXRForm_1<xo, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), asmstr, VecFPCompare,
               [(set VRRC:$vD, (Ty (PPCvcmp VRRC:$vA, VRRC:$vB, xo)))]>;
 class VCMPo<bits<10> xo, string asmstr, ValueType Ty>
-  : VXRForm_1<xo, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB), asmstr, VecFPCompare,
-              [(set VRRC:$vD, (Ty (PPCvcmp_o VRRC:$vA, VRRC:$vB, xo)))]>, isVDOT;
+  : VXRForm_1<xo, (outs VRRC:$vD), (ins VRRC:$vA, VRRC:$vB), asmstr, VecFPCompare,
+              [(set VRRC:$vD, (Ty (PPCvcmp_o VRRC:$vA, VRRC:$vB, xo)))]> {
+  let Defs = [CR6];
+  let RC = 1;
+}
 
 // f32 element comparisons.
 def VCMPBFP   : VCMP <966, "vcmpbfp $vD, $vA, $vB"  , v4f32>;
@@ -416,38 +564,45 @@ def VCMPGTSWo : VCMPo<902, "vcmpgtsw. $vD, $vA, $vB", v4i32>;
$vD, $vA, $vB", v4i32>; def VCMPGTUW : VCMP <646, "vcmpgtuw $vD, $vA, $vB" , v4i32>; def VCMPGTUWo : VCMPo<646, "vcmpgtuw. $vD, $vA, $vB", v4i32>; -def V_SET0 : VXForm_setzero<1220, (ops VRRC:$vD), +def V_SET0 : VXForm_setzero<1220, (outs VRRC:$vD), (ins), "vxor $vD, $vD, $vD", VecFP, - [(set VRRC:$vD, (v4f32 immAllZerosV))]>; + [(set VRRC:$vD, (v4i32 immAllZerosV))]>; } //===----------------------------------------------------------------------===// // Additional Altivec Patterns // -// Undef/Zero. -def : Pat<(v16i8 (undef)), (v16i8 (IMPLICIT_DEF_VRRC))>; -def : Pat<(v8i16 (undef)), (v8i16 (IMPLICIT_DEF_VRRC))>; -def : Pat<(v4i32 (undef)), (v4i32 (IMPLICIT_DEF_VRRC))>; -def : Pat<(v16i8 immAllZerosV), (v16i8 (V_SET0))>; -def : Pat<(v8i16 immAllZerosV), (v8i16 (V_SET0))>; -def : Pat<(v4i32 immAllZerosV), (v4i32 (V_SET0))>; +// DS* intrinsics +def : Pat<(int_ppc_altivec_dssall), (DSSALL 1, 0, 0, 0)>; +def : Pat<(int_ppc_altivec_dss imm:$STRM), (DSS 0, imm:$STRM, 0, 0)>; + +// * 32-bit +def : Pat<(int_ppc_altivec_dst GPRC:$rA, GPRC:$rB, imm:$STRM), + (DST 0, imm:$STRM, GPRC:$rA, GPRC:$rB)>; +def : Pat<(int_ppc_altivec_dstt GPRC:$rA, GPRC:$rB, imm:$STRM), + (DSTT 1, imm:$STRM, GPRC:$rA, GPRC:$rB)>; +def : Pat<(int_ppc_altivec_dstst GPRC:$rA, GPRC:$rB, imm:$STRM), + (DSTST 0, imm:$STRM, GPRC:$rA, GPRC:$rB)>; +def : Pat<(int_ppc_altivec_dststt GPRC:$rA, GPRC:$rB, imm:$STRM), + (DSTSTT 1, imm:$STRM, GPRC:$rA, GPRC:$rB)>; + +// * 64-bit +def : Pat<(int_ppc_altivec_dst G8RC:$rA, GPRC:$rB, imm:$STRM), + (DST64 0, imm:$STRM, (i64 G8RC:$rA), GPRC:$rB)>; +def : Pat<(int_ppc_altivec_dstt G8RC:$rA, GPRC:$rB, imm:$STRM), + (DSTT64 1, imm:$STRM, (i64 G8RC:$rA), GPRC:$rB)>; +def : Pat<(int_ppc_altivec_dstst G8RC:$rA, GPRC:$rB, imm:$STRM), + (DSTST64 0, imm:$STRM, (i64 G8RC:$rA), GPRC:$rB)>; +def : Pat<(int_ppc_altivec_dststt G8RC:$rA, GPRC:$rB, imm:$STRM), + (DSTSTT64 1, imm:$STRM, (i64 G8RC:$rA), GPRC:$rB)>; // Loads. -def : Pat<(v16i8 (load xoaddr:$src)), (v16i8 (LVX xoaddr:$src))>; -def : Pat<(v8i16 (load xoaddr:$src)), (v8i16 (LVX xoaddr:$src))>; -def : Pat<(v4i32 (load xoaddr:$src)), (v4i32 (LVX xoaddr:$src))>; -def : Pat<(v4f32 (load xoaddr:$src)), (v4f32 (LVX xoaddr:$src))>; +def : Pat<(v4i32 (load xoaddr:$src)), (LVX xoaddr:$src)>; // Stores. -def : Pat<(store (v16i8 VRRC:$rS), xoaddr:$dst), - (STVX (v16i8 VRRC:$rS), xoaddr:$dst)>; -def : Pat<(store (v8i16 VRRC:$rS), xoaddr:$dst), - (STVX (v8i16 VRRC:$rS), xoaddr:$dst)>; def : Pat<(store (v4i32 VRRC:$rS), xoaddr:$dst), (STVX (v4i32 VRRC:$rS), xoaddr:$dst)>; -def : Pat<(store (v4f32 VRRC:$rS), xoaddr:$dst), - (STVX (v4f32 VRRC:$rS), xoaddr:$dst)>; // Bit conversions. def : Pat<(v16i8 (bitconvert (v8i16 VRRC:$src))), (v16i8 VRRC:$src)>; @@ -466,35 +621,40 @@ def : Pat<(v4f32 (bitconvert (v16i8 VRRC:$src))), (v4f32 VRRC:$src)>; def : Pat<(v4f32 (bitconvert (v8i16 VRRC:$src))), (v4f32 VRRC:$src)>; def : Pat<(v4f32 (bitconvert (v4i32 VRRC:$src))), (v4f32 VRRC:$src)>; -// Immediate vector formation with vsplti*. 
-def : Pat<(v16i8 vecspltisb:$invec), (v16i8 (VSPLTISB vecspltisb:$invec))>;
-def : Pat<(v16i8 vecspltish:$invec), (v16i8 (VSPLTISH vecspltish:$invec))>;
-def : Pat<(v16i8 vecspltisw:$invec), (v16i8 (VSPLTISW vecspltisw:$invec))>;
-
-def : Pat<(v8i16 vecspltisb:$invec), (v8i16 (VSPLTISB vecspltisb:$invec))>;
-def : Pat<(v8i16 vecspltish:$invec), (v8i16 (VSPLTISH vecspltish:$invec))>;
-def : Pat<(v8i16 vecspltisw:$invec), (v8i16 (VSPLTISW vecspltisw:$invec))>;
-
-def : Pat<(v4i32 vecspltisb:$invec), (v4i32 (VSPLTISB vecspltisb:$invec))>;
-def : Pat<(v4i32 vecspltish:$invec), (v4i32 (VSPLTISH vecspltish:$invec))>;
-def : Pat<(v4i32 vecspltisw:$invec), (v4i32 (VSPLTISW vecspltisw:$invec))>;
+// Shuffles.
+
+// Match vsldoi(x,x), vpkuwum(x,x), vpkuhum(x,x)
+def:Pat<(vsldoi_unary_shuffle:$in (v16i8 VRRC:$vA), undef),
+        (VSLDOI VRRC:$vA, VRRC:$vA, (VSLDOI_unary_get_imm VRRC:$in))>;
+def:Pat<(vpkuwum_unary_shuffle (v16i8 VRRC:$vA), undef),
+        (VPKUWUM VRRC:$vA, VRRC:$vA)>;
+def:Pat<(vpkuhum_unary_shuffle (v16i8 VRRC:$vA), undef),
+        (VPKUHUM VRRC:$vA, VRRC:$vA)>;
+
+// Match vmrg*(x,x)
+def:Pat<(vmrglb_unary_shuffle (v16i8 VRRC:$vA), undef),
+        (VMRGLB VRRC:$vA, VRRC:$vA)>;
+def:Pat<(vmrglh_unary_shuffle (v16i8 VRRC:$vA), undef),
+        (VMRGLH VRRC:$vA, VRRC:$vA)>;
+def:Pat<(vmrglw_unary_shuffle (v16i8 VRRC:$vA), undef),
+        (VMRGLW VRRC:$vA, VRRC:$vA)>;
+def:Pat<(vmrghb_unary_shuffle (v16i8 VRRC:$vA), undef),
+        (VMRGHB VRRC:$vA, VRRC:$vA)>;
+def:Pat<(vmrghh_unary_shuffle (v16i8 VRRC:$vA), undef),
+        (VMRGHH VRRC:$vA, VRRC:$vA)>;
+def:Pat<(vmrghw_unary_shuffle (v16i8 VRRC:$vA), undef),
+        (VMRGHW VRRC:$vA, VRRC:$vA)>;
 
 // Logical Operations
-def : Pat<(v16i8 (and VRRC:$A, VRRC:$B)), (v16i8 (VAND VRRC:$A, VRRC:$B))>;
-def : Pat<(v8i16 (and VRRC:$A, VRRC:$B)), (v8i16 (VAND VRRC:$A, VRRC:$B))>;
-def : Pat<(v16i8 (or VRRC:$A, VRRC:$B)), (v16i8 (VOR VRRC:$A, VRRC:$B))>;
-def : Pat<(v8i16 (or VRRC:$A, VRRC:$B)), (v8i16 (VOR VRRC:$A, VRRC:$B))>;
-def : Pat<(v16i8 (xor VRRC:$A, VRRC:$B)), (v16i8 (VXOR VRRC:$A, VRRC:$B))>;
-def : Pat<(v8i16 (xor VRRC:$A, VRRC:$B)), (v8i16 (VXOR VRRC:$A, VRRC:$B))>;
-def : Pat<(v16i8 (vnot (or VRRC:$A, VRRC:$B))),(v16i8 (VNOR VRRC:$A, VRRC:$B))>;
-def : Pat<(v8i16 (vnot (or VRRC:$A, VRRC:$B))),(v8i16 (VNOR VRRC:$A, VRRC:$B))>;
-def : Pat<(v16i8 (and VRRC:$A, (vnot VRRC:$B))),
-          (v16i8 (VANDC VRRC:$A, VRRC:$B))>;
-def : Pat<(v8i16 (and VRRC:$A, (vnot VRRC:$B))),
-          (v8i16 (VANDC VRRC:$A, VRRC:$B))>;
+def : Pat<(v4i32 (vnot_ppc VRRC:$vA)), (VNOR VRRC:$vA, VRRC:$vA)>;
+
+def : Pat<(v4i32 (vnot_ppc (or VRRC:$A, VRRC:$B))),
+          (VNOR VRRC:$A, VRRC:$B)>;
+def : Pat<(v4i32 (and VRRC:$A, (vnot_ppc VRRC:$B))),
+          (VANDC VRRC:$A, VRRC:$B)>;
 
 def : Pat<(fmul VRRC:$vA, VRRC:$vB),
-          (VMADDFP VRRC:$vA, VRRC:$vB, (V_SET0))>;
+          (VMADDFP VRRC:$vA, VRRC:$vB, (v4i32 (V_SET0)))>;
 
 // Fused multiply add and multiply sub for packed float.  These are represented
 // separately from the real instructions above, for operations that must have
@@ -508,14 +668,28 @@ def : Pat<(int_ppc_altivec_vmaddfp VRRC:$A, VRRC:$B, VRRC:$C),
           (VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
 def : Pat<(int_ppc_altivec_vnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
           (VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;
-def : Pat<(vector_shuffle (v4i32 VRRC:$vB), (undef), VSPLT_shuffle_mask:$UIMM),
-          (v4i32 (VSPLTW VSPLT_shuffle_mask:$UIMM, VRRC:$vB))>;
-
-def : Pat<(PPCvperm (v4i32 VRRC:$vA), VRRC:$vB, VRRC:$vC),
-          (v4i32 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;
-def : Pat<(PPCvperm (v4f32 VRRC:$vA), VRRC:$vB, VRRC:$vC),
-          (v4f32 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;
-def : Pat<(PPCvperm (v8i16 VRRC:$vA), VRRC:$vB, VRRC:$vC),
-          (v8i16 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;
+
 def : Pat<(PPCvperm (v16i8 VRRC:$vA), VRRC:$vB, VRRC:$vC),
-          (v16i8 (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC))>;
+          (VPERM VRRC:$vA, VRRC:$vB, VRRC:$vC)>;
+
+// Vector shifts
+def : Pat<(v16i8 (shl (v16i8 VRRC:$vA), (v16i8 VRRC:$vB))),
+          (v16i8 (VSLB VRRC:$vA, VRRC:$vB))>;
+def : Pat<(v8i16 (shl (v8i16 VRRC:$vA), (v8i16 VRRC:$vB))),
+          (v8i16 (VSLH VRRC:$vA, VRRC:$vB))>;
+def : Pat<(v4i32 (shl (v4i32 VRRC:$vA), (v4i32 VRRC:$vB))),
+          (v4i32 (VSLW VRRC:$vA, VRRC:$vB))>;
+
+def : Pat<(v16i8 (srl (v16i8 VRRC:$vA), (v16i8 VRRC:$vB))),
+          (v16i8 (VSRB VRRC:$vA, VRRC:$vB))>;
+def : Pat<(v8i16 (srl (v8i16 VRRC:$vA), (v8i16 VRRC:$vB))),
+          (v8i16 (VSRH VRRC:$vA, VRRC:$vB))>;
+def : Pat<(v4i32 (srl (v4i32 VRRC:$vA), (v4i32 VRRC:$vB))),
+          (v4i32 (VSRW VRRC:$vA, VRRC:$vB))>;
+
+def : Pat<(v16i8 (sra (v16i8 VRRC:$vA), (v16i8 VRRC:$vB))),
+          (v16i8 (VSRAB VRRC:$vA, VRRC:$vB))>;
+def : Pat<(v8i16 (sra (v8i16 VRRC:$vA), (v8i16 VRRC:$vB))),
+          (v8i16 (VSRAH VRRC:$vA, VRRC:$vB))>;
+def : Pat<(v4i32 (sra (v4i32 VRRC:$vA), (v4i32 VRRC:$vB))),
+          (v4i32 (VSRAW VRRC:$vA, VRRC:$vB))>;
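
Note on the [{ ... }] blocks in the PatFrag and PatLeaf definitions above: they are C++ predicate fragments that the SelectionDAG instruction selector evaluates against a candidate node, and a fragment only matches when its predicate returns true. The in-tree predicates such as PPC::isSplatShuffleMask operate on a ShuffleVectorSDNode. The standalone sketch below is illustrative only: the free function, its signature, and the plain std::vector<int> mask representation are assumptions for exposition, not LLVM's API. It shows the property the vspltb/vsplth/vspltw predicates test, namely that a 16-byte shuffle mask replicates one EltSize-byte element across the whole vector.

#include <vector>

// Illustrative sketch: does a 16-entry byte-shuffle mask splat a single
// EltSize-byte element?  (The in-tree PPC::isSplatShuffleMask checks the
// same property on a ShuffleVectorSDNode rather than a raw mask vector.)
static bool isSplatShuffleMask(const std::vector<int> &Mask, unsigned EltSize) {
  if (Mask.size() != 16 || EltSize == 0 || 16 % EltSize != 0)
    return false;
  // The first EltSize mask entries must name one contiguous source element.
  for (unsigned i = 1; i != EltSize; ++i)
    if (Mask[i] != Mask[0] + static_cast<int>(i))
      return false;
  // Every subsequent EltSize-entry group must repeat that first group.
  for (unsigned i = EltSize; i != 16; i += EltSize)
    for (unsigned j = 0; j != EltSize; ++j)
      if (Mask[i + j] != Mask[j])
        return false;
  return true;
}

int main() {
  // Splat of 4-byte element 1: bytes 4..7 repeated four times.  Under this
  // encoding it is the kind of mask vspltw_shuffle accepts with UIMM = 1.
  std::vector<int> M = {4,5,6,7, 4,5,6,7, 4,5,6,7, 4,5,6,7};
  return isSplatShuffleMask(M, 4) ? 0 : 1;
}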