Intrinsic<[llvm_int_ty, llvm_int_ty, llvm_v16i8_ty, llvm_v16i8_ty],
[InstrNoMem]>;
- // Saturating adds and subs.
+ // Saturating adds, subs, and multiply-adds.
def int_ppc_altivec_vaddubs : GCCBuiltin<"__builtin_altivec_vaddubs">,
Intrinsic<[llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
[InstrNoMem]>;
def int_ppc_altivec_vaddsws : GCCBuiltin<"__builtin_altivec_vaddsws">,
Intrinsic<[llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
[InstrNoMem]>;
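+ // vmhaddshs: vD[i] = signed_sat(((vA[i] * vB[i]) >> 15) + vC[i]) per
+ // signed halfword lane; vmhraddshs rounds the product (+0x4000) before
+ // the shift.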
+ def int_ppc_altivec_vmhaddshs : GCCBuiltin<"__builtin_altivec_vmhaddshs">,
+ Intrinsic<[llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_v8i16_ty], [InstrNoMem]>;
+ def int_ppc_altivec_vmhraddshs : GCCBuiltin<"__builtin_altivec_vmhraddshs">,
+ Intrinsic<[llvm_v8i16_ty, llvm_v8i16_ty,
+ llvm_v8i16_ty, llvm_v8i16_ty], [InstrNoMem]>;
def int_ppc_altivec_vmaddfp : GCCBuiltin<"__builtin_altivec_vmaddfp">,
Intrinsic<[llvm_v4f32_ty, llvm_v4f32_ty,
llvm_v4f32_ty, llvm_v4f32_ty], [InstrNoMem]>;
def int_ppc_altivec_vrfiz : GCCBuiltin<"__builtin_altivec_vrfiz">,
Intrinsic<[llvm_v4f32_ty, llvm_v4f32_ty], [InstrNoMem]>;
+ // Merges.
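+ // vmrgh* interleave elements from the high halves of vA and vB
+ // (vD = { vA[0], vB[0], vA[1], vB[1], ... }); vmrgl* use the low halves.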
+ def int_ppc_altivec_vmrghh : GCCBuiltin<"__builtin_altivec_vmrghh">,
+ Intrinsic<[llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [InstrNoMem]>;
+ def int_ppc_altivec_vmrghw : GCCBuiltin<"__builtin_altivec_vmrghw">,
+ Intrinsic<[llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [InstrNoMem]>;
+ def int_ppc_altivec_vmrglh : GCCBuiltin<"__builtin_altivec_vmrglh">,
+ Intrinsic<[llvm_v8i16_ty, llvm_v8i16_ty, llvm_v8i16_ty],
+ [InstrNoMem]>;
+ def int_ppc_altivec_vmrglw : GCCBuiltin<"__builtin_altivec_vmrglw">,
+ Intrinsic<[llvm_v4i32_ty, llvm_v4i32_ty, llvm_v4i32_ty],
+ [InstrNoMem]>;
+
// Left Shifts.
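+ // vsldoi concatenates vA:vB and extracts 16 bytes starting at the
+ // immediate byte offset.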
def int_ppc_altivec_vsldoi : GCCBuiltin<"__builtin_altivec_vsldoi_4si">,
Intrinsic<[llvm_v4i32_ty, llvm_v4i32_ty,
llvm_v4i32_ty, llvm_i32_ty],
[InstrNoMem]>;
// Miscellaneous.
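+ // vperm: result byte i is (vA:vB)[vC[i] & 0x1F], a byte chosen from the
+ // 32-byte concatenation of vA and vB.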
+ def int_ppc_altivec_vperm : GCCBuiltin<"__builtin_altivec_vperm_4si">,
+ Intrinsic<[llvm_v4i32_ty, llvm_v4i32_ty,
+ llvm_v4i32_ty, llvm_v16i8_ty], [InstrNoMem]>;
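+ // vsel is a bitwise select: vD = (vB & vC) | (vA & ~vC).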
def int_ppc_altivec_vsel : GCCBuiltin<"__builtin_altivec_vsel_4si">,
Intrinsic<[llvm_v4i32_ty, llvm_v4i32_ty,
llvm_v4i32_ty, llvm_v4i32_ty], [InstrNoMem]>;
[(set VRRC:$vD, (fneg (fsub (fmul VRRC:$vA, VRRC:$vC),
VRRC:$vB)))]>,
Requires<[FPContractions]>;
-
+def VMHADDSHS : VAForm_1a<32, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
+ "vmhaddshs $vD, $vA, $vB, $vC", VecFP,
+ [(set VRRC:$vD,
+ (int_ppc_altivec_vmhaddshs VRRC:$vA, VRRC:$vB, VRRC:$vC))]>;
+def VMHRADDSHS : VAForm_1a<33, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
+ "vmhraddshs $vD, $vA, $vB, $vC", VecFP,
+ [(set VRRC:$vD,
+ (int_ppc_altivec_vmhraddshs VRRC:$vA, VRRC:$vB, VRRC:$vC))]>;
def VPERM : VAForm_1a<43, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB, VRRC:$vC),
"vperm $vD, $vA, $vB, $vC", VecPerm,
[(set VRRC:$vD,
def VMINFP : VXForm_1<1098, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
"vminfp $vD, $vA, $vB", VecFP,
[]>;
+def VMRGHH : VXForm_1<76, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+ "vmrghh $vD, $vA, $vB", VecFP,
+ [(set VRRC:$vD,
+ (int_ppc_altivec_vmrghh VRRC:$vA, VRRC:$vB))]>;
+def VMRGHW : VXForm_1<140, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+ "vmrghw $vD, $vA, $vB", VecFP,
+ [(set VRRC:$vD,
+ (int_ppc_altivec_vmrghw VRRC:$vA, VRRC:$vB))]>;
+def VMRGLH : VXForm_1<332, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+ "vmrglh $vD, $vA, $vB", VecFP,
+ [(set VRRC:$vD,
+ (int_ppc_altivec_vmrglh VRRC:$vA, VRRC:$vB))]>;
+def VMRGLW : VXForm_1<396, (ops VRRC:$vD, VRRC:$vA, VRRC:$vB),
+ "vmrglw $vD, $vA, $vB", VecFP,
+ [(set VRRC:$vD,
+ (int_ppc_altivec_vmrglw VRRC:$vA, VRRC:$vB))]>;
def VREFP : VXForm_2<266, (ops VRRC:$vD, VRRC:$vB),
"vrefp $vD, $vB", VecFP,
[(set VRRC:$vD, (int_ppc_altivec_vrefp VRRC:$vB))]>;
(VMADDFP VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(int_ppc_altivec_vnmsubfp VRRC:$A, VRRC:$B, VRRC:$C),
(VNMSUBFP VRRC:$A, VRRC:$B, VRRC:$C)>;
-
+def : Pat<(int_ppc_altivec_vperm VRRC:$A, VRRC:$B, VRRC:$C),
+ (VPERM VRRC:$A, VRRC:$B, VRRC:$C)>;
def : Pat<(vector_shuffle (v4i32 VRRC:$vB), (undef), VSPLT_shuffle_mask:$UIMM),
(v4i32 (VSPLTW VSPLT_shuffle_mask:$UIMM, VRRC:$vB))>;
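
A minimal usage sketch (illustrative, not part of this patch), assuming GCC
or Clang with -maltivec: the <altivec.h> operations vec_madds and vec_perm
lower to the vmhaddshs and vperm instructions wired up above. The helper
function names here are hypothetical.

#include <altivec.h>

/* vmhaddshs: per-lane signed_sat(((a * b) >> 15) + c) */
vector signed short mul_hi_add_sat(vector signed short a,
                                   vector signed short b,
                                   vector signed short c) {
  return vec_madds(a, b, c);
}

/* vperm: result byte i is (a:b)[ctrl[i] & 0x1F] */
vector unsigned char permute_bytes(vector unsigned char a,
                                   vector unsigned char b,
                                   vector unsigned char ctrl) {
  return vec_perm(a, b, ctrl);
}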